From 81bd6fdb400d56336b01265283f171f13ad48416 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 09:43:09 +0000 Subject: [PATCH 01/20] replace tab with spaces --- .../cpu_train_best_practice_en.rst | 34 +-- docs/api/paddle/shard_index_cn.rst | 2 +- .../distributed/async_training_en.rst | 18 +- .../distributed/cluster_train_data_en.rst | 66 ++-- ...large_scale_sparse_feature_training_en.rst | 10 +- .../distributed/sync_training_en.rst | 38 +-- .../low_level/layers/activations.rst | 6 +- .../low_level/layers/activations_en.rst | 6 +- docs/api_guides/low_level/layers/pooling.rst | 2 +- docs/design/phi/design.md | 286 +++++++++--------- .../api_design_guidelines_standard_cn.md | 27 +- .../new_python_api_cn.md | 4 +- .../dev_guides/docs_contributing_guides_cn.md | 14 +- .../git_guides/local_dev_guide_cn.md | 10 +- .../10_contribution/docs_contribution.md | 14 +- docs/guides/flags/debug_cn.rst | 6 +- docs/guides/flags/debug_en.rst | 6 +- .../analysis_tools/index_cn.rst | 8 +- .../analysis_tools/index_en.rst | 8 +- docs/install/compile/fromsource.rst | 18 +- docs/install/compile/fromsource_en.rst | 10 +- docs/install/conda/fromconda.rst | 10 +- docs/install/conda/fromconda_en.rst | 10 +- docs/install/docker/fromdocker.rst | 6 +- docs/install/docker/fromdocker_en.rst | 6 +- docs/install/index_cn.rst | 16 +- docs/install/index_en.rst | 14 +- docs/install/install_script.md | 14 +- docs/install/pip/frompip.rst | 10 +- docs/install/pip/frompip_en.rst | 10 +- 30 files changed, 344 insertions(+), 345 deletions(-) diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst index f072828b153..8b8a9914b02 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst +++ 
b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst @@ -20,21 +20,21 @@ For detailed API usage, please refer to :ref:`api_fluid_ParallelExecutor` . A si .. code-block:: python - # Configure the execution strategy, mainly to set the number of threads - exec_strategy = fluid.ExecutionStrategy() - exec_strategy.num_threads = 8 - - # Configure the composition strategy, for CPU training, you should use the Reduce mode for training. - build_strategy = fluid.BuildStrategy() - if int(os.getenv("CPU_NUM")) > 1: - build_strategy.reduce_strategy=fluid.BuildStrategy.ReduceStrategy.Reduce - - pe = fluid.ParallelExecutor( - use_cuda=False, - loss_name=avg_cost.name, - main_program=main_program, - build_strategy=build_strategy, - exec_strategy=exec_strategy) + # Configure the execution strategy, mainly to set the number of threads + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = 8 + + # Configure the composition strategy, for CPU training, you should use the Reduce mode for training. + build_strategy = fluid.BuildStrategy() + if int(os.getenv("CPU_NUM")) > 1: + build_strategy.reduce_strategy=fluid.BuildStrategy.ReduceStrategy.Reduce + + pe = fluid.ParallelExecutor( + use_cuda=False, + loss_name=avg_cost.name, + main_program=main_program, + build_strategy=build_strategy, + exec_strategy=exec_strategy) Among the parameters above: @@ -54,8 +54,8 @@ To reduce the amount of communication data and improve communication speed is ac .. 
code-block:: python - data = fluid.layers.data(name='ids', shape=[1], dtype='int64') - fc = fluid.layers.embedding(input=data, size=[dict_size, 16], is_sparse=True) + data = fluid.layers.data(name='ids', shape=[1], dtype='int64') + fc = fluid.layers.embedding(input=data, size=[dict_size, 16], is_sparse=True) Among the parameters above: diff --git a/docs/api/paddle/shard_index_cn.rst b/docs/api/paddle/shard_index_cn.rst index 4c6d8c5ff9b..4caf4e2d19e 100644 --- a/docs/api/paddle/shard_index_cn.rst +++ b/docs/api/paddle/shard_index_cn.rst @@ -10,7 +10,7 @@ shard_index :: shard_size = (index_num + nshards - 1) // nshards - + 对于输入\ `input`\ 中的每个值\ `v`\,我们根据下面的公式设置它新的值: :: diff --git a/docs/api_guides/low_level/distributed/async_training_en.rst b/docs/api_guides/low_level/distributed/async_training_en.rst index d2646559163..9ee37a3412c 100644 --- a/docs/api_guides/low_level/distributed/async_training_en.rst +++ b/docs/api_guides/low_level/distributed/async_training_en.rst @@ -15,15 +15,15 @@ For detailed API, please refer to :ref:`api_fluid_transpiler_DistributeTranspile .. code-block:: python - config = fluid.DistributeTranspilerConfig() - #Configuring config policy - config.slice_var_up = False - t = fluid.DistributeTranspiler(config=config) - t.transpile(trainer_id, - program=main_program, - pservers="192.168.0.1:6174,192.168.0.2:6174", - trainers=1, - sync_mode=False) + config = fluid.DistributeTranspilerConfig() + #Configuring config policy + config.slice_var_up = False + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + pservers="192.168.0.1:6174,192.168.0.2:6174", + trainers=1, + sync_mode=False) For the description of parameters above, please refer to `Sync Training <../distributed/sync_training_en.html>`_ . 
diff --git a/docs/api_guides/low_level/distributed/cluster_train_data_en.rst b/docs/api_guides/low_level/distributed/cluster_train_data_en.rst index 3c810417c5e..b39b6cefc48 100644 --- a/docs/api_guides/low_level/distributed/cluster_train_data_en.rst +++ b/docs/api_guides/low_level/distributed/cluster_train_data_en.rst @@ -11,18 +11,18 @@ Read datasets in distributed training by defining a cluster_reader Generally, you can implement a cluster_reader, regarding the number of training processes and the process serial number(i.e. trainer_id) to decide which data to read: - .. code-block:: python - - def cluster_reader(reader, trainers, trainer_id): - def reader_creator(): - for idx, data in enumerate(reader()): - if idx % trainers == trainer_id: - yield data - return reader + .. code-block:: python + + def cluster_reader(reader, trainers, trainer_id): + def reader_creator(): + for idx, data in enumerate(reader()): + if idx % trainers == trainer_id: + yield data + return reader - trainers = int(os.getenv("PADDLE_TRAINERS", "1")) - trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) - train_reader = cluster_reader(paddle.dataset.mnist.train(), trainers, trainer_id) + trainers = int(os.getenv("PADDLE_TRAINERS", "1")) + trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + train_reader = cluster_reader(paddle.dataset.mnist.train(), trainers, trainer_id) In the code above, `trainers` and `trainer_id` are respectively the total number of training processes and the serial number of the current training process, which can be passed to the Python program through environment variables or parameters. @@ -33,32 +33,32 @@ Since `cluster_reader` is still used to read the full set of data, for tasks wit For example, in a Linux system, the training data can be split into multiple small files using the `split `_ command: .. code-block:: bash - $ split -d -a 4 -d -l 100 housing.data cluster/housing.data. 
- $ find ./cluster - cluster/ - cluster/housing.data.0002 - cluster/housing.data.0003 - cluster/housing.data.0004 - cluster/housing.data.0000 - cluster/housing.data.0001 - cluster/housing.data.0005 + $ split -d -a 4 -d -l 100 housing.data cluster/housing.data. + $ find ./cluster + cluster/ + cluster/housing.data.0002 + cluster/housing.data.0003 + cluster/housing.data.0004 + cluster/housing.data.0000 + cluster/housing.data.0001 + cluster/housing.data.0005 After the data is split, you can define a file_dispatcher function that determines which files need to be read based on the number of training processes and the serial number: - .. code-block:: python + .. code-block:: python - def file_dispatcher(files_pattern, trainers, trainer_id): - file_list = glob.glob(files_pattern) - ret_list = [] - for idx, f in enumerate(file_list): - if (idx + trainers) % trainers == trainer_id: - ret_list.append(f) - return ret_list - - trainers = int(os.getenv("PADDLE_TRAINERS", "1")) - trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) - files_pattern = "cluster/housing.data.*" + def file_dispatcher(files_pattern, trainers, trainer_id): + file_list = glob.glob(files_pattern) + ret_list = [] + for idx, f in enumerate(file_list): + if (idx + trainers) % trainers == trainer_id: + ret_list.append(f) + return ret_list + + trainers = int(os.getenv("PADDLE_TRAINERS", "1")) + trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + files_pattern = "cluster/housing.data.*" - my_files = file_dispatcher(files_pattern, triners, trainer_id) + my_files = file_dispatcher(files_pattern, triners, trainer_id) In the example above, `files_pattern` is a `glob expression `_ of the training file and can generally be represented by a wildcard. 
diff --git a/docs/api_guides/low_level/distributed/large_scale_sparse_feature_training_en.rst b/docs/api_guides/low_level/distributed/large_scale_sparse_feature_training_en.rst index 932697f8a8b..f1c27668eb7 100644 --- a/docs/api_guides/low_level/distributed/large_scale_sparse_feature_training_en.rst +++ b/docs/api_guides/low_level/distributed/large_scale_sparse_feature_training_en.rst @@ -23,11 +23,11 @@ Parameter :code:`dict_size` defines the total number of ids in the data. The id .. code-block:: python emb = fluid.layers.embedding( - is_distributed=True, - input=input, - size=[dict_size, embedding_width], - is_sparse=True, - is_distributed=True) + is_distributed=True, + input=input, + size=[dict_size, embedding_width], + is_sparse=True, + is_distributed=True) Model storage and inference diff --git a/docs/api_guides/low_level/distributed/sync_training_en.rst b/docs/api_guides/low_level/distributed/sync_training_en.rst index 6b8cd734316..fefb7818099 100644 --- a/docs/api_guides/low_level/distributed/sync_training_en.rst +++ b/docs/api_guides/low_level/distributed/sync_training_en.rst @@ -13,15 +13,15 @@ For API Reference, please refer to :ref:`DistributeTranspiler`. A simple example .. code-block:: python - config = fluid.DistributeTranspilerConfig() - #Configuring policy config - config.slice_var_up = False - t = fluid.DistributeTranspiler(config=config) - t.transpile(trainer_id, - program=main_program, - pservers="192.168.0.1:6174,192.168.0.2:6174", - trainers=1, - sync_mode=True) + config = fluid.DistributeTranspilerConfig() + #Configuring policy config + config.slice_var_up = False + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + pservers="192.168.0.1:6174,192.168.0.2:6174", + trainers=1, + sync_mode=True) Among parameters above: @@ -29,8 +29,8 @@ Among parameters above: - :code:`program` : The :code:`program` to transpile, where :code:`fluid.default_main_program()` is used by default. 
- :code:`pservers` : list of IP ports of the pserver nodes in the current training task. - :code:`trainers` : int type, the number of trainer nodes in the current training task. Please note: - * In pserver mode, the number of trainer nodes can be different from the number of pserver nodes, such as 20 pservers and 50 trainers. In practical training tasks, you can get the best performance by adjusting the number of pserver nodes and trainer nodes. - * In NCCL2 mode, this parameter is a string specifying the IP port list of the trainer nodes. + * In pserver mode, the number of trainer nodes can be different from the number of pserver nodes, such as 20 pservers and 50 trainers. In practical training tasks, you can get the best performance by adjusting the number of pserver nodes and trainer nodes. + * In NCCL2 mode, this parameter is a string specifying the IP port list of the trainer nodes. - :code:`sync_mode` : Whether it is in synchronous training mode, the default is True. Even though this parameter is not set, it is the synchronous training mode by default. @@ -63,14 +63,14 @@ Use the following code to convert the current :code:`Program` to a Fluid :code:` .. 
code-block:: python - Config = fluid.DistributeTranspilerConfig() - Config.mode = "nccl2" - t = fluid.DistributeTranspiler(config=config) - t.transpile(trainer_id, - program=main_program, - startup_program=startup_program, - trainers="192.168.0.1:6174,192.168.0.2:6174", - current_endpoint="192.168.0.1:6174") + Config = fluid.DistributeTranspilerConfig() + Config.mode = "nccl2" + t = fluid.DistributeTranspiler(config=config) + t.transpile(trainer_id, + program=main_program, + startup_program=startup_program, + trainers="192.168.0.1:6174,192.168.0.2:6174", + current_endpoint="192.168.0.1:6174") Among them: diff --git a/docs/api_guides/low_level/layers/activations.rst b/docs/api_guides/low_level/layers/activations.rst index 4f193cb6f7e..020b6f26e74 100644 --- a/docs/api_guides/low_level/layers/activations.rst +++ b/docs/api_guides/low_level/layers/activations.rst @@ -17,12 +17,12 @@ PaddlePaddle Fluid 对大部分的激活函数进行了支持,其中有: .. code-block:: python - conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") - Fluid为每个Activation提供了接口,我们可以显式的对它们进行调用。 .. code-block:: python - conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3) - relu1 = fluid.layers.relu(conv2d) + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3) + relu1 = fluid.layers.relu(conv2d) diff --git a/docs/api_guides/low_level/layers/activations_en.rst b/docs/api_guides/low_level/layers/activations_en.rst index 0273d83e617..53829ae5696 100644 --- a/docs/api_guides/low_level/layers/activations_en.rst +++ b/docs/api_guides/low_level/layers/activations_en.rst @@ -38,12 +38,12 @@ PaddlePaddle Fluid supports most of the activation functions, including: .. 
code-block:: python - conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") - Fluid provides an interface for each Activation, and we can explicitly call it. .. code-block:: python - conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3) - relu1 = fluid.layers.relu(conv2d) + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3) + relu1 = fluid.layers.relu(conv2d) diff --git a/docs/api_guides/low_level/layers/pooling.rst b/docs/api_guides/low_level/layers/pooling.rst index 56d90c190ae..3ef868bf28c 100644 --- a/docs/api_guides/low_level/layers/pooling.rst +++ b/docs/api_guides/low_level/layers/pooling.rst @@ -35,7 +35,7 @@ PaddlePaddle中有针对定长图像特征的二维(pool2d)、三维卷积(pool3 - 非\ ``ceil_mode``\ 下:\ ``输出大小 = (输入大小 - filter size + 2 * padding) / stride(步长) + 1`` - ``ceil_mode``\ 下:\ ``输出大小 = (输入大小 - filter size + 2 * padding + stride - 1) / stride + 1`` - + api汇总: diff --git a/docs/design/phi/design.md b/docs/design/phi/design.md index 12c7f13c689..62a7dc4a468 100644 --- a/docs/design/phi/design.md +++ b/docs/design/phi/design.md @@ -24,11 +24,11 @@ 1. 当一个Op去复用另一个Op的`Opkernel::Compute`方法,都需要先构造一个`ExecutionContext`,复用上是比较繁琐的 - - 如果能直接调用一个函数形式的Kernel,就会方便很多 + - 如果能直接调用一个函数形式的Kernel,就会方便很多 2. 
由于额外的数据结构构造及独立Op调度引入了开销,从计算性能的角度考虑,复用Op不如直接把计算代码copy过来,导致我们逐渐抛弃了早期反向Op复用前向Op的原则,开始为每个反向Op单独实现Kernel - - 只有Op之前复用的开销足够小,复用已有Op实现新Op才有可能被大范围推广 + - 只有Op之前复用的开销足够小,复用已有Op实现新Op才有可能被大范围推广 ### 1.1.2 执行调度的简洁性与细粒度化 @@ -94,32 +94,32 @@ Python 2.0 API项目规范了Paddle Python端API的参数列表,使其变得 - 在目录设计上支持算子库的各种拆分编译需求,包括 - - 按运算设备拆分编译 - - 例如:仅编译cpu的,或者仅编译cuda的 - - 按训练和推理场景拆分编译 - - 例如:推理不编译反向相关kernel,也不编译带有Intermediate输出的前向kernel - - 按移动端设备实际使用算子精准裁剪编译(目前尚未支持) - - 例如:一个模型只用了add和mul,极致情况下应该能裁到仅剩2个kernel + - 按运算设备拆分编译 + - 例如:仅编译cpu的,或者仅编译cuda的 + - 按训练和推理场景拆分编译 + - 例如:推理不编译反向相关kernel,也不编译带有Intermediate输出的前向kernel + - 按移动端设备实际使用算子精准裁剪编译(目前尚未支持) + - 例如:一个模型只用了add和mul,极致情况下应该能裁到仅剩2个kernel - 长线上支持良好的kernel复用实现需求 - - 解释:kernel复用实现时,能否通过简单的include引入对应函数,不会因为目录过于复杂而找不到复用的kernel + - 解释:kernel复用实现时,能否通过简单的include引入对应函数,不会因为目录过于复杂而找不到复用的kernel - 长线上支持跨设备kernel的写法统一需求,并且直观易用,不引入不必要的模板参数 - - 解释:算子库下层还有Kernel Primitive API模块,其长线愿景是每个运算,只要一个kernel,能够适应多种设备,真正区分设备的代码,仅在Kernel Primitive API实现中;不希望未来的kernel在复用时从传入较复杂的模板参数,需要尽可能限制地简洁一些 + - 解释:算子库下层还有Kernel Primitive API模块,其长线愿景是每个运算,只要一个kernel,能够适应多种设备,真正区分设备的代码,仅在Kernel Primitive API实现中;不希望未来的kernel在复用时从传入较复杂的模板参数,需要尽可能限制地简洁一些 - 易用性上,开发者能精准理解自己新增Kernel应该放到什么位置,无歧义 - - 解释:开发者新增一个API,不会困惑自己应该将对应kernel放在那个目录,也不会出现不同的人对于同一个kernel应该放在什么位置出现二义性的理解 + - 解释:开发者新增一个API,不会困惑自己应该将对应kernel放在那个目录,也不会出现不同的人对于同一个kernel应该放在什么位置出现二义性的理解 - 不引入大量的重复目录设计 - - 解释:概念拆分是需要的,但也要有边界,避免在多个目录下有命名相同的子目录,容易混乱,比如不能cpu下面有eigen, funcs, math等,gpu下面也有。新算子库的目录设计以根据设备拆分为主,其他层次的目录拆分尽可能弱化,比如尽量不根据功能拆分,尽量不根据领域拆分等 + - 解释:概念拆分是需要的,但也要有边界,避免在多个目录下有命名相同的子目录,容易混乱,比如不能cpu下面有eigen, funcs, math等,gpu下面也有。新算子库的目录设计以根据设备拆分为主,其他层次的目录拆分尽可能弱化,比如尽量不根据功能拆分,尽量不根据领域拆分等 - 不造成迁移时的文件数目膨胀 - - 解释:不能因为kernel设备拆分,导致kernel实现文件大规模增多 + - 解释:不能因为kernel设备拆分,导致kernel实现文件大规模增多 - 不引入层级过深的目录设计 - - 解释:目录层级不应过深,理解和维护成本都较高 + - 解释:目录层级不应过深,理解和维护成本都较高 - 不引入过高的迁移成本 - - 解释:迁移kernel时,不能要求对kernel本身做太多改动和拆分,否则迁移成本太高 + - 解释:迁移kernel时,不能要求对kernel本身做太多改动和拆分,否则迁移成本太高 ### 2.2.2 具体目录设计 @@ -128,8 +128,8 @@ Python 2.0 API项目规范了Paddle 
Python端API的参数列表,使其变得 ``` paddle/phi ./api (对外暴露的高层API及其实现) - ./include(对外暴露的高层API头文件) - ./lib(对外暴露API的实现) + ./include(对外暴露的高层API头文件) + ./lib(对外暴露API的实现) ./common (内外部均会使用到的基础数据结构) ./core (基础组件,比如基础Tensor相关接口,kernel注册接口,管理单元等) ./backends (各设备及后端的基础组件,下设cpu,gpu等后端目录) @@ -142,18 +142,18 @@ paddle/phi 部分目录结构说明: - `api`:API模块,面向外部用户 - - 直接使用类Python的C++ Tensor计算 API,和Python端形式高度一致 - - 该部分可能反向依赖框架的DeviceContextPool等实现,所以单独管理 - - 在该类API上,训练和预测也可能是不同的 + - 直接使用类Python的C++ Tensor计算 API,和Python端形式高度一致 + - 该部分可能反向依赖框架的DeviceContextPool等实现,所以单独管理 + - 在该类API上,训练和预测也可能是不同的 - `common`:phi内部及phi api目录均要使用的数据结构,这些数据结构既不属于phi core,也不属于api目录 - `core`:phi内部会有一些自己需要的,公用的模块实现,比如基础DenseTensor、kernel注册及管理模块 - `backends`:backends中组织后续需要为各个后端的新增的数据结构,比如CPUContext、GPUContext等 - - core中放置对于算子库来讲通用的基础数据结构,而特定后端的专用数据结构不放在core中,且依赖关系严格保证backends依赖core,但core不能依赖backends - - 例1:Context如果有基类,则在core中,而继承的CPUContext在backends/cpu中,GPUContext在baackends/gpu中 - - 例2:TensorBase在core中,DenseTensor给多数设备使用,也在core中,如果有MKLDNNTensor的话,因为它只给mkldnn用,应该在backends/dnnl中 + - core中放置对于算子库来讲通用的基础数据结构,而特定后端的专用数据结构不放在core中,且依赖关系严格保证backends依赖core,但core不能依赖backends + - 例1:Context如果有基类,则在core中,而继承的CPUContext在backends/cpu中,GPUContext在baackends/gpu中 + - 例2:TensorBase在core中,DenseTensor给多数设备使用,也在core中,如果有MKLDNNTensor的话,因为它只给mkldnn用,应该在backends/dnnl中 - `infermeta`: infermeta函数的整理位置,infermeta函数相当于infershape+inferdtype+inferlayout等 - `kernels`:各设备相关kernels - - `cpu, gpu, ...` + - `cpu, gpu, ...` - `ops`: ops中组织新形式的Op定义、以及兼容原有op的一些组件 @@ -176,29 +176,29 @@ paddle/phi/kernels 目录结构说明如下: - kernels下主目录,放置设备无关的kernel.h和kernel.cc,原则上每个kernel一个.h和.cc - - 例如一个kernel是使用Primitive api实现的,或者是复用其他基础kernel实现的,那么不论在什么设备上,应该都只有一种实现,所以它的声明和实现均直接放置到kernels目录下即可(这是将来的理想状态) - - 目前我们大部分kernel都不具备跨设备实现统一的特征,但是kernel的输入参数返回值除了DeviceContext之外,应该是一致的,所以kernel参数声明头文件还放到主目录下(和原先的设计保持一致,DeviceContext和T作为模板参数),各设备的函数实现在相应的设备文件夹中 - - 注意,这里跨设备实现统一,并不是指一个kernel的CPU和GPU实现就算统一了,而是在所有设备的实现都一样,目前至少包括CPU,GPU,XPU,MKLDNN,GPUDNN等 - - 
反向kernel如果不需要支持裁剪,可以做适当归并(但如果要为支持端侧训练留可能性,反向kernel可能也是裁剪的潜在目标) + - 例如一个kernel是使用Primitive api实现的,或者是复用其他基础kernel实现的,那么不论在什么设备上,应该都只有一种实现,所以它的声明和实现均直接放置到kernels目录下即可(这是将来的理想状态) + - 目前我们大部分kernel都不具备跨设备实现统一的特征,但是kernel的输入参数返回值除了DeviceContext之外,应该是一致的,所以kernel参数声明头文件还放到主目录下(和原先的设计保持一致,DeviceContext和T作为模板参数),各设备的函数实现在相应的设备文件夹中 + - 注意,这里跨设备实现统一,并不是指一个kernel的CPU和GPU实现就算统一了,而是在所有设备的实现都一样,目前至少包括CPU,GPU,XPU,MKLDNN,GPUDNN等 + - 反向kernel如果不需要支持裁剪,可以做适当归并(但如果要为支持端侧训练留可能性,反向kernel可能也是裁剪的潜在目标) - kernels下一级子目录,原则上按照backend分类按需新建,仅保留两个特殊的目录: - - funcs:为了兼容原先fluid operators中functor和function设计保留的目录,放置支持多种后端的function和functor,还按照原先的一个头文件,多个.cc(u)的方式组织(这部分代码在将来可能被移除,因为会逐渐被Kernel Primirive API及Kernel间复用替代,这里不做过度设计) - - 例1:一个公共函数XXXFunction在reduce CPU和reduce CUDA的kernel实现中都被调用,并且reduce CPU和reduce GPU的kernel实现是不一样的,那么这个XXXFunction应该在funcs目录中 - - primitive:Kernel Primitive API,多设备统一kernel实现的一些基础工具 - - impl:paddle目前的op kernel实现,有很多仍然是CPU和GPU复用同一份代码的,在大量的xx_op.h,这部分代码,不适合放在cpu或者gpu目录中,也不适合放在funcs目录中(会导致funcs目录中最终放置了相当一部分kernel实现,过于臃肿且混乱,funcs目录的定位是放置原先operators/math目录下那样的工具functor和function),也不适合放到kernels根目录下(并不是真正设备无关的实现,仅是cpu和gpu共用的实现),因此为了使这部分代码迁移时不需要做过多考虑,并且放置的位置也相对符合其实现性质,创建了impl这个目录 - - impl目录下,仅放置跨部分设备实现一致的kernel函数,均为头文件,命名均以xxx_kernel_impl.h为后缀 - - 例如:scale,fill_constant,fill_any_like这些kernel均属于此类情况 + - funcs:为了兼容原先fluid operators中functor和function设计保留的目录,放置支持多种后端的function和functor,还按照原先的一个头文件,多个.cc(u)的方式组织(这部分代码在将来可能被移除,因为会逐渐被Kernel Primirive API及Kernel间复用替代,这里不做过度设计) + - 例1:一个公共函数XXXFunction在reduce CPU和reduce CUDA的kernel实现中都被调用,并且reduce CPU和reduce GPU的kernel实现是不一样的,那么这个XXXFunction应该在funcs目录中 + - primitive:Kernel Primitive API,多设备统一kernel实现的一些基础工具 + - impl:paddle目前的op kernel实现,有很多仍然是CPU和GPU复用同一份代码的,在大量的xx_op.h,这部分代码,不适合放在cpu或者gpu目录中,也不适合放在funcs目录中(会导致funcs目录中最终放置了相当一部分kernel实现,过于臃肿且混乱,funcs目录的定位是放置原先operators/math目录下那样的工具functor和function),也不适合放到kernels根目录下(并不是真正设备无关的实现,仅是cpu和gpu共用的实现),因此为了使这部分代码迁移时不需要做过多考虑,并且放置的位置也相对符合其实现性质,创建了impl这个目录 + - 
impl目录下,仅放置跨部分设备实现一致的kernel函数,均为头文件,命名均以xxx_kernel_impl.h为后缀 + - 例如:scale,fill_constant,fill_any_like这些kernel均属于此类情况 - kernel迁移过来之后,首先创建对应kenrel头文件直接放置到kernels的根目录中,各后端的kernel实现放在相应的设备文件夹中 - - 可参考原先op的归并程度,如matmul原先是单独的.h/.cc,那移过来之后保持,但activation相关的基本写在一个.h/.cc,移过来也仍然保持归并(后续有必要再进一步拆分) - - 例1:原先cast op的Kernel在cast_op.h中,迁移过来之后在根目录创建cast_kernel.h,cast_kernel.cc/cu根据使用的后端放到对应的目录,即cast_kernel.cc放置到cpu中,cast_kernel.cu放置到gpu中 - - 例2:原先scale op的kernel使用eigen实现,CPU和GPU实现一致,迁移过来之后,公共实现应该在impl中的scale_kernel_impl.h中,公共头文件在kernels根目录下的scale_kernel.h中,scale_kernel.cc在cpu中,scale_kernel.cu在gpu中 + - 可参考原先op的归并程度,如matmul原先是单独的.h/.cc,那移过来之后保持,但activation相关的基本写在一个.h/.cc,移过来也仍然保持归并(后续有必要再进一步拆分) + - 例1:原先cast op的Kernel在cast_op.h中,迁移过来之后在根目录创建cast_kernel.h,cast_kernel.cc/cu根据使用的后端放到对应的目录,即cast_kernel.cc放置到cpu中,cast_kernel.cu放置到gpu中 + - 例2:原先scale op的kernel使用eigen实现,CPU和GPU实现一致,迁移过来之后,公共实现应该在impl中的scale_kernel_impl.h中,公共头文件在kernels根目录下的scale_kernel.h中,scale_kernel.cc在cpu中,scale_kernel.cu在gpu中 - 迁移时,只有本kernel用到的辅助函数,一律和kernel实现放到同一个backend文件中,创建.h管理代码,不再单独在别处整理代码,除非这些辅助的函数实现是有多处使用的 - - 即使有多处调用,如果仍然限于同一设备,直接建头文件放到同一个目录下 + - 即使有多处调用,如果仍然限于同一设备,直接建头文件放到同一个目录下 - 反向kernel与前向kernel实现放置在不同的文件中,文件后缀采用``*_grad_kernel.*``,便于cmake分离编译 - - 不再为反向kernel单独创建目录,否则反向kernel目录下还要创建cpu/gpu等目录 - - 二阶导、三阶导的实现统一也放到grad kernel实现文件中 + - 不再为反向kernel单独创建目录,否则反向kernel目录下还要创建cpu/gpu等目录 + - 二阶导、三阶导的实现统一也放到grad kernel实现文件中 - 为什么目录名叫`gpu`而不是`cuda`和`hip`? 
- - cuda和hip代码重复度非常高,统一实现维护成本较低 + - cuda和hip代码重复度非常高,统一实现维护成本较低 ## 2.3 核心组件 @@ -380,26 +380,26 @@ void FullKernel(const Context& dev_ctx, #### 2.3.2.1 API Tensor接口 - 最上层是API级别的Tensor接口封装,里面包含两个指针成员,TensorBase和AbstractAutogradMeta。 - - 两个成员均使用了Interface设计,不会依赖于真实的Tensor和Autograd实现 - - AutogradMeta仅在动态图API级别的Tensor中有意义,在具体的kernel计算中,不会被使用到,所以将其放到最上层的Tensor接口中 - - 另外,这样设计也是为了方便数据共享,并且减少拷贝开销 - - 当一个Tensor赋值给另一个Tensor,或者Tensor作为函数返回值时,实际上只会拷贝指针,不会产生真实的数据拷贝 + - 两个成员均使用了Interface设计,不会依赖于真实的Tensor和Autograd实现 + - AutogradMeta仅在动态图API级别的Tensor中有意义,在具体的kernel计算中,不会被使用到,所以将其放到最上层的Tensor接口中 + - 另外,这样设计也是为了方便数据共享,并且减少拷贝开销 + - 当一个Tensor赋值给另一个Tensor,或者Tensor作为函数返回值时,实际上只会拷贝指针,不会产生真实的数据拷贝 - 最上层C++ Tensor与Python端Tensor扮演类似的角色,在接口设计上尽可能与Python端保持一致 - - 包含基础的Tensor属性访问及数据访问方法 - - shape, place, dtype, data - - 包含动态图Tensor需要的autograd方法 - - gradient, backward - - 包含Tensor间的转换方法 - - cpu, gpu, xpu等 - - 包含tensor相关的计算方法(暂未添加) - - `paddle.tensor` 模块下所有方法 + - 包含基础的Tensor属性访问及数据访问方法 + - shape, place, dtype, data + - 包含动态图Tensor需要的autograd方法 + - gradient, backward + - 包含Tensor间的转换方法 + - cpu, gpu, xpu等 + - 包含tensor相关的计算方法(暂未添加) + - `paddle.tensor` 模块下所有方法 - 编译解耦: - - 这里带有的autograd信息,只是一个指针索引,默认为空 - - `std::unique_ptr autograd_meta_ = nullptr;` - - 而这里的AbstractAutogradMeta是一个抽象类接口,不会依赖autograd的任何模块,因此不会影响 phi 的独立编译,同时又兼顾了动态图Tensor需要持有反向信息的需求 + - 这里带有的autograd信息,只是一个指针索引,默认为空 + - `std::unique_ptr autograd_meta_ = nullptr;` + - 而这里的AbstractAutogradMeta是一个抽象类接口,不会依赖autograd的任何模块,因此不会影响 phi 的独立编译,同时又兼顾了动态图Tensor需要持有反向信息的需求 - 这里的AutogradMeta仅在动态图场景中才会设置,不需要的场景,比如静态图内就仅仅是个空指针而已 @@ -427,22 +427,22 @@ Tensor mkldnn() const; ``` - 这个转换的过程可能是cast,也可能是copy - - 如果不需要进行数据拷贝,就是cast - - 如果需要进行数据拷贝,就是copy - - 转换通过函数式kernel去实现 + - 如果不需要进行数据拷贝,就是cast + - 如果需要进行数据拷贝,就是copy + - 转换通过函数式kernel去实现 - 在API场景中的使用 - - 用户在完整训练场景中,使用API的时候,最初读入的数据一般是从磁盘读入,先放入CPU,然后再转换到具体执行设备上,比如DataLoader + - 用户在完整训练场景中,使用API的时候,最初读入的数据一般是从磁盘读入,先放入CPU,然后再转换到具体执行设备上,比如DataLoader #### 2.3.2.2 TensorBase - 
Tensor实现的接口类,接口中仅包含必要的纯虚Tensor方法,不包含有实际含义的成员,这里的方法在开发过程中也要严格控制 - 为什么要在这一层用抽象类设计? - - 一方面是为了隔离Tensor API与Tensor具体实现,不产生过多依赖,如果将来Tensor API需要重新设计,或者说需要放弃掉autograd信息,只需要重新设计一个Tensor API即可,对于底层Tensor的实现几乎没有影响 - - 另一方面是为了给异构化的Tensor保留充足的扩展空间,框架API层仅需要一个Tensor数据结构即可,不需要再暴露多种数据结构设计,这里其实做了一个大范围定义,框架内所有数据结构均是Tensor - - 对于内存布局基本一致,或者说Tensor描述基本一致的实现,可以基于一种DenseTensor的实现去继承 - - 如果是异构化程度高的Tensor,可以直接从Interface继承去实现新的Tensor分支,比如只有一个Object的Tensor,确保在Tensor扩展灵活性上不会出现瓶颈 + - 一方面是为了隔离Tensor API与Tensor具体实现,不产生过多依赖,如果将来Tensor API需要重新设计,或者说需要放弃掉autograd信息,只需要重新设计一个Tensor API即可,对于底层Tensor的实现几乎没有影响 + - 另一方面是为了给异构化的Tensor保留充足的扩展空间,框架API层仅需要一个Tensor数据结构即可,不需要再暴露多种数据结构设计,这里其实做了一个大范围定义,框架内所有数据结构均是Tensor + - 对于内存布局基本一致,或者说Tensor描述基本一致的实现,可以基于一种DenseTensor的实现去继承 + - 如果是异构化程度高的Tensor,可以直接从Interface继承去实现新的Tensor分支,比如只有一个Object的Tensor,确保在Tensor扩展灵活性上不会出现瓶颈 #### 2.3.3.3 DenseTensor、SparseTensor @@ -512,11 +512,11 @@ Tensor scale(const Tensor& x, **这个新建的C++ API体系目前主要用于什么场景?** 1. 作为自定义算子开发时可调用的C++ API,提升易用性 - - 例如现在用户在自定义算子中初始化一个Tensor需要循环遍历Tensor数据并赋值,有API之后可以直接调用`paddle::ones`,`paddle::fill`这些API + - 例如现在用户在自定义算子中初始化一个Tensor需要循环遍历Tensor数据并赋值,有API之后可以直接调用`paddle::ones`,`paddle::fill`这些API 2. 作为新动态图的基础调用单元 - - 新动态图会以API作为调度计算单元,不会再调用Op体系,以提升调度性能 + - 新动态图会以API作为调度计算单元,不会再调用Op体系,以提升调度性能 3. 
作为反向Op复用前向Op进行开发的基础 - - 现在反向op kernel需要单独实现,在API体系成型后,希望可以通过复用前向API完成反向Op实现 + - 现在反向op kernel需要单独实现,在API体系成型后,希望可以通过复用前向API完成反向Op实现 #### 2.3.3.2 C++ API自动生成 @@ -601,22 +601,22 @@ void Scale(const Context& dev_ctx, - 不同设备的kernel要有不同的函数实现,函数名采用**驼峰式命名**,除了首字母大写之外,命名尽可能和API函数名保持一致,同一个计算的函数命名保持一致,通过不同文件或者目录管理不同设备的函数 - 一般有两个模板参数,T和Context(尽可能),用于运行时决定数据类型和设备类型 - - 按照我们目前的体系,绝大多数的Kernel都是按照**特化DeviceContext和数据类型**这种方式缩减代码的,这与原先OpKernel的形式一致性比较强 - - 形式要统一,将来如果Kernel层也作为细粒度API暴露的话,易用性有保障 + - 按照我们目前的体系,绝大多数的Kernel都是按照**特化DeviceContext和数据类型**这种方式缩减代码的,这与原先OpKernel的形式一致性比较强 + - 形式要统一,将来如果Kernel层也作为细粒度API暴露的话,易用性有保障 - 函数输入参数规定: - - 以具体的DeviceContext作为第一个输入参数,如CPUContext,CUDAContext,用于满足运行时需要特定上下文信息的需求,如多stream需要传stream进来 - - 暂不支持一个Kernel传入多个DeviceContext参数,目前认为这样的需求不太合理 - - 参数列表和API保持一致,如果有其他的特殊信息需要传入Kernel,通过Context传递 - - 随后是所有的输入Tensor与输入Attribute,均以const &方式传入,POD类型直接以值传入 - - 输入的Tensor是具体的Tensor类型,如DenseTensor或SelectedRows,不是对外接口API那个Tensor - - 最后是函数的返回值Tensor,以指针形式传入 - - 为了满足灵活性,让kernel可以适配更多的场景,后续会允许声明灵活类型的输入、输出和参数,参考tfrt的Argument(输入), Attribute,(属性) Return(输出)等模板,以适配非Tensor的输入输出,以及Tensor类的Attribute,让机制更加灵活 + - 以具体的DeviceContext作为第一个输入参数,如CPUContext,CUDAContext,用于满足运行时需要特定上下文信息的需求,如多stream需要传stream进来 + - 暂不支持一个Kernel传入多个DeviceContext参数,目前认为这样的需求不太合理 + - 参数列表和API保持一致,如果有其他的特殊信息需要传入Kernel,通过Context传递 + - 随后是所有的输入Tensor与输入Attribute,均以const &方式传入,POD类型直接以值传入 + - 输入的Tensor是具体的Tensor类型,如DenseTensor或SelectedRows,不是对外接口API那个Tensor + - 最后是函数的返回值Tensor,以指针形式传入 + - 为了满足灵活性,让kernel可以适配更多的场景,后续会允许声明灵活类型的输入、输出和参数,参考tfrt的Argument(输入), Attribute,(属性) Return(输出)等模板,以适配非Tensor的输入输出,以及Tensor类的Attribute,让机制更加灵活 - 函数内部实现按需决定: - - 短期: - - 将现有OpKernel内实现,迁移到具体的设备Kernel内 - - 将存在设备公用的OpKernel实现抽离为函数,由多个设备Kernel共同调用 - - 长期: - - 复杂Kernel直接调用基础Kernel完成计算,鼓励Kernel复用,简化代码 + - 短期: + - 将现有OpKernel内实现,迁移到具体的设备Kernel内 + - 将存在设备公用的OpKernel实现抽离为函数,由多个设备Kernel共同调用 + - 长期: + - 复杂Kernel直接调用基础Kernel完成计算,鼓励Kernel复用,简化代码 > FAQ: @@ -696,69 +696,69 @@ void SignKernel(const Context& dev_ctx, 1. 
fluid的Kernel注册写法,有不少冗余信息,以scale为例,可以看到每个kernel除了最后的data type,前面函数名和DeviceContext特化的信息都是冗余的 - ``` - REGISTER_OP_CPU_KERNEL( - scale, ops::ScaleKernel, - ops::ScaleKernel, - ops::ScaleKernel, - ops::ScaleKernel, - ops::ScaleKernel, - ops::ScaleKernel, - ops::ScaleKernel, - ops::ScaleKernel); - ``` + ``` + REGISTER_OP_CPU_KERNEL( + scale, ops::ScaleKernel, + ops::ScaleKernel, + ops::ScaleKernel, + ops::ScaleKernel, + ops::ScaleKernel, + ops::ScaleKernel, + ops::ScaleKernel, + ops::ScaleKernel); + ``` 2. Paddle-Lite的kernel注册写法,为每一个Kernel都声明了输入输出信息,但由于每个数据类型的kernel都是不同的,也会造成写法上的冗余,如下代码可以看到,除了data type,其他的信息也基本是冗余的 - ``` - #ifdef LITE_BUILD_EXTRA - using scale_int32_f = - paddle::lite::kernels::arm::ScaleCompute; - REGISTER_LITE_KERNEL(scale, kARM, kFloat, kNCHW, scale_int32_f, int32) - .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))}) - .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))}) - .Finalize(); - - using scale_int64_f = - paddle::lite::kernels::arm::ScaleCompute; - REGISTER_LITE_KERNEL(scale, kARM, kFloat, kNCHW, scale_int64_f, int64) - .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))}) - .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))}) - .Finalize(); - #endif // LITE_BUILD_EXTRA - - #ifdef ENABLE_ARM_FP16 - using scale_float16 = - paddle::lite::kernels::arm::ScaleCompute; - REGISTER_LITE_KERNEL(scale, kARM, kFP16, kNCHW, scale_float16, def) - .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFP16))}) - .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFP16))}) - .Finalize(); - - #endif // ENABLE_ARM_FP16 - - using scale_float = - paddle::lite::kernels::arm::ScaleCompute; - REGISTER_LITE_KERNEL(scale, kARM, kFloat, kNCHW, scale_float, def) - .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))}) - .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))}) - .Finalize(); - - using scale_int32 = - 
paddle::lite::kernels::arm::ScaleCompute; - REGISTER_LITE_KERNEL(scale, kARM, kInt32, kNCHW, scale_int32, def) - .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))}) - .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))}) - .Finalize(); - - using scale_int64 = - paddle::lite::kernels::arm::ScaleCompute; - REGISTER_LITE_KERNEL(scale, kARM, kInt64, kNCHW, scale_int64, def) - .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))}) - .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))}) - .Finalize(); - ``` + ``` + #ifdef LITE_BUILD_EXTRA + using scale_int32_f = + paddle::lite::kernels::arm::ScaleCompute; + REGISTER_LITE_KERNEL(scale, kARM, kFloat, kNCHW, scale_int32_f, int32) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))}) + .Finalize(); + + using scale_int64_f = + paddle::lite::kernels::arm::ScaleCompute; + REGISTER_LITE_KERNEL(scale, kARM, kFloat, kNCHW, scale_int64_f, int64) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))}) + .Finalize(); + #endif // LITE_BUILD_EXTRA + + #ifdef ENABLE_ARM_FP16 + using scale_float16 = + paddle::lite::kernels::arm::ScaleCompute; + REGISTER_LITE_KERNEL(scale, kARM, kFP16, kNCHW, scale_float16, def) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFP16))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFP16))}) + .Finalize(); + + #endif // ENABLE_ARM_FP16 + + using scale_float = + paddle::lite::kernels::arm::ScaleCompute; + REGISTER_LITE_KERNEL(scale, kARM, kFloat, kNCHW, scale_float, def) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))}) + .Finalize(); + + using scale_int32 = + 
paddle::lite::kernels::arm::ScaleCompute; + REGISTER_LITE_KERNEL(scale, kARM, kInt32, kNCHW, scale_int32, def) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))}) + .Finalize(); + + using scale_int64 = + paddle::lite::kernels::arm::ScaleCompute; + REGISTER_LITE_KERNEL(scale, kARM, kInt64, kNCHW, scale_int64, def) + .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))}) + .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt64))}) + .Finalize(); + ``` 因此,本次设计,不希望继续保持目前这种冗余的写法,希望kernel注册方法足够简洁,同时还能够灵活地满足Kernel输入输出信息配置的需求。 @@ -1592,7 +1592,7 @@ class KernelContext { - 我们如何描述一个人?1. 他叫什么,长什么样;2. 他的工作、兴趣、爱好、特长、品质等 - 我们如何描述一个物品?1. 它叫什么,长什么样;2. 它的用途和功能是什么 - - 比如一个杯子:1. 它叫水杯,长这样;2. 它用来盛水的 + - 比如一个杯子:1. 它叫水杯,长这样;2. 它用来盛水的 简单说,我们描述一个对象,可以采用两段式结构: @@ -1732,7 +1732,7 @@ phi期望的Op开发方式:**“完形填空”式算子描述实现 + “堆 ``` template Fc(const Context& dev_ctx, const Tensor& x, const Tensor& w, const Tensor& b, Tensor* out) { - phi::add(phi::mul(x, w), b, out); + phi::add(phi::mul(x, w), b, out); } PT_REGISTE_KERNEL("fc", Fc, ...) diff --git a/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md b/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md index 753c4f25100..19a8eec12f7 100644 --- a/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md +++ b/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md @@ -68,7 +68,7 @@ - 常用的API可以在更高层级建立别名,当前规则如下: 1. paddle.tensor目录下的API,均在paddle根目录建立别名,其他所有API在paddle根目录下均没有别名。 2. 
paddle.nn目录下除了functional目录以外的所有API,在paddle.nn目录下均有别名。 - + ```python paddle.nn.functional.mse_loss # functional下的函数不建立别名,使用完整名称 paddle.nn.Conv2D # 为paddle.nn.layer.conv.Conv2D建立的别名 @@ -156,7 +156,7 @@ paddle.optimizer.lr.LambdaDecay ``` -- 由多个单词组成的类名,最后一个单词应表示类型 +- 由多个单词组成的类名,最后一个单词应表示类型 ```python # SimpleRNNCell 继承自 RNNCellBase @@ -176,7 +176,7 @@ paddle.nn.functional.batch_norm paddle.nn.functional.log_softmax ``` - + - 但一些约定俗成的例子可以保持不加下划线 ```python @@ -195,10 +195,10 @@ paddle.tensor.less_than # optimizer 不使用缩写 paddle.optimizer.SGD - # parameter 不使用缩写 + # parameter 不使用缩写 paddle.nn.create_parameter ``` - + - 在用于API命名时,常见的缩写列表如下: ```python @@ -206,7 +206,7 @@ ``` - 在用于API命名时,以下建议使用全称,不推荐使用缩写 - + | 不规范命名 | 规范命名 | | :-------- | :----------- | | div | divide | @@ -227,9 +227,9 @@ | img | image | | loc | location | | len | length | - - - + + + - API命名不应包含版本号 ```python @@ -238,7 +238,7 @@ ``` - 常见的数学计算API中的逐元素操作不需要加上elementwise前缀,按照某一轴操作不需要加上reduce前缀,一些例子如下 - + | paddle2.0之前 | pytorch | numpy | tensorflow | paddle2.0之后 | | :------------- | :----- | :------ | :--------- | :--------------- | | elementwise_add | add | add | add | add | @@ -255,8 +255,8 @@ | reduce_any | any | any | reduce_any | any | | reduce_mean | mean | mean | reduce_mean | mean | - - + + - 整数取模和取余 目前整除和取余取模运算机器运算符重载在不同的语言和库中对应关系比较复杂混乱(取余运算中余数和被除数同号,取模运算中模和除数同号。取余整除是对商向 0 取整,取模整除是对商向负取整) @@ -359,7 +359,7 @@ | 逐元素乘 | mul/elementwise_mul | | | | 逐元素加 | add/elementwise_add | | | | 按轴求和 | reduce_sum | | | - + - 常用参数表 | 中文名 | 推荐 | 不推荐写法 | 示例 | 备注 | @@ -1041,4 +1041,3 @@ | 精确推断 | exact inference | | | 潜层 | latent layer | | | 知识图谱 | knowledge graph | | - diff --git a/docs/dev_guides/api_contributing_guides/new_python_api_cn.md b/docs/dev_guides/api_contributing_guides/new_python_api_cn.md index 77fca8c68f8..51119c89205 100644 --- a/docs/dev_guides/api_contributing_guides/new_python_api_cn.md +++ b/docs/dev_guides/api_contributing_guides/new_python_api_cn.md @@ -75,7 +75,7 @@ from a import f # it's ok, too 
```Python # Python/paddle/tensor/math.py def logsumexp(...): - ... + ... # Python/paddle/tensor/__init__.py from .math import logsumexp @@ -164,7 +164,7 @@ def mm(input, mat2, name=None): elif _in_legacy_dygraph(): return _C_ops.matmul_v2(input, mat2) - # 静态分支 + # 静态分支 ## 检测输入 __check_input(input, mat2) diff --git a/docs/dev_guides/docs_contributing_guides_cn.md b/docs/dev_guides/docs_contributing_guides_cn.md index 1b6969525f2..8ddb8f47300 100644 --- a/docs/dev_guides/docs_contributing_guides_cn.md +++ b/docs/dev_guides/docs_contributing_guides_cn.md @@ -104,14 +104,14 @@ no changes added to commit (use "git add" and/or "git commit -a") ``` **如果你不想提交本次修改**,使用 ``git checkout -- `` 取消上面对``guides/04_dygraph_to_static/debugging_cn.md``文件的提交,可以将它恢复至上一次提交的状态: - + ``` ➜ git checkout -- guides/04_dygraph_to_static/debugging_cn.md ``` 恢复后重新进行修改并提交文件即可。 - + - pre-commit:提交修改说明前,需要对本次修改做一些格式化检查: - + ``` ➜ pre-commit CRLF end-lines remover...............................(no files to check)Skipped @@ -151,7 +151,7 @@ copyright_checker........................................................Passed - 填写提交说明:Git 每次提交代码,都需要写提交说明,让其他人知道这次提交做了哪些改变,可以通过 ``git commit`` 完成: ``` -➜ git commit -m "fix docs bugs" +➜ git commit -m "fix docs bugs" ``` ### 3.2 确保本地仓库是最新的 @@ -164,8 +164,8 @@ copyright_checker........................................................Passed ➜ git remote origin ➜ git remote -v -origin https://github.com/USERNAME/docs (fetch) -origin https://github.com/USERNAME/docs (push) +origin https://github.com/USERNAME/docs (fetch) +origin https://github.com/USERNAME/docs (push) ``` 这里 origin 是你 clone 的远程仓库的名字,也就是自己用户名下的 Paddle,接下来创建一个原始 Paddle 仓库的远程主机,命名为 upstream。 @@ -212,4 +212,4 @@ Paddle 中与文档相关的CI 流水线是 `FluidDoc1`等,主要对以下几 - 若需要执行示例代码则执行看能否正常运行 如果无法通过该CI,请点击对应CI的details,查看CI运行的的log,并根据log修改你的PR,直至通过CI。 -未选择任何文件 \ No newline at end of file +未选择任何文件 diff --git a/docs/dev_guides/git_guides/local_dev_guide_cn.md b/docs/dev_guides/git_guides/local_dev_guide_cn.md index 
3dfbb4292b7..21dfcffb38e 100644 --- a/docs/dev_guides/git_guides/local_dev_guide_cn.md +++ b/docs/dev_guides/git_guides/local_dev_guide_cn.md @@ -50,12 +50,12 @@ Changes not staged for commit: (use "git add ..." to update what will be committed) (use "git checkout -- ..." to discard changes in working directory) - modified: README.md + modified: README.md Untracked files: (use "git add ..." to include in what will be committed) - test + test no changes added to commit (use "git add" and/or "git commit -a") ``` @@ -93,7 +93,7 @@ On branch test Untracked files: (use "git add ..." to include in what will be committed) - test + test nothing added to commit but untracked files present (use "git add" to track) ➜ git add test @@ -128,8 +128,8 @@ clang-format.......................................(no files to check)Skipped ➜ git remote origin ➜ git remote -v -origin https://github.com/USERNAME/Paddle (fetch) -origin https://github.com/USERNAME/Paddle (push) +origin https://github.com/USERNAME/Paddle (fetch) +origin https://github.com/USERNAME/Paddle (push) ``` 这里 origin 是我们 clone 的远程仓库的名字,也就是自己用户名下的 Paddle,接下来我们创建一个原始 Paddle 仓库的远程主机,命名为 upstream。 diff --git a/docs/guides/10_contribution/docs_contribution.md b/docs/guides/10_contribution/docs_contribution.md index 71e3da3ea2e..a1e6a52c5b8 100644 --- a/docs/guides/10_contribution/docs_contribution.md +++ b/docs/guides/10_contribution/docs_contribution.md @@ -57,7 +57,7 @@ docs/api | |--utils | |--vision |-- api_label # 英文API文档的标签,用于API文档的相互引用 -|-- display_doc_list +|-- display_doc_list |-- gen_alias_api.py # 生成全量的API别名关系 |-- gen_alias_mapping.sh # 已废弃 |-- gen_doc.py # 生成英文API文档目录树程序 @@ -103,9 +103,9 @@ no changes added to commit (use "git add" and/or "git commit -a") ➜ git checkout -- paddle/all_cn.rst ``` 恢复后重新进行修改并提交文件即可。 - + - pre-commit:提交修改说明前,需要对本次修改做一些格式化检查: - + ``` ➜ pre-commit yapf.................................................(no files to check)Skipped @@ -143,7 +143,7 @@ 
convert-markdown-into-html...............................................Passed - 填写提交说明:Git 每次提交代码,都需要写提交说明,让其他人知道这次提交做了哪些改变,可以通过 ``git commit`` 完成: ``` -➜ git commit -m "fix all docs bugs" +➜ git commit -m "fix all docs bugs" ``` ### 3.2 确保本地仓库是最新的 @@ -156,8 +156,8 @@ convert-markdown-into-html...............................................Passed ➜ git remote origin ➜ git remote -v -origin https://github.com/USERNAME/docs (fetch) -origin https://github.com/USERNAME/docs (push) +origin https://github.com/USERNAME/docs (fetch) +origin https://github.com/USERNAME/docs (push) ``` 这里 origin 是你 clone 的远程仓库的名字,也就是自己用户名下的 Paddle,接下来创建一个原始 Paddle 仓库的远程主机,命名为 upstream。 @@ -203,4 +203,4 @@ Paddle 中与文档相关的CI 流水线是 `Docs-NEW`等,主要对以下几 - 检查增量修改的API是否需要相关人员审核 - 若需要执行示例代码则执行看能否正常运行 -如果无法通过该CI,请点击对应CI的details,查看CI运行的的log,并根据log修改你的PR,直至通过CI。 \ No newline at end of file +如果无法通过该CI,请点击对应CI的details,查看CI运行的的log,并根据log修改你的PR,直至通过CI。 diff --git a/docs/guides/flags/debug_cn.rst b/docs/guides/flags/debug_cn.rst index 63eb1bec4b0..9504f314e14 100644 --- a/docs/guides/flags/debug_cn.rst +++ b/docs/guides/flags/debug_cn.rst @@ -81,7 +81,7 @@ FLAGS_reader_queue_speed_test_mode=True - 启用pyreader测试模式。 ------- 仅当使用py_reader时该flag才有效。 -.. toctree:: - :hidden: +.. toctree:: + :hidden: - check_nan_inf_cn.md + check_nan_inf_cn.md diff --git a/docs/guides/flags/debug_en.rst b/docs/guides/flags/debug_en.rst index 713e7d95482..62f20df686c 100644 --- a/docs/guides/flags/debug_en.rst +++ b/docs/guides/flags/debug_en.rst @@ -80,7 +80,7 @@ Note ------- This flag will work only when you are using py_reader. -.. toctree:: - :hidden: +.. 
toctree:: + :hidden: - check_nan_inf_en.md \ No newline at end of file + check_nan_inf_en.md \ No newline at end of file diff --git a/docs/guides/performance_improving/analysis_tools/index_cn.rst b/docs/guides/performance_improving/analysis_tools/index_cn.rst index 3bb5ba2c568..c0a50dfb9d0 100644 --- a/docs/guides/performance_improving/analysis_tools/index_cn.rst +++ b/docs/guides/performance_improving/analysis_tools/index_cn.rst @@ -5,11 +5,11 @@ ############### .. toctree:: - :hidden: + :hidden: - cpu_profiling_cn.md - host_memory_profiling_cn.md - timeline_cn.md + cpu_profiling_cn.md + host_memory_profiling_cn.md + timeline_cn.md 本模块介绍 Fluid 使用过程中的调优方法,包括: diff --git a/docs/guides/performance_improving/analysis_tools/index_en.rst b/docs/guides/performance_improving/analysis_tools/index_en.rst index d83eeaeec51..c303e49f349 100644 --- a/docs/guides/performance_improving/analysis_tools/index_en.rst +++ b/docs/guides/performance_improving/analysis_tools/index_en.rst @@ -3,12 +3,12 @@ Performance Profiling and Optimization ####################################### .. toctree:: - :hidden: + :hidden: - cpu_profiling_en.md - host_memory_profiling_en.md - timeline_en.md + cpu_profiling_en.md + host_memory_profiling_en.md + timeline_en.md This section illustrates how to optimize performance of Fluid: diff --git a/docs/install/compile/fromsource.rst b/docs/install/compile/fromsource.rst index f4a88d373c0..21ff48887a3 100644 --- a/docs/install/compile/fromsource.rst +++ b/docs/install/compile/fromsource.rst @@ -2,13 +2,13 @@ **从源码编译** =========================== -.. toctree:: - :maxdepth: 1 +.. 
toctree:: + :maxdepth: 1 - linux-compile.md - macos-compile.md - windows-compile.md - arm-compile.md - sw-compile.md - zhaoxin-compile.md - mips-compile.md + linux-compile.md + macos-compile.md + windows-compile.md + arm-compile.md + sw-compile.md + zhaoxin-compile.md + mips-compile.md diff --git a/docs/install/compile/fromsource_en.rst b/docs/install/compile/fromsource_en.rst index a5bed1dfe18..b69144e1996 100644 --- a/docs/install/compile/fromsource_en.rst +++ b/docs/install/compile/fromsource_en.rst @@ -4,9 +4,9 @@ You can also choose to compile and install PaddlePaddle in the way of source code compilation. However, due to the diversity of the native environment, complicated problems may occur when compiling the source code, which may cause your installation to fail. In order to ensure your smooth installation, it is recommended that you prefer the normal installation method. -.. toctree:: - +.. toctree:: + - linux-compile_en.md - macos-compile_en.md - windows-compile_en.md + linux-compile_en.md + macos-compile_en.md + windows-compile_en.md diff --git a/docs/install/conda/fromconda.rst b/docs/install/conda/fromconda.rst index dbf5c5cccfc..5ee1b787c78 100644 --- a/docs/install/conda/fromconda.rst +++ b/docs/install/conda/fromconda.rst @@ -2,9 +2,9 @@ **Conda安装** =========================== -.. toctree:: - :maxdepth: 1 +.. toctree:: + :maxdepth: 1 - linux-conda.md - macos-conda.md - windows-conda.md + linux-conda.md + macos-conda.md + windows-conda.md diff --git a/docs/install/conda/fromconda_en.rst b/docs/install/conda/fromconda_en.rst index 25817110240..ac3878e69fb 100644 --- a/docs/install/conda/fromconda_en.rst +++ b/docs/install/conda/fromconda_en.rst @@ -2,9 +2,9 @@ **Install via conda** ============================== -.. toctree:: - +.. 
toctree:: + - linux-conda_en.md - macos-conda_en.md - windows-conda_en.md + linux-conda_en.md + macos-conda_en.md + windows-conda_en.md diff --git a/docs/install/docker/fromdocker.rst b/docs/install/docker/fromdocker.rst index 62905f664d7..aeeb05626f1 100644 --- a/docs/install/docker/fromdocker.rst +++ b/docs/install/docker/fromdocker.rst @@ -2,7 +2,7 @@ **Docker安装** =========================== -.. toctree:: - :maxdepth: 1 +.. toctree:: + :maxdepth: 1 - macos-docker.md + macos-docker.md diff --git a/docs/install/docker/fromdocker_en.rst b/docs/install/docker/fromdocker_en.rst index af6a1a7fafe..1fd18637fd0 100644 --- a/docs/install/docker/fromdocker_en.rst +++ b/docs/install/docker/fromdocker_en.rst @@ -2,7 +2,7 @@ **Install via docker** ============================== -.. toctree:: - +.. toctree:: + - macos-docker_en.md + macos-docker_en.md diff --git a/docs/install/index_cn.rst b/docs/install/index_cn.rst index 875b5ad038a..e46ad856ed2 100644 --- a/docs/install/index_cn.rst +++ b/docs/install/index_cn.rst @@ -199,11 +199,11 @@ - 如果您有开发PaddlePaddle的需求,请参考:`从源码编译 `_ -.. toctree:: - :hidden: - - pip/frompip.rst - compile/fromsource.rst - install_Kunlun_zh.md - install_ROCM_zh.md - Tables.md +.. 
toctree:: + :hidden: + + pip/frompip.rst + compile/fromsource.rst + install_Kunlun_zh.md + install_ROCM_zh.md + Tables.md diff --git a/docs/install/index_en.rst b/docs/install/index_en.rst index 9a665c0f159..08d97c18c09 100644 --- a/docs/install/index_en.rst +++ b/docs/install/index_en.rst @@ -58,7 +58,7 @@ The manuals will guide you to build and install PaddlePaddle on your 64-bit desk * Windows install GPU version - * Windows 7 / 8 / 10 support CUDA 10.1/10.2/11.2 single-card mode, but don't support CUDA 9.1/9.2/10.1 + * Windows 7 / 8 / 10 support CUDA 10.1/10.2/11.2 single-card mode, but don't support CUDA 9.1/9.2/10.1 * don't support install using **nvidia-docker** * Ubuntu install GPU version @@ -208,10 +208,10 @@ The second way to install: compile and install with source code - If you use PaddlePaddle only, we suggest you installation methods **pip** to install. - If you need to develop PaddlePaddle, please refer to `compile from source code `_ -.. toctree:: - :hidden: +.. toctree:: + :hidden: - pip/frompip_en.rst - compile/fromsource_en.rst - install_Kunlun_en.md - Tables_en.md + pip/frompip_en.rst + compile/fromsource_en.rst + install_Kunlun_en.md + Tables_en.md diff --git a/docs/install/install_script.md b/docs/install/install_script.md index 5acea897a57..34809aead08 100644 --- a/docs/install/install_script.md +++ b/docs/install/install_script.md @@ -8,17 +8,17 @@ 脚本会执行以下几步: -1. GPU检测 +1. GPU检测 - 检测您的机器是否含有我们支持的GPU,如果有,会安装GPU版本的PaddlePaddle,否则会安装CPU版本。 - (PaddlePaddle目前支持NVIDIA[官网](https://developer.nvidia.com/cuda-gpus#collapseOne)列出的,算力7.0以下的GPU和v100系列的GPU) + 检测您的机器是否含有我们支持的GPU,如果有,会安装GPU版本的PaddlePaddle,否则会安装CPU版本。 + (PaddlePaddle目前支持NVIDIA[官网](https://developer.nvidia.com/cuda-gpus#collapseOne)列出的,算力7.0以下的GPU和v100系列的GPU) 2. CUDA,cuDNN检测 - 检测您的机器是否安装我们支持的CUDA,cuDNN,具体地: + 检测您的机器是否安装我们支持的CUDA,cuDNN,具体地: - 1. 
在`/usr/local/` 及其子目录下寻找 `cuda10.1/cuda10.2/cuda11.0/cuda11.2` 目录下的`version.txt`文件(通常如果您以默认方式安装了CUDA)。 如果提示未找到CUDA请使用命令`find / -name version.txt`找到您所需要的CUDA目录下的“version.txt”路径,然后按照提示输入。 - 2. 在`/usr` 及其子目录下寻找文件 `cudnn.h` , 如果您的cuDNN未安装在默认路径请使用命令`find / -name cudnn.h`寻找您希望使用的cuDNN版本的`cudnn.h`路径并按提示输入 + 1. 在`/usr/local/` 及其子目录下寻找 `cuda10.1/cuda10.2/cuda11.0/cuda11.2` 目录下的`version.txt`文件(通常如果您以默认方式安装了CUDA)。 如果提示未找到CUDA请使用命令`find / -name version.txt`找到您所需要的CUDA目录下的“version.txt”路径,然后按照提示输入。 + 2. 在`/usr` 及其子目录下寻找文件 `cudnn.h` , 如果您的cuDNN未安装在默认路径请使用命令`find / -name cudnn.h`寻找您希望使用的cuDNN版本的`cudnn.h`路径并按提示输入 如果未找到相应文件,则会安装CPU版本的PaddlePaddle @@ -46,7 +46,7 @@ 1. 选择PaddlePaddle版本 我们为您提供2种版本:开发版和稳定版,推荐您选择测试验证过的稳定版 -2. 检查Python版本 +2. 检查Python版本 由于 macOS 自带的 Python 通常依赖于系统环境,因此我们不支持 macOS 自带的 Python 环境,请重新从 Python.org 安装 Python,然后根据提示输入您希望使用的 Python 的路径 3. 检查是否支持[AVX](https://zh.wikipedia.org/zh-hans/AVX指令集)指令集 diff --git a/docs/install/pip/frompip.rst b/docs/install/pip/frompip.rst index fe1ee5b8be7..85e089a9c88 100644 --- a/docs/install/pip/frompip.rst +++ b/docs/install/pip/frompip.rst @@ -2,9 +2,9 @@ **Pip安装** =========================== -.. toctree:: - :maxdepth: 1 +.. toctree:: + :maxdepth: 1 - linux-pip.md - macos-pip.md - windows-pip.md + linux-pip.md + macos-pip.md + windows-pip.md diff --git a/docs/install/pip/frompip_en.rst b/docs/install/pip/frompip_en.rst index 4c9bddbe2fe..77cd91b4683 100644 --- a/docs/install/pip/frompip_en.rst +++ b/docs/install/pip/frompip_en.rst @@ -2,9 +2,9 @@ **Install via pip** ============================== -.. toctree:: - +.. 
toctree:: + - linux-pip_en.md - macos-pip_en.md - windows-pip_en.md + linux-pip_en.md + macos-pip_en.md + windows-pip_en.md From 992bbf19fa352ae02afa61e987db08d5080b5064 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 09:46:21 +0000 Subject: [PATCH 02/20] CRLF -> LF --- docs/api_guides/X2Paddle/Caffe-Fluid.rst | 90 +++--- .../low_level/compiled_program_en.rst | 102 +++---- docs/api_guides/low_level/executor_en.rst | 68 ++--- docs/api_guides/low_level/inference_en.rst | 116 +++---- .../low_level/layers/control_flow_en.rst | 116 +++---- docs/api_guides/low_level/layers/conv.rst | 130 ++++---- docs/api_guides/low_level/layers/conv_en.rst | 114 +++---- .../low_level/layers/data_feeder_en.rst | 80 ++--- .../low_level/layers/data_in_out_en.rst | 58 ++-- .../low_level/layers/detection_en.rst | 122 ++++---- .../layers/learning_rate_scheduler_en.rst | 90 +++--- .../low_level/layers/loss_function_en.rst | 120 ++++---- .../low_level/layers/pooling_en.rst | 160 +++++----- .../low_level/layers/sparse_update.rst | 90 +++--- .../low_level/layers/sparse_update_en.rst | 90 +++--- .../api_guides/low_level/layers/tensor_en.rst | 282 +++++++++--------- docs/api_guides/low_level/metrics_en.rst | 98 +++--- .../low_level/model_save_reader_en.rst | 116 +++---- docs/api_guides/low_level/optimizer_en.rst | 180 +++++------ 19 files changed, 1111 insertions(+), 1111 deletions(-) diff --git a/docs/api_guides/X2Paddle/Caffe-Fluid.rst b/docs/api_guides/X2Paddle/Caffe-Fluid.rst index c3b078d29c1..1440910056f 100644 --- a/docs/api_guides/X2Paddle/Caffe-Fluid.rst +++ b/docs/api_guides/X2Paddle/Caffe-Fluid.rst @@ -1,45 +1,45 @@ -.. _Caffe-Fluid: - -######################## -Caffe-Fluid常用层对应表 -######################## - -本文档梳理了Caffe常用Layer与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有Caffe使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 - - -.. 
csv-table:: - :header: "序号", "Caffe Layer", "Fluid接口", "备注" - :widths: 1, 8, 8, 3 - - "1", "`AbsVal `_", ":ref:`cn_api_fluid_layers_abs`", "功能一致" - "2", "`Accuracy `_", ":ref:`cn_api_fluid_layers_accuracy`", "`差异对比 `_" - "3", "`ArgMax `_", ":ref:`cn_api_fluid_layers_argmax`", "`差异对比 `_" - "4", "`BatchNorm `_", ":ref:`cn_api_fluid_layers_batch_norm`", "`差异对比 `_" - "5", "`BNLL `_", ":ref:`cn_api_fluid_layers_softplus`", "功能一致" - "6", "`Concat `_", ":ref:`cn_api_fluid_layers_concat`", "功能一致" - "7", "`Convolution `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" - "8", "`Crop `_", ":ref:`cn_api_fluid_layers_crop`", "`差异对比 `_" - "9", "`Deconvolution `_", ":ref:`cn_api_fluid_layers_conv2d_transpose`", "`差异对比 `_" - "10", "`Dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "`差异对比 `_" - "11", "`Eltwise `_", "无相应接口", "`Fluid实现 `_" - "12", "`ELU `_", ":ref:`cn_api_fluid_layers_elu`", "功能一致" - "13", "`EuclideanLoss `_", ":ref:`cn_api_fluid_layers_square_error_cost`", "`差异对比 `_" - "14", "`Exp `_", ":ref:`cn_api_fluid_layers_exp`", "`差异对比 `_" - "15", "`Flatten `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" - "16", "`InnerProduct `_", ":ref:`cn_api_fluid_layers_fc`", "`差异对比 `_" - "17", "`Input `_", ":ref:`cn_api_fluid_layers_data`", "`差异对比 `_" - "18", "`Log `_", ":ref:`cn_api_fluid_layers_log`", "`差异对比 `_" - "19", "`LRN `_", ":ref:`cn_api_fluid_layers_lrn`", "`差异对比 `_" - "20", "`Pooling `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" - "21", "`Power `_", ":ref:`cn_api_fluid_layers_pow`", "`差异对比 `_" - "22", "`PReLU `_", ":ref:`cn_api_fluid_layers_prelu`", "功能一致" - "23", "`Reduction `_", "无相应接口", "`Fluid实现 `_" - "24", "`ReLU `_", ":ref:`cn_api_fluid_layers_leaky_relu`", "功能一致" - "25", "`Reshape `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" - "26", "`SigmoidCrossEntropyLoss `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "`差异对比 `_" - "27", "`Sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" - "28", "`Slice `_", 
":ref:`cn_api_fluid_layers_slice`", "`差异对比 `_" - "29", "`SoftmaxWithLoss `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "`差异对比 `_" - "30", "`Softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "`差异对比 `_" - "31", "`TanH `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" - "32", "`Tile `_", ":ref:`cn_api_fluid_layers_expand`", "`差异对比 `_" +.. _Caffe-Fluid: + +######################## +Caffe-Fluid常用层对应表 +######################## + +本文档梳理了Caffe常用Layer与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有Caffe使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 + + +.. csv-table:: + :header: "序号", "Caffe Layer", "Fluid接口", "备注" + :widths: 1, 8, 8, 3 + + "1", "`AbsVal `_", ":ref:`cn_api_fluid_layers_abs`", "功能一致" + "2", "`Accuracy `_", ":ref:`cn_api_fluid_layers_accuracy`", "`差异对比 `_" + "3", "`ArgMax `_", ":ref:`cn_api_fluid_layers_argmax`", "`差异对比 `_" + "4", "`BatchNorm `_", ":ref:`cn_api_fluid_layers_batch_norm`", "`差异对比 `_" + "5", "`BNLL `_", ":ref:`cn_api_fluid_layers_softplus`", "功能一致" + "6", "`Concat `_", ":ref:`cn_api_fluid_layers_concat`", "功能一致" + "7", "`Convolution `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" + "8", "`Crop `_", ":ref:`cn_api_fluid_layers_crop`", "`差异对比 `_" + "9", "`Deconvolution `_", ":ref:`cn_api_fluid_layers_conv2d_transpose`", "`差异对比 `_" + "10", "`Dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "`差异对比 `_" + "11", "`Eltwise `_", "无相应接口", "`Fluid实现 `_" + "12", "`ELU `_", ":ref:`cn_api_fluid_layers_elu`", "功能一致" + "13", "`EuclideanLoss `_", ":ref:`cn_api_fluid_layers_square_error_cost`", "`差异对比 `_" + "14", "`Exp `_", ":ref:`cn_api_fluid_layers_exp`", "`差异对比 `_" + "15", "`Flatten `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" + "16", "`InnerProduct `_", ":ref:`cn_api_fluid_layers_fc`", "`差异对比 `_" + "17", "`Input `_", ":ref:`cn_api_fluid_layers_data`", "`差异对比 `_" + "18", "`Log `_", ":ref:`cn_api_fluid_layers_log`", "`差异对比 `_" + "19", "`LRN `_", ":ref:`cn_api_fluid_layers_lrn`", "`差异对比 `_" + "20", "`Pooling `_", ":ref:`cn_api_fluid_layers_pool2d`", 
"`差异对比 `_" + "21", "`Power `_", ":ref:`cn_api_fluid_layers_pow`", "`差异对比 `_" + "22", "`PReLU `_", ":ref:`cn_api_fluid_layers_prelu`", "功能一致" + "23", "`Reduction `_", "无相应接口", "`Fluid实现 `_" + "24", "`ReLU `_", ":ref:`cn_api_fluid_layers_leaky_relu`", "功能一致" + "25", "`Reshape `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" + "26", "`SigmoidCrossEntropyLoss `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "`差异对比 `_" + "27", "`Sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" + "28", "`Slice `_", ":ref:`cn_api_fluid_layers_slice`", "`差异对比 `_" + "29", "`SoftmaxWithLoss `_", ":ref:`cn_api_fluid_layers_softmax_with_cross_entropy`", "`差异对比 `_" + "30", "`Softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "`差异对比 `_" + "31", "`TanH `_", ":ref:`cn_api_fluid_layers_tanh`", "功能一致" + "32", "`Tile `_", ":ref:`cn_api_fluid_layers_expand`", "`差异对比 `_" diff --git a/docs/api_guides/low_level/compiled_program_en.rst b/docs/api_guides/low_level/compiled_program_en.rst index 77ea883d6cc..43de0f7e1ed 100755 --- a/docs/api_guides/low_level/compiled_program_en.rst +++ b/docs/api_guides/low_level/compiled_program_en.rst @@ -1,51 +1,51 @@ -.. _api_guide_compiled_program_en: - -################ -CompiledProgram -################ - -The :ref:`api_fluid_CompiledProgram` is used to transform a program for various optimizations. For example, you can use :code:`with_data_parallel` to transform the program to data parallel program so that it can be run in multiple devices. - - -.. code-block:: python - - # Note: - # - If you want to specify the GPU cards which are used to run - # in ParallelExecutor, you should define the CUDA_VISIBLE_DEVICES - # in environment. - # - If you want to use multi CPU to run the program in ParallelExecutor, - # you should define the CPU_NUM in the environment. - - # First create the Executor. - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - - # Run the startup program once and only once. 
- exe.run(fluid.default_startup_program()) - - # Run the main program directly without compile. - loss = exe.run(fluid.default_main_program(), - feed=feed_dict, - fetch_list=[loss.name]) - - # Or, compiled the program, and then run the model with data parallel. - exec_strategy = fluid.ExecutionStrategy() - exec_strategy.num_threads = dev_count * 4 # the size of thread pool. - build_strategy = fluid.BuildStrategy() - build_strategy.memory_optimize = True if memory_opt else False - - compiled_prog = compiler.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss.name, - build_strategy=build_strategy, - exec_strategy=exec_strategy) - - loss, = exe.run(compiled_prog, - feed=feed_dict, - fetch_list=[loss.name]) - -**Note**: :code:`fluid.Porgram` and :code:`compiler.CompiledPorgram` are completely different :code:`Programs`. :code:`fluid.Porgram` is composed of a series of operators. :code:`compiler.CompiledPorgram` compiles the :code:`fluid.Porgram` and converts it into a computational graph. :code:`compiler.CompiledPorgram` cannot be saved at present. - - -- Related API : - - :ref:`api_fluid_CompiledProgram` +.. _api_guide_compiled_program_en: + +################ +CompiledProgram +################ + +The :ref:`api_fluid_CompiledProgram` is used to transform a program for various optimizations. For example, you can use :code:`with_data_parallel` to transform the program to data parallel program so that it can be run in multiple devices. + + +.. code-block:: python + + # Note: + # - If you want to specify the GPU cards which are used to run + # in ParallelExecutor, you should define the CUDA_VISIBLE_DEVICES + # in environment. + # - If you want to use multi CPU to run the program in ParallelExecutor, + # you should define the CPU_NUM in the environment. + + # First create the Executor. + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Run the startup program once and only once. 
+ exe.run(fluid.default_startup_program()) + + # Run the main program directly without compile. + loss = exe.run(fluid.default_main_program(), + feed=feed_dict, + fetch_list=[loss.name]) + + # Or, compiled the program, and then run the model with data parallel. + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_threads = dev_count * 4 # the size of thread pool. + build_strategy = fluid.BuildStrategy() + build_strategy.memory_optimize = True if memory_opt else False + + compiled_prog = compiler.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + loss, = exe.run(compiled_prog, + feed=feed_dict, + fetch_list=[loss.name]) + +**Note**: :code:`fluid.Porgram` and :code:`compiler.CompiledPorgram` are completely different :code:`Programs`. :code:`fluid.Porgram` is composed of a series of operators. :code:`compiler.CompiledPorgram` compiles the :code:`fluid.Porgram` and converts it into a computational graph. :code:`compiler.CompiledPorgram` cannot be saved at present. + + +- Related API : + - :ref:`api_fluid_CompiledProgram` diff --git a/docs/api_guides/low_level/executor_en.rst b/docs/api_guides/low_level/executor_en.rst index eb92d2dd665..f6f2c32544b 100755 --- a/docs/api_guides/low_level/executor_en.rst +++ b/docs/api_guides/low_level/executor_en.rst @@ -1,34 +1,34 @@ -.. _api_guide_executor_en: - -################ -Executor -################ - -:code:`Executor` realizes a simple executor in which all operators will be executed in order. You can run :code:`Executor` in a Python script. There are two kinds of executors in PaddlePaddle Fluid. One is single-thread executor which is the default option for :code:`Executor` and the other is the parallel executor which is illustrated in :ref:`api_guide_parallel_executor_en` . The config of `Executor` and :ref:`api_guide_parallel_executor_en` is different, it may be a bit confusing for some users. 
To make the executor more facility, we introduce :ref:`api_guide_compiled_program_en` , :ref:`api_guide_compiled_program_en` is used to transform a program for various optimizations, and it can be run by :code:`Executor`. - -The logic of :code:`Executor` is very simple. It is suggested to thoroughly run the model with :code:`Executor` in debugging phase on one computer and then switch to mode of multiple devices or multiple computers to compute. - -:code:`Executor` receives a :code:`Place` at construction, which can either be :ref:`api_fluid_CPUPlace` or :ref:`api_fluid_CUDAPlace`. - -.. code-block:: python - - # First create the Executor. - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - - # Run the startup program once and only once. - exe.run(fluid.default_startup_program()) - - # Run the main program directly. - loss, = exe.run(fluid.default_main_program(), - feed=feed_dict, - fetch_list=[loss.name]) - - -For simple example please refer to `basics_fit_a_line <../../beginners_guide/basics/fit_a_line/README.html>`_ - -- Related API : - - :ref:`api_fluid_Executor` - - - +.. _api_guide_executor_en: + +################ +Executor +################ + +:code:`Executor` realizes a simple executor in which all operators will be executed in order. You can run :code:`Executor` in a Python script. There are two kinds of executors in PaddlePaddle Fluid. One is single-thread executor which is the default option for :code:`Executor` and the other is the parallel executor which is illustrated in :ref:`api_guide_parallel_executor_en` . The config of `Executor` and :ref:`api_guide_parallel_executor_en` is different, it may be a bit confusing for some users. To make the executor more facility, we introduce :ref:`api_guide_compiled_program_en` , :ref:`api_guide_compiled_program_en` is used to transform a program for various optimizations, and it can be run by :code:`Executor`. + +The logic of :code:`Executor` is very simple. 
It is suggested to thoroughly run the model with :code:`Executor` in debugging phase on one computer and then switch to mode of multiple devices or multiple computers to compute. + +:code:`Executor` receives a :code:`Place` at construction, which can either be :ref:`api_fluid_CPUPlace` or :ref:`api_fluid_CUDAPlace`. + +.. code-block:: python + + # First create the Executor. + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Run the startup program once and only once. + exe.run(fluid.default_startup_program()) + + # Run the main program directly. + loss, = exe.run(fluid.default_main_program(), + feed=feed_dict, + fetch_list=[loss.name]) + + +For simple example please refer to `basics_fit_a_line <../../beginners_guide/basics/fit_a_line/README.html>`_ + +- Related API : + - :ref:`api_fluid_Executor` + + + diff --git a/docs/api_guides/low_level/inference_en.rst b/docs/api_guides/low_level/inference_en.rst index 33bd5d12ad3..4faf6de48e9 100755 --- a/docs/api_guides/low_level/inference_en.rst +++ b/docs/api_guides/low_level/inference_en.rst @@ -1,58 +1,58 @@ -.. _api_guide_inference_en: - -################# -Inference Engine -################# - -Inference engine provides interfaces to save inference model :ref:`api_fluid_io_save_inference_model` and load inference model :ref:`api_fluid_io_load_inference_model` . - -Format of Saved Inference Model -===================================== - -There are two formats of saved inference model, which are controlled by :code:`model_filename` and :code:`params_filename` parameters in the two interfaces above. - -- Parameters are saved into independent separate files, such as :code:`model_filename` set as :code:`None` and :code:`params_filename` set as :code:`None` - - .. 
code-block:: bash - - ls recognize_digits_conv.inference.model/* - __model__ conv2d_1.w_0 conv2d_2.w_0 fc_1.w_0 conv2d_1.b_0 conv2d_2.b_0 fc_1.b_0 - -- Parameters are saved into the same file, such as :code:`model_filename` set as :code:`None` and :code:`params_filename` set as :code:`__params__` - - .. code-block:: bash - - ls recognize_digits_conv.inference.model/* - __model__ __params__ - -Save Inference model -=============================== - -To save an inference model, we normally use :code:`fluid.io.save_inference_model` to tailor the default :code:`fluid.Program` and only keep the parts useful for predicting :code:`predict_var`. -After being tailored, :code:`program` will be saved under :code:`./infer_model/__model__` while the parameters will be saved into independent files under :code:`./infer_model` . - -Sample Code: - -.. code-block:: python - - exe = fluid.Executor(fluid.CPUPlace()) - path = "./infer_model" - fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'], - target_vars=[predict_var], executor=exe) - - -Load Inference Model -===================== - -.. code-block:: python - - exe = fluid.Executor(fluid.CPUPlace()) - path = "./infer_model" - [inference_program, feed_target_names, fetch_targets] = - fluid.io.load_inference_model(dirname=path, executor=exe) - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) - -In this example, at first we call :code:`fluid.io.load_inference_model` to get inference :code:`inference_program` , :code:`feed_target_names`-name of input data and :code:`fetch_targets` of output; -then call :code:`executor` to run inference :code:`inference_program` to get inferred result. +.. _api_guide_inference_en: + +################# +Inference Engine +################# + +Inference engine provides interfaces to save inference model :ref:`api_fluid_io_save_inference_model` and load inference model :ref:`api_fluid_io_load_inference_model` . 
+ +Format of Saved Inference Model +===================================== + +There are two formats of saved inference model, which are controlled by :code:`model_filename` and :code:`params_filename` parameters in the two interfaces above. + +- Parameters are saved into independent separate files, such as :code:`model_filename` set as :code:`None` and :code:`params_filename` set as :code:`None` + + .. code-block:: bash + + ls recognize_digits_conv.inference.model/* + __model__ conv2d_1.w_0 conv2d_2.w_0 fc_1.w_0 conv2d_1.b_0 conv2d_2.b_0 fc_1.b_0 + +- Parameters are saved into the same file, such as :code:`model_filename` set as :code:`None` and :code:`params_filename` set as :code:`__params__` + + .. code-block:: bash + + ls recognize_digits_conv.inference.model/* + __model__ __params__ + +Save Inference model +=============================== + +To save an inference model, we normally use :code:`fluid.io.save_inference_model` to tailor the default :code:`fluid.Program` and only keep the parts useful for predicting :code:`predict_var`. +After being tailored, :code:`program` will be saved under :code:`./infer_model/__model__` while the parameters will be saved into independent files under :code:`./infer_model` . + +Sample Code: + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./infer_model" + fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'], + target_vars=[predict_var], executor=exe) + + +Load Inference Model +===================== + +.. 
code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./infer_model" + [inference_program, feed_target_names, fetch_targets] = + fluid.io.load_inference_model(dirname=path, executor=exe) + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + +In this example, at first we call :code:`fluid.io.load_inference_model` to get inference :code:`inference_program` , :code:`feed_target_names`-name of input data and :code:`fetch_targets` of output; +then call :code:`executor` to run inference :code:`inference_program` to get inferred result. diff --git a/docs/api_guides/low_level/layers/control_flow_en.rst b/docs/api_guides/low_level/layers/control_flow_en.rst index ecc42e30f79..28f9b18edd8 100755 --- a/docs/api_guides/low_level/layers/control_flow_en.rst +++ b/docs/api_guides/low_level/layers/control_flow_en.rst @@ -1,59 +1,59 @@ -.. api_guide_control_flow_en: - -############# -Control Flow -############# - -In programming languages, the control flow determines the order in which statements are executed. Common control flows contain sequential execution, branching, and looping. PaddlePaddle Fluid inherits this concept and provides a variety of control flow APIs to control the execution logic of the deep learning model during training or prediction. - -IfElse -====== - -Conditional branch, for the input of a batch, according to the given conditions, select the process in :code:`true_block` or :code:`false_block` to execute respectively, and then merge the outputs of the two branches into one after the execution. In general, conditional expressions can be generated by a logical comparison API such as :ref:`api_fluid_layers_less_than`, :ref:`api_fluid_layers_equal`. - -Please refer to :ref:`api_fluid_layers_IfElse` - -**Note:** A new OP :ref:`api_fluid_layers_cond` is highly recommended instead of ``IfElse`` . 
OP :ref:`api_fluid_layers_cond` is easier to use and is called with less code but does the same thing as ``IfElse`` . - -Switch -====== - -Switch, like the :code:`switch-case` declaration commonly found in programming languages, selects different branch to execute depending on the value of the input expression. Specifically, the :code:`Switch` control flow defined by Fluid has the following characteristics: - -* The condition of the case is a bool type value, which is a tensor type Variable in the Program; -* It checks each case one by one, selects the first case that satisfies the condition, and exits the block after completion of the execution; -* If all cases do not meet the conditions, the default case will be selected for execution. - -Please refer to :ref:`api_fluid_layers_Switch` - -**Note:** A new OP :ref:`api_fluid_layers_case` is highly recommended instead of ``Switch`` . OP :ref:`api_fluid_layers_case` is easier to use and is called with less code but does the same thing as ``Switch`` . - -While -===== - -When the condition is true, repeatedly execute logic in the :code:`block` which :code:`While` flow belongs to until the condition is judged to be false and the loop will be ended. The related APIs are as follows: - -* :ref:`api_fluid_layers_increment` : It is usually used to count the number of loops; -* :ref:`api_fluid_layers_array_read` : Reads Variable from the specified location in :code:`LOD_TENSOR_ARRAY` to perform calculations; -* :ref:`api_fluid_layers_array_write` : Writes the Variable back to the specified location in :code:`LOD_TENSOR_ARRAY` and stores the result of the calculation. - -Please refer to :ref:`api_fluid_layers_While` - -**Note**: A new OP :ref:`api_fluid_layers_while_loop` is highly recommended instead of ``While`` . OP :ref:`api_fluid_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` . 
- - -DynamicRNN -========== - -Dynamic RNN can process a batch of unequal(variable)-length sequence data, which accepts the variable with :code:`lod_level=1` as input. In the :code:`block` of :code:`DynamicRNN`, the user needs to customize RNN's single-step calculation logic. At each time step, the user can write the state to be remembered to the :code:`memory` of :code:`DynamicRNN` and write the required output to its :code:`output`. - -:ref:`api_fluid_layers_sequence_last_step` gets the output of the last time step of :code:`DynamicRNN`. - -Please refer to :ref:`api_fluid_layers_DynamicRNN` - -StaticRNN -========= - -Static RNN can only process fixed-length sequence data, and accept Variable with :code:`lod_level=0` as input. Similar to :code:`DynamicRNN`, at each single time step of the RNN, the user needs to customize the calculation logic and export the status and output. - +.. api_guide_control_flow_en: + +############# +Control Flow +############# + +In programming languages, the control flow determines the order in which statements are executed. Common control flows contain sequential execution, branching, and looping. PaddlePaddle Fluid inherits this concept and provides a variety of control flow APIs to control the execution logic of the deep learning model during training or prediction. + +IfElse +====== + +Conditional branch, for the input of a batch, according to the given conditions, select the process in :code:`true_block` or :code:`false_block` to execute respectively, and then merge the outputs of the two branches into one after the execution. In general, conditional expressions can be generated by a logical comparison API such as :ref:`api_fluid_layers_less_than`, :ref:`api_fluid_layers_equal`. + +Please refer to :ref:`api_fluid_layers_IfElse` + +**Note:** A new OP :ref:`api_fluid_layers_cond` is highly recommended instead of ``IfElse`` . 
OP :ref:`api_fluid_layers_cond` is easier to use and is called with less code but does the same thing as ``IfElse`` . + +Switch +====== + +Switch, like the :code:`switch-case` declaration commonly found in programming languages, selects different branch to execute depending on the value of the input expression. Specifically, the :code:`Switch` control flow defined by Fluid has the following characteristics: + +* The condition of the case is a bool type value, which is a tensor type Variable in the Program; +* It checks each case one by one, selects the first case that satisfies the condition, and exits the block after completion of the execution; +* If all cases do not meet the conditions, the default case will be selected for execution. + +Please refer to :ref:`api_fluid_layers_Switch` + +**Note:** A new OP :ref:`api_fluid_layers_case` is highly recommended instead of ``Switch`` . OP :ref:`api_fluid_layers_case` is easier to use and is called with less code but does the same thing as ``Switch`` . + +While +===== + +When the condition is true, repeatedly execute logic in the :code:`block` which :code:`While` flow belongs to until the condition is judged to be false and the loop will be ended. The related APIs are as follows: + +* :ref:`api_fluid_layers_increment` : It is usually used to count the number of loops; +* :ref:`api_fluid_layers_array_read` : Reads Variable from the specified location in :code:`LOD_TENSOR_ARRAY` to perform calculations; +* :ref:`api_fluid_layers_array_write` : Writes the Variable back to the specified location in :code:`LOD_TENSOR_ARRAY` and stores the result of the calculation. + +Please refer to :ref:`api_fluid_layers_While` + +**Note**: A new OP :ref:`api_fluid_layers_while_loop` is highly recommended instead of ``While`` . OP :ref:`api_fluid_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` . 
+ + +DynamicRNN +========== + +Dynamic RNN can process a batch of unequal(variable)-length sequence data, which accepts the variable with :code:`lod_level=1` as input. In the :code:`block` of :code:`DynamicRNN`, the user needs to customize RNN's single-step calculation logic. At each time step, the user can write the state to be remembered to the :code:`memory` of :code:`DynamicRNN` and write the required output to its :code:`output`. + +:ref:`api_fluid_layers_sequence_last_step` gets the output of the last time step of :code:`DynamicRNN`. + +Please refer to :ref:`api_fluid_layers_DynamicRNN` + +StaticRNN +========= + +Static RNN can only process fixed-length sequence data, and accept Variable with :code:`lod_level=0` as input. Similar to :code:`DynamicRNN`, at each single time step of the RNN, the user needs to customize the calculation logic and export the status and output. + Please refer to :ref:`api_fluid_layers_StaticRNN` \ No newline at end of file diff --git a/docs/api_guides/low_level/layers/conv.rst b/docs/api_guides/low_level/layers/conv.rst index 5a15e40349d..0580936bee4 100644 --- a/docs/api_guides/low_level/layers/conv.rst +++ b/docs/api_guides/low_level/layers/conv.rst @@ -1,65 +1,65 @@ -.. _api_guide_conv: - -##### -卷积 -##### - -卷积有两组输入:特征图和卷积核,依据输入特征和卷积核的形状、Layout不同、计算方式的不同,在Fluid里,有针对变长序列特征的一维卷积,有针对定长图像特征的二维(2D Conv)、三维卷积(3D Conv),同时也有卷积计算的逆向过程,下面先介绍Fluid里的2D/3D卷积,再来介绍序列卷积。 - - -2D/3D卷积 -============== - -1. 
卷积输入参数: ---------------------- - -卷积需要依据滑动步长(stride)、填充长度(padding)、卷积核窗口大小(filter size)、分组数(groups)、扩张系数(dilation rate)来决定如何计算。groups最早在 `AlexNet `_ 中引入, 可以理解为将原始的卷积分为独立若干组卷积计算。 - - **注意**: 同cuDNN的方式,Fluid目前只支持在特征图上下填充相同的长度,左右也是。 - -- 输入输出Layout: - - 2D卷积输入特征的Layout为[N, C, H, W]或[N, H, W, C], N即batch size,C是通道数,H、W是特征的高度和宽度,输出特征和输入特征的Layout一致。(相应的3D卷积输入特征的Layout为[N, C, D, H, W]或[N, D, H, W, C],但 **注意**,Fluid的卷积当前只支持[N, C, H, W],[N, C, D, H, W]。) - -- 卷积核的Layout: - - Fluid中2D卷积的卷积核(也称权重)的Layout为[C_o, C_in / groups, f_h, f_w],C_o、C_in表示输出、输入通道数,f_h、f_w表示卷积核窗口的高度和宽度,按行序存储。(相应的3D卷积的卷积核Layout为[C_o, C_in / groups, f_d, f_h, d_w],同样按行序存储。) - -- 深度可分离卷积(depthwise separable convolution): - - 在深度可分离卷积中包括depthwise convolution和pointwise convolution两组,这两个卷积的接口和上述普通卷积接口相同。前者可以通过给普通卷积设置groups来做,后者通过设置卷积核filters的大小为1x1,深度可分离卷积减少参数的同时减少了计算量。 - - 对于depthwise convolution,可以设置groups等于输入通道数,此时,2D卷积的卷积核形状为[C_o, 1, f_h, f_w]。 - 对于pointwise convolution,卷积核的形状为[C_o, C_in, 1, 1]。 - - **注意**:Fluid针对depthwise convolution的GPU计算做了高度优化,您可以通过在 - :code:`fluid.layers.conv2d` 接口设置 :code:`use_cudnn=False` 来使用Fluid自身优化的CUDA程序。 - -- 空洞卷积(dilated convolution): - - 空洞卷积相比普通卷积而言,卷积核在特征图上取值时不在连续,而是间隔的,这个间隔数称作dilation,等于1时,即为普通卷积,空洞卷积相比普通卷积的感受野更大。 - -- API汇总: - - :ref:`cn_api_fluid_layers_conv2d` - - :ref:`cn_api_fluid_layers_conv3d` - - :ref:`cn_api_fluid_layers_conv2d_transpose` - - :ref:`cn_api_fluid_layers_conv3d_transpose` - - -1D序列卷积 -============== - -Fluid可以表示变长的序列结构,这里的变长是指不同样本的时间步(step)数不一样,通常是一个2D的Tensor和一个能够区分的样本长度的辅助结构来表示。假定,2D的Tensor的形状是shape,shape[0]是所有样本的总时间步数,shape[1]是序列特征的大小。 - -基于此数据结构的卷积在Fluid里称作序列卷积,也表示一维卷积。同图像卷积,序列卷积的输入参数有卷积核大小、填充大小、滑动步长,但与2D卷积不同的是,这些参数个数都为1。**注意**,目前仅支持stride为1的情况,输出序列的时间步数和输入序列相同。 - -假如:输入序列形状为(T, N), T即该序列的时间步数,N是序列特征大小;卷积核的上下文步长为K,输出序列长度为M,则卷积核权重形状为(K * N, M),输出序列形状为(T, M)。 - -另外,参考DeepSpeech,Fluid实现了行卷积row convolution, 或称 -`look ahead convolution `_ , -该卷积相比上述普通序列卷积可以减少参数。 - - -- API汇总: - - :ref:`cn_api_fluid_layers_sequence_conv` - - 
:ref:`cn_api_fluid_layers_row_conv` +.. _api_guide_conv: + +##### +卷积 +##### + +卷积有两组输入:特征图和卷积核,依据输入特征和卷积核的形状、Layout不同、计算方式的不同,在Fluid里,有针对变长序列特征的一维卷积,有针对定长图像特征的二维(2D Conv)、三维卷积(3D Conv),同时也有卷积计算的逆向过程,下面先介绍Fluid里的2D/3D卷积,再来介绍序列卷积。 + + +2D/3D卷积 +============== + +1. 卷积输入参数: +--------------------- + +卷积需要依据滑动步长(stride)、填充长度(padding)、卷积核窗口大小(filter size)、分组数(groups)、扩张系数(dilation rate)来决定如何计算。groups最早在 `AlexNet `_ 中引入, 可以理解为将原始的卷积分为独立若干组卷积计算。 + + **注意**: 同cuDNN的方式,Fluid目前只支持在特征图上下填充相同的长度,左右也是。 + +- 输入输出Layout: + + 2D卷积输入特征的Layout为[N, C, H, W]或[N, H, W, C], N即batch size,C是通道数,H、W是特征的高度和宽度,输出特征和输入特征的Layout一致。(相应的3D卷积输入特征的Layout为[N, C, D, H, W]或[N, D, H, W, C],但 **注意**,Fluid的卷积当前只支持[N, C, H, W],[N, C, D, H, W]。) + +- 卷积核的Layout: + + Fluid中2D卷积的卷积核(也称权重)的Layout为[C_o, C_in / groups, f_h, f_w],C_o、C_in表示输出、输入通道数,f_h、f_w表示卷积核窗口的高度和宽度,按行序存储。(相应的3D卷积的卷积核Layout为[C_o, C_in / groups, f_d, f_h, d_w],同样按行序存储。) + +- 深度可分离卷积(depthwise separable convolution): + + 在深度可分离卷积中包括depthwise convolution和pointwise convolution两组,这两个卷积的接口和上述普通卷积接口相同。前者可以通过给普通卷积设置groups来做,后者通过设置卷积核filters的大小为1x1,深度可分离卷积减少参数的同时减少了计算量。 + + 对于depthwise convolution,可以设置groups等于输入通道数,此时,2D卷积的卷积核形状为[C_o, 1, f_h, f_w]。 + 对于pointwise convolution,卷积核的形状为[C_o, C_in, 1, 1]。 + + **注意**:Fluid针对depthwise convolution的GPU计算做了高度优化,您可以通过在 + :code:`fluid.layers.conv2d` 接口设置 :code:`use_cudnn=False` 来使用Fluid自身优化的CUDA程序。 + +- 空洞卷积(dilated convolution): + + 空洞卷积相比普通卷积而言,卷积核在特征图上取值时不在连续,而是间隔的,这个间隔数称作dilation,等于1时,即为普通卷积,空洞卷积相比普通卷积的感受野更大。 + +- API汇总: + - :ref:`cn_api_fluid_layers_conv2d` + - :ref:`cn_api_fluid_layers_conv3d` + - :ref:`cn_api_fluid_layers_conv2d_transpose` + - :ref:`cn_api_fluid_layers_conv3d_transpose` + + +1D序列卷积 +============== + +Fluid可以表示变长的序列结构,这里的变长是指不同样本的时间步(step)数不一样,通常是一个2D的Tensor和一个能够区分的样本长度的辅助结构来表示。假定,2D的Tensor的形状是shape,shape[0]是所有样本的总时间步数,shape[1]是序列特征的大小。 + +基于此数据结构的卷积在Fluid里称作序列卷积,也表示一维卷积。同图像卷积,序列卷积的输入参数有卷积核大小、填充大小、滑动步长,但与2D卷积不同的是,这些参数个数都为1。**注意**,目前仅支持stride为1的情况,输出序列的时间步数和输入序列相同。 + +假如:输入序列形状为(T, N), 
T即该序列的时间步数,N是序列特征大小;卷积核的上下文步长为K,输出序列长度为M,则卷积核权重形状为(K * N, M),输出序列形状为(T, M)。 + +另外,参考DeepSpeech,Fluid实现了行卷积row convolution, 或称 +`look ahead convolution `_ , +该卷积相比上述普通序列卷积可以减少参数。 + + +- API汇总: + - :ref:`cn_api_fluid_layers_sequence_conv` + - :ref:`cn_api_fluid_layers_row_conv` diff --git a/docs/api_guides/low_level/layers/conv_en.rst b/docs/api_guides/low_level/layers/conv_en.rst index fd02bde43d8..cae792133e0 100755 --- a/docs/api_guides/low_level/layers/conv_en.rst +++ b/docs/api_guides/low_level/layers/conv_en.rst @@ -1,58 +1,58 @@ -.. _api_guide_conv_en: - -############# -Convolution -############# - -Convolution has two sets of inputs: feature maps and convolution kernels. Depending on the input features, the shape of the convolution kernel, the layout and the calculation method, in Fluid, there is a one-dimensional convolution for variable-length sequence features, two-dimensional (2D Conv) and three-dimensional convolution (3D Conv) for fixed-length image features. At the same time, there is also a reverse(backward) process of convolution calculation. The subsequent content describes the 2D/3D convolution in Fluid, and then introduces the sequence convolution. - - -2D/3D Convolution -================== - -1. Input parameters of convolution: --------------------------------------- -The convolution needs to be determined according to stride, padding, filter size, groups, and dilation rate. Groups were first introduced in `AlexNet `_ . It can be considered that the original convolution is split into independent sets of convolution to be calculated. - -**Note**: In the same way as cuDNN, Fluid currently only supports padding upper and lower part of feature maps with equal length , as well as that for left and right part. - -- The layout(shape) of Input and Output : - - Layout of input feature of 2D convolution is [N, C, H, W] or [N, H, W, C], where N is the batch size, C is the number of channels, H,W is the height and width of feature. 
Layout of input feature is the same as that of output feature. (Layout of input feature of 3D convolution is [N, C, D, H, W] or [N, D, H, W, C]. But **note**, Fluid convolution currently only supports [N, C, H, W],[N, C, D, H, W].) - -- The layout of convolution kernel: - - The layout of the 2D_conv convolution kernel (also called weight) in Fluid is [C_o, C_in / groups, f_h, f_w], where C_o, C_in represent the number of output and input channels, and f_h, f_w represent the height and width of filter, which are stored in row order. (The corresponding 2D_conv convolution kernel layout is [C_o, C_in / groups, f_d, f_h, d_w], which is also stored in row order.) - -- Depthwise Separable Convolution: - - Depthwise Separable Convolution contains depthwise convolution和pointwise convolution. The interfaces of these two convolutions are the same as the above normal convolutional interfaces. The former can be performed by setting groups for ordinary convolutions. The latter can be realised by setting the size of the convolution kernel filters to 1x1. Depthwise Separable Convolution reduces the parameters as well as the volume of computation. - - For depthwise convolution, you can set groups equal to the number of input channels. At this time, the convolution kernel shape of the 2D convolution is [C_o, 1, f_h, f_w]. For pointwise convolution, the shape of the convolution kernel is [C_o, C_in, 1, 1]. - - **Note**: Fluid optimized GPU computing for depthwise convolution greatly. You can use Fluid's self-optimized CUDA program by setting :code:`use_cudnn=False` in the :code:`fluid.layers.conv2d` interface. - -- Dilated Convolution: - - Compared with ordinary convolution, for dilated convolution, the convolution kernel does not continuously read values from the feature map, but with intervals. This interval is called dilation. When it is equal to 1, it becomes ordinary convolution. And receptive fields of dilated convolution is larger than that of ordinary convolution. 
- - -- related API: - - :ref:`api_fluid_layers_conv2d` - - :ref:`api_fluid_layers_conv3d` - - :ref:`api_fluid_layers_conv2d_transpose` - - :ref:`api_fluid_layers_conv3d_transpose` - - -1D sequence convolution -========================= - -Fluid can represent a variable-length sequence structure. The variable length here means that the number of time steps of different samples is different. It is usually represented by a 2D Tensor and an auxiliary structure that can distinguish the sample length. Assume that the shape of the 2D Tensor is shape, shape[0] is the total number of time steps for all samples, and shape[1] is the size of the sequence feature. - -Convolution based on this data structure is called sequence convolution in Fluid and also represents one-dimensional convolution. Similar to image convolution, the input parameters of the sequence convolution contain the filter size, the padding size, and the size of sliding stride. But unlike the 2D convolution, the number of each parameter is 1. **Note**, it currently only supports stride = 1. The output sequence has the same number of time steps as the input sequence. - -Suppose the input sequence shape is (T, N), while T is the number of time steps of the sequence, and N is the sequence feature size; The convolution kernel has a context stride of K. The length of output sequence is M, the shape of convolution kernel weight is (K * N, M), and the shape of output sequence is (T, M). - -- related API: - - :ref:`api_fluid_layers_sequence_conv` +.. _api_guide_conv_en: + +############# +Convolution +############# + +Convolution has two sets of inputs: feature maps and convolution kernels. Depending on the input features, the shape of the convolution kernel, the layout and the calculation method, in Fluid, there is a one-dimensional convolution for variable-length sequence features, two-dimensional (2D Conv) and three-dimensional convolution (3D Conv) for fixed-length image features. 
At the same time, there is also a reverse(backward) process of convolution calculation. The subsequent content describes the 2D/3D convolution in Fluid, and then introduces the sequence convolution. + + +2D/3D Convolution +================== + +1. Input parameters of convolution: +-------------------------------------- +The convolution needs to be determined according to stride, padding, filter size, groups, and dilation rate. Groups were first introduced in `AlexNet `_ . It can be considered that the original convolution is split into independent sets of convolution to be calculated. + +**Note**: In the same way as cuDNN, Fluid currently only supports padding upper and lower part of feature maps with equal length , as well as that for left and right part. + +- The layout(shape) of Input and Output : + + Layout of input feature of 2D convolution is [N, C, H, W] or [N, H, W, C], where N is the batch size, C is the number of channels, H,W is the height and width of feature. Layout of input feature is the same as that of output feature. (Layout of input feature of 3D convolution is [N, C, D, H, W] or [N, D, H, W, C]. But **note**, Fluid convolution currently only supports [N, C, H, W],[N, C, D, H, W].) + +- The layout of convolution kernel: + + The layout of the 2D_conv convolution kernel (also called weight) in Fluid is [C_o, C_in / groups, f_h, f_w], where C_o, C_in represent the number of output and input channels, and f_h, f_w represent the height and width of filter, which are stored in row order. (The corresponding 2D_conv convolution kernel layout is [C_o, C_in / groups, f_d, f_h, d_w], which is also stored in row order.) + +- Depthwise Separable Convolution: + + Depthwise Separable Convolution contains depthwise convolution和pointwise convolution. The interfaces of these two convolutions are the same as the above normal convolutional interfaces. The former can be performed by setting groups for ordinary convolutions. 
The latter can be realised by setting the size of the convolution kernel filters to 1x1. Depthwise Separable Convolution reduces the parameters as well as the volume of computation. + + For depthwise convolution, you can set groups equal to the number of input channels. At this time, the convolution kernel shape of the 2D convolution is [C_o, 1, f_h, f_w]. For pointwise convolution, the shape of the convolution kernel is [C_o, C_in, 1, 1]. + + **Note**: Fluid optimized GPU computing for depthwise convolution greatly. You can use Fluid's self-optimized CUDA program by setting :code:`use_cudnn=False` in the :code:`fluid.layers.conv2d` interface. + +- Dilated Convolution: + + Compared with ordinary convolution, for dilated convolution, the convolution kernel does not continuously read values from the feature map, but with intervals. This interval is called dilation. When it is equal to 1, it becomes ordinary convolution. And receptive fields of dilated convolution is larger than that of ordinary convolution. + + +- related API: + - :ref:`api_fluid_layers_conv2d` + - :ref:`api_fluid_layers_conv3d` + - :ref:`api_fluid_layers_conv2d_transpose` + - :ref:`api_fluid_layers_conv3d_transpose` + + +1D sequence convolution +========================= + +Fluid can represent a variable-length sequence structure. The variable length here means that the number of time steps of different samples is different. It is usually represented by a 2D Tensor and an auxiliary structure that can distinguish the sample length. Assume that the shape of the 2D Tensor is shape, shape[0] is the total number of time steps for all samples, and shape[1] is the size of the sequence feature. + +Convolution based on this data structure is called sequence convolution in Fluid and also represents one-dimensional convolution. Similar to image convolution, the input parameters of the sequence convolution contain the filter size, the padding size, and the size of sliding stride. 
But unlike the 2D convolution, the number of each parameter is 1. **Note**, it currently only supports stride = 1. The output sequence has the same number of time steps as the input sequence. + +Suppose the input sequence shape is (T, N), while T is the number of time steps of the sequence, and N is the sequence feature size; The convolution kernel has a context stride of K. The length of output sequence is M, the shape of convolution kernel weight is (K * N, M), and the shape of output sequence is (T, M). + +- related API: + - :ref:`api_fluid_layers_sequence_conv` - :ref:`api_fluid_layers_row_conv` \ No newline at end of file diff --git a/docs/api_guides/low_level/layers/data_feeder_en.rst b/docs/api_guides/low_level/layers/data_feeder_en.rst index 053aefd4a30..3662da5babe 100755 --- a/docs/api_guides/low_level/layers/data_feeder_en.rst +++ b/docs/api_guides/low_level/layers/data_feeder_en.rst @@ -1,41 +1,41 @@ -.. _api_guide_data_feeder_en: - -Feed training/inference data with DataFeeder -######################################################## - -Fluid provides the :code:`DataFeeder` class, which converts data types such as numpy array into a :code:`LoDTensor` type to feed the training/inference network. - -To create a :code:`DataFeeder` object: - -.. code-block:: python - - import paddle.fluid as fluid - - image = fluid.layers.data(name='image', shape=[-1, 3, 224, 224], dtype='float32') - label = fluid.layers.data(name='label', shape=[-1, 1], dtype='int64') - place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() - feeder = fluid.DataFeeder(feed_list=[image, label], place=place) - -The :code:`feed_list` parameter is a list of variables created by :code:`fluid.layers.data()` . -The :code:`place` parameter indicates that data such as numpy array passed in from the Python side should be converted to GPU or CPU :code:`LoDTensor`. 
-After creating the :code:`DataFeeder` object, the user can call the :code:`feed(iterable)` method to convert :code:`iterable` data given by user into :code:`LoDTensor` . - -:code:`iterable` should be a object of Python List or a Tuple type, and each element in :code:`iterable` is a Python List of length N or Tuple type object, where N is the number of :code:`feed_list` variables passed in when the :code:`DataFeeder` object is created. - -The concrete format of :code:`iterable` is: - -.. code-block:: python - - iterable = [ - (image_1, label_1), - (image_2, label_2), - ... - (image_n, label_n) - ] - -:code:`image_i` and :code:`label_i` are both numpy array data. If the dimension of the input data is [1], such as :code:`label_i`, -you can feed Python int, float, and other types of data. The data types and dimensions of :code:`image_i` and :code:`label_i` are not necessarily -the same as :code:`dtype` and :code:`shape` specified at :code:`fluid.layers.data()`. :code:`DataFeeder` internally -performs the conversion of data types and dimensions. If the :code:`lod_level` of the variable in :code:`feed_list` is not zero, in Fluid, the 0th dimension of each row in the dimensionally converted :code:`iterable` will be returned as :code:`LoD` . - +.. _api_guide_data_feeder_en: + +Feed training/inference data with DataFeeder +######################################################## + +Fluid provides the :code:`DataFeeder` class, which converts data types such as numpy array into a :code:`LoDTensor` type to feed the training/inference network. + +To create a :code:`DataFeeder` object: + +.. 
code-block:: python + + import paddle.fluid as fluid + + image = fluid.layers.data(name='image', shape=[-1, 3, 224, 224], dtype='float32') + label = fluid.layers.data(name='label', shape=[-1, 1], dtype='int64') + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace() + feeder = fluid.DataFeeder(feed_list=[image, label], place=place) + +The :code:`feed_list` parameter is a list of variables created by :code:`fluid.layers.data()` . +The :code:`place` parameter indicates that data such as numpy array passed in from the Python side should be converted to GPU or CPU :code:`LoDTensor`. +After creating the :code:`DataFeeder` object, the user can call the :code:`feed(iterable)` method to convert :code:`iterable` data given by user into :code:`LoDTensor` . + +:code:`iterable` should be a object of Python List or a Tuple type, and each element in :code:`iterable` is a Python List of length N or Tuple type object, where N is the number of :code:`feed_list` variables passed in when the :code:`DataFeeder` object is created. + +The concrete format of :code:`iterable` is: + +.. code-block:: python + + iterable = [ + (image_1, label_1), + (image_2, label_2), + ... + (image_n, label_n) + ] + +:code:`image_i` and :code:`label_i` are both numpy array data. If the dimension of the input data is [1], such as :code:`label_i`, +you can feed Python int, float, and other types of data. The data types and dimensions of :code:`image_i` and :code:`label_i` are not necessarily +the same as :code:`dtype` and :code:`shape` specified at :code:`fluid.layers.data()`. :code:`DataFeeder` internally +performs the conversion of data types and dimensions. If the :code:`lod_level` of the variable in :code:`feed_list` is not zero, in Fluid, the 0th dimension of each row in the dimensionally converted :code:`iterable` will be returned as :code:`LoD` . + Read :ref:`api_fluid_DataFeeder` for specific usage. 
\ No newline at end of file diff --git a/docs/api_guides/low_level/layers/data_in_out_en.rst b/docs/api_guides/low_level/layers/data_in_out_en.rst index a5c8d16a16f..db74e069740 100755 --- a/docs/api_guides/low_level/layers/data_in_out_en.rst +++ b/docs/api_guides/low_level/layers/data_in_out_en.rst @@ -1,30 +1,30 @@ -.. _api_guide_data_in_out_en: - -Data input and output -###################### - - -Data input -------------- - -Fluid supports two methods for data input, including: - -1. Python Reader: A pure Python Reader. The user defines the :code:`fluid.layers.data` layer on the Python side and builds the network. -Then, read the data by calling :code:`executor.run(feed=...)` . The process of data reading and model training/inference is performed simultaneously. - -2. PyReader: An Efficient and flexible C++ Reader interface. PyReader internally maintains a queue with size of :code:`capacity` (queue capacity is determined by -:code:`capacity` parameter in the :code:`fluid.layers.py_reader` interface ). Python side call queue :code:`push` to feed the training/inference data, and the C++ side training/inference program calls the :code:`pop` method to retrieve the data sent by the Python side. PyReader can work in conjunction with :code:`double_buffer` to realize asynchronous execution of data reading and model training/inference. - -For details, please refer to :ref:`api_fluid_layers_py_reader`. - - -Data output ------------- - -Fluid supports obtaining data for the current batch in the training/inference phase. - -The user can fetch expected variables from :code:`executor.run(fetch_list=[...], return_numpy=...)` . User can determine whether to convert the output data to numpy array by setting the :code:`return_numpy` parameter. -If :code:`return_numpy` is :code:`False` , data of type :code:`LoDTensor` will be returned. - -For specific usage, please refer to the relevant API documentation :ref:`api_fluid_executor_Executor` and +.. 
_api_guide_data_in_out_en: + +Data input and output +###################### + + +Data input +------------- + +Fluid supports two methods for data input, including: + +1. Python Reader: A pure Python Reader. The user defines the :code:`fluid.layers.data` layer on the Python side and builds the network. +Then, read the data by calling :code:`executor.run(feed=...)` . The process of data reading and model training/inference is performed simultaneously. + +2. PyReader: An Efficient and flexible C++ Reader interface. PyReader internally maintains a queue with size of :code:`capacity` (queue capacity is determined by +:code:`capacity` parameter in the :code:`fluid.layers.py_reader` interface ). Python side call queue :code:`push` to feed the training/inference data, and the C++ side training/inference program calls the :code:`pop` method to retrieve the data sent by the Python side. PyReader can work in conjunction with :code:`double_buffer` to realize asynchronous execution of data reading and model training/inference. + +For details, please refer to :ref:`api_fluid_layers_py_reader`. + + +Data output +------------ + +Fluid supports obtaining data for the current batch in the training/inference phase. + +The user can fetch expected variables from :code:`executor.run(fetch_list=[...], return_numpy=...)` . User can determine whether to convert the output data to numpy array by setting the :code:`return_numpy` parameter. +If :code:`return_numpy` is :code:`False` , data of type :code:`LoDTensor` will be returned. + +For specific usage, please refer to the relevant API documentation :ref:`api_fluid_executor_Executor` and :ref:`api_fluid_ParallelExecutor`. \ No newline at end of file diff --git a/docs/api_guides/low_level/layers/detection_en.rst b/docs/api_guides/low_level/layers/detection_en.rst index f7ffed049b2..5321de0dc88 100755 --- a/docs/api_guides/low_level/layers/detection_en.rst +++ b/docs/api_guides/low_level/layers/detection_en.rst @@ -1,62 +1,62 @@ - -.. 
_api_guide_detection_en: - - -Image Detection -################# - -PaddlePaddle Fluid implements several unique operators for image detection tasks. This article introduces related APIs grouped by diverse model types. - -General operations --------------------- - -Some common operations in image detection are a series of operations on the bounding boxes, including: - -* Encoding and decoding of the bounding box : Conversion between encoding and decoding between the two kinds of boxes. For example, the training phase encodes the prior box and the ground-truth box to obtain the training target value. For API Reference, please refer to :ref:`api_fluid_layers_box_coder` - -* Compare the two bounding boxes and match them: - - * iou_similarity: Calculate the IOU value of the two sets of boxes. For API Reference, please refer to :ref:`api_fluid_layers_iou_similarity` - - * bipartite_match: Get the row with the largest distance in each column by the greedy binary matching algorithm. For API Reference, please refer to :ref:`api_fluid_layers_bipartite_match` - -* Get classification and regression target values ​​(target_assign) based on the bounding boxes and labels: Get the target values and corresponding weights by matched indices and negative indices. For API Reference, please refer to :ref:`api_fluid_layers_target_assign` - - -Faster RCNN -------------- - -`Faster RCNN `_ is a typical dual-stage target detector. Compared with the traditional extraction method, the RPN network in Faster RCNN greatly improves the extraction efficiency by sharing convolution layer parameters, and proposes high-quality region proposals. The RPN network needs to compare the input anchor with the ground-truth value to generate a primary candidate region, and assigns a classification and regression value to the primary candidate box. 
The following four unique apis are required: - -* rpn_target_assign: Assign the classification and regression target values ​​of the RPN network to the anchor through the anchor and the ground-truth box. For API Reference, please refer to :ref:`api_fluid_layers_rpn_target_assign` - -* anchor_generator: Generate a series of anchors for each location. For API Reference, please refer to :ref:`api_fluid_layers_anchor_generator` - -* generate_proposal_labels: Get the classification and regression target values ​​of the RCNN part through the candidate box and the ground-truth box obtained by generate_proposals. For API Reference, please refer to :ref:`api_fluid_layers_generate_proposal_labels` - -* generate_proposals: Decode the RPN network output box and selects a new region proposal. For API Reference, please refer to :ref:`api_fluid_layers_generate_proposals` - - -SSD ----------------- - -`SSD `_ , the acronym for Single Shot MultiBox Detector, is one of the latest and better detection algorithms in the field of target detection. It has the characteristics of fast detection speed and high detection accuracy. Unlike the dual-stage detection method, the single-stage target detection does not perform regional proposals, but directly returns the target's bounding box and classification probability from the feature map. The SSD network calculates the loss through six metrics of features maps and performs prediction. SSD requires the following five unique apis: - -* Prior Box: Generate a series of candidate boxes for each input position based on different parameters. For API Reference, please refer to :ref:`api_fluid_layers_prior_box` - -* multi_box_head : Get the position and confidence of different prior boxes. For API Reference, please refer to :ref:`api_fluid_layers_multi_box_head` - -* detection_output: Decode the prior box and obtains the detection result by multi-class NMS. 
For API Reference, please refer to :ref:`api_fluid_layers_detection_output` - -* ssd_loss: Calculate the loss by prediction value of position offset, confidence, bounding box position and ground-truth box position and label. For API Reference, please refer to :ref:`api_fluid_layers_ssd_loss` - -* detection map: Evaluate the SSD network model using mAP. For API Reference, please refer to :ref:`api_fluid_layers_detection_map` - -OCR ---------- - -Scene text recognition is a process of converting image information into a sequence of characters in the case of complex image background, low resolution, diverse fonts, random distribution and so on. It can be considered as a special translation process: translation of image input into natural language output. The OCR task needs to perform irregular transformation on the bounding box, which requires the following two APIs: - -* roi_perspective_transform: Make a perspective transformation on the input RoI. For API Reference, please refer to :ref:`api_fluid_layers_roi_perspective_transform` - + +.. _api_guide_detection_en: + + +Image Detection +################# + +PaddlePaddle Fluid implements several unique operators for image detection tasks. This article introduces related APIs grouped by diverse model types. + +General operations +-------------------- + +Some common operations in image detection are a series of operations on the bounding boxes, including: + +* Encoding and decoding of the bounding box : Conversion between encoding and decoding between the two kinds of boxes. For example, the training phase encodes the prior box and the ground-truth box to obtain the training target value. For API Reference, please refer to :ref:`api_fluid_layers_box_coder` + +* Compare the two bounding boxes and match them: + + * iou_similarity: Calculate the IOU value of the two sets of boxes. 
For API Reference, please refer to :ref:`api_fluid_layers_iou_similarity` + + * bipartite_match: Get the row with the largest distance in each column by the greedy binary matching algorithm. For API Reference, please refer to :ref:`api_fluid_layers_bipartite_match` + +* Get classification and regression target values ​​(target_assign) based on the bounding boxes and labels: Get the target values and corresponding weights by matched indices and negative indices. For API Reference, please refer to :ref:`api_fluid_layers_target_assign` + + +Faster RCNN +------------- + +`Faster RCNN `_ is a typical dual-stage target detector. Compared with the traditional extraction method, the RPN network in Faster RCNN greatly improves the extraction efficiency by sharing convolution layer parameters, and proposes high-quality region proposals. The RPN network needs to compare the input anchor with the ground-truth value to generate a primary candidate region, and assigns a classification and regression value to the primary candidate box. The following four unique apis are required: + +* rpn_target_assign: Assign the classification and regression target values ​​of the RPN network to the anchor through the anchor and the ground-truth box. For API Reference, please refer to :ref:`api_fluid_layers_rpn_target_assign` + +* anchor_generator: Generate a series of anchors for each location. For API Reference, please refer to :ref:`api_fluid_layers_anchor_generator` + +* generate_proposal_labels: Get the classification and regression target values ​​of the RCNN part through the candidate box and the ground-truth box obtained by generate_proposals. For API Reference, please refer to :ref:`api_fluid_layers_generate_proposal_labels` + +* generate_proposals: Decode the RPN network output box and selects a new region proposal. 
For API Reference, please refer to :ref:`api_fluid_layers_generate_proposals` + + +SSD +---------------- + +`SSD `_ , the acronym for Single Shot MultiBox Detector, is one of the latest and better detection algorithms in the field of target detection. It has the characteristics of fast detection speed and high detection accuracy. Unlike the dual-stage detection method, the single-stage target detection does not perform regional proposals, but directly returns the target's bounding box and classification probability from the feature map. The SSD network calculates the loss through six metrics of feature maps and performs prediction. SSD requires the following five unique APIs: + +* Prior Box: Generate a series of candidate boxes for each input position based on different parameters. For API Reference, please refer to :ref:`api_fluid_layers_prior_box` + +* multi_box_head : Get the position and confidence of different prior boxes. For API Reference, please refer to :ref:`api_fluid_layers_multi_box_head` + +* detection_output: Decode the prior box and obtains the detection result by multi-class NMS. For API Reference, please refer to :ref:`api_fluid_layers_detection_output` + +* ssd_loss: Calculate the loss by prediction value of position offset, confidence, bounding box position and ground-truth box position and label. For API Reference, please refer to :ref:`api_fluid_layers_ssd_loss` + +* detection map: Evaluate the SSD network model using mAP. For API Reference, please refer to :ref:`api_fluid_layers_detection_map` + +OCR +--------- + +Scene text recognition is a process of converting image information into a sequence of characters in the case of complex image background, low resolution, diverse fonts, random distribution and so on. It can be considered as a special translation process: translation of image input into natural language output.
The OCR task needs to perform irregular transformation on the bounding box, which requires the following two APIs: + +* roi_perspective_transform: Make a perspective transformation on the input RoI. For API Reference, please refer to :ref:`api_fluid_layers_roi_perspective_transform` + * polygon_box_transform: Coordinate transformation of the irregular bounding box. For API Reference, please refer to :ref:`api_fluid_layers_polygon_box_transform` \ No newline at end of file diff --git a/docs/api_guides/low_level/layers/learning_rate_scheduler_en.rst b/docs/api_guides/low_level/layers/learning_rate_scheduler_en.rst index f6937851aac..28f47cac1d7 100755 --- a/docs/api_guides/low_level/layers/learning_rate_scheduler_en.rst +++ b/docs/api_guides/low_level/layers/learning_rate_scheduler_en.rst @@ -1,46 +1,46 @@ -.. _api_guide_learning_rate_scheduler_en: - -######################## -Learning rate scheduler -######################## - -When we use a method such as the gradient descent method to train the model, the training speed and loss are generally taken into consideration to select a relatively appropriate learning rate. However, if a fixed learning rate is used throughout the training process, the loss of the training set will not continue to decline after falling to a certain extent, but will 'jump' within a certain range. The jumping principle is shown in the figure below. When the loss function converges to the local minimum value, the update step will be too large due to the excessive learning rate. The parameter update will repeatedly *jump over* the local minimum value and an oscillation-like phenomenon will occur. - -.. image:: ../../../images/learning_rate_scheduler.png - :scale: 80 % - :align: center - - -The learning rate scheduler defines a commonly used learning rate decay strategy to dynamically generate the learning rate. 
The learning rate decay function takes epoch or step as the parameter and returns a learning rate that gradually decreases with training. Thereby it reduces the training time and finds the local minimum value at the same time. - -The following content describes the APIs related to the learning rate scheduler: - -====== - -* :code:`NoamDecay`: Noam decay. Please refer to `Attention Is All You Need `_ for related algorithms. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_NoamDecay` - -* :code:`ExponentialDecay`: Exponential decay. That is, each time the current learning rate is multiplied by the given decay rate to get the next learning rate. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_ExponentialDecay` - -* :code:`NaturalExpDecay`: Natural exponential decay. That is, each time the current learning rate is multiplied by the natural exponent of the given decay rate to get the next learning rate. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_NaturalExpDecay` - -* :code:`InverseTimeDecay`: Inverse time decay. The decayed learning rate is inversely proportional to the current number of decays. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_InverseTimeDecay` - -* :code:`PolynomialDecay`: Polynomial decay, i.e. the decayed learning rate is calculated in a polynomial format with the initial learning rate and the end learning rate. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_PolynomialDecay` - -* :code:`PiecewiseDecay`: Piecewise decay. That is, the stair-like decay for a given number of steps, the learning rate stays the same within each step. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_PiecewiseDecay` - -* :code:`CosineAnnealingDecay`: Cosine attenuation. It means the learning rate changes with the number of steps in the form of a cosine function. 
For related API Reference please refer to :ref:`api_paddle_optimizer_lr_CosineAnnealingDecay` - -* :code:`LinearWarmup`: The learning rate increases linearly to an appointed rate with the number of steps. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_LinearWarmup` - -* :code:`StepDecay`: Decay the learning rate every certain number of steps, and ``step_size`` needs to be specified. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_StepDecay` - -* :code:`MultiStepDecay`: Decay the learning rate at specified step, and ``milestones`` needs to be specified. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_MultiStepDecay` - -* :code:`LambdaDecay`: Decay the learning rate by lambda function. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_LambdaDecay` - -* :code:`ReduceOnPlateau`: Adjuge the learning rate according to monitoring index(In general, it's loss), and decay the learning rate when monitoring index becomes stable. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_ReduceOnPlateau` - -* :code:`OneCycleLR`: One cycle decay. That is, the initial learning rate first increases to maximum learning rate, and then it decreases to minimum learning rate which is much less than initial learning rate. For related API Reference please refer to :ref:`cn_api_paddle_optimizer_lr_OneCycleLR` - +.. _api_guide_learning_rate_scheduler_en: + +######################## +Learning rate scheduler +######################## + +When we use a method such as the gradient descent method to train the model, the training speed and loss are generally taken into consideration to select a relatively appropriate learning rate. However, if a fixed learning rate is used throughout the training process, the loss of the training set will not continue to decline after falling to a certain extent, but will 'jump' within a certain range. The jumping principle is shown in the figure below. 
When the loss function converges to the local minimum value, the update step will be too large due to the excessive learning rate. The parameter update will repeatedly *jump over* the local minimum value and an oscillation-like phenomenon will occur. + +.. image:: ../../../images/learning_rate_scheduler.png + :scale: 80 % + :align: center + + +The learning rate scheduler defines a commonly used learning rate decay strategy to dynamically generate the learning rate. The learning rate decay function takes epoch or step as the parameter and returns a learning rate that gradually decreases with training. Thereby it reduces the training time and finds the local minimum value at the same time. + +The following content describes the APIs related to the learning rate scheduler: + +====== + +* :code:`NoamDecay`: Noam decay. Please refer to `Attention Is All You Need `_ for related algorithms. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_NoamDecay` + +* :code:`ExponentialDecay`: Exponential decay. That is, each time the current learning rate is multiplied by the given decay rate to get the next learning rate. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_ExponentialDecay` + +* :code:`NaturalExpDecay`: Natural exponential decay. That is, each time the current learning rate is multiplied by the natural exponent of the given decay rate to get the next learning rate. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_NaturalExpDecay` + +* :code:`InverseTimeDecay`: Inverse time decay. The decayed learning rate is inversely proportional to the current number of decays. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_InverseTimeDecay` + +* :code:`PolynomialDecay`: Polynomial decay, i.e. the decayed learning rate is calculated in a polynomial format with the initial learning rate and the end learning rate. 
For related API Reference please refer to :ref:`api_paddle_optimizer_lr_PolynomialDecay` + +* :code:`PiecewiseDecay`: Piecewise decay. That is, the stair-like decay for a given number of steps, the learning rate stays the same within each step. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_PiecewiseDecay` + +* :code:`CosineAnnealingDecay`: Cosine attenuation. It means the learning rate changes with the number of steps in the form of a cosine function. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_CosineAnnealingDecay` + +* :code:`LinearWarmup`: The learning rate increases linearly to an appointed rate with the number of steps. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_LinearWarmup` + +* :code:`StepDecay`: Decay the learning rate every certain number of steps, and ``step_size`` needs to be specified. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_StepDecay` + +* :code:`MultiStepDecay`: Decay the learning rate at specified step, and ``milestones`` needs to be specified. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_MultiStepDecay` + +* :code:`LambdaDecay`: Decay the learning rate by lambda function. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_LambdaDecay` + +* :code:`ReduceOnPlateau`: Adjust the learning rate according to the monitoring index (in general, it's loss), and decay the learning rate when the monitoring index becomes stable. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_ReduceOnPlateau` + +* :code:`OneCycleLR`: One cycle decay. That is, the initial learning rate first increases to maximum learning rate, and then it decreases to minimum learning rate which is much less than initial learning rate. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_OneCycleLR` + * :code:`CyclicLR`: Cyclic decay.
That is, the learning rate cycles between minimum and maximum learning rate with a constant frequency in specified a sacle method. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_CyclicLR` \ No newline at end of file diff --git a/docs/api_guides/low_level/layers/loss_function_en.rst b/docs/api_guides/low_level/layers/loss_function_en.rst index 9e7c6a3a667..5c80ec6b8ed 100755 --- a/docs/api_guides/low_level/layers/loss_function_en.rst +++ b/docs/api_guides/low_level/layers/loss_function_en.rst @@ -1,61 +1,61 @@ -.. _api_guide_loss_function_en: - -############## -Loss function -############## - -The loss function defines the difference between the inference result and the ground-truth result. As the optimization target, it directly determines whether the model training is good or not, and many researches also focus on the optimization of the loss function design. -Paddle Fluid offers diverse types of loss functions for a variety of tasks. Let's take a look at the commonly-used loss functions included in Paddle Fluid. - -Regression -=========== - -The squared error loss uses the square of the error between the predicted value and the ground-truth value as the sample loss, which is the most basic loss function in the regression problems. -For API Reference, please refer to :ref:`api_fluid_layers_square_error_cost`. - -Smooth L1 loss (smooth_l1 loss) is a piecewise loss function that is relatively insensitive to outliers and therefore more robust. -For API Reference, please refer to :ref:`api_fluid_layers_smooth_l1`. - - -Classification -================ - -`cross entropy `_ is the most widely used loss function in classification problems. The interfaces in Paddle Fluid for the cross entropy loss functions are divided into the one accepting fractional input of normalized probability values ​​and another for non-normalized input. And Fluid supports two types labels, namely soft label and hard label. 
-For API Reference, please refer to :ref:`api_fluid_layers_cross_entropy` and :ref:`api_fluid_layers_softmax_with_cross_entropy`. - -Multi-label classification ----------------------------- -For the multi-label classification, such as the occasion that an article belongs to multiple categories like politics, technology, it is necessary to calculate the loss by treating each category as an independent binary-classification problem. We provide the sigmoid_cross_entropy_with_logits loss function for this purpose. -For API Reference, please refer to :ref:`api_fluid_layers_sigmoid_cross_entropy_with_logits`. - -Large-scale classification ------------------------------ -For large-scale classification problems, special methods and corresponding loss functions are usually needed to speed up the training. The commonly used methods are -`Noise contrastive estimation (NCE) `_ and `Hierarchical sigmoid `_ . - -* NCE solves the binary-classification problem of discriminating the true distribution and the noise distribution by converting the multi-classification problem into a classifier. The maximum likelihood estimation is performed based on the binary-classification to avoid calculating the normalization factor in the full-class space to reduce computational complexity. -* Hierarchical sigmoid realizes multi-classification by hierarchical classification of binary trees. The loss of each sample corresponds to the sum of the cross-entropy of the binary-classification for each node on the coding path, which avoids the calculation of the normalization factor and reduces the computational complexity. -The loss functions for both methods are available in Paddle Fluid. For API Reference please refer to :ref:`api_fluid_layers_nce` and :ref:`api_fluid_layers_hsigmoid`. 
- -Sequence classification -------------------------- -Sequence classification can be divided into the following three types: - -* Sequence Classification problem is that the entire sequence corresponds to a prediction label, such as text classification. This is a common classification problem, you can use cross entropy as the loss function. -* Segment Classification problem is that each segment in the sequence corresponds to its own category tag, such as named entity recognition. For this sequence labeling problem, `the (Linear Chain) Conditional Random Field (CRF) `_ is a commonly used model. The method uses the likelihood probability on the sentence level, and the labels for different positions in the sequence are no longer conditionally independent, which can effectively solve the label offset problem. Support for CRF loss functions is available in Paddle Fluid. For API Reference please refer to :ref:`api_fluid_layers_linear_chain_crf` . -* Temporal Classification problem needs to label unsegmented sequences, such as speech recognition. For this time-based classification problem, `CTC(Connectionist Temporal Classification) `_ loss function does not need to align input data and labels, and is able to perform end-to-end training. Paddle Fluid provides a warpctc interface to calculate the corresponding loss. For API Reference, please refer to :ref:`api_fluid_layers_warpctc` . - -Rank -========= - -`Rank problems `_ can use learning methods of Pointwise, Pairwise, and Listwise. Different methods require different loss functions: - -* The Pointwise method solves the ranking problem by approximating the regression problem. Therefore the loss function of the regression problem can be used. -* Pairwise's method requires a special loss function. Pairwise solves the sorting problem by approximating the classification problem, using relevance score of two documents and the query to use the partial order as the binary-classification label to calculate the loss. 
Paddle Fluid provides two commonly used loss functions for Pairwise methods. For API Reference please refer to :ref:`api_fluid_layers_rank_loss` and :ref:`api_fluid_layers_margin_rank_loss`. - -More -==== - -For more complex loss functions, try to use combinations of other loss functions; the :ref:`api_fluid_layers_dice_loss` provided in Paddle Fluid for image segmentation tasks is an example of using combinations of other operators (calculate the average likelihood probability of each pixel position). The multi-objective loss function can also be considered similarly, such as Faster RCNN that uses the weighted sum of cross entropy and smooth_l1 loss as a loss function. - +.. _api_guide_loss_function_en: + +############## +Loss function +############## + +The loss function defines the difference between the inference result and the ground-truth result. As the optimization target, it directly determines whether the model training is good or not, and many researches also focus on the optimization of the loss function design. +Paddle Fluid offers diverse types of loss functions for a variety of tasks. Let's take a look at the commonly-used loss functions included in Paddle Fluid. + +Regression +=========== + +The squared error loss uses the square of the error between the predicted value and the ground-truth value as the sample loss, which is the most basic loss function in the regression problems. +For API Reference, please refer to :ref:`api_fluid_layers_square_error_cost`. + +Smooth L1 loss (smooth_l1 loss) is a piecewise loss function that is relatively insensitive to outliers and therefore more robust. +For API Reference, please refer to :ref:`api_fluid_layers_smooth_l1`. + + +Classification +================ + +`cross entropy `_ is the most widely used loss function in classification problems. 
The interfaces in Paddle Fluid for the cross entropy loss functions are divided into the one accepting fractional input of normalized probability values ​​and another for non-normalized input. And Fluid supports two types labels, namely soft label and hard label. +For API Reference, please refer to :ref:`api_fluid_layers_cross_entropy` and :ref:`api_fluid_layers_softmax_with_cross_entropy`. + +Multi-label classification +---------------------------- +For the multi-label classification, such as the occasion that an article belongs to multiple categories like politics, technology, it is necessary to calculate the loss by treating each category as an independent binary-classification problem. We provide the sigmoid_cross_entropy_with_logits loss function for this purpose. +For API Reference, please refer to :ref:`api_fluid_layers_sigmoid_cross_entropy_with_logits`. + +Large-scale classification +----------------------------- +For large-scale classification problems, special methods and corresponding loss functions are usually needed to speed up the training. The commonly used methods are +`Noise contrastive estimation (NCE) `_ and `Hierarchical sigmoid `_ . + +* NCE solves the binary-classification problem of discriminating the true distribution and the noise distribution by converting the multi-classification problem into a classifier. The maximum likelihood estimation is performed based on the binary-classification to avoid calculating the normalization factor in the full-class space to reduce computational complexity. +* Hierarchical sigmoid realizes multi-classification by hierarchical classification of binary trees. The loss of each sample corresponds to the sum of the cross-entropy of the binary-classification for each node on the coding path, which avoids the calculation of the normalization factor and reduces the computational complexity. +The loss functions for both methods are available in Paddle Fluid. 
For API Reference please refer to :ref:`api_fluid_layers_nce` and :ref:`api_fluid_layers_hsigmoid`. + +Sequence classification +------------------------- +Sequence classification can be divided into the following three types: + +* Sequence Classification problem is that the entire sequence corresponds to a prediction label, such as text classification. This is a common classification problem, you can use cross entropy as the loss function. +* Segment Classification problem is that each segment in the sequence corresponds to its own category tag, such as named entity recognition. For this sequence labeling problem, `the (Linear Chain) Conditional Random Field (CRF) `_ is a commonly used model. The method uses the likelihood probability on the sentence level, and the labels for different positions in the sequence are no longer conditionally independent, which can effectively solve the label offset problem. Support for CRF loss functions is available in Paddle Fluid. For API Reference please refer to :ref:`api_fluid_layers_linear_chain_crf` . +* Temporal Classification problem needs to label unsegmented sequences, such as speech recognition. For this time-based classification problem, `CTC(Connectionist Temporal Classification) `_ loss function does not need to align input data and labels, and is able to perform end-to-end training. Paddle Fluid provides a warpctc interface to calculate the corresponding loss. For API Reference, please refer to :ref:`api_fluid_layers_warpctc` . + +Rank +========= + +`Rank problems `_ can use learning methods of Pointwise, Pairwise, and Listwise. Different methods require different loss functions: + +* The Pointwise method solves the ranking problem by approximating the regression problem. Therefore the loss function of the regression problem can be used. +* Pairwise's method requires a special loss function. 
Pairwise solves the sorting problem by approximating the classification problem, using relevance score of two documents and the query to use the partial order as the binary-classification label to calculate the loss. Paddle Fluid provides two commonly used loss functions for Pairwise methods. For API Reference please refer to :ref:`api_fluid_layers_rank_loss` and :ref:`api_fluid_layers_margin_rank_loss`. + +More +==== + +For more complex loss functions, try to use combinations of other loss functions; the :ref:`api_fluid_layers_dice_loss` provided in Paddle Fluid for image segmentation tasks is an example of using combinations of other operators (calculate the average likelihood probability of each pixel position). The multi-objective loss function can also be considered similarly, such as Faster RCNN that uses the weighted sum of cross entropy and smooth_l1 loss as a loss function. + **Note**, after defining the loss function, in order to optimize with :ref:`api_guide_optimizer_en`, you usually need to use :ref:`api_fluid_layers_mean` or other operations to convert the high-dimensional Tensor returned by the loss function to a Scalar value. \ No newline at end of file diff --git a/docs/api_guides/low_level/layers/pooling_en.rst b/docs/api_guides/low_level/layers/pooling_en.rst index d7fcb53db11..8d19e8d0ff2 100755 --- a/docs/api_guides/low_level/layers/pooling_en.rst +++ b/docs/api_guides/low_level/layers/pooling_en.rst @@ -1,80 +1,80 @@ -.. _api_guide_pool_en: - -######## -Pooling -######## - -Pooling is to downsample the input features and reduce overfitting. Reducing overfitting is the result of reducing the output size, which also reduces the number of parameters in subsequent layers. - -Pooling usually only takes the feature maps of the previous layer as input, and some parameters are needed to determine the specific operation of the pooling. 
In PaddlePaddle, we also choose the specific pooling by setting parameters like the size, method, step, whether to pool globally, whether to use cudnn, whether to use ceil function to calculate output. -PaddlePaddle has two-dimensional (pool2d), three-dimensional convolution (pool3d), RoI pooling (roi_pool) for fixed-length image features, and sequence pooling (sequence_pool) for sequences, as well as the reverse(backward) process of pooling calculations. The following text describes the 2D/3D pooling, and the RoI pooling, and then the sequence pooling. - --------------- - -1. pool2d/pool3d ------------------------- - -- ``input`` : The pooling operation receives any ``Tensor`` that conforms to the layout: ``N(batch size)* C(channel size) * H(height) * W(width)`` format as input. - -- ``pool_size`` : It is used to determine the size of the pooling ``filter``, which determines the size of data to be pooled into a single value. - -- ``num_channels`` : It is used to determine the number of ``channel`` of input. If it is not set or is set to ``None``, its actual value will be automatically set to the ``channel`` quantity of input. - -- ``pool_type`` : It receives one of ``agg`` and ``max`` as the pooling method. The default value is ``max`` . ``max`` means maximum pooling, i.e. calculating the maximum value of the data in the pooled ``filter`` area as output; and ``avg`` means averaging pooling, i.e. calculating the average of the data in the pooled ``filter`` area as output. - -- ``pool_stride`` : It is the stride size in which the pooling ``filter`` moves on the input feature map. - -- ``pool_padding`` : It is used to determine the size of ``padding`` in the pooling, ``padding`` is used to pool the features of the edges of feature maps. The ``pool_padding`` size determines how much zero is padded to the edge of the feature maps. Thereby it determines the extent to which the edge features are pooled. - -- ``global_pooling`` : It Means whether to use global pooling. 
Global pooling refers to pooling using ``filter`` of the same size as the feature map. This process can also use average pooling or the maximum pooling as the pooling method. Global pooling is usually used to replace the fully connected layer to greatly reduce the parameters to prevent overfitting. - -- The ``use_cudnn`` : This option allows you to choose whether or not to use cudnn to accelerate pooling. - -- ``ceil_mode`` : Whether to use the ceil function to calculate the output height and width. ``ceil mode`` means ceiling mode, which means that, in the feature map, the edge parts that are smaller than ``filter size`` will be retained, and separately calculated. It can be understood as supplementing the original data with edge with a value of -NAN. By contrast, The floor mode directly discards the edges smaller than the ``filter size``. The specific calculation formula is as follows: -    - * Non ``ceil_mode`` : ``Output size = (input size - filter size + 2 * padding) / stride (stride size) + 1`` -     - * ``ceil_mode`` : ``Output size = (input size - filter size + 2 * padding + stride - 1) / stride + 1`` -     - - -related API: - -- :ref:`api_fluid_layers_pool2d` -- :ref:`api_fluid_layers_pool3d` - - -2. roi_pool ------------------- - -``roi_pool`` is generally used in detection networks, and the input feature map is pooled to a specific size by the bounding box. - -- ``rois`` : It receives ``LoDTensor`` type to indicate the Regions of Interest that needs to be pooled. For an explanation of RoI, please refer to `Paper `__ - -- ``pooled_height`` and ``pooled_width`` : accept non-square pooling box sizes - -- ``spatial_scale`` : Used to set the scale of scaling the RoI and the original image. Note that the settings here require the user to manually calculate the actual scaling of the RoI and the original image. -  - -related API: - -- :ref:`api_fluid_layers_roi_pool` - - -3. 
sequence_pool --------------------- - -``sequence_pool`` is an interface used to pool variable-length sequences. It pools the features of all time steps of each instance, and also supports -one of ``average``, ``sum``, ``sqrt`` and ``max`` to be used as the pooling method. Specifically: - -- ``average`` sums up the data in each time step and takes its average as the pooling result. - -- ``sum`` take the sum of the data in each time step as pooling result. - -- ``sqrt`` sums the data in each time step and takes its square root as the pooling result. - -- ``max`` takes the maximum value for each time step as the pooling result. - -related API: - -- :ref:`api_fluid_layers_sequence_pool` +.. _api_guide_pool_en: + +######## +Pooling +######## + +Pooling is to downsample the input features and reduce overfitting. Reducing overfitting is the result of reducing the output size, which also reduces the number of parameters in subsequent layers. + +Pooling usually only takes the feature maps of the previous layer as input, and some parameters are needed to determine the specific operation of the pooling. In PaddlePaddle, we also choose the specific pooling by setting parameters like the size, method, step, whether to pool globally, whether to use cudnn, whether to use ceil function to calculate output. +PaddlePaddle has two-dimensional (pool2d), three-dimensional convolution (pool3d), RoI pooling (roi_pool) for fixed-length image features, and sequence pooling (sequence_pool) for sequences, as well as the reverse(backward) process of pooling calculations. The following text describes the 2D/3D pooling, and the RoI pooling, and then the sequence pooling. + +-------------- + +1. pool2d/pool3d +------------------------ + +- ``input`` : The pooling operation receives any ``Tensor`` that conforms to the layout: ``N(batch size)* C(channel size) * H(height) * W(width)`` format as input. 
+ +- ``pool_size`` : It is used to determine the size of the pooling ``filter``, which determines the size of data to be pooled into a single value. + +- ``num_channels`` : It is used to determine the number of ``channel`` of input. If it is not set or is set to ``None``, its actual value will be automatically set to the ``channel`` quantity of input. + +- ``pool_type`` : It receives one of ``avg`` and ``max`` as the pooling method. The default value is ``max`` . ``max`` means maximum pooling, i.e. calculating the maximum value of the data in the pooled ``filter`` area as output; and ``avg`` means average pooling, i.e. calculating the average of the data in the pooled ``filter`` area as output. + +- ``pool_stride`` : It is the stride size in which the pooling ``filter`` moves on the input feature map. + +- ``pool_padding`` : It is used to determine the size of ``padding`` in the pooling, ``padding`` is used to pool the features of the edges of feature maps. The ``pool_padding`` size determines how much zero is padded to the edge of the feature maps. Thereby it determines the extent to which the edge features are pooled. + +- ``global_pooling`` : It means whether to use global pooling. Global pooling refers to pooling using ``filter`` of the same size as the feature map. This process can also use average pooling or the maximum pooling as the pooling method. Global pooling is usually used to replace the fully connected layer to greatly reduce the parameters to prevent overfitting. + +- The ``use_cudnn`` : This option allows you to choose whether or not to use cudnn to accelerate pooling. + +- ``ceil_mode`` : Whether to use the ceil function to calculate the output height and width. ``ceil mode`` means ceiling mode, which means that, in the feature map, the edge parts that are smaller than ``filter size`` will be retained, and separately calculated. It can be understood as supplementing the original data with edge with a value of -NAN.
By contrast, The floor mode directly discards the edges smaller than the ``filter size``. The specific calculation formula is as follows: +    + * Non ``ceil_mode`` : ``Output size = (input size - filter size + 2 * padding) / stride (stride size) + 1`` +     + * ``ceil_mode`` : ``Output size = (input size - filter size + 2 * padding + stride - 1) / stride + 1`` +     + + +related API: + +- :ref:`api_fluid_layers_pool2d` +- :ref:`api_fluid_layers_pool3d` + + +2. roi_pool +------------------ + +``roi_pool`` is generally used in detection networks, and the input feature map is pooled to a specific size by the bounding box. + +- ``rois`` : It receives ``LoDTensor`` type to indicate the Regions of Interest that needs to be pooled. For an explanation of RoI, please refer to `Paper `__ + +- ``pooled_height`` and ``pooled_width`` : accept non-square pooling box sizes + +- ``spatial_scale`` : Used to set the scale of scaling the RoI and the original image. Note that the settings here require the user to manually calculate the actual scaling of the RoI and the original image. +  + +related API: + +- :ref:`api_fluid_layers_roi_pool` + + +3. sequence_pool +-------------------- + +``sequence_pool`` is an interface used to pool variable-length sequences. It pools the features of all time steps of each instance, and also supports +one of ``average``, ``sum``, ``sqrt`` and ``max`` to be used as the pooling method. Specifically: + +- ``average`` sums up the data in each time step and takes its average as the pooling result. + +- ``sum`` take the sum of the data in each time step as pooling result. + +- ``sqrt`` sums the data in each time step and takes its square root as the pooling result. + +- ``max`` takes the maximum value for each time step as the pooling result. 
+ +related API: + +- :ref:`api_fluid_layers_sequence_pool` diff --git a/docs/api_guides/low_level/layers/sparse_update.rst b/docs/api_guides/low_level/layers/sparse_update.rst index 55357da92d6..63b8522978e 100644 --- a/docs/api_guides/low_level/layers/sparse_update.rst +++ b/docs/api_guides/low_level/layers/sparse_update.rst @@ -1,45 +1,45 @@ -.. _api_guide_sparse_update: - -##### -稀疏更新 -##### - -Fluid的 :ref:`cn_api_fluid_layers_embedding` 层在单机训练和分布式训练时,均可以支持“稀疏更新”,即梯度以sparse tensor 结构存储,只保存梯度不为0的行。 -在分布式训练中,对于较大的embedding层,开启稀疏更新有助于减少通信数据量,提升训练速度。 - -在paddle内部,我们用lookup_table来实现embedding。下边这张图说明了embedding在正向和反向计算的过程: - -如图所示:一个Tensor中有两行不为0,正向计算的过程中,我们使用ids存储不为0的行,并使用对应的两行数据来进行计算;反向更新的过程也只更新这两行。 - -.. image:: ../../../images/lookup_table_training.png - :scale: 50 % - -embedding使用例子: ---------------------- - -API详细使用方法参考 :ref:`cn_api_fluid_layers_embedding` ,以下是一个简单的例子: - -.. code-block:: python - - DICT_SIZE = 10000 * 10 - EMBED_SIZE = 64 - IS_SPARSE = False - def word_emb(word, dict_size=DICT_SIZE, embed_size=EMBED_SIZE): - embed = fluid.layers.embedding( - input=word, - size=[dict_size, embed_size], - dtype='float32', - param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Normal(scale=1/math.sqrt(dict_size))), - is_sparse=IS_SPARSE, - is_distributed=False) - return embed - -以上参数中: - -- :code:`is_sparse` : 反向计算的时候梯度是否为sparse tensor。如果不设置,梯度是一个 :ref:`Lod_Tensor ` 。默认为False。 - -- :code:`is_distributed` : 标志是否是用在分布式的场景下。一般大规模稀疏更新(embedding的第0维维度很大,比如几百万以上)才需要设置。具体可以参考大规模稀疏的API guide :ref:`cn_api_guide_async_training` 。默认为False。 - -- API汇总: - - :ref:`cn_api_fluid_layers_embedding` +.. _api_guide_sparse_update: + +##### +稀疏更新 +##### + +Fluid的 :ref:`cn_api_fluid_layers_embedding` 层在单机训练和分布式训练时,均可以支持“稀疏更新”,即梯度以sparse tensor 结构存储,只保存梯度不为0的行。 +在分布式训练中,对于较大的embedding层,开启稀疏更新有助于减少通信数据量,提升训练速度。 + +在paddle内部,我们用lookup_table来实现embedding。下边这张图说明了embedding在正向和反向计算的过程: + +如图所示:一个Tensor中有两行不为0,正向计算的过程中,我们使用ids存储不为0的行,并使用对应的两行数据来进行计算;反向更新的过程也只更新这两行。 + +.. 
image:: ../../../images/lookup_table_training.png + :scale: 50 % + +embedding使用例子: +--------------------- + +API详细使用方法参考 :ref:`cn_api_fluid_layers_embedding` ,以下是一个简单的例子: + +.. code-block:: python + + DICT_SIZE = 10000 * 10 + EMBED_SIZE = 64 + IS_SPARSE = False + def word_emb(word, dict_size=DICT_SIZE, embed_size=EMBED_SIZE): + embed = fluid.layers.embedding( + input=word, + size=[dict_size, embed_size], + dtype='float32', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal(scale=1/math.sqrt(dict_size))), + is_sparse=IS_SPARSE, + is_distributed=False) + return embed + +以上参数中: + +- :code:`is_sparse` : 反向计算的时候梯度是否为sparse tensor。如果不设置,梯度是一个 :ref:`Lod_Tensor ` 。默认为False。 + +- :code:`is_distributed` : 标志是否是用在分布式的场景下。一般大规模稀疏更新(embedding的第0维维度很大,比如几百万以上)才需要设置。具体可以参考大规模稀疏的API guide :ref:`cn_api_guide_async_training` 。默认为False。 + +- API汇总: + - :ref:`cn_api_fluid_layers_embedding` diff --git a/docs/api_guides/low_level/layers/sparse_update_en.rst b/docs/api_guides/low_level/layers/sparse_update_en.rst index f97ef279b8f..8e0f8fc7885 100755 --- a/docs/api_guides/low_level/layers/sparse_update_en.rst +++ b/docs/api_guides/low_level/layers/sparse_update_en.rst @@ -1,45 +1,45 @@ -.. _api_guide_sparse_update_en: - -############### -Sparse update -############### - -Fluid's :ref:`api_fluid_layers_embedding` layer supports "sparse updates" in both single-node and distributed training, which means gradients are stored in a sparse tensor structure where only rows with non-zero gradients are saved. -In distributed training, for larger embedding layers, sparse updates reduce the amount of communication data and speed up training. - -In paddle, we use lookup_table to implement embedding. The figure below illustrates the process of embedding in the forward and backward calculations: - -As shown in the figure: two rows in a Tensor are not 0. 
In the process of forward calculation, we use ids to store rows that are not 0, and use the corresponding two rows of data for calculation; the process of backward update is only to update the two lines. - -.. image:: ../../../images/lookup_table_training.png - :scale: 50 % - -Example --------------------------- - -API reference :ref:`api_fluid_layers_embedding` . Here is a simple example: - -.. code-block:: python - - DICT_SIZE = 10000 * 10 - EMBED_SIZE = 64 - IS_SPARSE = False - def word_emb(word, dict_size=DICT_SIZE, embed_size=EMBED_SIZE): - embed = fluid.layers.embedding( - input=word, - size=[dict_size, embed_size], - dtype='float32', - param_attr=fluid.ParamAttr( - initializer=fluid.initializer.Normal(scale=1/math.sqrt(dict_size))), - is_sparse=IS_SPARSE, - is_distributed=False) - return embed - -The parameters: - -- :code:`is_sparse` : Whether the gradient is a sparse tensor in the backward calculation. If not set, the gradient is a `LodTensor `_ . The default is False. - -- :code:`is_distributed` : Whether the current training is in a distributed scenario. Generally, this parameter can only be set in large-scale sparse updates (the 0th dimension of embedding is very large, such as several million or more). For details, please refer to the large-scale sparse API guide :ref:`api_guide_async_training`. The default is False. - -- API : - - :ref:`api_fluid_layers_embedding` +.. _api_guide_sparse_update_en: + +############### +Sparse update +############### + +Fluid's :ref:`api_fluid_layers_embedding` layer supports "sparse updates" in both single-node and distributed training, which means gradients are stored in a sparse tensor structure where only rows with non-zero gradients are saved. +In distributed training, for larger embedding layers, sparse updates reduce the amount of communication data and speed up training. + +In paddle, we use lookup_table to implement embedding. 
The figure below illustrates the process of embedding in the forward and backward calculations: + +As shown in the figure: two rows in a Tensor are not 0. In the process of forward calculation, we use ids to store rows that are not 0, and use the corresponding two rows of data for calculation; the process of backward update is only to update the two lines. + +.. image:: ../../../images/lookup_table_training.png + :scale: 50 % + +Example +-------------------------- + +API reference :ref:`api_fluid_layers_embedding` . Here is a simple example: + +.. code-block:: python + + DICT_SIZE = 10000 * 10 + EMBED_SIZE = 64 + IS_SPARSE = False + def word_emb(word, dict_size=DICT_SIZE, embed_size=EMBED_SIZE): + embed = fluid.layers.embedding( + input=word, + size=[dict_size, embed_size], + dtype='float32', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Normal(scale=1/math.sqrt(dict_size))), + is_sparse=IS_SPARSE, + is_distributed=False) + return embed + +The parameters: + +- :code:`is_sparse` : Whether the gradient is a sparse tensor in the backward calculation. If not set, the gradient is a `LodTensor `_ . The default is False. + +- :code:`is_distributed` : Whether the current training is in a distributed scenario. Generally, this parameter can only be set in large-scale sparse updates (the 0th dimension of embedding is very large, such as several million or more). For details, please refer to the large-scale sparse API guide :ref:`api_guide_async_training`. The default is False. + +- API : + - :ref:`api_fluid_layers_embedding` diff --git a/docs/api_guides/low_level/layers/tensor_en.rst b/docs/api_guides/low_level/layers/tensor_en.rst index 4a14864d2e6..d188b62d131 100755 --- a/docs/api_guides/low_level/layers/tensor_en.rst +++ b/docs/api_guides/low_level/layers/tensor_en.rst @@ -1,141 +1,141 @@ -.. 
_api_guide_tensor_en: - -######## -Tensor -######## - -There are two data structures used in Fluid to host the data, namely `Tensor and LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor_en.html>`_ . LoD-Tensor is a unique concept of Fluid, which appends sequence information to Tensor. The data that can be transferred in the framework includes: input, output, and learnable parameters in the network. All of them are uniformly represented by LoD-Tensor. In addition, tensor can be regarded as a special LoD-Tensor. - -Now let's take a closer look at the operations related to these two types of data. - -Tensor -======= - -1. create_tensor ---------------------- -Tensor is used to carry data in the framework, using :code:`create_tensor` to create a Lod-Tensor variable of the specified the data type. - -API reference : :ref:`api_fluid_layers_create_tensor` - - -2. create_parameter ---------------------- -The neural network training process is a learning process for parameters. Fluid uses :code:`create_parameter` to create a learnable parameter. The value of this parameter can be changed by the operator. - -API reference : :ref:`api_fluid_layers_create_parameter` - - - -3. create_global_var ---------------------- -Fluid uses :code:`create_global_var` to create a global tensor and this API allows you to specify the data type, shape, and value of the Tensor variable being created. - -API reference : :ref:`api_fluid_layers_create_global_var` - - -4. cast ---------------- - -Fluid uses :code:`cast` to convert the data to the specified type. - -API reference : :ref:`api_fluid_layers_cast` - - -5.concat ----------------- - -Fluid uses :code:`concat` to concatenate input data along a specified dimension. - -API reference : :ref:`api_fluid_layers_concat` - - -6. sums ----------------- - -Fluid uses :code:`sums` to sum up the input data. - -API reference : :ref:`api_fluid_layers_sums` - -7. 
fill_constant ------------------ - -Fluid uses :code:`fill_constant` to create a Tensor with a specific shape and type. The initial value of this variable can be set via :code:`value`. - -API reference : :ref:`api_fluid_layers_fill_constant` - -8. assign ---------------- - -Fluid uses :code:`assign` to duplicate a variable. - -API reference : :ref:`api_fluid_layers_assign` - -9. argmin --------------- - -Fluid uses :code:`argmin` to calculate the index of the smallest element on the specified axis of Tensor. - -API reference : :ref:`api_fluid_layers_argmin` - -10. argmax ------------ - -Fluid uses :code:`argmax` to calculate the index of the largest element on the specified axis of Tensor. - -API reference : :ref:`api_fluid_layers_argmax` - -11. argsort ------------- - -Fluid uses :code:`argsort` to sort the input Tensor on the specified axis and it will return the sorted data variables and their corresponding index values. - -API reference : :ref:`api_fluid_layers_argsort` - -12. ones -------------- - -Fluid uses :code:`ones` to create a Tensor of the specified size and data type with an initial value of 1. - -API reference : :ref:`api_fluid_layers_ones` - -13. zeros ---------------- - -Fluid uses :code:`zeros` to create a Tensor of the specified size and data type with an initial value of zero. - -API reference : :ref:`api_fluid_layers_zeros` - -14. reverse -------------------- - -Fluid uses :code:`reverse` to invert Tensor along the specified axis. - -API reference : :ref:`api_fluid_layers_reverse` - - - -LoD-Tensor -============ - -LoD-Tensor is very suitable for sequence data. For related knowledge, please read `Tensor and LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor_en.html>`_ . - -1.create_lod_tensor ------------------------ - -Fluid uses :code:`create_lod_tensor` to create a LoD_Tensor with new hierarchical information based on a numpy array, a list, or an existing LoD_Tensor. - -API reference : :ref:`api_fluid_create_lod_tensor` - -2. 
create_random_int_lodtensor ----------------------------------- - -Fluid uses :code:`create_random_int_lodtensor` to create a LoD_Tensor composed of random integers. - -API reference : :ref:`api_fluid_create_random_int_lodtensor` - -3. reorder_lod_tensor_by_rank ---------------------------------- - -Fluid uses :code:`reorder_lod_tensor_by_rank` to reorder the sequence information of the input LoD_Tensor in the specified order. - -API reference : :ref:`api_fluid_layers_reorder_lod_tensor_by_rank` +.. _api_guide_tensor_en: + +######## +Tensor +######## + +There are two data structures used in Fluid to host the data, namely `Tensor and LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor_en.html>`_ . LoD-Tensor is a unique concept of Fluid, which appends sequence information to Tensor. The data that can be transferred in the framework includes: input, output, and learnable parameters in the network. All of them are uniformly represented by LoD-Tensor. In addition, tensor can be regarded as a special LoD-Tensor. + +Now let's take a closer look at the operations related to these two types of data. + +Tensor +======= + +1. create_tensor +--------------------- +Tensor is used to carry data in the framework, using :code:`create_tensor` to create a Lod-Tensor variable of the specified the data type. + +API reference : :ref:`api_fluid_layers_create_tensor` + + +2. create_parameter +--------------------- +The neural network training process is a learning process for parameters. Fluid uses :code:`create_parameter` to create a learnable parameter. The value of this parameter can be changed by the operator. + +API reference : :ref:`api_fluid_layers_create_parameter` + + + +3. create_global_var +--------------------- +Fluid uses :code:`create_global_var` to create a global tensor and this API allows you to specify the data type, shape, and value of the Tensor variable being created. + +API reference : :ref:`api_fluid_layers_create_global_var` + + +4. 
cast +--------------- + +Fluid uses :code:`cast` to convert the data to the specified type. + +API reference : :ref:`api_fluid_layers_cast` + + +5.concat +---------------- + +Fluid uses :code:`concat` to concatenate input data along a specified dimension. + +API reference : :ref:`api_fluid_layers_concat` + + +6. sums +---------------- + +Fluid uses :code:`sums` to sum up the input data. + +API reference : :ref:`api_fluid_layers_sums` + +7. fill_constant +----------------- + +Fluid uses :code:`fill_constant` to create a Tensor with a specific shape and type. The initial value of this variable can be set via :code:`value`. + +API reference : :ref:`api_fluid_layers_fill_constant` + +8. assign +--------------- + +Fluid uses :code:`assign` to duplicate a variable. + +API reference : :ref:`api_fluid_layers_assign` + +9. argmin +-------------- + +Fluid uses :code:`argmin` to calculate the index of the smallest element on the specified axis of Tensor. + +API reference : :ref:`api_fluid_layers_argmin` + +10. argmax +----------- + +Fluid uses :code:`argmax` to calculate the index of the largest element on the specified axis of Tensor. + +API reference : :ref:`api_fluid_layers_argmax` + +11. argsort +------------ + +Fluid uses :code:`argsort` to sort the input Tensor on the specified axis and it will return the sorted data variables and their corresponding index values. + +API reference : :ref:`api_fluid_layers_argsort` + +12. ones +------------- + +Fluid uses :code:`ones` to create a Tensor of the specified size and data type with an initial value of 1. + +API reference : :ref:`api_fluid_layers_ones` + +13. zeros +--------------- + +Fluid uses :code:`zeros` to create a Tensor of the specified size and data type with an initial value of zero. + +API reference : :ref:`api_fluid_layers_zeros` + +14. reverse +------------------- + +Fluid uses :code:`reverse` to invert Tensor along the specified axis. 
+ +API reference : :ref:`api_fluid_layers_reverse` + + + +LoD-Tensor +============ + +LoD-Tensor is very suitable for sequence data. For related knowledge, please read `Tensor and LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor_en.html>`_ . + +1.create_lod_tensor +----------------------- + +Fluid uses :code:`create_lod_tensor` to create a LoD_Tensor with new hierarchical information based on a numpy array, a list, or an existing LoD_Tensor. + +API reference : :ref:`api_fluid_create_lod_tensor` + +2. create_random_int_lodtensor +---------------------------------- + +Fluid uses :code:`create_random_int_lodtensor` to create a LoD_Tensor composed of random integers. + +API reference : :ref:`api_fluid_create_random_int_lodtensor` + +3. reorder_lod_tensor_by_rank +--------------------------------- + +Fluid uses :code:`reorder_lod_tensor_by_rank` to reorder the sequence information of the input LoD_Tensor in the specified order. + +API reference : :ref:`api_fluid_layers_reorder_lod_tensor_by_rank` diff --git a/docs/api_guides/low_level/metrics_en.rst b/docs/api_guides/low_level/metrics_en.rst index 358e4d33652..98cafe5420c 100755 --- a/docs/api_guides/low_level/metrics_en.rst +++ b/docs/api_guides/low_level/metrics_en.rst @@ -1,50 +1,50 @@ -.. _api_guide_metrics_en: - - -Metrics -######### -During or after the training of the neural network, it is necessary to evaluate the training effect of the model. The method of evaluation generally is calculating the distance between the overall predicted value and the overall label. Different types of tasks are applied with different evaluation methods, or with a combination of evaluation methods. In a specific task, one or more evaluation methods can be selected. Now let's take a look at commonly used evaluation methods grouped by the type of task. 
- -Classification task evaluation -------------------------------- -The most common classification task is the binary classification task, and the multi-classification task can also be transformed into a combination of multiple binary classification tasks. The metrics commonly adopted in the two-category tasks are accuracy, correctness, recall rate, AUC and average accuracy. - -- :code:`Precision` , which is used to measure the proportion of recalled ground-truth values in recalled values ​​in binary classification. - -  For API Reference, please refer to :ref:`api_fluid_metrics_Precision` - -- :code:`Accuracy`, which is used to measure the proportion of the recalled ground-truth value in the total number of samples in binary classification. It should be noted that the definitions of precision and accuracy are different and can be analogized to :code:`Variance` and :code:`Bias` in error analysis. - -  For API Reference, please refer to :ref:`api_fluid_metrics_Accuracy` - - -- :code:`Recall`, which is used to measure the ratio of the recalled values to the total number of samples in binary classification. The choice of accuracy and recall rate is mutually constrained, and trade-offs are needed in the actual model. Refer to the documentation `Precision_and_recall `_ . - -  For API Reference, please refer to :ref:`api_fluid_metrics_Recall` - -- :code:`Area Under Curve`, a classification model for binary classification, used to calculate the cumulative area of ​​the `ROC curve `_ . :code:`Auc` is implemented via python. If you are concerned about performance, you can use :code:`fluid.layers.auc` instead. - -  For API Reference, please refer to :ref:`api_fluid_metrics_Auc` - -- :code:`Average Precision`, commonly used in object detection tasks such as Faster R-CNN and SSD. The average precision is calculated under different recall conditions. For details, please refer to the document `Average precision `_ and `SSD Single Shot MultiBox Detector `_ . 
- -  For API Reference, please refer to :ref:`api_fluid_metrics_DetectionMAP` - - - -Sequence labeling task evaluation ----------------------------------- -In the sequence labeling task, the group of tokens is called a chunk, and the model will group and classify the input tokens at the same time. The commonly used evaluation method is the chunk evaluation method. - -- The chunk evaluation method :code:`ChunkEvaluator` receives the output of the :code:`chunk_eval` interface, and accumulates the statistics of chunks in each mini-batch , and finally calculates the accuracy, recall and F1 values. :code:`ChunkEvaluator` supports four labeling modes: IOB, IOE, IOBES and IO. You can refer to the documentation `Chunking with Support Vector Machines `_. - -  For API Reference, please refer to :ref:`api_fluid_metrics_ChunkEvaluator` - - -Generation/Synthesis task evaluation ----------------------------- -The generation task produces output directly from the input. In NLP tasks (such as speech recognition), a new string is generated. There are several ways to evaluate the distance between a generated string and a target string, such as a multi-classification evaluation method, and another commonly used method is called editing distance. - -- Edit distance: :code:`EditDistance` to measure the similarity of two strings. You can refer to the documentation `Edit_distance `_. - +.. _api_guide_metrics_en: + + +Metrics +######### +During or after the training of the neural network, it is necessary to evaluate the training effect of the model. The method of evaluation generally is calculating the distance between the overall predicted value and the overall label. Different types of tasks are applied with different evaluation methods, or with a combination of evaluation methods. In a specific task, one or more evaluation methods can be selected. Now let's take a look at commonly used evaluation methods grouped by the type of task. 
+ +Classification task evaluation +------------------------------- +The most common classification task is the binary classification task, and the multi-classification task can also be transformed into a combination of multiple binary classification tasks. The metrics commonly adopted in the two-category tasks are accuracy, correctness, recall rate, AUC and average accuracy. + +- :code:`Precision` , which is used to measure the proportion of recalled ground-truth values in recalled values ​​in binary classification. + +  For API Reference, please refer to :ref:`api_fluid_metrics_Precision` + +- :code:`Accuracy`, which is used to measure the proportion of the recalled ground-truth value in the total number of samples in binary classification. It should be noted that the definitions of precision and accuracy are different and can be analogized to :code:`Variance` and :code:`Bias` in error analysis. + +  For API Reference, please refer to :ref:`api_fluid_metrics_Accuracy` + + +- :code:`Recall`, which is used to measure the ratio of the recalled values to the total number of samples in binary classification. The choice of accuracy and recall rate is mutually constrained, and trade-offs are needed in the actual model. Refer to the documentation `Precision_and_recall `_ . + +  For API Reference, please refer to :ref:`api_fluid_metrics_Recall` + +- :code:`Area Under Curve`, a classification model for binary classification, used to calculate the cumulative area of ​​the `ROC curve `_ . :code:`Auc` is implemented via python. If you are concerned about performance, you can use :code:`fluid.layers.auc` instead. + +  For API Reference, please refer to :ref:`api_fluid_metrics_Auc` + +- :code:`Average Precision`, commonly used in object detection tasks such as Faster R-CNN and SSD. The average precision is calculated under different recall conditions. For details, please refer to the document `Average precision `_ and `SSD Single Shot MultiBox Detector `_ . 
+ +  For API Reference, please refer to :ref:`api_fluid_metrics_DetectionMAP` + + + +Sequence labeling task evaluation +---------------------------------- +In the sequence labeling task, the group of tokens is called a chunk, and the model will group and classify the input tokens at the same time. The commonly used evaluation method is the chunk evaluation method. + +- The chunk evaluation method :code:`ChunkEvaluator` receives the output of the :code:`chunk_eval` interface, and accumulates the statistics of chunks in each mini-batch , and finally calculates the accuracy, recall and F1 values. :code:`ChunkEvaluator` supports four labeling modes: IOB, IOE, IOBES and IO. You can refer to the documentation `Chunking with Support Vector Machines `_. + +  For API Reference, please refer to :ref:`api_fluid_metrics_ChunkEvaluator` + + +Generation/Synthesis task evaluation +---------------------------- +The generation task produces output directly from the input. In NLP tasks (such as speech recognition), a new string is generated. There are several ways to evaluate the distance between a generated string and a target string, such as a multi-classification evaluation method, and another commonly used method is called editing distance. + +- Edit distance: :code:`EditDistance` to measure the similarity of two strings. You can refer to the documentation `Edit_distance `_. +   For API Reference, please refer to :ref:`api_fluid_metrics_EditDistance` \ No newline at end of file diff --git a/docs/api_guides/low_level/model_save_reader_en.rst b/docs/api_guides/low_level/model_save_reader_en.rst index bc3f575c7a7..82c43d22359 100755 --- a/docs/api_guides/low_level/model_save_reader_en.rst +++ b/docs/api_guides/low_level/model_save_reader_en.rst @@ -1,59 +1,59 @@ -.. 
_api_guide_model_save_reader_en: - -####################### -Save and Load a Model -####################### - -To save and load model, there are eight APIs playing an important role: -:code:`fluid.io.save_vars`, :code:`fluid.io.save_params`, :code:`fluid.io.save_persistables`, :code:`fluid.io.save_inference_model`, :code:`fluid.io.load_vars`, :code:`fluid.io.load_params`, :code:`fluid.io.load_persistables` and :code:`fluid.io.load_inference_model` . - -Variables, Persistables and Parameters -================================================ - -In :code:`Paddle` , every input and output of operator( :code:`Operator` ) is a variable( :code:`Variable` ), and parameter( :code:`Parameter` ) is a derived class of Variable( :code:`Variable` ). Persistables (:code:`Persistables`) are variables that won't be deleted after each iteration. Parameter is a kind of persistable variable which will be updated by optimizer ( :ref:`api_guide_optimizer_en` ) after each iteration. Training of neural network in essence is to update parameters. - -Introduction to APIs for saving a model -======================================== - -- :code:`fluid.io.save_vars`: Variables are saved in specified directory by executor( :ref:`api_guide_executor_en` ). There are two ways to save variables: - - 1)Set :code:`vars` in the API to assign the variable list to be saved. - - 2)Assign an existed program( :code:`Program` ) to :code:`main_program` in the API, and then all variables in the program will be saved. - - The first one has a higher priority than the second one. - - For API Reference , please refer to :ref:`api_fluid_io_save_vars`. - -- :code:`fluid.io.save_params`: Set :code:`main_program` in the API with the model Program( :code:`Program` ). This API will filter all parameters( :code:`Parameter` ) of targeted program and save them in folder assigned by :code:`dirname` or file assigned by :code:`filename` . - - For API Reference , please refer to :ref:`api_fluid_io_save_params`. 
- -- :code:`fluid.io.save_persistables`: :code:`main_program` of API assigns program( :code:`Program` ). This API will filter all persistables( :code:`persistable==True` ) of targeted program and save them in folder assigned by :code:`dirname` or file assigned by :code:`filename` . - - For API Reference, please refer to :ref:`api_fluid_io_save_persistables`. - -- :code:`fluid.io.save_inference_model`: please refer to :ref:`api_guide_inference_en`. - -Introduction to APIs for loading a model -======================================== - -- :code:`fluid.io.load_vars`: Executor( :code:`Executor` ) loads variables into the target directory. There are two ways to load variables: - - 1):code:`vars` in the API assigns variable list to be loaded. - - 2)Assign an existed program( :code:`Program` ) to the :code:`main_program` field in the API, and then all variables in the program will be loaded. - - The first loading method has higher priority than the second one. - - For API Reference, please refer to :ref:`api_fluid_io_load_vars`. - -- :code:`fluid.io.load_params`: This API filters all parameters( :code:`Parameter` ) in program assigned by :code:`main_program` and load these parameters from folder assigned by :code:`dirname` or file assigned by :code:`filename` . - - For API Reference, please refer to :ref:`api_fluid_io_load_params` . - -- :code:`fluid.io.load_persistables`:This API filters all persistables( :code:`persistable==True` ) in program assigned by :code:`main_program` and load these persistables from folder assigned by :code:`dirname` or file assigned by :code:`filename` . - - For API Reference, please refer to :ref:`api_fluid_io_load_persistables` . - +.. 
_api_guide_model_save_reader_en: + +####################### +Save and Load a Model +####################### + +To save and load model, there are eight APIs playing an important role: +:code:`fluid.io.save_vars`, :code:`fluid.io.save_params`, :code:`fluid.io.save_persistables`, :code:`fluid.io.save_inference_model`, :code:`fluid.io.load_vars`, :code:`fluid.io.load_params`, :code:`fluid.io.load_persistables` and :code:`fluid.io.load_inference_model` . + +Variables, Persistables and Parameters +================================================ + +In :code:`Paddle` , every input and output of operator( :code:`Operator` ) is a variable( :code:`Variable` ), and parameter( :code:`Parameter` ) is a derived class of Variable( :code:`Variable` ). Persistables (:code:`Persistables`) are variables that won't be deleted after each iteration. Parameter is a kind of persistable variable which will be updated by optimizer ( :ref:`api_guide_optimizer_en` ) after each iteration. Training of neural network in essence is to update parameters. + +Introduction to APIs for saving a model +======================================== + +- :code:`fluid.io.save_vars`: Variables are saved in specified directory by executor( :ref:`api_guide_executor_en` ). There are two ways to save variables: + + 1)Set :code:`vars` in the API to assign the variable list to be saved. + + 2)Assign an existed program( :code:`Program` ) to :code:`main_program` in the API, and then all variables in the program will be saved. + + The first one has a higher priority than the second one. + + For API Reference , please refer to :ref:`api_fluid_io_save_vars`. + +- :code:`fluid.io.save_params`: Set :code:`main_program` in the API with the model Program( :code:`Program` ). This API will filter all parameters( :code:`Parameter` ) of targeted program and save them in folder assigned by :code:`dirname` or file assigned by :code:`filename` . + + For API Reference , please refer to :ref:`api_fluid_io_save_params`. 
+ +- :code:`fluid.io.save_persistables`: :code:`main_program` of API assigns program( :code:`Program` ). This API will filter all persistables( :code:`persistable==True` ) of targeted program and save them in folder assigned by :code:`dirname` or file assigned by :code:`filename` . + + For API Reference, please refer to :ref:`api_fluid_io_save_persistables`. + +- :code:`fluid.io.save_inference_model`: please refer to :ref:`api_guide_inference_en`. + +Introduction to APIs for loading a model +======================================== + +- :code:`fluid.io.load_vars`: Executor( :code:`Executor` ) loads variables into the target directory. There are two ways to load variables: + + 1):code:`vars` in the API assigns variable list to be loaded. + + 2)Assign an existed program( :code:`Program` ) to the :code:`main_program` field in the API, and then all variables in the program will be loaded. + + The first loading method has higher priority than the second one. + + For API Reference, please refer to :ref:`api_fluid_io_load_vars`. + +- :code:`fluid.io.load_params`: This API filters all parameters( :code:`Parameter` ) in program assigned by :code:`main_program` and load these parameters from folder assigned by :code:`dirname` or file assigned by :code:`filename` . + + For API Reference, please refer to :ref:`api_fluid_io_load_params` . + +- :code:`fluid.io.load_persistables`:This API filters all persistables( :code:`persistable==True` ) in program assigned by :code:`main_program` and load these persistables from folder assigned by :code:`dirname` or file assigned by :code:`filename` . + + For API Reference, please refer to :ref:`api_fluid_io_load_persistables` . + - :code:`fluid.io.load_inference_model`: please refer to :ref:`api_guide_inference_en` . 
\ No newline at end of file diff --git a/docs/api_guides/low_level/optimizer_en.rst b/docs/api_guides/low_level/optimizer_en.rst index f135a297c5e..732b786f791 100755 --- a/docs/api_guides/low_level/optimizer_en.rst +++ b/docs/api_guides/low_level/optimizer_en.rst @@ -1,90 +1,90 @@ -.. _api_guide_optimizer_en: - -########### -Optimizer -########### - -Neural network in essence is a `Optimization problem `_ . -With `forward computing and back propagation `_ , -:code:`Optimizer` use back-propagation gradients to optimize parameters in a neural network. - -1.SGD/SGDOptimizer ------------------- - -:code:`SGD` is an offspring class of :code:`Optimizer` implementing `Random Gradient Descent `_ which is a method of `Gradient Descent `_ . -When it needs to train a large number of samples, we usually choose :code:`SGD` to make loss function converge more quickly. - -API Reference: :ref:`api_fluid_optimizer_SGDOptimizer` - - -2.Momentum/MomentumOptimizer ----------------------------- - -:code:`Momentum` optimizer adds momentum on the basis of :code:`SGD` , reducing noise problem in the process of random gradient descent. -You can set :code:`ues_nesterov` as False or True, respectively corresponding to traditional `Momentum(Section 4.1 in thesis) -`_ algorithm and `Nesterov accelerated gradient(Section 4.2 in thesis) -`_ algorithm. - -API Reference: :ref:`api_fluid_optimizer_MomentumOptimizer` - - -3. Adagrad/AdagradOptimizer ---------------------------- -`Adagrad `_ Optimizer can adaptively allocate different learning rates for parameters to solve the problem of different sample sizes for different parameters. - -API Reference: :ref:`api_fluid_optimizer_AdagradOptimizer` - - -4.RMSPropOptimizer ------------------- -`RMSProp optimizer `_ is a method to adaptively adjust learning rate. -It mainly solves the problem of dramatic decrease of learning rate in the mid-term and end term of model training after Adagrad is used. 
- -API Reference: :ref:`api_fluid_optimizer_RMSPropOptimizer` - - - -5.Adam/AdamOptimizer --------------------- -Optimizer of `Adam `_ is a method to adaptively adjust learning rate, -fit for most non- `convex optimization `_ , big data set and high-dimensional scenarios. :code:`Adam` is the most common optimization algorithm. - -API Reference: :ref:`api_fluid_optimizer_AdamOptimizer` - - - -6.Adamax/AdamaxOptimizer ------------------------- - -`Adamax `_ is a variant of :code:`Adam` algorithm, simplifying limits of learning rate, especially upper limit. - -API Reference: :ref:`api_fluid_optimizer_AdamaxOptimizer` - - - -7.DecayedAdagrad/DecayedAdagradOptimizer -------------------------------------------- - -`DecayedAdagrad `_ Optimizer can be regarded as an :code:`Adagrad` algorithm incorporated with decay rate to solve the problem of dramatic descent of learning rate in mid-term and end term of model training. - -API Reference: :ref:`api_fluid_optimizer_DecayedAdagrad` - - - - -8. Ftrl/FtrlOptimizer ----------------------- - -`FtrlOptimizer `_ Optimizer combines the high accuracy of `FOBOS algorithm `_ and the sparsity of `RDA algorithm `_ , which is an `Online Learning `_ algorithm with significantly satisfying effect. - -API Reference: :ref:`api_fluid_optimizer_FtrlOptimizer` - - - -9.ModelAverage ------------------ - -:code:`ModelAverage` Optimizer accumulates history parameters through sliding window during the model training. We use averaged parameters at inference time to upgrade general accuracy of inference. - -API Reference: :ref:`api_fluid_optimizer_ModelAverage` - +.. _api_guide_optimizer_en: + +########### +Optimizer +########### + +Neural network in essence is a `Optimization problem `_ . +With `forward computing and back propagation `_ , +:code:`Optimizer` use back-propagation gradients to optimize parameters in a neural network. 
+ +1.SGD/SGDOptimizer +------------------ + +:code:`SGD` is a subclass of :code:`Optimizer` implementing `Stochastic Gradient Descent `_ which is a method of `Gradient Descent `_ . +When it needs to train a large number of samples, we usually choose :code:`SGD` to make the loss function converge more quickly. + +API Reference: :ref:`api_fluid_optimizer_SGDOptimizer` + + +2.Momentum/MomentumOptimizer +---------------------------- + +:code:`Momentum` optimizer adds momentum on the basis of :code:`SGD` , reducing the noise problem in the process of stochastic gradient descent. +You can set :code:`use_nesterov` as False or True, respectively corresponding to the traditional `Momentum(Section 4.1 in thesis) +`_ algorithm and the `Nesterov accelerated gradient(Section 4.2 in thesis) +`_ algorithm. + +API Reference: :ref:`api_fluid_optimizer_MomentumOptimizer` + + +3. Adagrad/AdagradOptimizer +--------------------------- +`Adagrad `_ Optimizer can adaptively allocate different learning rates for parameters to solve the problem of different sample sizes for different parameters. + +API Reference: :ref:`api_fluid_optimizer_AdagradOptimizer` + + +4.RMSPropOptimizer +------------------ +`RMSProp optimizer `_ is a method to adaptively adjust the learning rate. +It mainly solves the problem of dramatic decrease of the learning rate in the mid-term and end term of model training after Adagrad is used. + +API Reference: :ref:`api_fluid_optimizer_RMSPropOptimizer` + + + +5.Adam/AdamOptimizer +-------------------- +The `Adam `_ optimizer is a method to adaptively adjust the learning rate, +fit for most non- `convex optimization `_ , big data set and high-dimensional scenarios. :code:`Adam` is the most common optimization algorithm. + +API Reference: :ref:`api_fluid_optimizer_AdamOptimizer` + + + +6.Adamax/AdamaxOptimizer +------------------------ + +`Adamax `_ is a variant of the :code:`Adam` algorithm, simplifying the limits of the learning rate, especially the upper limit.
+ +API Reference: :ref:`api_fluid_optimizer_AdamaxOptimizer` + + + +7.DecayedAdagrad/DecayedAdagradOptimizer +------------------------------------------- + +`DecayedAdagrad `_ Optimizer can be regarded as an :code:`Adagrad` algorithm incorporated with decay rate to solve the problem of dramatic descent of learning rate in mid-term and end term of model training. + +API Reference: :ref:`api_fluid_optimizer_DecayedAdagrad` + + + + +8. Ftrl/FtrlOptimizer +---------------------- + +`FtrlOptimizer `_ Optimizer combines the high accuracy of `FOBOS algorithm `_ and the sparsity of `RDA algorithm `_ , which is an `Online Learning `_ algorithm with significantly satisfying effect. + +API Reference: :ref:`api_fluid_optimizer_FtrlOptimizer` + + + +9.ModelAverage +----------------- + +:code:`ModelAverage` Optimizer accumulates history parameters through sliding window during the model training. We use averaged parameters at inference time to upgrade general accuracy of inference. + +API Reference: :ref:`api_fluid_optimizer_ModelAverage` + From 7dbb8050da85dfffa390a6876fb72ed36ac0f175 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 09:50:01 +0000 Subject: [PATCH 03/20] trim trailing whitespace --- .../cpu_train_best_practice.rst | 12 +- .../cpu_train_best_practice_en.rst | 18 +- .../dist_training_gpu.rst | 4 +- .../gpu_training_with_recompute.rst | 20 +- .../gpu_training_with_recompute_en.rst | 28 +- docs/api/index_en.rst | 8 +- docs/api/paddle/DataParallel_cn.rst | 4 +- docs/api/paddle/Model_cn.rst | 12 +- docs/api/paddle/Overview_cn.rst | 12 +- docs/api/paddle/ParamAttr_cn.rst | 2 +- docs/api/paddle/Tensor/Overview_en.rst | 28 +- docs/api/paddle/Tensor_cn.rst | 14 +- docs/api/paddle/add_n_cn.rst | 2 +- docs/api/paddle/amp/GradScaler_cn.rst | 18 +- docs/api/paddle/amp/Overview_cn.rst | 2 +- .../api/paddle/autograd/PyLayerContext_cn.rst | 2 +- docs/api/paddle/autograd/backward_cn.rst | 2 +- docs/api/paddle/callbacks/LRScheduler_cn.rst | 4 +- 
.../paddle/callbacks/ModelCheckpoint_cn.rst | 4 +- .../paddle/callbacks/ReduceLROnPlateau_cn.rst | 16 +- docs/api/paddle/clip_cn.rst | 2 +- docs/api/paddle/compat/floor_division_cn.rst | 2 +- .../compat/get_exception_message_cn.rst | 2 +- docs/api/paddle/compat/to_bytes_cn.rst | 8 +- docs/api/paddle/compat/to_text_cn.rst | 6 +- docs/api/paddle/cross_cn.rst | 4 +- .../device/cuda/get_device_capability_cn.rst | 2 +- .../paddle/device/cuda/get_device_name_cn.rst | 2 +- docs/api/paddle/diagonal_cn.rst | 2 +- docs/api/paddle/diff_cn.rst | 2 +- .../paddle/distributed/InMemoryDataset_cn.rst | 12 +- docs/api/paddle/distributed/Overview_cn.rst | 6 +- .../api/paddle/distributed/ParallelEnv_cn.rst | 12 +- .../paddle/distributed/QueueDataset_cn.rst | 6 +- docs/api/paddle/distributed/alltoall_cn.rst | 2 +- .../fleet/DistributedStrategy_cn.rst | 18 +- .../api/paddle/distributed/fleet/Fleet_cn.rst | 2 +- .../fleet/UserDefinedRoleMaker_cn.rst | 2 +- .../paddle/distributed/fleet/UtilBase_cn.rst | 10 +- .../distributed/fleet/utils/HDFSClient_cn.rst | 2 +- .../distributed/fleet/utils/LocalFS_cn.rst | 2 +- docs/api/paddle/distributed/irecv_cn.rst | 2 +- docs/api/paddle/distributed/launch_cn.rst | 32 +- .../paddle/distributed/reduce_scatter_cn.rst | 2 +- .../sharding/group_sharded_parallel_cn.rst | 1 - docs/api/paddle/distributed/split_cn.rst | 2 +- .../distributed/utils/global_gather_cn.rst | 2 +- .../distributed/utils/global_scatter_cn.rst | 2 +- .../paddle/distribution/AbsTransform_cn.rst | 14 +- .../distribution/AffineTransform_cn.rst | 14 +- docs/api/paddle/distribution/Beta_cn.rst | 8 +- .../paddle/distribution/Categorical_cn.rst | 6 +- .../paddle/distribution/ChainTransform_cn.rst | 14 +- docs/api/paddle/distribution/Dirichlet_cn.rst | 10 +- .../paddle/distribution/ExpTransform_cn.rst | 14 +- .../distribution/IndependentTransform_cn.rst | 18 +- .../paddle/distribution/Independent_cn.rst | 6 +- .../paddle/distribution/Multinomial_cn.rst | 10 +- 
.../paddle/distribution/PowerTransform_cn.rst | 14 +- .../distribution/ReshapeTransform_cn.rst | 14 +- .../distribution/SigmoidTransform_cn.rst | 14 +- .../distribution/SoftmaxTransform_cn.rst | 14 +- .../paddle/distribution/StackTransform_cn.rst | 14 +- .../StickBreakingTransform_cn.rst | 14 +- .../paddle/distribution/TanhTransform_cn.rst | 16 +- docs/api/paddle/distribution/Transform_cn.rst | 36 +- .../TransformedDistribution_cn.rst | 6 +- docs/api/paddle/distribution/Uniform_cn.rst | 6 +- .../paddle/distribution/kl_divergence_cn.rst | 6 +- docs/api/paddle/einsum_cn.rst | 4 +- docs/api/paddle/empty_cn.rst | 2 +- docs/api/paddle/empty_like_cn.rst | 2 +- docs/api/paddle/equal_cn.rst | 2 +- docs/api/paddle/fft/Overview_cn.rst | 24 +- docs/api/paddle/fft/fft2_cn.rst | 2 +- docs/api/paddle/fft/fftfreq_cn.rst | 2 +- docs/api/paddle/fft/fftn_cn.rst | 2 +- docs/api/paddle/fft/hfft_cn.rst | 8 +- docs/api/paddle/fft/hfftn_cn.rst | 4 +- docs/api/paddle/fft/ifft2_cn.rst | 4 +- docs/api/paddle/fft/ifft_cn.rst | 2 +- docs/api/paddle/fft/ifftn_cn.rst | 4 +- docs/api/paddle/fft/ihfft2_cn.rst | 2 +- docs/api/paddle/fft/ihfft_cn.rst | 2 +- docs/api/paddle/fft/ihfftn_cn.rst | 2 +- docs/api/paddle/fft/irfft_cn.rst | 8 +- docs/api/paddle/fft/irfftn_cn.rst | 6 +- docs/api/paddle/fft/rfft2_cn.rst | 2 +- docs/api/paddle/fft/rfft_cn.rst | 2 +- docs/api/paddle/fft/rfftfreq_cn.rst | 2 +- docs/api/paddle/fft/rfftn_cn.rst | 2 +- docs/api/paddle/flatten_cn.rst | 2 +- docs/api/paddle/full_cn.rst | 2 +- docs/api/paddle/full_like_cn.rst | 4 +- docs/api/paddle/gather_cn.rst | 4 +- docs/api/paddle/gather_nd_cn.rst | 10 +- docs/api/paddle/get_cuda_rng_state_cn.rst | 2 +- docs/api/paddle/get_flags_cn.rst | 2 +- docs/api/paddle/grad_cn.rst | 2 +- docs/api/paddle/greater_equal_cn.rst | 2 +- docs/api/paddle/greater_than_cn.rst | 2 +- docs/api/paddle/hub/Overview_cn.rst | 8 +- .../paddle/incubate/autograd/Hessian_cn.rst | 4 +- .../paddle/incubate/autograd/Jacobian_cn.rst | 4 +- 
.../paddle/incubate/autograd/Overview_cn.rst | 28 +- docs/api/paddle/incubate/graph_reindex_cn.rst | 2 +- .../incubate/graph_sample_neighbors_cn.rst | 2 +- .../fused_multi_head_attention_cn.rst | 2 +- docs/api/paddle/incubate/segment_max_cn.rst | 2 +- docs/api/paddle/incubate/segment_mean_cn.rst | 2 +- docs/api/paddle/incubate/segment_min_cn.rst | 2 +- docs/api/paddle/incubate/segment_sum_cn.rst | 2 +- .../paddle/incubate/softmax_mask_fuse_cn.rst | 2 +- docs/api/paddle/index_sample_cn.rst | 4 +- docs/api/paddle/index_select_cn.rst | 4 +- docs/api/paddle/inner_cn.rst | 3 +- docs/api/paddle/io/BatchSampler_cn.rst | 2 +- docs/api/paddle/io/DataLoader_cn.rst | 44 +- .../paddle/io/DistributedBatchSampler_cn.rst | 20 +- docs/api/paddle/io/IterableDataset_cn.rst | 8 +- docs/api/paddle/io/Overview_cn.rst | 8 +- docs/api/paddle/io/Sampler_cn.rst | 2 +- docs/api/paddle/io/SequenceSampler_cn.rst | 2 +- .../paddle/io/WeightedRandomSampler_cn.rst | 2 +- docs/api/paddle/is_complex_cn.rst | 2 +- docs/api/paddle/is_integer_cn.rst | 2 +- docs/api/paddle/jit/ProgramTranslator_cn.rst | 2 +- docs/api/paddle/jit/TracedLayer_cn.rst | 2 +- docs/api/paddle/jit/load_cn.rst | 8 +- docs/api/paddle/jit/save_cn.rst | 2 +- docs/api/paddle/less_equal_cn.rst | 2 +- docs/api/paddle/less_than_cn.rst | 2 +- docs/api/paddle/linalg/Overview_cn.rst | 2 +- docs/api/paddle/linalg/eigvals_cn.rst | 2 +- docs/api/paddle/linalg/lstsq_cn.rst | 2 +- docs/api/paddle/linalg/lu_cn.rst | 12 +- docs/api/paddle/linalg/lu_unpack_cn.rst | 13 +- docs/api/paddle/linalg/qr_cn.rst | 2 +- docs/api/paddle/linspace_cn.rst | 2 +- docs/api/paddle/load_cn.rst | 8 +- docs/api/paddle/log10_cn.rst | 2 +- docs/api/paddle/log1p_cn.rst | 2 +- docs/api/paddle/log2_cn.rst | 2 +- docs/api/paddle/log_cn.rst | 2 +- docs/api/paddle/logcumsumexp_cn.rst | 2 +- docs/api/paddle/logit_cn.rst | 2 +- docs/api/paddle/logspace_cn.rst | 2 +- docs/api/paddle/masked_select_cn.rst | 2 +- docs/api/paddle/meshgrid_cn.rst | 2 +- 
docs/api/paddle/metric/Auc_cn.rst | 34 +- docs/api/paddle/metric/Metric_cn.rst | 10 +- docs/api/paddle/metric/Precision_cn.rst | 18 +- docs/api/paddle/metric/Recall_cn.rst | 18 +- docs/api/paddle/multiplex_cn.rst | 4 +- docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst | 8 +- docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst | 12 +- docs/api/paddle/nn/AlphaDropout_cn.rst | 2 +- docs/api/paddle/nn/BatchNorm1D_cn.rst | 8 +- docs/api/paddle/nn/BatchNorm2D_cn.rst | 8 +- docs/api/paddle/nn/BatchNorm3D_cn.rst | 6 +- docs/api/paddle/nn/BatchNorm_cn.rst | 2 +- docs/api/paddle/nn/BeamSearchDecoder_cn.rst | 24 +- docs/api/paddle/nn/BiRNN_cn.rst | 4 +- .../api/paddle/nn/ClipGradByGlobalNorm_cn.rst | 14 +- docs/api/paddle/nn/ClipGradByNorm_cn.rst | 8 +- docs/api/paddle/nn/ClipGradByValue_cn.rst | 6 +- docs/api/paddle/nn/Conv1DTranspose_cn.rst | 2 +- docs/api/paddle/nn/Conv1D_cn.rst | 2 +- docs/api/paddle/nn/Conv2DTranspose_cn.rst | 2 +- docs/api/paddle/nn/Conv2D_cn.rst | 4 +- docs/api/paddle/nn/Conv3D_cn.rst | 2 +- docs/api/paddle/nn/Dropout2D_cn.rst | 2 +- docs/api/paddle/nn/Dropout3D_cn.rst | 2 +- docs/api/paddle/nn/Dropout_cn.rst | 2 +- docs/api/paddle/nn/GRUCell_cn.rst | 6 +- docs/api/paddle/nn/GRU_cn.rst | 2 +- docs/api/paddle/nn/GroupNorm_cn.rst | 2 +- docs/api/paddle/nn/InstanceNorm1D_cn.rst | 4 +- docs/api/paddle/nn/InstanceNorm2D_cn.rst | 6 +- docs/api/paddle/nn/InstanceNorm3D_cn.rst | 4 +- docs/api/paddle/nn/KLDivLoss_cn.rst | 2 +- docs/api/paddle/nn/L1Loss_cn.rst | 4 +- docs/api/paddle/nn/LSTMCell_cn.rst | 20 +- docs/api/paddle/nn/LSTM_cn.rst | 2 +- docs/api/paddle/nn/LayerDict_cn.rst | 1 - docs/api/paddle/nn/LayerList_cn.rst | 4 +- docs/api/paddle/nn/LayerNorm_cn.rst | 4 +- docs/api/paddle/nn/Layer_cn.rst | 30 +- docs/api/paddle/nn/LocalResponseNorm_cn.rst | 2 +- docs/api/paddle/nn/LogSoftmax_cn.rst | 2 +- docs/api/paddle/nn/MSELoss_cn.rst | 2 +- docs/api/paddle/nn/MarginRankingLoss_cn.rst | 6 +- .../paddle/nn/MultiLabelSoftMarginLoss_cn.rst | 12 +- 
docs/api/paddle/nn/Overview_cn.rst | 90 +- docs/api/paddle/nn/RNN_cn.rst | 4 +- docs/api/paddle/nn/RReLU_cn.rst | 2 +- docs/api/paddle/nn/Sigmoid_cn.rst | 2 +- docs/api/paddle/nn/SimpleRNNCell_cn.rst | 4 +- docs/api/paddle/nn/SimpleRNN_cn.rst | 2 +- docs/api/paddle/nn/SmoothL1Loss_cn.rst | 2 +- docs/api/paddle/nn/Softmax_cn.rst | 6 +- docs/api/paddle/nn/SpectralNorm_cn.rst | 2 +- docs/api/paddle/nn/SyncBatchNorm_cn.rst | 2 +- docs/api/paddle/nn/Transformer_cn.rst | 4 +- docs/api/paddle/nn/Upsample_cn.rst | 2 +- docs/api/paddle/nn/UpsamplingNearest2D_cn.rst | 2 +- .../nn/functional/adaptive_avg_pool3d_cn.rst | 2 +- .../nn/functional/conv3d_transpose_cn.rst | 2 +- .../nn/functional/cosine_similarity_cn.rst | 4 +- .../paddle/nn/functional/cross_entropy_cn.rst | 14 +- .../paddle/nn/functional/diag_embed_cn.rst | 6 +- .../paddle/nn/functional/gather_tree_cn.rst | 4 +- .../paddle/nn/functional/grid_sample_cn.rst | 12 +- .../paddle/nn/functional/interpolate_cn.rst | 2 +- docs/api/paddle/nn/functional/kl_div_cn.rst | 2 +- docs/api/paddle/nn/functional/l1_loss_cn.rst | 4 +- .../nn/functional/local_response_norm_cn.rst | 2 +- .../paddle/nn/functional/log_softmax_cn.rst | 2 +- .../nn/functional/margin_ranking_loss_cn.rst | 8 +- docs/api/paddle/nn/functional/mse_loss_cn.rst | 4 +- .../paddle/nn/functional/npair_loss_cn.rst | 2 +- docs/api/paddle/nn/functional/one_hot_cn.rst | 2 +- docs/api/paddle/nn/functional/pad_cn.rst | 2 +- .../paddle/nn/functional/sequence_mask_cn.rst | 2 +- docs/api/paddle/nn/functional/softmax_cn.rst | 6 +- .../softmax_with_cross_entropy_cn.rst | 2 +- .../nn/functional/temporal_shift_cn.rst | 2 +- docs/api/paddle/nn/functional/upsample_cn.rst | 6 +- .../nn/initializer/KaimingNormal_cn.rst | 2 +- .../nn/initializer/KaimingUniform_cn.rst | 2 +- .../paddle/nn/initializer/Orthogonal_cn.rst | 1 - docs/api/paddle/nn/initializer/Uniform_cn.rst | 2 +- .../paddle/nn/initializer/XavierNormal_cn.rst | 2 +- docs/api/paddle/nn/utils/spectral_norm_cn.rst | 2 +- 
docs/api/paddle/nn/utils/weight_norm_cn.rst | 4 +- docs/api/paddle/nonzero_cn.rst | 4 +- docs/api/paddle/not_equal_cn.rst | 2 +- docs/api/paddle/ones_like_cn.rst | 2 +- docs/api/paddle/optimizer/Adadelta_cn.rst | 2 +- docs/api/paddle/optimizer/Adagrad_cn.rst | 4 +- docs/api/paddle/optimizer/AdamW_cn.rst | 4 +- docs/api/paddle/optimizer/Adam_cn.rst | 8 +- docs/api/paddle/optimizer/Adamax_cn.rst | 8 +- docs/api/paddle/optimizer/Lamb_cn.rst | 4 +- docs/api/paddle/optimizer/Momentum_cn.rst | 4 +- docs/api/paddle/optimizer/Optimizer_cn.rst | 6 +- docs/api/paddle/optimizer/Overview_cn.rst | 2 +- docs/api/paddle/optimizer/RMSProp_cn.rst | 20 +- docs/api/paddle/optimizer/SGD_cn.rst | 8 +- .../optimizer/lr/CosineAnnealingDecay_cn.rst | 8 +- docs/api/paddle/optimizer/lr/CyclicLR_cn.rst | 2 +- .../paddle/optimizer/lr/LRScheduler_cn.rst | 4 +- .../paddle/optimizer/lr/MultiStepDecay_cn.rst | 2 +- .../api/paddle/optimizer/lr/OneCycleLR_cn.rst | 2 +- .../optimizer/lr/PolynomialDecay_cn.rst | 4 +- docs/api/paddle/optimizer/lr/StepDecay_cn.rst | 2 +- docs/api/paddle/outer_cn.rst | 4 +- docs/api/paddle/prod_cn.rst | 6 +- docs/api/paddle/profiler/Overview_cn.rst | 6 +- docs/api/paddle/profiler/Profiler_cn.rst | 2 +- docs/api/paddle/randperm_cn.rst | 2 +- docs/api/paddle/regularizer/L1Decay_cn.rst | 10 +- docs/api/paddle/regularizer/L2Decay_cn.rst | 6 +- docs/api/paddle/roll_cn.rst | 2 +- docs/api/paddle/rsqrt_cn.rst | 2 +- docs/api/paddle/save_cn.rst | 2 +- docs/api/paddle/scatter_cn.rst | 8 +- docs/api/paddle/scatter_nd_add_cn.rst | 4 +- docs/api/paddle/scatter_nd_cn.rst | 2 +- docs/api/paddle/seed_cn.rst | 2 +- docs/api/paddle/set_cuda_rng_state_cn.rst | 2 +- docs/api/paddle/set_flags_cn.rst | 2 +- docs/api/paddle/shard_index_cn.rst | 2 +- docs/api/paddle/signal/Overview_cn.rst | 2 +- docs/api/paddle/signal/istft_cn.rst | 6 +- docs/api/paddle/signal/stft_cn.rst | 12 +- docs/api/paddle/sort_cn.rst | 2 +- .../paddle/sparse/sparse_coo_tensor_cn.rst | 2 +- 
.../paddle/sparse/sparse_csr_tensor_cn.rst | 2 +- docs/api/paddle/squeeze_cn.rst | 6 +- docs/api/paddle/stack_cn.rst | 10 +- docs/api/paddle/static/BuildStrategy_cn.rst | 6 +- docs/api/paddle/static/CompiledProgram_cn.rst | 2 +- .../paddle/static/ExecutionStrategy_cn.rst | 2 +- docs/api/paddle/static/Executor_cn.rst | 20 +- .../static/ExponentialMovingAverage_cn.rst | 6 +- docs/api/paddle/static/InputSpec_cn.rst | 2 +- .../api/paddle/static/ParallelExecutor_cn.rst | 10 +- docs/api/paddle/static/Program_cn.rst | 8 +- docs/api/paddle/static/Variable_cn.rst | 4 +- .../paddle/static/WeightNormParamAttr_cn.rst | 6 +- docs/api/paddle/static/auc_cn.rst | 2 +- .../paddle/static/default_main_program_cn.rst | 2 +- .../static/deserialize_persistables_cn.rst | 2 +- docs/api/paddle/static/device_guard_cn.rst | 2 +- docs/api/paddle/static/gradients_cn.rst | 2 +- docs/api/paddle/static/nn/batch_norm_cn.rst | 6 +- docs/api/paddle/static/nn/cond_cn.rst | 2 +- docs/api/paddle/static/nn/conv2d_cn.rst | 2 +- .../paddle/static/nn/conv2d_transpose_cn.rst | 4 +- docs/api/paddle/static/nn/conv3d_cn.rst | 2 +- .../paddle/static/nn/conv3d_transpose_cn.rst | 4 +- .../api/paddle/static/nn/deform_conv2d_cn.rst | 10 +- docs/api/paddle/static/nn/embedding_cn.rst | 4 +- docs/api/paddle/static/nn/fc_cn.rst | 2 +- docs/api/paddle/static/nn/group_norm_cn.rst | 2 +- .../api/paddle/static/nn/instance_norm_cn.rst | 2 +- docs/api/paddle/static/nn/layer_norm_cn.rst | 2 +- .../paddle/static/nn/multi_box_head_cn.rst | 4 +- docs/api/paddle/static/nn/nce_cn.rst | 2 +- docs/api/paddle/static/nn/prelu_cn.rst | 2 +- docs/api/paddle/static/nn/row_conv_cn.rst | 2 +- .../paddle/static/nn/sequence_concat_cn.rst | 2 +- .../api/paddle/static/nn/sequence_conv_cn.rst | 4 +- .../static/nn/sequence_enumerate_cn.rst | 8 +- .../static/nn/sequence_expand_as_cn.rst | 4 +- .../static/nn/sequence_first_step_cn.rst | 4 +- .../static/nn/sequence_last_step_cn.rst | 4 +- .../api/paddle/static/nn/sequence_pool_cn.rst | 6 +- 
.../paddle/static/nn/sequence_scatter_cn.rst | 2 +- .../paddle/static/nn/sequence_softmax_cn.rst | 4 +- .../paddle/static/nn/sparse_embedding_cn.rst | 6 +- .../api/paddle/static/nn/spectral_norm_cn.rst | 2 +- docs/api/paddle/static/nn/while_loop_cn.rst | 4 +- docs/api/paddle/static/program_guard_cn.rst | 2 +- docs/api/paddle/static/py_func_cn.rst | 14 +- .../static/serialize_persistables_cn.rst | 2 +- docs/api/paddle/strided_slice_cn.rst | 8 +- docs/api/paddle/sysconfig/get_include_cn.rst | 2 +- docs/api/paddle/sysconfig/get_lib_cn.rst | 2 +- docs/api/paddle/tan_cn.rst | 4 +- docs/api/paddle/tensordot_cn.rst | 2 +- docs/api/paddle/text/Conll05st_cn.rst | 2 +- docs/api/paddle/text/Movielens_cn.rst | 2 +- docs/api/paddle/text/Overview_cn.rst | 2 +- docs/api/paddle/text/UCIHousing_cn.rst | 4 +- docs/api/paddle/tile_cn.rst | 2 +- docs/api/paddle/tolist_cn.rst | 4 +- docs/api/paddle/topk_cn.rst | 2 +- docs/api/paddle/transpose_cn.rst | 2 +- docs/api/paddle/unique_consecutive_cn.rst | 2 +- .../utils/cpp_extension/CUDAExtension_cn.rst | 2 +- .../utils/cpp_extension/CppExtension_cn.rst | 2 +- .../paddle/utils/cpp_extension/load_cn.rst | 2 +- .../paddle/utils/cpp_extension/setup_cn.rst | 2 +- docs/api/paddle/utils/deprecated_cn.rst | 2 +- docs/api/paddle/var_cn.rst | 4 +- docs/api/paddle/vision/models/VGG_cn.rst | 2 +- .../api/paddle/vision/ops/DeformConv2D_cn.rst | 6 +- docs/api/paddle/vision/ops/RoIPool_cn.rst | 4 +- .../paddle/vision/ops/deform_conv2d_cn.rst | 8 +- docs/api/paddle/vision/ops/nms_cn.rst | 2 +- docs/api/paddle/vision/ops/roi_pool_cn.rst | 2 +- .../paddle/vision/set_image_backend_cn.rst | 2 +- .../vision/transforms/BaseTransform_cn.rst | 14 +- .../transforms/BrightnessTransform_cn.rst | 3 +- .../vision/transforms/CenterCrop_cn.rst | 4 +- .../vision/transforms/ColorJitter_cn.rst | 3 +- .../paddle/vision/transforms/Compose_cn.rst | 3 +- .../transforms/ContrastTransform_cn.rst | 3 +- .../paddle/vision/transforms/Grayscale_cn.rst | 3 +- 
.../vision/transforms/HueTransform_cn.rst | 3 +- .../paddle/vision/transforms/Normalize_cn.rst | 2 +- docs/api/paddle/vision/transforms/Pad_cn.rst | 10 +- .../vision/transforms/RandomErasing_cn.rst | 2 +- .../transforms/RandomHorizontalFlip_cn.rst | 2 +- .../transforms/RandomResizedCrop_cn.rst | 4 +- .../vision/transforms/RandomRotation_cn.rst | 8 +- .../transforms/RandomVerticalFlip_cn.rst | 4 +- .../paddle/vision/transforms/Resize_cn.rst | 24 +- .../transforms/SaturationTransform_cn.rst | 3 +- .../paddle/vision/transforms/ToTensor_cn.rst | 8 +- .../paddle/vision/transforms/Transpose_cn.rst | 2 +- .../transforms/adjust_brightness_cn.rst | 2 +- .../paddle/vision/transforms/normalize_cn.rst | 2 +- docs/api/paddle/vision/transforms/pad_cn.rst | 10 +- .../paddle/vision/transforms/resize_cn.rst | 24 +- .../paddle/vision/transforms/rotate_cn.rst | 6 +- .../vision/transforms/to_grayscale_cn.rst | 8 +- .../paddle/vision/transforms/to_tensor_cn.rst | 4 +- docs/api/paddle/where_cn.rst | 2 +- docs/api/paddle/zeros_like_cn.rst | 2 +- docs/api_guides/X2Paddle/Caffe-Fluid.rst | 4 +- docs/api_guides/X2Paddle/TensorFlow-Fluid.rst | 4 +- docs/api_guides/low_level/backward_en.rst | 4 +- .../api_guides/low_level/compiled_program.rst | 12 +- .../low_level/compiled_program_en.rst | 16 +- .../distributed/cluster_train_data_cn.rst | 4 +- .../distributed/cluster_train_data_en.rst | 4 +- ...large_scale_sparse_feature_training_en.rst | 2 +- .../low_level/layers/activations.rst | 6 +- .../low_level/layers/control_flow.rst | 2 +- docs/api_guides/low_level/layers/conv_en.rst | 22 +- .../low_level/layers/data_feeder_en.rst | 2 +- .../api_guides/low_level/layers/detection.rst | 2 +- .../low_level/layers/loss_function_en.rst | 2 +- docs/api_guides/low_level/layers/pooling.rst | 8 +- .../low_level/layers/pooling_en.rst | 8 +- docs/api_guides/low_level/metrics.rst | 8 +- .../low_level/model_save_reader.rst | 6 +- .../low_level/model_save_reader_en.rst | 6 +- docs/api_guides/low_level/optimizer.rst 
| 2 +- docs/api_guides/low_level/optimizer_en.rst | 2 +- .../low_level/parallel_executor_en.rst | 10 +- docs/api_guides/low_level/parameter_en.rst | 2 +- docs/design/algorithm/parameter_average.md | 2 +- docs/design/concepts/block.md | 2 +- docs/design/concepts/cpp_data_feeding.md | 2 +- docs/design/concepts/python_data_feeding.md | 4 +- docs/design/concepts/tensor.md | 2 +- docs/design/concurrent/channel.md | 2 +- .../concurrent/concurrent_programming.md | 10 +- docs/design/concurrent/csp.md | 2 +- docs/design/concurrent/go_op.md | 4 +- docs/design/concurrent/select_op.md | 6 +- docs/design/data_type/float16.md | 8 +- docs/design/mkldnn/inplace/inplace.md | 6 +- docs/design/mkldnn/nhwc/nhwc.md | 2 +- docs/design/modules/regularization.md | 2 +- docs/design/motivation/fluid.md | 4 +- docs/design/others/gan_api.md | 2 +- .../api_contributing_guides_cn.rst | 4 +- .../api_design_guidelines_standard_cn.md | 2 +- .../api_docs_guidelines_cn.md | 2 +- .../custom_kernel_docs/cpp_api_en.rst | 6 +- .../custom_device_docs/custom_kernel_en.rst | 4 +- .../custom_device_docs/custom_runtime_en.rst | 6 +- .../kernel_primitive_api/io_api_cn.md | 2 +- .../kernel_primitive_api/io_api_en.md | 2 +- ...op_optimization_contributing_guides_cn.rst | 6 +- .../sugon/paddle_c86_fix_guides_cn.md | 8 +- ...200\220Hackathon No.113\343\200\221 PR.md" | 2 +- docs/faq/index_cn.rst | 2 +- docs/faq/install_cn.md | 12 +- docs/faq/params_cn.md | 20 +- docs/faq/train_cn.md | 2 +- .../cluster_overview_ps_cn.rst | 22 +- .../cluster_quick_start_collective_cn.rst | 16 +- .../cluster_quick_start_en.rst | 10 +- .../cluster_quick_start_ps_cn.rst | 36 +- .../data_parallel/gradient_merge_cn.rst | 2 +- .../data_parallel/recompute_cn.rst | 26 +- .../06_distributed_training/deployment_cn.rst | 12 +- .../fleet_api_howto_cn.rst | 28 +- .../group_sharded_parallel_cn.rst | 24 +- .../model_parallel_cn.rst | 20 +- .../guides/06_distributed_training/moe_cn.rst | 10 +- .../pipeline_parallel_cn.rst | 4 +- 
.../10_contribution/docs_contribution.md | 2 +- docs/guides/10_contribution/faq_cn.rst | 2 +- docs/guides/10_contribution/hackathon_cn.md | 2 +- docs/guides/advanced/gradient_clip_cn.rst | 4 +- docs/guides/advanced/gradient_clip_en.rst | 6 +- docs/guides/advanced/index_cn.rst | 1 - docs/guides/advanced/model_to_onnx_cn.rst | 26 +- docs/guides/advanced/visualdl_usage_cn.md | 2 +- docs/guides/advanced/visualdl_usage_en.md | 128 +- docs/guides/beginner/model_save_load_cn.rst | 14 +- docs/guides/beginner/tensor_cn.md | 2 +- docs/guides/beginner/tensor_en.md | 2 +- docs/guides/custom_op/new_python_op_cn.md | 2 +- docs/guides/flags/cudnn_cn.rst | 2 +- docs/guides/flags/data_en.rst | 2 +- docs/guides/flags/distributed_cn.rst | 4 +- docs/guides/flags/memory_cn.rst | 4 +- docs/guides/flags/memory_en.rst | 14 +- .../rocm_docs/infer_example_cn.md | 2 +- .../rocm_docs/paddle_install_cn.md | 20 +- .../hardware_support/xpu_docs/index_cn.rst | 4 +- docs/guides/index_cn.rst | 2 +- docs/guides/index_en.rst | 2 +- .../infer/paddleslim/paddle_slim_en.rst | 4 +- docs/guides/jit/case_analysis_cn.md | 6 +- docs/guides/jit/grammar_list_cn.md | 2 +- docs/guides/jit/grammar_list_en.md | 4 +- docs/guides/jit/index_cn.rst | 2 +- docs/guides/model_convert/index_en.rst | 2 +- .../load_old_format_model_cn.rst | 10 +- docs/guides/model_convert/migration_cn.rst | 6 +- .../model_convert/paddle_api_mapping_cn.rst | 4 +- docs/guides/model_convert/update_en.md | 4 +- docs/guides/performance_improving/amp_cn.md | 8 +- docs/guides/performance_improving/amp_en.md | 10 +- .../analysis_tools/benchmark_cn.md | 2 +- .../memory_optimize_en.rst | 42 +- .../paddle_tensorrt_infer.md | 26 +- .../paddle_tensorrt_infer_en.md | 22 +- .../performance_improving/profiling_model.md | 280 +-- docs/install/compile/fromsource_en.rst | 2 +- docs/install/compile/linux-compile.md | 2 +- docs/install/compile/linux-compile_en.md | 2 +- docs/install/conda/fromconda_en.rst | 2 +- docs/install/docker/fromdocker_en.rst | 2 +- 
docs/install/index_cn.rst | 22 +- docs/install/index_en.rst | 32 +- docs/install/install_Kunlun_en.md | 74 +- docs/install/pip/frompip_en.rst | 2 +- docs/practices/cv/index_cn.rst | 6 +- docs/practices/index_cn.rst | 4 +- docs/practices/jit/index_cn.rst | 2 +- docs/practices/nlp/index_cn.rst | 2 +- docs/practices/quick_start/index_cn.rst | 4 +- docs/practices/recommendations/index_cn.rst | 2 +- .../reinforcement_learning/index_cn.rst | 2 +- docs/practices/time_series/index_cn.rst | 2 +- docs/release_note_cn.md | 1860 ++++++++-------- docs/release_note_en.md | 1958 ++++++++--------- 507 files changed, 3666 insertions(+), 3679 deletions(-) diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst index 453b54a08f7..50f6f69a96e 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst @@ -125,7 +125,7 @@ CPU分布式训练速度进一步提高的核心在于选择合适的分布式 elif fleet.is_worker(): fleet.init_worker() exe.run(fleet.startup_program) - # Do training + # Do training exe.run(fleet.main_program) fleet.stop_worker() @@ -137,8 +137,8 @@ paddlepaddle支持对训练策略中的细节进行调整: .. code-block:: python compiled_program = fluid.compiler.CompiledProgram(fleet.main_program).with_data_parallel( - loss_name=loss.name, - build_strategy=strategy.get_build_strategy(), + loss_name=loss.name, + build_strategy=strategy.get_build_strategy(), exec_strategy=strategy.get_execute_strategy()) @@ -147,12 +147,12 @@ paddlepaddle支持对训练策略中的细节进行调整: .. 
code-block:: python strategy = DistributedStrategyFactory.create_sync_strategy() - + # 方式一(推荐): config = strategy.get_program_config() config.min_block_size = 81920 - - + + # 方式二:调用set_program_config修改组网相关配置,支持DistributeTranspilerConfig和dict两种数据类型 config = DistributeTranspilerConfig() config.min_block_size = 81920 diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst index 8b8a9914b02..60a3310844b 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst @@ -6,12 +6,12 @@ Best practices of distributed training on CPU To improve the training speed of CPU distributed training, we must consider two aspects: -1. Improve the training speed mainly by improving utilization rate of CPU; +1. Improve the training speed mainly by improving utilization rate of CPU; 2. Improve the communication speed mainly by reducing the amount of data transmitted in the communication; 3. Improve the data IO speed by dataset API; 4. Improve the distributed training speed by changing distributed training strategy. -Improve CPU utilization +Improve CPU utilization ============================= The CPU utilization mainly depends on :code:`ParallelExecutor`, which can make full use of the computing power of multiple CPUs to speed up the calculation. @@ -129,7 +129,7 @@ The default configuration of the above policy is introduced by the following cod elif fleet.is_worker(): fleet.init_worker() exe.run(fleet.startup_program) - # Do training + # Do training exe.run(fleet.main_program) fleet.stop_worker() @@ -140,8 +140,8 @@ PaddlePaddle supports adjusting the details of the training strategy: .. 
code-block:: python compiled_program = fluid.compiler.CompiledProgram(fleet.main_program).with_data_parallel( - loss_name=loss.name, - build_strategy=strategy.get_build_strategy(), + loss_name=loss.name, + build_strategy=strategy.get_build_strategy(), exec_strategy=strategy.get_execute_strategy()) @@ -150,13 +150,13 @@ PaddlePaddle supports adjusting the details of the training strategy: .. code-block:: python strategy = DistributedStrategyFactory.create_sync_strategy() - + # Mode 1 (recommended): config = strategy.get_program_config() config.min_block_size = 81920 - - - # Mode 2 + + + # Mode 2 config = DistributeTranspilerConfig() config.min_block_size = 81920 # config = dict() diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst index 18a8c703d48..f9dbb4bb6fa 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst @@ -46,7 +46,7 @@ PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分 训练参数设置表 -.. csv-table:: +.. csv-table:: :header: "选项", "类型", "默认值", "说明" :widths: 3, 3, 3, 5 @@ -102,7 +102,7 @@ PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分 GPU多机多卡同步训练过程中存在慢trainer现象,即每步中训练快的trainer的同步通信需要等待训练慢的trainer。由于每步中慢trainer的rank具有随机性,因此我们使用局部异步训练的方式——LocalSGD,通过多步异步训练(无通信阻塞)实现慢trainer时间均摊,从而提升同步训练性能。Local SGD训练方式主要有三个参数,分别是: -.. csv-table:: +.. 
csv-table:: :header: "选项", "类型", "可选值", "说明" :widths: 3, 3, 3, 5 diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst index b8e7ec5b2a4..a43fc3ebcbf 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst @@ -4,7 +4,7 @@ 背景 --------- - + 随着训练数据规模的逐渐增加,训练更大、更深的深度学习模型成为一个主流趋势。目前的深度学习模型训练,通常要求保留前向计算的隐层结果,并且需要保存结果的数量会随着模型层数的增加线性增加,这对于目前能够使用的AI芯片的内存大小是个挑战。Forward Recomputation Backpropagation(FRB)可以在额外增加少量计算的情况下,显著增加模型的层数和宽度,同时也可以显著提升模型训练的batch大小。 原理 @@ -18,7 +18,7 @@ 在前向计算过程中,前向算子会输出大量的中间计算结果,在Paddle中,使用 Variable来存储这些隐层的中间结果。当模型层数加深时,其数量可达成千上万个, -占据大量的内存。Paddle的 `显存回收机制 `_ +占据大量的内存。Paddle的 `显存回收机制 `_ 会及时清除无用的中间结果,以节省存储。 然而,有些中间结果是反向算子的输入,这些Variable必须存储在内存中,直到相应的反向算子计算完毕。 @@ -33,7 +33,7 @@ Variable来存储这些隐层的中间结果。当模型层数加深时,其数 其中 :math:`x, y, z` 为向量, :math:`W_1, W_2` 为矩阵。容易知道,求 :math:`W_2` 梯度的反向计算为: .. math:: - W_{2}^{'} = z^{'} / y + W_{2}^{'} = z^{'} / y 可以看到反向计算中用到了前向计算生成的变量 :math:`y` ,因此变量 :math:`y` 必须存储在内存中,直到这个反向算子计算完毕。当模型加深时,我们会有大量的“ :math:`y` ”,占据了大量的内存。 @@ -71,10 +71,10 @@ Mitsuru Kusumoto \ :sup:`[3]` 等提出了一种基于动态规划的算法, 在多卡训练或者多机训练任务上建议您在Fleet API中使用Recompute。 **1. 直接调用** - + 直接调用RecomputeOptimizer非常简单,首先要定义一个经典的Optimizer,比如Adam; 然后在外面包一层RecomputeOptimizer;最后设置checkpoints即可。 - + .. code-block:: python import paddle.fluid as fluid @@ -101,7 +101,7 @@ Recompute原则上适用于所有Optimizer。 **2. 
在Fleet API中使用Recompute** -`Fleet API `_ +`Fleet API `_ 是基于Fluid的分布式计算高层API。在Fleet API中添加RecomputeOptimizer 仅需要2步: @@ -135,9 +135,9 @@ Q&A - **有没有更多Recompute的官方例子?** - 更多Recompute的例子将更新在 `examples `_ + 更多Recompute的例子将更新在 `examples `_ 和 `Fleet `_ 库下,欢迎关注。 - + - **有没有添加checkpoints的建议?** 我们建议将子网络连接部分的变量添加为checkpoints,即: @@ -151,10 +151,10 @@ Q&A 帮助用户定位问题。 [1] Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin . Training deep nets with sublinear memory cost. -arXiv preprint, arXiv:1604.06174, 2016. +arXiv preprint, arXiv:1604.06174, 2016. [2] Audrunas Gruslys , Rémi Munos , Ivo Danihelka , Marc Lanctot , and Alex Graves. Memory efficient backpropagation through time. In Advances in Neural Information Processing Systems (NIPS), pages 4125 4133, 2016. -[3] Kusumoto, Mitsuru, et al. "A Graph Theoretic Framework of Recomputation Algorithms for Memory-Efficient Backpropagation." arXiv preprint arXiv:1905.11722 (2019). +[3] Kusumoto, Mitsuru, et al. "A Graph Theoretic Framework of Recomputation Algorithms for Memory-Efficient Backpropagation." arXiv preprint arXiv:1905.11722 (2019). diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute_en.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute_en.rst index b1431756f44..c20b390e010 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute_en.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute_en.rst @@ -10,7 +10,7 @@ and the number of outputs increases linearly with the increase of the number of model layers, which becomes a challenge of the memory size for common devices. 
- + Theory --------- @@ -19,11 +19,11 @@ As we know, a training process of a deep-learning network contains 3 steps: - **Forward Propagation**:Running forward operators and generate temporary variables as output - **Backward Propagation**:Running backward operators to compute gradients of parameters -- **Optimization**:Applying optimization algorithm to update parameters +- **Optimization**:Applying optimization algorithm to update parameters When the model becomes deeper, the number of temporary variables generated in the forward propagation process can reach tens -of thousands, occupying a large amount of memory. +of thousands, occupying a large amount of memory. The `Garbage Collection mechanism `_ in Paddle can delete useless variables for the sake of saving memory. However, some variables serve as inputs of backward operators, @@ -41,17 +41,17 @@ the forward propagation works as follows: where :math:`x, y, z` are vectors, :math:`W_1, W_2` are matrix。It is easy to conduct that the gradient of :math:`W_2` is: .. math:: - W_{2}^{'} = z^{'} / y + W_{2}^{'} = z^{'} / y -We can see that :math:`y` is used in the backward propagation process, +We can see that :math:`y` is used in the backward propagation process, thus it must be kept in the memory during the whole forward propagation. When network grows deeper, more 'y's need to be stored, adding more requirements to the memory. Forward Recomputation Backpropagation(FRB) splits a deep network to k segments. -For each segment, in forward propagation, -most of the temporary variables are erased in time, -except for some special variables (we will talk about that later); +For each segment, in forward propagation, +most of the temporary variables are erased in time, +except for some special variables (we will talk about that later); in backward propagation, the forward operators will be recomputed to get these temporary variables before running backward operators. In short, FBR runs forward operators twice. 
@@ -59,13 +59,13 @@ In short, FBR runs forward operators twice. But how to split the network? A deep learning network usually consists of connecting modules in series: ResNet-50 contains 16 blocks and Bert-Large contains 24 transformers. -It is a good choice to treat such modules as segments. +It is a good choice to treat such modules as segments. The variables among segments are called as checkpoints. -The following picture is a network with 4 fc layers, 3 relu layers, +The following picture is a network with 4 fc layers, 3 relu layers, 1 sigmoid layer and 1 log-loss layer in series. -The left column is the forward propagation, +The left column is the forward propagation, the middle column is the normal backward propagation, and the right column is the FRB. Rectangular boxes represent the operators, red dots represent @@ -132,10 +132,10 @@ In principle, recompute is for all kinds of optimizers in Paddle. **2. Using Recompute in Fleet API** -`Fleet API `_ +`Fleet API `_ is a high-level API for distributed training in Fluid. Adding RecomputeOptimizer to Fluid takes two steps: - + - set dist_strategy.forward_recompute to True - set dist_strategy.recompute_checkpoints @@ -176,7 +176,7 @@ raise issues if you get any problem with these examples. - **How should I set checkpoints?** -The position of checkpoints is important: +The position of checkpoints is important: we suggest setting the variable between the sub-model as checkpoints, that is, set a variable as a checkpoint if it can separate the network into two parts without short-cut connections. diff --git a/docs/api/index_en.rst b/docs/api/index_en.rst index fb0d778cce5..c811df0062d 100644 --- a/docs/api/index_en.rst +++ b/docs/api/index_en.rst @@ -26,12 +26,12 @@ In this version, PaddlePaddle has made many optimizations to the APIs. You can r | paddle.device | Device management related APIs, such as set_device, | | | get_device, etc. 
| +-------------------------------+-------------------------------------------------------+ -| paddle.linalg | Linear algebra related APIs, such as det, svd, etc. | +| paddle.linalg | Linear algebra related APIs, such as det, svd, etc. | +-------------------------------+-------------------------------------------------------+ | paddle.fft | Fast Fourier Transform related APIs, such as | | | fft, fft2, etc. | +-------------------------------+-------------------------------------------------------+ -| paddle.amp | Paddle automatic mixed precision strategy, including | +| paddle.amp | Paddle automatic mixed precision strategy, including | | | auto_cast, GradScaler, etc. | +-------------------------------+-------------------------------------------------------+ | paddle.autograd | Auto grad API, including backward, PyLayer, etc. | @@ -63,12 +63,12 @@ In this version, PaddlePaddle has made many optimizations to the APIs. You can r | paddld.optimizer | APIs related to optimization algorithms such as SGD, | | | Adagrad, and Adam | +-------------------------------+-------------------------------------------------------+ -| paddle.optimizer.lr | APIs related to learning rate decay, such as | +| paddle.optimizer.lr | APIs related to learning rate decay, such as | | | NoamDecay, StepDecay, PiecewiseDecay, etc. | +-------------------------------+-------------------------------------------------------+ | paddle.regularizer | Regularization APIs, including L1Decay, L2Decay, etc. | +-------------------------------+-------------------------------------------------------+ -| paddle.static | Basic framework related APIs under static graph, | +| paddle.static | Basic framework related APIs under static graph, | | | such as Variable, Program, Executor, etc. 
| +-------------------------------+-------------------------------------------------------+ | paddle.static.nn | Special APIs for networking under static graph such | diff --git a/docs/api/paddle/DataParallel_cn.rst b/docs/api/paddle/DataParallel_cn.rst index 1947069d4fa..1e804092fca 100644 --- a/docs/api/paddle/DataParallel_cn.rst +++ b/docs/api/paddle/DataParallel_cn.rst @@ -28,9 +28,9 @@ DataParallel - **Layer** (Layer) - 需要通过数据并行方式执行的模型。 - **strategy** (ParallelStrategy,可选) - (deprecated) 数据并行的策略,包括并行执行的环境配置。默认为None。 - **comm_buffer_size** (int,可选) - 它是通信调用(如NCCLAllReduce)时,参数梯度聚合为一组的内存大小(MB)。默认值:25。 - - **last_comm_buffer_size** (float,可选)它限制通信调用中最后一个缓冲区的内存大小(MB)。减小最后一个通信缓冲区的大小有助于提高性能。默认值:1。默认值:1 + - **last_comm_buffer_size** (float,可选)它限制通信调用中最后一个缓冲区的内存大小(MB)。减小最后一个通信缓冲区的大小有助于提高性能。默认值:1。默认值:1 - **find_unused_parameters** (bool,可选) 是否在模型forward函数的返回值的所有张量中,遍历整个向后图。对于不包括在loss计算中的参数,其梯度将被预先标记为ready状态用于后续多卡间的规约操作。请注意,模型参数的所有正向输出必须参与loss的计算以及后续的梯度计算。否则,将发生严重错误。请注意,将find_unused_parameters设置为True会影响计算性能,因此,如果确定所有参数都参与了loss计算和自动反向图的构建,请将其设置为False。默认值:False。 - + 返回 :::::::::::: 支持数据并行的 ``Layer``。 diff --git a/docs/api/paddle/Model_cn.rst b/docs/api/paddle/Model_cn.rst index 71cab82b277..40911e04da8 100644 --- a/docs/api/paddle/Model_cn.rst +++ b/docs/api/paddle/Model_cn.rst @@ -15,7 +15,7 @@ Model - **network** (paddle.nn.Layer) - 是 ``paddle.nn.Layer`` 的一个实例。 - **inputs** (InputSpec|list|tuple|dict|None,可选) - ``network`` 的输入,可以是 ``InputSpec`` 的实例,或者是一个 ``InputSpec`` 的 ``list``,或者是格式为 ``{name: InputSpec}`` 的 ``dict``,或者为 ``None``。默认值为 ``None``。 - **labels** (InputSpec|list|tuple|None,可选) - ``network`` 的标签,可以是 ``InputSpec`` 的实例,或者是一个 ``InputSpec`` 的 ``list``,或者为 ``None``。 默认值为 ``None``。 - + .. 
note:: 在动态图中,``inputs`` 和 ``labels`` 都可以设置为 ``None``。但是,在静态图中,``input`` 不能设置为 ``None``。而如果损失函数需要标签(label)作为输入,则必须设置 ``labels``,否则,可以为 ``None``。 @@ -69,7 +69,7 @@ eval_batch(inputs, labels=None) - **inputs** (numpy.ndarray|Tensor|list) - 一批次的输入数据。它可以是一个 numpy 数组或 paddle.Tensor,或者是它们的列表(在模型具有多输入的情况下)。 - **labels** (numpy.ndarray|Tensor|list,可选) - 一批次的标签。它可以是一个 numpy 数组或 paddle.Tensor,或者是它们的列表(在模型具有多输入的情况下)。如果无标签,请设置为 None。默认值:None。 - + **返回** list,如果没有定义评估函数,则返回包含了预测损失函数的值的列表;如果定义了评估函数,则返回一个元组(损失函数的列表,评估指标的列表)。 @@ -89,7 +89,7 @@ predict_batch(inputs) - **inputs** (numpy.ndarray|Tensor|list) - 一批次的输入数据。它可以是一个 numpy 数组或 paddle.Tensor,或者是它们的列表(在模型具有多输入的情况下)。 - + **返回** 一个列表,包含了模型的输出。 @@ -110,7 +110,7 @@ save(path, training=True) - **path** (str) - 保存的文件名前缀。格式如 ``dirname/file_prefix`` 或者 ``file_prefix`` 。 - **training** (bool,可选) - 是否保存训练的状态,包括模型参数和优化器参数等。如果为 False,则只保存推理所需的参数与文件。默认值:True。 - + **返回** 无。 @@ -131,7 +131,7 @@ load(path, skip_mismatch=False, reset_optimizer=False) - **path** (str) - 保存参数或优化器信息的文件前缀。格式如 ``path.pdparams`` 或者 ``path.pdopt`` ,后者是非必要的,如果不想恢复优化器信息。 - **skip_mismatch** (bool,可选) - 是否需要跳过保存的模型文件中形状或名称不匹配的参数,设置为 ``False`` 时,当遇到不匹配的参数会抛出一个错误。默认值:False。 - **reset_optimizer** (bool,可选) - 设置为 ``True`` 时,会忽略提供的优化器信息文件。否则会载入提供的优化器信息。默认值:False。 - + **返回** 无。 @@ -145,7 +145,7 @@ parameters(*args, **kwargs) ''''''''' 返回一个包含模型所有参数的列表。 - + **返回** 在静态图中返回一个包含 ``Parameter`` 的列表,在动态图中返回一个包含 ``ParamBase`` 的列表。 diff --git a/docs/api/paddle/Overview_cn.rst b/docs/api/paddle/Overview_cn.rst index 4b987b6e9df..20f2fdaecd3 100755 --- a/docs/api/paddle/Overview_cn.rst +++ b/docs/api/paddle/Overview_cn.rst @@ -56,10 +56,10 @@ tensor数学操作 " :ref:`paddle.divide ` ", "逐元素相除算子" " :ref:`paddle.equal ` ", "该OP返回 x==y 逐元素比较x和y是否相等,相同位置的元素相同则返回True,否则返回False" " :ref:`paddle.equal_all ` ", "如果所有相同位置的元素相同返回True,否则返回False" - " :ref:`paddle.erf ` ", "逐元素计算 Erf 激活函数" + " :ref:`paddle.erf ` ", "逐元素计算 Erf 激活函数" " :ref:`paddle.exp ` ", "逐元素进行以自然数e为底指数运算" " :ref:`paddle.expm1 ` ", 
"逐元素进行exp(x)-1运算" - " :ref:`paddle.floor ` ", "向下取整函数" + " :ref:`paddle.floor ` ", "向下取整函数" " :ref:`paddle.floor_divide ` ", "逐元素整除算子,输入 x 与输入 y 逐元素整除,并将各个位置的输出元素保存到返回结果中" " :ref:`paddle.greater_equal ` ", "逐元素地返回 x>=y 的逻辑值" " :ref:`paddle.greater_than ` ", "逐元素地返回 x>y 的逻辑值" @@ -70,7 +70,7 @@ tensor数学操作 " :ref:`paddle.less_than ` ", "逐元素地返回 x` ", "计算输入 x 的 gamma 函数的自然对数并返回" " :ref:`paddle.log ` ", "Log激活函数(计算自然对数)" - " :ref:`paddle.log10 ` ", "Log10激活函数(计算底为10的对数)" + " :ref:`paddle.log10 ` ", "Log10激活函数(计算底为10的对数)" " :ref:`paddle.log2 ` ", "计算Log1p(加一的自然对数)结果" " :ref:`paddle.logcumsumexp ` ", "计算 x 的指数的前缀和的对数" " :ref:`paddle.logical_and ` ", "逐元素的对 x 和 y 进行逻辑与运算" @@ -95,11 +95,11 @@ tensor数学操作 " :ref:`paddle.mm ` ", "用于两个输入矩阵的相乘" " :ref:`paddle.inner ` ", "计算两个输入矩阵的内积" " :ref:`paddle.outer ` ", "计算两个输入矩阵的外积" - " :ref:`paddle.multiplex ` ", "从每个输入Tensor中选择特定行构造输出Tensor" + " :ref:`paddle.multiplex ` ", "从每个输入Tensor中选择特定行构造输出Tensor" " :ref:`paddle.multiply ` ", "逐元素相乘算子" " :ref:`paddle.neg ` ", "计算输入 x 的相反数并返回" " :ref:`paddle.not_equal ` ", "逐元素地返回x!=y 的逻辑值" - " :ref:`paddle.pow ` ", "指数算子,逐元素计算 x 的 y 次幂" + " :ref:`paddle.pow ` ", "指数算子,逐元素计算 x 的 y 次幂" " :ref:`paddle.prod ` ", "对指定维度上的Tensor元素进行求乘积运算" " :ref:`paddle.reciprocal ` ", "对输入Tensor取倒数" " :ref:`paddle.round ` ", "将输入中的数值四舍五入到最接近的整数数值" @@ -188,7 +188,7 @@ tensor创建相关 " :ref:`paddle.full_like ` ", "创建一个和 x 具有相同的形状并且数据类型为 dtype 的Tensor" " :ref:`paddle.linspace ` ", "返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num" " :ref:`paddle.meshgrid ` ", "对每个张量做扩充操作" - " :ref:`paddle.numel ` ", "返回一个长度为1并且元素值为输入 x 元素个数的Tensor" + " :ref:`paddle.numel ` ", "返回一个长度为1并且元素值为输入 x 元素个数的Tensor" " :ref:`paddle.ones ` ", "创建形状为 shape 、数据类型为 dtype 且值全为1的Tensor" " :ref:`paddle.ones_like ` ", "返回一个和 x 具有相同形状的数值都为1的Tensor" " :ref:`paddle.Tensor ` ", "Paddle中最为基础的数据结构" diff --git a/docs/api/paddle/ParamAttr_cn.rst b/docs/api/paddle/ParamAttr_cn.rst index ba70390c8fa..909d3adbd1d 100644 --- a/docs/api/paddle/ParamAttr_cn.rst 
+++ b/docs/api/paddle/ParamAttr_cn.rst @@ -20,7 +20,7 @@ ParamAttr - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - **initializer** (Initializer,可选) - 参数的初始化方式。默认值为None,表示权重参数采用Xavier初始化方式,偏置参数采用全0初始化方式。 - **learning_rate** (float,可选) - 参数的学习率。实际参数的学习率等于全局学习率乘以参数的学习率,再乘以learning rate schedule的系数。 - - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略::ref:`api_paddle_regularizer_L1Decay` 、 + - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略::ref:`api_paddle_regularizer_L1Decay` 、 :ref:`api_paddle_regularizer_L2Decay`,如果在 ``optimizer`` (例如 :ref:`api_paddle_optimizer_SGD` ) 中也 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 - **trainable** (bool,可选) - 参数是否需要训练。默认值为True,表示需要训练。 diff --git a/docs/api/paddle/Tensor/Overview_en.rst b/docs/api/paddle/Tensor/Overview_en.rst index 59b37485376..ee4a730307d 100644 --- a/docs/api/paddle/Tensor/Overview_en.rst +++ b/docs/api/paddle/Tensor/Overview_en.rst @@ -12,20 +12,20 @@ Data types PaddlePaddle defines the following Tensor types: ======================================= =========================================== -Data type dtype +Data type dtype ======================================= =========================================== -32-bit floating point ``paddle.float32`` -64-bit floating point ``paddle.float64`` -16-bit floating point ``paddle.float16`` -16-bit floating point ``paddle.bfloat16`` -64-bit complex ``paddle.complex64`` -128-bit complex ``paddle.complex128`` -8-bit integer (unsigned) ``paddle.uint8`` -8-bit integer (signed) ``paddle.int8`` -16-bit integer (signed) ``paddle.int16`` -32-bit integer (signed) ``paddle.int32`` -64-bit integer (signed) ``paddle.int64`` -Boolean ``paddle.bool`` +32-bit floating point ``paddle.float32`` +64-bit floating point ``paddle.float64`` +16-bit floating point ``paddle.float16`` +16-bit floating point ``paddle.bfloat16`` +64-bit complex ``paddle.complex64`` +128-bit complex ``paddle.complex128`` +8-bit integer (unsigned) 
``paddle.uint8`` +8-bit integer (signed) ``paddle.int8`` +16-bit integer (signed) ``paddle.int16`` +32-bit integer (signed) ``paddle.int32`` +64-bit integer (signed) ``paddle.int64`` +Boolean ``paddle.bool`` ======================================= =========================================== Tensor class reference @@ -38,7 +38,7 @@ Properties ~~~~~~~~~~~~~~~~~~~~~~ ======================================= =========================================== -``T`` The transpose of ``Tensor``. See :ref:`paddle.transpose ` . +``T`` The transpose of ``Tensor``. See :ref:`paddle.transpose ` . ``block`` Tensor's block. ``dtype`` Tensor's data type. ``grad`` The value of Tensor's grad. diff --git a/docs/api/paddle/Tensor_cn.rst b/docs/api/paddle/Tensor_cn.rst index d41cda4a713..51e3ad5d6b5 100755 --- a/docs/api/paddle/Tensor_cn.rst +++ b/docs/api/paddle/Tensor_cn.rst @@ -354,7 +354,7 @@ astype(dtype) x = paddle.to_tensor(1.0) print("original tensor's dtype is: {}".format(x.dtype)) print("new tensor's dtype is: {}".format(x.astype('float64').dtype)) - + atan(name=None) ::::::::: @@ -639,11 +639,11 @@ cpu() .. code-block:: python import paddle - + if paddle.device.cuda.device_count() > 0: x = paddle.to_tensor(1.0, place=paddle.CUDAPlace(0)) print(x.place) # CUDAPlace(0) - + x = paddle.to_tensor(1.0) y = x.cpu() print(y.place) # CPUPlace @@ -715,7 +715,7 @@ detach() .. code-block:: python import paddle - import numpy as np + import numpy as np data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') linear = paddle.nn.Linear(32, 64) @@ -1662,7 +1662,7 @@ pin_memory(y, name=None) .. 
code-block:: python import paddle - + if paddle.device.cuda.device_count() > 0: x = paddle.to_tensor(1.0, place=paddle.CUDAPlace(0)) print(x.place) # CUDAPlace(0) @@ -1820,7 +1820,7 @@ reshape(shape, name=None) reshape_(shape, name=None) ::::::::: -Inplace 版本的 :ref:`cn_api_fluid_layers_reshape` API,对输入 `x` 采用 Inplace 策略 +Inplace 版本的 :ref:`cn_api_fluid_layers_reshape` API,对输入 `x` 采用 Inplace 策略 reverse(axis, name=None) ::::::::: @@ -2247,7 +2247,7 @@ Inplace版本的 :ref:`cn_api_tensor_uniform`,返回一个从均匀分布采 import paddle x = paddle.ones(shape=[3, 4]) x.uniform_() - print(x) + print(x) # result is random # Tensor(shape=[3, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[ 0.97134161, -0.36784279, -0.13951409, -0.48410338], diff --git a/docs/api/paddle/add_n_cn.rst b/docs/api/paddle/add_n_cn.rst index 6e50fbe9401..c32a3b6028d 100644 --- a/docs/api/paddle/add_n_cn.rst +++ b/docs/api/paddle/add_n_cn.rst @@ -22,7 +22,7 @@ add_n output.shape = [2, 3] output = [[1, 2, 3], [4, 5, 6]] - + Case 2: 输入: 第一个输入: diff --git a/docs/api/paddle/amp/GradScaler_cn.rst b/docs/api/paddle/amp/GradScaler_cn.rst index f75f82f13aa..e5bafac6e34 100644 --- a/docs/api/paddle/amp/GradScaler_cn.rst +++ b/docs/api/paddle/amp/GradScaler_cn.rst @@ -49,7 +49,7 @@ GradScaler用于动态图模式下的"自动混合精度"的训练。它控制lo conv = model(data) loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss + scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward scaler.minimize(optimizer, scaled) # update parameters optimizer.clear_grad() @@ -84,7 +84,7 @@ scale(var) conv = model(data) loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss + scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward scaler.minimize(optimizer, scaled) # update parameters optimizer.clear_grad() @@ -112,12 +112,12 @@ minimize(optimizer, *args, **kwargs) optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters()) scaler = 
paddle.amp.GradScaler(init_loss_scaling=1024) data = paddle.rand([10, 3, 32, 32]) - + with paddle.amp.auto_cast(): conv = model(data) loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss + scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward scaler.minimize(optimizer, scaled) # update parameters optimizer.clear_grad() @@ -146,7 +146,7 @@ step(optimizer) with paddle.amp.auto_cast(): conv = model(data) loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss + scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward scaler.step(optimizer) # update parameters scaler.update() # update the loss scaling ratio @@ -170,7 +170,7 @@ update() with paddle.amp.auto_cast(): conv = model(data) loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss + scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward scaler.step(optimizer) # update parameters scaler.update() # update the loss scaling ratio @@ -199,12 +199,12 @@ unscale_(optimizer) with paddle.amp.auto_cast(): conv = model(data) loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss + scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward scaler.unscale_(optimizer) # unscale the parameter scaler.step(optimizer) - scaler.update() - optimizer.clear_grad() + scaler.update() + optimizer.clear_grad() is_enable() ''''''''' diff --git a/docs/api/paddle/amp/Overview_cn.rst b/docs/api/paddle/amp/Overview_cn.rst index 0958a374f6e..016698e5536 100644 --- a/docs/api/paddle/amp/Overview_cn.rst +++ b/docs/api/paddle/amp/Overview_cn.rst @@ -23,7 +23,7 @@ AMP相关API " :ref:`auto_cast ` ", "创建AMP上下文环境" " :ref:`decorate ` ", "根据选定混合精度训练模式,改写神经网络参数数据类型" " :ref:`GradScaler ` ", "控制loss的缩放比例" - + .. 
_about_amp_white_list_ops: 开启AMP后默认转化为float16计算的相关OP diff --git a/docs/api/paddle/autograd/PyLayerContext_cn.rst b/docs/api/paddle/autograd/PyLayerContext_cn.rst index eecde9fee10..51949d5d381 100644 --- a/docs/api/paddle/autograd/PyLayerContext_cn.rst +++ b/docs/api/paddle/autograd/PyLayerContext_cn.rst @@ -44,7 +44,7 @@ save_for_backward(self, *tensors) **参数** - - **tensors** (list of Tensor) - 需要被暂存的 ``Tensor`` + - **tensors** (list of Tensor) - 需要被暂存的 ``Tensor`` **返回** diff --git a/docs/api/paddle/autograd/backward_cn.rst b/docs/api/paddle/autograd/backward_cn.rst index a6ba0afebe0..05e9e460b0e 100644 --- a/docs/api/paddle/autograd/backward_cn.rst +++ b/docs/api/paddle/autograd/backward_cn.rst @@ -10,7 +10,7 @@ backward 参数 :::::::::::: - + - **tensors** (list[Tensor]) – 将要计算梯度的Tensors列表。Tensors中不能包含有相同的Tensor。 - **grad_tensors** (None|list[Tensor|None],可选) – ``tensors`` 的初始梯度值。如果非None,必须和 ``tensors`` 有相同的长度,并且如果其中某一Tensor元素为None,则该初始梯度值为填充1.0 的默认值;如果是None,所有的 ``tensors`` 的初始梯度值为填充1.0 的默认值。默认值:None。 - **retain_graph** (bool,可选) – 如果为False,反向计算图将被释放。如果在backward()之后继续添加OP,需要设置为True,此时之前的反向计算图会保留。将其设置为False会更加节省内存。默认值:False。 diff --git a/docs/api/paddle/callbacks/LRScheduler_cn.rst b/docs/api/paddle/callbacks/LRScheduler_cn.rst index 4af83eef1c8..9770e6b160c 100644 --- a/docs/api/paddle/callbacks/LRScheduler_cn.rst +++ b/docs/api/paddle/callbacks/LRScheduler_cn.rst @@ -10,8 +10,8 @@ LRScheduler 参数 :::::::::::: - - **by_step** (bool,可选) - 是否每个step都更新学习率。默认值:True。 - - **by_epoch** (bool,可选) - 是否每个epoch都更新学习率。默认值:False。 + - **by_step** (bool,可选) - 是否每个step都更新学习率。默认值:True。 + - **by_epoch** (bool,可选) - 是否每个epoch都更新学习率。默认值:False。 代码示例 diff --git a/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst b/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst index 7626972fda1..73a8795a2a5 100644 --- a/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst +++ b/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst @@ -12,8 +12,8 @@ ModelCheckpoint 参数 :::::::::::: - - **save_freq** (int,可选) - 
间隔多少个epoch保存模型。默认值:1。 - - **save_dir** (int,可选) - 保存模型的文件夹。如果不设定,将不会保存模型。默认值:None。 + - **save_freq** (int,可选) - 间隔多少个epoch保存模型。默认值:1。 + - **save_dir** (int,可选) - 保存模型的文件夹。如果不设定,将不会保存模型。默认值:None。 代码示例 diff --git a/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst b/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst index 1df80fb20ad..ae1b7911e8b 100644 --- a/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst +++ b/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst @@ -10,14 +10,14 @@ ReduceLROnPlateau 参数 :::::::::::: - - **monitor** (str,可选) - 监视的指标名称。默认值:'loss'。 - - **factor** (float,可选) - 学习率减小的因子。`new_lr = lr * factor`。默认值:0.1。 - - **patience** (int,可选) - 多少个epoch监视的指标没有提升后就减小学习率。默认值:10。 - - **verbose** (int,可选) - 可视化的模式。0表示不打印任何信息,1表示打印信息。默认值:1。 - - **mode** (int,可选) - 必须是 `{'auto', 'min', 'max'}` 中的值。`'min'` 表示学习率会减少当监视的指标不再下降。`'max'` 表示学习率会减少当监视的指标不再上升。`'auto'` 会根据监视指标的名字来推理是使用min还是max模式,如果名字中包含acc则使用max模式,否则使用min模式。默认值:'auto'。 - - **min_delta** (float,可选) - 评判指标增大或减小的阈值。默认值:0。 - - **cooldown** (int,可选) - 学习率减少后至少经过多少个epoch在进行正常的减少策略。默认值:0。 - - **min_lr** (int,可选) - 学习率减小后的下限。默认值:0。 + - **monitor** (str,可选) - 监视的指标名称。默认值:'loss'。 + - **factor** (float,可选) - 学习率减小的因子。`new_lr = lr * factor`。默认值:0.1。 + - **patience** (int,可选) - 多少个epoch监视的指标没有提升后就减小学习率。默认值:10。 + - **verbose** (int,可选) - 可视化的模式。0表示不打印任何信息,1表示打印信息。默认值:1。 + - **mode** (int,可选) - 必须是 `{'auto', 'min', 'max'}` 中的值。`'min'` 表示学习率会减少当监视的指标不再下降。`'max'` 表示学习率会减少当监视的指标不再上升。`'auto'` 会根据监视指标的名字来推理是使用min还是max模式,如果名字中包含acc则使用max模式,否则使用min模式。默认值:'auto'。 + - **min_delta** (float,可选) - 评判指标增大或减小的阈值。默认值:0。 + - **cooldown** (int,可选) - 学习率减少后至少经过多少个epoch在进行正常的减少策略。默认值:0。 + - **min_lr** (int,可选) - 学习率减小后的下限。默认值:0。 代码示例 diff --git a/docs/api/paddle/clip_cn.rst b/docs/api/paddle/clip_cn.rst index 2dbde97852c..08489dff00e 100644 --- a/docs/api/paddle/clip_cn.rst +++ b/docs/api/paddle/clip_cn.rst @@ -12,7 +12,7 @@ clip .. 
math:: - Out = MIN(MAX(x, min), max) + Out = MIN(MAX(x, min), max) 参数 :::::::::::: diff --git a/docs/api/paddle/compat/floor_division_cn.rst b/docs/api/paddle/compat/floor_division_cn.rst index ca0dcd69164..461c2d71965 100644 --- a/docs/api/paddle/compat/floor_division_cn.rst +++ b/docs/api/paddle/compat/floor_division_cn.rst @@ -16,5 +16,5 @@ floor_division 返回 :::::::::: - + x//y的除法结果 diff --git a/docs/api/paddle/compat/get_exception_message_cn.rst b/docs/api/paddle/compat/get_exception_message_cn.rst index 50894f5794f..8e50609c413 100644 --- a/docs/api/paddle/compat/get_exception_message_cn.rst +++ b/docs/api/paddle/compat/get_exception_message_cn.rst @@ -15,5 +15,5 @@ get_exception_message 返回 :::::::::: - + exec的错误消息 diff --git a/docs/api/paddle/compat/to_bytes_cn.rst b/docs/api/paddle/compat/to_bytes_cn.rst index bfb35384025..445cd13585e 100644 --- a/docs/api/paddle/compat/to_bytes_cn.rst +++ b/docs/api/paddle/compat/to_bytes_cn.rst @@ -9,23 +9,23 @@ to_bytes 此函数将对象转换为具有特定编码的字节。特别是,如果对象类型是列表或集合容器,我们将迭代对象中的所有项并将其转换为字节。 在Python3中: - + 使用特定编码将str type对象编码为bytes类型。 在Python2中: - + 使用特定的编码将unicode类型的对象编码为str类型,或者只返回object的8位字符串。 参数 :::::::::: - + - **obj** (unicode|str|bytes|list|set) - 要编码的对象。 - **encoding** (str) - 对字符串进行编码的编码格式。 - **inplace** (bool) - 是否改变原始对象或创建一个新对象。 返回 :::::::::: - + obj解码后的结果。 代码示例 diff --git a/docs/api/paddle/compat/to_text_cn.rst b/docs/api/paddle/compat/to_text_cn.rst index c801a2a7eeb..6a8346e775c 100644 --- a/docs/api/paddle/compat/to_text_cn.rst +++ b/docs/api/paddle/compat/to_text_cn.rst @@ -9,11 +9,11 @@ to_text 此函数将对象转换为不带任何编码的文本字符串。特别是,如果对象类型是列表或集合容器,我们将迭代对象中的所有项并将其转换为文本字符串。 在Python3中: - + 使用特定编码将bytes类型对象解码为str类型。 在Python2中: - + 使用特定编码将str type对象解码为unicode类型。 参数 @@ -25,7 +25,7 @@ to_text 返回 :::::::::: - + obj解码后的结果。 代码示例 diff --git a/docs/api/paddle/cross_cn.rst b/docs/api/paddle/cross_cn.rst index 16f78640d97..69cd0fbf961 100644 --- a/docs/api/paddle/cross_cn.rst +++ b/docs/api/paddle/cross_cn.rst @@ -6,10 +6,10 @@ cross .. 
py:function:: paddle.cross(x, y, axis=None, name=None) -计算张量 ``x`` 和 ``y`` 在 ``axis`` 维度上的向量积(叉积)。 +计算张量 ``x`` 和 ``y`` 在 ``axis`` 维度上的向量积(叉积)。 ``x`` 和 ``y`` 必须有相同的形状,且指定的 ``axis`` 的长度必须为3。如果未指定 ``axis``,默认选取第一个长度为3的 ``axis`` 。 - + 参数 ::::::::: - x (Tensor) – 第一个输入张量。 diff --git a/docs/api/paddle/device/cuda/get_device_capability_cn.rst b/docs/api/paddle/device/cuda/get_device_capability_cn.rst index d22179e19b0..a83b606ecfe 100644 --- a/docs/api/paddle/device/cuda/get_device_capability_cn.rst +++ b/docs/api/paddle/device/cuda/get_device_capability_cn.rst @@ -23,7 +23,7 @@ tuple(int,int):设备计算能力的主要和次要修订号。 .. code-block:: python # required: gpu - + import paddle paddle.device.cuda.get_device_capability() diff --git a/docs/api/paddle/device/cuda/get_device_name_cn.rst b/docs/api/paddle/device/cuda/get_device_name_cn.rst index c79789dcfa5..fa1c83117fe 100644 --- a/docs/api/paddle/device/cuda/get_device_name_cn.rst +++ b/docs/api/paddle/device/cuda/get_device_name_cn.rst @@ -23,7 +23,7 @@ str:设备的名称。 .. 
code-block:: python # required: gpu - + import paddle paddle.device.cuda.get_device_name() diff --git a/docs/api/paddle/diagonal_cn.rst b/docs/api/paddle/diagonal_cn.rst index c0892fe1946..b521f0c7a78 100644 --- a/docs/api/paddle/diagonal_cn.rst +++ b/docs/api/paddle/diagonal_cn.rst @@ -25,7 +25,7 @@ diagonal - **axis1** (int,可选)- 获取对角线的二维平面的第一维,默认值为 0。 - **axis2** (int,可选)- 获取对角线的二维平面的第二维,默认值为 1 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 ::::::::: - Tensor (Tensor),输入 `Tensor` 在指定二维平面的局部视图,数据类型和输入数据类型一致。 diff --git a/docs/api/paddle/diff_cn.rst b/docs/api/paddle/diff_cn.rst index 0d8d459737b..8deaa8747d1 100644 --- a/docs/api/paddle/diff_cn.rst +++ b/docs/api/paddle/diff_cn.rst @@ -41,7 +41,7 @@ diff y = paddle.to_tensor([7, 9]) out = paddle.diff(x, append=y) print(out) - # out: + # out: # [3, 1, -3, 5, 2] z = paddle.to_tensor([[1, 2, 3], [4, 5, 6]]) out = paddle.diff(z, axis=0) diff --git a/docs/api/paddle/distributed/InMemoryDataset_cn.rst b/docs/api/paddle/distributed/InMemoryDataset_cn.rst index 277fa5e9590..b6d5487073c 100644 --- a/docs/api/paddle/distributed/InMemoryDataset_cn.rst +++ b/docs/api/paddle/distributed/InMemoryDataset_cn.rst @@ -86,7 +86,7 @@ None。 dataset.set_filelist( ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]) dataset.load_into_memory() - + place = paddle.CPUPlace() exe = paddle.static.Executor(place) startup_program = paddle.static.Program() @@ -94,7 +94,7 @@ None。 exe.run(startup_program) exe.train_from_dataset(main_program, dataset) - + os.remove("./test_queue_dataset_run_a.txt") os.remove("./test_queue_dataset_run_b.txt") @@ -212,7 +212,7 @@ load_into_memory() import paddle paddle.enable_static() - + dataset = paddle.distributed.InMemoryDataset() slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] @@ -370,7 +370,7 @@ release_memory() import paddle paddle.enable_static() - + dataset = paddle.distributed.InMemoryDataset() slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars 
= [] @@ -457,7 +457,7 @@ shuffle数据的大小。 import paddle paddle.enable_static() - + dataset = paddle.distributed.InMemoryDataset() dataset = paddle.distributed.InMemoryDataset() slots = ["slot1", "slot2", "slot3", "slot4"] @@ -493,7 +493,7 @@ slots_shuffle(slots) import paddle paddle.enable_static() - + dataset = paddle.distributed.InMemoryDataset() dataset._init_distributed_settings(fea_eval=True) slots = ["slot1", "slot2", "slot3", "slot4"] diff --git a/docs/api/paddle/distributed/Overview_cn.rst b/docs/api/paddle/distributed/Overview_cn.rst index 395c082fb35..e3adaa502fa 100644 --- a/docs/api/paddle/distributed/Overview_cn.rst +++ b/docs/api/paddle/distributed/Overview_cn.rst @@ -45,7 +45,7 @@ paddle.distributed.fleet是分布式训练的统一入口API,用于配置分 .. csv-table:: :header: "API名称", "API功能" :widths: 20, 50 - + " :ref:`init_parallel_env ` ", "初始化并行训练环境,支持动态图模式" " :ref:`launch ` ", "启动分布式训练进程,支持集合通信及参数服务器架构" @@ -61,7 +61,7 @@ paddle.distributed.fleet是分布式训练的统一入口API,用于配置分 .. csv-table:: :header: "API名称", "API功能" :widths: 20, 50 - + " :ref:`InMemoryDataset ` ", "数据加载到内存中,在训练前随机整理数据" " :ref:`QueueDataset ` ", "流式数据加载" @@ -76,7 +76,7 @@ paddle.distributed.fleet是分布式训练的统一入口API,用于配置分 .. 
csv-table:: :header: "API名称", "API功能" :widths: 20, 50 - + " :ref:`reduce ` ", "规约,规约进程组内的tensor,返回结果至指定进程" " :ref:`ReduceOP ` ", "规约,指定逐元素规约操作" diff --git a/docs/api/paddle/distributed/ParallelEnv_cn.rst b/docs/api/paddle/distributed/ParallelEnv_cn.rst index 39c476e91d8..d6d85b6d084 100644 --- a/docs/api/paddle/distributed/ParallelEnv_cn.rst +++ b/docs/api/paddle/distributed/ParallelEnv_cn.rst @@ -60,7 +60,7 @@ rank # execute this command in terminal: export PADDLE_TRAINER_ID=0 import paddle.distributed as dist - + env = dist.ParallelEnv() print("The rank is %d" % env.rank) # The rank is 0 @@ -79,7 +79,7 @@ world_size # execute this command in terminal: export PADDLE_TRAINERS_NUM=4 import paddle.distributed as dist - + env = dist.ParallelEnv() print("The world_size is %d" % env.world_size) # The world_size is 4 @@ -98,7 +98,7 @@ device_id # execute this command in terminal: export FLAGS_selected_gpus=1 import paddle.distributed as dist - + env = dist.ParallelEnv() print("The device id are %d" % env.device_id) # The device id are 1 @@ -114,10 +114,10 @@ current_endpoint **代码示例** .. 
code-block:: python - + # execute this command in terminal: export PADDLE_CURRENT_ENDPOINT=127.0.0.1:6170 import paddle.distributed as dist - + env = dist.ParallelEnv() print("The current endpoint are %s" % env.current_endpoint) # The current endpoint are 127.0.0.1:6170 @@ -136,7 +136,7 @@ trainer_endpoints # execute this command in terminal: export PADDLE_TRAINER_ENDPOINTS=127.0.0.1:6170,127.0.0.1:6171 import paddle.distributed as dist - + env = dist.ParallelEnv() print("The trainer endpoints are %s" % env.trainer_endpoints) # The trainer endpoints are ['127.0.0.1:6170', '127.0.0.1:6171'] diff --git a/docs/api/paddle/distributed/QueueDataset_cn.rst b/docs/api/paddle/distributed/QueueDataset_cn.rst index 833aaaf3ed2..94bf6419ff4 100644 --- a/docs/api/paddle/distributed/QueueDataset_cn.rst +++ b/docs/api/paddle/distributed/QueueDataset_cn.rst @@ -88,7 +88,7 @@ None。 ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]) paddle.enable_static() - + place = paddle.CPUPlace() exe = paddle.static.Executor(place) startup_program = paddle.static.Program() @@ -96,7 +96,7 @@ None。 exe.run(startup_program) exe.train_from_dataset(main_program, dataset) - + os.remove("./test_queue_dataset_run_a.txt") os.remove("./test_queue_dataset_run_b.txt") @@ -114,7 +114,7 @@ set_filelist(filelist) import os paddle.enable_static() - + with open("test_queue_dataset_run_a.txt", "w") as f: data = "2 1 2 2 5 4 2 2 7 2 1 3\n" data += "2 6 2 2 1 4 2 2 4 2 2 3\n" diff --git a/docs/api/paddle/distributed/alltoall_cn.rst b/docs/api/paddle/distributed/alltoall_cn.rst index 09245019301..5599309908c 100644 --- a/docs/api/paddle/distributed/alltoall_cn.rst +++ b/docs/api/paddle/distributed/alltoall_cn.rst @@ -8,7 +8,7 @@ alltoall 将in_tensor_list里面的tensors按照卡数均分并按照卡的顺序分发到所有参与的卡并将结果tensors汇总到out_tensor_list。 如下图所示,GPU0卡的in_tensor_list会按照两张卡拆分成0_0和0_1, GPU1卡的in_tensor_list同样拆分成1_0和1_1,经过alltoall算子后, 
-GPU0卡的0_0会发送给GPU0,GPU0卡的0_1会发送给GPU1,GPU1卡的1_0会发送给GPU0,GPU1卡的1_1会发送给GPU1,所以GPU0卡的out_tensor_list包含0_0和1_0, +GPU0卡的0_0会发送给GPU0,GPU0卡的0_1会发送给GPU1,GPU1卡的1_0会发送给GPU0,GPU1卡的1_1会发送给GPU1,所以GPU0卡的out_tensor_list包含0_0和1_0, GPU1卡的out_tensor_list包含0_1和1_1。 .. image:: ./img/alltoall.png diff --git a/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst b/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst index 8d7f660110a..9a0e7f4b205 100755 --- a/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst +++ b/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst @@ -46,7 +46,7 @@ execution_strategy `Post Local SGD `__ -配置DistributedStrategy中的 `ExecutionStrategy `_ +配置DistributedStrategy中的 `ExecutionStrategy `_ **代码示例** @@ -57,7 +57,7 @@ execution_strategy exe_strategy.num_threads = 10 exe_strategy.num_iteration_per_drop_scope = 10 exe_strategy.num_iteration_per_run = 10 - + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.execution_strategy = exe_strategy @@ -65,7 +65,7 @@ execution_strategy build_strategy ''''''''' -配置DistributedStrategy中的 `BuildStrategy `_ +配置DistributedStrategy中的 `BuildStrategy `_ **代码示例** @@ -81,7 +81,7 @@ build_strategy build_strategy.fuse_broadcast_ops = True build_strategy.fuse_all_optimizer_ops = True build_strategy.enable_inplace = True - + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.build_strategy = build_strategy @@ -186,7 +186,7 @@ gradient_merge import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.gradient_merge = True - strategy.gradient_merge_configs = {"k_steps": 4, "avg": True} + strategy.gradient_merge_configs = {"k_steps": 4, "avg": True} gradient_merge_configs ''''''''' @@ -218,7 +218,7 @@ lars "lars_weight_decay": 0.0005, "epsilon": 0, "exclude_from_weight_decay": ["batch_norm", ".b"], - } + } lars_configs ''''''''' @@ -263,11 +263,11 @@ lamb_configs localsgd ''''''''' -是否使用LocalSGD optimizer,默认值:False。更多的细节请参考 `Don't Use Large 
Mini-Batches, Use Local SGD `_ +是否使用LocalSGD optimizer,默认值:False。更多的细节请参考 `Don't Use Large Mini-Batches, Use Local SGD `_ **代码示例** -.. code-block:: python +.. code-block:: python import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() @@ -294,7 +294,7 @@ localsgd_configs adaptive_localsgd ''''''''' -是否使用AdaptiveLocalSGD optimizer,默认值:False。更多的细节请参考`Adaptive Communication Strategies to Achieve the Best Error-Runtime Trade-off in Local-Update SGD `_ +是否使用AdaptiveLocalSGD optimizer,默认值:False。更多的细节请参考`Adaptive Communication Strategies to Achieve the Best Error-Runtime Trade-off in Local-Update SGD `_ **代码示例** diff --git a/docs/api/paddle/distributed/fleet/Fleet_cn.rst b/docs/api/paddle/distributed/fleet/Fleet_cn.rst index c69b3db96b5..fa7226d1041 100644 --- a/docs/api/paddle/distributed/fleet/Fleet_cn.rst +++ b/docs/api/paddle/distributed/fleet/Fleet_cn.rst @@ -21,7 +21,7 @@ init(role_maker=None, is_collective=False, strategy=None) - **role_maker** (RoleMakerBase) 已初始化好的PaddleCloudRoleMaker或UserDefineRoleMaker - **is_collective** (bool) 在未指定role_maker的情况下,可由init方法自行初始化RoleMaker, is_collective为True则按照collective模式进行创建,is_collective=False则按照ParameterServer模式进行创建 - - **strategy** (DistributedStrategy):分布式训练的额外属性。详情请参阅paddle.distributed.fleet.DistributedStrategy。默认值:None。 + - **strategy** (DistributedStrategy):分布式训练的额外属性。详情请参阅paddle.distributed.fleet.DistributedStrategy。默认值:None。 **返回** None diff --git a/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst b/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst index cb9e186ad48..67cc12c3110 100644 --- a/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst +++ b/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst @@ -32,7 +32,7 @@ to_string() 将当前环境变量以字符串的形式输出 **返回** - + string diff --git a/docs/api/paddle/distributed/fleet/UtilBase_cn.rst b/docs/api/paddle/distributed/fleet/UtilBase_cn.rst index cddbf83a610..a3421b26fe9 100644 --- 
a/docs/api/paddle/distributed/fleet/UtilBase_cn.rst +++ b/docs/api/paddle/distributed/fleet/UtilBase_cn.rst @@ -32,16 +32,16 @@ Numpy.array|None:一个和 `input` 形状一致的numpy数组或None。 import sys import numpy as np import os - + os.environ["PADDLE_WITH_GLOO"] = "2" - + def train(): role = PaddleCloudRoleMaker( is_collective=False, init_gloo=True, path="./tmp_gloo") fleet.init(role) - + if fleet.is_server(): input = [1, 2] output = fleet.util.all_reduce(input, "sum", "server") @@ -57,7 +57,7 @@ Numpy.array|None:一个和 `input` 形状一致的numpy数组或None。 # [8, 12] if __name__ == "__main__": train() - + barrier(comm_world="worker") ''''''''' 在指定的通信集合间进行阻塞操作,以实现集合间进度同步。 @@ -78,7 +78,7 @@ barrier(comm_world="worker") import os os.environ["PADDLE_WITH_GLOO"] = "2" - + def train(): role = PaddleCloudRoleMaker( is_collective=False, diff --git a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst index 3e562756f1c..c963bc39b0c 100644 --- a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst @@ -266,7 +266,7 @@ HADOOP系统文件移动。 - **fs_src_path** (str):移动前源文件路径名。 - **fs_dst_path** (str):移动后目标文件路径名。 - **overwrite** (bool):若目标文件已存在,是否删除进行重写,默认不重写并抛出异常。 - + **代码示例** .. code-block:: python diff --git a/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst b/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst index 5d5555d8e16..62de09a312b 100644 --- a/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst @@ -192,7 +192,7 @@ mv(src_path, dst_path, overwrite=False) - **src_path** (str):移动前源文件路径名。 - **dst_path** (str):移动后目标文件路径名。 - **overwrite** (bool):若目标文件已存在,是否删除进行重写,默认不重写并抛出异常。 - + **代码示例** .. 
code-block:: python diff --git a/docs/api/paddle/distributed/irecv_cn.rst b/docs/api/paddle/distributed/irecv_cn.rst index 62663348311..af82ae06164 100644 --- a/docs/api/paddle/distributed/irecv_cn.rst +++ b/docs/api/paddle/distributed/irecv_cn.rst @@ -4,7 +4,7 @@ irecv ------------------------------- -.. py:function:: paddle.distributed.irecv(tensor, src=None, group=None) +.. py:function:: paddle.distributed.irecv(tensor, src=None, group=None) 异步接受发送来的tensor。 参数 diff --git a/docs/api/paddle/distributed/launch_cn.rst b/docs/api/paddle/distributed/launch_cn.rst index 8931a321f20..00220dac88c 100644 --- a/docs/api/paddle/distributed/launch_cn.rst +++ b/docs/api/paddle/distributed/launch_cn.rst @@ -24,7 +24,7 @@ Launch 模块是在每个节点运行,负责分布式协同和本地进程管 [--max_restart MAX_RESTART] [--elastic_level ELASTIC_LEVEL] [--elastic_timeout ELASTIC_TIMEOUT] training_script ... - + 基础参数 ::::::::: - ``--master``:主节点,支持缺省 http:// 和 etcd://,默认缺省 http://。例如 ``--master=127.0.0.1:8080``。默认值 ``--master=None``。 @@ -88,22 +88,22 @@ IPU 参数 ::::::::: IPU分布式训练只需要3个参数:``--devices``,``training_script`` 和 ``training_script_args``。对于IPU的参数说明如下: ``--devices`` 表示设备个数,例如 ``--devices=4`` 表示当前的训练程序需要4个IPUs。 - ``training_script`` 只允许设置为 ``ipu`` 。 + ``training_script`` 只允许设置为 ``ipu`` 。 ``training_script_args`` 表示启动IPU分布式训练的相关参数。请参看如下各项参数说明。 请参考 ``代码实例十``。 - - - ``--hosts``:IPU分布式训练的主机ip,一个主机可包含多个进程。 - + + - ``--hosts``:IPU分布式训练的主机ip,一个主机可包含多个进程。 + - ``--nproc_per_host``: 每个主机的进程数量。一个进程可包含多个实例。 - + - ``--ipus_per_replica``:每个实例包含的IPU数量。一个实例可包含多个IPUs。 - + - ``--ipu_partition``:分布式训练中使用的IPU分区名称。 - + - ``--vipu_server``:IPU设备管理服务的ip。 - + - ``training_script``:分布式训练任务脚本的绝对路径,例如 ``training.py`` 。 - + - ``training_script_args``:``training_script`` 的输入参数,与普通起任务时输入的参数一样,例如 ``--lr=0.1``。 返回 @@ -150,15 +150,15 @@ IPU 参数 ::::::::: .. 
code-block:: bash :name: code-block-example-bash2 - - # 启动两机任务,其中机器 ip 为 192.168.0.16, 192.168.0.17 + + # 启动两机任务,其中机器 ip 为 192.168.0.16, 192.168.0.17 # On 192.168.0.16: python -m paddle.distributed.launch --devices=0,1,2,3 --master=192.168.0.16:8090 --nnodes=2 train.py --lr=0.01 # On 192.168.0.17: - + python -m paddle.distributed.launch --devices=0,1,2,3 --master=192.168.0.16:8090 --nnodes=2 train.py --lr=0.01 代码示例三 (ps, cpu,单机) @@ -167,7 +167,7 @@ IPU 参数 :name: code-block-example-bash3 # 在单机上启动多个 server 和 trainer - + python -m paddle.distributed.launch --server_num=2 --trainer_num=4 train.py --lr=0.01 代码示例四 (ps, cpu,多机) @@ -228,7 +228,7 @@ IPU 参数 :name: code-block-example-bash8 # 使用如下命令启动多机 heter ps - + # On 192.168.0.16: export CUDA_VISIBLE_DEVICES=0 @@ -247,7 +247,7 @@ IPU 参数 # 使用如下命令启动弹性训练 # 当 4 个节点 ready 时,训练立即开始,当只有 2 或 3 个节点 ready 时,将等待超时然后开始训练 python -m paddle.distributed.launch --master etcd://10.0.0.1:2379 --nnodes 2:4 train.py - + # 在训练过程中如果节点发生变化,上述逻辑不变。 代码示例十 (ipu) diff --git a/docs/api/paddle/distributed/reduce_scatter_cn.rst b/docs/api/paddle/distributed/reduce_scatter_cn.rst index 09fa00c339d..6a6428bba03 100644 --- a/docs/api/paddle/distributed/reduce_scatter_cn.rst +++ b/docs/api/paddle/distributed/reduce_scatter_cn.rst @@ -4,7 +4,7 @@ reduce_scatter ------------------------------- -.. py:function:: paddle.distributed.reduce_scatter(tensor, tensor_list, op=ReduceOp.SUM, group=None, use_calc_stream=True) +.. 
py:function:: paddle.distributed.reduce_scatter(tensor, tensor_list, op=ReduceOp.SUM, group=None, use_calc_stream=True) 规约,然后将张量列表分散到组中的所有进程上 参数 diff --git a/docs/api/paddle/distributed/sharding/group_sharded_parallel_cn.rst b/docs/api/paddle/distributed/sharding/group_sharded_parallel_cn.rst index 4055e2362f8..85854ddd88d 100644 --- a/docs/api/paddle/distributed/sharding/group_sharded_parallel_cn.rst +++ b/docs/api/paddle/distributed/sharding/group_sharded_parallel_cn.rst @@ -30,4 +30,3 @@ group sharded配置后的model,optimizer和scaler 代码示例 ::::::::: COPY-FROM: paddle.distributed.sharding.group_sharded_parallel - \ No newline at end of file diff --git a/docs/api/paddle/distributed/split_cn.rst b/docs/api/paddle/distributed/split_cn.rst index ebfc4cfa6f9..2e00b3849de 100644 --- a/docs/api/paddle/distributed/split_cn.rst +++ b/docs/api/paddle/distributed/split_cn.rst @@ -22,7 +22,7 @@ split :height: 350 :alt: single_embedding :align: center - + 并行Embedding情况如下图所示 .. image:: ./img/split_embedding_split.png diff --git a/docs/api/paddle/distributed/utils/global_gather_cn.rst b/docs/api/paddle/distributed/utils/global_gather_cn.rst index 5f875d6543d..dcb09e6a1ff 100644 --- a/docs/api/paddle/distributed/utils/global_gather_cn.rst +++ b/docs/api/paddle/distributed/utils/global_gather_cn.rst @@ -9,7 +9,7 @@ global_gather global_gather根据global_count将x的数据收集到n_expert * world_size个expert,然后根据local_count接收数据。 其中expert是用户定义的专家网络,n_expert是指每张卡拥有的专家网络数目,world_size是指运行网络的显卡数目。 -如下图所示,world_size是2,n_expert是2,x的batch_size是4,local_count是[2, 0, 2, 0],0卡的global_count是[2, 0, , ], +如下图所示,world_size是2,n_expert是2,x的batch_size是4,local_count是[2, 0, 2, 0],0卡的global_count是[2, 0, , ], 1卡的global_count是[2, 0, ,](因为篇幅问题,这里只展示在0卡运算的数据),在global_gather算子里, global_count和local_count的意义与其在global_scatter里正好相反, global_count[i]代表向第 (i // n_expert)张卡的第 (i % n_expert)个expert发送local_expert[i]个数据, diff --git a/docs/api/paddle/distributed/utils/global_scatter_cn.rst 
b/docs/api/paddle/distributed/utils/global_scatter_cn.rst index d776f138220..1a08d20a54a 100644 --- a/docs/api/paddle/distributed/utils/global_scatter_cn.rst +++ b/docs/api/paddle/distributed/utils/global_scatter_cn.rst @@ -9,7 +9,7 @@ global_scatter global_scatter根据local_count将x的数据分发到n_expert * world_size个expert,然后根据global_count接收数据。 其中expert是用户定义的专家网络,n_expert是指每张卡拥有的专家网络数目,world_size是指运行网络的显卡数目。 -如下图所示,world_size是2,n_expert是2,x的batch_size是4,local_count是[2, 0, 2, 0],0卡的global_count是[2, 0, , ], +如下图所示,world_size是2,n_expert是2,x的batch_size是4,local_count是[2, 0, 2, 0],0卡的global_count是[2, 0, , ], 1卡的global_count是[2, 0, ,](因为篇幅问题,这里只展示在0卡运算的数据),在global_scatter算子里, local_count[i]代表向第 (i // n_expert)张卡的第 (i % n_expert)个expert发送local_expert[i]个数据, global_count[i]代表从第 (i // n_expert)张卡接收global_count[i]个数据给本卡的 第(i % n_expert)个expert。 diff --git a/docs/api/paddle/distribution/AbsTransform_cn.rst b/docs/api/paddle/distribution/AbsTransform_cn.rst index fb521681250..38deeb5514d 100644 --- a/docs/api/paddle/distribution/AbsTransform_cn.rst +++ b/docs/api/paddle/distribution/AbsTransform_cn.rst @@ -31,9 +31,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -47,7 +47,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -62,7 +62,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -78,7 +78,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -92,7 +92,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -106,7 +106,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/AffineTransform_cn.rst 
b/docs/api/paddle/distribution/AffineTransform_cn.rst index 074acf92046..10ab3739b65 100644 --- a/docs/api/paddle/distribution/AffineTransform_cn.rst +++ b/docs/api/paddle/distribution/AffineTransform_cn.rst @@ -29,9 +29,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -45,7 +45,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -60,7 +60,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -76,7 +76,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -90,7 +90,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -104,7 +104,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/Beta_cn.rst b/docs/api/paddle/distribution/Beta_cn.rst index bc2bcb4c734..2c8c60f0be3 100644 --- a/docs/api/paddle/distribution/Beta_cn.rst +++ b/docs/api/paddle/distribution/Beta_cn.rst @@ -6,7 +6,7 @@ Beta .. py:class:: paddle.distribution.Beta(alpha, beta) -在概率论中,Beta分布是指一组定义在 [0,1] 区间的连续概率分布,有两个参数 +在概率论中,Beta分布是指一组定义在 [0,1] 区间的连续概率分布,有两个参数 :math:`\alpha,\beta>0`,是狄利克雷(:ref:`cn_api_paddle_distribution_Dirichlet`) 分布的一元形式。 @@ -20,7 +20,7 @@ Beta .. 
math:: - B(\alpha, \beta) = \int_{0}^{1} t^{\alpha - 1} (1-t)^{\beta - 1}\mathrm{d}t + B(\alpha, \beta) = \int_{0}^{1} t^{\alpha - 1} (1-t)^{\beta - 1}\mathrm{d}t 参数 ::::::::: @@ -60,7 +60,7 @@ prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的概率。 @@ -74,7 +74,7 @@ log_prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的对数概率。 diff --git a/docs/api/paddle/distribution/Categorical_cn.rst b/docs/api/paddle/distribution/Categorical_cn.rst index 9728b6a7367..9e4f638cdaf 100644 --- a/docs/api/paddle/distribution/Categorical_cn.rst +++ b/docs/api/paddle/distribution/Categorical_cn.rst @@ -113,7 +113,7 @@ kl_divergence(other) **参数** - **other** (Categorical) - 输入的另一个类别分布。数据类型为float32。 - + **返回** 相对于另一个类别分布的KL散度,数据类型为float32。 @@ -147,7 +147,7 @@ entropy() ''''''''' 信息熵。 - + **返回** 类别分布的信息熵,数据类型为float32。 @@ -232,4 +232,4 @@ log_prob(value) value = paddle.to_tensor([2,1,3]) cat.log_prob(value) # [-5.10271 -2.22287 -1.31061] - + diff --git a/docs/api/paddle/distribution/ChainTransform_cn.rst b/docs/api/paddle/distribution/ChainTransform_cn.rst index 25e0e32e46b..432a6160fa9 100644 --- a/docs/api/paddle/distribution/ChainTransform_cn.rst +++ b/docs/api/paddle/distribution/ChainTransform_cn.rst @@ -30,9 +30,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -46,7 +46,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -61,7 +61,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -77,7 +77,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -91,7 +91,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -105,7 +105,7 @@ inverse_shape(shape) **参数** - 
**shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/Dirichlet_cn.rst b/docs/api/paddle/distribution/Dirichlet_cn.rst index 68ba44b7549..1ef625b9e0f 100644 --- a/docs/api/paddle/distribution/Dirichlet_cn.rst +++ b/docs/api/paddle/distribution/Dirichlet_cn.rst @@ -8,12 +8,12 @@ Dirichlet 狄利克雷分布(Dirichlet distribution)是一类在实数域以正单纯形(standard simplex)为支撑集的高维连续概率分布,是Beta分布在高维情形的推广。 -对独立同分布(independent and identically distributed, iid)的连续随机变量 +对独立同分布(independent and identically distributed, iid)的连续随机变量 :math:`\boldsymbol X \in R_k`,和支撑集 :math:`\boldsymbol X \in (0,1), ||\boldsymbol X|| = 1`,其概率密度函数(pdf)为: .. math:: - f(\boldsymbol X; \boldsymbol \alpha) = \frac{1}{B(\boldsymbol \alpha)} \prod_{i=1}^{k}x_i^{\alpha_i-1} + f(\boldsymbol X; \boldsymbol \alpha) = \frac{1}{B(\boldsymbol \alpha)} \prod_{i=1}^{k}x_i^{\alpha_i-1} 其中,:math:`\boldsymbol \alpha = {\alpha_1,...,\alpha_k}, k \ge 2` 是无量纲分布参数,:math:`B(\boldsymbol \alpha)` 是多元Beta函数。 @@ -28,7 +28,7 @@ Gamma函数。 ::::::::: - **concentration** (Tensor) - 浓度参数,即上述公式 :math:`\alpha` 参数。当 - concentration维度大于1时,最后一维表示参数,参数形状 + concentration维度大于1时,最后一维表示参数,参数形状 ``event_shape=concentration.shape[-1:]``,其余维为Batch维, ``batch_shape=concentration.shape[:-1]`` . 
@@ -61,7 +61,7 @@ prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的概率。 @@ -75,7 +75,7 @@ log_prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的对数概率。 diff --git a/docs/api/paddle/distribution/ExpTransform_cn.rst b/docs/api/paddle/distribution/ExpTransform_cn.rst index 5602c3e32b5..83ceb50d9fa 100644 --- a/docs/api/paddle/distribution/ExpTransform_cn.rst +++ b/docs/api/paddle/distribution/ExpTransform_cn.rst @@ -24,9 +24,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -40,7 +40,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -55,7 +55,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -71,7 +71,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -85,7 +85,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -99,7 +99,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/IndependentTransform_cn.rst b/docs/api/paddle/distribution/IndependentTransform_cn.rst index 77341cec37a..57122d7ebe8 100644 --- a/docs/api/paddle/distribution/IndependentTransform_cn.rst +++ b/docs/api/paddle/distribution/IndependentTransform_cn.rst @@ -13,9 +13,9 @@ IndependentTransform 例如,假设基础变换为 ``ExpTransform``,其输入为一个随机采样结果 ``x``,形状 为 ``(S=[4],B=[2,2],E=[3])`` , ``S`` 、``B`` 、``E`` 分别表示采样形状、批形状、事件形 -状,``reinterpreted_batch_rank=1``。则 ``IndependentTransform(ExpTransform)`` +状,``reinterpreted_batch_rank=1``。则 ``IndependentTransform(ExpTransform)`` 变换后,``x`` 的形状为 ``(S=[4],B=[2],E=[2,3])``,即将最右侧的批维度作为事件维度。 -此时 ``forward`` 和 ``inverse`` 输出形状仍是 ``(4,2,2,3)`` , +此时 
``forward`` 和 ``inverse`` 输出形状仍是 ``(4,2,2,3)`` , 但 ``forward_log_det_jacobian`` 以及 ``inverse_log_det_jacobian`` 输出形状 为 ``(4, 2)`` 。 @@ -42,9 +42,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -58,7 +58,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -73,7 +73,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -89,7 +89,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -103,7 +103,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -117,7 +117,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/Independent_cn.rst b/docs/api/paddle/distribution/Independent_cn.rst index 212b6d2179e..12f83e645a0 100644 --- a/docs/api/paddle/distribution/Independent_cn.rst +++ b/docs/api/paddle/distribution/Independent_cn.rst @@ -4,7 +4,7 @@ Independent ------------------------------- .. 
py:class:: paddle.distribution.Independent(base, reinterpreted_batch_rank) - + 将一个基础分布 ``base`` 的最右侧 ``reinterpreted_batch_rank`` 批维度转换为事件维度。 @@ -43,7 +43,7 @@ prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的概率。 @@ -57,7 +57,7 @@ log_prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的对数概率。 diff --git a/docs/api/paddle/distribution/Multinomial_cn.rst b/docs/api/paddle/distribution/Multinomial_cn.rst index d22a59481bb..2ee96cfeb75 100644 --- a/docs/api/paddle/distribution/Multinomial_cn.rst +++ b/docs/api/paddle/distribution/Multinomial_cn.rst @@ -7,7 +7,7 @@ Multinomial ``Multinomial`` 表示实验次数为 ``total_count``,概率为 ``probs`` 的多项分布。 -在概率论中,多项分布是二项分布的多元推广,表示具有 :math:`k` 个类别的事件重复实验 :math:`n` +在概率论中,多项分布是二项分布的多元推广,表示具有 :math:`k` 个类别的事件重复实验 :math:`n` 次,每个类别出现次数的概率。当 :math:`k=2` 且 :math:`n=1` 时,为伯努利分布,当 :math:`k=2` 且 :math:`n>1` 时,为二项分布,当 :math:`k>2` 且 :math:`n=1` 时,为分类分布。 多项分布概率密度函数如下: @@ -22,14 +22,14 @@ Multinomial :math:`x_i` 表示第 :math:`i` 个分类出现的次数。 - + 参数 ::::::::: - **total_count** (int) - 实验次数。 - **probs** (Tensor) - 每个类别发生的概率。最后一维为事件维度,其它维为批维度。``probs`` 中 的每个元素取值范围为 ``[0,1]``。如果输入数据大于1,会沿着最后一维进行归一化操作。 - + 代码示例 ::::::::: @@ -61,7 +61,7 @@ prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的概率。 @@ -75,7 +75,7 @@ log_prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的对数概率。 diff --git a/docs/api/paddle/distribution/PowerTransform_cn.rst b/docs/api/paddle/distribution/PowerTransform_cn.rst index e8dade4833e..18a39ab9a6f 100644 --- a/docs/api/paddle/distribution/PowerTransform_cn.rst +++ b/docs/api/paddle/distribution/PowerTransform_cn.rst @@ -28,9 +28,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -44,7 +44,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -59,7 
+59,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -75,7 +75,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -89,7 +89,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -103,7 +103,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/ReshapeTransform_cn.rst b/docs/api/paddle/distribution/ReshapeTransform_cn.rst index fef79e5d5b1..fb728bb1ea0 100644 --- a/docs/api/paddle/distribution/ReshapeTransform_cn.rst +++ b/docs/api/paddle/distribution/ReshapeTransform_cn.rst @@ -31,9 +31,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -47,7 +47,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -62,7 +62,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -78,7 +78,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -92,7 +92,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -106,7 +106,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/SigmoidTransform_cn.rst b/docs/api/paddle/distribution/SigmoidTransform_cn.rst index 1273f17dbce..8a569f21fd9 100644 --- a/docs/api/paddle/distribution/SigmoidTransform_cn.rst +++ b/docs/api/paddle/distribution/SigmoidTransform_cn.rst @@ -23,9 +23,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 
:ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -39,7 +39,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -54,7 +54,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -70,7 +70,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -84,7 +84,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -98,7 +98,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/SoftmaxTransform_cn.rst b/docs/api/paddle/distribution/SoftmaxTransform_cn.rst index 5feba97707c..33536d9bc48 100644 --- a/docs/api/paddle/distribution/SoftmaxTransform_cn.rst +++ b/docs/api/paddle/distribution/SoftmaxTransform_cn.rst @@ -27,9 +27,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -43,7 +43,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -58,7 +58,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -74,7 +74,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -88,7 +88,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -102,7 +102,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/StackTransform_cn.rst b/docs/api/paddle/distribution/StackTransform_cn.rst index d1cc722055a..771c9c1e59b 100644 --- a/docs/api/paddle/distribution/StackTransform_cn.rst +++ 
b/docs/api/paddle/distribution/StackTransform_cn.rst @@ -28,9 +28,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -44,7 +44,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -59,7 +59,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -75,7 +75,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -89,7 +89,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -103,7 +103,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/StickBreakingTransform_cn.rst b/docs/api/paddle/distribution/StickBreakingTransform_cn.rst index 73dc2a5fd0e..db068a3c3cd 100644 --- a/docs/api/paddle/distribution/StickBreakingTransform_cn.rst +++ b/docs/api/paddle/distribution/StickBreakingTransform_cn.rst @@ -23,9 +23,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -39,7 +39,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -54,7 +54,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -70,7 +70,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -84,7 +84,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -98,7 +98,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff 
--git a/docs/api/paddle/distribution/TanhTransform_cn.rst b/docs/api/paddle/distribution/TanhTransform_cn.rst index 107fc44554a..ffdcd6db5e8 100644 --- a/docs/api/paddle/distribution/TanhTransform_cn.rst +++ b/docs/api/paddle/distribution/TanhTransform_cn.rst @@ -5,7 +5,7 @@ TanhTransform .. py:class:: paddle.distribution.TanhTransform() -Tanh变换 :math:`y = tanh(x)` +Tanh变换 :math:`y = tanh(x)` 代码示例 @@ -24,9 +24,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -40,7 +40,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -55,7 +55,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -71,7 +71,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -85,7 +85,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -99,7 +99,7 @@ inverse_shape(shape) **参数** - **shape** (Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/Transform_cn.rst b/docs/api/paddle/distribution/Transform_cn.rst index c81fc65de34..dcddb07eb27 100644 --- a/docs/api/paddle/distribution/Transform_cn.rst +++ b/docs/api/paddle/distribution/Transform_cn.rst @@ -8,10 +8,10 @@ Transform 随机变量变换的基类。 ``Transform`` 表示将一个随机变量,经过一个或一些列可微且可逆的映射后,变换为另一个随机变量, -并提供变换前后相应概率密度计算方法。主要应用于对一个分布 +并提供变换前后相应概率密度计算方法。主要应用于对一个分布 :ref:`cn_api_distribution_Distribution` 的随机采样结果进行变换。 -假设 :math:`X` 为 ``K`` 元随机变量,概率密度为 :math:`p_X(x)`。映射 +假设 :math:`X` 为 ``K`` 元随机变量,概率密度为 :math:`p_X(x)`。映射 :math:`f: x \rightarrow y` 为可微且可逆映射,则 :math:`Y` 的概率密度为 .. 
math:: @@ -19,33 +19,33 @@ Transform p_Y(y) = p_X(f^{-1}(y)) |det J_{f^{-1}}(y)| -其中 :math:`det` 表示计算行列式,:math:`J_{f^{-1}}(y)` 表示 :math:`f^{-1}` 在 +其中 :math:`det` 表示计算行列式,:math:`J_{f^{-1}}(y)` 表示 :math:`f^{-1}` 在 :math:`y` 处的雅可比矩阵。 .. math:: J(y) = \begin{bmatrix} - {\frac{\partial x_1}{\partial y_1}} &{\frac{\partial x_1}{\partial y_2}} + {\frac{\partial x_1}{\partial y_1}} &{\frac{\partial x_1}{\partial y_2}} &{\cdots} &{\frac{\partial x_1}{\partial y_K}} \\ {\frac{\partial x_2}{\partial y_1}} &{\frac{\partial x_2} {\partial y_2}}&{\cdots} &{\frac{\partial x_2}{\partial y_K}} \\ {\vdots} &{\vdots} &{\ddots} &{\vdots}\\ - {\frac{\partial x_K}{\partial y_1}} &{\frac{\partial x_K}{\partial y_2}} - &{\cdots} &{\frac{\partial x_K}{\partial y_K}} + {\frac{\partial x_K}{\partial y_1}} &{\frac{\partial x_K}{\partial y_2}} + &{\cdots} &{\frac{\partial x_K}{\partial y_K}} \end{bmatrix} 通过上述描述易知,变换 ``Transform`` 主要包含下述三个操作: 1.正变换( ``forward`` ): - + 表示正向变换 :math:`x \rightarrow f(x)` 。 - + 2.逆变换( ``inverse`` ): - + 表示逆向变换 :math:`y \rightarrow f^{-1}(y)` 。 - + 3.雅可比行列式绝对值的对数( ``log_det_jacobian`` ): - + 又可以细分为正变换雅可比行列式绝对值的对数 ``forward_log_det_jacobian`` 和逆变换雅 可比行列式绝对值的对数 ``inverse_log_det_jacobian``,两者互为负数关系,只实现一种 即可。 @@ -76,9 +76,9 @@ forward(x) **参数** -- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` +- **x** (Tensor) - 正变换输入参数,通常为 :ref:`cn_api_distribution_Distribution` 的随机采样结果。 - + **返回** - **y** (Tensor) - 正变换的计算结果。 @@ -92,7 +92,7 @@ inverse(y) **参数** - **y** (Tensor) - 逆变换的输入参数。 - + **返回** - **x** (Tensor) - 逆变换的计算结果。 @@ -107,7 +107,7 @@ forward_log_det_jacobian(x) **参数** - **x** (Tensor) - 输入参数。 - + **返回** - Tensor - 正变换雅可比行列式绝对值的对数。 @@ -123,7 +123,7 @@ inverse_log_det_jacobian(y) **参数** - **y** (Tensor) - 输入参数。 - + **返回** - Tensor - 逆变换雅可比行列式绝对值的对数。 @@ -137,7 +137,7 @@ forward_shape(shape) **参数** - **shape** (Sequence[int]) - 正变换输入的形状。 - + **返回** - Sequence[int] - 正变换输出的形状。 @@ -151,7 +151,7 @@ inverse_shape(shape) **参数** - **shape** 
(Sequence[int]) - 逆变换输入的形状。 - + **返回** - Sequence[int] - 逆变换输出的形状。 diff --git a/docs/api/paddle/distribution/TransformedDistribution_cn.rst b/docs/api/paddle/distribution/TransformedDistribution_cn.rst index f7325026470..8f3a2f8c5d8 100644 --- a/docs/api/paddle/distribution/TransformedDistribution_cn.rst +++ b/docs/api/paddle/distribution/TransformedDistribution_cn.rst @@ -6,7 +6,7 @@ TransformedDistribution 基于一个基础分布和一系列分布变换构建一个新的分布。 .. py:class:: paddle.distribution.TransformedDistribution(base, transforms) - + 参数 ::::::::: @@ -30,7 +30,7 @@ prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的概率。 @@ -44,7 +44,7 @@ log_prob(value) **参数** - **value** (Tensor) - 待计算值。 - + **返回** - Tensor: value的对数概率。 diff --git a/docs/api/paddle/distribution/Uniform_cn.rst b/docs/api/paddle/distribution/Uniform_cn.rst index 238e2389461..0e0426f8a92 100644 --- a/docs/api/paddle/distribution/Uniform_cn.rst +++ b/docs/api/paddle/distribution/Uniform_cn.rst @@ -51,7 +51,7 @@ sample(shape, seed=0) - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 - **seed** (int) - 长整型数。 - + **返回** Tensor,预先设计好维度的张量,数据类型为float32。 @@ -78,7 +78,7 @@ log_prob(value) **参数** - **value** (Tensor) - 输入张量。数据类型为float32或float64。 - + **返回** Tensor,对数概率,数据类型与value相同。 @@ -92,7 +92,7 @@ probs(value) **参数** - **value** (Tensor) - 输入张量。数据类型为float32或float64。 - + **返回** Tensor,概率,数据类型与value相同。 diff --git a/docs/api/paddle/distribution/kl_divergence_cn.rst b/docs/api/paddle/distribution/kl_divergence_cn.rst index 419dad6be2b..ca396ead5ff 100644 --- a/docs/api/paddle/distribution/kl_divergence_cn.rst +++ b/docs/api/paddle/distribution/kl_divergence_cn.rst @@ -7,9 +7,9 @@ kl_divergence 计算分布p和q之间的KL散度。 -.. math:: - - KL(p||q) = \int p(x)log\frac{p(x)}{q(x)} \mathrm{d}x +.. 
math:: + + KL(p||q) = \int p(x)log\frac{p(x)}{q(x)} \mathrm{d}x 参数 ::::::::: diff --git a/docs/api/paddle/einsum_cn.rst b/docs/api/paddle/einsum_cn.rst index 20e42e6b8f2..cc84ac72143 100644 --- a/docs/api/paddle/einsum_cn.rst +++ b/docs/api/paddle/einsum_cn.rst @@ -9,7 +9,7 @@ einsum Einstein 求和是一种采用 Einstein 标记法描述的张量求和,输入单个或多个张量,输出单个张量。 -如下的张量操作或运算均可视为 Einstein 求和的特例 +如下的张量操作或运算均可视为 Einstein 求和的特例 - 单操作数 - 迹:trace @@ -69,7 +69,7 @@ Einsum 求和过程理论上等价于如下四步,但实现中实际执行的 **equation** (str):求和标记 - + **operands** (Tensor, [Tensor, ...]):输入张量 返回 diff --git a/docs/api/paddle/empty_cn.rst b/docs/api/paddle/empty_cn.rst index 1181a6f5b2c..7c78d4a9ac8 100644 --- a/docs/api/paddle/empty_cn.rst +++ b/docs/api/paddle/empty_cn.rst @@ -15,7 +15,7 @@ empty - **shape** (list|tuple|Tensor) – 指定创建 Tensor 的形状(shape),数据类型为 int32 或者 int64。 - **dtype** (np.dtype|str,可选)- 输出变量的数据类型,可以是 bool、float16、float32、float64、int32、int64。若为 None,则输出变量的数据类型为系统全局默认类型,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: 返回一个根据 ``shape`` 和 ``dtype`` 创建并且尚未初始化的 Tensor。 diff --git a/docs/api/paddle/empty_like_cn.rst b/docs/api/paddle/empty_like_cn.rst index 4d1d32f7d31..4b924632b40 100644 --- a/docs/api/paddle/empty_like_cn.rst +++ b/docs/api/paddle/empty_like_cn.rst @@ -14,7 +14,7 @@ empty_like - **x** (Tensor) – 输入Tensor,输出Tensor和x具有相同的形状,x的数据类型可以是bool、float16、float32、float64、int32、int64。 - **dtype** (np.dtype|str,可选)- 输出变量的数据类型,可以是bool、float16、float32、float64、int32、int64。若参数为None,则输出变量的数据类型和输入变量相同,默认值为None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: 返回一个根据 ``x`` 和 ``dtype`` 创建并且尚未初始化的Tensor。 diff --git a/docs/api/paddle/equal_cn.rst b/docs/api/paddle/equal_cn.rst index 77c912bc67a..e1f51e740b1 100644 --- a/docs/api/paddle/equal_cn.rst +++ b/docs/api/paddle/equal_cn.rst @@ -16,7 +16,7 @@ equal - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **y** (Tensor) - 输入Tensor,支持的数据类型包括 
bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: diff --git a/docs/api/paddle/fft/Overview_cn.rst b/docs/api/paddle/fft/Overview_cn.rst index 041692d6753..1442ac0707c 100644 --- a/docs/api/paddle/fft/Overview_cn.rst +++ b/docs/api/paddle/fft/Overview_cn.rst @@ -17,7 +17,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.fft.fft ` ", "一维离散傅里叶变换" " :ref:`paddle.fft.ifft ` ", "一维逆向离散傅里叶变换" " :ref:`paddle.fft.fft2 ` ", "二维离散傅里叶变换" @@ -32,7 +32,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.fft.rfft ` ", "一维离散实数傅里叶变换" " :ref:`paddle.fft.irfft ` ", "一维离散实数傅里叶变换的逆变换" " :ref:`paddle.fft.rfft2 ` ", "二维离散实数傅里叶变换" @@ -47,7 +47,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.fft.hfft ` ", "一维离散厄米特傅里叶变换" " :ref:`paddle.fft.ihfft ` ", "一维离散厄米特傅里叶变换的逆变换" " :ref:`paddle.fft.hfft2 ` ", "二维离散厄米特傅里叶变换" @@ -62,7 +62,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.fft.fftfreq ` ", "计算傅里叶变换采样频率" " :ref:`paddle.fft.rfftfreq ` ", "计算傅里叶变换采样频率,用于 ``rfft``, ``irfft``" " :ref:`paddle.fft.fftshift ` ", "移动零频率项至频谱中心" @@ -71,7 +71,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API 背景 ========================== 傅里叶分析是将信号表示为一系列周期性成分,并且从这些周期性成分中还原信号的方法。当信号和傅里叶 -变换都被替换成离散化的,这个过程称为离散傅里叶变换 (Discrete Fourier Transform, DFT). +变换都被替换成离散化的,这个过程称为离散傅里叶变换 (Discrete Fourier Transform, DFT). 
因为快速傅里叶变换算法的高效性,傅里叶变换称为数值计算的一个重要支柱。 离散傅里叶变换将离散的输入表示为离散频率的周期性成分之和,在数字信号处理上有广泛的应用,比如滤 @@ -91,7 +91,7 @@ paddle.fft 的离散傅里叶变换中,一维离散傅里叶变换定义如下 X_{k} = \sigma \sum_{j=0}^{n-1} x_{j} \exp (\delta i 2 \pi \frac{jk}{n}) -其中频率为 f (单位:循环每采样间隔)的分量被表示为一个复指数函数 :math:`\exp (i 2\pi fj \Delta t)`, +其中频率为 f (单位:循环每采样间隔)的分量被表示为一个复指数函数 :math:`\exp (i 2\pi fj \Delta t)`, :math:`\Delta t` 为采样间隔。 n 为傅里叶变换点数,亦即傅里叶变换轴的长度。 @@ -113,7 +113,7 @@ n 为傅里叶变换点数,亦即傅里叶变换轴的长度。 负的奈奎斯特(Nyquist)频率项,对于实数输入来说,这一项也总是实数。``X[(n-1)//2]`` 为频率最 大的正频率项,`X[(n+1)//2]`为频率绝对值最大的负频率项。 -``paddle.fft.fftfreq(n)`` 可以返回频谱中每一项对应的频率值。``paddle.fft.fftshift(X)`` +``paddle.fft.fftfreq(n)`` 可以返回频谱中每一项对应的频率值。``paddle.fft.fftshift(X)`` 可以对频谱进行偏移,将零频率移动到中心位置,``paddle.fft.fftshift(X)`` 则是这个变换的逆变 换。 @@ -138,7 +138,7 @@ d 是傅里叶变换维数。 :math:`n_{1}, n_{2}, \cdots, n_{d}` 是每个傅 - "forward": 正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho": 正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; -其中 +其中 .. math:: @@ -149,11 +149,11 @@ d 是傅里叶变换维数。 :math:`n_{1}, n_{2}, \cdots, n_{d}` 是每个傅 实数傅里叶变换和厄米特傅里叶变换 ======================================== -当输入信号为实数信号时,傅里叶变换的结果具有厄米特对称性,亦即频率 :math:`f_{k}` 上的分量和 -:math:`-f_{k}` 上的分量互为共轭。因此可以利用对称性来减少计算量。实数傅里叶变换 +当输入信号为实数信号时,傅里叶变换的结果具有厄米特对称性,亦即频率 :math:`f_{k}` 上的分量和 +:math:`-f_{k}` 上的分量互为共轭。因此可以利用对称性来减少计算量。实数傅里叶变换 (``rfft``) 系列的函数是用于实数输入的,并且利用了对称性,只计算正频率项,直到奈奎斯特频率项。 因此,对于实数傅里叶变换,``n`` 个复数输入点只产生 ``n//2 + 1`` 个实数输出点。这一系列变换 -的逆变换也预设了输入数据具有厄米特对称性,要产生 ``n`` 个实数输出点,只需要使用 +的逆变换也预设了输入数据具有厄米特对称性,要产生 ``n`` 个实数输出点,只需要使用 ``n//2 + 1`` 个复数输入点。 与此相对应,当频谱是纯实数时,输入信号具有厄米特对称性。厄米特傅里叶变换(``hfft``)系列同样 @@ -164,5 +164,5 @@ d 是傅里叶变换维数。 :math:`n_{1}, n_{2}, \cdots, n_{d}` 是每个傅 ======================================== paddle.fft 中的傅里叶变换函数支持自动微分,使用的方法是维廷格微积分(Wertinger Calculus)。 -对于复函数 :math:`f: \mathbb{C} \rightarrow \mathbb{C}`,paddle 中的惯例是使用 +对于复函数 :math:`f: \mathbb{C} \rightarrow \mathbb{C}`,paddle 中的惯例是使用 :math:`f(z)` 对其输入的共轭的偏导数 :math:`\frac{\partial f}{\partial z^{*}}`. 
diff --git a/docs/api/paddle/fft/fft2_cn.rst b/docs/api/paddle/fft/fft2_cn.rst index 6cbd4fb8dd8..6400a227482 100644 --- a/docs/api/paddle/fft/fft2_cn.rst +++ b/docs/api/paddle/fft/fft2_cn.rst @@ -15,7 +15,7 @@ fft2 - **x** (Tensor) - 输入 Tensor,数据类型为实数或复数。 - **s** (Sequence[int],可选) - 输出 Tensor 在每一个傅里叶变换轴上的长度(类似一维傅里 - 叶变换中的参数 ``n``)。对于每一个傅里叶变换的轴,如果 ``s`` 中该轴的长度比输入 Tensor + 叶变换中的参数 ``n``)。对于每一个傅里叶变换的轴,如果 ``s`` 中该轴的长度比输入 Tensor 中对应轴的长度小,输入 Tensor 会被截断。如果 ``s`` 中该轴的长度比输入 Tensor 中对应轴 的长度大,则输入会被补零。如果 ``s`` 没有指定,则使用输入 Tensor 中由 ``axes`` 指定的各 个轴的长度。 diff --git a/docs/api/paddle/fft/fftfreq_cn.rst b/docs/api/paddle/fft/fftfreq_cn.rst index 80ffdc51183..ece6d217087 100644 --- a/docs/api/paddle/fft/fftfreq_cn.rst +++ b/docs/api/paddle/fft/fftfreq_cn.rst @@ -18,7 +18,7 @@ fftfreq - **n** (int) - 窗长度(傅里叶变换点数)。 - **d** (float,可选) - 采样间隔,采样率的倒数,默认值为 1。 -- **dtype** (str,可选) - 返回 Tensor 的数据类型,默认为 +- **dtype** (str,可选) - 返回 Tensor 的数据类型,默认为 ``paddle.get_default_dtype()`` 返回的类型。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/fft/fftn_cn.rst b/docs/api/paddle/fft/fftn_cn.rst index 8572165b42b..454574dacba 100644 --- a/docs/api/paddle/fft/fftn_cn.rst +++ b/docs/api/paddle/fft/fftn_cn.rst @@ -14,7 +14,7 @@ fftn - **x** (Tensor) - 输入数据,其数据类型可以为实数或复数。 - **s** (Sequence[int],可选) - 输出 Tensor 在每一个傅里叶变换轴上的长度(类似一维傅里 - 叶变换中的参数 ``n``)。对于每一个傅里叶变换的轴,如果 ``s`` 中该轴的长度比输入 Tensor + 叶变换中的参数 ``n``)。对于每一个傅里叶变换的轴,如果 ``s`` 中该轴的长度比输入 Tensor 中对应轴的长度小,输入 Tensor 会被截断。如果 ``s`` 中该轴的长度比输入 Tensor 中对应轴 的长度大,则输入会被补零。如果 ``s`` 没有指定,则使用输入 Tensor 中由 ``axes`` 指定的各 个轴的长度。 diff --git a/docs/api/paddle/fft/hfft_cn.rst b/docs/api/paddle/fft/hfft_cn.rst index 65f7592ea38..8ca2db2c4d7 100644 --- a/docs/api/paddle/fft/hfft_cn.rst +++ b/docs/api/paddle/fft/hfft_cn.rst @@ -14,8 +14,8 @@ hfft - **x** (Tensor) - 输入数据,其数据类型为复数。 - **n** (int,可选) - 输出 Tensor 在傅里叶变换轴的长度。输入 Tensor 在该轴的长度必须为 - ``n//2+1``,如果输入 Tensor 的长度大于 ``n//2+1``,输入 Tensor 会被截断。如果输入 - Tensor 的长度小于 ``n//2+1``,则输入 
Tensor 会被补零。如果 ``n`` 没有被指定,则取 + ``n//2+1``,如果输入 Tensor 的长度大于 ``n//2+1``,输入 Tensor 会被截断。如果输入 + Tensor 的长度小于 ``n//2+1``,则输入 Tensor 会被补零。如果 ``n`` 没有被指定,则取 ``2*(m-1)``,其中,``m`` 是输入 Tensor 在 ``axis`` 维的长度。 - **axis** (int,optional) - 傅里叶变换的轴。如果没有指定,默认是使用最后一维。 - **norm** (str,可选) - 傅里叶变换的缩放模式,缩放系数由变换的方向和缩放模式同时决定。取 @@ -25,14 +25,14 @@ hfft - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: Tensor,数据类型为实数。由输入 Tensor(可能被截断或者补零之后)在指定维度进行傅里叶变换的输 -出。如果指定 n,则输出 Tensor 在傅立叶变换轴的长度为 n,否则为 ``2*(m-1)``,其中``m`` +出。如果指定 n,则输出 Tensor 在傅立叶变换轴的长度为 n,否则为 ``2*(m-1)``,其中``m`` 是输入 Tensor 在 ``axis`` 维的长度。 代码示例 diff --git a/docs/api/paddle/fft/hfftn_cn.rst b/docs/api/paddle/fft/hfftn_cn.rst index 982c4fdcae2..cf0be81dcec 100644 --- a/docs/api/paddle/fft/hfftn_cn.rst +++ b/docs/api/paddle/fft/hfftn_cn.rst @@ -16,11 +16,11 @@ hfftn 叶变换中的参数 ``n``)。对于傅里叶变换的最后一个轴,输入长度要求是 ``s[-1]//2+1``,如果 输入 Tensor 的长度大于 ``s[-1]//2+1``,输入 Tensor 会被截断。如果输入 Tensor 的长度 小于 ``s[-1]//2+1``,则输入 Tensor 会被补零; - + 对于傅里变换其他每一个轴 ``i``,如果输入 Tensor 的长度大于 ``s[i]``,输入 Tensor 会 被截断。如果输入 Tensor 的长度小于 ``s[i]``,则输入 Tensor 会被补零; - 如果未指定 `s`,则 ``s`` 在最后一个傅里叶变换轴取值为 ``2*(m-1)``,其中 ``m`` 是输入 + 如果未指定 `s`,则 ``s`` 在最后一个傅里叶变换轴取值为 ``2*(m-1)``,其中 ``m`` 是输入 Tensor 在最后一个傅里叶变换轴的长度,其余轴为输入 Tensor 在该轴的长度。 - **axes** (Sequence[int],可选) - 计算快速傅里叶变换的轴。如果没有指定,默认是使用最 后 ``len(s)`` 个轴,如果 ``s`` 也没有指定则使用输入数据的全部的轴。 diff --git a/docs/api/paddle/fft/ifft2_cn.rst b/docs/api/paddle/fft/ifft2_cn.rst index b7af076e0df..d44745b3c5c 100644 --- a/docs/api/paddle/fft/ifft2_cn.rst +++ b/docs/api/paddle/fft/ifft2_cn.rst @@ -13,7 +13,7 @@ ifft2 - **x** (Tensor) - 输入 Tensor,数据类型为实数或复数。 - **s** (Sequence[int],可选) - 输出 Tensor 在每一个傅里叶变换轴上的长度(类似一维逆向傅 - 里叶变换中的参数 ``n``)。对于每一个傅里叶变换的轴,如果 ``s`` 中该轴的长度比输入 Tensor + 里叶变换中的参数 ``n``)。对于每一个傅里叶变换的轴,如果 ``s`` 中该轴的长度比输入 Tensor 中对应轴的长度小,输入 Tensor 会被截断。如果 ``s`` 中该轴的长度比输入 Tensor 
中对应轴 的长度大,则输入会被补零。如果 ``s`` 没有指定,则使用输入 Tensor 中由 ``axes`` 指定的各 个轴的长度。 @@ -25,7 +25,7 @@ ifft2 - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + 其中 ``n`` 为 ``s`` 中每个元素连乘。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/fft/ifft_cn.rst b/docs/api/paddle/fft/ifft_cn.rst index 4eb032c14af..b0d411e3705 100644 --- a/docs/api/paddle/fft/ifft_cn.rst +++ b/docs/api/paddle/fft/ifft_cn.rst @@ -22,7 +22,7 @@ ifft - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/fft/ifftn_cn.rst b/docs/api/paddle/fft/ifftn_cn.rst index b7e6735135f..ee4ccee8c6d 100644 --- a/docs/api/paddle/fft/ifftn_cn.rst +++ b/docs/api/paddle/fft/ifftn_cn.rst @@ -13,7 +13,7 @@ N 维离散傅里叶变换的逆变换。在一定的误差范围内,``ifftn(f - **x** (Tensor) - 输入 Tensor,数据类型为实数或复数。 - **s** (Sequence[int],可选) - 输出 Tensor 在每一个傅里叶变换轴上的长度(类似一维逆向傅 - 里叶变换中的参数 ``n``)。对于每一个傅里叶变换的轴,如果 ``s`` 中该轴的长度比输入 Tensor + 里叶变换中的参数 ``n``)。对于每一个傅里叶变换的轴,如果 ``s`` 中该轴的长度比输入 Tensor 中对应轴的长度小,输入 Tensor 会被截断。如果 ``s`` 中该轴的长度比输入 Tensor 中对应轴 的长度大,则输入会被补零。如果 ``s`` 没有指定,则使用输入 Tensor 中由 ``axes`` 指定的各 个轴的长度。 @@ -26,7 +26,7 @@ N 维离散傅里叶变换的逆变换。在一定的误差范围内,``ifftn(f - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + 其中 ``n`` 为 ``s`` 中每个元素连乘。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/fft/ihfft2_cn.rst b/docs/api/paddle/fft/ihfft2_cn.rst index 7fea616520a..c30f7d3d579 100644 --- a/docs/api/paddle/fft/ihfft2_cn.rst +++ b/docs/api/paddle/fft/ihfft2_cn.rst @@ -28,7 +28,7 @@ ihfft2 - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - 其中 ``n`` 为 ``s`` 中每个元素连乘 + 其中 ``n`` 为 ``s`` 中每个元素连乘 - **name** 
(str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/fft/ihfft_cn.rst b/docs/api/paddle/fft/ihfft_cn.rst index cfcab70f7ba..8a688c89db0 100644 --- a/docs/api/paddle/fft/ihfft_cn.rst +++ b/docs/api/paddle/fft/ihfft_cn.rst @@ -23,7 +23,7 @@ ihfft - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/fft/ihfftn_cn.rst b/docs/api/paddle/fft/ihfftn_cn.rst index 5a41a05d521..04716ca4eb6 100644 --- a/docs/api/paddle/fft/ihfftn_cn.rst +++ b/docs/api/paddle/fft/ihfftn_cn.rst @@ -27,7 +27,7 @@ N 维厄米特(Hermitian)傅里叶变换的逆变换。 - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + 其中 ``n`` 为 ``s`` 中每个元素连乘 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/fft/irfft_cn.rst b/docs/api/paddle/fft/irfft_cn.rst index 32f558ea885..ac9c95de6ae 100644 --- a/docs/api/paddle/fft/irfft_cn.rst +++ b/docs/api/paddle/fft/irfft_cn.rst @@ -14,8 +14,8 @@ irfft - **x** (Tensor) - 输入数据,其数据类型为复数。 - **n** (int,可选) - 输出 Tensor 在傅里叶变换轴的长度。输入 Tensor 在该轴的长度必须为 - ``n//2+1``,如果输入 Tensor 的长度大于 ``n//2+1``,输入 Tensor 会被截断。如果输入 - Tensor 的长度小于 ``n//2+1``,则输入 Tensor 会被补零。如果 ``n`` + ``n//2+1``,如果输入 Tensor 的长度大于 ``n//2+1``,输入 Tensor 会被截断。如果输入 + Tensor 的长度小于 ``n//2+1``,则输入 Tensor 会被补零。如果 ``n`` 没有被指定,则取 ``2*(m-1)``,其中,``m`` 是输入 Tensor 在 ``axis`` 维的长度。 - **axis** (int,optional) - 傅里叶变换的轴。如果没有指定,默认是使用最后一维。 - **norm** (str,可选) - 傅里叶变换的缩放模式,缩放系数由变换的方向和缩放模式同时决定。取 @@ -25,14 +25,14 @@ irfft - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: Tensor,数据类型为实数。由输入 Tensor(可能被截断或者补零之后)在指定维度进行傅里叶变换的输 -出。如果指定 n,则输出 Tensor 在傅立叶变换轴的长度为 
n,否则为 ``2*(m-1)``,其中``m`` +出。如果指定 n,则输出 Tensor 在傅立叶变换轴的长度为 n,否则为 ``2*(m-1)``,其中``m`` 是输入 Tensor 在 ``axis`` 维的长度。 代码示例 diff --git a/docs/api/paddle/fft/irfftn_cn.rst b/docs/api/paddle/fft/irfftn_cn.rst index 450b6963b07..6b638b27e13 100644 --- a/docs/api/paddle/fft/irfftn_cn.rst +++ b/docs/api/paddle/fft/irfftn_cn.rst @@ -15,16 +15,16 @@ irfftn - **s** (Sequence[int],可选) - 输出 Tensor 在每一个傅里叶变换轴上的长度(类似一维傅里 叶变换中的参数 ``n``)。 - 对于傅里叶变换的最后一个轴,输入长度要求是 ``s[-1]//2+1``,如果输入 Tensor 的长度大于 + 对于傅里叶变换的最后一个轴,输入长度要求是 ``s[-1]//2+1``,如果输入 Tensor 的长度大于 ``s[-1]//2+1``,输入 Tensor 会被截断。如果输入 Tensor 的长度小于 ``s[-1]//2+1``, 则输入 Tensor 会被补零; - + 对于傅里变换其他每一个轴 ``i``,如果输入 Tensor 的长度大于 ``s[i]``,输入 Tensor 会 被截断。如果输入 Tensor 的长度小于 ``s[i]``,则输入 Tensor 会被补零; 如果未指定 `s`,则 ``s`` 在最后一个傅里叶变换轴取值为 ``2*(m-1)``,其中 ``m`` 是输 入 Tensor 在最后一个傅里叶变换轴的长度,其余轴为输入 Tensor 在该轴的长度。 -- **axes** (Sequence[int],可选) - 计算快速傅里叶变换的轴。如果没有指定,默认是使用最后 +- **axes** (Sequence[int],可选) - 计算快速傅里叶变换的轴。如果没有指定,默认是使用最后 ``len(s)`` 个轴,如果 ``s`` 也没有指定则使用输入数据的全部的轴。 - **norm** (str,可选) - 指定傅里叶变换的缩放模式,缩放系数由变换的方向和模式同时决定。取 值必须是 "forward","backward","ortho"之一,默认值为 "backward"。三种缩放模式对应的 diff --git a/docs/api/paddle/fft/rfft2_cn.rst b/docs/api/paddle/fft/rfft2_cn.rst index 3a4e4a7bdb0..6e2552f3b02 100644 --- a/docs/api/paddle/fft/rfft2_cn.rst +++ b/docs/api/paddle/fft/rfft2_cn.rst @@ -27,7 +27,7 @@ rfft2 - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/fft/rfft_cn.rst b/docs/api/paddle/fft/rfft_cn.rst index bbb9d07111b..dead78e8e0f 100644 --- a/docs/api/paddle/fft/rfft_cn.rst +++ b/docs/api/paddle/fft/rfft_cn.rst @@ -23,7 +23,7 @@ rfft - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git 
a/docs/api/paddle/fft/rfftfreq_cn.rst b/docs/api/paddle/fft/rfftfreq_cn.rst index ce8b5ecda9e..35c3d7d1e8d 100644 --- a/docs/api/paddle/fft/rfftfreq_cn.rst +++ b/docs/api/paddle/fft/rfftfreq_cn.rst @@ -18,7 +18,7 @@ rfftfreq - **n** (int) - 窗长度(傅里叶变换点数)。 - **d** (float,可选) - 采样间隔,采样率的倒数,默认值为 1。 -- **dtype** (str,可选) - 返回 Tensor 的数据类型,默认为 +- **dtype** (str,可选) - 返回 Tensor 的数据类型,默认为 ``paddle.get_default_dtype()`` 返回的类型。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/fft/rfftn_cn.rst b/docs/api/paddle/fft/rfftn_cn.rst index 94505bef3f4..4490895ad06 100644 --- a/docs/api/paddle/fft/rfftn_cn.rst +++ b/docs/api/paddle/fft/rfftn_cn.rst @@ -27,7 +27,7 @@ N 维实数傅里叶变换。 - "backward":正向和逆向变换的缩放系数分别为 ``1`` 和 ``1/n``; - "forward":正向和逆向变换的缩放系数分别为 ``1/n`` 和 ``1``; - "ortho":正向和逆向变换的缩放系数均为 ``1/sqrt(n)``; - + 其中 ``n`` 为 ``s`` 中每个元素连乘。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/flatten_cn.rst b/docs/api/paddle/flatten_cn.rst index 3faabc1962f..213e9072e97 100644 --- a/docs/api/paddle/flatten_cn.rst +++ b/docs/api/paddle/flatten_cn.rst @@ -61,7 +61,7 @@ flatten image_shape=(2, 3, 4, 4) x = paddle.arange(end=image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]) img = paddle.reshape(x, image_shape) / 100 - + out = paddle.flatten(img, start_axis=1, stop_axis=2) # out shape is [2, 12, 4] diff --git a/docs/api/paddle/full_cn.rst b/docs/api/paddle/full_cn.rst index d14eb3199fd..d8832a44f9e 100644 --- a/docs/api/paddle/full_cn.rst +++ b/docs/api/paddle/full_cn.rst @@ -16,7 +16,7 @@ full - **fill_value** (bool|float|int|Tensor) - 用于初始化输出Tensor的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 - **dtype** (np.dtype|str,可选)- 输出变量的数据类型。若为None,则输出变量的数据类型和输入变量相同,默认值为None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: 返回一个存储结果的Tensor,数据类型和dtype相同。 diff --git a/docs/api/paddle/full_like_cn.rst b/docs/api/paddle/full_like_cn.rst index 
b39955e6d02..b70f86ccf9c 100644 --- a/docs/api/paddle/full_like_cn.rst +++ b/docs/api/paddle/full_like_cn.rst @@ -15,7 +15,7 @@ full_like - **fill_value** (bool|float|int) - 用于初始化输出张量的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 - **dtype** (np.dtype|str,可选) - 输出变量的数据类型。若参数为None,则输出变量的数据类型和输入变量相同,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: 返回一个根据 ``x`` 、``fill_value`` 、 ``dtype`` 创建的 Tensor。 @@ -26,7 +26,7 @@ full_like .. code-block:: python import paddle - + input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input') output = paddle.full_like(input, 2.0) # [[2. 2. 2.] diff --git a/docs/api/paddle/gather_cn.rst b/docs/api/paddle/gather_cn.rst index a2cc40eaa37..350120c5313 100644 --- a/docs/api/paddle/gather_cn.rst +++ b/docs/api/paddle/gather_cn.rst @@ -10,7 +10,7 @@ gather .. code-block:: text Given: - + X = [[1, 2], [3, 4], [5, 6]] @@ -40,7 +40,7 @@ gather :::::::::::: .. code-block:: python - + import numpy as np import paddle diff --git a/docs/api/paddle/gather_nd_cn.rst b/docs/api/paddle/gather_nd_cn.rst index daf33fdb585..3a93363b0e6 100644 --- a/docs/api/paddle/gather_nd_cn.rst +++ b/docs/api/paddle/gather_nd_cn.rst @@ -10,7 +10,7 @@ gather_nd .. 
math:: output[(i_0, ..., i_{K-2})] = x[index[(i_0, ..., i_{K-2})]] -显然,:code:`index.shape[-1] <= x.rank` 并且输出张量的维度是 :code:`index.shape[:-1] + x.shape[index.shape[-1]:]` 。 +显然,:code:`index.shape[-1] <= x.rank` 并且输出张量的维度是 :code:`index.shape[:-1] + x.shape[index.shape[-1]:]` 。 示例: @@ -27,9 +27,9 @@ gather_nd - 案例 1: index = [[1]] - - gather_nd(x, index) - = [x[1, :, :]] + + gather_nd(x, index) + = [x[1, :, :]] = [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]] @@ -55,7 +55,7 @@ gather_nd - **x** (Tensor) - 输入 Tensor,数据类型可以是 int32、int64、float32、float64、bool。 - **index** (Tensor) - 输入的索引 Tensor,其数据类型 int32 或者 int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= x.rank` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: diff --git a/docs/api/paddle/get_cuda_rng_state_cn.rst b/docs/api/paddle/get_cuda_rng_state_cn.rst index e521e357307..c7f9062a78b 100644 --- a/docs/api/paddle/get_cuda_rng_state_cn.rst +++ b/docs/api/paddle/get_cuda_rng_state_cn.rst @@ -14,7 +14,7 @@ get_cuda_rng_state 返回 :::::::::::: - + GeneratorState:对象。 代码示例 diff --git a/docs/api/paddle/get_flags_cn.rst b/docs/api/paddle/get_flags_cn.rst index 2d7084f2516..1949457da0f 100644 --- a/docs/api/paddle/get_flags_cn.rst +++ b/docs/api/paddle/get_flags_cn.rst @@ -18,7 +18,7 @@ get_flags :::::::::::: Flag 的值。 - + 代码示例 :::::::::::: diff --git a/docs/api/paddle/grad_cn.rst b/docs/api/paddle/grad_cn.rst index ff6cc0f6c5a..00fb29bd5fa 100644 --- a/docs/api/paddle/grad_cn.rst +++ b/docs/api/paddle/grad_cn.rst @@ -74,7 +74,7 @@ tuple(Tensor),其长度等于 `inputs` 中的变量个数,且第i个返回 x.stop_gradient = False y1 = x * x - y2 = x * 3 + y2 = x * 3 # If grad_outputs=None, dy1 = [1], dy2 = [1]. 
# If grad_outputs=[g1, g2], then: diff --git a/docs/api/paddle/greater_equal_cn.rst b/docs/api/paddle/greater_equal_cn.rst index fbe243b22e9..0f293d53d0e 100644 --- a/docs/api/paddle/greater_equal_cn.rst +++ b/docs/api/paddle/greater_equal_cn.rst @@ -16,7 +16,7 @@ greater_equal - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **y** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: diff --git a/docs/api/paddle/greater_than_cn.rst b/docs/api/paddle/greater_than_cn.rst index 4e08c4c32d6..184309f05e8 100644 --- a/docs/api/paddle/greater_than_cn.rst +++ b/docs/api/paddle/greater_than_cn.rst @@ -15,7 +15,7 @@ greater_than - **x** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **y** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 ::::::::: diff --git a/docs/api/paddle/hub/Overview_cn.rst b/docs/api/paddle/hub/Overview_cn.rst index 3662435bddc..c07bdcb2be4 100644 --- a/docs/api/paddle/hub/Overview_cn.rst +++ b/docs/api/paddle/hub/Overview_cn.rst @@ -21,7 +21,7 @@ paddle.hub 是预训练模型库的集合,用来复用社区生产力,方便 " :ref:`list ` ", "查看Repo支持的模型列表" " :ref:`help ` ", "查看指定模型的文档" " :ref:`load ` ", "加载指定模型" - + .. 
_about_hub_repos: @@ -90,13 +90,13 @@ paddle.hub 是预训练模型库的集合,用来复用社区生产力,方便 import paddle # PaddleClas - models = paddle.hub.list('PaddlePaddle/PaddleClas:develop', source='github', force_reload=True,) + models = paddle.hub.list('PaddlePaddle/PaddleClas:develop', source='github', force_reload=True,) print(models) - docs = paddle.hub.help('PaddlePaddle/PaddleClas:develop', 'alexnet', source='github', force_reload=False,) + docs = paddle.hub.help('PaddlePaddle/PaddleClas:develop', 'alexnet', source='github', force_reload=False,) print(docs) - model = paddle.hub.load('PaddlePaddle/PaddleClas:develop', 'alexnet', source='github', force_reload=False, pretrained=True) + model = paddle.hub.load('PaddlePaddle/PaddleClas:develop', 'alexnet', source='github', force_reload=False, pretrained=True) data = paddle.rand((1, 3, 224, 224)) out = model(data) print(out.shape) # [1, 1000] diff --git a/docs/api/paddle/incubate/autograd/Hessian_cn.rst b/docs/api/paddle/incubate/autograd/Hessian_cn.rst index 47313e42850..054ab6aaff7 100644 --- a/docs/api/paddle/incubate/autograd/Hessian_cn.rst +++ b/docs/api/paddle/incubate/autograd/Hessian_cn.rst @@ -7,7 +7,7 @@ Hessian 计算函数 ``func`` 在 ``xs`` 处的海森矩阵。 -其中,函数 ``func`` 的输入可以为Tensor或Tensor序列,输出要求为只包含单个元素的Tensor, +其中,函数 ``func`` 的输入可以为Tensor或Tensor序列,输出要求为只包含单个元素的Tensor, ``is_batched`` 表示是否支持batch, ``True`` 表示支持并默认第零维作为batch维。 在计算海森矩阵时,所有输入Tensor会沿着batch维外的其它维度进行展平,且当输入为Tensor序列时, @@ -35,7 +35,7 @@ Tensor形状为 ``(B, 1)``,则最终输出海森矩阵形状为 ``(B, M, M)`` ``is_batched=False``,输出形状为 ``(1)`` 。 - **xs** (Tensor|Sequence[Tensor]) - 函数 ``func`` 的输入参数,数据类型为Tensor或 Tensor序列。 -- **is_batched** (bool) - ``True`` 表示包含batch维,且默认第零维为batch维,``False`` +- **is_batched** (bool) - ``True`` 表示包含batch维,且默认第零维为batch维,``False`` 表示不包含batch。默认值为 ``False`` 。 返回 diff --git a/docs/api/paddle/incubate/autograd/Jacobian_cn.rst b/docs/api/paddle/incubate/autograd/Jacobian_cn.rst index b02ee6b527e..ea22c2a3113 100644 --- a/docs/api/paddle/incubate/autograd/Jacobian_cn.rst +++ 
b/docs/api/paddle/incubate/autograd/Jacobian_cn.rst @@ -22,7 +22,7 @@ Tensor经过展平并拼接后的形状为 ``(B, N)``,则最终输出雅可比 对 ``Jacobian`` 多维索引获取整个雅可比矩阵或子矩阵的实际结果,并且实际计算也发生在这一过程,已 经计算的子矩阵也会被缓存以避免重复计算。 -例如,假设 ``Jacobian`` 的实例 ``J`` 形状为 ``(B, M, N)``,假设 ``M>4`` , +例如,假设 ``Jacobian`` 的实例 ``J`` 形状为 ``(B, M, N)``,假设 ``M>4`` , 则 ``J[:, 1:4:1, :]`` 表示获取 ``J`` 的第 ``1`` 行到第 ``3`` 行值,实际计算时,仅会对 第 ``1`` 行到第 ``3`` 进行求值,并且 ``1`` 到 ``3`` 行的计算结果会以行的粒度进行缓存,下次再 获取上述某一行或多行结果时不会发生重复计算。 @@ -41,7 +41,7 @@ Tensor经过展平并拼接后的形状为 ``(B, N)``,则最终输出雅可比 - **func** (Callable) - Python函数,输入参数为 ``xs``,输出为Tensor或Tensor序列。 - **xs** (Tensor|Sequence[Tensor]) - 函数 ``func`` 的输入参数,数据类型为Tensor或 Tensor序列。 -- **is_batched** (bool) - ``True`` 表示包含batch维,且默认第零维为batch维,``False`` +- **is_batched** (bool) - ``True`` 表示包含batch维,且默认第零维为batch维,``False`` 表示不包含batch。默认值为 ``False`` 。 返回 diff --git a/docs/api/paddle/incubate/autograd/Overview_cn.rst b/docs/api/paddle/incubate/autograd/Overview_cn.rst index 8083c38b17d..1bdf4ab6128 100644 --- a/docs/api/paddle/incubate/autograd/Overview_cn.rst +++ b/docs/api/paddle/incubate/autograd/Overview_cn.rst @@ -17,7 +17,7 @@ paddle.incubate.autograd 目录下包含飞桨框架提供的自动微分相关 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.incubate.autograd.enable_prim ` ", "开启基于自动微分基础算子的自动微分机制" " :ref:`paddle.incubate.autograd.disable_prim ` ", "关闭基于自动微分基础算子的自动微分机制" " :ref:`paddle.incubate.autograd.prim_enabled ` ", "显示是否开启了基于自动微分基础算子的自动微分机制" @@ -30,7 +30,7 @@ paddle.incubate.autograd 目录下包含飞桨框架提供的自动微分相关 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.incubate.autograd.prim2orig ` ", "自动微分基础算子转换为等价功能原生算子" @@ -41,7 +41,7 @@ paddle.incubate.autograd 目录下包含飞桨框架提供的自动微分相关 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.incubate.autograd.jvp ` ", "雅可比矩阵与向量乘积" " :ref:`paddle.incubate.autograd.vjp ` ", "向量与雅可比矩阵乘积" " :ref:`paddle.incubate.autograd.Jacobian ` ", "雅可比矩阵" @@ -92,7 +92,7 @@ linearize 和 transpose 程序变换的想法来自 `JAX ` ", "多进程数据读取器" " :ref:`get_worker_info ` ", "获取当前子进程相关信息" - + .. 
_about_dataset_define: 数据集定义相关API @@ -37,7 +37,7 @@ paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的API " :ref:`Dataset ` ", "映射式(map-style)数据集基类定义接口" " :ref:`IterableDataset ` ", "迭代式(iterable-style)数据集基类定义接口" " :ref:`TensorDataset ` ", "张量(Tensor)数据集基类定义接口" - + .. _about_dataset_operate: 数据集操作相关API @@ -65,7 +65,7 @@ paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的API " :ref:`SequenceSampler ` ", "顺序采样器接口" " :ref:`RandomSampler ` ", "随机采样器接口" " :ref:`WeightedRandomSampler ` ", "带权重随机采样器接口" - + .. _about_batch_sampler: 批采样器相关API @@ -77,4 +77,4 @@ paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的API " :ref:`BatchSampler ` ", "批采样器接口" " :ref:`DistributedBatchSampler ` ", "分布式批采样器接口, 用于分布式多卡场景" - + diff --git a/docs/api/paddle/io/Sampler_cn.rst b/docs/api/paddle/io/Sampler_cn.rst index 1e3d4db6ada..bfe6a4c2e40 100644 --- a/docs/api/paddle/io/Sampler_cn.rst +++ b/docs/api/paddle/io/Sampler_cn.rst @@ -23,7 +23,7 @@ Sampler 返回 :::::::::::: Sampler,返回样本下标的迭代器。 - + 代码示例 :::::::::::: diff --git a/docs/api/paddle/io/SequenceSampler_cn.rst b/docs/api/paddle/io/SequenceSampler_cn.rst index 21f22931f49..0a20a1cc72f 100644 --- a/docs/api/paddle/io/SequenceSampler_cn.rst +++ b/docs/api/paddle/io/SequenceSampler_cn.rst @@ -15,7 +15,7 @@ SequenceSampler 返回 :::::::::::: SequenceSampler,返回样本下标的迭代器。 - + 代码示例 :::::::::::: diff --git a/docs/api/paddle/io/WeightedRandomSampler_cn.rst b/docs/api/paddle/io/WeightedRandomSampler_cn.rst index 32e6d25aa49..44587f2718b 100644 --- a/docs/api/paddle/io/WeightedRandomSampler_cn.rst +++ b/docs/api/paddle/io/WeightedRandomSampler_cn.rst @@ -18,7 +18,7 @@ WeightedRandomSampler ::::::::: WeightedRandomSampler,返回根据权重随机采样下标的采样器 - + 代码示例 ::::::::: diff --git a/docs/api/paddle/is_complex_cn.rst b/docs/api/paddle/is_complex_cn.rst index 544ff1b568b..d2ed3ce766d 100644 --- a/docs/api/paddle/is_complex_cn.rst +++ b/docs/api/paddle/is_complex_cn.rst @@ -11,7 +11,7 @@ is_complex 参数 ::::::::: - **x** (Tensor) - 输入 Tensor - + 返回 ::::::::: diff --git a/docs/api/paddle/is_integer_cn.rst 
b/docs/api/paddle/is_integer_cn.rst index 83ea08080ec..162b6637d22 100644 --- a/docs/api/paddle/is_integer_cn.rst +++ b/docs/api/paddle/is_integer_cn.rst @@ -11,7 +11,7 @@ is_integer 参数 ::::::::: - **x** (Tensor) - 输入 Tensor - + 返回 ::::::::: diff --git a/docs/api/paddle/jit/ProgramTranslator_cn.rst b/docs/api/paddle/jit/ProgramTranslator_cn.rst index 6943e303f4d..6473f2bb356 100644 --- a/docs/api/paddle/jit/ProgramTranslator_cn.rst +++ b/docs/api/paddle/jit/ProgramTranslator_cn.rst @@ -203,7 +203,7 @@ get_code(dygraph_func) return x_v - prog_trans = paddle.jit.ProgramTranslator() + prog_trans = paddle.jit.ProgramTranslator() code = prog_trans.get_code(func) print(type(code)) # diff --git a/docs/api/paddle/jit/TracedLayer_cn.rst b/docs/api/paddle/jit/TracedLayer_cn.rst index 56f2dfcdf04..86e0dca6515 100644 --- a/docs/api/paddle/jit/TracedLayer_cn.rst +++ b/docs/api/paddle/jit/TracedLayer_cn.rst @@ -117,7 +117,7 @@ save_inference_model(path, feed=None, fetch=None) - **fetch** (list(int),可选) - 预测模型输出变量的索引。若为None,则TracedLayer的所有输出变量均会作为预测模型的输出。默认值为None。 **返回** - + 无。 **代码示例** diff --git a/docs/api/paddle/jit/load_cn.rst b/docs/api/paddle/jit/load_cn.rst index 1bb8c7ed5d5..4810cd5d351 100644 --- a/docs/api/paddle/jit/load_cn.rst +++ b/docs/api/paddle/jit/load_cn.rst @@ -19,7 +19,7 @@ load ::::::::: - **path** (str) - 载入模型的路径前缀。格式为 ``dirname/file_prefix`` 或者 ``file_prefix`` 。 - **config** (dict,可选) - 其他用于兼容的载入配置选项。这些选项将来可能被移除,如果不是必须使用,不推荐使用这些配置选项。默认为 ``None``。目前支持以下配置选项: - (1) model_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的预测模型文件名,原默认文件名为 ``__model__`` ; + (1) model_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的预测模型文件名,原默认文件名为 ``__model__`` ; (2) params_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的参数文件名,没有默认文件名,默认将各个参数分散存储为单独的文件。 返回 @@ -147,7 +147,7 @@ TranslatedLayer,一个能够执行存储模型的 ``Layer`` 对象。 def __len__(self): return self.num_samples - + paddle.enable_static() image = static.data(name='image', shape=[None, 784], 
dtype='float32') @@ -168,7 +168,7 @@ TranslatedLayer,一个能够执行存储模型的 ``Layer`` 对象。 loader = paddle.io.DataLoader(dataset, feed_list=[image, label], places=place, - batch_size=BATCH_SIZE, + batch_size=BATCH_SIZE, shuffle=True, drop_last=True, return_list=False, @@ -178,7 +178,7 @@ TranslatedLayer,一个能够执行存储模型的 ``Layer`` 对象。 for data in loader(): exe.run( static.default_main_program(), - feed=data, + feed=data, fetch_list=[avg_loss]) model_path = "fc.example.model" diff --git a/docs/api/paddle/jit/save_cn.rst b/docs/api/paddle/jit/save_cn.rst index a3cabbc604b..a529f8b249c 100644 --- a/docs/api/paddle/jit/save_cn.rst +++ b/docs/api/paddle/jit/save_cn.rst @@ -14,7 +14,7 @@ save 存储的模型能够被以下API完整地载入使用: - ``paddle.jit.load`` - - ``paddle.static.load_inference_model`` + - ``paddle.static.load_inference_model`` - 其他预测库API .. note:: diff --git a/docs/api/paddle/less_equal_cn.rst b/docs/api/paddle/less_equal_cn.rst index 16affdeb1fa..4f062ae0058 100644 --- a/docs/api/paddle/less_equal_cn.rst +++ b/docs/api/paddle/less_equal_cn.rst @@ -16,7 +16,7 @@ less_equal - **x** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **y** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: diff --git a/docs/api/paddle/less_than_cn.rst b/docs/api/paddle/less_than_cn.rst index a505579d386..27c29476996 100644 --- a/docs/api/paddle/less_than_cn.rst +++ b/docs/api/paddle/less_than_cn.rst @@ -16,7 +16,7 @@ less_than - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **y** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: diff --git a/docs/api/paddle/linalg/Overview_cn.rst b/docs/api/paddle/linalg/Overview_cn.rst index c4795022399..33618d18b49 100644 --- a/docs/api/paddle/linalg/Overview_cn.rst +++ b/docs/api/paddle/linalg/Overview_cn.rst @@ -26,7 +26,7 @@ 
paddle.linalg 目录下包含飞桨框架支持的线性代数相关API。具体 " :ref:`paddle.linalg.norm ` ", "计算矩阵范数或向量范数" " :ref:`paddle.linalg.matrix_rank ` ", "计算矩阵的秩" - + .. _about_matrix_functions: 矩阵计算相关API diff --git a/docs/api/paddle/linalg/eigvals_cn.rst b/docs/api/paddle/linalg/eigvals_cn.rst index bade69ef8c5..06418d81db3 100644 --- a/docs/api/paddle/linalg/eigvals_cn.rst +++ b/docs/api/paddle/linalg/eigvals_cn.rst @@ -7,7 +7,7 @@ eigvals 计算一个(或一批)普通方阵的特征值。 -.. note:: +.. note:: 该API的反向实现尚未完成,若你的代码需要对其进行反向传播,请使用ref:`cn_api_linalg_eig`。 diff --git a/docs/api/paddle/linalg/lstsq_cn.rst b/docs/api/paddle/linalg/lstsq_cn.rst index b6d5486b5a3..09244087d74 100644 --- a/docs/api/paddle/linalg/lstsq_cn.rst +++ b/docs/api/paddle/linalg/lstsq_cn.rst @@ -21,7 +21,7 @@ lstsq :::::::::::: Tuple,包含 ``solution``、``residuals``、``rank`` 和 ``singular_values``。 - + - ``solution`` 指最小二乘解,形状为 ``(*, N, K)`` 的 Tensor。 - ``residuals`` 指最小二乘解对应的残差,形状为 ``(*, K)`` 的 Tensor;当 ``M > N`` 且 ``x`` 中所有矩阵均为满秩矩阵时,该值会被计算,否则返回空 Tensor。 - ``rank`` 指 ``x`` 中矩阵的秩,形状为 ``(*)`` 的 Tensor;当 ``driver`` 为 'gelsy', 'gelsd', 'gelss' 时,该值会被计算,否则返回空 Tensor。 diff --git a/docs/api/paddle/linalg/lu_cn.rst b/docs/api/paddle/linalg/lu_cn.rst index 632f5ea2e1c..1bd61d1cfbb 100644 --- a/docs/api/paddle/linalg/lu_cn.rst +++ b/docs/api/paddle/linalg/lu_cn.rst @@ -44,7 +44,7 @@ LU和pivot可以通过调用paddle.linalg.lu_unpack展开获得L、U、P矩阵 .. code-block:: python - import paddle + import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) @@ -60,23 +60,23 @@ LU和pivot可以通过调用paddle.linalg.lu_unpack展开获得L、U、P矩阵 # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) - + P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], - # [1., 0., 0.]]), + # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. 
], # [0.20000000, 1. ], - # [0.60000000, 0.50000000]]), + # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) - - # one can verify : X = P @ L @ U ; + + # one can verify : X = P @ L @ U ; diff --git a/docs/api/paddle/linalg/lu_unpack_cn.rst b/docs/api/paddle/linalg/lu_unpack_cn.rst index 2afdd186adf..0edfb289638 100644 --- a/docs/api/paddle/linalg/lu_unpack_cn.rst +++ b/docs/api/paddle/linalg/lu_unpack_cn.rst @@ -38,7 +38,7 @@ lu_unpack .. code-block:: python - import paddle + import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) @@ -54,24 +54,23 @@ lu_unpack # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) - + P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], - # [1., 0., 0.]]), + # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], - # [0.60000000, 0.50000000]]), + # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) - - # one can verify : X = P @ L @ U ; - \ No newline at end of file + + # one can verify : X = P @ L @ U ; diff --git a/docs/api/paddle/linalg/qr_cn.rst b/docs/api/paddle/linalg/qr_cn.rst index 025f4fc362c..01d6797b277 100644 --- a/docs/api/paddle/linalg/qr_cn.rst +++ b/docs/api/paddle/linalg/qr_cn.rst @@ -11,7 +11,7 @@ qr 记 :math:`X` 为一个矩阵,则计算的结果为2个矩阵 :math:`Q` 和 :math:`R`,则满足公式: .. 
math:: - X = Q * R + X = Q * R 其中,:math:`Q` 是正交矩阵,:math:`R` 是上三角矩阵。 diff --git a/docs/api/paddle/linspace_cn.rst b/docs/api/paddle/linspace_cn.rst index 1e45089980e..d3d2947cb0d 100644 --- a/docs/api/paddle/linspace_cn.rst +++ b/docs/api/paddle/linspace_cn.rst @@ -7,7 +7,7 @@ linspace 返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num。 **注意:不进行梯度计算** - + 参数 :::::::::::: diff --git a/docs/api/paddle/load_cn.rst b/docs/api/paddle/load_cn.rst index f9834587167..9680f2bae48 100644 --- a/docs/api/paddle/load_cn.rst +++ b/docs/api/paddle/load_cn.rst @@ -15,22 +15,22 @@ load .. toctree:: :maxdepth: 1 - + ../../../../faq/save_cn.md 参数 ::::::::: - **path** (str|BytesIO) - 载入目标对象实例的路径/内存对象。通常该路径是目标文件的路径,当从用于存储预测模型 API 的存储结果中载入 state_dict 时,该路径可能是一个文件前缀或者目录。 - **\*\*configs** (dict,可选) - 其他用于兼容的载入配置选项。这些选项将来可能被移除,如果不是必须使用,不推荐使用这些配置选项。默认为 ``None``。目前支持以下配置选项: - - - (1) model_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的预测模型文件名,原默认文件名为 ``__model__`` ; + + - (1) model_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的预测模型文件名,原默认文件名为 ``__model__`` ; - (2) params_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的参数文件名,没有默认文件名,默认将各个参数分散存储为单独的文件; - (3) return_numpy(bool) - 如果被指定为 ``True`` ,``load`` 的结果中的Tensor会被转化为 ``numpy.ndarray``,默认为 ``False`` 。 返回 ::::::::: Object,一个可以在 paddle 中使用的对象实例。 - + 代码示例 ::::::::: diff --git a/docs/api/paddle/log10_cn.rst b/docs/api/paddle/log10_cn.rst index 3fd1f9ec1c6..5fae50ab16d 100755 --- a/docs/api/paddle/log10_cn.rst +++ b/docs/api/paddle/log10_cn.rst @@ -18,7 +18,7 @@ Log10激活函数(计算底为10的对数) 参数 :::::::::::: - - **x** (Tensor) – 输入的 Tensor。数据类型为float32、float64。 + - **x** (Tensor) – 输入的 Tensor。数据类型为float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/log1p_cn.rst b/docs/api/paddle/log1p_cn.rst index 5f3e2152991..d4a4e0c9594 100644 --- a/docs/api/paddle/log1p_cn.rst +++ b/docs/api/paddle/log1p_cn.rst @@ -15,7 +15,7 @@ log1p 参数 
:::::::::::: - - **x** (Tensor) – 输入为一个多维的 Tensor,数据类型为 float32,float64。 + - **x** (Tensor) – 输入为一个多维的 Tensor,数据类型为 float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/log2_cn.rst b/docs/api/paddle/log2_cn.rst index 5cd5712ce68..0925ca547be 100755 --- a/docs/api/paddle/log2_cn.rst +++ b/docs/api/paddle/log2_cn.rst @@ -17,7 +17,7 @@ Log2激活函数(计算底为2的对数) 参数 ::::::::: - - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 + - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/log_cn.rst b/docs/api/paddle/log_cn.rst index 90874e865b8..6e235e0f096 100644 --- a/docs/api/paddle/log_cn.rst +++ b/docs/api/paddle/log_cn.rst @@ -18,7 +18,7 @@ Log激活函数(计算自然对数) 参数 :::::::::::: - - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 + - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/logcumsumexp_cn.rst b/docs/api/paddle/logcumsumexp_cn.rst index 8326dc8f62f..49b5acda1e1 100644 --- a/docs/api/paddle/logcumsumexp_cn.rst +++ b/docs/api/paddle/logcumsumexp_cn.rst @@ -15,7 +15,7 @@ logcumsumexp .. math:: logcumsumexp(x)_{ij} = log \sum_{i=0}^{j}exp(x_{ij}) - + 参数 ::::::::: - **x** (Tensor) - 需要进行操作的Tensor。 diff --git a/docs/api/paddle/logit_cn.rst b/docs/api/paddle/logit_cn.rst index 5629519cb80..74aab79d03b 100644 --- a/docs/api/paddle/logit_cn.rst +++ b/docs/api/paddle/logit_cn.rst @@ -8,7 +8,7 @@ logit 实现了logit层。若eps为默认值None,并且 ``x`` < 0 或者 ``x`` > 1,该函数将返回NaN,OP的计算公式如下: .. math:: - logit(x) = ln(\frac{x}{1-x}) + logit(x) = ln(\frac{x}{1-x}) 其中,:math:`x`` 为输入的 Tensor,且和eps有着如下关系: diff --git a/docs/api/paddle/logspace_cn.rst b/docs/api/paddle/logspace_cn.rst index 142c5f2a5e1..8d7cd25e7e9 100644 --- a/docs/api/paddle/logspace_cn.rst +++ b/docs/api/paddle/logspace_cn.rst @@ -9,7 +9,7 @@ logspace .. 
note:: ``paddle.logspace`` 不进行梯度计算。 - + 参数 :::::::::::: diff --git a/docs/api/paddle/masked_select_cn.rst b/docs/api/paddle/masked_select_cn.rst index 88c1fe15fac..844bf710b77 100644 --- a/docs/api/paddle/masked_select_cn.rst +++ b/docs/api/paddle/masked_select_cn.rst @@ -15,7 +15,7 @@ masked_select - **x** (Tensor) - 输入Tensor,数据类型为 float32,float64,int32 或者 int64。 - **mask** (Tensor) - 用于索引的二进制掩码的 Tensor,数据类型为 bool。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: 返回一个根据 ``mask`` 选择的的 Tensor。 diff --git a/docs/api/paddle/meshgrid_cn.rst b/docs/api/paddle/meshgrid_cn.rst index c82a2245e62..6e3c0e35a3e 100644 --- a/docs/api/paddle/meshgrid_cn.rst +++ b/docs/api/paddle/meshgrid_cn.rst @@ -18,7 +18,7 @@ meshgrid 返回 :::::::::::: - + k 个 k 维 ``Tensor``,每个形状均为(N1, N2, ..., Nk)。 diff --git a/docs/api/paddle/metric/Auc_cn.rst b/docs/api/paddle/metric/Auc_cn.rst index 10286086ceb..7c74dd9cad8 100644 --- a/docs/api/paddle/metric/Auc_cn.rst +++ b/docs/api/paddle/metric/Auc_cn.rst @@ -30,16 +30,16 @@ Auc import numpy as np import paddle - + m = paddle.metric.Auc() - + n = 8 class0_preds = np.random.random(size = (n, 1)) class1_preds = 1 - class0_preds - + preds = np.concatenate((class0_preds, class1_preds), axis=1) labels = np.random.randint(2, size = (n, 1)) - + m.update(preds=preds, labels=labels) res = m.accumulate() @@ -47,42 +47,42 @@ Auc :::::::::::: 在Model API中的示例 - + .. 
code-block:: python import numpy as np import paddle import paddle.nn as nn - + class Data(paddle.io.Dataset): def __init__(self): super(Data, self).__init__() self.n = 1024 self.x = np.random.randn(self.n, 10).astype('float32') self.y = np.random.randint(2, size=(self.n, 1)).astype('int64') - + def __getitem__(self, idx): return self.x[idx], self.y[idx] - + def __len__(self): return self.n - + model = paddle.Model(nn.Sequential( nn.Linear(10, 2), nn.Softmax()) ) optim = paddle.optimizer.Adam( learning_rate=0.001, parameters=model.parameters()) - + def loss(x, y): return nn.functional.nll_loss(paddle.log(x), y) - + model.prepare( optim, loss=loss, metrics=paddle.metric.Auc()) data = Data() model.fit(data, batch_size=16) - + 方法 :::::::::::: @@ -92,11 +92,11 @@ update(pred, label, *args) 更新AUC计算的状态。 **参数** - + - **preds** (numpy.array | Tensor):一个shape为[batch_size, 2]的Numpy数组或Tensor,preds[i][j]表示第i个样本类别为j的概率。 - **labels** (numpy.array | Tensor):一个shape为[batch_size, 1]的Numpy数组或Tensor,labels[i]是0或1,表示第i个样本的类别。 -**返回** +**返回** 无。 @@ -106,7 +106,7 @@ reset() 清空状态和计算结果。 -**返回** +**返回** 无。 @@ -116,7 +116,7 @@ accumulate() 累积的统计指标,计算和返回AUC值。 -**返回** +**返回** AUC值,一个标量。 @@ -126,6 +126,6 @@ name() 返回Metric实例的名字,参考上述的name,默认是'auc'。 -**返回** +**返回** 评估的名字,string类型。 diff --git a/docs/api/paddle/metric/Metric_cn.rst b/docs/api/paddle/metric/Metric_cn.rst index 6d76a527bb4..a616acc2c0b 100644 --- a/docs/api/paddle/metric/Metric_cn.rst +++ b/docs/api/paddle/metric/Metric_cn.rst @@ -9,14 +9,14 @@ Metric 评估器metric的基类。 用法: - + .. code-block:: text m = SomeMetric() for prediction, label in ...: m.update(prediction, label) m.accumulate() - + `compute` 接口的进阶用法: 在 `compute` 中可以使用PaddlePaddle内置的算子进行评估器的状态,而不是通过 @@ -50,9 +50,9 @@ Python/NumPy,这样可以加速计算。`update` 接口将 `compute` 的输出 例如,预测结果包含10类,`pred` 的shape是[N, 10],`label` 的shape是[N, 1],N是batch size,我们需要计算top-1和top-5的准确率, 可以在 `compute` 中计算每个样本的top-5得分,正确预测的矩阵的shape是[N, 5]。 - + .. 
code-block:: python - + def compute(pred, label): # sort prediction and slice the top-5 scores pred = paddle.argsort(pred, descending=True)[:, :5] @@ -66,7 +66,7 @@ Python/NumPy,这样可以加速计算。`update` 接口将 `compute` 的输出 在 `compute` 中的计算,使用内置的算子(可以跑在GPU上,使得速度更快)。作为 `update` 的输入,该接口计算如下: .. code-block:: python - + def update(self, correct): accs = [] for i, k in enumerate(self.topk): diff --git a/docs/api/paddle/metric/Precision_cn.rst b/docs/api/paddle/metric/Precision_cn.rst index c1548ec7e09..af43d7d253d 100644 --- a/docs/api/paddle/metric/Precision_cn.rst +++ b/docs/api/paddle/metric/Precision_cn.rst @@ -23,7 +23,7 @@ Precision :::::::::::: 独立使用示例 - + .. code-block:: python import numpy as np @@ -41,27 +41,27 @@ Precision :::::::::::: 在Model API中的示例 - + .. code-block:: python import numpy as np - + import paddle import paddle.nn as nn - + class Data(paddle.io.Dataset): def __init__(self): super(Data, self).__init__() self.n = 1024 self.x = np.random.randn(self.n, 10).astype('float32') self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') - + def __getitem__(self, idx): return self.x[idx], self.y[idx] - + def __len__(self): return self.n - + model = paddle.Model(nn.Sequential( nn.Linear(10, 1), nn.Sigmoid() @@ -72,10 +72,10 @@ Precision optim, loss=nn.BCELoss(), metrics=paddle.metric.Precision()) - + data = Data() model.fit(data, batch_size=16) - + 方法 :::::::::::: update(preds, labels, *args) diff --git a/docs/api/paddle/metric/Recall_cn.rst b/docs/api/paddle/metric/Recall_cn.rst index bc36c8566a2..84257a7ca67 100644 --- a/docs/api/paddle/metric/Recall_cn.rst +++ b/docs/api/paddle/metric/Recall_cn.rst @@ -24,7 +24,7 @@ Recall :::::::::::: 独立使用示例 - + .. code-block:: python import numpy as np @@ -41,27 +41,27 @@ Recall 代码示例 2 :::::::::::: 在Model API中的示例 - + .. 
code-block:: python import numpy as np - + import paddle import paddle.nn as nn - + class Data(paddle.io.Dataset): def __init__(self): super(Data, self).__init__() self.n = 1024 self.x = np.random.randn(self.n, 10).astype('float32') self.y = np.random.randint(2, size=(self.n, 1)).astype('float32') - + def __getitem__(self, idx): return self.x[idx], self.y[idx] - + def __len__(self): return self.n - + model = paddle.Model(nn.Sequential( nn.Linear(10, 1), nn.Sigmoid() @@ -72,10 +72,10 @@ Recall optim, loss=nn.BCELoss(), metrics=[paddle.metric.Precision(), paddle.metric.Recall()]) - + data = Data() model.fit(data, batch_size=16) - + 方法 :::::::::::: update(preds, labels, *args) diff --git a/docs/api/paddle/multiplex_cn.rst b/docs/api/paddle/multiplex_cn.rst index cc50b1eb804..076ff2ac895 100644 --- a/docs/api/paddle/multiplex_cn.rst +++ b/docs/api/paddle/multiplex_cn.rst @@ -16,7 +16,7 @@ multiplex 示例: .. code-block:: text - + # 输入为4个shape为[4,4]的Tensor inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]], [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]], @@ -25,7 +25,7 @@ multiplex # index为shape为[4,1]的Tensor index = [[3],[0],[1],[2]] - + # 输出shape为[4,4] out = [[3,0,3,4] // out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4] [0,1,3,4] // out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4] diff --git a/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst b/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst index f51bacaaf95..d765afef7c1 100644 --- a/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst @@ -19,13 +19,13 @@ AdaptiveMaxPool2D Output(i) &= max(Input[lstart:lend]) hstart &= floor(i * H_{in} / H_{out}) - + hend &= ceil((i + 1) * H_{in} / H_{out}) - + wstart &= floor(j * W_{in} / W_{out}) - + wend &= ceil((j + 1) * W_{in} / W_{out}) - + Output(i ,j) &= max(Input[hstart:hend, wstart:wend]) 参数 diff --git a/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst b/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst index 43f1028de8a..b0dc38dcc26 100644 
--- a/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst @@ -13,17 +13,17 @@ AdaptiveMaxPool3D .. math:: dstart &= floor(i * D_{in} / D_{out}) - + dend &= ceil((i + 1) * D_{in} / D_{out}) - + hstart &= floor(j * H_{in} / H_{out}) - + hend &= ceil((j + 1) * H_{in} / H_{out}) - + wstart &= floor(k * W_{in} / W_{out}) - + wend &= ceil((k + 1) * W_{in} / W_{out}) - + Output(i ,j, k) &= max(Input[dstart:dend, hstart:hend, wstart:wend]) 参数 diff --git a/docs/api/paddle/nn/AlphaDropout_cn.rst b/docs/api/paddle/nn/AlphaDropout_cn.rst index d10fd1c0c68..c6d2d56785f 100644 --- a/docs/api/paddle/nn/AlphaDropout_cn.rst +++ b/docs/api/paddle/nn/AlphaDropout_cn.rst @@ -5,7 +5,7 @@ AlphaDropout .. py:function:: paddle.nn.AlphaDropout(p=0.5, name=None) -AlphaDropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过AlphaDropout计算之后,输出的均值和方差与输入保持一致。AlphaDropout通常与SELU激活函数组合使用。论文请参考:`Self-Normalizing Neural Networks `_ +AlphaDropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过AlphaDropout计算之后,输出的均值和方差与输入保持一致。AlphaDropout通常与SELU激活函数组合使用。论文请参考:`Self-Normalizing Neural Networks `_ 在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 diff --git a/docs/api/paddle/nn/BatchNorm1D_cn.rst b/docs/api/paddle/nn/BatchNorm1D_cn.rst index d6651249e17..1905accc7af 100644 --- a/docs/api/paddle/nn/BatchNorm1D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm1D_cn.rst @@ -6,7 +6,7 @@ BatchNorm1D .. 
py:class:: paddle.nn.BatchNorm1D(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCL', name=None) -该接口用于构建 ``BatchNorm1D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理2D或者3D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``BatchNorm1D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理2D或者3D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ 当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: @@ -57,9 +57,9 @@ BatchNorm1D - input:形状为(批大小,通道数)的2-D Tensor 或(批大小,通道数,长度)的3-D Tensor。 - output:和输入形状一样。 -.. note:: +.. note:: 目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 - + 代码示例 :::::::::::: @@ -71,7 +71,7 @@ BatchNorm1D np.random.seed(123) x_data = np.random.random(size=(2, 1, 3)).astype('float32') - x = paddle.to_tensor(x_data) + x = paddle.to_tensor(x_data) batch_norm = paddle.nn.BatchNorm1D(1) batch_norm_out = batch_norm(x) diff --git a/docs/api/paddle/nn/BatchNorm2D_cn.rst b/docs/api/paddle/nn/BatchNorm2D_cn.rst index 92c361ab9ac..d1d52bbb982 100644 --- a/docs/api/paddle/nn/BatchNorm2D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm2D_cn.rst @@ -6,7 +6,7 @@ BatchNorm2D .. 
py:class:: paddle.nn.BatchNorm2D(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCHW', name=None): -该接口用于构建 ``BatchNorm2D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理4D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``BatchNorm2D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理4D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ 当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: @@ -57,9 +57,9 @@ BatchNorm2D - input:形状为(批大小,通道数,高度,宽度)的4-D Tensor 或(批大小,通道数,宽度,高度)的4-D Tensor。 - output:和输入形状一样。 -.. note:: +.. note:: 目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 - + 代码示例 :::::::::::: @@ -71,7 +71,7 @@ BatchNorm2D np.random.seed(123) x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) + x = paddle.to_tensor(x_data) batch_norm = paddle.nn.BatchNorm2D(1) batch_norm_out = batch_norm(x) diff --git a/docs/api/paddle/nn/BatchNorm3D_cn.rst b/docs/api/paddle/nn/BatchNorm3D_cn.rst index 75f0ba42e2b..a733c8469ad 100644 --- a/docs/api/paddle/nn/BatchNorm3D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm3D_cn.rst @@ -6,7 +6,7 @@ BatchNorm3D .. 
py:class:: paddle.nn.BatchNorm3D(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCDHW', name=None): -该接口用于构建 ``BatchNorm3D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理4D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``BatchNorm3D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理4D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ 当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: @@ -59,7 +59,7 @@ BatchNorm3D .. note:: 目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 - + 代码示例 :::::::::::: @@ -71,7 +71,7 @@ BatchNorm3D np.random.seed(123) x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) + x = paddle.to_tensor(x_data) batch_norm = paddle.nn.BatchNorm3D(1) batch_norm_out = batch_norm(x) diff --git a/docs/api/paddle/nn/BatchNorm_cn.rst b/docs/api/paddle/nn/BatchNorm_cn.rst index 16827bca00e..28ac76b3a94 100644 --- a/docs/api/paddle/nn/BatchNorm_cn.rst +++ b/docs/api/paddle/nn/BatchNorm_cn.rst @@ -8,7 +8,7 @@ BatchNorm -该接口用于构建 ``BatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。其中实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``BatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。其中实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ 当use_global_stats = False时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: diff --git a/docs/api/paddle/nn/BeamSearchDecoder_cn.rst 
b/docs/api/paddle/nn/BeamSearchDecoder_cn.rst index 8eab6c18940..d71387297b0 100644 --- a/docs/api/paddle/nn/BeamSearchDecoder_cn.rst +++ b/docs/api/paddle/nn/BeamSearchDecoder_cn.rst @@ -9,9 +9,9 @@ BeamSearchDecoder - -带beam search解码策略的解码器。该接口包装一个cell来计算概率,然后执行一个beam search步骤计算得分,并为每个解码步骤选择候选输出。更多详细信息请参阅 `Beam search `_ - + +带beam search解码策略的解码器。该接口包装一个cell来计算概率,然后执行一个beam search步骤计算得分,并为每个解码步骤选择候选输出。更多详细信息请参阅 `Beam search `_ + **注意** 在使用beam search解码时,cell的输入和状态将被扩展到 :math:`beam\_size`,得到 :math:`[batch\_size * beam\_size, ...]` 一样的形状,这个操作在BeamSearchDecoder中自动完成,因此,其他任何在 :code:`cell.call` 中使用的Tensor,如果形状为 :math:`[batch\_size, ...]`,都必须先手动使用 :code:`BeamSearchDecoder.tile_beam_merge_with_batch` 接口扩展。最常见的情况是带注意机制的编码器输出。 参数 @@ -50,7 +50,7 @@ tile_beam_merge_with_batch(x, beam_size) Tensor,形状为 :math:`[batch\_size * beam\_size, ...]` 的Tensor,其数据类型与 :code:`x` 相同。 - + _split_batch_beams(x) ''''''''' @@ -138,16 +138,16 @@ tuple,一个元组 :code:`(initial_inputs, initial_states, finished)`。:code: _beam_search_step(time, logits, next_cell_states, beam_state) ''''''''' - + 计算得分并选择候选id。 - + **参数** - **time** (Variable) - 调用者提供的形状为[1]的Tensor,表示当前解码的时间步长。其数据类型为int64。 - **logits** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的Tensor,表示当前时间步的logits。其数据类型为float32。 - **next_cell_states** (Variable) - 单个Tensor变量或Tensor变量组成的嵌套结构。它的结构,形状和数据类型与 :code:`initialize()` 的返回值 :code:`initial_states` 中的 :code:`cell_states` 相同。它代表该cell的下一个状态。 - **beam_state** (Variable) - Tensor变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`step()` 返回的 :code:`beam_search_state` 相同。 - + **返回** tuple,一个元组 :code:`(beam_search_output, beam_search_state)`。:code:`beam_search_output` 是Tensor变量的命名元组,字段为 :code:`scores,predicted_ids parent_ids`。其中 :code:`scores,predicted_ids,parent_ids` 都含有一个Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`beam_search_state` 具有与输入参数 :code:`beam_state` 相同的结构,形状和数据类型。 @@ -157,14 +157,14 @@ step(time, inputs, states, 
**kwargs) ''''''''' 执行beam search解码步骤,该步骤使用 :code:`cell` 来计算概率,然后执行beam search步骤以计算得分并选择候选标记ID。 - + **参数** - **time** (Variable) - 调用者提供的形状为[1]的Tensor,表示当前解码的时间步长。其数据类型为int64。。 - **inputs** (Variable) - Tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 - **states** (Variable) - Tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 - **kwargs** - 附加的关键字参数,由调用者提供。 - + **返回** tuple,一个元组 :code:`(beam_search_output,beam_search_state,next_inputs,finish)` 。:code:`beam_search_state` 和参数 :code:`states` 具有相同的结构,形状和数据类型。:code:`next_inputs` 与输入参数 :code:`inputs` 具有相同的结构,形状和数据类型。:code:`beam_search_output` 是Tensor变量的命名元组(字段包括 :code:`scores,predicted_ids,parent_ids` ),其中 :code:`scores,predicted_ids,parent_ids` 都含有一个Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`finished` 是一个bool类型的Tensor,形状为 :math:`[batch\_size,beam\_size]`。 @@ -172,15 +172,15 @@ tuple,一个元组 :code:`(beam_search_output,beam_search_state,next_input finalize(outputs, final_states, sequence_lengths) ''''''''' - + 使用 :code:`gather_tree` 沿beam search树回溯并构建完整的预测序列。 - + **参数** - **outputs** (Variable) - Tensor变量组成的结构(命名元组),该结构和数据类型与 :code:`output_dtype` 相同。Tensor将所有时间步的输出堆叠,因此具有形状 :math:`[time\_step,batch\_size,...]`。 - **final_states** (Variable) - Tensor变量组成的结构(命名元组)。它是 :code:`decoder.step` 在最后一个解码步骤返回的 :code:`next_states`,因此具有与任何时间步的 :code:`state` 相同的结构、形状和数据类型。 - **sequence_lengths** (Variable) - Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为int64。它包含解码期间确定的每个beam的序列长度。 - + **返回** tuple,一个元组 :code:`(predicted_ids, final_states)`。:code:`predicted_ids` 是一个Tensor,形状为 :math:`[time\_step,batch\_size,beam\_size]`,数据类型为int64。:code:`final_states` 与输入参数 :code:`final_states` 相同。 diff --git a/docs/api/paddle/nn/BiRNN_cn.rst b/docs/api/paddle/nn/BiRNN_cn.rst index 95eafc1fb3f..5196227958a 100644 --- a/docs/api/paddle/nn/BiRNN_cn.rst +++ 
b/docs/api/paddle/nn/BiRNN_cn.rst @@ -17,7 +17,7 @@ BiRNN - **cell_fw** (RNNCellBase) - 前向cell。RNNCellBase类的一个实例。 - **cell_bw** (RNNCellBase) - 后向cell。RNNCellBase类的一个实例。 - **time_major** (bool,可选) - 指定input的第一个维度是否是time steps。默认为False。 - + 输入 :::::::::::: @@ -30,7 +30,7 @@ BiRNN - **outputs** (Tensor) - 输出,由前向和后向cell的输出拼接得到。如果time_major为False,则Tensor的形状为[batch_size,time_steps,cell_fw.hidden_size + cell_bw.hidden_size],如果time_major为True,则Tensor的形状为[time_steps,batch_size,cell_fw.hidden_size + cell_bw.hidden_size]。 - **final_states** (tuple) - 前向和后向cell的最终状态。 - + .. Note:: 该类是一个封装rnn cell的低级api,用户在使用forward函数时须确保initial_states满足cell的要求。 diff --git a/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst b/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst index 789a77a51e7..3910f7cf8aa 100644 --- a/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst +++ b/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst @@ -7,7 +7,7 @@ ClipGradByGlobalNorm - + 将一个 Tensor列表 :math:`t\_list` 中所有Tensor的L2范数之和,限定在 ``clip_norm`` 范围内。 - 如果范数之和大于 ``clip_norm``,则所有 Tensor 会乘以一个系数进行压缩 @@ -22,10 +22,10 @@ ClipGradByGlobalNorm .. math:: \\t\_list[i]=t\_list[i]∗\frac{clip\_norm}{max(global\_norm,clip\_norm)}\\ - + 其中: -.. math:: +.. math:: \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(l2norm(t\_list[i]))^2}\\ @@ -37,14 +37,14 @@ ClipGradByGlobalNorm 代码示例 :::::::::::: - + .. 
code-block:: python import paddle x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32') - linear = paddle.nn.Linear(in_features=10, out_features=10, - weight_attr=paddle.ParamAttr(need_clip=True), + linear = paddle.nn.Linear(in_features=10, out_features=10, + weight_attr=paddle.ParamAttr(need_clip=True), bias_attr=paddle.ParamAttr(need_clip=False)) out = linear(x) loss = paddle.mean(out) @@ -53,4 +53,4 @@ ClipGradByGlobalNorm clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) sdg.step() - + diff --git a/docs/api/paddle/nn/ClipGradByNorm_cn.rst b/docs/api/paddle/nn/ClipGradByNorm_cn.rst index 0be25ab43c0..94a5bcaf8ec 100644 --- a/docs/api/paddle/nn/ClipGradByNorm_cn.rst +++ b/docs/api/paddle/nn/ClipGradByNorm_cn.rst @@ -43,14 +43,14 @@ ClipGradByNorm 代码示例 :::::::::::: - + .. code-block:: python import paddle x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32') - linear = paddle.nn.Linear(in_features=10, out_features=10, - weight_attr=paddle.ParamAttr(need_clip=True), + linear = paddle.nn.Linear(in_features=10, out_features=10, + weight_attr=paddle.ParamAttr(need_clip=True), bias_attr=paddle.ParamAttr(need_clip=False)) out = linear(x) loss = paddle.mean(out) @@ -59,4 +59,4 @@ ClipGradByNorm clip = paddle.nn.ClipGradByNorm(clip_norm=1.0) sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) sdg.step() - + diff --git a/docs/api/paddle/nn/ClipGradByValue_cn.rst b/docs/api/paddle/nn/ClipGradByValue_cn.rst index b764f78443c..62c63687a31 100644 --- a/docs/api/paddle/nn/ClipGradByValue_cn.rst +++ b/docs/api/paddle/nn/ClipGradByValue_cn.rst @@ -28,14 +28,14 @@ ClipGradByValue 代码示例 :::::::::::: - + .. 
code-block:: python import paddle x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32') - linear = paddle.nn.Linear(in_features=10, out_features=10, - weight_attr=paddle.ParamAttr(need_clip=True), + linear = paddle.nn.Linear(in_features=10, out_features=10, + weight_attr=paddle.ParamAttr(need_clip=True), bias_attr=paddle.ParamAttr(need_clip=False)) out = linear(x) loss = paddle.mean(out) diff --git a/docs/api/paddle/nn/Conv1DTranspose_cn.rst b/docs/api/paddle/nn/Conv1DTranspose_cn.rst index 2aaa81aacc2..e9ea45e8672 100644 --- a/docs/api/paddle/nn/Conv1DTranspose_cn.rst +++ b/docs/api/paddle/nn/Conv1DTranspose_cn.rst @@ -42,7 +42,7 @@ Conv1DTranspose - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N是批尺寸,C是通道数,L特征长度。默认值:"NCL"。 - + 形状 :::::::::::: diff --git a/docs/api/paddle/nn/Conv1D_cn.rst b/docs/api/paddle/nn/Conv1D_cn.rst index 48c205abb87..b618bd3c137 100644 --- a/docs/api/paddle/nn/Conv1D_cn.rst +++ b/docs/api/paddle/nn/Conv1D_cn.rst @@ -53,7 +53,7 @@ weight bias ''''''''' 本层的可学习偏置,类型为 ``Parameter`` - + 形状 :::::::::::: - 输入::math:`(N, C_{in}, L_{in})` diff --git a/docs/api/paddle/nn/Conv2DTranspose_cn.rst b/docs/api/paddle/nn/Conv2DTranspose_cn.rst index 4ba2325b45e..6df3b675539 100644 --- a/docs/api/paddle/nn/Conv2DTranspose_cn.rst +++ b/docs/api/paddle/nn/Conv2DTranspose_cn.rst @@ -50,7 +50,7 @@ Conv2DTranspose - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 - + 形状 :::::::::::: diff --git a/docs/api/paddle/nn/Conv2D_cn.rst 
b/docs/api/paddle/nn/Conv2D_cn.rst index 69080e55334..26735897aab 100644 --- a/docs/api/paddle/nn/Conv2D_cn.rst +++ b/docs/api/paddle/nn/Conv2D_cn.rst @@ -53,7 +53,7 @@ weight bias ''''''''' 本层的可学习偏置,类型为 ``Parameter`` - + 形状 :::::::::::: - 输入::math:`(N, C_{in}, H_{in}, W_{in})` @@ -61,7 +61,7 @@ bias - 卷积核::math:`(C_{out}, C_{in}, K_{h}, K_{w})` - 偏置::math:`(C_{out})` - + - 输出::math:`(N, C_{out}, H_{out}, W_{out})` 其中: diff --git a/docs/api/paddle/nn/Conv3D_cn.rst b/docs/api/paddle/nn/Conv3D_cn.rst index 75d6e12e858..9af5722e99e 100644 --- a/docs/api/paddle/nn/Conv3D_cn.rst +++ b/docs/api/paddle/nn/Conv3D_cn.rst @@ -52,7 +52,7 @@ weight bias ''''''''' 本层的可学习偏置,类型为 ``Parameter`` - + 形状 :::::::::::: diff --git a/docs/api/paddle/nn/Dropout2D_cn.rst b/docs/api/paddle/nn/Dropout2D_cn.rst index 8fdae68306d..59d90231572 100644 --- a/docs/api/paddle/nn/Dropout2D_cn.rst +++ b/docs/api/paddle/nn/Dropout2D_cn.rst @@ -5,7 +5,7 @@ Dropout2D .. py:function:: paddle.nn.Dropout2D(p=0.5, data_format='NCHW', name=None) -根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCHW` 的4维张量,通道特征图指的是其中的形状为 `HW` 的2维特征图)。Dropout2D可以提高通道特征图之间的独立性。论文请参考:`Efficient Object Localization Using Convolutional Networks `_ +根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCHW` 的4维张量,通道特征图指的是其中的形状为 `HW` 的2维特征图)。Dropout2D可以提高通道特征图之间的独立性。论文请参考:`Efficient Object Localization Using Convolutional Networks `_ 在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 diff --git a/docs/api/paddle/nn/Dropout3D_cn.rst b/docs/api/paddle/nn/Dropout3D_cn.rst index f173f21c434..f76dc502541 100644 --- a/docs/api/paddle/nn/Dropout3D_cn.rst +++ b/docs/api/paddle/nn/Dropout3D_cn.rst @@ -5,7 +5,7 @@ Dropout3D .. 
py:function:: paddle.nn.Dropout3D(p=0.5, data_format='NCDHW', name=None) -根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCDHW` 的5维张量,通道特征图指的是其中的形状为 `DHW` 的3维特征图)。Dropout3D可以提高通道特征图之间的独立性。论文请参考:`Efficient Object Localization Using Convolutional Networks `_ +根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCDHW` 的5维张量,通道特征图指的是其中的形状为 `DHW` 的3维特征图)。Dropout3D可以提高通道特征图之间的独立性。论文请参考:`Efficient Object Localization Using Convolutional Networks `_ 在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 diff --git a/docs/api/paddle/nn/Dropout_cn.rst b/docs/api/paddle/nn/Dropout_cn.rst index 483af6a145d..5dd87d2e676 100644 --- a/docs/api/paddle/nn/Dropout_cn.rst +++ b/docs/api/paddle/nn/Dropout_cn.rst @@ -5,7 +5,7 @@ Dropout .. py:function:: paddle.nn.Dropout(p=0.5, axis=None, mode="upscale_in_train”, name=None) -Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p`,在训练过程中随机将一些神经元输出设置为0,通过阻止神经元节点间的相关性来减少过拟合。论文请参考:`Improving neural networks by preventing co-adaptation of feature detectors `_ +Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p`,在训练过程中随机将一些神经元输出设置为0,通过阻止神经元节点间的相关性来减少过拟合。论文请参考:`Improving neural networks by preventing co-adaptation of feature detectors `_ 在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 diff --git a/docs/api/paddle/nn/GRUCell_cn.rst b/docs/api/paddle/nn/GRUCell_cn.rst index 8ecf9af9e2e..200a64db0c4 100644 --- a/docs/api/paddle/nn/GRUCell_cn.rst +++ b/docs/api/paddle/nn/GRUCell_cn.rst @@ -28,7 +28,7 @@ GRUCell 其中: - :math:`\sigma` :sigmoid激活函数。 - + 详情请参考论文:`An Empirical Exploration of Recurrent Network Architectures `_ 。 @@ -50,7 +50,7 @@ GRUCell - **weight_hh** (Parameter) - hidden到hidden的变换矩阵的权重。形状为(3 * hidden_size, hidden_size)。对应公式中的 :math:`W_{hr}, W_{hz}, W_{hc}`。 - **bias_ih** (Parameter) - input到hidden的变换矩阵的偏置。形状为(3 * hidden_size, )。对应公式中的 :math:`b_{ir}, b_{iz}, b_{ic}`。 - **bias_hh** (Parameter) - hidden到hidden的变换矩阵的偏置。形状为(3 * hidden_size, )。对应公式中的 :math:`b_{hr}, b_{hz}, b_{hc}`。 - + 输入 :::::::::::: @@ -60,7 +60,7 @@ GRUCell 输出: - **outputs** (Tensor) - 输出。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 - **new_states** (Tensor) - 
新一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 - + .. Note:: 所有的变换矩阵的权重和偏置都默认初始化为Uniform(-std, std),其中std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 diff --git a/docs/api/paddle/nn/GRU_cn.rst b/docs/api/paddle/nn/GRU_cn.rst index 1728736663a..4cd6de23970 100644 --- a/docs/api/paddle/nn/GRU_cn.rst +++ b/docs/api/paddle/nn/GRU_cn.rst @@ -42,7 +42,7 @@ GRU - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 - + 输入 :::::::::::: diff --git a/docs/api/paddle/nn/GroupNorm_cn.rst b/docs/api/paddle/nn/GroupNorm_cn.rst index ce569d65e3f..90c31af1b30 100644 --- a/docs/api/paddle/nn/GroupNorm_cn.rst +++ b/docs/api/paddle/nn/GroupNorm_cn.rst @@ -36,7 +36,7 @@ GroupNorm np.random.seed(123) x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32') - x = paddle.to_tensor(x_data) + x = paddle.to_tensor(x_data) group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6) group_norm_out = group_norm(x) diff --git a/docs/api/paddle/nn/InstanceNorm1D_cn.rst b/docs/api/paddle/nn/InstanceNorm1D_cn.rst index 312e87b82d3..869165d3938 100644 --- a/docs/api/paddle/nn/InstanceNorm1D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm1D_cn.rst @@ -42,7 +42,7 @@ Note: .. note:: 目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 - + 代码示例 :::::::::::: @@ -54,7 +54,7 @@ Note: np.random.seed(123) x_data = np.random.random(size=(2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) + x = paddle.to_tensor(x_data) instance_norm = paddle.nn.InstanceNorm1D(2) instance_norm_out = instance_norm(x) diff --git a/docs/api/paddle/nn/InstanceNorm2D_cn.rst b/docs/api/paddle/nn/InstanceNorm2D_cn.rst index aa9d85493ec..c6f3ab1dff4 100644 --- a/docs/api/paddle/nn/InstanceNorm2D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm2D_cn.rst @@ -41,7 +41,7 @@ Note: .. 
note:: 目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 - + 代码示例 :::::::::::: @@ -53,10 +53,10 @@ Note: np.random.seed(123) x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) + x = paddle.to_tensor(x_data) instance_norm = paddle.nn.InstanceNorm2D(2) instance_norm_out = instance_norm(x) print(instance_norm_out) - + diff --git a/docs/api/paddle/nn/InstanceNorm3D_cn.rst b/docs/api/paddle/nn/InstanceNorm3D_cn.rst index 7c7d2460253..2e044850d08 100644 --- a/docs/api/paddle/nn/InstanceNorm3D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm3D_cn.rst @@ -40,7 +40,7 @@ Note: .. note:: 目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 - + 代码示例 :::::::::::: @@ -52,7 +52,7 @@ Note: np.random.seed(123) x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) + x = paddle.to_tensor(x_data) instance_norm = paddle.nn.InstanceNorm3D(2) instance_norm_out = instance_norm(x) diff --git a/docs/api/paddle/nn/KLDivLoss_cn.rst b/docs/api/paddle/nn/KLDivLoss_cn.rst index 686817241ac..a6def96b80d 100644 --- a/docs/api/paddle/nn/KLDivLoss_cn.rst +++ b/docs/api/paddle/nn/KLDivLoss_cn.rst @@ -26,7 +26,7 @@ kL发散损失计算如下: :::::::::::: - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 - + 形状 :::::::::::: diff --git a/docs/api/paddle/nn/L1Loss_cn.rst b/docs/api/paddle/nn/L1Loss_cn.rst index 4c778961c13..a006f83f269 100644 --- a/docs/api/paddle/nn/L1Loss_cn.rst +++ b/docs/api/paddle/nn/L1Loss_cn.rst @@ -10,7 +10,7 @@ L1Loss 该损失函数的数学计算公式如下: 当 `reduction` 设置为 ``'none'`` 时, - + .. math:: Out = \lvert input - label\rvert @@ -20,7 +20,7 @@ L1Loss Out = MEAN(\lvert input - label\rvert) 当 `reduction` 设置为 ``'sum'`` 时, - + .. 
math:: Out = SUM(\lvert input - label\rvert) diff --git a/docs/api/paddle/nn/LSTMCell_cn.rst b/docs/api/paddle/nn/LSTMCell_cn.rst index 431e4ba42a3..ee8de0c3264 100644 --- a/docs/api/paddle/nn/LSTMCell_cn.rst +++ b/docs/api/paddle/nn/LSTMCell_cn.rst @@ -15,20 +15,20 @@ LSTMCell .. math:: - i_{t} &= \sigma (W_{ii}x_{t} + b_{ii} + W_{hi}h_{t-1} + b_{hi})\\ - f_{t} &= \sigma (W_{if}x_{t} + b_{if} + W_{hf}h_{t-1} + b_{hf})\\ - o_{t} &= \sigma (W_{io}x_{t} + b_{io} + W_{ho}h_{t-1} + b_{ho})\\ - g_{t} &= \tanh (W_{ig}x_{t} + b_{ig} + W_{hg}h_{t-1} + b_{hg})\\ - c_{t} &= f_{t} * c_{t-1} + i_{t} * g_{t}\\ - h_{t} &= o_{t} * \tanh (c_{t})\\ - y_{t} &= h_{t} + i_{t} &= \sigma (W_{ii}x_{t} + b_{ii} + W_{hi}h_{t-1} + b_{hi})\\ + f_{t} &= \sigma (W_{if}x_{t} + b_{if} + W_{hf}h_{t-1} + b_{hf})\\ + o_{t} &= \sigma (W_{io}x_{t} + b_{io} + W_{ho}h_{t-1} + b_{ho})\\ + g_{t} &= \tanh (W_{ig}x_{t} + b_{ig} + W_{hg}h_{t-1} + b_{hg})\\ + c_{t} &= f_{t} * c_{t-1} + i_{t} * g_{t}\\ + h_{t} &= o_{t} * \tanh (c_{t})\\ + y_{t} &= h_{t} 其中: - :math:`\sigma` :sigmoid激活函数。 - + 详情请参考论文:`An Empirical Exploration of Recurrent Network Architectures `_ 。 @@ -50,7 +50,7 @@ LSTMCell - **weight_hh** (Parameter) - hidden到hidden的变换矩阵的权重。形状为(4 * hidden_size, hidden_size)。对应公式中的 :math:`W_{hi}, W_{hf}, W_{hg}, W_{ho}`。 - **bias_ih** (Parameter) - input到hidden的变换矩阵的偏置。形状为(4 * hidden_size, )。对应公式中的 :math:`b_{ii}, b_{if}, b_{ig}, b_{io}`。 - **bias_hh** (Parameter) - hidden到hidden的变换矩阵的偏置。形状为(4 * hidden_size, )。对应公式中的 :math:`b_{hi}, b_{hf}, b_{hg}, b_{ho}`。 - + 输入 :::::::::::: @@ -62,7 +62,7 @@ LSTMCell - **outputs** (Tensor) - 输出。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 - **new_states** (tuple) - 一个包含两个Tensor的元组,每个Tensor的形状都为[batch_size, hidden_size],新一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t},c_{t}`。 - + .. 
Note:: 所有的变换矩阵的权重和偏置都默认初始化为Uniform(-std, std),其中std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 diff --git a/docs/api/paddle/nn/LSTM_cn.rst b/docs/api/paddle/nn/LSTM_cn.rst index 1ee20f0649c..40e7f93c25c 100644 --- a/docs/api/paddle/nn/LSTM_cn.rst +++ b/docs/api/paddle/nn/LSTM_cn.rst @@ -46,7 +46,7 @@ LSTM - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 - + 输入 :::::::::::: diff --git a/docs/api/paddle/nn/LayerDict_cn.rst b/docs/api/paddle/nn/LayerDict_cn.rst index 3566ab59b9b..9127b05f669 100644 --- a/docs/api/paddle/nn/LayerDict_cn.rst +++ b/docs/api/paddle/nn/LayerDict_cn.rst @@ -244,4 +244,3 @@ update() #conv2d : Conv2D(4, 2, kernel_size=[4, 4], data_format=NCHW) #conv3d : Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW) #relu : ReLU() - \ No newline at end of file diff --git a/docs/api/paddle/nn/LayerList_cn.rst b/docs/api/paddle/nn/LayerList_cn.rst index 94360d993f9..f1250a155b5 100644 --- a/docs/api/paddle/nn/LayerList_cn.rst +++ b/docs/api/paddle/nn/LayerList_cn.rst @@ -74,7 +74,7 @@ insert() .. code-block:: python import paddle - + linears = paddle.nn.LayerList([paddle.nn.Linear(10, 10) for i in range(10)]) another = paddle.nn.Linear(10, 10) linears.insert(3, another) @@ -94,7 +94,7 @@ extend() .. code-block:: python import paddle - + linears = paddle.nn.LayerList([paddle.nn.Linear(10, 10) for i in range(10)]) another_list = paddle.nn.LayerList([paddle.nn.Linear(10, 10) for i in range(5)]) linears.extend(another_list) diff --git a/docs/api/paddle/nn/LayerNorm_cn.rst b/docs/api/paddle/nn/LayerNorm_cn.rst index ab99b5d9c75..df109a87489 100644 --- a/docs/api/paddle/nn/LayerNorm_cn.rst +++ b/docs/api/paddle/nn/LayerNorm_cn.rst @@ -5,7 +5,7 @@ LayerNorm .. 
py:class:: paddle.nn.LayerNorm(normalized_shape, epsilon=1e-05, weight_attr=None, bias_attr=None, name=None) -该接口用于构建 ``LayerNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。其中实现了层归一化层(Layer Normalization Layer)的功能,其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_ +该接口用于构建 ``LayerNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。其中实现了层归一化层(Layer Normalization Layer)的功能,其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_ 计算公式如下 @@ -48,7 +48,7 @@ LayerNorm np.random.seed(123) x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) + x = paddle.to_tensor(x_data) layer_norm = paddle.nn.LayerNorm(x_data.shape[1:]) layer_norm_out = layer_norm(x) diff --git a/docs/api/paddle/nn/Layer_cn.rst b/docs/api/paddle/nn/Layer_cn.rst index f469544273e..2f7b6b77c2f 100644 --- a/docs/api/paddle/nn/Layer_cn.rst +++ b/docs/api/paddle/nn/Layer_cn.rst @@ -194,7 +194,7 @@ HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注 # hook change the linear's output to output * 2, so out0 is equal to out1 * 2. assert (out0.numpy() == (out1.numpy()) * 2).any() - + create_parameter(shape, attr=None, dtype="float32", is_bias=False, default_initializer=None) ''''''''' @@ -243,7 +243,7 @@ create_variable(name=None, persistable=None, dtype=None) - **dtype** (str,可选) - Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 "float32" 。 **返回** -Tensor,返回创建的 ``Tensor`` +Tensor,返回创建的 ``Tensor`` **代码示例** @@ -257,13 +257,13 @@ Tensor,返回创建的 ``Tensor`` out_features): super(MyLinear, self).__init__() self.linear = paddle.nn.Linear( 10, 10) - + self.back_var = self.create_variable(name = "linear_tmp_0", dtype=self._dtype) - + def forward(self, input): out = self.linear(input) paddle.assign( out, self.back_var) - + return out create_tensor(name=None, persistable=None, dtype=None) @@ -278,7 +278,7 @@ create_tensor(name=None, persistable=None, dtype=None) - **dtype** (str,可选) - 
Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 "float32" 。 **返回** -Tensor,返回创建的 ``Tensor`` +Tensor,返回创建的 ``Tensor`` **代码示例** @@ -292,13 +292,13 @@ Tensor,返回创建的 ``Tensor`` out_features): super(MyLinear, self).__init__() self.linear = paddle.nn.Linear( 10, 10) - + self.back_var = self.create_tensor(name = "linear_tmp_0", dtype=self._dtype) - + def forward(self, input): out = self.linear(input) paddle.assign( out, self.back_var) - + return out @@ -499,7 +499,7 @@ None import numpy as np import paddle - + linear = paddle.nn.Linear(10, 3) value = np.array([0]).astype("float32") buffer = paddle.to_tensor(value) @@ -703,7 +703,7 @@ set_state_dict(state_dict, use_structured_name=True) import paddle emb = paddle.nn.Embedding(10, 10) - + state_dict = emb.state_dict() paddle.save(state_dict, "paddle_dy.pdparams") para_state_dict = paddle.load("paddle_dy.pdparams") @@ -725,20 +725,20 @@ to(device=None, dtype=None, blocking=None) .. 
code-block:: python import paddle - + linear=paddle.nn.Linear(2, 2) linear.weight #Parameter containing: #Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=False, # [[-0.32770029, 0.38653070], # [ 0.46030545, 0.08158520]]) - + linear.to(dtype='float64') linear.weight #Tenor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=False, # [[-0.32770029, 0.38653070], # [ 0.46030545, 0.08158520]]) - + linear.to(device='cpu') linear.weight #Tensor(shape=[2, 2], dtype=float64, place=CPUPlace, stop_gradient=False, @@ -749,4 +749,4 @@ to(device=None, dtype=None, blocking=None) #Tensor(shape=[2, 2], dtype=float64, place=CUDAPinnedPlace, stop_gradient=False, # [[-0.04989364, -0.56889004], # [ 0.33960250, 0.96878713]]) - + diff --git a/docs/api/paddle/nn/LocalResponseNorm_cn.rst b/docs/api/paddle/nn/LocalResponseNorm_cn.rst index 530cd7bfb12..85d4eae84ed 100644 --- a/docs/api/paddle/nn/LocalResponseNorm_cn.rst +++ b/docs/api/paddle/nn/LocalResponseNorm_cn.rst @@ -5,7 +5,7 @@ LocalResponseNorm .. py:function:: paddle.nn.LocalResponseNorm(size, alpha=0.0001, beta=0.75, k=1.0, data_format="NCHW", name=None) -局部响应正则化(Local Response Normalization)用于对局部输入区域进行正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考:`ImageNet Classification with Deep Convolutional Neural Networks `_ +局部响应正则化(Local Response Normalization)用于对局部输入区域进行正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考:`ImageNet Classification with Deep Convolutional Neural Networks `_ .. note:: 对应的 `functional方法` 请参考::ref:`cn_api_nn_functional_local_response_norm` 。 diff --git a/docs/api/paddle/nn/LogSoftmax_cn.rst b/docs/api/paddle/nn/LogSoftmax_cn.rst index aabfb328c92..ef6e3835d09 100644 --- a/docs/api/paddle/nn/LogSoftmax_cn.rst +++ b/docs/api/paddle/nn/LogSoftmax_cn.rst @@ -8,7 +8,7 @@ LogSoftmax激活层,计算公式如下: .. 
math:: - \begin{aligned} + \begin{aligned} Out[i, j] &= log(softmax(x)) \\ &= log(\frac{\exp(X[i, j])}{\sum_j(\exp(X[i, j])}) \end{aligned} diff --git a/docs/api/paddle/nn/MSELoss_cn.rst b/docs/api/paddle/nn/MSELoss_cn.rst index 2072798657f..ba5df238aa3 100644 --- a/docs/api/paddle/nn/MSELoss_cn.rst +++ b/docs/api/paddle/nn/MSELoss_cn.rst @@ -34,7 +34,7 @@ MSELoss - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 - + 返回 :::::::::::: diff --git a/docs/api/paddle/nn/MarginRankingLoss_cn.rst b/docs/api/paddle/nn/MarginRankingLoss_cn.rst index 2cf85011f10..c0f57b3d20a 100644 --- a/docs/api/paddle/nn/MarginRankingLoss_cn.rst +++ b/docs/api/paddle/nn/MarginRankingLoss_cn.rst @@ -9,7 +9,7 @@ MarginRankingLoss 该损失函数的数学计算公式如下: - .. math:: + .. math:: margin\_rank\_loss = max(0, -label * (input - other) + margin) 当 `reduction` 设置为 ``'mean'`` 时, @@ -18,7 +18,7 @@ MarginRankingLoss Out = MEAN(margin\_rank\_loss) 当 `reduction` 设置为 ``'sum'`` 时, - + .. 
math:: Out = SUM(margin\_rank\_loss) @@ -26,7 +26,7 @@ MarginRankingLoss 参数 :::::::: - - **margin** (float,可选): - 用于加和的margin值,默认值为0。 + - **margin** (float,可选): - 用于加和的margin值,默认值为0。 - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'`` 、 ``'mean'`` 、 ``'sum'``。如果设置为 ``'none'``,则直接返回 最原始的 ``margin_rank_loss``。如果设置为 ``'sum'``,则返回 ``margin_rank_loss`` 的总和。如果设置为 ``'mean'``,则返回 ``margin_rank_loss`` 的平均值。默认值为 ``'none'`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/MultiLabelSoftMarginLoss_cn.rst b/docs/api/paddle/nn/MultiLabelSoftMarginLoss_cn.rst index cea645ca5ea..03983ba8a07 100644 --- a/docs/api/paddle/nn/MultiLabelSoftMarginLoss_cn.rst +++ b/docs/api/paddle/nn/MultiLabelSoftMarginLoss_cn.rst @@ -24,24 +24,24 @@ MultiLabelSoftMarginLoss - **weight** (Tensor,可选) - 手动设定权重,默认为None - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始Loss。 - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 - + 输入 ::::::::: - **input** (Tensor): - 输入 Tensor,维度是 [N, *], 其中 N 是 batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64。 - **label** (Tensor): - 标签,维度是 [N, *], 与 ``input`` 相同,Tensor 中的值应该只包含 1 和 -1。数据类型为:float32、float64。 - + 形状 ::::::::: - **input** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。数据类型是float32、float64。 - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 - - + + 返回 ::::::::: 返回计算 MultiLabelSoftMarginLoss 的可调用类。 - - + + 代码示例 ::::::::: COPY-FROM: Paddle.nn.MultiLabelSoftMarginLoss diff --git a/docs/api/paddle/nn/Overview_cn.rst b/docs/api/paddle/nn/Overview_cn.rst index fd863f0e96f..f4d51771238 100644 --- a/docs/api/paddle/nn/Overview_cn.rst +++ 
b/docs/api/paddle/nn/Overview_cn.rst @@ -42,7 +42,7 @@ paddle.nn 目录下包含飞桨框架支持的神经网络层和相关函数的 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.Layer ` ", "基于OOD实现的动态图Layer" " :ref:`paddle.nn.LayerList ` ", "用于保存子层列表" @@ -57,7 +57,7 @@ paddle.nn 目录下包含飞桨框架支持的神经网络层和相关函数的 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.Conv1D ` ", "一维卷积层" @@ -66,7 +66,7 @@ paddle.nn 目录下包含飞桨框架支持的神经网络层和相关函数的 " :ref:`paddle.nn.Conv2DTranspose ` ", "二维转置卷积层" " :ref:`paddle.nn.Conv3D ` ", "三维卷积层" " :ref:`paddle.nn.Conv3DTranspose ` ", "三维转置卷积层" - + .. _pooling_layers: pooling层 @@ -74,7 +74,7 @@ pooling层 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.AdaptiveAvgPool1D ` ", "一维自适应平均池化层" " :ref:`paddle.nn.AdaptiveAvgPool2D ` ", "二维自适应平均池化层" @@ -91,7 +91,7 @@ pooling层 " :ref:`paddle.nn.MaxUnPool1D ` ", "一维最大反池化层" " :ref:`paddle.nn.MaxUnPool2D ` ", "二维最大反池化层" " :ref:`paddle.nn.MaxUnPool3D ` ", "三维最大反池化层" - + .. _padding_layers: Padding层 @@ -99,13 +99,13 @@ Padding层 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.Pad1D ` ", "一维填充层" " :ref:`paddle.nn.Pad2D ` ", "二维填充层" " :ref:`paddle.nn.Pad3D ` ", "三维填充层" " :ref:`paddle.nn.ZeroPad2D ` ", "二维零填充层" - + .. _activation_layers: 激活层 @@ -113,7 +113,7 @@ Padding层 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.CELU ` ", "CELU激活层" " :ref:`paddle.nn.ELU ` ", "ELU激活层" @@ -142,7 +142,7 @@ Padding层 " :ref:`paddle.nn.Tanh ` ", "Tanh激活层" " :ref:`paddle.nn.Tanhshrink ` ", "Tanhshrink激活层" " :ref:`paddle.nn.ThresholdedReLU ` ", "Thresholded ReLU激活层" - + .. _normalization_layers: Normalization层 @@ -150,7 +150,7 @@ Normalization层 .. 
csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.BatchNorm ` ", "Batch Normalization层" " :ref:`paddle.nn.BatchNorm1D ` ", "一维Batch Normalization层" @@ -164,7 +164,7 @@ Normalization层 " :ref:`paddle.nn.LocalResponseNorm ` ", "Local Response Normalization层" " :ref:`paddle.nn.SpectralNorm ` ", "Spectral Normalization层" " :ref:`paddle.nn.SyncBatchNorm ` ", "Synchronized Batch Normalization层" - + .. _RNN_layers: 循环神经网络层 @@ -172,7 +172,7 @@ Normalization层 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.BiRNN ` ", "双向循环神经网络" " :ref:`paddle.nn.GRU ` ", "门控循环单元网络" @@ -183,7 +183,7 @@ Normalization层 " :ref:`paddle.nn.RNNCellBase ` ", "循环神经网络单元基类" " :ref:`paddle.nn.SimpleRNN ` ", "简单循环神经网络" " :ref:`paddle.nn.SimpleRNNCell ` ", "简单循环神经网络单元" - + .. _Transformer: Transformer相关 @@ -191,7 +191,7 @@ Transformer相关 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.MultiHeadAttention ` ", "多头注意力机制" " :ref:`paddle.nn.Transformer ` ", "Transformer模型" @@ -199,7 +199,7 @@ Transformer相关 " :ref:`paddle.nn.TransformerDecoderLayer ` ", "Transformer解码器层" " :ref:`paddle.nn.TransformerEncoder ` ", "Transformer编码器" " :ref:`paddle.nn.TransformerEncoderLayer ` ", "Transformer编码器层" - + .. _linear_layers: 线性层 @@ -207,11 +207,11 @@ Transformer相关 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.Bilinear ` ", "对两个输入执行双线性张量积" " :ref:`paddle.nn.Linear ` ", "线性变换层" - + .. _dropout_layers: Dropout层 @@ -219,13 +219,13 @@ Dropout层 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.AlphaDropout ` ", "具有自归一化性质的dropout" " :ref:`paddle.nn.Dropout ` ", "Dropout" " :ref:`paddle.nn.Dropout2D ` ", "一维Dropout" " :ref:`paddle.nn.Dropout3D ` ", "二维Dropout" - + .. _embedding_layers: Embedding层 @@ -233,10 +233,10 @@ Embedding层 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.Embedding ` ", "嵌入层(Embedding Layer)" - + .. _loss_layers: Loss层 @@ -244,7 +244,7 @@ Loss层 .. 
csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.BCELoss ` ", "BCELoss层" " :ref:`paddle.nn.BCEWithLogitsLoss ` ", "BCEWithLogitsLoss层" @@ -271,7 +271,7 @@ Vision层 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.ChannelShuffle ` ", "将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor按通道分成g组,得到形为[N, g, C/g, H, W]或[N, H, W, g, C/g]的Tensor,然后转置为[N, C/g, g, H, W]或[N, H, W, C/g, g]的形状,最后重新排列为原来的形状" " :ref:`paddle.nn.PixelShuffle ` ", "将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的Tensor" @@ -279,7 +279,7 @@ Vision层 " :ref:`paddle.nn.Upsample ` ", "用于调整一个batch中图片的大小" " :ref:`paddle.nn.UpsamplingBilinear2D ` ", "用于调整一个batch中图片的大小(使用双线性插值方法)" " :ref:`paddle.nn.UpsamplingNearest2D ` ", "用于调整一个batch中图片的大小(使用最近邻插值方法)" - + .. _about_clip: Clip相关 @@ -287,12 +287,12 @@ Clip相关 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.ClipGradByGlobalNorm ` ", "将一个 Tensor列表 t_list 中所有Tensor的L2范数之和,限定在 clip_norm 范围内" " :ref:`paddle.nn.ClipGradByNorm ` ", "将输入的多维Tensor X 的L2范数限制在 clip_norm 范围之内" " :ref:`paddle.nn.ClipGradByValue ` ", "将输入的多维Tensor X 的值限制在 [min, max] 范围" - + .. _common_layers: 公共层 @@ -300,7 +300,7 @@ Clip相关 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.BeamSearchDecoder ` ", "带beam search解码策略的解码器" " :ref:`paddle.nn.CosineSimilarity ` ", "余弦相似度计算" @@ -319,7 +319,7 @@ Clip相关 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.conv1d ` ", "一维卷积函数" " :ref:`paddle.nn.functional.conv1d_transpose ` ", "一维转置卷积函数" @@ -327,7 +327,7 @@ Clip相关 " :ref:`paddle.nn.functional.conv2d_transpose ` ", "二维转置卷积函数" " :ref:`paddle.nn.functional.conv3d ` ", "三维卷积函数" " :ref:`paddle.nn.functional.conv3d_transpose ` ", "三维转置卷积函数" - + .. _pooling_functional: Pooling相关函数 @@ -335,7 +335,7 @@ Pooling相关函数 .. 
csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.adaptive_avg_pool1d ` ", "一维自适应平均池化" " :ref:`paddle.nn.functional.adaptive_avg_pool2d ` ", "二维自适应平均池化" @@ -360,11 +360,11 @@ Padding相关函数 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.pad ` ", "依照 pad 和 mode 属性对input进行填充" " :ref:`paddle.nn.functional.zeropad2d ` ", "依照 pad 对x进行零填充" - + .. _activation_functional: 激活函数 @@ -372,7 +372,7 @@ Padding相关函数 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.celu ` ", "celu激活函数" " :ref:`paddle.nn.functional.elu ` ", "elu激活函数" @@ -404,7 +404,7 @@ Padding相关函数 " :ref:`paddle.nn.functional.mish ` ", "mish激活函数" " :ref:`paddle.nn.functional.tanhshrink ` ", "tanhshrink激活函数" " :ref:`paddle.nn.functional.thresholded_relu ` ", "thresholded_relu激活函数" - + .. _normalization_functional: Normalization方法 @@ -412,7 +412,7 @@ Normalization方法 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.local_response_norm ` ", "Local Response Normalization函数" " :ref:`paddle.nn.functional.normalize ` ", "归一化方法" @@ -427,11 +427,11 @@ Normalization方法 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.bilinear ` ", "对两个输入执行双线性张量积" " :ref:`paddle.nn.functional.linear ` ", "线性变换" - + .. _dropout_functional: Dropout方法 @@ -439,13 +439,13 @@ Dropout方法 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.alpha_dropout ` ", "一种具有自归一化性质的dropout" " :ref:`paddle.nn.functional.dropout ` ", "Dropout" " :ref:`paddle.nn.functional.dropout2d ` ", "一维Dropout" " :ref:`paddle.nn.functional.dropout3d ` ", "二维Dropout" - + .. _embedding_functional: Embedding相关函数 @@ -453,11 +453,11 @@ Embedding相关函数 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.diag_embed ` ", "对角线Embedding 方法" " :ref:`paddle.nn.functional.embedding ` ", "Embedding 方法" - + .. _loss_functional: 损失函数 @@ -465,7 +465,7 @@ Embedding相关函数 .. 
csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.binary_cross_entropy ` ", "二值交叉熵损失值" " :ref:`paddle.nn.functional.binary_cross_entropy_with_logits ` ", "logits二值交叉熵损失值" @@ -489,7 +489,7 @@ Embedding相关函数 " :ref:`paddle.nn.functional.triplet_margin_with_distance_loss ` ", "用户自定义距离函数用于计算triplet margin loss 损失" " :ref:`paddle.nn.functional.multi_label_soft_margin_loss ` ", "用于计算多分类的hinge loss损失函数" - + .. _common_functional: 公用方法 @@ -497,7 +497,7 @@ Embedding相关函数 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.functional.affine_grid ` ", "用于生成仿射变换前后的feature maps的坐标映射关系" " :ref:`paddle.nn.functional.cosine_similarity ` ", "用于计算x1与x2沿axis维度的余弦相似度" @@ -525,7 +525,7 @@ Embedding相关函数 .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.nn.initializer.Assign ` ", "使用Numpy数组、Python列表、Tensor来初始化参数" " :ref:`paddle.nn.initializer.Bilinear ` ", "该接口为参数初始化函数,用于转置卷积函数中" diff --git a/docs/api/paddle/nn/RNN_cn.rst b/docs/api/paddle/nn/RNN_cn.rst index a0524fa6e94..fd6d0ac8eda 100644 --- a/docs/api/paddle/nn/RNN_cn.rst +++ b/docs/api/paddle/nn/RNN_cn.rst @@ -17,7 +17,7 @@ RNN - **cell** (RNNCellBase) - RNNCellBase类的一个实例。 - **is_reverse** (bool,可选) - 指定遍历input的方向。默认为False - **time_major** (bool,可选) - 指定input的第一个维度是否是time steps。默认为False。 - + 输入 :::::::::::: @@ -30,7 +30,7 @@ RNN - **outputs** (Tensor|list|tuple) - 输出。如果time_major为False,则Tensor的形状为[batch_size,time_steps,hidden_size],如果time_major为True,则Tensor的形状为[time_steps,batch_size,hidden_size]。 - **final_states** (Tensor|list|tuple) - cell的最终状态,嵌套结构,形状和数据类型都与初始状态相同。 - + .. Note:: 该类是一个封装rnn cell的低级api,用户在使用forward函数时须确保initial_states满足cell的要求。 diff --git a/docs/api/paddle/nn/RReLU_cn.rst b/docs/api/paddle/nn/RReLU_cn.rst index 9a5994d42bf..1b5a34d812f 100644 --- a/docs/api/paddle/nn/RReLU_cn.rst +++ b/docs/api/paddle/nn/RReLU_cn.rst @@ -32,7 +32,7 @@ RReLU激活层,应用随机纠正线性单元对神经元激活,参考论文 (lower + upper) * 0.5 * x, & & otherwise \\ \end{array} \right. 
- + 其中,:math:`x` 为输入的 Tensor,:math:`lower` 及 :math:`upper` 是随机均匀分布的上下边界。 参数 diff --git a/docs/api/paddle/nn/Sigmoid_cn.rst b/docs/api/paddle/nn/Sigmoid_cn.rst index 4def7e71620..18c6526ab41 100644 --- a/docs/api/paddle/nn/Sigmoid_cn.rst +++ b/docs/api/paddle/nn/Sigmoid_cn.rst @@ -17,7 +17,7 @@ Sigmoid 形状 :::::::: - - **x** (Tensor)- N-D tensor,可以支持的数据类型是float16,float32,float64。 + - **x** (Tensor)- N-D tensor,可以支持的数据类型是float16,float32,float64。 返回 :::::::: diff --git a/docs/api/paddle/nn/SimpleRNNCell_cn.rst b/docs/api/paddle/nn/SimpleRNNCell_cn.rst index dbd84eb2e6a..aa978de8db6 100644 --- a/docs/api/paddle/nn/SimpleRNNCell_cn.rst +++ b/docs/api/paddle/nn/SimpleRNNCell_cn.rst @@ -42,7 +42,7 @@ SimpleRNNCell - **weight_hh** (Parameter) - hidden到hidden的变换矩阵的权重。形状为(hidden_size, hidden_size)。对应公式中的 :math:`W_{hh}`。 - **bias_ih** (Parameter) - input到hidden的变换矩阵的偏置。形状为(hidden_size, )。对应公式中的 :math:`b_{ih}`。 - **bias_hh** (Parameter) - hidden到hidden的变换矩阵的偏置。形状为(hidden_size, )。对应公式中的 :math:`b_{hh}`。 - + 输入 :::::::::::: @@ -54,7 +54,7 @@ SimpleRNNCell - **outputs** (Tensor) - 输出。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 - **new_states** (Tensor) - 新一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 - + .. 
Note:: 所有的变换矩阵的权重和偏置都默认初始化为Uniform(-std, std),其中std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 diff --git a/docs/api/paddle/nn/SimpleRNN_cn.rst b/docs/api/paddle/nn/SimpleRNN_cn.rst index 1d757f4deed..d3832737e6e 100644 --- a/docs/api/paddle/nn/SimpleRNN_cn.rst +++ b/docs/api/paddle/nn/SimpleRNN_cn.rst @@ -33,7 +33,7 @@ SimpleRNN - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 - + 输入 :::::::::::: diff --git a/docs/api/paddle/nn/SmoothL1Loss_cn.rst b/docs/api/paddle/nn/SmoothL1Loss_cn.rst index ce8e764720f..d961c6698d5 100644 --- a/docs/api/paddle/nn/SmoothL1Loss_cn.rst +++ b/docs/api/paddle/nn/SmoothL1Loss_cn.rst @@ -25,7 +25,7 @@ SmoothL1Loss - **reduction** (string,可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。 - **delta** (string,可选): SmoothL1Loss损失的阈值参数,用于控制Huber损失对线性误差或平方误差的侧重。数据类型为float32。默认值= 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 输入 :::::::::: - **input** (Tensor):输入 `Tensor`,数据类型为float32。其形状为 :math:`[N, C]`,其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。 diff --git a/docs/api/paddle/nn/Softmax_cn.rst b/docs/api/paddle/nn/Softmax_cn.rst index 41ad0bd0c00..2efd7b57f99 100644 --- a/docs/api/paddle/nn/Softmax_cn.rst +++ b/docs/api/paddle/nn/Softmax_cn.rst @@ -30,7 +30,7 @@ Softmax激活层,OP的计算过程如下: # input - x.shape = [2, 3, 4] + x.shape = [2, 3, 4] x.data = [[[2.0, 3.0, 4.0, 5.0], [3.0, 4.0, 5.0, 6.0], @@ -58,7 +58,7 @@ Softmax激活层,OP的计算过程如下: # input - x.shape = [2, 3, 4] + x.shape = [2, 3, 4] x.data = [[[2.0, 3.0, 4.0, 5.0], [3.0, 4.0, 5.0, 6.0], @@ -78,7 +78,7 @@ Softmax激活层,OP的计算过程如下: [0.97555875, 0.97555875, 0.93623955, 0.93623955]], [[0.00490169, 0.00490169, 0.00490169, 0.00490169], [0.26762315, 0.26762315, 0.26762315, 
0.26762315], - [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] + [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] 参数 :::::::::: diff --git a/docs/api/paddle/nn/SpectralNorm_cn.rst b/docs/api/paddle/nn/SpectralNorm_cn.rst index abe83cf530f..7eb0f46aa0e 100644 --- a/docs/api/paddle/nn/SpectralNorm_cn.rst +++ b/docs/api/paddle/nn/SpectralNorm_cn.rst @@ -23,7 +23,7 @@ SpectralNorm \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ \mathbf{W} &= \frac{\mathbf{W}}{\sigma(\mathbf{W})} -可参考:`Spectral Normalization `_ +可参考:`Spectral Normalization `_ 参数 ::::::::: diff --git a/docs/api/paddle/nn/SyncBatchNorm_cn.rst b/docs/api/paddle/nn/SyncBatchNorm_cn.rst index 58f865e582b..85f17539602 100644 --- a/docs/api/paddle/nn/SyncBatchNorm_cn.rst +++ b/docs/api/paddle/nn/SyncBatchNorm_cn.rst @@ -5,7 +5,7 @@ SyncBatchNorm .. py:class:: paddle.nn.SyncBatchNorm(num_features, epsilon=1e-5, momentum=0.9, weight_attr=None, bias_attr=None, data_format='NCHW', name=None) -该接口用于构建 ``SyncBatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。实现了跨卡GPU同步的批归一化(Cross-GPU Synchronized Batch Normalization Layer)的功能,可用在其他层(类似卷积层和全连接层)之后进行归一化操作。根据所有GPU同一批次的数据按照通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``SyncBatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。实现了跨卡GPU同步的批归一化(Cross-GPU Synchronized Batch Normalization Layer)的功能,可用在其他层(类似卷积层和全连接层)之后进行归一化操作。根据所有GPU同一批次的数据按照通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ 当模型处于训练模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是所有GPU上同一minibatch的统计数据。计算公式如下: diff --git a/docs/api/paddle/nn/Transformer_cn.rst b/docs/api/paddle/nn/Transformer_cn.rst index 6a7b2841aa1..2d4be6d1384 100644 --- a/docs/api/paddle/nn/Transformer_cn.rst +++ b/docs/api/paddle/nn/Transformer_cn.rst @@ -42,7 +42,7 @@ Transformer模型由一个 ``TransformerEncoder`` 实例和一个 ``TransformerD import paddle from paddle.nn import Transformer - + # src: 
[batch_size, tgt_len, d_model] enc_input = paddle.rand((2, 4, 128)) # tgt: [batch_size, src_len, d_model] @@ -59,7 +59,7 @@ Transformer模型由一个 ``TransformerEncoder`` 实例和一个 ``TransformerD enc_self_attn_mask, dec_self_attn_mask, cross_attn_mask) # [2, 6, 128] - + 方法 diff --git a/docs/api/paddle/nn/Upsample_cn.rst b/docs/api/paddle/nn/Upsample_cn.rst index c139df60414..9ff0961fcdb 100644 --- a/docs/api/paddle/nn/Upsample_cn.rst +++ b/docs/api/paddle/nn/Upsample_cn.rst @@ -15,7 +15,7 @@ Upsample NEAREST:最近邻插值 LINEAR:线性插值 - + BILINEAR:双线性插值 TRILINEAR:三线性插值 diff --git a/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst b/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst index 9775ba6fc0a..bf7e673d2d4 100644 --- a/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst +++ b/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst @@ -23,7 +23,7 @@ UpsamplingNearest2D :: - + scale 计算方法: if align_corners = True && out_size > 1 : diff --git a/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst b/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst index 9a7714f912b..6e7fe2d01df 100755 --- a/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst @@ -25,7 +25,7 @@ adaptive_avg_pool3d wend &= ceil((k + 1) * W_{in} / W_{out}) Output(i ,j, k) &= \frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} - + 参数 diff --git a/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst b/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst index 7874658ea4d..104ffcdc830 100755 --- a/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst +++ b/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst @@ -88,7 +88,7 @@ conv3d_transpose - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: 5-D Tensor,数据类型与 ``input`` 
一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 diff --git a/docs/api/paddle/nn/functional/cosine_similarity_cn.rst b/docs/api/paddle/nn/functional/cosine_similarity_cn.rst index 25699c498ac..236e074aabf 100644 --- a/docs/api/paddle/nn/functional/cosine_similarity_cn.rst +++ b/docs/api/paddle/nn/functional/cosine_similarity_cn.rst @@ -14,8 +14,8 @@ cosine_similarity - **x2** (Tensor) - Tensor,数据类型支持float32, float64。 - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为1。 - **eps** (float) - 很小的值,防止计算时分母为0,默认值为1e-8。 - - + + 返回 :::::::::::: Tensor,余弦相似度的计算结果,数据类型与x1, x2相同。 diff --git a/docs/api/paddle/nn/functional/cross_entropy_cn.rst b/docs/api/paddle/nn/functional/cross_entropy_cn.rst index 40e5d5b5875..0828592b61d 100644 --- a/docs/api/paddle/nn/functional/cross_entropy_cn.rst +++ b/docs/api/paddle/nn/functional/cross_entropy_cn.rst @@ -18,7 +18,7 @@ cross_entropy 1. 硬标签(每个样本仅可分到一个类别) .. math:: - \\loss_j=-\text{logits}_{label_j}+\log\left(\sum_{i=0}^{C}\exp(\text{logits}_i)\right) + \\loss_j=-\text{logits}_{label_j}+\log\left(\sum_{i=0}^{C}\exp(\text{logits}_i)\right) , j = 1,...,N, N为样本数,C为类别数 2. 软标签(每个样本以一定的概率被分配至多个类别中,概率和为1) @@ -38,7 +38,7 @@ cross_entropy 1.1。硬标签情况(soft_label = False) .. math:: - \\loss_j=loss_j*weight[label_j] + \\loss_j=loss_j*weight[label_j] 1.2。软标签情况(soft_label = True) @@ -47,11 +47,11 @@ cross_entropy 2. reduction情况 -2.1 如果 ``reduction`` 参数为 ``none`` +2.1 如果 ``reduction`` 参数为 ``none`` 则直接返回上一步结果 -2.2 如果 ``reduction`` 参数为 ``sum`` +2.2 如果 ``reduction`` 参数为 ``sum`` 则返回上一步结果的和 @@ -60,7 +60,7 @@ cross_entropy 2.3 如果 ``reduction`` 参数为 ``mean``,则根据 ``weight`` 参数情况进行处理: -2.3.1。如果 ``weight`` 参数为 ``None`` +2.3.1。如果 ``weight`` 参数为 ``None`` 则返回上一步结果的平均值 @@ -72,13 +72,13 @@ cross_entropy (1) 硬标签情况(soft_label = False) .. math:: - \\loss=\sum_{j}loss_j/\sum_{j}weight[label_j] + \\loss=\sum_{j}loss_j/\sum_{j}weight[label_j] (2) 软标签情况(soft_label = True) .. 
math:: \\loss=\sum_{j}loss_j/\sum_{j}\left(\sum_{i}weight[label_i]\right) - + 参数 ::::::::: - **input** (Tensor) – 维度为 :math:`[N_1, N_2, ..., N_k, C]` 的多维Tensor,其中最后一维C是类别数目。数据类型为float32或float64。它需要未缩放的 ``input``。该OP不应该对softmax运算的输出进行操作,否则会产生错误的结果。 diff --git a/docs/api/paddle/nn/functional/diag_embed_cn.rst b/docs/api/paddle/nn/functional/diag_embed_cn.rst index c36b8d5f2b0..ed1491f81a3 100644 --- a/docs/api/paddle/nn/functional/diag_embed_cn.rst +++ b/docs/api/paddle/nn/functional/diag_embed_cn.rst @@ -10,7 +10,7 @@ diag_embed 该 OP 创建一个 Tensor,其在指定的 2D 平面(由 ``dim1`` 和 ``dim2`` 指定)上的对角线由输入 ``input`` 填充。 默认的,指定的 2D 平面由返回 Tensor 的最后两维组成。 - + 参数 ``offset`` 确定在指定的二维平面中填充对角线的位置: - 如果 offset = 0,则填充主对角线。 @@ -29,14 +29,14 @@ diag_embed :::::::::::: 指定二维平面填充了对角线的 Tensor。数据类型和输入数据类型一致。 -代码示例 +代码示例 :::::::::::: .. code-block:: python import paddle.nn.functional as F import numpy as np - + diag_embed = np.random.randn(2, 3).astype('float32') # [[ 0.7545889 , -0.25074545, 0.5929117 ], # [-0.6097662 , -0.01753256, 0.619769 ]] diff --git a/docs/api/paddle/nn/functional/gather_tree_cn.rst b/docs/api/paddle/nn/functional/gather_tree_cn.rst index 0e228e3fefa..b17c79a6c4d 100644 --- a/docs/api/paddle/nn/functional/gather_tree_cn.rst +++ b/docs/api/paddle/nn/functional/gather_tree_cn.rst @@ -30,7 +30,7 @@ gather_tree [0 1]]] 结果: - gather_tree(ids, parents) + gather_tree(ids, parents) = [[[2 2] [1 6]] [[3 3] @@ -45,7 +45,7 @@ gather_tree - **ids** (Tensor) - 形状为 :math:`[length, batch\_size, beam\_size]` 的三维 Tensor,数据类型是 int32 或 int64。包含了所有时间步选择的 id。 - **parents** (Tensor) - 形状和数据类型均与 ``ids`` 相同的 Tensor。包含了束搜索中每一时间步所选 id 对应的 parent。 - + 返回 :::::::::::: 和 ``ids`` 具有相同形状和数据类型的 Tensor。包含了根据 parent 回溯而收集产生的完整 id 序列。 diff --git a/docs/api/paddle/nn/functional/grid_sample_cn.rst b/docs/api/paddle/nn/functional/grid_sample_cn.rst index d0c7e86336c..91aeceea906 100644 --- a/docs/api/paddle/nn/functional/grid_sample_cn.rst +++ b/docs/api/paddle/nn/functional/grid_sample_cn.rst @@ -75,12 +75,12 
@@ Tensor,输入X基于输入网格的双线性插值计算结果,维度为 :ma import paddle import paddle.nn.functional as F import numpy as np - + # shape=[1, 1, 3, 3] x = np.array([[[[-0.6, 0.8, -0.5], [-0.5, 0.2, 1.2], [ 1.4, 0.3, -0.2]]]]).astype("float64") - + # grid shape = [1, 3, 4, 2] grid = np.array( [[[[ 0.2, 0.3], @@ -95,8 +95,8 @@ Tensor,输入X基于输入网格的双线性插值计算结果,维度为 :ma [-0.3, -1. ], [ 0.7, 0.4], [ 0.2, 0.8]]]]).astype("float64") - - + + x = paddle.to_tensor(x) grid = paddle.to_tensor(grid) y_t = F.grid_sample( @@ -106,9 +106,9 @@ Tensor,输入X基于输入网格的双线性插值计算结果,维度为 :ma padding_mode='border', align_corners=True) print(y_t) - + # output shape = [1, 1, 3, 4] # [[[[ 0.34 0.016 0.086 -0.448] # [ 0.55 -0.076 0.35 0.59 ] # [ 0.596 0.38 0.52 0.24 ]]]] - + diff --git a/docs/api/paddle/nn/functional/interpolate_cn.rst b/docs/api/paddle/nn/functional/interpolate_cn.rst index 2bee31706b1..4e6c3cee797 100644 --- a/docs/api/paddle/nn/functional/interpolate_cn.rst +++ b/docs/api/paddle/nn/functional/interpolate_cn.rst @@ -40,7 +40,7 @@ Align_corners和align_mode是可选参数,插值的计算方法可以由它们 :: - + scale 计算方法: if align_corners = True && out_size > 1 : diff --git a/docs/api/paddle/nn/functional/kl_div_cn.rst b/docs/api/paddle/nn/functional/kl_div_cn.rst index 73df12ed98e..e25d79715df 100644 --- a/docs/api/paddle/nn/functional/kl_div_cn.rst +++ b/docs/api/paddle/nn/functional/kl_div_cn.rst @@ -28,7 +28,7 @@ kL发散损失计算如下: - **label** (Tensor) - KL散度损失算子的张量。与输入 ``input`` 的维度和数据类型一致的多维Tensor。 - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 ::::::::: Tensor KL散度损失。 diff --git a/docs/api/paddle/nn/functional/l1_loss_cn.rst b/docs/api/paddle/nn/functional/l1_loss_cn.rst index 1c064d39424..b2d736bd0a5 100644 --- a/docs/api/paddle/nn/functional/l1_loss_cn.rst +++ b/docs/api/paddle/nn/functional/l1_loss_cn.rst @@ -10,7 +10,7 @@ l1_loss 该损失函数的数学计算公式如下: 
当 `reduction` 设置为 ``'none'`` 时, - + .. math:: Out = \lvert input - label\rvert @@ -20,7 +20,7 @@ l1_loss Out = MEAN(\lvert input - label\rvert) 当 `reduction` 设置为 ``'sum'`` 时, - + .. math:: Out = SUM(\lvert input - label\rvert) diff --git a/docs/api/paddle/nn/functional/local_response_norm_cn.rst b/docs/api/paddle/nn/functional/local_response_norm_cn.rst index 8109629f8d3..3c334d0a89e 100644 --- a/docs/api/paddle/nn/functional/local_response_norm_cn.rst +++ b/docs/api/paddle/nn/functional/local_response_norm_cn.rst @@ -5,7 +5,7 @@ local_response_norm .. py:function:: paddle.nn.functional.local_response_norm(x, size, alpha=1e-4, beta=0.75, k=1., data_format="NCHW", name=None) -局部响应正则化(Local Response Normalization)用于对局部输入区域进行正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考:`ImageNet Classification with Deep Convolutional Neural Networks `_ +局部响应正则化(Local Response Normalization)用于对局部输入区域进行正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考:`ImageNet Classification with Deep Convolutional Neural Networks `_ 其中 ``input`` 是mini-batch的输入特征。计算过程如下: diff --git a/docs/api/paddle/nn/functional/log_softmax_cn.rst b/docs/api/paddle/nn/functional/log_softmax_cn.rst index a19d3408fff..31decd56ab0 100644 --- a/docs/api/paddle/nn/functional/log_softmax_cn.rst +++ b/docs/api/paddle/nn/functional/log_softmax_cn.rst @@ -8,7 +8,7 @@ log_softmax .. math:: - \begin{aligned} + \begin{aligned} log\_softmax[i, j] &= log(softmax(x)) \\ &= log(\frac{\exp(X[i, j])}{\sum_j(\exp(X[i, j])}) \end{aligned} diff --git a/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst b/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst index 58d31ecf37c..1977b725d7c 100644 --- a/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst +++ b/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst @@ -7,7 +7,7 @@ margin_ranking_loss 该算子计算输入input,other 和 标签label间的 `margin rank loss` 损失。该损失函数的数学计算公式如下: - .. math:: + .. 
math:: margin\_rank\_loss = max(0, -label * (input - other) + margin) 当 `reduction` 设置为 ``'mean'`` 时, @@ -16,7 +16,7 @@ margin_ranking_loss Out = MEAN(margin\_rank\_loss) 当 `reduction` 设置为 ``'sum'`` 时, - + .. math:: Out = SUM(margin\_rank\_loss) @@ -27,9 +27,9 @@ margin_ranking_loss - **input** (Tensor):第一个输入的 `Tensor`,数据类型为:float32、float64。 - **other** (Tensor):第二个输入的 `Tensor`,数据类型为:float32、float64。 - **label** (Tensor):训练数据的标签,数据类型为:float32, float64。 - - **margin** (float,可选): - 用于加和的margin值,默认值为0。 + - **margin** (float,可选): - 用于加和的margin值,默认值为0。 - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'`` 、 ``'mean'`` 、 ``'sum'``。如果设置为 ``'none'``,则直接返回 最原始的 ``margin_rank_loss``。如果设置为 ``'sum'``,则返回 ``margin_rank_loss`` 的总和。如果设置为 ``'mean'``,则返回 ``margin_rank_loss`` 的平均值。默认值为 ``'none'`` 。 - - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::: diff --git a/docs/api/paddle/nn/functional/mse_loss_cn.rst b/docs/api/paddle/nn/functional/mse_loss_cn.rst index bf4e0c51409..4b1efbb644a 100644 --- a/docs/api/paddle/nn/functional/mse_loss_cn.rst +++ b/docs/api/paddle/nn/functional/mse_loss_cn.rst @@ -10,7 +10,7 @@ mse_loss 对于预测值input和目标值label,公式为: 当 `reduction` 设置为 ``'none'`` 时, - + .. math:: Out = (input - label)^2 @@ -20,7 +20,7 @@ mse_loss Out = \operatorname{mean}((input - label)^2) 当 `reduction` 设置为 ``'sum'`` 时, - + .. math:: Out = \operatorname{sum}((input - label)^2) diff --git a/docs/api/paddle/nn/functional/npair_loss_cn.rst b/docs/api/paddle/nn/functional/npair_loss_cn.rst index 200f6bd8212..bba84a2f541 100644 --- a/docs/api/paddle/nn/functional/npair_loss_cn.rst +++ b/docs/api/paddle/nn/functional/npair_loss_cn.rst @@ -5,7 +5,7 @@ npair_loss .. 
py:function:: paddle.nn.functional.npair_loss(anchor, positive, labels, l2_reg=0.002) -参考阅读 `Improved Deep Metric Learning with Multi class N pair Loss Objective `_ +参考阅读 `Improved Deep Metric Learning with Multi class N pair Loss Objective `_ NPair损失需要成对的数据。NPair损失分为两部分:第一部分是对嵌入向量进行L2正则化;第二部分是每一对数据的相似性矩阵的每一行和映射到ont-hot之后的标签的交叉熵损失的和。 diff --git a/docs/api/paddle/nn/functional/one_hot_cn.rst b/docs/api/paddle/nn/functional/one_hot_cn.rst index 10efccac81f..6ff2d3f609b 100644 --- a/docs/api/paddle/nn/functional/one_hot_cn.rst +++ b/docs/api/paddle/nn/functional/one_hot_cn.rst @@ -16,7 +16,7 @@ one_hot X.shape = [4] X.data = [1, 1, 3, 0] num_classes = 4 - + 输出: Out.shape = [4, 4] Out.data = [[0., 1., 0., 0.], diff --git a/docs/api/paddle/nn/functional/pad_cn.rst b/docs/api/paddle/nn/functional/pad_cn.rst index 6e34465f20f..3b34c1fd95f 100644 --- a/docs/api/paddle/nn/functional/pad_cn.rst +++ b/docs/api/paddle/nn/functional/pad_cn.rst @@ -39,7 +39,7 @@ Tensor,对 ``x`` 进行 ``'pad'`` 的结果,数据类型和 ``x`` 相同。 x = [[[[[1., 2., 3.], [4., 5., 6.]]]]] - + Case 0: pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0], mode = 'constant' diff --git a/docs/api/paddle/nn/functional/sequence_mask_cn.rst b/docs/api/paddle/nn/functional/sequence_mask_cn.rst index df72bf69f29..cc07d7b3378 100644 --- a/docs/api/paddle/nn/functional/sequence_mask_cn.rst +++ b/docs/api/paddle/nn/functional/sequence_mask_cn.rst @@ -28,7 +28,7 @@ sequence_mask [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]] - + diff --git a/docs/api/paddle/nn/functional/softmax_cn.rst b/docs/api/paddle/nn/functional/softmax_cn.rst index 34e33a559e7..b38ed66756f 100644 --- a/docs/api/paddle/nn/functional/softmax_cn.rst +++ b/docs/api/paddle/nn/functional/softmax_cn.rst @@ -31,7 +31,7 @@ softmax # input - x.shape = [2, 3, 4] + x.shape = [2, 3, 4] x.data = [[[2.0, 3.0, 4.0, 5.0], [3.0, 4.0, 5.0, 6.0], @@ -59,7 +59,7 @@ softmax # input - x.shape = [2, 3, 4] + x.shape = [2, 3, 4] x.data = [[[2.0, 3.0, 4.0, 5.0], [3.0, 4.0, 5.0, 6.0], @@ -79,7 +79,7 @@ 
softmax [0.97555875, 0.97555875, 0.93623955, 0.93623955]], [[0.00490169, 0.00490169, 0.00490169, 0.00490169], [0.26762315, 0.26762315, 0.26762315, 0.26762315], - [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] + [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] 参数 diff --git a/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst b/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst index bcf2a1be817..b7062cb57e0 100644 --- a/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst +++ b/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst @@ -37,7 +37,7 @@ softmax_with_cross_entropy - **logits** (Tensor) - 维度为任意维的多维 ``Tensor``,数据类型为float32或float64。表示未缩放的输入。 - **label** (Tensor) - 如果 ``soft_label`` 为True, ``label`` 是一个和 ``logits`` 维度相同的的 ``Tensor``。如果 ``soft_label`` 为False, ``label`` 是一个在axis维度上大小为1,其它维度上与 ``logits`` 维度相同的 ``Tensor`` 。 - **soft_label** (bool,可选) - 指明是否将输入标签当作软标签。默认值:False。 - - **ignore_index** (int,可选) - 指明要无视的目标值,使其不对输入梯度有贡献。仅在 ``soft_label`` 为False时有效,默认值:kIgnoreIndex(-100)。 + - **ignore_index** (int,可选) - 指明要无视的目标值,使其不对输入梯度有贡献。仅在 ``soft_label`` 为False时有效,默认值:kIgnoreIndex(-100)。 - **numeric_stable_mode** (bool,可选) – 指明是否使用一个具有更佳数学稳定性的算法。仅在 ``soft_label`` 为 False的GPU模式下生效。若 ``soft_label`` 为 True或者执行设备为CPU,算法一直具有数学稳定性。注意使用稳定算法时速度可能会变慢。默认值:True。 - **return_softmax** (bool,可选) – 指明是否在返回交叉熵计算结果的同时返回softmax结果。默认值:False。 - **axis** (int,可选) – 执行softmax计算的维度索引。其范围为 :math:`[-1,rank-1]`,其中 ``rank`` 是输入 ``logits`` 的秩。默认值:-1。 diff --git a/docs/api/paddle/nn/functional/temporal_shift_cn.rst b/docs/api/paddle/nn/functional/temporal_shift_cn.rst index a2f8b650218..57930d0c26f 100644 --- a/docs/api/paddle/nn/functional/temporal_shift_cn.rst +++ b/docs/api/paddle/nn/functional/temporal_shift_cn.rst @@ -27,7 +27,7 @@ temporal_shift 步骤4:沿第3(C)维连接三个切片,并将结果重塑为[N*T, C, H, W]。 -有关时序移动的详细信息,请参阅文件:`Temporal Shift Module `_ +有关时序移动的详细信息,请参阅文件:`Temporal Shift Module `_ 参数 ::::::::: diff --git 
a/docs/api/paddle/nn/functional/upsample_cn.rst b/docs/api/paddle/nn/functional/upsample_cn.rst index 4ccf038cfd0..16275a35cbd 100644 --- a/docs/api/paddle/nn/functional/upsample_cn.rst +++ b/docs/api/paddle/nn/functional/upsample_cn.rst @@ -19,9 +19,9 @@ upsample TRILINEAR:三线性插值 BICUBIC:双三次插值 - + LINEAR:线性插值 - + AREA:面积插值 @@ -39,7 +39,7 @@ Align_corners和align_mode是可选参数,插值的计算方法可以由它们 :: - + scale 计算方法: if align_corners = True && out_size > 1 : diff --git a/docs/api/paddle/nn/initializer/KaimingNormal_cn.rst b/docs/api/paddle/nn/initializer/KaimingNormal_cn.rst index 107815fac16..d96b964eb0e 100644 --- a/docs/api/paddle/nn/initializer/KaimingNormal_cn.rst +++ b/docs/api/paddle/nn/initializer/KaimingNormal_cn.rst @@ -25,7 +25,7 @@ KaimingNormal - **negative_slope** (float,可选) - 只适用于使用 leaky_relu 作为激活函数时的 negative_slope 参数。默认值为 :math:`0.0`。 - **nonlinearity** (str,可选) - 非线性激活函数。默认值为relu。 -.. note:: +.. note:: 在大多数情况下推荐设置 fan_in 为 None。 diff --git a/docs/api/paddle/nn/initializer/KaimingUniform_cn.rst b/docs/api/paddle/nn/initializer/KaimingUniform_cn.rst index f0ef3f692b5..22ae851c818 100644 --- a/docs/api/paddle/nn/initializer/KaimingUniform_cn.rst +++ b/docs/api/paddle/nn/initializer/KaimingUniform_cn.rst @@ -25,7 +25,7 @@ KaimingUniform - **negative_slope** (float,可选) - 只适用于使用 leaky_relu 作为激活函数时的 negative_slope 参数。默认值为 :math:`0.0`。 - **nonlinearity** (str,可选) - 非线性激活函数。默认值为 relu。 -.. note:: +.. 
note:: 在大多数情况下推荐设置 fan_in 为 None。 diff --git a/docs/api/paddle/nn/initializer/Orthogonal_cn.rst b/docs/api/paddle/nn/initializer/Orthogonal_cn.rst index 0c45eb26735..1f072db6113 100644 --- a/docs/api/paddle/nn/initializer/Orthogonal_cn.rst +++ b/docs/api/paddle/nn/initializer/Orthogonal_cn.rst @@ -45,4 +45,3 @@ Orthogonal linear = paddle.nn.Linear(15, 10, weight_attr=weight_attr) # linear.weight: X' * X = I - \ No newline at end of file diff --git a/docs/api/paddle/nn/initializer/Uniform_cn.rst b/docs/api/paddle/nn/initializer/Uniform_cn.rst index 4483af486c4..544d5e872c7 100644 --- a/docs/api/paddle/nn/initializer/Uniform_cn.rst +++ b/docs/api/paddle/nn/initializer/Uniform_cn.rst @@ -3,7 +3,7 @@ Uniform ------------------------------- -.. py:class:: paddle.nn.initializer.Uniform(low=-1.0, high=1.0, name=None) +.. py:class:: paddle.nn.initializer.Uniform(low=-1.0, high=1.0, name=None) 均匀分布初始化方法。 diff --git a/docs/api/paddle/nn/initializer/XavierNormal_cn.rst b/docs/api/paddle/nn/initializer/XavierNormal_cn.rst index f249104db57..2cd69df4cee 100644 --- a/docs/api/paddle/nn/initializer/XavierNormal_cn.rst +++ b/docs/api/paddle/nn/initializer/XavierNormal_cn.rst @@ -11,7 +11,7 @@ XavierNormal 该初始化函数用于保持所有层的梯度尺度几乎一致。所使用的正态分布的的均值为 :math:`0`,标准差为 .. math:: - + x = \sqrt{\frac{2.0}{fan\_in+fan\_out}}. 
参数 diff --git a/docs/api/paddle/nn/utils/spectral_norm_cn.rst b/docs/api/paddle/nn/utils/spectral_norm_cn.rst index d6cc0d97e93..8d558aac436 100644 --- a/docs/api/paddle/nn/utils/spectral_norm_cn.rst +++ b/docs/api/paddle/nn/utils/spectral_norm_cn.rst @@ -23,7 +23,7 @@ spectral_norm \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ \mathbf{W} &= \frac{\mathbf{W}}{\sigma(\mathbf{W})} -可参考:`Spectral Normalization `_ +可参考:`Spectral Normalization `_ 参数 :::::::::::: diff --git a/docs/api/paddle/nn/utils/weight_norm_cn.rst b/docs/api/paddle/nn/utils/weight_norm_cn.rst index 8573608f3d1..c6934c5f0eb 100644 --- a/docs/api/paddle/nn/utils/weight_norm_cn.rst +++ b/docs/api/paddle/nn/utils/weight_norm_cn.rst @@ -10,14 +10,14 @@ weight_norm .. math:: \mathbf{w} = g \dfrac{v}{\|v\|} -权重归一化可以将神经网络中权重向量的长度与其方向解耦,权重归一化可以用两个变量(例如:代表长度的变量 `weight_g` 和代表方向的变量 `weight_v`)来代替由名字(例如:`weight`)指定的变量。详细可以参考论文:`A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ +权重归一化可以将神经网络中权重向量的长度与其方向解耦,权重归一化可以用两个变量(例如:代表长度的变量 `weight_g` 和代表方向的变量 `weight_v`)来代替由名字(例如:`weight`)指定的变量。详细可以参考论文:`A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ 参数 :::::::::::: - **layer** (paddle.nn.Layer) - 要添加权重归一化的层。 - **name** (str,可选) - 权重参数的名字。默认值为 ``weight``。 - - **dim** (int|None,可选) - 进行归一化操作的切片所在维度,是小于权重Tensor rank的非负数。比如卷积的权重shape是 [cout,cin,kh,kw] , rank是4,则dim可以选0,1,2,3;fc的权重shape是 [cout,cin] ,rank是2,dim可以选0,1。如果为None就对所有维度上的元素做归一化。默认:0。 + - **dim** (int|None,可选) - 进行归一化操作的切片所在维度,是小于权重Tensor rank的非负数。比如卷积的权重shape是 [cout,cin,kh,kw] , rank是4,则dim可以选0,1,2,3;fc的权重shape是 [cout,cin] ,rank是2,dim可以选0,1。如果为None就对所有维度上的元素做归一化。默认:0。 返回 :::::::::::: diff --git a/docs/api/paddle/nonzero_cn.rst b/docs/api/paddle/nonzero_cn.rst index fea5d91726d..ec36dcf5cec 100644 --- a/docs/api/paddle/nonzero_cn.rst +++ b/docs/api/paddle/nonzero_cn.rst @@ -11,7 +11,7 @@ nonzero 返回输入 ``x`` 中非零元素的坐标。如果输入 ``x`` 有 ``n`` 维,共包含 ``z`` 个非零元素,当 ``as_tuple = False`` 时, 返回结果是一个 ``shape`` 
等于 ``[z x n]`` 的 ``Tensor``,第 ``i`` 行代表输入中第 ``i`` 个非零元素的坐标;当 ``as_tuple = True`` 时, 返回结果是由 ``n`` 个大小为 ``z`` 的 ``1-D Tensor`` 构成的元组,第 ``i`` 个 ``1-D Tensor`` 记录输入的非零元素在第 ``i`` 维的坐标。 - + 参数 ::::::::: @@ -21,7 +21,7 @@ nonzero 返回 ::::::::: - **Tensor or tuple(1-D Tensor)**,数据类型为 **INT64** 。 - + 代码示例 ::::::::: diff --git a/docs/api/paddle/not_equal_cn.rst b/docs/api/paddle/not_equal_cn.rst index fa56474f034..e96c32c675e 100644 --- a/docs/api/paddle/not_equal_cn.rst +++ b/docs/api/paddle/not_equal_cn.rst @@ -16,7 +16,7 @@ not_equal - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **y** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: diff --git a/docs/api/paddle/ones_like_cn.rst b/docs/api/paddle/ones_like_cn.rst index 9c60986ce2d..72f7f89b8b6 100644 --- a/docs/api/paddle/ones_like_cn.rst +++ b/docs/api/paddle/ones_like_cn.rst @@ -13,7 +13,7 @@ ones_like - **x** (Tensor) – 输入的Tensor,数据类型可以是 bool,float16,float32,float64,int32,int64。 - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 bool,float16, float32,float64,int32,int64。当该参数值为 None 时,输出 Tensor 的数据类型与 ``x`` 相同。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::: diff --git a/docs/api/paddle/optimizer/Adadelta_cn.rst b/docs/api/paddle/optimizer/Adadelta_cn.rst index d4d02fb2918..a4a69ec1a33 100644 --- a/docs/api/paddle/optimizer/Adadelta_cn.rst +++ b/docs/api/paddle/optimizer/Adadelta_cn.rst @@ -99,7 +99,7 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` 。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 - + **返回** tuple(optimize_ops, 
params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 diff --git a/docs/api/paddle/optimizer/Adagrad_cn.rst b/docs/api/paddle/optimizer/Adagrad_cn.rst index 4a01ad43283..767d1c7537a 100644 --- a/docs/api/paddle/optimizer/Adagrad_cn.rst +++ b/docs/api/paddle/optimizer/Adagrad_cn.rst @@ -12,7 +12,7 @@ Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以 .. math:: - moment\_out &= moment + grad * grad\\param\_out + moment\_out &= moment + grad * grad\\param\_out &= param - \frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon} @@ -28,7 +28,7 @@ Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以 - **learning_rate** (float|Tensor) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Tensor。 - **epsilon** (float,可选) - 维持数值稳定性的浮点型值,默认值为1e-06。 - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 diff --git a/docs/api/paddle/optimizer/AdamW_cn.rst b/docs/api/paddle/optimizer/AdamW_cn.rst index 20638e4cac8..bd4f96d4916 100755 --- a/docs/api/paddle/optimizer/AdamW_cn.rst +++ b/docs/api/paddle/optimizer/AdamW_cn.rst @@ -23,7 +23,7 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION `_ +相关论文:`Adam: A Method for Stochastic Optimization `_ .. 
note:: 当前,AdamW不支持稀疏参数优化。 @@ -134,7 +134,7 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` 。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 - + **返回** tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 diff --git a/docs/api/paddle/optimizer/Adam_cn.rst b/docs/api/paddle/optimizer/Adam_cn.rst index daa1346b918..afa37f99bf5 100755 --- a/docs/api/paddle/optimizer/Adam_cn.rst +++ b/docs/api/paddle/optimizer/Adam_cn.rst @@ -23,17 +23,17 @@ Adam优化器出自 `Adam论文 `_ 的第二节 .. math:: param\_out=param-learning\_rate*\frac{moment\_1}{\sqrt{moment\_2}+\epsilon}\\ -相关论文:`Adam: A Method for Stochastic Optimization `_ +相关论文:`Adam: A Method for Stochastic Optimization `_ 参数 :::::::::::: - + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 - **beta1** (float|Tensor,可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9。 - **beta2** (float|Tensor,可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999。 - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08。 - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 
:ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 @@ -128,7 +128,7 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 - + **返回** tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 diff --git a/docs/api/paddle/optimizer/Adamax_cn.rst b/docs/api/paddle/optimizer/Adamax_cn.rst index f5f98ba803c..53539923ab6 100755 --- a/docs/api/paddle/optimizer/Adamax_cn.rst +++ b/docs/api/paddle/optimizer/Adamax_cn.rst @@ -23,7 +23,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 .. 
math:: param\_out=param−learning\_rate*\frac{moment\_out}{inf\_norm\_out}\\ -相关论文:`Adam: A Method for Stochastic Optimization `_ +相关论文:`Adam: A Method for Stochastic Optimization `_ 论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性,避免除0错误,此处增加了这个参数。 @@ -35,7 +35,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 - **beta2** (float,可选) - 二阶矩估计的指数衰减率,默认值为0.999。 - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08。 - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 @@ -63,7 +63,7 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 out.backward() adam.step() adam.clear_grad() - + 方法 :::::::::::: @@ -109,7 +109,7 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None。 - + **返回** tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 diff --git a/docs/api/paddle/optimizer/Lamb_cn.rst b/docs/api/paddle/optimizer/Lamb_cn.rst index c7068df1d4e..c6b47bf5437 100755 
--- a/docs/api/paddle/optimizer/Lamb_cn.rst +++ b/docs/api/paddle/optimizer/Lamb_cn.rst @@ -24,7 +24,7 @@ LAMB(Layer-wise Adaptive Moments optimizer for Batching training)优化器 其中 :math:`m` 表示第一个动量,:math:`v` 代表第二个动量,:math:`\eta` 代表学习率,:math:`\lambda` 代表LAMB的权重学习率。 -相关论文:`Large Batch Optimization for Deep Learning: Training BERT in 76 minutes `_ +相关论文:`Large Batch Optimization for Deep Learning: Training BERT in 76 minutes `_ 参数 :::::::::::: @@ -103,7 +103,7 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None。 - + **返回** tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 diff --git a/docs/api/paddle/optimizer/Momentum_cn.rst b/docs/api/paddle/optimizer/Momentum_cn.rst index 35672225f0a..5f22a222534 100644 --- a/docs/api/paddle/optimizer/Momentum_cn.rst +++ b/docs/api/paddle/optimizer/Momentum_cn.rst @@ -92,7 +92,7 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 - + **返回** tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 
``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 @@ -148,5 +148,5 @@ set_lr(value) .. note:: 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 - + diff --git a/docs/api/paddle/optimizer/Optimizer_cn.rst b/docs/api/paddle/optimizer/Optimizer_cn.rst index f8b7c66c173..06c3808c1d7 100755 --- a/docs/api/paddle/optimizer/Optimizer_cn.rst +++ b/docs/api/paddle/optimizer/Optimizer_cn.rst @@ -11,10 +11,10 @@ Optimizer 参数 :::::::::::: - + - **learning_rate** (float|_LRSeduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 @@ -87,7 +87,7 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 - + **返回** tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 diff --git a/docs/api/paddle/optimizer/Overview_cn.rst 
b/docs/api/paddle/optimizer/Overview_cn.rst index 8ea67d8dd52..bef0516d854 100644 --- a/docs/api/paddle/optimizer/Overview_cn.rst +++ b/docs/api/paddle/optimizer/Overview_cn.rst @@ -29,7 +29,7 @@ paddle.optimizer 目录下包含飞桨框架支持的优化器算法相关的API " :ref:`Optimizer ` ", "飞桨框架优化器基类" " :ref:`RMSProp ` ", "RMSProp优化器" " :ref:`SGD ` ", "SGD优化器" - + .. _about_lr: 学习率衰减相关API diff --git a/docs/api/paddle/optimizer/RMSProp_cn.rst b/docs/api/paddle/optimizer/RMSProp_cn.rst index 704d7653651..e913a986f84 100755 --- a/docs/api/paddle/optimizer/RMSProp_cn.rst +++ b/docs/api/paddle/optimizer/RMSProp_cn.rst @@ -13,24 +13,24 @@ RMSProp .. math:: r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ w & = w - \frac{\eta} {\sqrt{r(w,t) + \epsilon}} \nabla Q_{i}(w) - + 第一个等式计算每个权重平方梯度的移动平均值,然后将梯度除以 :math:`sqrtv(w,t)` 。 - + .. math:: r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ v(w, t) & = \beta v(w, t-1) +\frac{\eta} {\sqrt{r(w,t) +\epsilon}} \nabla Q_{i}(w)\\ w & = w - v(w, t) 如果居中为真: - + .. 
math:: r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ g(w, t) & = \rho g(w, t-1) + (1 -\rho)\nabla Q_{i}(w)\\ v(w, t) & = \beta v(w, t-1) + \frac{\eta} {\sqrt{r(w,t) - (g(w, t))^2 +\epsilon}} \nabla Q_{i}(w)\\ w & = w - v(w, t) - + 其中,:math:`ρ` 是超参数,典型值为0.9,0.95等。:math:`beta` 是动量术语。:math:`epsilon` 是一个平滑项,用于避免除零,通常设置在1e-4到1e-8的范围内。 - + 参数 :::::::::::: @@ -40,13 +40,13 @@ RMSProp - **momentum** (float,可选) - 方程中的β是动量项,默认值0.0。 - **centered** (bool,可选) - 如果为True,则通过梯度的估计方差,对梯度进行归一化;如果False,则由未centered的第二个moment归一化。将此设置为True有助于模型训练,但会消耗额外计算和内存资源。默认为False。 - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 默认值为None,此时将不进行梯度裁剪。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 代码示例 :::::::::::: @@ -105,10 +105,10 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 - + **返回** 
tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 @@ -180,7 +180,7 @@ set_lr(value) import paddle - + linear = paddle.nn.Linear(10, 10) rmsprop = paddle.optimizer.RMSProp(0.1, parameters=linear.parameters()) diff --git a/docs/api/paddle/optimizer/SGD_cn.rst b/docs/api/paddle/optimizer/SGD_cn.rst index 17c1f906608..cf8e7d232f8 100644 --- a/docs/api/paddle/optimizer/SGD_cn.rst +++ b/docs/api/paddle/optimizer/SGD_cn.rst @@ -9,7 +9,7 @@ SGD .. math:: \\param\_out=param-learning\_rate*grad\\ - + 为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 @@ -80,10 +80,10 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 - + **返回** tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 @@ -140,7 +140,7 @@ set_lr(value) .. 
note:: 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 - + diff --git a/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst b/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst index 021e1a19988..7d95b4c833f 100644 --- a/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst @@ -3,7 +3,7 @@ CosineAnnealingDecay ----------------------------------- -.. py:class:: paddle.optimizer.lr.CosineAnnealingDecay(learning_rate, T_max, eta_min=0, last_epoch=-1, verbose=False) +.. py:class:: paddle.optimizer.lr.CosineAnnealingDecay(learning_rate, T_max, eta_min=0, last_epoch=-1, verbose=False) 该接口使用 ``cosine annealing`` 的策略来动态调整学习率。 @@ -21,14 +21,14 @@ CosineAnnealingDecay :math:`\eta_{max}` 的初始值为 ``learning_rate``, :math:`T_{cur}` 是SGDR(重启训练SGD)训练过程中的当前训练轮数。SGDR的训练方法可以参考论文, 这里只是实现了 ``cosine annealing`` 动态学习率,热启训练部分没有实现。 -相关论文:`SGDR: Stochastic Gradient Descent with Warm Restarts `_ +相关论文:`SGDR: Stochastic Gradient Descent with Warm Restarts `_ 参数 :::::::::::: - **learning_rate** (float) - 初始学习率,也就是公式中的 :math:`\eta_{max}`,数据类型为Python float。 - **T_max** (float|int) - 训练的上限轮数,是余弦衰减周期的一半。 - - **eta_min** (float|int,可选) - 学习率的最小值,即公式中的 :math:`\eta_{min}`。默认值为0。 + - **eta_min** (float|int,可选) - 学习率的最小值,即公式中的 :math:`\eta_{min}`。默认值为0。 - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 @@ -58,7 +58,7 @@ CosineAnnealingDecay sgd.clear_gradients() scheduler.step() # If you update learning rate each step # scheduler.step() # If you update learning rate each epoch - + # train on static graph mode paddle.enable_static() diff --git a/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst b/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst index a787b2bf436..9c128de2b5c 100644 --- a/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst +++ b/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst @@ -9,7 +9,7 @@ CyclicLR 
该策略将学习率调整的过程视为一个又一个的循环,学习率根据指定的缩放策略以固定的频率在最大和最小学习率之间变化。 -相关论文:`Cyclic Learning Rates for Training Neural Networks `_ +相关论文:`Cyclic Learning Rates for Training Neural Networks `_ 内置了三种学习率缩放策略:**triangular**:没有任何缩放的三角循环。**triangular2**:每个三角循环里将初始幅度缩放一半。**exp_range**:每个循环中将初始幅度按照指数函数进行缩放,公式为 :math:`gamma^{iterations}`。 diff --git a/docs/api/paddle/optimizer/lr/LRScheduler_cn.rst b/docs/api/paddle/optimizer/lr/LRScheduler_cn.rst index b3f7b0a2247..cc58ec927b7 100644 --- a/docs/api/paddle/optimizer/lr/LRScheduler_cn.rst +++ b/docs/api/paddle/optimizer/lr/LRScheduler_cn.rst @@ -3,7 +3,7 @@ LRScheduler ----------------------------------- -.. py:class:: paddle.optimizer.lr.LRScheduler(learning_rate=0.1, last_epoch=-1, verbose=False) +.. py:class:: paddle.optimizer.lr.LRScheduler(learning_rate=0.1, last_epoch=-1, verbose=False) 学习率策略的基类。定义了所有学习率调整策略的公共接口。 @@ -105,7 +105,7 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 请参考 ``基类LRScheduler`` 的任意子类实现,这里以 ``StepLR`` 为例进行了示例: .. code-block:: python - + import paddle import numpy as np diff --git a/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst b/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst index 835640337de..7299f2b3b33 100644 --- a/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst @@ -9,7 +9,7 @@ MultiStepDecay 衰减过程可以参考以下代码: -.. code-block:: text +.. 
code-block:: text learning_rate = 0.5 milestones = [30, 50] diff --git a/docs/api/paddle/optimizer/lr/OneCycleLR_cn.rst b/docs/api/paddle/optimizer/lr/OneCycleLR_cn.rst index 7f81808824b..3d94fb94c38 100644 --- a/docs/api/paddle/optimizer/lr/OneCycleLR_cn.rst +++ b/docs/api/paddle/optimizer/lr/OneCycleLR_cn.rst @@ -9,7 +9,7 @@ OneCycleLR 该策略将学习率从初始学习率调整到最大学习率,再从最大学习率调整到远小于初始学习率的最小学习率。 -相关论文:`Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates `_ +相关论文:`Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates `_ 注意,本调度器默认行为参考fastai仓库,其声称二阶段拥有比三阶段更好的效果。设置 ``three_phase=True`` 可以与论文中所描述的行为保持一致。 diff --git a/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst b/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst index 255f0f1fc4b..f5502ff7276 100644 --- a/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst @@ -12,7 +12,7 @@ PolynomialDecay .. math:: - decay\_steps & = decay\_steps * math.ceil(\frac{epoch}{decay\_steps}) + decay\_steps & = decay\_steps * math.ceil(\frac{epoch}{decay\_steps}) new\_learning\_rate & = (learning\_rate-end\_lr)*(1-\frac{epoch}{decay\_steps})^{power}+end\_lr @@ -20,7 +20,7 @@ PolynomialDecay .. math:: - epoch & = min(epoch, decay\_steps) + epoch & = min(epoch, decay\_steps) new\_learning\_rate & = (learning\_rate-end\_lr)*(1-\frac{epoch}{decay\_steps})^{power}+end\_lr diff --git a/docs/api/paddle/optimizer/lr/StepDecay_cn.rst b/docs/api/paddle/optimizer/lr/StepDecay_cn.rst index c1c992839db..9b4fcd28d88 100644 --- a/docs/api/paddle/optimizer/lr/StepDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/StepDecay_cn.rst @@ -9,7 +9,7 @@ StepDecay 衰减过程可以参考以下代码: -.. code-block:: text +.. 
code-block:: text learning_rate = 0.5 step_size = 30 diff --git a/docs/api/paddle/outer_cn.rst b/docs/api/paddle/outer_cn.rst index f5346a202e2..df27ba3032d 100644 --- a/docs/api/paddle/outer_cn.rst +++ b/docs/api/paddle/outer_cn.rst @@ -26,13 +26,13 @@ Tensor, x、y的外积结果,Tensor shape为 [x.size, y.size]。 :::::::::: .. code-block:: python - + import paddle x = paddle.arange(1, 4).astype('float32') y = paddle.arange(1, 6).astype('float32') out = paddle.outer(x, y) - + print(out) # ([[1, 2, 3, 4, 5], # [2, 4, 6, 8, 10], diff --git a/docs/api/paddle/prod_cn.rst b/docs/api/paddle/prod_cn.rst index 7c5275b999b..86e13676959 100644 --- a/docs/api/paddle/prod_cn.rst +++ b/docs/api/paddle/prod_cn.rst @@ -21,9 +21,9 @@ prod 返回 :::::::::::: 输入 Tensor 在指定 axis 上的累乘的结果。 - - + + 代码示例 :::::::::::: - + COPY-FROM: paddle.prod diff --git a/docs/api/paddle/profiler/Overview_cn.rst b/docs/api/paddle/profiler/Overview_cn.rst index 9b9f26e1305..cbc6541139a 100644 --- a/docs/api/paddle/profiler/Overview_cn.rst +++ b/docs/api/paddle/profiler/Overview_cn.rst @@ -25,7 +25,7 @@ Profiler功能使用相关的枚举类 API " :ref:`ProfilerTarget ` ", "用来指定性能分析的设备" " :ref:`ProfilerState ` ", "用来表示性能分析器的状态" " :ref:`SortedKeys ` ", "用来指定表单内数据的排序方式" - + .. _about_profiler_control: Profiler周期控制和性能数据IO API @@ -39,7 +39,7 @@ Profiler周期控制和性能数据IO API " :ref:`export_chrome_tracing ` ", "用于生成将性能数据保存到google chrome tracing文件的回调函数" " :ref:`export_protobuf ` ", "用于生成将性能数据保存到protobuf文件的回调函数" " :ref:`load_profiler_result ` ", "用于载入所保存到protobuf文件的性能数据" - + .. _about_profiler_profiler: Profiler性能分析器 API @@ -50,7 +50,7 @@ Profiler性能分析器 API :widths: 10, 30 " :ref:`Profiler ` ", "性能分析器" - + .. 
_about_profiler_record: Profiler性能数据自定义记录 API diff --git a/docs/api/paddle/profiler/Profiler_cn.rst b/docs/api/paddle/profiler/Profiler_cn.rst index 98335f5be92..d2d908c0ce5 100644 --- a/docs/api/paddle/profiler/Profiler_cn.rst +++ b/docs/api/paddle/profiler/Profiler_cn.rst @@ -133,7 +133,7 @@ summary(sorted_by=SortedKeys.CPUTotal, op_detail=True, thread_sep=False, time_un - **sorted_by** ( :ref:`SortedKeys `,可选) – 表单的数据项排列方式,默认值SortedKeys.CPUTotal。 - **op_detail** (bool,可选) – 是否打印算子内各过程的详细信息,默认值True。 - **thread_sep** (bool,可选) - 是否分线程打印,默认值False。 - - **time_unit** (str,可选) - 表单数据的时间单位,默认为'ms',可选's'、'us'、'ns'。 + - **time_unit** (str,可选) - 表单数据的时间单位,默认为'ms',可选's'、'us'、'ns'。 **代码示例** diff --git a/docs/api/paddle/randperm_cn.rst b/docs/api/paddle/randperm_cn.rst index ac8107edfc7..962dc2902e6 100644 --- a/docs/api/paddle/randperm_cn.rst +++ b/docs/api/paddle/randperm_cn.rst @@ -9,7 +9,7 @@ randperm 参数 :::::::::::: - - **n** (int) - 随机序列的上限(不包括在序列中),应该大于0。 + - **n** (int) - 随机序列的上限(不包括在序列中),应该大于0。 - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 int32、int64、float32、float64。默认值为 int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/regularizer/L1Decay_cn.rst b/docs/api/paddle/regularizer/L1Decay_cn.rst index 97c3d1c6480..86aefab13b3 100644 --- a/docs/api/paddle/regularizer/L1Decay_cn.rst +++ b/docs/api/paddle/regularizer/L1Decay_cn.rst @@ -8,9 +8,9 @@ L1Decay L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵稀疏。 -该类生成的实例对象,需要设置在 :ref:`cn_api_paddle_ParamAttr` 或者 ``optimizer`` +该类生成的实例对象,需要设置在 :ref:`cn_api_paddle_ParamAttr` 或者 ``optimizer`` (例如 :ref:`cn_api_paddle_optimizer_Momentum` )中,在 ``ParamAttr`` 中设置时,只对该 -网络层中的可训练参数生效;在 ``optimizer`` 中设置时,会对所有的可训练参数生效;如果同时设置,在 +网络层中的可训练参数生效;在 ``optimizer`` 中设置时,会对所有的可训练参数生效;如果同时设置,在 ``ParamAttr`` 中设置的优先级会高于在 ``optimizer`` 中的设置,即,对于一个可训练的参数,如果在 ``ParamAttr`` 中定义了正则化,那么会忽略 ``optimizer`` 中的正则化;否则会使用 ``optimizer``中的 正则化。 @@ -29,7 +29,7 @@ L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵 :::::::::::: .. 
code-block:: python - + # Example1: set Regularizer in optimizer import paddle from paddle.regularizer import L1Decay @@ -53,14 +53,14 @@ L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵 :::::::::::: .. code-block:: python - + # Example2: set Regularizer in parameters # Set L1 regularization in parameters. # Global regularizer does not take effect on my_conv2d for this case. from paddle.nn import Conv2D from paddle import ParamAttr from paddle.regularizer import L2Decay - + my_conv2d = Conv2D( in_channels=10, out_channels=10, diff --git a/docs/api/paddle/regularizer/L2Decay_cn.rst b/docs/api/paddle/regularizer/L2Decay_cn.rst index c84325311df..38b0e01532c 100644 --- a/docs/api/paddle/regularizer/L2Decay_cn.rst +++ b/docs/api/paddle/regularizer/L2Decay_cn.rst @@ -8,7 +8,7 @@ L2Decay L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模型对训练数据过拟合。 -该类生成的实例对象,需要设置在 :ref:`cn_api_paddle_ParamAttr` 或者 ``optimizer`` +该类生成的实例对象,需要设置在 :ref:`cn_api_paddle_ParamAttr` 或者 ``optimizer`` (例如 :ref:`cn_api_paddle_optimizer_Momentum` )中,在 ``ParamAttr`` 中设置时, 只对该网络层中的参数生效;在 ``optimizer`` 中设置时,会对所有的参数生效;如果同时设置, 在 ``ParamAttr`` 中设置的优先级会高于在 ``optimizer`` 中设置,即,对于一个可训练的参数,如果在 @@ -29,7 +29,7 @@ L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模 :::::::::::: .. code-block:: python - + # Example1: set Regularizer in optimizer import paddle from paddle.regularizer import L2Decay @@ -53,7 +53,7 @@ L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模 :::::::::::: .. code-block:: python - + # Example2: set Regularizer in parameters # Set L2 regularization in parameters. # Global regularizer does not take effect on my_conv2d for this case. 
diff --git a/docs/api/paddle/roll_cn.rst b/docs/api/paddle/roll_cn.rst index fa01f8d9e0b..9b56835b721 100644 --- a/docs/api/paddle/roll_cn.rst +++ b/docs/api/paddle/roll_cn.rst @@ -22,7 +22,7 @@ roll ::::::::: - **Tensor**,数据类型同输入。 - + 代码示例 ::::::::: diff --git a/docs/api/paddle/rsqrt_cn.rst b/docs/api/paddle/rsqrt_cn.rst index 56bda78934e..f91b1771126 100644 --- a/docs/api/paddle/rsqrt_cn.rst +++ b/docs/api/paddle/rsqrt_cn.rst @@ -21,7 +21,7 @@ rsqrt激活函数。 参数 :::::::::::: - - **x** (Tensor) – 输入是多维Tensor,数据类型可以是float32和float64。 + - **x** (Tensor) – 输入是多维Tensor,数据类型可以是float32和float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/save_cn.rst b/docs/api/paddle/save_cn.rst index 91a4fa4669b..394ed3b7ca4 100644 --- a/docs/api/paddle/save_cn.rst +++ b/docs/api/paddle/save_cn.rst @@ -21,7 +21,7 @@ save .. toctree:: :maxdepth: 1 - + ../../../../faq/save_cn.md 参数 diff --git a/docs/api/paddle/scatter_cn.rst b/docs/api/paddle/scatter_cn.rst index 6a6c6d588ff..f4cd3a63bbc 100644 --- a/docs/api/paddle/scatter_cn.rst +++ b/docs/api/paddle/scatter_cn.rst @@ -8,7 +8,7 @@ scatter 通过基于 ``updates`` 来更新选定索引 ``index`` 上的输入来获得输出。具体行为如下: .. code-block:: python - + import numpy as np #input: x = np.array([[1, 1], [2, 2], [3, 3]]) @@ -49,16 +49,16 @@ Tensor,与x有相同形状和数据类型。 代码示例 ::::::::: - + .. 
code-block:: python - + import paddle import numpy as np x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) index_data = np.array([2, 1, 0, 1]).astype(np.int64) updates_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32) - + x = paddle.to_tensor(x_data) index = paddle.to_tensor(index_data) updates = paddle.to_tensor(updates_data) diff --git a/docs/api/paddle/scatter_nd_add_cn.rst b/docs/api/paddle/scatter_nd_add_cn.rst index cf3aea74af7..d76f7b4a40c 100644 --- a/docs/api/paddle/scatter_nd_add_cn.rst +++ b/docs/api/paddle/scatter_nd_add_cn.rst @@ -25,7 +25,7 @@ scatter_nd_add updates = [9, 10, 11, 12] 得到: - + output = [0, 22, 12, 14, 4, 5] - 案例 2: @@ -38,7 +38,7 @@ scatter_nd_add updates.shape = (2, 2, 2) 得到: - + output = [[67, 19], [-16, -27]] diff --git a/docs/api/paddle/scatter_nd_cn.rst b/docs/api/paddle/scatter_nd_cn.rst index f07c6682fc8..a9c0de1b808 100644 --- a/docs/api/paddle/scatter_nd_cn.rst +++ b/docs/api/paddle/scatter_nd_cn.rst @@ -17,7 +17,7 @@ scatter_nd - **updates** (Tensor) - 输入的更新张量。形状必须是 :code:`index.shape[:-1] + shape[index.shape[-1]:]`。数据类型可以是float32,float64。 - **shape** (tuple|list) - 要求输出张量的形状。类型是tuple或者list。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: Tensor,数据类型与 :code:`updates` 相同,形状是 :code:`shape` 。 diff --git a/docs/api/paddle/seed_cn.rst b/docs/api/paddle/seed_cn.rst index 6e860fe7642..06ff1a50156 100644 --- a/docs/api/paddle/seed_cn.rst +++ b/docs/api/paddle/seed_cn.rst @@ -17,7 +17,7 @@ seed 返回 :::::::::::: - + Generator:全局默认generator对象。 代码示例 diff --git a/docs/api/paddle/set_cuda_rng_state_cn.rst b/docs/api/paddle/set_cuda_rng_state_cn.rst index 65e81dd24ee..b8740aed605 100644 --- a/docs/api/paddle/set_cuda_rng_state_cn.rst +++ b/docs/api/paddle/set_cuda_rng_state_cn.rst @@ -17,7 +17,7 @@ set_cuda_rng_state 返回 :::::::::::: - + 无。 代码示例 diff --git a/docs/api/paddle/set_flags_cn.rst b/docs/api/paddle/set_flags_cn.rst index 780ef68f81b..6cd6b08ce52 100644 --- 
a/docs/api/paddle/set_flags_cn.rst +++ b/docs/api/paddle/set_flags_cn.rst @@ -17,7 +17,7 @@ set_flags 返回 :::::::::::: - + 无 代码示例 diff --git a/docs/api/paddle/shard_index_cn.rst b/docs/api/paddle/shard_index_cn.rst index 4caf4e2d19e..7ca3f5d4104 100644 --- a/docs/api/paddle/shard_index_cn.rst +++ b/docs/api/paddle/shard_index_cn.rst @@ -10,7 +10,7 @@ shard_index :: shard_size = (index_num + nshards - 1) // nshards - + 对于输入\ `input`\ 中的每个值\ `v`\,我们根据下面的公式设置它新的值: :: diff --git a/docs/api/paddle/signal/Overview_cn.rst b/docs/api/paddle/signal/Overview_cn.rst index 3cbc6d88454..d717f9806ae 100644 --- a/docs/api/paddle/signal/Overview_cn.rst +++ b/docs/api/paddle/signal/Overview_cn.rst @@ -8,6 +8,6 @@ paddle.signal 目录下包含飞桨框架支持的数字信号处理的相关API .. csv-table:: :header: "API名称", "API功能" - + " :ref:`paddle.signal.stft ` ", "短时傅里叶变换" " :ref:`paddle.signal.istft ` ", "逆短时傅里叶变换" diff --git a/docs/api/paddle/signal/istft_cn.rst b/docs/api/paddle/signal/istft_cn.rst index 5c324bbef87..bee5cd27a9a 100644 --- a/docs/api/paddle/signal/istft_cn.rst +++ b/docs/api/paddle/signal/istft_cn.rst @@ -32,13 +32,13 @@ istft - **x** (Tensor) - 输入数据,是维度为2D或者3D的Tensor,数据类型必须为复数(复信号),其 形状为 ``[..., fft_size, num_frames]``; - **n_fft** (int) - 离散傅里叶变换的样本点个数; -- **hop_length** (int,可选) - 对输入分帧时,相邻两帧偏移的样本点个数,默认为 ``None`` +- **hop_length** (int,可选) - 对输入分帧时,相邻两帧偏移的样本点个数,默认为 ``None`` (为 ``n_fft//4``); - **win_length** (int,可选) - 信号窗的长度,默认为 ``None`` (为 ``n_fft``); - **window** (int,可选) - 维度为1D长度为 ``win_length`` 的Tensor,数据类型可为复数。 如果 ``win_length < n_fft``,该Tensor将被补长至 ``n_fft``。默认为 ``None`` (长 度为 ``win_length`` 幅值为1的矩形窗); -- **center** (bool,可选) - 选择是否将输入信号进行补长,使得第 +- **center** (bool,可选) - 选择是否将输入信号进行补长,使得第 :math:`t \times hop\_length` 个样本点在第 :math:`t` 帧的中心,默认为 ``True``; - **normalized** (bool,可选) - 是否将傅里叶变换的结果乘以值为 ``1/sqrt(n)`` 的缩放 系数; @@ -47,7 +47,7 @@ istft 且设为 ``True``,则 ``paddle.signal.istft`` 将返回一个实信号,默认为 ``True``; - **length** (int,可选) - 指定输出信号的长度,该信号将从逆短时傅里叶变换的结果中截取。 默认为 ``None`` (返回不截取的信号); -- 
**return_complex** (bool,可选) - 表示输出的重构信号是否为复信号。如果 +- **return_complex** (bool,可选) - 表示输出的重构信号是否为复信号。如果 ``return_complex`` 设为 ``True``, ``onesided`` 必须设为 ``False``,默认 为 ``False``; - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/signal/stft_cn.rst b/docs/api/paddle/signal/stft_cn.rst index 2d632e08428..93c790f8ab5 100644 --- a/docs/api/paddle/signal/stft_cn.rst +++ b/docs/api/paddle/signal/stft_cn.rst @@ -13,16 +13,16 @@ stft .. math:: X_t[f] = \sum_{n = 0}^{N-1} - \mathrm{window}[n]\ x[t \times H + n]\ + \mathrm{window}[n]\ x[t \times H + n]\ \exp(-{2 \pi j f n}/{N}) 上式中符号的意义: - :math:`t`:第 :math:`t` 帧输入信号; -- :math:`f`:傅里叶变换频域的自变量,如果 ``onesided=False`` , :math:`f` +- :math:`f`:傅里叶变换频域的自变量,如果 ``onesided=False`` , :math:`f` 取值范围是 :math:`0 \leq f < n\_fft` , - 如果 `onesided=True`,取值范围是 - :math:`0 \leq f < \lfloor n\_fft / 2 \rfloor + 1`; + 如果 `onesided=True`,取值范围是 + :math:`0 \leq f < \lfloor n\_fft / 2 \rfloor + 1`; - :math:`N`: ``n_fft`` 参数的值; - :math:`H`: ``hop_length`` 参数的值。 @@ -33,13 +33,13 @@ stft - **x** (Tensor) - 输入数据,是维度为1D或者2D的Tensor,数据类型可为复数(复信号),其形状 为 ``[..., seq_length]``; - **n_fft** (int) - 离散傅里叶变换的样本点个数; -- **hop_length** (int,可选) - 对输入分帧时,相邻两帧偏移的样本点个数,默认为 ``None`` +- **hop_length** (int,可选) - 对输入分帧时,相邻两帧偏移的样本点个数,默认为 ``None`` (为 ``n_fft//4``); - **win_length** (int,可选) - 信号窗的长度,默认为 ``None`` (为 ``n_fft``); - **window** (int,可选) - 维度为1D长度为 ``win_length`` 的Tensor,数据类型可为复数。 如果 ``win_length < n_fft``,该Tensor将被补长至 ``n_fft``。默认为 ``None`` (长度 为 ``win_length`` 幅值为1的矩形窗); -- **center** (bool,可选) - 选择是否将输入信号进行补长,使得第 +- **center** (bool,可选) - 选择是否将输入信号进行补长,使得第 :math:`t \times hop\_length` 个样本点在第 ``t`` 帧的中心,默认为 ``True``; - **pad_mode** (str,可选) - 当 ``center`` 为 ``True`` 时,确定padding的模式,模式 的选项可以参考 ``paddle.nn.functional.pad``,默认为 "reflect"; diff --git a/docs/api/paddle/sort_cn.rst b/docs/api/paddle/sort_cn.rst index 3ed240eee9f..ab1edde289f 100644 --- a/docs/api/paddle/sort_cn.rst +++ b/docs/api/paddle/sort_cn.rst @@ 
-27,4 +27,4 @@ Tensor,排序后的输出(与 ``x`` 维度相同、数据类型相同)。 :::::::::::: COPY-FROM: paddle.sort:code-example1 - + diff --git a/docs/api/paddle/sparse/sparse_coo_tensor_cn.rst b/docs/api/paddle/sparse/sparse_coo_tensor_cn.rst index 3c4f7df5f6e..44e2c62486a 100644 --- a/docs/api/paddle/sparse/sparse_coo_tensor_cn.rst +++ b/docs/api/paddle/sparse/sparse_coo_tensor_cn.rst @@ -29,7 +29,7 @@ sparse_coo_tensor 默认值为None,如果 ``values`` 为python浮点类型,则从 :ref:`cn_api_paddle_framework_get_default_dtype` 获取类型,如果 ``values`` 为其他类型, 则会自动推导类型。 - - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional) - 创建tensor的设备位置,可以是 + - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional) - 创建tensor的设备位置,可以是 CPUPlace, CUDAPinnedPlace, CUDAPlace。默认值为None,使用全局的place。 - **stop_gradient** (bool, optional) - 是否阻断Autograd的梯度传导。默认值为True,此时不进行梯度传传导。 diff --git a/docs/api/paddle/sparse/sparse_csr_tensor_cn.rst b/docs/api/paddle/sparse/sparse_csr_tensor_cn.rst index cf272c5e47b..c702dac3feb 100644 --- a/docs/api/paddle/sparse/sparse_csr_tensor_cn.rst +++ b/docs/api/paddle/sparse/sparse_csr_tensor_cn.rst @@ -33,7 +33,7 @@ sparse_csr_tensor 默认值为None,如果 ``values`` 为python浮点类型,则从 :ref:`cn_api_paddle_framework_get_default_dtype` 获取类型,如果 ``values`` 为其他类型, 则会自动推导类型。 - - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional) - 创建tensor的设备位置,可以是 + - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional) - 创建tensor的设备位置,可以是 CPUPlace, CUDAPinnedPlace, CUDAPlace。默认值为None,使用全局的place。 - **stop_gradient** (bool, optional) - 是否阻断Autograd的梯度传导。默认值为True,此时不进行梯度传传导。 diff --git a/docs/api/paddle/squeeze_cn.rst b/docs/api/paddle/squeeze_cn.rst index 8aa6b1c8a00..c698f577fc4 100644 --- a/docs/api/paddle/squeeze_cn.rst +++ b/docs/api/paddle/squeeze_cn.rst @@ -27,11 +27,11 @@ squeeze axis = 0 Output: out.shape = [3, 1, 5] - + Case 3: Input: - x.shape = [1, 3, 1, 5] # If the dimension of one given axis (3) is not of size 1, the dimension remain unchanged. 
+ x.shape = [1, 3, 1, 5] # If the dimension of one given axis (3) is not of size 1, the dimension remain unchanged. axis = [0, 2, 3] Output: out.shape = [3, 5] @@ -39,7 +39,7 @@ squeeze Case 4: Input: - x.shape = [1, 3, 1, 5] # If axis is negative, axis = axis + ndim (number of dimensions in x). + x.shape = [1, 3, 1, 5] # If axis is negative, axis = axis + ndim (number of dimensions in x). axis = [-2] Output: out.shape = [1, 3, 5] diff --git a/docs/api/paddle/stack_cn.rst b/docs/api/paddle/stack_cn.rst index 8c40981e01c..910b2ec41c8 100644 --- a/docs/api/paddle/stack_cn.rst +++ b/docs/api/paddle/stack_cn.rst @@ -70,13 +70,13 @@ stack :::::::::::: .. code-block:: python - + import paddle - + x1 = paddle.to_tensor([[1.0, 2.0]]) x2 = paddle.to_tensor([[3.0, 4.0]]) x3 = paddle.to_tensor([[5.0, 6.0]]) - + out = paddle.stack([x1, x2, x3], axis=0) print(out.shape) # [3, 1, 2] print(out) @@ -90,5 +90,5 @@ stack # [[[1., 2.], # [3., 4.], # [5., 6.]]] - - + + diff --git a/docs/api/paddle/static/BuildStrategy_cn.rst b/docs/api/paddle/static/BuildStrategy_cn.rst index 7c23c2046df..a30b5d273cc 100644 --- a/docs/api/paddle/static/BuildStrategy_cn.rst +++ b/docs/api/paddle/static/BuildStrategy_cn.rst @@ -15,7 +15,7 @@ BuildStrategy,一个BuildStrategy的实例。 ::::::::: .. 
code-block:: python - + import os import paddle import paddle.static as static @@ -80,7 +80,7 @@ bool类型。如果设置为True,则算子的执行顺序将与算子定义的 fuse_broadcast_ops ''''''''' - + bool类型。表明是否融合(fuse) broadcast ops。该选项指在Reduce模式下有效,使程序运行更快。默认为False。 **代码示例** @@ -95,7 +95,7 @@ bool类型。表明是否融合(fuse) broadcast ops。该选项指在Reduce模 build_strategy = static.BuildStrategy() build_strategy.fuse_broadcast_ops = True - + fuse_elewise_add_act_ops ''''''''' diff --git a/docs/api/paddle/static/CompiledProgram_cn.rst b/docs/api/paddle/static/CompiledProgram_cn.rst index b757e0767de..2f66e6e2296 100644 --- a/docs/api/paddle/static/CompiledProgram_cn.rst +++ b/docs/api/paddle/static/CompiledProgram_cn.rst @@ -32,7 +32,7 @@ with_data_parallel(loss_name=None, build_strategy=None, exec_strategy=None, shar .. note:: 如果在构建CompiledProgram和调用with_data_parallel时都指定了build_strategy,在CompiledProgram中的build_strategy会被复写,因此,如果是数据并行训练,建议在调用with_data_parallel接口时设置build_strategy。 - + **参数** - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字,**注意:如果是模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。 diff --git a/docs/api/paddle/static/ExecutionStrategy_cn.rst b/docs/api/paddle/static/ExecutionStrategy_cn.rst index 71e4aaba2fc..451575e80cd 100644 --- a/docs/api/paddle/static/ExecutionStrategy_cn.rst +++ b/docs/api/paddle/static/ExecutionStrategy_cn.rst @@ -51,7 +51,7 @@ int型成员。该选项表示当前 ``Executor`` 的线程池(thread pool)的 **代码示例** .. code-block:: python - + import paddle import paddle.static as static diff --git a/docs/api/paddle/static/Executor_cn.rst b/docs/api/paddle/static/Executor_cn.rst index 6fea9190ea5..f4156d5adc6 100644 --- a/docs/api/paddle/static/Executor_cn.rst +++ b/docs/api/paddle/static/Executor_cn.rst @@ -16,11 +16,11 @@ Executor支持单GPU、多GPU以及CPU运行。 :::::::::::: - **place** (paddle.CPUPlace()|paddle.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为 `None` 时,PaddlePaddle会根据其安装版本设置默认的运行设备。当安装的Paddle为CPU版时,默认运行设置会设置成 `CPUPlace()`,而当Paddle为GPU版时,默认运行设备会设置成 `CUDAPlace(0)`。默认值为None。 - + .. 
note:: -多卡训练初始化Executor时也只用传入一个Place或None,其他API会处理使用的多卡,见 `多卡使用方式 `_ - +多卡训练初始化Executor时也只用传入一个Place或None,其他API会处理使用的多卡,见 `多卡使用方式 `_ + 返回 :::::::::::: 初始化后的 ``Executor`` 对象。 @@ -29,7 +29,7 @@ Executor支持单GPU、多GPU以及CPU运行。 :::::::::::: .. code-block:: python - + import paddle import numpy import os @@ -95,7 +95,7 @@ close() **代码示例** .. code-block:: python - + import paddle cpu = paddle.CPUPlace() @@ -110,7 +110,7 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na 执行指定的Program或者CompiledProgram。需要注意的是,执行器会执行Program或CompiledProgram中的所有算子,而不会根据fetch_list对Program或CompiledProgram中的算子进行裁剪。同时,需要传入运行该模型用到的scope,如果没有指定scope,执行器将使用全局scope,即paddle.static.global_scope()。 **参数** - + - **program** (Program|CompiledProgram) – 该参数为被执行的Program或CompiledProgram,如果未提供该参数,即该参数为None,在该接口内,main_program将被设置为paddle.static.default_main_program()。默认为:None。 - **feed** (list|dict) – 该参数表示模型的输入变量。如果是单卡训练,``feed`` 为 ``dict`` 类型,如果是多卡训练,参数 ``feed`` 可以是 ``dict`` 或者 ``list`` 类型变量,如果该参数类型为 ``dict`` ,feed中的数据将会被分割(split)并分送给多个设备(CPU/GPU),即输入数据被均匀分配到不同设备上;如果该参数类型为 ``list``,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 - **fetch_list** (list) – 该参数表示模型运行之后需要返回的变量。默认为:None。 @@ -137,7 +137,7 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na import paddle import numpy - + #首先创建执行引擎 paddle.enable_static() place = paddle.CPUPlace() # paddle.CUDAPlace(0) @@ -192,7 +192,7 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na unmerged_prediction, = exe.run(binary, feed={'X': x}, fetch_list=[prediction.name], - return_merged=False) + return_merged=False) # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(2, 3, class_dim)。 # 输出结果中第一个维度值代表所使用的GPU卡数,而第二个维度值代表batch_size和所使用 # 的GPU卡数之商。 @@ -234,7 +234,7 @@ infer_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False infer_from_dataset的文档与train_from_dataset几乎完全相同,只是在分布式训练中,推进梯度将在infer_from_dataset中禁用。infer_from_dataset()可以非常容易地用于多线程中的评估。 **参数** - + - **program** 
(Program|CompiledProgram) – 需要执行的program,如果没有给定那么默认使用default_main_program (未编译的)。 - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请检查Dataset文件。默认为None。 - **scope** (Scope) – 执行这个program的域,用户可以指定不同的域。默认为全局域。 @@ -279,7 +279,7 @@ train_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False train_from_dataset将销毁每次运行在executor中创建的所有资源。 **参数** - + - **program** (Program|CompiledProgram) – 需要执行的program,如果没有给定那么默认使用default_main_program (未编译的)。 - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请检查Dataset文件。默认为None。 - **scope** (Scope) – 执行这个program的域,用户可以指定不同的域。默认为全局域。 diff --git a/docs/api/paddle/static/ExponentialMovingAverage_cn.rst b/docs/api/paddle/static/ExponentialMovingAverage_cn.rst index 83eb0ce1af7..fb7201d196d 100644 --- a/docs/api/paddle/static/ExponentialMovingAverage_cn.rst +++ b/docs/api/paddle/static/ExponentialMovingAverage_cn.rst @@ -12,19 +12,19 @@ ExponentialMovingAverage 用 ``update()`` 方法计算出的平均结果将保存在由实例化对象创建和维护的临时变量中,并且可以通过调用 ``apply()`` 方法把结果应用于当前模型的参数。同时,可用 ``restore()`` 方法恢复原始参数。 -**偏置校正** +**偏置校正** 所有的滑动平均均初始化为 :math:`0`,因此它们相对于零是有偏的,可以通过除以因子 :math:`(1 - \text{decay}^t)` 来校正,因此在调用 ``apply()`` 方法时,作用于参数的真实滑动平均值将为: .. math:: \widehat{\text{EMA}}_t = \frac{\text{EMA}_t}{1 - \text{decay}^t} -**衰减率调节** +**衰减率调节** 一个非常接近于1的很大的衰减率将会导致平均值滑动得很慢。更优的策略是,开始时设置一个相对较小的衰减率。参数 ``thres_steps`` 允许用户传递一个变量以设置衰减率,在这种情况下, 真实的衰减率变为: -.. math:: +.. math:: \min(\text{decay}, \frac{1 + \text{thres_steps}}{10 + \text{thres_steps}}) 通常 ``thres_steps`` 可以是全局的训练迭代步数。 diff --git a/docs/api/paddle/static/InputSpec_cn.rst b/docs/api/paddle/static/InputSpec_cn.rst index 34890b19803..f82ce8e1389 100644 --- a/docs/api/paddle/static/InputSpec_cn.rst +++ b/docs/api/paddle/static/InputSpec_cn.rst @@ -109,7 +109,7 @@ batch(batch_size) .. 
code-block:: python from paddle.static import InputSpec - + x_spec = InputSpec(shape=[64], dtype='float32', name='x') x_spec.batch(4) print(x_spec) # InputSpec(shape=(4, 64), dtype=paddle.float32, name=x) diff --git a/docs/api/paddle/static/ParallelExecutor_cn.rst b/docs/api/paddle/static/ParallelExecutor_cn.rst index 21380d2a7d0..6211df0f8d6 100644 --- a/docs/api/paddle/static/ParallelExecutor_cn.rst +++ b/docs/api/paddle/static/ParallelExecutor_cn.rst @@ -48,8 +48,8 @@ ParallelExecutor use_cuda = True paddle.enable_static() - place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - + place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() + # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, # 否则PaddlePaddle会把逻辑核的所有数目设为CPU_NUM, # 在这种情况下,输入的batch size应大于CPU_NUM, @@ -72,7 +72,7 @@ ParallelExecutor train_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, main_program=train_program, - loss_name=loss.name) + loss_name=loss.name) # 注意:如果此处不设置share_vars_from=train_exe,测试过程中用的参数与训练使用的参数是不一致 test_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, main_program=test_program, @@ -117,7 +117,7 @@ run(fetch_list, feed=None, feed_dict=None, return_numpy=True) use_cuda = True paddle.enable_static() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - + # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, # 否则PaddlePaddle会把逻辑核的所有数目设为CPU_NUM, # 在这种情况下,输入的batch size应大于CPU_NUM, @@ -172,7 +172,7 @@ drop_local_exe_scopes() import paddle import numpy import os - + use_cuda = True # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, # 否则PaddlePaddle会把逻辑核的所有数目设为CPU_NUM, diff --git a/docs/api/paddle/static/Program_cn.rst b/docs/api/paddle/static/Program_cn.rst index c5f019d44ca..94e7c0d63e8 100644 --- a/docs/api/paddle/static/Program_cn.rst +++ b/docs/api/paddle/static/Program_cn.rst @@ -9,7 +9,7 @@ Program .. 
note:: 默认情况下,Paddle内部默认含有 :ref:`cn_api_fluid_default_startup_program` 和 :ref:`cn_api_fluid_default_main_program`,它们共享参数。:ref:`cn_api_fluid_default_startup_program` 只运行一次来初始化参数,:ref:`cn_api_fluid_default_main_program` 在每个mini batch中运行并更新权重。 -Program是Paddle对于计算图的一种静态描述,使用Program的构造函数可以创建一个Program。Program中包括至少一个 :ref:`api_guide_Block`,当 :ref:`api_guide_Block` 中存在条件选择的控制流OP(例如 :ref:`cn_api_fluid_layers_While` 等)时,该Program将会含有嵌套着的 :ref:`api_guide_Block` 即控制流外部的 :ref:`api_guide_Block` 将包含着控制流内部的 :ref:`api_guide_Block`,而嵌套的 :ref:`api_guide_Block` 的元素访问控制将由具体的控制流OP来决定。关于Program具体的结构和包含的类型请参阅 `framework.proto `_ +Program是Paddle对于计算图的一种静态描述,使用Program的构造函数可以创建一个Program。Program中包括至少一个 :ref:`api_guide_Block`,当 :ref:`api_guide_Block` 中存在条件选择的控制流OP(例如 :ref:`cn_api_fluid_layers_While` 等)时,该Program将会含有嵌套着的 :ref:`api_guide_Block` 即控制流外部的 :ref:`api_guide_Block` 将包含着控制流内部的 :ref:`api_guide_Block`,而嵌套的 :ref:`api_guide_Block` 的元素访问控制将由具体的控制流OP来决定。关于Program具体的结构和包含的类型请参阅 `framework.proto `_ 。 一个Program的集合通常包含初始化程序(startup_program)与主程序(main_program),初始化程序是一个包含一些初始化工作的Program,主程序将会包含用来训练的网络结构和变量,在使用同一个 :ref:`api_guide_executor` 执行时他们会共享初始化工作的结果,例如初始化的参数。一个Program的集合可以被用来测试或者训练,被用来训练时,``Paddle`` 将会利用所有用户使用的OP和变量来搭建一个训练网络,被用来测试时,可以通过调用Program相关的接口例如:`clone` 剪去一些与测试无关的OP和变量,比如反向传播的OP和变量。 @@ -298,7 +298,7 @@ int,该Program中的 :ref:`api_guide_Block` 的个数。 prog = static.default_main_program() num_blocks = prog.num_blocks print(num_blocks) - + # print result: # 1 @@ -360,7 +360,7 @@ global_block() prog = static.default_main_program() gb_block = prog.global_block() print(gb_block) - + block(index) ''''''''' @@ -463,7 +463,7 @@ list[ :ref:`api_guide_parameter` ],一个包含当前Program中所有参数的 for param in program.all_parameters(): print(param) - + # Here will print all parameters in current program, in this example, # the result is like: # diff --git a/docs/api/paddle/static/Variable_cn.rst b/docs/api/paddle/static/Variable_cn.rst index 69321305302..99ddaedd19e 100644 --- a/docs/api/paddle/static/Variable_cn.rst 
+++ b/docs/api/paddle/static/Variable_cn.rst @@ -153,7 +153,7 @@ Tensor, :ref:`api_guide_Variable` 的值。 .. code-block:: python import paddle - import paddle.static as static + import paddle.static as static import numpy as np paddle.enable_static() @@ -198,7 +198,7 @@ set_value(value, scope=None) .. code-block:: python import paddle - import paddle.static as static + import paddle.static as static import numpy as np paddle.enable_static() diff --git a/docs/api/paddle/static/WeightNormParamAttr_cn.rst b/docs/api/paddle/static/WeightNormParamAttr_cn.rst index d202b92e531..9887f189c22 100644 --- a/docs/api/paddle/static/WeightNormParamAttr_cn.rst +++ b/docs/api/paddle/static/WeightNormParamAttr_cn.rst @@ -11,10 +11,10 @@ WeightNormParamAttr 动态图模式下请使用 ``paddle.nn.utils.weight_norm`` 。 .. note:: - 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略::ref:`cn_api_paddle_nn_ClipGradByGlobalNorm` 、 + 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略::ref:`cn_api_paddle_nn_ClipGradByGlobalNorm` 、 :ref:`cn_api_paddle_nn_ClipGradByNorm` 、 :ref:`cn_api_paddle_nn_ClipGradByValue` 。 -该类定义了权重归一化(Weight Normalization)的参数。权重归一化可以将神经网络中权重向量的长度与其方向解耦,详细的定义与实现可以参考论文:`Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ +该类定义了权重归一化(Weight Normalization)的参数。权重归一化可以将神经网络中权重向量的长度与其方向解耦,详细的定义与实现可以参考论文:`Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ 参数 :::::::::::: @@ -23,7 +23,7 @@ WeightNormParamAttr - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - **initializer** (Initializer,可选) - 初始化参数方法,例如 ``initializer = fluid.nn.initializer.Constant(1.0)``。默认为None,如果为None则使用默认初始化函数 `Xavier()` 。 - **learning_rate** (float32,可选) - 学习率,优化过程 :math:`global\_lr∗parameter\_lr∗scheduler\_factor` 的学习速率,默认为1.0。 - - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略::ref:`cn_api_paddle_regularizer_L1Decay` 、 + - **regularizer** 
(WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略::ref:`cn_api_paddle_regularizer_L1Decay` 、 :ref:`cn_api_paddle_regularizer_L2Decay`,如果在 ``optimizer`` (例如 :ref:`cn_api_paddle_optimizer_SGD` ) 中也 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 - **trainable** (bool) - 可选,指明参数是否可训练,默认为True。 diff --git a/docs/api/paddle/static/auc_cn.rst b/docs/api/paddle/static/auc_cn.rst index cb7c7f72f38..18bfd924e4a 100755 --- a/docs/api/paddle/static/auc_cn.rst +++ b/docs/api/paddle/static/auc_cn.rst @@ -14,7 +14,7 @@ auc 注:如果输入标注包含一种值,只有0或1两种情况,数据类型则强制转换成布尔值。 -相关定义可以在这里找到:https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve +相关定义可以在这里找到:https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve 有两种可能的曲线: diff --git a/docs/api/paddle/static/default_main_program_cn.rst b/docs/api/paddle/static/default_main_program_cn.rst index c2c18ae4898..1af2442e66e 100644 --- a/docs/api/paddle/static/default_main_program_cn.rst +++ b/docs/api/paddle/static/default_main_program_cn.rst @@ -11,7 +11,7 @@ default_main_program ``default main program`` 是许多编程接口中Program参数的默认值。例如对于 ``Executor.run()`` 如果用户没有传入Program参数,会默认使用 ``default main program`` 。 -可以使用 :ref:`cn_api_fluid_program_guard` 来切换 ``default main program``。 +可以使用 :ref:`cn_api_fluid_program_guard` 来切换 ``default main program``。 返回 ::::::::: diff --git a/docs/api/paddle/static/deserialize_persistables_cn.rst b/docs/api/paddle/static/deserialize_persistables_cn.rst index b2a5b8520d6..103a98c00f5 100644 --- a/docs/api/paddle/static/deserialize_persistables_cn.rst +++ b/docs/api/paddle/static/deserialize_persistables_cn.rst @@ -16,7 +16,7 @@ deserialize_persistables - **program** (Program) - 指定包含要反序列化的参数的名称的 program。 - **data** (bytes) - 序列化之后的模型参数。 - - **executor** (Executor) - 用来执行 load op 的 ``executor`` 。 + - **executor** (Executor) - 用来执行 load op 的 ``executor`` 。 返回 :::::::::::: diff --git a/docs/api/paddle/static/device_guard_cn.rst b/docs/api/paddle/static/device_guard_cn.rst index 
1b39a9d76a5..4917f6c8285 100644 --- a/docs/api/paddle/static/device_guard_cn.rst +++ b/docs/api/paddle/static/device_guard_cn.rst @@ -7,7 +7,7 @@ device_guard .. note:: 该API仅支持静态图模式。 - + 一个用于指定OP运行设备的上下文管理器。 参数 diff --git a/docs/api/paddle/static/gradients_cn.rst b/docs/api/paddle/static/gradients_cn.rst index d7c86c232d5..d7f674508d4 100644 --- a/docs/api/paddle/static/gradients_cn.rst +++ b/docs/api/paddle/static/gradients_cn.rst @@ -13,7 +13,7 @@ gradients 参数 :::::::::::: - + - **targets** (Tensor|list[Tensor]) – 目标 Tensor 或包含 Tensor 的列表。 - **inputs** (Tensor|list[Tensor]) – 输入 Tensor 或包含 Tensor 的列表。 - **target_gradients** (Tensor|list[Tensor],可选) – 目标的梯度 Tensor,应与目标 Tensor 的形状相同;如果设置为None,则以 1 初始化所有梯度 Tensor。 diff --git a/docs/api/paddle/static/nn/batch_norm_cn.rst b/docs/api/paddle/static/nn/batch_norm_cn.rst index 9d1156a5044..d79a279b166 100644 --- a/docs/api/paddle/static/nn/batch_norm_cn.rst +++ b/docs/api/paddle/static/nn/batch_norm_cn.rst @@ -17,7 +17,7 @@ batch_norm - 2.NCHW[batch,in_channels,in_height,in_width] -更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ ``input`` 是mini-batch的输入。 @@ -29,8 +29,8 @@ batch_norm \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\ y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift - moving\_mean = moving\_mean * momentum + mini\_batch\_mean * (1. - momentum) \\ - moving\_variance = moving\_variance * momentum + mini\_batch\_var * (1. - momentum) + moving\_mean = moving\_mean * momentum + mini\_batch\_mean * (1. - momentum) \\ + moving\_variance = moving\_variance * momentum + mini\_batch\_var * (1. 
- momentum) moving_mean和moving_var是训练过程中统计得到的全局均值和方差,在预测或者评估中使用。 `is_test` 参数只能用于测试或者评估阶段,如果想在训练阶段使用预训练模型的全局均值和方差的话,可以设置 `use_global_stats=True`。 diff --git a/docs/api/paddle/static/nn/cond_cn.rst b/docs/api/paddle/static/nn/cond_cn.rst index a69aeb8827e..ccde363936e 100644 --- a/docs/api/paddle/static/nn/cond_cn.rst +++ b/docs/api/paddle/static/nn/cond_cn.rst @@ -19,7 +19,7 @@ PaddlePaddle里Tensor的嵌套结构是指一个Tensor,或者Tensor的元组 3. 静态图模式下,因为各个分支都要参与组网,因此不论运行哪个分支,在 ``true_fn`` 和 ``false_fn`` 内外创建的Tensor和Op都会组网,即PaddlePaddle并不是惰性语法(lazy semantics)。例如 .. code-block:: python - + import paddle a = paddle.zeros((1, 1)) diff --git a/docs/api/paddle/static/nn/conv2d_cn.rst b/docs/api/paddle/static/nn/conv2d_cn.rst index aeadef8e538..77207ea050b 100644 --- a/docs/api/paddle/static/nn/conv2d_cn.rst +++ b/docs/api/paddle/static/nn/conv2d_cn.rst @@ -75,7 +75,7 @@ conv2d - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含两个整数值:(filter_size_height,filter_size_width)。若为一个整数,filter_size_height = filter_size_width = filter_size。 - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式: - + - (1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; - (2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right]; - (3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 diff --git 
a/docs/api/paddle/static/nn/conv2d_transpose_cn.rst b/docs/api/paddle/static/nn/conv2d_transpose_cn.rst index 7bc1e7446ca..a8e680cbb2d 100644 --- a/docs/api/paddle/static/nn/conv2d_transpose_cn.rst +++ b/docs/api/paddle/static/nn/conv2d_transpose_cn.rst @@ -85,11 +85,11 @@ conv2d_transpose - **output_size** (int|tuple,可选) - 输出图片的大小。如果output_size是一个元组,则必须包含两个整型数,(output_size_height,output_size_width)。如果output_size=None,则内部会使用filter_size、padding和stride来计算output_size。如果output_size和filter_size是同时指定的,那么它们应满足上面的公式。默认:None。output_size和filter_size不能同时为None。 - **filter_size** (int|tuple,可选) - 滤波器大小。如果filter_size是一个元组,则必须包含两个整型数,(filter_size_height, filter_size_width)。否则,filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, ``conv2d_transpose`` 内部会根据output_size、padding和stride计算出滤波器大小。默认:None。output_size和filter_size不能同时为None。 - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式: - + - (1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; - (2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right]; - (3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 - + - **stride** (int|tuple,可选) - 步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,则必须包含两个整型数,形式为(stride_height,stride_width)。否则,stride_height = stride_width = stride。默认:stride = 1。 - **dilation** (int|tuple,可选) - 膨胀比例(dilation)大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 
较好理解。如果膨胀比例dilation是一个元组,那么元组必须包含两个整型数,形式为(dilation_height, dilation_width)。否则,dilation_height = dilation_width = dilation。默认:dilation= 1。 - **groups** (int,可选) - 二维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 diff --git a/docs/api/paddle/static/nn/conv3d_cn.rst b/docs/api/paddle/static/nn/conv3d_cn.rst index ae9c211e8a6..f857db7bcf2 100644 --- a/docs/api/paddle/static/nn/conv3d_cn.rst +++ b/docs/api/paddle/static/nn/conv3d_cn.rst @@ -80,7 +80,7 @@ conv3d - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含三个整数值:(filter_size_depth, filter_size_height,filter_size_width)。若为一个整数,则filter_size_depth = filter_size_height = filter_size_width = filter_size。 - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含三个整型数:(stride_depth, stride_height, stride_width)。若为一个整数,stride_depth = stride_height = stride_width = stride。默认值:1。 - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式: - + - (1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; - (2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right]; - (3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 diff --git a/docs/api/paddle/static/nn/conv3d_transpose_cn.rst 
b/docs/api/paddle/static/nn/conv3d_transpose_cn.rst index 2d89351e814..ce593612817 100755 --- a/docs/api/paddle/static/nn/conv3d_transpose_cn.rst +++ b/docs/api/paddle/static/nn/conv3d_transpose_cn.rst @@ -92,11 +92,11 @@ conv3d_transpose - **output_size** (int|tuple,可选) - 输出图片的大小。如果output_size是一个元组,则必须包含三个整型数,(output_size_depth,output_size_height,output_size_width)。如果output_size=None,则内部会使用filter_size、padding和stride来计算output_size。如果output_size和filter_size是同时指定的,那么它们应满足上面的公式。默认:None。output_size和filter_size不能同时为None。 - **filter_size** (int|tuple,可选) - 滤波器大小。如果filter_size是一个元组,则必须包含三个整型数,(filter_size_depth,filter_size_height, filter_size_width)。否则,filter_size_depth = filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, ``conv2d_transpose`` 内部会根据output_size、padding和stride计算出滤波器大小。默认:None。output_size和filter_size不能同时为None。 - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式: - + - (1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]; - (2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]; - (3)包含3个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 - + - **stride** (int|tuple,可选) - 步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,那么元组的形式为(stride_depth,stride_height,stride_width)。否则,stride_depth = stride_height = stride_width = stride。默认:stride = 1。 - 
**dilation** (int|tuple,可选) - 膨胀比例dilation大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例dilation是一个元组,那么元组的形式为(dilation_depth,dilation_height, dilation_width)。否则,dilation_depth = dilation_height = dilation_width = dilation。默认:dilation= 1。 - **groups** (int,可选) - 三维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 diff --git a/docs/api/paddle/static/nn/deform_conv2d_cn.rst b/docs/api/paddle/static/nn/deform_conv2d_cn.rst index 6d087ba63f8..80296a9a1f9 100644 --- a/docs/api/paddle/static/nn/deform_conv2d_cn.rst +++ b/docs/api/paddle/static/nn/deform_conv2d_cn.rst @@ -24,7 +24,7 @@ deform_conv2d op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor 具体细节可以参考论文:`<> `_ 和 `<> `_ 。 **示例** - + 输入: input 形状::math:`(N, C_{in}, H_{in}, W_{in})` @@ -34,7 +34,7 @@ deform_conv2d op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor offset 形状::math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})` mask 形状::math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})` - + 输出: 输出形状::math:`(N, C_{out}, H_{out}, W_{out})` @@ -46,7 +46,7 @@ deform_conv2d op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 - + 参数 :::::::::::: @@ -65,11 +65,11 @@ deform_conv2d op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor - **weight_attr** (ParamAttr,可选) – 可变形卷积的可学习权重的属性。如果将其设置为None或某种ParamAttr,可变形卷积将创建ParamAttr作为weight_attr。如果没有设置此weight_attr的Initializer,该参数将被Normal(0.0, std)初始化,且其中的std为 :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`。默认值为None。 - **bias_attr** (ParamAttr|bool,可选) – 可变形卷积层的偏置的参数属性。如果设为False,则输出单元不会加偏置。如果设为None或者某种ParamAttr,conv2d会创建ParamAttr作为bias_attr。如果不设置bias_attr的Initializer,偏置会被初始化为0。默认值为None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: Tensor,可变形卷积输出的4-D Tensor,数据类型为float32或float64。 - + 代码示例 :::::::::::: 
diff --git a/docs/api/paddle/static/nn/embedding_cn.rst b/docs/api/paddle/static/nn/embedding_cn.rst index c53857f2af5..faebd8cc9e7 100644 --- a/docs/api/paddle/static/nn/embedding_cn.rst +++ b/docs/api/paddle/static/nn/embedding_cn.rst @@ -34,11 +34,11 @@ input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出 [[0.345249859, 0.124939536, ..., 0.194353745], [0.945345345, 0.435394634, ..., 0.435345365]], - + [[0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]]] # padding data 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127,对于输入id为127的词,进行padding处理。 - + Case 2: input是lod level 为1的LoDTensor,且padding_idx = 0 diff --git a/docs/api/paddle/static/nn/fc_cn.rst b/docs/api/paddle/static/nn/fc_cn.rst index 94809f980ba..f600426dd1c 100755 --- a/docs/api/paddle/static/nn/fc_cn.rst +++ b/docs/api/paddle/static/nn/fc_cn.rst @@ -36,7 +36,7 @@ fc - :math:`Act` :activation function (激活函数); - :math:`Out`:输出Tensor。 - + .. code-block:: text # Case 1, input is a single tensor: diff --git a/docs/api/paddle/static/nn/group_norm_cn.rst b/docs/api/paddle/static/nn/group_norm_cn.rst index b1a44666b1c..6a03c18336e 100755 --- a/docs/api/paddle/static/nn/group_norm_cn.rst +++ b/docs/api/paddle/static/nn/group_norm_cn.rst @@ -6,7 +6,7 @@ group_norm .. 
py:function:: paddle.static.nn.group_norm(input, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout='NCHW', name=None) -论文参考:`Group Normalization `_ +论文参考:`Group Normalization `_ 参数 ::::::::: diff --git a/docs/api/paddle/static/nn/instance_norm_cn.rst b/docs/api/paddle/static/nn/instance_norm_cn.rst index f5f88f43573..b4dfd2108d6 100644 --- a/docs/api/paddle/static/nn/instance_norm_cn.rst +++ b/docs/api/paddle/static/nn/instance_norm_cn.rst @@ -14,7 +14,7 @@ instance_norm NCHW[batch,in_channels,in_height,in_width] -更多详情请参考:`Instance Normalization: The Missing Ingredient for Fast Stylization `_ +更多详情请参考:`Instance Normalization: The Missing Ingredient for Fast Stylization `_ ``input`` 是mini-batch的输入。 diff --git a/docs/api/paddle/static/nn/layer_norm_cn.rst b/docs/api/paddle/static/nn/layer_norm_cn.rst index f2ef17ad409..3e38c986bd3 100644 --- a/docs/api/paddle/static/nn/layer_norm_cn.rst +++ b/docs/api/paddle/static/nn/layer_norm_cn.rst @@ -11,7 +11,7 @@ layer_norm 该OP实现了层归一化层(Layer Normalization Layer),其可以应用于小批量输入数据。 -论文参考:`Layer Normalization `_ +论文参考:`Layer Normalization `_ 计算公式如下 diff --git a/docs/api/paddle/static/nn/multi_box_head_cn.rst b/docs/api/paddle/static/nn/multi_box_head_cn.rst index 1fac62b2b6a..232afbfe52b 100644 --- a/docs/api/paddle/static/nn/multi_box_head_cn.rst +++ b/docs/api/paddle/static/nn/multi_box_head_cn.rst @@ -67,7 +67,7 @@ list(Variable) | tuple(Variable) 设置min_ratio和max_ratio .. code-block:: python - + import paddle paddle.enable_static() @@ -97,7 +97,7 @@ list(Variable) | tuple(Variable) 设置min_sizes和max_sizes .. 
code-block:: python - + import paddle paddle.enable_static() diff --git a/docs/api/paddle/static/nn/nce_cn.rst b/docs/api/paddle/static/nn/nce_cn.rst index 03c78a0e5b1..ceb387e32e9 100644 --- a/docs/api/paddle/static/nn/nce_cn.rst +++ b/docs/api/paddle/static/nn/nce_cn.rst @@ -34,7 +34,7 @@ nce 返回 :::::::::::: Tensor,nce loss,数据类型与 **input** 相同。 - + 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/nn/prelu_cn.rst b/docs/api/paddle/static/nn/prelu_cn.rst index 635209bc72e..258d6a18561 100644 --- a/docs/api/paddle/static/nn/prelu_cn.rst +++ b/docs/api/paddle/static/nn/prelu_cn.rst @@ -24,7 +24,7 @@ prelu激活函数 - **mode** (str) - 权重共享模式。 - **param_attr** (ParamAttr,可选) - 可学习权重 :math:`[\alpha]` 的参数属性,可由ParamAttr创建。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **data_format** (str,可选) – 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是 "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" 或者 "NDHWC"。默认值:"NCHW"。 - - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/static/nn/row_conv_cn.rst b/docs/api/paddle/static/nn/row_conv_cn.rst index 17aa0d12366..c5080ddfa9d 100644 --- a/docs/api/paddle/static/nn/row_conv_cn.rst +++ b/docs/api/paddle/static/nn/row_conv_cn.rst @@ -26,7 +26,7 @@ row_conv 详细请参考 `设计文档 `_ 。 论文链接:`Deep Speech 2: End-to-End Speech Recognition in English and Mandarin `_ 。 - + 参数 :::::::::::: diff --git a/docs/api/paddle/static/nn/sequence_concat_cn.rst b/docs/api/paddle/static/nn/sequence_concat_cn.rst index a93cb00c216..333aae58ba1 100644 --- a/docs/api/paddle/static/nn/sequence_concat_cn.rst +++ b/docs/api/paddle/static/nn/sequence_concat_cn.rst @@ -24,7 +24,7 @@ sequence_concat x2.data = [[6], [7], [8], [9]] x2.shape = [4, 1] 且必须满足:len(x1.lod[0]) == len(x2.lod[0]) - + 输出为LoDTensor: out.lod = [[0, 3+2, 5+4]] out.data = [[1], [2], [3], [6], [7], [4], [5], [8], [9]] diff --git a/docs/api/paddle/static/nn/sequence_conv_cn.rst 
b/docs/api/paddle/static/nn/sequence_conv_cn.rst index 9a003da449b..7f2febfff6b 100644 --- a/docs/api/paddle/static/nn/sequence_conv_cn.rst +++ b/docs/api/paddle/static/nn/sequence_conv_cn.rst @@ -23,7 +23,7 @@ sequence_conv [3, 3], [4, 4]] input.lod = [[0, 3, 4]] - + 即输入input总共有4个词,每个词被表示为一个2维向量。 Case1: @@ -38,7 +38,7 @@ sequence_conv [1, 1, 2, 2, 3, 3], [2, 2, 3, 3, 0, 0], [0, 0, 4, 4, 0, 0]] - + 它将和卷积核矩阵相乘得到最终的输出,假设num_filters = 3: output.data = [[ 0.3234, -0.2334, 0.7433], [ 0.5646, 0.9464, -0.1223], diff --git a/docs/api/paddle/static/nn/sequence_enumerate_cn.rst b/docs/api/paddle/static/nn/sequence_enumerate_cn.rst index 931a82d564c..a79806b90c9 100644 --- a/docs/api/paddle/static/nn/sequence_enumerate_cn.rst +++ b/docs/api/paddle/static/nn/sequence_enumerate_cn.rst @@ -17,13 +17,13 @@ sequence_enumerate 给定输入 x: x.lod = [[0, 3, 5]] - x.data = [[1], [2], [3], [4], [5]] + x.data = [[1], [2], [3], [4], [5]] x.dims = [5, 1] 设置属性 win_size = 2 pad_value = 0 - + 得到输出 out: - out.lod = [[0, 3, 5]] - out.data = [[1, 2], [2, 3], [3, 0], [4, 5], [5, 0]] + out.lod = [[0, 3, 5]] + out.data = [[1, 2], [2, 3], [3, 0], [4, 5], [5, 0]] out.dims = [5, 2] 参数 diff --git a/docs/api/paddle/static/nn/sequence_expand_as_cn.rst b/docs/api/paddle/static/nn/sequence_expand_as_cn.rst index 257df47bffc..2c93c5a40ed 100644 --- a/docs/api/paddle/static/nn/sequence_expand_as_cn.rst +++ b/docs/api/paddle/static/nn/sequence_expand_as_cn.rst @@ -23,12 +23,12 @@ Sequence Expand As Layer,该OP根据输入 ``y`` 的第0级lod对输入 ``x`` x.dims = [4, 1] 和输入 y y.lod = [[3, 3, 1, 1]] #为了便于理解这里用基于长度lod表示 - + 经过sequence_expand_as运算,得到输出1级LoDTensor out out.lod = [[0, 3, 6, 7, 8]] #基于偏移的lod,等价于基于长度的[[3, 3, 1, 1]] out.data = [[a], [a], [a], [b], [b], [b], [c], [d]] out.dims = [8, 1] - + 可见,输出out将x扩展至和y具有相同的lod。 :: diff --git a/docs/api/paddle/static/nn/sequence_first_step_cn.rst b/docs/api/paddle/static/nn/sequence_first_step_cn.rst index f81dfdbb7e5..89c52cf6a9f 100644 --- 
a/docs/api/paddle/static/nn/sequence_first_step_cn.rst +++ b/docs/api/paddle/static/nn/sequence_first_step_cn.rst @@ -26,14 +26,14 @@ sequence_first_step out.data = [[1.], [2.], [5.]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.) Case 2: - + input是2-level的LoDTensor,包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - + 将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 输出为LoDTensor: out.shape= [5, 1] diff --git a/docs/api/paddle/static/nn/sequence_last_step_cn.rst b/docs/api/paddle/static/nn/sequence_last_step_cn.rst index fdffd522f3f..7b1d3523506 100644 --- a/docs/api/paddle/static/nn/sequence_last_step_cn.rst +++ b/docs/api/paddle/static/nn/sequence_last_step_cn.rst @@ -28,14 +28,14 @@ sequence_last_step out.data = [[3.], [6.], [1.]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.) Case 2: - + input是2-level的LoDTensor,包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - + 将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 输出为LoDTensor: out.shape= [5, 1] diff --git a/docs/api/paddle/static/nn/sequence_pool_cn.rst b/docs/api/paddle/static/nn/sequence_pool_cn.rst index abe5effa7db..b493eb1d036 100644 --- a/docs/api/paddle/static/nn/sequence_pool_cn.rst +++ b/docs/api/paddle/static/nn/sequence_pool_cn.rst @@ -42,18 +42,18 @@ sequence_pool max : out.data = [[3.], [6.], [5.], [0.0]], where 3.=max(1., 3.), 6.=max(2., 4., 6.), 5.=max(5., 1.) last : out.data = [[3.], [6.], [1.], [0.0]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.) first : out.data = [[1.], [2.], [5.], [0.0]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.) 
- + 上述out.data中的最后一个[0.0]均为填充的数据。 Case 2: - + input是2-level的LoDTensor,包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - + 以pool_type取值为sum为例,将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 输出为LoDTensor: out.shape= [5, 1] diff --git a/docs/api/paddle/static/nn/sequence_scatter_cn.rst b/docs/api/paddle/static/nn/sequence_scatter_cn.rst index e168e3a0795..77eef5a6000 100644 --- a/docs/api/paddle/static/nn/sequence_scatter_cn.rst +++ b/docs/api/paddle/static/nn/sequence_scatter_cn.rst @@ -47,7 +47,7 @@ output[i][j]的值取决于能否在index中第i+1个区间中找到对应的数 ::::::::: - **input** (Tensor) - 维度为 :math:`[N, k_1 ... k_n]` 的Tensor,支持的数据类型:float32,float64,int32,int64。 - **index** (Tensor) - 包含index信息的LoDTensor,lod level必须等于1,支持的数据类型:int32,int64。 - - **updates** (Tensor) - 包含updates信息的LoDTensor,lod level和index一致,数据类型与input的数据类型一致。支持的数据类型:float32,float64,int32,int64。 + - **updates** (Tensor) - 包含updates信息的LoDTensor,lod level和index一致,数据类型与input的数据类型一致。支持的数据类型:float32,float64,int32,int64。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 返回 diff --git a/docs/api/paddle/static/nn/sequence_softmax_cn.rst b/docs/api/paddle/static/nn/sequence_softmax_cn.rst index 7edf503302b..cd2b8f56024 100644 --- a/docs/api/paddle/static/nn/sequence_softmax_cn.rst +++ b/docs/api/paddle/static/nn/sequence_softmax_cn.rst @@ -36,11 +36,11 @@ sequence_softmax 则: output.data = [0.30724832, 0.41474187, 0.2780098, 0.59868765, 0.40131235, - 0.2544242, 0.09359743, 0.13963096, 0.5123474, + 0.2544242, 0.09359743, 0.13963096, 0.5123474, 1., 0.84553474, 0.15446526, 0.06995796, 0.69777346, 0.23226859] - output.lod = [[0, 3, 5, 9, 10, 12, 15]] + output.lod = [[0, 3, 5, 9, 10, 12, 15]] 参数 diff --git a/docs/api/paddle/static/nn/sparse_embedding_cn.rst b/docs/api/paddle/static/nn/sparse_embedding_cn.rst index 
c5d6ee8278b..2333ac43840 100644 --- a/docs/api/paddle/static/nn/sparse_embedding_cn.rst +++ b/docs/api/paddle/static/nn/sparse_embedding_cn.rst @@ -32,11 +32,11 @@ input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出 [[0.345249859, 0.124939536, ..., 0.194353745], [0.945345345, 0.435394634, ..., 0.435345365]], - + [[0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]]] # padding data 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127,对于输入id为127的词,进行padding处理。 - + Case 2: input是lod level 为1的LoDTensor,且padding_idx = 0 @@ -64,7 +64,7 @@ input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出 - **padding_idx** (int|long|None,可选) - padding_idx需在区间 ``[-vocab_size, vocab_size)``,否则不生效,``padding_idx < 0`` 时,padding_idx会被改成``vocab_size + padding_idx``,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为None,不作处理,默认为None。 - **is_test** (bool,可选) - 表示训练/预测模式。在预测模式(is_test=False)下,遇到不存在的特征,不会初始化及创建,直接以0填充后返回。默认值为False。 - **entry** (str,可选) - 准入策略配置,目前支持概率准入ProbabilityEntry和频次准入CountFilterEntry。默认为None。 - - **table_class** (str,可选) - 稀疏表的类型,其值可以为CommonSparseTable和SSDSparseTable。默认为CommonSparseTable。 + - **table_class** (str,可选) - 稀疏表的类型,其值可以为CommonSparseTable和SSDSparseTable。默认为CommonSparseTable。 - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_paddle_ParamAttr`。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_paddle_to_tensor` 进行初始化,即可实现加载自定义或预训练的词向量。 - **dtype** (str) - 输出Tensor的数据类型,数据类型必须为:float32 或float64,默认为float32。 diff --git a/docs/api/paddle/static/nn/spectral_norm_cn.rst b/docs/api/paddle/static/nn/spectral_norm_cn.rst index 7ffca846cfe..493ba6dbb0b 100644 --- a/docs/api/paddle/static/nn/spectral_norm_cn.rst +++ b/docs/api/paddle/static/nn/spectral_norm_cn.rst @@ -24,7 +24,7 @@ spectral_norm \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ \mathbf{W} &= \frac{\mathbf{W}}{\sigma(\mathbf{W})} -论文参考:`Spectral Normalization `_ 
+论文参考:`Spectral Normalization `_ 参数 ::::::::: diff --git a/docs/api/paddle/static/nn/while_loop_cn.rst b/docs/api/paddle/static/nn/while_loop_cn.rst index e10cc04f503..8ce1d93e243 100644 --- a/docs/api/paddle/static/nn/while_loop_cn.rst +++ b/docs/api/paddle/static/nn/while_loop_cn.rst @@ -30,7 +30,7 @@ list|tuple,循环迭代之后 ``body`` 的返回值,和 ``loop_vars`` 具有 ::::::::: .. code-block:: python - + import paddle paddle.enable_static() @@ -47,7 +47,7 @@ list|tuple,循环迭代之后 ``body`` 的返回值,和 ``loop_vars`` 具有 i = paddle.full(shape=[1], fill_value=0, dtype='int64') # loop counter ten = paddle.full(shape=[1], fill_value=10, dtype='int64') # loop length i, ten = paddle.static.nn.while_loop(cond, body, [i, ten]) - + exe = paddle.static.Executor(paddle.CPUPlace()) res = exe.run(main_program, feed={}, fetch_list=[i]) print(res) # [array([10])] diff --git a/docs/api/paddle/static/program_guard_cn.rst b/docs/api/paddle/static/program_guard_cn.rst index 125ef85764d..fd6994bd045 100644 --- a/docs/api/paddle/static/program_guard_cn.rst +++ b/docs/api/paddle/static/program_guard_cn.rst @@ -25,7 +25,7 @@ program_guard .. 
code-block:: python import paddle - + paddle.enable_static() main_program = paddle.static.Program() startup_program = paddle.static.Program() diff --git a/docs/api/paddle/static/py_func_cn.rst b/docs/api/paddle/static/py_func_cn.rst index 8f7ae0517c2..44965a9e9ab 100644 --- a/docs/api/paddle/static/py_func_cn.rst +++ b/docs/api/paddle/static/py_func_cn.rst @@ -13,7 +13,7 @@ PaddlePaddle 通过py_func在Python端注册OP。py_func的设计原理在于Pad 该自定义的Python OP的前向函数是 ``func``,反向函数是 ``backward_func`` 。 Paddle将在前向部分调用 ``func``,并在反向部分调用 ``backward_func`` (如果 ``backward_func`` 不是None)。 ``x`` 为 ``func`` 的输入,必须为Tensor类型;``out`` 为 ``func`` 的输出,既可以是Tensor类型,也可以是numpy数组。 -反向函数 ``backward_func`` 的输入依次为:前向输入 ``x`` 、前向输出 ``out`` 、 ``out`` 的梯度。如果 ``out`` 的某些输出没有梯度,则 ``backward_func`` 的相关输入为None。如果 ``x`` 的某些变量没有梯度,则用户应在 ``backward_func`` 中主动返回None。 +反向函数 ``backward_func`` 的输入依次为:前向输入 ``x`` 、前向输出 ``out`` 、 ``out`` 的梯度。如果 ``out`` 的某些输出没有梯度,则 ``backward_func`` 的相关输入为None。如果 ``x`` 的某些变量没有梯度,则用户应在 ``backward_func`` 中主动返回None。 在调用该接口之前,还应正确设置 ``out`` 的数据类型和形状,而 ``out`` 和 ``x`` 对应梯度的数据类型和形状将自动推断而出。 @@ -57,7 +57,7 @@ Tensor|tuple(Tensor)|list[Tensor],前向函数的输出 ``out`` # 自定义的前向函数,可用于调试正在运行的网络(打印值) def debug_func(x): print(x) - + def create_tmp_var(name, dtype, shape): return paddle.static.default_main_program().current_block().create_var( name=name, dtype=dtype, shape=shape) @@ -99,16 +99,16 @@ Tensor|tuple(Tensor)|list[Tensor],前向函数的输出 ``out`` :::::::::::: .. 
code-block:: python - + # 该示例展示了如何将LoDTensor转化为numpy数组,并利用numpy API来自定义一个OP import paddle import numpy as np paddle.enable_static() - def element_wise_add(x, y): + def element_wise_add(x, y): # 必须先手动将LodTensor转换为numpy数组,否则无法支持numpy的shape操作 - x = np.array(x) + x = np.array(x) y = np.array(y) if x.shape != y.shape: @@ -132,7 +132,7 @@ Tensor|tuple(Tensor)|list[Tensor],前向函数的输出 ``out`` # 创建前向函数的输入变量 x = paddle.static.data(name='x', shape=[2,3], dtype='int32') y = paddle.static.data(name='y', shape=[2,3], dtype='int32') - + # 创建前向函数的输出变量,必须指明变量名称name/数据类型dtype/维度shape output = create_tmp_var('output','int32', [3,1]) @@ -145,7 +145,7 @@ Tensor|tuple(Tensor)|list[Tensor],前向函数的输出 ``out`` # 给program喂入numpy数组 input1 = np.random.randint(1, 10, size=[2,3], dtype='int32') input2 = np.random.randint(1, 10, size=[2,3], dtype='int32') - out = exe.run(main_program, + out = exe.run(main_program, feed={'x':input1, 'y':input2}, fetch_list=[output.name]) print("{0} + {1} = {2}".format(input1, input2, out)) diff --git a/docs/api/paddle/static/serialize_persistables_cn.rst b/docs/api/paddle/static/serialize_persistables_cn.rst index dc31573b41d..dd5d7168604 100644 --- a/docs/api/paddle/static/serialize_persistables_cn.rst +++ b/docs/api/paddle/static/serialize_persistables_cn.rst @@ -16,7 +16,7 @@ serialize_persistables - **feed_vars** (Variable | list[Variable]) – 模型的输入变量。 - **fetch_vars** (Variable | list[Variable]) – 模型的输出变量。 - - **executor** (Executor) - 用于保存预测模型的 ``executor``,详见 :ref:`api_guide_executor` 。 + - **executor** (Executor) - 用于保存预测模型的 ``executor``,详见 :ref:`api_guide_executor` 。 - **kwargs** - 支持的 key 包括 program。(注意:kwargs 主要是用来做反向兼容的) - **program** - 指定包含要序列化的参数的 program,默认是 default_main_program。 diff --git a/docs/api/paddle/strided_slice_cn.rst b/docs/api/paddle/strided_slice_cn.rst index eac7b469c3c..3b8318148fc 100644 --- a/docs/api/paddle/strided_slice_cn.rst +++ b/docs/api/paddle/strided_slice_cn.rst @@ -12,7 +12,7 @@ strided_slice算子。 :: - + 示例1: 给定: 
data=[[1,2,3,4],[5,6,7,8],] @@ -32,7 +32,7 @@ strided_slice算子。 strides=[1,-1] 则: - result=[[8,7,6],] + result=[[8,7,6],] 示例3: 给定: data=[[1,2,3,4],[5,6,7,8],] @@ -42,12 +42,12 @@ strided_slice算子。 strides =[1,3] 则: result=[[2],] - + 参数 :::::::::::: - + - **x** (Tensor)- 多维 ``Tensor``,数据类型为 ``bool``, ``float32``,``float64``,``int32``,或 ``int64``。 - **axes** (list|tuple)- 数据类型是 ``int32``。表示进行切片的轴。 - **starts** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``starts`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``starts`` 的类型是 ``Tensor``,则是1-D ``Tensor``。表示在各个轴上切片的起始索引值。 diff --git a/docs/api/paddle/sysconfig/get_include_cn.rst b/docs/api/paddle/sysconfig/get_include_cn.rst index 8684b37eebf..f42ee160b24 100644 --- a/docs/api/paddle/sysconfig/get_include_cn.rst +++ b/docs/api/paddle/sysconfig/get_include_cn.rst @@ -9,7 +9,7 @@ get_include 返回 :::::::::: - + 字符串类型的文件目录。 代码示例 diff --git a/docs/api/paddle/sysconfig/get_lib_cn.rst b/docs/api/paddle/sysconfig/get_lib_cn.rst index a5114d940cc..535b273500f 100644 --- a/docs/api/paddle/sysconfig/get_lib_cn.rst +++ b/docs/api/paddle/sysconfig/get_lib_cn.rst @@ -9,7 +9,7 @@ get_lib 返回 :::::::::: - + 字符串类型的文件目录。 代码示例 diff --git a/docs/api/paddle/tan_cn.rst b/docs/api/paddle/tan_cn.rst index 577a04a1633..80cf22e5c56 100644 --- a/docs/api/paddle/tan_cn.rst +++ b/docs/api/paddle/tan_cn.rst @@ -6,7 +6,7 @@ tan .. py:function:: paddle.tan(x, name=None) 三角函数tangent。 -输入范围是 `(k*pi-pi/2, k*pi+pi/2)`,输出范围是 `[-inf, inf]` 。 +输入范围是 `(k*pi-pi/2, k*pi+pi/2)`,输出范围是 `[-inf, inf]` 。 .. 
math:: out = tan(x) @@ -14,7 +14,7 @@ tan 参数 ::::::::: - - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 + - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/tensordot_cn.rst b/docs/api/paddle/tensordot_cn.rst index d65f4213736..08a236d06ad 100644 --- a/docs/api/paddle/tensordot_cn.rst +++ b/docs/api/paddle/tensordot_cn.rst @@ -13,7 +13,7 @@ tensordot - **x** (Tensor)- 缩并运算操作的左张量,数据类型为 ``float32`` 或 ``float64``。 - **y** (Tensor)- 缩并运算操作的右张量,与 ``x`` 具有相同的数据类型。 - **axes** (int|tuple|list|Tensor)- 指定对 ``x`` 和 ``y`` 做缩并运算的轴,默认值为整数2。 - + 1. ``axes`` 可以是一个非负整数。若输入的是一个整数 ``n``,则表示对 ``x`` 的后 ``n`` 个轴和对 ``y`` 的前 ``n`` 个轴进行缩并运算。 2. ``axes`` 可以是一个一维的整数tuple或list,表示 ``x`` 和 ``y`` 沿着相同的轴方向进行缩并运算。例如,``axes`` =[0, 1]表示 ``x`` 的前两个轴和 ``y`` 的前两个轴对应进行缩并运算。 diff --git a/docs/api/paddle/text/Conll05st_cn.rst b/docs/api/paddle/text/Conll05st_cn.rst index 0b9fd5a66cf..f9cb4d19e8c 100644 --- a/docs/api/paddle/text/Conll05st_cn.rst +++ b/docs/api/paddle/text/Conll05st_cn.rst @@ -6,7 +6,7 @@ Conll05st .. py:class:: paddle.text.datasets.Conll05st() -该类是对 `Conll05st `_ +该类是对 `Conll05st `_ 测试数据集的实现。 .. note:: diff --git a/docs/api/paddle/text/Movielens_cn.rst b/docs/api/paddle/text/Movielens_cn.rst index c4503f8f010..085703af345 100644 --- a/docs/api/paddle/text/Movielens_cn.rst +++ b/docs/api/paddle/text/Movielens_cn.rst @@ -6,7 +6,7 @@ Movielens .. py:class:: paddle.text.datasets.Movielens() -该类是对 `Movielens 1-M `_ +该类是对 `Movielens 1-M `_ 测试数据集的实现。 参数 diff --git a/docs/api/paddle/text/Overview_cn.rst b/docs/api/paddle/text/Overview_cn.rst index f8528a9c6f9..97c50285123 100644 --- a/docs/api/paddle/text/Overview_cn.rst +++ b/docs/api/paddle/text/Overview_cn.rst @@ -39,7 +39,7 @@ PaddleNLP 提供了在文本任务上简洁易用的全流程API,旨在为飞 pip install --upgrade paddlenlp -i https://pypi.org/simple -可参考PaddleNLP `GitHub `_ 以及 `文档 `_ +可参考PaddleNLP `GitHub `_ 以及 `文档 `_ .. 
csv-table:: :header: "API模块", "功能简介", "API用法简单示例" diff --git a/docs/api/paddle/text/UCIHousing_cn.rst b/docs/api/paddle/text/UCIHousing_cn.rst index 10403681436..8f6076a4e75 100644 --- a/docs/api/paddle/text/UCIHousing_cn.rst +++ b/docs/api/paddle/text/UCIHousing_cn.rst @@ -6,7 +6,7 @@ UCIHousing .. py:class:: paddle.text.datasets.UCIHousing() -该类是对 `UCI housing `_ +该类是对 `UCI housing `_ 测试数据集的实现。 参数 @@ -22,7 +22,7 @@ UCIHousing 代码示例 ::::::::: - + .. code-block:: python import paddle diff --git a/docs/api/paddle/tile_cn.rst b/docs/api/paddle/tile_cn.rst index 9aa8bb30587..3b676c08c39 100644 --- a/docs/api/paddle/tile_cn.rst +++ b/docs/api/paddle/tile_cn.rst @@ -1,4 +1,4 @@ -.. _cn_api_tensor_tile: +.. _cn_api_tensor_tile: tile ------------------------------- diff --git a/docs/api/paddle/tolist_cn.rst b/docs/api/paddle/tolist_cn.rst index 0eea4985c92..b453d8acb71 100644 --- a/docs/api/paddle/tolist_cn.rst +++ b/docs/api/paddle/tolist_cn.rst @@ -28,10 +28,10 @@ Tensor对应结构的list。 .. code-block:: python import paddle - + t = paddle.to_tensor([0,1,2,3,4]) expectlist = t.tolist() print(expectlist) #[0, 1, 2, 3, 4] - + expectlist = paddle.tolist(t) print(expectlist) #[0, 1, 2, 3, 4] diff --git a/docs/api/paddle/topk_cn.rst b/docs/api/paddle/topk_cn.rst index 08ddd7003d6..bb67865d87e 100644 --- a/docs/api/paddle/topk_cn.rst +++ b/docs/api/paddle/topk_cn.rst @@ -11,7 +11,7 @@ topk 参数 ::::::::: - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int32、int64。 - - **k** (int,Tensor) - 在指定的轴上进行 top 寻找的数量。 + - **k** (int,Tensor) - 在指定的轴上进行 top 寻找的数量。 - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 - **largest** (bool,可选) - 指定算法排序的方向。如果设置为 True,排序算法按照降序的算法排序,否则按照升序排序。默认值为 True。 - **sorted** (bool,可选) - 控制返回的结果是否按照有序返回,默认为 True。在 GPU 上总是返回有序的结果。 diff --git a/docs/api/paddle/transpose_cn.rst b/docs/api/paddle/transpose_cn.rst index 32ad3738ebc..98ed78e6368 100644 --- a/docs/api/paddle/transpose_cn.rst 
+++ b/docs/api/paddle/transpose_cn.rst @@ -27,7 +27,7 @@ transpose .. code-block:: text - x = [[[ 1 2 3 4] [ 5 6 7 8] [ 9 10 11 12]] + x = [[[ 1 2 3 4] [ 5 6 7 8] [ 9 10 11 12]] [[13 14 15 16] [17 18 19 20] [21 22 23 24]]] shape(x) = [2,3,4] diff --git a/docs/api/paddle/unique_consecutive_cn.rst b/docs/api/paddle/unique_consecutive_cn.rst index 599a1d156e5..4f8250be752 100644 --- a/docs/api/paddle/unique_consecutive_cn.rst +++ b/docs/api/paddle/unique_consecutive_cn.rst @@ -5,7 +5,7 @@ unique_consecutive .. py:function:: paddle.unique_consecutive(x, return_inverse=False, return_counts=False, axis=None, dtype="int64", name=None) -将Tensor中连续重复的元素进行去重,返回连续不重复的Tensor。 +将Tensor中连续重复的元素进行去重,返回连续不重复的Tensor。 参数 :::::::::::: diff --git a/docs/api/paddle/utils/cpp_extension/CUDAExtension_cn.rst b/docs/api/paddle/utils/cpp_extension/CUDAExtension_cn.rst index ce733bedaea..688336a4d6f 100644 --- a/docs/api/paddle/utils/cpp_extension/CUDAExtension_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/CUDAExtension_cn.rst @@ -13,7 +13,7 @@ CUDAExtension .. code-block:: text - # setup.py + # setup.py # 编译支持 CPU/GPU 的算子 from paddle.utils.cpp_extension import CUDAExtension, setup diff --git a/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst b/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst index 3a23dfd0cbc..d59da32c454 100644 --- a/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst @@ -13,7 +13,7 @@ CppExtension .. code-block:: text - # setup.py + # setup.py # 编译仅支持 CPU 的算子 from paddle.utils.cpp_extension import CppExtension, setup diff --git a/docs/api/paddle/utils/cpp_extension/load_cn.rst b/docs/api/paddle/utils/cpp_extension/load_cn.rst index 8ca3c89ddaf..14482d91e7d 100644 --- a/docs/api/paddle/utils/cpp_extension/load_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/load_cn.rst @@ -24,7 +24,7 @@ load **使用样例如下:** .. 
code-block:: text - + import paddle from paddle.utils.cpp_extension import load diff --git a/docs/api/paddle/utils/cpp_extension/setup_cn.rst b/docs/api/paddle/utils/cpp_extension/setup_cn.rst index 56ce779cc62..d3a5ddb4af7 100644 --- a/docs/api/paddle/utils/cpp_extension/setup_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/setup_cn.rst @@ -23,7 +23,7 @@ setup .. code-block:: text - # setup.py + # setup.py # 方式一:编译支持 CPU 和 GPU 的算子 from paddle.utils.cpp_extension import CUDAExtension, setup diff --git a/docs/api/paddle/utils/deprecated_cn.rst b/docs/api/paddle/utils/deprecated_cn.rst index 832c6398a2e..0ca3c05f004 100644 --- a/docs/api/paddle/utils/deprecated_cn.rst +++ b/docs/api/paddle/utils/deprecated_cn.rst @@ -21,4 +21,4 @@ paddle_utils_deprecated 返回 :::::::::::: 装饰器(装饰器函数或者装饰器类)。 - + diff --git a/docs/api/paddle/var_cn.rst b/docs/api/paddle/var_cn.rst index a87a7cd6a20..06569078d59 100644 --- a/docs/api/paddle/var_cn.rst +++ b/docs/api/paddle/var_cn.rst @@ -11,11 +11,11 @@ var :::::::::: - **x** (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 - **axis** (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int、list(int)、tuple(int)。 - + - 如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。 - 如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。 - 如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算方差。默认值为 None。 - + - **unbiased** (bool,可选) - 是否使用无偏估计来计算方差。使用 :math:`N` 来代表在 axis 上的维度,如果 ``unbiased`` 为 True,则在计算中使用 :math:`N - 1` 作为除数。为 False 时将使用 :math:`N` 作为除数。默认值为 True。 - **keepdim** (bool,可选) - 是否在输出 Tensor 中保留输入的维度。除非 keepdim 为 True,否则输出 Tensor 的维度将比输入 Tensor 小一维,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/vision/models/VGG_cn.rst b/docs/api/paddle/vision/models/VGG_cn.rst index e984a02aa48..20d1c37ace6 100644 --- a/docs/api/paddle/vision/models/VGG_cn.rst +++ b/docs/api/paddle/vision/models/VGG_cn.rst @@ -14,7 +14,7 @@ VGG 模型,来自论文 `"Very Deep Convolutional Networks For Large-Scale 
Ima - **features** (Layer) - VGG 模型的特征层。由函数 make_layers 产生。 - **num_classes** (int,可选) - 最后一个全连接层输出的维度。如果该值小于等于 0,则不定义最后一个全连接层。默认值为 1000。 - **with_pool** (bool,可选) - 是否在最后三个全连接层前使用池化。默认值为 True。 - + 返回 ::::::::: diff --git a/docs/api/paddle/vision/ops/DeformConv2D_cn.rst b/docs/api/paddle/vision/ops/DeformConv2D_cn.rst index 2625fedb182..eb64ea39d4a 100644 --- a/docs/api/paddle/vision/ops/DeformConv2D_cn.rst +++ b/docs/api/paddle/vision/ops/DeformConv2D_cn.rst @@ -21,7 +21,7 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x 具体细节可以参考论文:`<> `_ 和 `<> `_ 。 **示例** - + 输入: input 形状::math:`(N, C_{in}, H_{in}, W_{in})` @@ -30,7 +30,7 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x offset 形状::math:`(N, 2 * H_f * W_f, H_{out}, W_{out})` mask 形状::math:`(N, H_f * W_f, H_{out}, W_{out})` - + 输出: 输出形状::math:`(N, C_{out}, H_{out}, W_{out})` @@ -57,7 +57,7 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - + 形状: - x: :math:`(N, C_{in}, H_{in}, W_{in})` - offset: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})` diff --git a/docs/api/paddle/vision/ops/RoIPool_cn.rst b/docs/api/paddle/vision/ops/RoIPool_cn.rst index 562649ab1d5..c105bdcd15d 100644 --- a/docs/api/paddle/vision/ops/RoIPool_cn.rst +++ b/docs/api/paddle/vision/ops/RoIPool_cn.rst @@ -25,12 +25,12 @@ RoIPool 代码示例 ::::::::: - + .. 
code-block:: python import paddle from paddle.vision.ops import RoIPool - + data = paddle.rand([1, 256, 32, 32]) boxes = paddle.rand([3, 4]) boxes[:, 2] += boxes[:, 0] + 3 diff --git a/docs/api/paddle/vision/ops/deform_conv2d_cn.rst b/docs/api/paddle/vision/ops/deform_conv2d_cn.rst index 458224f9779..b34aa905b7b 100755 --- a/docs/api/paddle/vision/ops/deform_conv2d_cn.rst +++ b/docs/api/paddle/vision/ops/deform_conv2d_cn.rst @@ -20,7 +20,7 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x 具体细节可以参考论文:`<> `_ 和 `<> `_ 。 **示例** - + 输入: input 形状::math:`(N, C_{in}, H_{in}, W_{in})` @@ -30,7 +30,7 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x offset 形状::math:`(N, 2 * H_f * W_f, H_{out}, W_{out})` mask 形状::math:`(N, H_f * W_f, H_{out}, W_{out})` - + 输出: 输出形状::math:`(N, C_{out}, H_{out}, W_{out})` @@ -58,11 +58,11 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 - **mask** (Tensor,可选) – 可变形卷积层的输入掩码,当使用可变形卷积算子v1时,请将mask设置为None,数据类型为float32或float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::::: 可变形卷积输出的4-D Tensor,数据类型为float32或float64。 - + 代码示例 :::::::::::: diff --git a/docs/api/paddle/vision/ops/nms_cn.rst b/docs/api/paddle/vision/ops/nms_cn.rst index f974795ab07..a73004ac224 100644 --- a/docs/api/paddle/vision/ops/nms_cn.rst +++ b/docs/api/paddle/vision/ops/nms_cn.rst @@ -7,7 +7,7 @@ nms 非极大抑制(non-maximum suppression, NMS)用于在目标检测应用对检测边界框(bounding box)中搜索局部最大值,即只保留处于同一检测目标位置处重叠的框中分数最大的一个框。IoU(Intersection Over Union) 被用于判断两个框是否重叠,该值大于门限值(iou_threshold)则被认为两个框重叠。其计算公式如下: -.. math:: +.. 
math:: IoU = \frac{intersection\_area(box1, box2)}{union\_area(box1, box2)} diff --git a/docs/api/paddle/vision/ops/roi_pool_cn.rst b/docs/api/paddle/vision/ops/roi_pool_cn.rst index c80b2bd245b..8489f56c786 100644 --- a/docs/api/paddle/vision/ops/roi_pool_cn.rst +++ b/docs/api/paddle/vision/ops/roi_pool_cn.rst @@ -25,7 +25,7 @@ roi_pool 代码示例 ::::::::: - + .. code-block:: python import paddle diff --git a/docs/api/paddle/vision/set_image_backend_cn.rst b/docs/api/paddle/vision/set_image_backend_cn.rst index c316cfb0a14..5de1ce12a5c 100644 --- a/docs/api/paddle/vision/set_image_backend_cn.rst +++ b/docs/api/paddle/vision/set_image_backend_cn.rst @@ -11,7 +11,7 @@ set_image_backend ::::::::: - backend (str) - 加载图像的后端,必须为 ``pil`` 或者 ``cv2`` 。 - + 代码示例 ::::::::: diff --git a/docs/api/paddle/vision/transforms/BaseTransform_cn.rst b/docs/api/paddle/vision/transforms/BaseTransform_cn.rst index 82bec6ce3d2..6dbea6693e8 100644 --- a/docs/api/paddle/vision/transforms/BaseTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/BaseTransform_cn.rst @@ -14,7 +14,7 @@ BaseTransform if keys is None: _get_params -> _apply_image() else: - _get_params -> _apply_*() for * in keys + _get_params -> _apply_*() for * in keys 如果你想要定义自己的图像变化方法,需要重写子类中的 ``_apply_*`` 方法。 @@ -23,11 +23,11 @@ BaseTransform - keys (list[str]|tuple[str], optional) - 输入的类型。你的输入可以是单一的图像,也可以是包含不同数据结构的元组,``keys`` 可以用来指定输入类型。举个例子,如果你的输入就是一个单一的图像,那么 ``keys`` 可以为 ``None`` 或者 ("image")。如果你的输入是两个图像:``(image, image)``,那么 `keys` 应该设置为 ``("image", "image")``。如果你的输入是 ``(image, boxes)``,那么 ``keys`` 应该为 ``("image", "boxes")``。目前支持的数据类型如下所示: - - "image":输入的图像,它的维度为 ``(H, W, C)`` 。 - - "coords":输入的左边,它的维度为 ``(N, 2)`` 。 + - "image":输入的图像,它的维度为 ``(H, W, C)`` 。 + - "coords":输入的左边,它的维度为 ``(N, 2)`` 。 - "boxes":输入的矩形框,他的维度为 (N, 4),形式为 "xyxy",第一个 "xy" 表示矩形框左上方的坐标,第二个 "xy" 表示矩形框右下方的坐标。 - "mask":分割的掩码,它的维度为 ``(H, W, 1)`` 。 - + 你也可以通过自定义 _apply_*的方法来处理特殊的数据结构。 返回 @@ -37,7 +37,7 @@ BaseTransform 代码示例 ::::::::: - + .. 
code-block:: python import numpy as np @@ -86,7 +86,7 @@ BaseTransform maxxy = coords.max(axis=1) trans_boxes = np.concatenate((minxy, maxxy), axis=1) return trans_boxes - + # if you only want to transform image, do not need to rewrite this function def _apply_mask(self, mask): if self.params['flip']: @@ -106,4 +106,4 @@ BaseTransform flip_transform = CustomRandomFlip(1.0, keys=('image', 'boxes', 'mask')) (converted_img, converted_boxes, converted_mask) = flip_transform((fake_img, fake_boxes, fake_mask)) print('converted boxes', converted_boxes) - + diff --git a/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst b/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst index cb1ffc90ece..63685b12246 100644 --- a/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst @@ -26,7 +26,7 @@ BrightnessTransform 代码示例 ::::::::: - + .. code-block:: python import numpy as np @@ -38,4 +38,3 @@ BrightnessTransform fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) - \ No newline at end of file diff --git a/docs/api/paddle/vision/transforms/CenterCrop_cn.rst b/docs/api/paddle/vision/transforms/CenterCrop_cn.rst index 4d9f8f3b1c7..815046e8389 100644 --- a/docs/api/paddle/vision/transforms/CenterCrop_cn.rst +++ b/docs/api/paddle/vision/transforms/CenterCrop_cn.rst @@ -26,9 +26,9 @@ CenterCrop 代码示例 ::::::::: - + .. code-block:: python - + import numpy as np from PIL import Image from paddle.vision.transforms import CenterCrop diff --git a/docs/api/paddle/vision/transforms/ColorJitter_cn.rst b/docs/api/paddle/vision/transforms/ColorJitter_cn.rst index 4d3e79c20d0..faeed88a28f 100644 --- a/docs/api/paddle/vision/transforms/ColorJitter_cn.rst +++ b/docs/api/paddle/vision/transforms/ColorJitter_cn.rst @@ -29,7 +29,7 @@ ColorJitter 代码示例 ::::::::: - + .. 
code-block:: python import numpy as np @@ -41,4 +41,3 @@ ColorJitter fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) - \ No newline at end of file diff --git a/docs/api/paddle/vision/transforms/Compose_cn.rst b/docs/api/paddle/vision/transforms/Compose_cn.rst index df67e4a5aa5..20843ea2068 100644 --- a/docs/api/paddle/vision/transforms/Compose_cn.rst +++ b/docs/api/paddle/vision/transforms/Compose_cn.rst @@ -19,7 +19,7 @@ Compose 代码示例 ::::::::: - + .. code-block:: python from paddle.vision.datasets import Flowers @@ -32,4 +32,3 @@ Compose sample = flowers[i] print(sample[0].size, sample[1]) - \ No newline at end of file diff --git a/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst b/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst index 44d5209698a..0e56a42d020 100644 --- a/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst @@ -26,7 +26,7 @@ ContrastTransform 代码示例 ::::::::: - + .. code-block:: python import numpy as np @@ -38,4 +38,3 @@ ContrastTransform fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) - \ No newline at end of file diff --git a/docs/api/paddle/vision/transforms/Grayscale_cn.rst b/docs/api/paddle/vision/transforms/Grayscale_cn.rst index f8cc6b91827..3f74f4c8ef7 100644 --- a/docs/api/paddle/vision/transforms/Grayscale_cn.rst +++ b/docs/api/paddle/vision/transforms/Grayscale_cn.rst @@ -26,7 +26,7 @@ Grayscale 代码示例 ::::::::: - + .. 
code-block:: python import numpy as np @@ -39,4 +39,3 @@ Grayscale fake_img = transform(fake_img) print(np.array(fake_img).shape) - \ No newline at end of file diff --git a/docs/api/paddle/vision/transforms/HueTransform_cn.rst b/docs/api/paddle/vision/transforms/HueTransform_cn.rst index 3743ace4d0e..ea7139b157a 100644 --- a/docs/api/paddle/vision/transforms/HueTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/HueTransform_cn.rst @@ -26,7 +26,7 @@ HueTransform 代码示例 ::::::::: - + .. code-block:: python import numpy as np @@ -38,4 +38,3 @@ HueTransform fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) - \ No newline at end of file diff --git a/docs/api/paddle/vision/transforms/Normalize_cn.rst b/docs/api/paddle/vision/transforms/Normalize_cn.rst index 445999bb4c3..719f7e1bf16 100644 --- a/docs/api/paddle/vision/transforms/Normalize_cn.rst +++ b/docs/api/paddle/vision/transforms/Normalize_cn.rst @@ -9,7 +9,7 @@ normalize 参数 ::::::::: - + - img (PIL.Image|np.array|paddle.Tensor) - 用于归一化的数据。 - mean (list|tuple) - 用于每个通道归一化的均值。 - std (list|tuple) - 用于每个通道归一化的标准差值。 diff --git a/docs/api/paddle/vision/transforms/Pad_cn.rst b/docs/api/paddle/vision/transforms/Pad_cn.rst index f71739a121e..5e22c328e7e 100644 --- a/docs/api/paddle/vision/transforms/Pad_cn.rst +++ b/docs/api/paddle/vision/transforms/Pad_cn.rst @@ -22,19 +22,19 @@ pad 代码示例 ::::::::: - + .. 
code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import functional as F - + fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - + fake_img = Image.fromarray(fake_img) - + padded_img = F.pad(fake_img, padding=1) print(padded_img.size) - + padded_img = F.pad(fake_img, padding=(2, 1)) print(padded_img.size) diff --git a/docs/api/paddle/vision/transforms/RandomErasing_cn.rst b/docs/api/paddle/vision/transforms/RandomErasing_cn.rst index 634f41636ed..533b80285c0 100644 --- a/docs/api/paddle/vision/transforms/RandomErasing_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomErasing_cn.rst @@ -30,7 +30,7 @@ RandomErasing 代码示例 ::::::::: - + .. code-block:: python import paddle diff --git a/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst b/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst index 2cf5fa8f472..d2ed45d0306 100644 --- a/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst @@ -26,7 +26,7 @@ RandomHorizontalFlip 代码示例 ::::::::: - + .. code-block:: python import numpy as np diff --git a/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst b/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst index 19c01dff52f..0f9d730b420 100644 --- a/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst @@ -11,7 +11,7 @@ RandomResizedCrop 参数 ::::::::: - + - size (int|list|tuple) - 输出图像大小,当为单个int值时,生成指定size大小的方形图片,为(height,width)格式的数组或元组时按照参数大小输出。 - scale (list|tuple) - 相对于原图的尺寸,随机裁剪后图像大小的范围。默认值:(0.08,1.0)。 - ratio (list|tuple) - 裁剪后的目标图像宽高比范围,默认值:(0.75, 1.33)。 @@ -31,7 +31,7 @@ RandomResizedCrop 代码示例 ::::::::: - + .. 
code-block:: python import numpy as np diff --git a/docs/api/paddle/vision/transforms/RandomRotation_cn.rst b/docs/api/paddle/vision/transforms/RandomRotation_cn.rst index 443ac621160..c6049299301 100644 --- a/docs/api/paddle/vision/transforms/RandomRotation_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomRotation_cn.rst @@ -27,7 +27,7 @@ RandomRotate - center (2-tuple,可选) - 旋转的中心点坐标,原点是图片左上角,默认值是图像的中心点。 - fill (int,可选) - 对图像扩展时填充的值。默认值:0。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 - + 形状 ::::::::: @@ -41,9 +41,9 @@ RandomRotate 代码示例 ::::::::: - + .. code-block:: python - + import numpy as np from PIL import Image from paddle.vision.transforms import RandomRotation @@ -54,4 +54,4 @@ RandomRotate fake_img = transform(fake_img) print(fake_img.size) - + diff --git a/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst b/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst index fca3e48fcb7..a9d98d5d9f9 100644 --- a/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst @@ -26,9 +26,9 @@ RandomVerticalFlip 代码示例 ::::::::: - + .. 
code-block:: python - + import numpy as np from PIL import Image from paddle.vision.transforms import RandomVerticalFlip diff --git a/docs/api/paddle/vision/transforms/Resize_cn.rst b/docs/api/paddle/vision/transforms/Resize_cn.rst index 82a08b39577..efb090d235e 100644 --- a/docs/api/paddle/vision/transforms/Resize_cn.rst +++ b/docs/api/paddle/vision/transforms/Resize_cn.rst @@ -14,19 +14,19 @@ resize - size (int|tuple) - 输出图像大小。如果size是一个序列,例如(h,w),输出大小将与此匹配。如果size为int,图像的较小边缘将与此数字匹配,即如果 height > width,则图像将重新缩放为(size * height / width, size)。 - interpolation (int|str, optional) - 插值的方法,默认值:'bilinear'。 - 当使用 ``pil`` 作为后端时,支持的插值方法如下 - + "nearest": Image.NEAREST, - + "bilinear": Image.BILINEAR, - + "bicubic": Image.BICUBIC, - + "box": Image.BOX, - + "lanczos": Image.LANCZOS, + + "nearest": Image.NEAREST, + + "bilinear": Image.BILINEAR, + + "bicubic": Image.BICUBIC, + + "box": Image.BOX, + + "lanczos": Image.LANCZOS, + "hamming": Image.HAMMING。 - 当使用 ``cv2`` 作为后端时,支持的插值方法如下 - + "nearest": cv2.INTER_NEAREST, - + "bilinear": cv2.INTER_LINEAR, - + "area": cv2.INTER_AREA, - + "bicubic": cv2.INTER_CUBIC, + + "nearest": cv2.INTER_NEAREST, + + "bilinear": cv2.INTER_LINEAR, + + "area": cv2.INTER_AREA, + + "bicubic": cv2.INTER_CUBIC, + "lanczos": cv2.INTER_LANCZOS4。 - + 返回 ::::::::: @@ -36,7 +36,7 @@ resize ::::::::: .. code-block:: python - + import numpy as np from PIL import Image from paddle.vision.transforms import functional as F @@ -52,4 +52,4 @@ resize converted_img = F.resize(fake_img, (200, 150)) print(converted_img.size) # (150, 200) - + diff --git a/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst b/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst index 0cfddd0fb12..44b38ab4d35 100644 --- a/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst @@ -26,7 +26,7 @@ SaturationTransform 代码示例 ::::::::: - + .. 
code-block:: python import numpy as np @@ -38,4 +38,3 @@ SaturationTransform fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) fake_img = transform(fake_img) - \ No newline at end of file diff --git a/docs/api/paddle/vision/transforms/ToTensor_cn.rst b/docs/api/paddle/vision/transforms/ToTensor_cn.rst index 833fe73e29d..dbe7f3bf56f 100644 --- a/docs/api/paddle/vision/transforms/ToTensor_cn.rst +++ b/docs/api/paddle/vision/transforms/ToTensor_cn.rst @@ -12,8 +12,8 @@ ToTensor 若输入数据形状为(H x W), ``ToTensor`` 会将该数据的形状视为(H x W x 1)。 -同时,如果输入的 ``PIL.Image`` 的 ``mode`` 是 ``(L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)`` -其中一种,或者输入的 ``numpy.ndarray`` 数据类型是 'uint8',那个会将输入数据从(0-255)的范围缩放到 +同时,如果输入的 ``PIL.Image`` 的 ``mode`` 是 ``(L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)`` +其中一种,或者输入的 ``numpy.ndarray`` 数据类型是 'uint8',那个会将输入数据从(0-255)的范围缩放到 (0-1)的范围。其他的情况,则保持输入不变。 @@ -36,7 +36,7 @@ ToTensor 代码示例 ::::::::: - + .. code-block:: python import numpy as np @@ -53,6 +53,6 @@ ToTensor print(tensor.shape) # [3, 4, 5] - + print(tensor.dtype) # paddle.float32 diff --git a/docs/api/paddle/vision/transforms/Transpose_cn.rst b/docs/api/paddle/vision/transforms/Transpose_cn.rst index 0366f02e0d9..dbc24cc3e92 100644 --- a/docs/api/paddle/vision/transforms/Transpose_cn.rst +++ b/docs/api/paddle/vision/transforms/Transpose_cn.rst @@ -27,7 +27,7 @@ Transpose 代码示例 ::::::::: - + .. 
code-block:: python import numpy as np diff --git a/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst b/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst index 214aef5fbfd..41bd4160b4c 100644 --- a/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst +++ b/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst @@ -23,4 +23,4 @@ adjust_brightness COPY-FROM: paddle.vision.transforms.adjust_brightness - + diff --git a/docs/api/paddle/vision/transforms/normalize_cn.rst b/docs/api/paddle/vision/transforms/normalize_cn.rst index 125e218114b..c6d637b72a8 100644 --- a/docs/api/paddle/vision/transforms/normalize_cn.rst +++ b/docs/api/paddle/vision/transforms/normalize_cn.rst @@ -9,7 +9,7 @@ normalize 参数 ::::::::: - + - img (PIL.Image|np.array|paddle.Tensor) - 用于归一化的数据。 - mean (list|tuple) - 用于每个通道归一化的均值。 - std (list|tuple) - 用于每个通道归一化的标准差值。 diff --git a/docs/api/paddle/vision/transforms/pad_cn.rst b/docs/api/paddle/vision/transforms/pad_cn.rst index f71739a121e..5e22c328e7e 100644 --- a/docs/api/paddle/vision/transforms/pad_cn.rst +++ b/docs/api/paddle/vision/transforms/pad_cn.rst @@ -22,19 +22,19 @@ pad 代码示例 ::::::::: - + .. 
code-block:: python import numpy as np from PIL import Image from paddle.vision.transforms import functional as F - + fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - + fake_img = Image.fromarray(fake_img) - + padded_img = F.pad(fake_img, padding=1) print(padded_img.size) - + padded_img = F.pad(fake_img, padding=(2, 1)) print(padded_img.size) diff --git a/docs/api/paddle/vision/transforms/resize_cn.rst b/docs/api/paddle/vision/transforms/resize_cn.rst index 82a08b39577..efb090d235e 100644 --- a/docs/api/paddle/vision/transforms/resize_cn.rst +++ b/docs/api/paddle/vision/transforms/resize_cn.rst @@ -14,19 +14,19 @@ resize - size (int|tuple) - 输出图像大小。如果size是一个序列,例如(h,w),输出大小将与此匹配。如果size为int,图像的较小边缘将与此数字匹配,即如果 height > width,则图像将重新缩放为(size * height / width, size)。 - interpolation (int|str, optional) - 插值的方法,默认值:'bilinear'。 - 当使用 ``pil`` 作为后端时,支持的插值方法如下 - + "nearest": Image.NEAREST, - + "bilinear": Image.BILINEAR, - + "bicubic": Image.BICUBIC, - + "box": Image.BOX, - + "lanczos": Image.LANCZOS, + + "nearest": Image.NEAREST, + + "bilinear": Image.BILINEAR, + + "bicubic": Image.BICUBIC, + + "box": Image.BOX, + + "lanczos": Image.LANCZOS, + "hamming": Image.HAMMING。 - 当使用 ``cv2`` 作为后端时,支持的插值方法如下 - + "nearest": cv2.INTER_NEAREST, - + "bilinear": cv2.INTER_LINEAR, - + "area": cv2.INTER_AREA, - + "bicubic": cv2.INTER_CUBIC, + + "nearest": cv2.INTER_NEAREST, + + "bilinear": cv2.INTER_LINEAR, + + "area": cv2.INTER_AREA, + + "bicubic": cv2.INTER_CUBIC, + "lanczos": cv2.INTER_LANCZOS4。 - + 返回 ::::::::: @@ -36,7 +36,7 @@ resize ::::::::: .. 
code-block:: python - + import numpy as np from PIL import Image from paddle.vision.transforms import functional as F @@ -52,4 +52,4 @@ resize converted_img = F.resize(fake_img, (200, 150)) print(converted_img.size) # (150, 200) - + diff --git a/docs/api/paddle/vision/transforms/rotate_cn.rst b/docs/api/paddle/vision/transforms/rotate_cn.rst index e5c5cf9e051..d705f32a380 100644 --- a/docs/api/paddle/vision/transforms/rotate_cn.rst +++ b/docs/api/paddle/vision/transforms/rotate_cn.rst @@ -24,9 +24,9 @@ rotate 代码示例 ::::::::: - + .. code-block:: python - + import numpy as np from PIL import Image from paddle.vision.transforms import functional as F @@ -37,4 +37,4 @@ rotate rotated_img = F.rotate(fake_img, 90) print(rotated_img.size) - + diff --git a/docs/api/paddle/vision/transforms/to_grayscale_cn.rst b/docs/api/paddle/vision/transforms/to_grayscale_cn.rst index 3bf50810045..be19de26592 100644 --- a/docs/api/paddle/vision/transforms/to_grayscale_cn.rst +++ b/docs/api/paddle/vision/transforms/to_grayscale_cn.rst @@ -20,12 +20,12 @@ to_grayscale - 如果 output_channels == 1:返回一个单通道图像。 - 如果 output_channels == 3:返回一个RBG格式的3通道图像。 - + 代码示例 ::::::::: - + .. 
code-block:: python - + import numpy as np from PIL import Image from paddle.vision.transforms import functional as F @@ -36,4 +36,4 @@ to_grayscale gray_img = F.to_grayscale(fake_img) print(gray_img.size) - + diff --git a/docs/api/paddle/vision/transforms/to_tensor_cn.rst b/docs/api/paddle/vision/transforms/to_tensor_cn.rst index 876a70aaf95..4e72626ead0 100644 --- a/docs/api/paddle/vision/transforms/to_tensor_cn.rst +++ b/docs/api/paddle/vision/transforms/to_tensor_cn.rst @@ -10,8 +10,8 @@ to_tensor 将形状为 (H x W x C)的输入数据 ``PIL.Image`` 或 ``numpy.ndarray`` 转换为 (C x H x W)。 如果想保持形状不变,可以将参数 ``data_format`` 设置为 ``'HWC'``。 -同时,如果输入的 ``PIL.Image`` 的 ``mode`` 是 ``(L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)`` -其中一种,或者输入的 ``numpy.ndarray`` 数据类型是 'uint8',那个会将输入数据从(0-255)的范围缩放到 +同时,如果输入的 ``PIL.Image`` 的 ``mode`` 是 ``(L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)`` +其中一种,或者输入的 ``numpy.ndarray`` 数据类型是 'uint8',那个会将输入数据从(0-255)的范围缩放到 (0-1)的范围。其他的情况,则保持输入不变。 参数 diff --git a/docs/api/paddle/where_cn.rst b/docs/api/paddle/where_cn.rst index 8f19f3ccfc5..b7f7832d077 100644 --- a/docs/api/paddle/where_cn.rst +++ b/docs/api/paddle/where_cn.rst @@ -17,7 +17,7 @@ where y_i, & \text{if} \ condition_i \ \text{is} \ False \\ \end{cases}. -.. note:: +.. 
note:: ``numpy.where(condition)`` 功能与 ``paddle.nonzero(condition, as_tuple=True)`` 相同,可以参考 :ref:`cn_api_tensor_search_nonzero`。 参数 diff --git a/docs/api/paddle/zeros_like_cn.rst b/docs/api/paddle/zeros_like_cn.rst index 2cd80aca038..e6143a745f5 100644 --- a/docs/api/paddle/zeros_like_cn.rst +++ b/docs/api/paddle/zeros_like_cn.rst @@ -13,7 +13,7 @@ zeros_like - **x** (Tensor) – 输入的多维Tensor,数据类型可以是bool,float16, float32,float64,int32,int64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 - **dtype** (str|np.dtype,可选) - 输出Tensor的数据类型,支持bool,float16, float32,float64,int32,int64。当该参数值为None时,输出Tensor的数据类型与 ``x`` 相同。默认值为None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - + 返回 :::::::::: Tensor:和 ``x`` 具有相同的形状全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 diff --git a/docs/api_guides/X2Paddle/Caffe-Fluid.rst b/docs/api_guides/X2Paddle/Caffe-Fluid.rst index 1440910056f..6f895c29a87 100644 --- a/docs/api_guides/X2Paddle/Caffe-Fluid.rst +++ b/docs/api_guides/X2Paddle/Caffe-Fluid.rst @@ -4,10 +4,10 @@ Caffe-Fluid常用层对应表 ######################## -本文档梳理了Caffe常用Layer与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有Caffe使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 +本文档梳理了Caffe常用Layer与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有Caffe使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 -.. csv-table:: +.. csv-table:: :header: "序号", "Caffe Layer", "Fluid接口", "备注" :widths: 1, 8, 8, 3 diff --git a/docs/api_guides/X2Paddle/TensorFlow-Fluid.rst b/docs/api_guides/X2Paddle/TensorFlow-Fluid.rst index 20ee91c8cd7..940904dba35 100644 --- a/docs/api_guides/X2Paddle/TensorFlow-Fluid.rst +++ b/docs/api_guides/X2Paddle/TensorFlow-Fluid.rst @@ -4,9 +4,9 @@ TensorFlow-Fluid常用接口对应表 ############################### -本文档基于TensorFlow v1.13梳理了常用API与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有TensorFlow使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 +本文档基于TensorFlow v1.13梳理了常用API与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有TensorFlow使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 -.. csv-table:: +.. 
csv-table:: :header: "序号", "TensorFlow接口", "Fluid接口", "备注" :widths: 1, 8, 8, 3 diff --git a/docs/api_guides/low_level/backward_en.rst b/docs/api_guides/low_level/backward_en.rst index 022b4900f07..599e4111dc4 100644 --- a/docs/api_guides/low_level/backward_en.rst +++ b/docs/api_guides/low_level/backward_en.rst @@ -18,6 +18,6 @@ You could refer to `back propagation algorithm `_ and `Hierarchical sigmoid `_ . * NCE solves the binary-classification problem of discriminating the true distribution and the noise distribution by converting the multi-classification problem into a classifier. The maximum likelihood estimation is performed based on the binary-classification to avoid calculating the normalization factor in the full-class space to reduce computational complexity. diff --git a/docs/api_guides/low_level/layers/pooling.rst b/docs/api_guides/low_level/layers/pooling.rst index 3ef868bf28c..6d615fb7126 100644 --- a/docs/api_guides/low_level/layers/pooling.rst +++ b/docs/api_guides/low_level/layers/pooling.rst @@ -31,11 +31,11 @@ PaddlePaddle中有针对定长图像特征的二维(pool2d)、三维卷积(pool3 - ``use_cudnn``\ : 选项可以来选择是否使用cudnn来优化计算池化速度。 - ``ceil_mode``\ : 是否使用ceil函数计算输出高度和宽度。\ ``ceil mode``\ 意为天花板模式,是指会把特征图中不足\ ``filter size``\ 的边给保留下来,单独另算,或者也可以理解为在原来的数据上补充了值为-NAN的边。而floor模式则是直接把不足\ ``filter size``\ 的边给舍弃了。具体计算公式如下: - + - 非\ ``ceil_mode``\ 下:\ ``输出大小 = (输入大小 - filter size + 2 * padding) / stride(步长) + 1`` - + - ``ceil_mode``\ 下:\ ``输出大小 = (输入大小 - filter size + 2 * padding + stride - 1) / stride + 1`` - + api汇总: @@ -54,7 +54,7 @@ api汇总: - ``pooled_height`` 和 ``pooled_width``\ : 这里可以接受非正方的池化窗口大小 - ``spatial_scale``\ : 用作设定缩放RoI和原图缩放的比例,注意,这里的设定需要用户自行计算RoI和原图的实际缩放比例。 - + api汇总: diff --git a/docs/api_guides/low_level/layers/pooling_en.rst b/docs/api_guides/low_level/layers/pooling_en.rst index 8d19e8d0ff2..64bb9b06eb0 100755 --- a/docs/api_guides/low_level/layers/pooling_en.rst +++ b/docs/api_guides/low_level/layers/pooling_en.rst @@ -31,11 +31,11 @@ PaddlePaddle has two-dimensional 
(pool2d), three-dimensional convolution (pool3d - The ``use_cudnn`` : This option allows you to choose whether or not to use cudnn to accelerate pooling. - ``ceil_mode`` : Whether to use the ceil function to calculate the output height and width. ``ceil mode`` means ceiling mode, which means that, in the feature map, the edge parts that are smaller than ``filter size`` will be retained, and separately calculated. It can be understood as supplementing the original data with edge with a value of -NAN. By contrast, The floor mode directly discards the edges smaller than the ``filter size``. The specific calculation formula is as follows: -    + * Non ``ceil_mode`` : ``Output size = (input size - filter size + 2 * padding) / stride (stride size) + 1`` -     + * ``ceil_mode`` : ``Output size = (input size - filter size + 2 * padding + stride - 1) / stride + 1`` -     + related API: @@ -54,7 +54,7 @@ related API: - ``pooled_height`` and ``pooled_width`` : accept non-square pooling box sizes - ``spatial_scale`` : Used to set the scale of scaling the RoI and the original image. Note that the settings here require the user to manually calculate the actual scaling of the RoI and the original image. 
-  + related API: diff --git a/docs/api_guides/low_level/metrics.rst b/docs/api_guides/low_level/metrics.rst index 4044483704a..e771b738b6b 100644 --- a/docs/api_guides/low_level/metrics.rst +++ b/docs/api_guides/low_level/metrics.rst @@ -11,20 +11,20 @@ - 准确率: :code:`Precision` ,用来衡量二分类中召回真值和召回值的比例。 - API Reference 请参考 :ref:`cn_api_fluid_metrics_Precision` + API Reference 请参考 :ref:`cn_api_fluid_metrics_Precision` - 正确率: :code:`Accuracy` ,用来衡量二分类中召回真值和总样本数的比例。需要注意的是,准确率和正确率的定义是不同的,可以类比于误差分析中的 :code:`Variance` 和 :code:`Bias` 。 - API Reference 请参考 :ref:`cn_api_fluid_metrics_Accuracy` + API Reference 请参考 :ref:`cn_api_fluid_metrics_Accuracy` - 召回率: :code:`Recall` ,用来衡量二分类中召回值和总样本数的比例。准确率和召回率的选取相互制约,实际模型中需要进行权衡,可以参考文档 `Precision_and_recall `_ 。 - API Reference 请参考 :ref:`cn_api_fluid_metrics_Recall` + API Reference 请参考 :ref:`cn_api_fluid_metrics_Recall` - AUC: :code:`Area Under Curve`, 适用于二分类的分类模型评估,用来计算 `ROC曲线的累积面积 `_。:code:`Auc` 通过python计算实现,如果关注性能,可以使用 :code:`fluid.layers.auc` 代替。 - API Reference 请参考 :ref:`cn_api_fluid_metrics_Auc` + API Reference 请参考 :ref:`cn_api_fluid_metrics_Auc` - 平均准确度: :code:`Average Precision` ,常用在Faster R-CNN和SSD等物体检测任务中。在不同召回条件下,计算了准确率的平均值,具体可以参考文档 `Average-precision `_ 和 `SSD: Single Shot MultiBox Detector `_。 diff --git a/docs/api_guides/low_level/model_save_reader.rst b/docs/api_guides/low_level/model_save_reader.rst index d14e19b48e6..c8afd62817d 100644 --- a/docs/api_guides/low_level/model_save_reader.rst +++ b/docs/api_guides/low_level/model_save_reader.rst @@ -33,15 +33,15 @@ API Reference 请参考 :ref:`cn_api_fluid_io_save_persistables`。 -- :code:`fluid.io.save_inference_model`:请参考 :ref:`api_guide_inference`。 +- :code:`fluid.io.save_inference_model`:请参考 :ref:`api_guide_inference`。 模型加载API介绍 ==================== - :code:`fluid.io.load_vars`:通过执行器(:code:`Executor`)加载指定目录中的变量。加载变量的方式有两种: - + 1)通过接口中的 :code:`vars` 指定需要加载的变量列表。 - + 2)将一个已经存在的程序(:code:`Program`)赋值给接口中的 :code:`main_program`,然后这个程序中的所有变量都将被加载。 第一种加载方式的优先级要高于第二种。 diff --git 
a/docs/api_guides/low_level/model_save_reader_en.rst b/docs/api_guides/low_level/model_save_reader_en.rst index 82c43d22359..6978c10f69d 100755 --- a/docs/api_guides/low_level/model_save_reader_en.rst +++ b/docs/api_guides/low_level/model_save_reader_en.rst @@ -33,15 +33,15 @@ Introduction to APIs for saving a model For API Reference, please refer to :ref:`api_fluid_io_save_persistables`. -- :code:`fluid.io.save_inference_model`: please refer to :ref:`api_guide_inference_en`. +- :code:`fluid.io.save_inference_model`: please refer to :ref:`api_guide_inference_en`. Introduction to APIs for loading a model ======================================== - :code:`fluid.io.load_vars`: Executor( :code:`Executor` ) loads variables into the target directory. There are two ways to load variables: - + 1):code:`vars` in the API assigns variable list to be loaded. - + 2)Assign an existed program( :code:`Program` ) to the :code:`main_program` field in the API, and then all variables in the program will be loaded. The first loading method has higher priority than the second one. diff --git a/docs/api_guides/low_level/optimizer.rst b/docs/api_guides/low_level/optimizer.rst index c597900ecba..016d36b652c 100644 --- a/docs/api_guides/low_level/optimizer.rst +++ b/docs/api_guides/low_level/optimizer.rst @@ -12,7 +12,7 @@ ------------------ :code:`SGD` 是实现 `随机梯度下降 `_ 的一个 :code:`Optimizer` 子类,是 `梯度下降 `_ 大类中的一种方法。 -当需要训练大量样本的时候,往往选择 :code:`SGD` 来使损失函数更快的收敛。 +当需要训练大量样本的时候,往往选择 :code:`SGD` 来使损失函数更快的收敛。 API Reference 请参考 :ref:`cn_api_fluid_optimizer_SGDOptimizer` diff --git a/docs/api_guides/low_level/optimizer_en.rst b/docs/api_guides/low_level/optimizer_en.rst index 732b786f791..d166163796e 100755 --- a/docs/api_guides/low_level/optimizer_en.rst +++ b/docs/api_guides/low_level/optimizer_en.rst @@ -12,7 +12,7 @@ With `forward computing and back propagation `_ which is a method of `Gradient Descent `_ . 
-When it needs to train a large number of samples, we usually choose :code:`SGD` to make loss function converge more quickly. +When it needs to train a large number of samples, we usually choose :code:`SGD` to make loss function converge more quickly. API Reference: :ref:`api_fluid_optimizer_SGDOptimizer` diff --git a/docs/api_guides/low_level/parallel_executor_en.rst b/docs/api_guides/low_level/parallel_executor_en.rst index faad6688300..64abf3a2ccf 100644 --- a/docs/api_guides/low_level/parallel_executor_en.rst +++ b/docs/api_guides/low_level/parallel_executor_en.rst @@ -34,10 +34,10 @@ Since the execution speed of the model is related to the model structure and the .. code-block:: python # Note: - # - If you want to specify the GPU cards which are used to run - # in ParallelExecutor, you should define the CUDA_VISIBLE_DEVICES + # - If you want to specify the GPU cards which are used to run + # in ParallelExecutor, you should define the CUDA_VISIBLE_DEVICES # in environment. - # - If you want to use multi CPU to run the program in ParallelExecutor, + # - If you want to use multi CPU to run the program in ParallelExecutor, # you should define the CPU_NUM in the environment. # First create the Executor. 
@@ -53,8 +53,8 @@ Since the execution speed of the model is related to the model structure and the build_strategy = fluid.BuildStrategy() build_strategy.memory_optimize = True if memory_opt else False - train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=train_program, + train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, + main_program=train_program, build_strategy=build_strategy, exec_strategy=exec_strategy, loss_name=loss.name) diff --git a/docs/api_guides/low_level/parameter_en.rst b/docs/api_guides/low_level/parameter_en.rst index fe46687ce8c..a2c376d6818 100644 --- a/docs/api_guides/low_level/parameter_en.rst +++ b/docs/api_guides/low_level/parameter_en.rst @@ -172,4 +172,4 @@ In the miniBatch training process, parameters will be updated once after each ba The averaged parameters are only used for testing and prediction, and they do not get involved in the actual training process. -API reference :ref:`api_fluid_optimizer_ModelAverage` +API reference :ref:`api_fluid_optimizer_ModelAverage` diff --git a/docs/design/algorithm/parameter_average.md b/docs/design/algorithm/parameter_average.md index a77062cb16a..a0d8e56868f 100644 --- a/docs/design/algorithm/parameter_average.md +++ b/docs/design/algorithm/parameter_average.md @@ -41,7 +41,7 @@ We can add the ParameterAverageOptimizer op to the graph through Python API. Usi - Makes it easy for the users to customize and extend the framework. **Disadvantages**: - - Implementation requires re-writing the averaging methodology in Python. + - Implementation requires re-writing the averaging methodology in Python. 
### Low-Level implementation diff --git a/docs/design/concepts/block.md b/docs/design/concepts/block.md index 15d3d67f557..20a1c9d25b5 100644 --- a/docs/design/concepts/block.md +++ b/docs/design/concepts/block.md @@ -153,7 +153,7 @@ with rnn.step(): h = rnn.memory(init = m) h_prev = rnn.previous_memory(h) a = layer.fc(W, x) - b = layer.fc(U, h_prev) + b = layer.fc(U, h_prev) s = pd.add(a, b) act = pd.sigmoid(s) rnn.update_memory(h, act) diff --git a/docs/design/concepts/cpp_data_feeding.md b/docs/design/concepts/cpp_data_feeding.md index 745d4fa007a..fc197b89cf4 100644 --- a/docs/design/concepts/cpp_data_feeding.md +++ b/docs/design/concepts/cpp_data_feeding.md @@ -55,7 +55,7 @@ class FileReader : public ReaderBase { A file reader binds with a single file and reads one data instance at a time. Each type of file reader shall implement its own `ReadNextImpl()`, `HasNext()` and `ReInit()`. -The `ReadNextImpl()` is invoked by `ReadNext()`. Besides invoking `ReadNextImpl()`, `ReadNext()` is also responsible for checking the output, making sure that each shape of `LoDTensor` in `*out` is consistent with the one in `dims_`. +The `ReadNextImpl()` is invoked by `ReadNext()`. Besides invoking `ReadNextImpl()`, `ReadNext()` is also responsible for checking the output, making sure that each shape of `LoDTensor` in `*out` is consistent with the one in `dims_`. 
### DecoratedReader diff --git a/docs/design/concepts/python_data_feeding.md b/docs/design/concepts/python_data_feeding.md index f68cbbf7864..ef85ff8e960 100644 --- a/docs/design/concepts/python_data_feeding.md +++ b/docs/design/concepts/python_data_feeding.md @@ -46,7 +46,7 @@ class LoDTensorBlockingQueue { }; class LoDTensorBlockingQueueHolder { - public: + public: // Call the constructor of `LoDTensorBlockingQueue` to create queue_ // `InitOnce` can only called once, otherwise an exception would raise void InitOnce(size_t capacity, const std::vector& dims) { @@ -125,6 +125,6 @@ def py_reader(capacity, shapes): out = create_var() create_py_reader_op_with_queue_name( inputs={'blocking_queue': queue_name}, - outputs={'Out':[out]}) + outputs={'Out':[out]}) return out, feed_queue ``` diff --git a/docs/design/concepts/tensor.md b/docs/design/concepts/tensor.md index 12a62b70120..3fdd8b35de2 100644 --- a/docs/design/concepts/tensor.md +++ b/docs/design/concepts/tensor.md @@ -53,7 +53,7 @@ In Majel, `DDimVar` is derived from `Dim`, `DArrayVar` is from `Array`. ```c++ template struct Dim { -... +... int head; Dim tail; } diff --git a/docs/design/concurrent/channel.md b/docs/design/concurrent/channel.md index ad5666e4970..0ef886408d5 100644 --- a/docs/design/concurrent/channel.md +++ b/docs/design/concurrent/channel.md @@ -131,7 +131,7 @@ In golang, variables in channels are copied from the sender to the receiver. In Paddle, the data from our variables are **moved** from sender to receiver. As a result, these variables should not be used after they are sent. We provide a flag in channel_send method to allow users to copy the variable to -be sent before it is sent. +be sent before it is sent. Please note that this is achieved by adding an **assign** operator and creating a temporary variable that is sent in place of the original variable. 
Please diff --git a/docs/design/concurrent/concurrent_programming.md b/docs/design/concurrent/concurrent_programming.md index ecee3f8b958..1a1a3bc1d41 100644 --- a/docs/design/concurrent/concurrent_programming.md +++ b/docs/design/concurrent/concurrent_programming.md @@ -1,8 +1,8 @@ # Design Doc: Concurrent Programming with Fluid -With PaddlePaddle Fluid, users describe a program other than a model. The program is a [`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto) protobuf message. TensorFlow/MxNet/Caffe2 applications generate protobuf messages too, but their protobuf messages represent the model, a graph of operators, but not the program that trains/uses the model. +With PaddlePaddle Fluid, users describe a program other than a model. The program is a [`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto) protobuf message. TensorFlow/MxNet/Caffe2 applications generate protobuf messages too, but their protobuf messages represent the model, a graph of operators, but not the program that trains/uses the model. -Many know that when we program TensorFlow, we can specify the device on which each operator runs. This allows us to create a concurrent/parallel AI application. An interesting questions is **how does a `ProgramDesc` represents a concurrent program?** +Many know that when we program TensorFlow, we can specify the device on which each operator runs. This allows us to create a concurrent/parallel AI application. An interesting questions is **how does a `ProgramDesc` represents a concurrent program?** The answer relies on the fact that a `ProgramDesc` is similar to an abstract syntax tree (AST) that describes a program. So users just program a concurrent program that they do with any concurrent programming language, e.g., [Go](https://golang.org). 
@@ -144,12 +144,12 @@ func main() { //// block 0 An explanation of the above program: -- `fluid.k8s` is a package that provides access to Kubernetes API. -- `fluid.k8s.get_worker_addrs` returns the list of IP and ports of all pods of the current job except for the current one (the master pod). +- `fluid.k8s` is a package that provides access to Kubernetes API. +- `fluid.k8s.get_worker_addrs` returns the list of IP and ports of all pods of the current job except for the current one (the master pod). - `fluid.tensor_array` creates a [tensor array](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/lod_tensor_array.h). `fluid.parallel_for` creates a `ParallelFor` intrinsic, which, when executed, 1. creates `len(L)` scopes, each for the concurrent running of the sub-block (block 1 in this case), and initializes a variable named "index" in the scope to an integer value in the range `[0, len(L)-1]`, and - 2. creates `len(L)` threads by calling into the `ThreadPool` singleton, each thread + 2. creates `len(L)` threads by calling into the `ThreadPool` singleton, each thread 1. creates an Executor instance, and 2. calls `Executor.Run(block)`, where `block` is block 1 as explained above. 1. Please be aware that block 1 is a sub-block of block 0, so ops in block 1 could refer to variables defined in block 0. 
diff --git a/docs/design/concurrent/csp.md b/docs/design/concurrent/csp.md index 8059b53fd7e..bbc55ab31f7 100644 --- a/docs/design/concurrent/csp.md +++ b/docs/design/concurrent/csp.md @@ -199,7 +199,7 @@ In Go, the `select` statement lets a goroutine wait on multiple communication op ```go -ch1 := make(chan int) +ch1 := make(chan int) ch2 := make(chan int, 100) x := 0 diff --git a/docs/design/concurrent/go_op.md b/docs/design/concurrent/go_op.md index d55dcbc937c..b4e90277ed8 100644 --- a/docs/design/concurrent/go_op.md +++ b/docs/design/concurrent/go_op.md @@ -19,8 +19,8 @@ with fluid.Go(): fluid.channel_send(channel, tensor) # Receive sent tensor from "channel" on the main thread -result = fill_constant(shape=[1], dtype='int', value=-1) -fluid.channel_recv(ch, result) +result = fill_constant(shape=[1], dtype='int', value=-1) +fluid.channel_recv(ch, result) ``` The go operator can be accessed by using the fluid.Go() control flow. This diff --git a/docs/design/concurrent/select_op.md b/docs/design/concurrent/select_op.md index 2c193f5a2b4..984a254987b 100644 --- a/docs/design/concurrent/select_op.md +++ b/docs/design/concurrent/select_op.md @@ -42,7 +42,7 @@ x = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) y = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=1) while_cond = fill_constant(shape=[1], dtype=core.VarDesc.VarType.BOOL, value=True) -while_op = While(cond=while_cond) +while_op = While(cond=while_cond) with while_op.block(): with fluid.Select() as select: @@ -103,7 +103,7 @@ blocks { // X: All input variables used by operators within the select block // case_to_execute: Variable filled in by select_op when it determines // which case to execute. - // + // // outputs: // Out: All output variables referenced by operators within select block. 
// @@ -126,7 +126,7 @@ blocks { outputs { parameter: "Out" arguments: "fill_constant_110.tmp_0" - } + } type: "select" attrs { name: "sub_block" diff --git a/docs/design/data_type/float16.md b/docs/design/data_type/float16.md index aad578f5067..57d912bd8cf 100644 --- a/docs/design/data_type/float16.md +++ b/docs/design/data_type/float16.md @@ -96,7 +96,7 @@ float half_to_float(float16 h); which provides one-to-one conversion between float32 and float16. These twos functions will do different conversion routines based on the current hardware. CUDA/ARM instrinsics will be used when the corresonding hardware is available. If the hardware or compiler level does not support float32 to float16 conversion, software emulation will be performed to do the conversion. ## float16 inference -In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#program). The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one. 
+In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#program). The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one. ### Operator level requirement Each operator has many kernels for different data types, devices, and library types. The operator will select the appropriate kernel to run based on, among other things, the data type of the input variables. By default, every Fluid operator has a float data type kernel that takes float variables as input and generates float output. @@ -118,7 +118,7 @@ The following code demonstrates how to do the tensor conversion. 
```Python # var is the variable of float weights # tensor is a numpy array of data copied from the tensor data in var -# fp16_var is the variable that will contain float16 weights converted from var +# fp16_var is the variable that will contain float16 weights converted from var tensor = numpy.array(var.get_tensor()) fp16_tensor = fp16_var.get_tensor() @@ -126,13 +126,13 @@ fp16_tensor = fp16_var.get_tensor() # view(numpy.uint16) is used so that the internal memory of the numpy array # will be reinterpreted to be of uint16 data type, which is binded to # Fluid float16 class via pybind with the help of uint16_t built-in c++ type -fp16_tensor.set(tensor.astype(numpy.float16).view(numpy.uint16), GPUPlace) +fp16_tensor.set(tensor.astype(numpy.float16).view(numpy.uint16), GPUPlace) ``` ### Consistent API requirement The basic inference in float16 mode requires users to feed input and obtain output both of float16 data type. However, in this way, the inference APIs are not consistent between float16 mode and float mode, and users may find it confusing and diffcult to use float16 inference since they need to do extra steps to provide float16 input data and convert float16 output data back to float. To have consistent API for different inference modes, we need to transpile the program desc in some way so that we can run float16 inference by feeding and fetching variables of float data type. -This problem can be solved by introducing a type-casting operator which takes an input variable of certain data type, cast it to another specified data type, and put the casted data into the output variable. Insert cast operator where needed can make a program internally run in float16 mode. +This problem can be solved by introducing a type-casting operator which takes an input variable of certain data type, cast it to another specified data type, and put the casted data into the output variable. Insert cast operator where needed can make a program internally run in float16 mode. 
### float16 transpiler Put all the above requirements in mind, we designed a float16 inference transpiler that can tranpile a float32 mode inference program desc to a float16 mode one. diff --git a/docs/design/mkldnn/inplace/inplace.md b/docs/design/mkldnn/inplace/inplace.md index ccad2687866..cc3e4821e8d 100644 --- a/docs/design/mkldnn/inplace/inplace.md +++ b/docs/design/mkldnn/inplace/inplace.md @@ -3,7 +3,7 @@ PaddlePaddle is implementing concept of in-place execution of some of operators. The idea of in-place execution is present on following picture: -![](images/inplace.svg) +![](images/inplace.svg) Exemplary graph presents three operators where one of them (type of elementwise_add) is to be performing in-place computation. In-place computation means that input variable (Tensor) is used for both input and output. This means that one of inputs will be overwritten with computational results. In presented picture in-place operator (elementwise_add) is having two input nodes: *b* and *d* and output *b*. So *b* is used for input and output and underneath it is represented by a one, shared Tensor. So this means that variable *b* is initially holding some input data and after the operator computation, input data is lost and replaced by computation's result. @@ -61,7 +61,7 @@ are checked by oneDNN in-place pass: 1. If input node to in-place operator is also an input to different operator, then in-place computation cannot be performed, as there is a risk that other operator consuming in-placed op operator will be executed after in-placed operator and therefore get invalid input data (overwritten by in-place computation). 2. If after in-placed operator there is another operator that is reusing in-place op's input var then in-place cannot happen unless next op can perform in-place computation. Next picture presents the idea. 
-![](images/unwanted-inplace.svg) +![](images/unwanted-inplace.svg) In the picture we are seeing that in-place pass is considering to enable in-place execution for softmax oneDNN kernel. All is fine, but next operator after softmax is layer norm (non-oneDNN). Layer norm is already reusing input of softmax due to some earlier memory optimization pass being applied. If we make softmax op to perform in-place computation, then it will also make layer norm to work in-place (b -> a). The thing is that layer norm cannot work in-place (InplaceInferer is not present), so if we force it do so layer norm will produce invalid result. @@ -76,7 +76,7 @@ When sub-graph is aligned with restrictions then in-place computation can be ena in-place computation. 5. if there are multiple operators after our in-place operator then we need to update all of them (their input vars). Idea is presented in the following picture: -![](images/multi-output-inplace.svg) +![](images/multi-output-inplace.svg) We can see that there are two *top_k* operators after *elementwise_add* operator that is set to work in-placed. Each of *top_k* is having its own list of input vars, so we need to rename relevant input var to new name. As in-place pattern consists of: input node -> in-place op -> output node -> next op -> next op's output. For presented graph, there will be 8 patterns detected: diff --git a/docs/design/mkldnn/nhwc/nhwc.md b/docs/design/mkldnn/nhwc/nhwc.md index 19fca60d20e..8601d4a44a2 100644 --- a/docs/design/mkldnn/nhwc/nhwc.md +++ b/docs/design/mkldnn/nhwc/nhwc.md @@ -66,7 +66,7 @@ Hence when enabling any operator to have ``NHWC`` data arrangement supported we #### *InferShape()* modifications This modification is related to fact that MKL-DNN kernel does operate on data with shape described in ``NCHW`` -order, hence We need to make sure that even if ``data_format`` is having value ``NHWC`` still ``Infershape`` will work on ``NCHW`` order. 
+order, hence We need to make sure that even if ``data_format`` is having value ``NHWC`` still ``Infershape`` will work on ``NCHW`` order. Snippet from *PoolOp::InferShape()* that illustrated the idea of modifications to *InferShape*: diff --git a/docs/design/modules/regularization.md b/docs/design/modules/regularization.md index 4c3547a94dc..2ed87437fd8 100644 --- a/docs/design/modules/regularization.md +++ b/docs/design/modules/regularization.md @@ -45,7 +45,7 @@ Below is an example of a really simple feed forward neural network. The Python API will modify this computation graph to add regularization operators. The modified computation graph will look as follows:
-    + ### Python API implementation for Regularization Using the low level ops, `L2_regularization_op` and `L1_regularization_op`, any user can add regularization to their computation graphs. However, this will require a lot of lines of code and we should design Python APIs that support regularization. An example of such an API can be seen in [Keras](https://keras.io/regularizers/). As per the PaddlePaddle [Python API design](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/modules/python_api.md), the layer functions are responsible for creating operators, operator parameters and variables. Since regularization is a property of parameters, it makes sense to create these in the layer functions. diff --git a/docs/design/motivation/fluid.md b/docs/design/motivation/fluid.md index bb311cfc20c..7060cbe4894 100644 --- a/docs/design/motivation/fluid.md +++ b/docs/design/motivation/fluid.md @@ -101,7 +101,7 @@ for i in xrange(1000): x = m["sentence"] for t in xrange x.len(): h[t] = the_step(x[t]) -``` +``` With Fluid, the training loop and the RNN in the above program are not really Python loops, but just a "loop structure" provided by Fluid and implemented in C++ as the following: @@ -113,7 +113,7 @@ with train_loop.block(): rnn = layers.While(...) with rnn.block(): h[t] = the_step(input[t]) -``` +``` An actual Fluid example is described [here](https://github.com/PaddlePaddle/Paddle/blob/bde090a97564b9c61a6aaa38b72ccc4889d102d9/python/paddle/fluid/tests/unittests/test_while_op.py#L50-L58). 
diff --git a/docs/design/others/gan_api.md b/docs/design/others/gan_api.md index b00c0c5706d..6c311b493d4 100644 --- a/docs/design/others/gan_api.md +++ b/docs/design/others/gan_api.md @@ -48,7 +48,7 @@ To be more detailed, we introduce our design of DCGAN as following: class DCGAN(object): def __init__(self, y_dim=None): - # hyper parameters + # hyper parameters self.y_dim = y_dim # conditional gan or not self.batch_size = 100 self.z_dim = z_dim # input noise dimension diff --git a/docs/dev_guides/api_contributing_guides/api_contributing_guides_cn.rst b/docs/dev_guides/api_contributing_guides/api_contributing_guides_cn.rst index c027d16e4d2..b03bbd15edc 100644 --- a/docs/dev_guides/api_contributing_guides/api_contributing_guides_cn.rst +++ b/docs/dev_guides/api_contributing_guides/api_contributing_guides_cn.rst @@ -40,8 +40,8 @@ API设计文档的目的是为了社区开发者更容易的参与开源项目 :header: "提交内容", "参考文档", "提交位置" :widths: 10, 30,30 - "1、API 设计文档", "- `API 设计及命名规范 <./api_design_guidelines_standard_cn.html>`_ - - `API 设计文档模版 `_ + "1、API 设计文档", "- `API 设计及命名规范 <./api_design_guidelines_standard_cn.html>`_ + - `API 设计文档模版 `_ - `API 设计文档示例 `_ ", "`Github开发者社区仓库 `_" diff --git a/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md b/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md index 19a8eec12f7..aba7d896dcd 100644 --- a/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md +++ b/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md @@ -156,7 +156,7 @@ paddle.optimizer.lr.LambdaDecay ``` -- 由多个单词组成的类名,最后一个单词应表示类型 +- 由多个单词组成的类名,最后一个单词应表示类型 ```python # SimpleRNNCell 继承自 RNNCellBase diff --git a/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md b/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md index b4fd1a313e2..d0a8c70bf1d 100644 --- a/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md +++ 
b/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md @@ -461,7 +461,7 @@ API的方法用来描述API所包含的方法,一些类的API会有这个内 英文: - Warning: + Warning: This API must be used after building network, and before ``minimize`` , and it may be removed in future releases, so it is not recommended. It is recommended to set ``grad_clip`` when initializing the ``optimizer`` , diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_docs/cpp_api_en.rst b/docs/dev_guides/custom_device_docs/custom_kernel_docs/cpp_api_en.rst index 55bfaf8e652..6ef79173b04 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_docs/cpp_api_en.rst +++ b/docs/dev_guides/custom_device_docs/custom_kernel_docs/cpp_api_en.rst @@ -2,15 +2,15 @@ Kernel Implementation APIs ############################# -The custom kernel-function implementation mainly depends on two parts: 1.APIs released by PaddlePaddle, including the context API, the tensor API, and the exception API; 2. APIs of the device encapsulation library. And the C++ API of PaddlePaddle has been released by the header file. +The custom kernel-function implementation mainly depends on two parts: 1.APIs released by PaddlePaddle, including the context API, the tensor API, and the exception API; 2. APIs of the device encapsulation library. And the C++ API of PaddlePaddle has been released by the header file. -- `Context API <./context_api_en.html>`_ : about the C++ API of the device context +- `Context API <./context_api_en.html>`_ : about the C++ API of the device context - `Tensor API <./tensor_api_en.html>`_ : about the C++ API of Tensor - `Exception API <./exception_api_en.html>`_ : about the C++ API of exception handling -Note:There are abundant C++ API of PaddlePaddle. Three APIs will be introduced here and related classes and documents listed in corresponding websites are provided for developers. +Note:There are abundant C++ API of PaddlePaddle. 
Three APIs will be introduced here and related classes and documents listed in corresponding websites are provided for developers. .. toctree:: :hidden: diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_en.rst b/docs/dev_guides/custom_device_docs/custom_kernel_en.rst index 5a64ac842f2..8b176a97fe4 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_en.rst +++ b/docs/dev_guides/custom_device_docs/custom_kernel_en.rst @@ -2,8 +2,8 @@ Custom Kernel #################### -The custom kernel is the implementation of corresponding operators of the kernel function (or kernel). The PaddlePaddle framework provides the custom kernel for the external device registered by the custom runtime, achieving the compiling, registration, and automatic loading of the kernel independent of the framework. -The implementation of the custom kernel is based on the public kernel statement of PaddlePaddle, and public C++ API and register macro. +The custom kernel is the implementation of corresponding operators of the kernel function (or kernel). The PaddlePaddle framework provides the custom kernel for the external device registered by the custom runtime, achieving the compiling, registration, and automatic loading of the kernel independent of the framework. +The implementation of the custom kernel is based on the public kernel statement of PaddlePaddle, and public C++ API and register macro. - `Kernel function statement <./custom_kernel_docs/kernel_declare_en.html>`_ : to introduce the kernel statement of PaddlePaddle diff --git a/docs/dev_guides/custom_device_docs/custom_runtime_en.rst b/docs/dev_guides/custom_device_docs/custom_runtime_en.rst index c036565be6d..565dd063657 100644 --- a/docs/dev_guides/custom_device_docs/custom_runtime_en.rst +++ b/docs/dev_guides/custom_device_docs/custom_runtime_en.rst @@ -4,7 +4,7 @@ Custom Runtime Custom Runtime offers a new method to register the runtime of new devices via plug-ins. 
Responsible for the management of PaddlePaddle devices and Runtime/Driver API, DeviceManager provides a uniform API for the framework to invoke device capabilities, offers a series of APIs to register Custom Runtime, and ensure that the binary system is compatible through C API. The APIs can be found in `device_ext.h `_ . Developers can add custom runtime for PaddlePaddle only by implementing these APIs. -- `Data type <./runtime_data_type_en.html>`_ : to introduce definitions of data types of custom runtime. +- `Data type <./runtime_data_type_en.html>`_ : to introduce definitions of data types of custom runtime. - `Device API <./device_api_en.html>`_ : to introduce definitions and functions of Device APIs. - `Memory API <./memory_api_en.html>`_ : to introduce definitions and functions of Memory APIs. - `Stream API <./stream_api_en.html>`_ : to introduce definitions and functions of Stream APIs. @@ -39,7 +39,7 @@ Device APIs +------------------------+----------------------------------------+ | get_runtime_version | To get the runtime version | +------------------------+----------------------------------------+ -| get_driver_version | To get the driver version | +| get_driver_version | To get the driver version | +------------------------+----------------------------------------+ @@ -111,7 +111,7 @@ Stream APIs +---------------------+-------------------------------------------------------------------+ | stream_add_callback | To add a host and call it back on the stream | +---------------------+-------------------------------------------------------------------+ -| stream_wait_event | To wait for the completion of an event on the stream | +| stream_wait_event | To wait for the completion of an event on the stream | +---------------------+-------------------------------------------------------------------+ diff --git a/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_cn.md b/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_cn.md index 
3b6d9edaeba..7165e4d4df6 100644 --- a/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_cn.md +++ b/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_cn.md @@ -172,7 +172,7 @@ __device__ void ReadDataReduce(Tx* dst, > Rank :原始输出数据的维度。
> IndexCal :输入输出坐标映射规则。定义方式如下:
``` - struct IndexCal { + struct IndexCal { __device__ inline int operator()(int index) const { return ... } diff --git a/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_en.md b/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_en.md index 43e171ebe98..a5b5cc9e391 100644 --- a/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_en.md +++ b/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_en.md @@ -169,7 +169,7 @@ The data processing process is as follows:
> Rank: The dimension of the original output data.
> IndexCal: Input and output coordinate mapping rules. The definition is as follows:
``` - struct IndexCal { + struct IndexCal { __device__ inline int operator()(int index) const { return ... } diff --git a/docs/dev_guides/op_optimization/op_optimization_contributing_guides_cn.rst b/docs/dev_guides/op_optimization/op_optimization_contributing_guides_cn.rst index f532c30dcca..eefd814cdd5 100644 --- a/docs/dev_guides/op_optimization/op_optimization_contributing_guides_cn.rst +++ b/docs/dev_guides/op_optimization/op_optimization_contributing_guides_cn.rst @@ -40,7 +40,7 @@ :header: "提交内容", "参考文档", "提交位置" :widths: 10, 30, 30 - "算子性能优化设计文档", "- `算子性能优化设计文档 模版 `_ + "算子性能优化设计文档", "- `算子性能优化设计文档 模版 `_ - `算子性能优化设计文档 示例 `_ ", "`Github开发者社区仓库 `_" @@ -65,8 +65,8 @@ "算子性能优化实现代码", "- `Paddle代码规范 `_ - `C++ OP开发指南 <../api_contributing_guides/new_cpp_op_cn.html>`_ - `OP Benchmark使用指南 `_ - - `算子性能优化 优化方法 <./op_optimization_method_introduction_cn.html>`_ - - `算子性能优化 验收规范 <./op_optimization_accpetance_criteria_cn.html>`_ + - `算子性能优化 优化方法 <./op_optimization_method_introduction_cn.html>`_ + - `算子性能优化 验收规范 <./op_optimization_accpetance_criteria_cn.html>`_ ", "`Github飞桨训练框架仓库 `_" diff --git a/docs/dev_guides/sugon/paddle_c86_fix_guides_cn.md b/docs/dev_guides/sugon/paddle_c86_fix_guides_cn.md index bb131bfbeb4..286afda2768 100644 --- a/docs/dev_guides/sugon/paddle_c86_fix_guides_cn.md +++ b/docs/dev_guides/sugon/paddle_c86_fix_guides_cn.md @@ -116,11 +116,11 @@ cd /workspace/Paddle/paddle/phi/kernels ```bash # 例如原有误差阈值为0.01 -self.assertTrue(np.allclose(hist, prob, rtol=0.01), +self.assertTrue(np.allclose(hist, prob, rtol=0.01), "actual: {}, expected: {}".format(hist, prob)) - + # 将其修改为新的误差阈值如 0.05 -self.assertTrue(np.allclose(hist, prob, rtol=0.05), +self.assertTrue(np.allclose(hist, prob, rtol=0.05), "actual: {}, expected: {}".format(hist, prob)) ``` @@ -153,4 +153,4 @@ self.assertTrue(np.allclose(hist, prob, rtol=0.05), 356: But Got[[[[nan nan nan nan nan] ... 
``` -从输出中观察到,算子输出直接出nan了,可能是算子本身计算代码在HIP平台下存在问题。同上个问题一样,需要仔细调试该算子的GPU Kernel,定位算子计算问题并进行修复。可能的解决办法是请先检查对应算子Kernel的线程数,可以参考 [ROCm-Developer-Tools/HIP#2235](https://github.com/ROCm-Developer-Tools/HIP/issues/2235) 中的回复,将HIP平台下的算子线程数控制在256及以内。 \ No newline at end of file +从输出中观察到,算子输出直接出nan了,可能是算子本身计算代码在HIP平台下存在问题。同上个问题一样,需要仔细调试该算子的GPU Kernel,定位算子计算问题并进行修复。可能的解决办法是请先检查对应算子Kernel的线程数,可以参考 [ROCm-Developer-Tools/HIP#2235](https://github.com/ROCm-Developer-Tools/HIP/issues/2235) 中的回复,将HIP平台下的算子线程数控制在256及以内。 diff --git "a/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" "b/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" index 856949528a0..af215315003 100644 --- "a/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" +++ "b/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" @@ -863,7 +863,7 @@ Eval samples: 428 | 1 | 导入分布式训练所需要的依赖包 | 完成 | 无 | 无 | | 2 | 初始化分布式环境 | 完成 | PaddlePaddle 安装有时候会有一些问题、NCCL初始化有问题![图片](https://user-images.githubusercontent.com/35827074/165877509-b84f5846-b175-4ab9-8ae3-eef66ed09047.png) | 使用export设置一些安装的库的环境变量,上述问题是rocm版本问题,需要使用rocm-4.0.1版本。 修改rocm版本的方法为. 
module switch compiler/rocm/4.0.1,再就是导入超算上的一些环境变量 export NCCL_IB_HCA=mlx5_0 export NCCL_SOCKET_IFNAME=eno1 export NCCL_IB_DISABLE=0 | | 3 | 设置分布式训练需要的优化器 | 完成 | 无 | 无 | -| 4 | 数据集拆分 | 完成 | 示例里面没有数据集的拆分案例,不会使用数据集的拆分;使用DistributedBatchSampler采样器之后DataLoader中无法指定batchsize以及shuffle参数 | 分析paddle的分布式API底层以及结合其他深度学习框架分析,发现了DistributedBatchSampler API,然后分析其底层实现,发现可以应用;分析DataLoader底层的源码,发现在指定batch_sampler参数之后不能指定batchsize、shuffle以及drop_last参数,然后在DistributedBatchSampler构建采样器的过程中指定。分布式数据集拆分使用DistributedBatchSampler,通过使用DistributedBatchSampler构建一个分布式的采样器,其会将数据平均划分到多个设备中,然后将其输入到Dataloader函数中,参数为batch_sampler,案例的全部代码已经在附录中给出。关于拆分部分如下:train_sampler = DistributedBatchSampler(train_dataset, 32, shuffle=True) train_loader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=2) val_sampler = DistributedBatchSampler(val_dataset, 32) val_loader = DataLoader(val_dataset, batch_sampler=val_sampler, num_workers=2) | +| 4 | 数据集拆分 | 完成 | 示例里面没有数据集的拆分案例,不会使用数据集的拆分;使用DistributedBatchSampler采样器之后DataLoader中无法指定batchsize以及shuffle参数 | 分析paddle的分布式API底层以及结合其他深度学习框架分析,发现了DistributedBatchSampler API,然后分析其底层实现,发现可以应用;分析DataLoader底层的源码,发现在指定batch_sampler参数之后不能指定batchsize、shuffle以及drop_last参数,然后在DistributedBatchSampler构建采样器的过程中指定。分布式数据集拆分使用DistributedBatchSampler,通过使用DistributedBatchSampler构建一个分布式的采样器,其会将数据平均划分到多个设备中,然后将其输入到Dataloader函数中,参数为batch_sampler,案例的全部代码已经在附录中给出。关于拆分部分如下:train_sampler = DistributedBatchSampler(train_dataset, 32, shuffle=True) train_loader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=2) val_sampler = DistributedBatchSampler(val_dataset, 32) val_loader = DataLoader(val_dataset, batch_sampler=val_sampler, num_workers=2) | | 5 | 构建训练代码 | 完成 | 无 | 无 | | 6 | 单机多卡分布式训练 | 完成 | 在曙光超算上使用SBATCH作业提交方式时有环境的问题 | 申请4个DCU,使用镜像的方式进行实现 | | 7 | 多机多卡分布式训练 | 完成 | 无 | 注意再进行多机多卡时先要两个机器之间互相ping一下 | diff --git a/docs/faq/index_cn.rst b/docs/faq/index_cn.rst index 6f2a3b0018f..5bc8364048d 100644 --- a/docs/faq/index_cn.rst +++ b/docs/faq/index_cn.rst @@ -12,7 
+12,7 @@ .. toctree:: :maxdepth: 1 - + 2.0.md .. toctree:: diff --git a/docs/faq/install_cn.md b/docs/faq/install_cn.md index dad038b94d9..cfe51ffce96 100644 --- a/docs/faq/install_cn.md +++ b/docs/faq/install_cn.md @@ -5,12 +5,12 @@ + 问题描述: -> TensorRT dynamic library (libnvinfer.so) that Paddle depends on is not configured correctly. (error code is libnvinfer.so: cannot open shared object file: No such file or directory) -> Suggestions: -> Check if TensorRT is installed correctly and its version is matched with paddlepaddle you installed. -> Configure TensorRT dynamic library environment variables as follows: -> Linux: set LD_LIBRARY_PATH by export LD_LIBRARY_PATH=... -> Windows: set PATH by `set PATH=XXX; +> TensorRT dynamic library (libnvinfer.so) that Paddle depends on is not configured correctly. (error code is libnvinfer.so: cannot open shared object file: No such file or directory) +> Suggestions: +> Check if TensorRT is installed correctly and its version is matched with paddlepaddle you installed. +> Configure TensorRT dynamic library environment variables as follows: +> Linux: set LD_LIBRARY_PATH by export LD_LIBRARY_PATH=... +> Windows: set PATH by `set PATH=XXX; + 问题分析: 遇到该问题是因为使用的paddle默认开始了TensorRT,但是本地环境中没有找到TensorRT的库,该问题只影响使用[Paddle Inference](https://paddleinference.paddlepaddle.org.cn/master/product_introduction/inference_intro.html)开启TensorRT预测的场景,对其它方面均不造成影响。 diff --git a/docs/faq/params_cn.md b/docs/faq/params_cn.md index 4124db0fce8..a04b32df906 100644 --- a/docs/faq/params_cn.md +++ b/docs/faq/params_cn.md @@ -38,7 +38,7 @@ ---------- -##### 问题:训练的step在参数优化器中是如何变化的? +##### 问题:训练的step在参数优化器中是如何变化的? 图片名称 @@ -58,7 +58,7 @@ for epoch in range(epochs): ----- -##### 问题:如何修改全连接层参数,比如weight,bias? +##### 问题:如何修改全连接层参数,比如weight,bias? 
+ 答复:可以通过`param_attr`设置参数的属性,`paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0.0, 0.02), learning_rate=2.0)`,如果`learning_rate`设置为0,该层就不参与训练。也可以构造一个numpy数据,使用`paddle.nn.initializer.Assign`来给权重设置想要的值。 @@ -66,7 +66,7 @@ for epoch in range(epochs): ----- -##### 问题:如何进行梯度裁剪? +##### 问题:如何进行梯度裁剪? + 答复:Paddle的梯度裁剪方式需要在[Optimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/Overview_cn.html#api)中进行设置,目前提供三种梯度裁剪方式,分别是[paddle.nn.ClipGradByValue](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByValue_cn.html)`(设定范围值裁剪)`、[paddle.nn.ClipGradByNorm](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByNorm_cn.html)`(设定L2范数裁剪)` 、[paddle.nn.ClipGradByGlobalNorm](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByGlobalNorm_cn.html)`(通过全局L2范数裁剪)`,需要先创建一个该类的实例对象,然后将其传入到优化器中,优化器会在更新参数前,对梯度进行裁剪。 @@ -87,15 +87,15 @@ sdg.step() # 更新参数前,会先对参 ##### 问题:如何在同一个优化器中定义不同参数的优化策略,比如bias的参数weight_decay的值为0.0,非bias的参数weight_decay的值为0.01? + 答复: - 1. [AdamW](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/AdamW_cn.html#adamw)的参数`apply_decay_param_fun`可以用来选择哪些参数使用decay_weight策略。 - 2. 在创建`Param`的时候,可以通过设置[ParamAttr](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/ParamAttr_cn.html#paramattr)的属性来控制参数的属性。 + 1. [AdamW](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/AdamW_cn.html#adamw)的参数`apply_decay_param_fun`可以用来选择哪些参数使用decay_weight策略。 + 2. 在创建`Param`的时候,可以通过设置[ParamAttr](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/ParamAttr_cn.html#paramattr)的属性来控制参数的属性。 ---------- ##### 问题:paddle fluid如何自定义优化器,自定义更新模型参数的规则? + 答复: - 1. 
要定义全新优化器,自定义优化器中参数的更新规则,可以通过继承fluid.Optimizer,重写_append_optimize_op方法实现。不同优化器实现原理各不相同,一般流程是先获取learning_rate,gradients参数,可训练参数,以及该优化器自身特别需要的参数,然后实现更新参数的代码,最后返回更新后的参数。 - 在实现更新参数代码时,可以选择直接调用[paddle的API](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/index_cn.html)或者使用[自定义原生算子](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/07_new_op/index_cn.html)。在使用自定义原生算子时,要注意动态图与静态图调用方式有所区别: - 需要首先使用`framework.in_dygraph_mode()`判断是否为动态图模式,如果是动态图模式,则需要调用`paddle._C_ops`中相应的优化器算子;如果不是动态图模式,则需要调用`block.append_op` 来添加优化器算子。 - 代码样例可参考[paddle源码](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/optimizer.py)中AdamOptimizer等优化器的实现。 - 2. 使用现有的常用优化器,可以在创建`Param`的时候,可以通过设置[ParamAttr](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/ParamAttr_cn.html#paramattr)的属性来控制参数的属性,可以通过设置`regularizer`,`learning_rate`等参数简单设置参数的更新规则。 + 1. 要定义全新优化器,自定义优化器中参数的更新规则,可以通过继承fluid.Optimizer,重写_append_optimize_op方法实现。不同优化器实现原理各不相同,一般流程是先获取learning_rate,gradients参数,可训练参数,以及该优化器自身特别需要的参数,然后实现更新参数的代码,最后返回更新后的参数。 + 在实现更新参数代码时,可以选择直接调用[paddle的API](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/index_cn.html)或者使用[自定义原生算子](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/07_new_op/index_cn.html)。在使用自定义原生算子时,要注意动态图与静态图调用方式有所区别: + 需要首先使用`framework.in_dygraph_mode()`判断是否为动态图模式,如果是动态图模式,则需要调用`paddle._C_ops`中相应的优化器算子;如果不是动态图模式,则需要调用`block.append_op` 来添加优化器算子。 + 代码样例可参考[paddle源码](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/optimizer.py)中AdamOptimizer等优化器的实现。 + 2. 使用现有的常用优化器,可以在创建`Param`的时候,可以通过设置[ParamAttr](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/ParamAttr_cn.html#paramattr)的属性来控制参数的属性,可以通过设置`regularizer`,`learning_rate`等参数简单设置参数的更新规则。 diff --git a/docs/faq/train_cn.md b/docs/faq/train_cn.md index b7a7deef3ed..e7cb958c4bd 100644 --- a/docs/faq/train_cn.md +++ b/docs/faq/train_cn.md @@ -170,7 +170,7 @@ export FLAGS_fraction_of_gpu_memory_to_use=0 ---------- -##### 问题:预测时如何打印模型中每一步的耗时? 
+##### 问题:预测时如何打印模型中每一步的耗时? + 答复:可以在设置config时使用`config.enable_profile()`统计预测时每个算子和数据搬运的耗时。对于推理api的使用,可以参考官网文档[Python预测API介绍](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/python_infer_cn.html)。示例代码: ```python diff --git a/docs/guides/06_distributed_training/cluster_overview_ps_cn.rst b/docs/guides/06_distributed_training/cluster_overview_ps_cn.rst index 7bde682b490..a48aa16f580 100644 --- a/docs/guides/06_distributed_training/cluster_overview_ps_cn.rst +++ b/docs/guides/06_distributed_training/cluster_overview_ps_cn.rst @@ -150,7 +150,7 @@ RPC&NCCL 混合通信可以将部分稀疏参数采用 RPC 协议跨节点通信 3. 保存模型: a. Checkpoint Model:用于下次训练开始时的模型加载部分。 b. Inference Model:用于线上推理部署。 - + 完整训练示例代码请参考:\ `CPUPS示例 `_\、\ `GPUPS示例 `_\,本节只介绍飞桨参数服务器在训练过程中需要使用到的与单机不同的API。 4.1 大规模稀疏参数 @@ -198,17 +198,17 @@ Dataset有两种不同的类型: dataset = paddle.distributed.QueueDataset() thread_num = 1 - + # use_var指定网络中的输入数据,pipe_command指定数据处理脚本 # 要求use_var中输入数据的顺序与数据处理脚本输出的特征顺序一一对应 - dataset.init(use_var=model.inputs, - pipe_command="python reader.py", - batch_size=batch_size, + dataset.init(use_var=model.inputs, + pipe_command="python reader.py", + batch_size=batch_size, thread_num=thread_num) train_files_list = [os.path.join(train_data_path, x) for x in os.listdir(train_data_path)] - + # set_filelist指定dataset读取的训练文件的列表 dataset.set_filelist(train_files_list) @@ -223,8 +223,8 @@ Dataset有两种不同的类型: exe.train_from_dataset(paddle.static.default_main_program(), dataset, - paddle.static.global_scope(), - debug=False, + paddle.static.global_scope(), + debug=False, fetch_list=[model.cost], fetch_info=["loss"], print_period=1) @@ -235,8 +235,8 @@ Dataset有两种不同的类型: exe.infer_from_dataset(paddle.static.default_main_program(), dataset, - paddle.static.global_scope(), - debug=False, + paddle.static.global_scope(), + debug=False, fetch_list=[model.cost], fetch_info=["loss"], print_period=1) @@ -288,7 +288,7 @@ Dataset有两种不同的类型: .. 
code-block:: python dirname = "/you/path/to/model" - + # 加载checkpoint model fleet.load_model(dirname) diff --git a/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst b/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst index 5b0e6b1b439..f4fad856a2f 100644 --- a/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst +++ b/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst @@ -14,16 +14,16 @@ 在编写分布式训练程序之前,用户需要确保已经安装GPU版的PaddlePaddle 2.3.0及以上版本。 -1.2 具体步骤 +1.2 具体步骤 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 与单机单卡的普通模型训练相比,数据并行训练只需要按照如下5个步骤对代码进行简单调整即可: - 1. 导入分布式训练依赖包 - 2. 初始化Fleet环境 - 3. 构建分布式训练使用的网络模型 - 4. 构建分布式训练使用的优化器 - 5. 构建分布式训练使用的数据加载器 + 1. 导入分布式训练依赖包 + 2. 初始化Fleet环境 + 3. 构建分布式训练使用的网络模型 + 4. 构建分布式训练使用的优化器 + 5. 构建分布式训练使用的数据加载器 下面将逐一进行讲解。 @@ -83,7 +83,7 @@ .. code-block:: python - # 构建分布式数据采样器 + # 构建分布式数据采样器 # 注意:需要保证batch中每个样本数据shape相同,若原尺寸不一,需进行预处理 train_sampler = DistributedBatchSampler(train_dataset, 32, shuffle=True, drop_last=True) val_sampler = DistributedBatchSampler(val_dataset, 32) @@ -232,7 +232,7 @@ 假设集群包含两个节点,每个节点上可使用的GPU卡数为4,IP地址分别为192.168.1.2和192.168.1.3,那么需要在两个节点的终端上分别运行如下命令: 在192.168.1.2节点运行: - + .. code-block:: bash python -m paddle.distributed.launch \ diff --git a/docs/guides/06_distributed_training/cluster_quick_start_en.rst b/docs/guides/06_distributed_training/cluster_quick_start_en.rst index daa7a088788..f5eb67e6a3c 100644 --- a/docs/guides/06_distributed_training/cluster_quick_start_en.rst +++ b/docs/guides/06_distributed_training/cluster_quick_start_en.rst @@ -7,7 +7,7 @@ Distributed training with Fleet API Since PaddlePaddle `Release 1.5.1 `__, it is officially recommended to use the Fleet API for distributed -training. +training. 
Preparation ~~~~~~~~~~~ @@ -25,11 +25,11 @@ Click-through rate prediction Here, we will use a simple example, click-through rate prediction task, to illustrate how to configure Fleet API for distributed training, and gives an example by using a single node environment to simulate the -distributed environment. +distributed environment. In order to facilitate learning, the example given here is a mixed code of single node and multi node. You can start single node or multi node -tasks through different startup commands. +tasks through different startup commands. .. code-block:: python @@ -129,7 +129,7 @@ tasks through different startup commands. - Note: The IO method used in this example is dataset, please refer to `Dataset API `__ - for specific documents and usage. + for specific documents and usage. Start command of single node training ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -151,4 +151,4 @@ tasks. The task running log can be viewed in the logs directory of the working directory. When you can use a single machine to simulate distributed -training, you can perform true multi node distributed training. +training, you can perform true multi node distributed training. diff --git a/docs/guides/06_distributed_training/cluster_quick_start_ps_cn.rst b/docs/guides/06_distributed_training/cluster_quick_start_ps_cn.rst index abc809d406a..24db20de448 100644 --- a/docs/guides/06_distributed_training/cluster_quick_start_ps_cn.rst +++ b/docs/guides/06_distributed_training/cluster_quick_start_ps_cn.rst @@ -39,9 +39,9 @@ 3. 加载模型。 4. 构建dataset加载数据 5. 定义参数更新策略及优化器。 - 6. 开始训练。 + 6. 
开始训练。 + - 下面将逐一进行讲解。 1.2.1 导入依赖 @@ -104,14 +104,14 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an # 返回值为一个list,其中的每个元素均为一个list,不需要转成np.array格式 # 具体格式:[[dense_value1, dense_value2, ...], [sparse_value1], [sparse_value2], ..., [label]] return [dense_feature] + sparse_feature + [label] - + # 实现generate_sample()函数 # 该方法有一个名为line的参数,只需要逐行处理数据,不需要对数据文件进行操作 def generate_sample(self, line): def wd_reader(): # 按行处理数据 input_data = self.line_process(line) - + # 构造特征名数组feature_name feature_name = ["dense_input"] for idx in categorical_range_: @@ -122,7 +122,7 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an # 元组的第一个元素为特征名(string类型),第二个元素为特征值(list类型) # 具体格式:[('dense_input', [dense_value1, dense_value2, ...]), ('C1', [sparse_value1]), ('C2', [sparse_value2]), ..., ('label', [label])] yield zip(feature_name, input_data) - + # generate_sample()函数需要返回一个可以迭代的reader方法 return wd_reader @@ -137,17 +137,17 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an dataset = paddle.distributed.QueueDataset() thread_num = 1 - + # use_var指定网络中的输入数据,pipe_command指定数据处理脚本 # 要求use_var中输入数据的顺序与数据处理脚本输出的特征顺序一一对应 - dataset.init(use_var=model.inputs, - pipe_command="python reader.py", - batch_size=batch_size, + dataset.init(use_var=model.inputs, + pipe_command="python reader.py", + batch_size=batch_size, thread_num=thread_num) train_files_list = [os.path.join(train_data_path, x) for x in os.listdir(train_data_path)] - + # set_filelist指定dataset读取的训练文件的列表 dataset.set_filelist(train_files_list) @@ -166,7 +166,7 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an # 定义异步训练 dist_strategy = fleet.DistributedStrategy() dist_strategy.a_sync = True - + 用户需要使用 ``fleet.distributed_optimizer()`` 接口,将单机优化器转换成分布式优化器,并最小化模型的损失值。 .. 
code-block:: python @@ -201,12 +201,12 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an for epoch_id in range(1): exe.train_from_dataset(paddle.static.default_main_program(), dataset, - paddle.static.global_scope(), - debug=False, + paddle.static.global_scope(), + debug=False, fetch_list=[model.loss], fetch_info=["loss"], print_period=1) - + fleet.stop_worker() 备注:Paddle2.3版本及以后,ParameterServer训练将废弃掉dataloader + exe.run()方式,请切换到dataset + exe.train_from_dataset()方式。 @@ -224,7 +224,7 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an 您将在执行终端看到如下日志信息: .. code-block:: bash - + LAUNCH INFO 2022-05-18 11:27:17,761 ----------- Configuration ---------------------- LAUNCH INFO 2022-05-18 11:27:17,761 devices: None LAUNCH INFO 2022-05-18 11:27:17,761 elastic_level: -1 @@ -242,16 +242,16 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an LAUNCH INFO 2022-05-18 11:27:17,762 rank: -1 LAUNCH INFO 2022-05-18 11:27:17,762 run_mode: collective LAUNCH INFO 2022-05-18 11:27:17,762 server_num: 1 - LAUNCH INFO 2022-05-18 11:27:17,762 servers: + LAUNCH INFO 2022-05-18 11:27:17,762 servers: LAUNCH INFO 2022-05-18 11:27:17,762 trainer_num: 2 - LAUNCH INFO 2022-05-18 11:27:17,762 trainers: + LAUNCH INFO 2022-05-18 11:27:17,762 trainers: LAUNCH INFO 2022-05-18 11:27:17,762 training_script: train.py LAUNCH INFO 2022-05-18 11:27:17,762 training_script_args: [] LAUNCH INFO 2022-05-18 11:27:17,762 with_gloo: 0 LAUNCH INFO 2022-05-18 11:27:17,762 -------------------------------------------------- LAUNCH INFO 2022-05-18 11:27:17,772 Job: default, mode ps, replicas 1[1:1], elastic False LAUNCH INFO 2022-05-18 11:27:17,775 Run Pod: evjsyn, replicas 3, status ready - LAUNCH INFO 2022-05-18 11:27:17,795 Watching Pod: evjsyn, replicas 3, status running + LAUNCH INFO 2022-05-18 11:27:17,795 Watching Pod: evjsyn, replicas 3, status running 同时,在log目录下,会生成服务节点和训练节点的日志文件。 服务节点日志:default.evjsyn.ps.0.log,日志中须包含以下内容,证明服务节点启动成功,可以提供服务。 diff --git 
a/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst b/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst index c72c1874041..fe7cb347af8 100755 --- a/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst +++ b/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst @@ -27,7 +27,7 @@ Gradient Merge 只是在训练流程上做了一些微调,达到模拟出大ba size 训练效果的目的。具体来说,就是使用若干原有大小的batch 数据进行训练,即通过“前向+反向” 网络计算得到梯度。其间会有一部分显存/内存用于存放梯度,然后对每个batch计算出的梯度进行叠加,当累加的次数达到某个预设值后,使用累加的梯度对模型进行参数更新,从而达到使用大batch 数据训练的效果。 -在较大的粒度上看, GM 是将训练一个step 的过程由原来的 “前向 + 反向 + 更新” 改变成 “(前向 + 反向 + 梯度累加)x k + 更新”, 通过在最终更新前进行 k 次梯度的累加模拟出 batch size 扩大 k 倍的效果。 +在较大的粒度上看, GM 是将训练一个step 的过程由原来的 “前向 + 反向 + 更新” 改变成 “(前向 + 反向 + 梯度累加)x k + 更新”, 通过在最终更新前进行 k 次梯度的累加模拟出 batch size 扩大 k 倍的效果。 更具体细节可以参考 `《MG-WFBP: Efficient Data Communication for Distributed Synchronous SGD Algorithms》 `__ 。 三、动态图使用方法 diff --git a/docs/guides/06_distributed_training/data_parallel/recompute_cn.rst b/docs/guides/06_distributed_training/data_parallel/recompute_cn.rst index bf02918274d..bf311bdec39 100644 --- a/docs/guides/06_distributed_training/data_parallel/recompute_cn.rst +++ b/docs/guides/06_distributed_training/data_parallel/recompute_cn.rst @@ -20,7 +20,7 @@ :alt: forward_backward :align: center * Recompute-Offload 支持多卡并行训练, 当多卡并行时开启Offload,训练中同一节点上所有GPU 上的checkpoints 都将卸载到Host 内存中,会存在以下风险: - + - PCIe 带宽瓶颈: 同一节点上的所有GPU 和Host 内存间共享一根PCIe 带宽,如同一节点上GPU 数量较多(单机8卡)容易因为PCIe 带宽限制让训练速度进一步减慢。 - Host 内存溢出: 当同一节点上GPU 数量较多,且每张GPU checkpoints size 较大时,需要注意卸载量是否超出Host 内存大小。 @@ -36,7 +36,7 @@ batch size = seq * seq_max_len +==============+================+==========================+===============================+ | batch size | 18 * 512 | 180 * 512 | 258 * 512 | +--------------+----------------+--------------------------+-------------------------------+ -| speed | 23.94 sents/s | 17.82 sents/s | 15.47 sents/s | +| speed | 23.94 sents/s | 17.82 sents/s | 15.47 sents/s | 
+--------------+----------------+--------------------------+-------------------------------+ @@ -86,10 +86,10 @@ batch size = seq * seq_max_len block_name + "_fc_2", paddle.nn.Linear(input_size, input_size, bias_attr=False) ) - + return block - - + + class Naive_fc_net(paddle.nn.Layer): def __init__(self, input_size=10, recompute_blocks=[1, 3], @@ -103,7 +103,7 @@ batch size = seq * seq_max_len self.runfunc3 = get_fc_block(3, input_size, is_last=False) self.runfunc4 = get_fc_block(4, input_size, is_last=True) self.total_func = [self.runfunc0, self.runfunc1, self.runfunc2, self.runfunc3, self.runfunc4] - + def forward(self, inputs): nums = len(self.total_func) for i in range(nums): @@ -125,7 +125,7 @@ batch size = seq * seq_max_len random.seed(10) if cuda_state: paddle.set_cuda_rng_state(cuda_state) - + batch_size, input_size = 1, 10 model = Naive_fc_net( input_size, @@ -146,7 +146,7 @@ batch size = seq * seq_max_len param_.append(np.asarray(model.parameters()[9]).tolist()) grad_.append(np.asarray(model.parameters()[3]._grad_ivar()).tolist()) optimizer.clear_grad() - + return loss_, param_, grad_ 3.4 执行运行程序,打印结果 @@ -161,7 +161,7 @@ batch size = seq * seq_max_len loss_ref, param_ref, grad_ref = run_model( cuda_state, recompute_block=[] ) - + loss, param, grad = run_model(cuda_state, recompute_block=[1, 2]) print("normal_loss: {},\n recompute_loss: {}".format(loss_ref, loss)) @@ -187,7 +187,7 @@ recompute动态图代码:`代码示例 `__ 指出在每个GPU 上都保存一份模型参数和优化器状态副本是冗余的。 我们可以通过将上述参数和副本划分到不同GPU 中, -在每个GPU 只保存部分副本,来减少每张GPU上显存的占用,从而可以支持更大模型的训练。 +在每个GPU 只保存部分副本,来减少每张GPU上显存的占用,从而可以支持更大模型的训练。 一、原理介绍 @@ -17,14 +17,14 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ GroupSharded 实现了类似ZeRO-DP 的训练策略,将模型状态包括:模型参数(parameter)、参数梯度(gradient)、参数对应的优化器状态(以Adam为例moment和varience)切分到每一张GPU 上。让模型参数部分所占的显存随并行卡数的增加而减少。 -通过 paddle.distributed.sharding.group_sharded_parallel 提供的简单易用接口, 用户只需要添加几行代码就可将策略加入到原有的训练中。 +通过 paddle.distributed.sharding.group_sharded_parallel 提供的简单易用接口, 用户只需要添加几行代码就可将策略加入到原有的训练中。 
模型训练过程中的显存消耗主要由两大部分组成:模型参数及优化器状态、训练产生的中间变量(activations)。 -GroupSharded 策略可以根据用户配置支持,分别切分模型参数、对应参数梯度和优化器状态,因此模型状态所消耗的显存可以随着并行GPU数量增加而线性减少; +GroupSharded 策略可以根据用户配置支持,分别切分模型参数、对应参数梯度和优化器状态,因此模型状态所消耗的显存可以随着并行GPU数量增加而线性减少; 但是每张GPU上仍然维护着模型完整的前向和反向,所以每张GPU依然需要存放模型的训练过程中的产生的全部的中间变量,这部分显存消耗 不会随着GPU 数量的增加而减少。 用户可以通过结合 recompute 策略来减少 activation这部分的显存消耗。 -通过GroupSharded 和增加并行GPU 数量,用户可以在A100-40G设备下8卡训练16.25B参量的模型 (需要结合 recompute, amp 策略)。 +通过GroupSharded 和增加并行GPU 数量,用户可以在A100-40G设备下8卡训练16.25B参量的模型 (需要结合 recompute, amp 策略)。 1.2 GroupSharded-hybrid-dp ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -32,7 +32,7 @@ GroupSharded 策略可以根据用户配置支持,分别切分模型参数、 GroupSharded hybrid数据并行策略,在GroupSharded 并行的基础上再增加一层数据并行逻辑。 该策略的目的是通过 ``限制GroupSharded 通信的节点数`` 和 ``增加多路数据并行`` 来提高训练吞吐。 如果一个模型在普通GroupSharded 训练时需要M 张GPU,则则开启hybrid-dp 至少需要 N*M GPU (N>= 2)。 -GroupSharded-hybrid-dp 适用的场景如下: +GroupSharded-hybrid-dp 适用的场景如下: * 当前有 4个 8 卡A100 节点 * 目标模型A 在GroupSharded 训练时至少需要 8卡 A100 (一个完整的8 卡A100节点) @@ -56,7 +56,7 @@ P.S. hybrid dp 是因为 GroupSharded parallelism 本身内含一层 data parall 二、功能效果 -------------------- -下面表格将对比 GroupSharded 策略对显存的影响。 +下面表格将对比 GroupSharded 策略对显存的影响。 模型为 GPT(11.375B),试验环境为 A100 (40GB), recompute = ON, amp(O2) = ON, hybrid-dp = OFF。 模型不变,单卡batch size 不变,当并行GPU数量增加时,显存的消耗将减小。 省下的显存可以用来增大模型。 @@ -124,7 +124,7 @@ GroupSharded 结合 amp (O2) + recompute,可以在 8 张 40GB A100 并行的 scaler.step(optimizer) scaler.update() else: - loss.backward() + loss.backward() optimizer.step() optimizer.clear_grad() @@ -137,7 +137,7 @@ GroupSharded 结合 amp (O2) + recompute,可以在 8 张 40GB A100 并行的 运行方式(需要保证当前机器有两张GPU): .. code-block:: bash - + export CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch run_pretrain.py # run_pretrain.py 是用户运行动态图 GroupSharded 的 python 文件 @@ -147,7 +147,7 @@ GroupSharded 结合 amp (O2) + recompute,可以在 8 张 40GB A100 并行的 .. code-block:: bash launch train in GPU mode! - INFO 2022-05-18 09:34:51,803 launch_utils.py:561] Local start 2 processes. 
First process distributed environment info (Only For Debug): + INFO 2022-05-18 09:34:51,803 launch_utils.py:561] Local start 2 processes. First process distributed environment info (Only For Debug): +=======================================================================================+ | Distributed Envs Value | +---------------------------------------------------------------------------------------+ @@ -161,7 +161,7 @@ GroupSharded 结合 amp (O2) + recompute,可以在 8 张 40GB A100 并行的 | FLAGS_selected_gpus 6 | | FLAGS_selected_accelerators 6 | +=======================================================================================+ - + 日志信息位于log目录下: .. code-block:: bash diff --git a/docs/guides/06_distributed_training/model_parallel_cn.rst b/docs/guides/06_distributed_training/model_parallel_cn.rst index ab4b48a10dd..fae7aea5b05 100644 --- a/docs/guides/06_distributed_training/model_parallel_cn.rst +++ b/docs/guides/06_distributed_training/model_parallel_cn.rst @@ -97,7 +97,7 @@ 定义如下: .. code-block:: python - + class VocabParallelEmbedding(Layer): def __init__(self, num_embeddings, # Embedding参数的行数 @@ -138,9 +138,9 @@ 下面的例子给出在两张卡上实现Embedding算子模型并行的示例。 .. 
code-block:: python - + import paddle.distributed.fleet as fleet - word_embeddings = fleet.meta_parallel.VocabParallelEmbedding( + word_embeddings = fleet.meta_parallel.VocabParallelEmbedding( vocab_size, hidden_size, weight_attr=paddle.ParamAttr(initializer=nn.initializer.Normal( @@ -159,10 +159,10 @@ hcg = fleet.get_hybrid_communicate_group() global_rank = hcg.get_global_rank() # 全局rank mp_rank = hcg.get_model_parallel_rank() # 模型并行组rank - + 当结合使用模型并行和数据并行时,我们需要指定 ``dp_dgree`` 参数,设置数据并行的并行度。 - + 如上文所述,对于Transformer模型,存在两种类型的Dropout:全局Dropout和局部Dropout;对于全局Dropout,需要在模型并行的所有卡上设置相同的种子,对于局部Dropout,则需要设置不同的种子。我们通过如下代码分别设置全局和局部种子: @@ -239,7 +239,7 @@ hidden_size, inner_size, gather_output=False, - has_bias=True) + has_bias=True) self.linear2 = fleet.meta_parallel.RowParallelLinear( inner_size, @@ -286,7 +286,7 @@ } # 注意strategy是这里传递的,动态图只能这里,静态图还可以在distributed_optimizer里传 fleet.init(is_collective=True, strategy=strategy) - + hcg = fleet.get_hybrid_communicate_group() mp_id = hcg.get_model_parallel_rank() rank_id = dist.get_rank() @@ -297,11 +297,11 @@ optimizer = paddle.optimizer.SGD(learning_rate=0.001, parameters=model.parameters()) model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) - - + + for _ in range(5): np_data = np.random.randint(0, vocab_size, (batch_size, seq_length, )) - + output = model(paddle.to_tensor(np_data)) loss = output.mean() loss.backward() diff --git a/docs/guides/06_distributed_training/moe_cn.rst b/docs/guides/06_distributed_training/moe_cn.rst index 0fa8cc1f3b1..106b42964ff 100644 --- a/docs/guides/06_distributed_training/moe_cn.rst +++ b/docs/guides/06_distributed_training/moe_cn.rst @@ -53,14 +53,14 @@ MoE 构建一个可以正常训练的模型 .. 
code-block:: python - + num_experts = 8 d_model = 512 d_hidden = 2048 class ExpertLayer(Layer): def __init__(self, d_model, d_hidden, name=None): - super(ExpertLayer, self).__init__() + super(ExpertLayer, self).__init__() self.htoh4 = Linear(d_model, d_hidden) self.h4toh = Linear(d_hidden, d_model) @@ -105,15 +105,15 @@ MoE recompute_interval=0) self.linear2 = Linear(d_model, d_model) - self.dropout = Dropout(p=0.1) - + self.dropout = Dropout(p=0.1) + def forward(self, x): x = self.linear1(x) x = self.moe_layer(x) x = self.linear2(x) x = self.dropout(x) return x - + model = Model(d_model, d_hidden) optim = paddle.optimizer.SGD(parameters=model.parameters()) diff --git a/docs/guides/06_distributed_training/pipeline_parallel_cn.rst b/docs/guides/06_distributed_training/pipeline_parallel_cn.rst index 831c8225c67..c0c76e574ac 100644 --- a/docs/guides/06_distributed_training/pipeline_parallel_cn.rst +++ b/docs/guides/06_distributed_training/pipeline_parallel_cn.rst @@ -141,7 +141,7 @@ ] super(AlexNetPipeDesc, self).__init__( layers=decs, loss_fn=nn.CrossEntropyLoss(), **kwargs) - + 然后初始化分布式环境,这一步主要是构建流水线通信组的拓扑 .. code-block:: python @@ -257,7 +257,7 @@ model.train_batch(...):这一步主要就是执行1F1B的流水线并行方式 运行方式(需要保证当前机器有两张GPU): .. 
code-block:: bash - + export CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch alexnet_dygraph_pipeline.py # alexnet_dygraph_pipeline.py是用户运行动态图流水线的python文件 diff --git a/docs/guides/10_contribution/docs_contribution.md b/docs/guides/10_contribution/docs_contribution.md index a1e6a52c5b8..4a538974763 100644 --- a/docs/guides/10_contribution/docs_contribution.md +++ b/docs/guides/10_contribution/docs_contribution.md @@ -57,7 +57,7 @@ docs/api | |--utils | |--vision |-- api_label # 英文API文档的标签,用于API文档的相互引用 -|-- display_doc_list +|-- display_doc_list |-- gen_alias_api.py # 生成全量的API别名关系 |-- gen_alias_mapping.sh # 已废弃 |-- gen_doc.py # 生成英文API文档目录树程序 diff --git a/docs/guides/10_contribution/faq_cn.rst b/docs/guides/10_contribution/faq_cn.rst index 1c98336d198..31a66d0df0a 100644 --- a/docs/guides/10_contribution/faq_cn.rst +++ b/docs/guides/10_contribution/faq_cn.rst @@ -22,7 +22,7 @@ FAQ * develop分支请添加 :code:`test=develop` * release分支请添加如 :code:`test=release/1.4` 来触发release/1.4分支 * 文档预览请添加 :code:`test=document_preview` - + * 该CI触发规则以commit为单位,即对同一个PR来说,不管前面的commit是否已经添加,如果新commit想继续触发CI,那么仍然需要添加。 * 添加CI触发规则后,仍有部分CI没有触发:请关闭并重新开启本PR,来重新触发CI。 diff --git a/docs/guides/10_contribution/hackathon_cn.md b/docs/guides/10_contribution/hackathon_cn.md index af2b200ca50..5a97c001202 100644 --- a/docs/guides/10_contribution/hackathon_cn.md +++ b/docs/guides/10_contribution/hackathon_cn.md @@ -145,4 +145,4 @@ PS: ## 六、其他 -百度在法律法规许可范围内对本活动规则享有解释权。 +百度在法律法规许可范围内对本活动规则享有解释权。 diff --git a/docs/guides/advanced/gradient_clip_cn.rst b/docs/guides/advanced/gradient_clip_cn.rst index 8d6b1e83ac9..a93d44b5141 100644 --- a/docs/guides/advanced/gradient_clip_cn.rst +++ b/docs/guides/advanced/gradient_clip_cn.rst @@ -141,7 +141,7 @@ 其中: -.. math:: +.. 
math:: \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(norm(X[i]))^2}\\ @@ -232,7 +232,7 @@ clip = paddle.nn.ClipGradByNorm(clip_norm=1.0) # 创建ClipGradByNorm类的实例,指定L2范数阈值 loss_fn = paddle.nn.MSELoss(reduction='mean') - optimizer = paddle.optimizer.SGD(learning_rate=0.01, + optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters(), grad_clip=clip) # 将创建的ClipGradByNorm类的实例传入优化器SGD中 diff --git a/docs/guides/advanced/gradient_clip_en.rst b/docs/guides/advanced/gradient_clip_en.rst index 31fd73f8b11..7ab996cf103 100644 --- a/docs/guides/advanced/gradient_clip_en.rst +++ b/docs/guides/advanced/gradient_clip_en.rst @@ -21,7 +21,7 @@ By default, Gradients of all parameters in SGD optimizer will be clipped: .. code:: ipython3 import paddle - + linear = paddle.nn.Linear(10, 10) clip = paddle.nn.ClipGradByValue(min=-1, max=1) sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) @@ -109,7 +109,7 @@ The formula is as follow: where: -.. math:: +.. math:: \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(norm(X[i]))^2}\\ @@ -124,7 +124,7 @@ By default, Gradients of all parameters in SGD optimizer will be clipped: linear = paddle.nn.Linear(10, 10) clip = paddle.nn.ClipGradByGloabalNorm(clip_norm=1.0) sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) - + You can also clip gradients of a part of parameters as follow: **b. 
Clip a part of gradients** diff --git a/docs/guides/advanced/index_cn.rst b/docs/guides/advanced/index_cn.rst index f32e09b1a6c..1a87d1ef677 100644 --- a/docs/guides/advanced/index_cn.rst +++ b/docs/guides/advanced/index_cn.rst @@ -19,4 +19,3 @@ customize_cn.ipynb gradient_clip_cn.rst model_to_onnx_cn.rst - \ No newline at end of file diff --git a/docs/guides/advanced/model_to_onnx_cn.rst b/docs/guides/advanced/model_to_onnx_cn.rst index 27de200f6a7..29d769f3903 100755 --- a/docs/guides/advanced/model_to_onnx_cn.rst +++ b/docs/guides/advanced/model_to_onnx_cn.rst @@ -17,7 +17,7 @@ Paddle转ONNX协议由 `paddle2onnx `_ - - `模型转换支持列表 `_ + - `算子转换支持列表 `_ + - `模型转换支持列表 `_ diff --git a/docs/guides/advanced/visualdl_usage_cn.md b/docs/guides/advanced/visualdl_usage_cn.md index e54eef5605f..3e536dbd5e6 100644 --- a/docs/guides/advanced/visualdl_usage_cn.md +++ b/docs/guides/advanced/visualdl_usage_cn.md @@ -94,7 +94,7 @@ if __name__ == '__main__': writer.add_scalar(tag="train/acc", step=step, value=value[step]) # 步骤二:向记录器添加一个tag为`train/loss`的数据 writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1)) - # 步骤一:创建第二个子文件夹scalar_test2 + # 步骤一:创建第二个子文件夹scalar_test2 value = [i/500.0 for i in range(1000)] with LogWriter(logdir="./log/scalar_test2") as writer: for step in range(1000): diff --git a/docs/guides/advanced/visualdl_usage_en.md b/docs/guides/advanced/visualdl_usage_en.md index ae85962e62a..8079505882c 100755 --- a/docs/guides/advanced/visualdl_usage_en.md +++ b/docs/guides/advanced/visualdl_usage_en.md @@ -18,7 +18,7 @@ To be conductive to analyze the characteristics of data, detect errors, and opti ## Toolkits of adding data -The six components (scalar, histogram, image, text, audio and high dimensional) are used to add data during program running. Class LogWriter must be initialized before adding data, in order to set the storage path and synchronization cycle. 
The input parameters of each components will be saved as log file in disk, after that the log file will be loaded into front end to display. +The six components (scalar, histogram, image, text, audio and high dimensional) are used to add data during program running. Class LogWriter must be initialized before adding data, in order to set the storage path and synchronization cycle. The input parameters of each components will be saved as log file in disk, after that the log file will be loaded into front end to display. ### LogWriter @@ -30,9 +30,9 @@ The definition of LogWriter : class LogWriter(dir, sync_cycle) ``` -> :param dir : the directory path to the saved log files. -> :param sync_cycle : specify how often should the system store data into the file system, that is, system will save the data into the file system once operations count reaches sync_cycle. -> :return: a new LogWriter instance. +> :param dir : the directory path to the saved log files. +> :param sync_cycle : specify how often should the system store data into the file system, that is, system will save the data into the file system once operations count reaches sync_cycle. +> :return: a new LogWriter instance. Demo 1. Create a LogWriter instance @@ -43,8 +43,8 @@ log_writer = LogWriter("./log", sync_cycle=10) class LogWriter include the following member functions: -* `mode()` -* `scalar()`, `histogram()`, `image()`, `text()`, `audio()`, `embedding()` +* `mode()` +* `scalar()`, `histogram()`, `image()`, `text()`, `audio()`, `embedding()` The member function mode() is used to specify the phase of program running. The input string is customized, such as `test`, `validation`, `test`, `conv_layer1`. Components with same mode are grouped together, so users can choose different modes to display on the frontend webpage. @@ -68,24 +68,24 @@ The scalar component is used to draw line charts. 
By passing sca The first step of using scalar component is initializing the member function scalar() of LogWriter instance, then you can add data through the member function add_record() of ScalarWriter instance. -* The member function `scalar()` of LogWriter instance : +* The member function `scalar()` of LogWriter instance : ```python -def scalar(tag, type) -``` +def scalar(tag, type) +``` -> :param tag : The scalar writer will label the data with tag. -> :param type : Data type, optional choice is limited to “float”, "double", "int", the default setting is "float". -> :return : A ScalarWriter instance to handle step and value records. +> :param tag : The scalar writer will label the data with tag. +> :param type : Data type, optional choice is limited to “float”, "double", "int", the default setting is "float". +> :return : A ScalarWriter instance to handle step and value records. -* The member function `add_record()` of ScalarWriter instance : +* The member function `add_record()` of ScalarWriter instance : ```python -def add_record(step, value) +def add_record(step, value) ``` -> :param step : Step number. -> :param value : Input data. +> :param step : Step number. +> :param value : Input data. Demo 3. scalar demo program [GitHub](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/scalar_test.py) @@ -128,11 +128,11 @@ Figure 1. scalar component displays line charts
The right sidebar of VisualDL has adjustment options for each component, take scalar component as example: -* Smoothing : To adjust the smoothness of the line charts. -* X-axis : The horizontal ordinate of line charts, optional choice : Step, Relative, Wall Time. -* Tooltip sorting : Sorting method of tag, optional choice : default, descending, ascending, nearest. +* Smoothing : To adjust the smoothness of the line charts. +* X-axis : The horizontal ordinate of line charts, optional choice : Step, Relative, Wall Time. +* Tooltip sorting : Sorting method of tag, optional choice : default, descending, ascending, nearest. -There is also a ``RUNNING`` button at the bottom of the right sidebar, the frontend webpage will send request to the flask server for data synchronization. Switching to ``Stopped``, it will pause the data update. +There is also a ``RUNNING`` button at the bottom of the right sidebar, the frontend webpage will send request to the flask server for data synchronization. Switching to ``Stopped``, it will pause the data update. ### histogram -- component to display data distribution @@ -143,22 +143,22 @@ The first step of using histogram component is initializing the member function * The member function histogram() of LogWriter instance : ```python -def histogram(tag, num_buckets, type) +def histogram(tag, num_buckets, type) ``` -> :param tag : The histogram writer will label the data with tag. -> :param num_buckets : The number of pillar in the histogram. -> :param type : Data type, optional choice is limited to “float”, "double", "int", the default setting is "float". -> :return : A HistogramWriter instance to record distribution. +> :param tag : The histogram writer will label the data with tag. +> :param num_buckets : The number of pillar in the histogram. +> :param type : Data type, optional choice is limited to “float”, "double", "int", the default setting is "float". +> :return : A HistogramWriter instance to record distribution. 
* The member function add_record() of HistogramWriter instance : ```python -def add_record(step, value) +def add_record(step, value) ``` -> :param step : Step number. -> :param value : Input data, type is list[]. +> :param step : Step number. +> :param value : Input data, type is list[]. Demo 4. histogram demo program [GitHub](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/histogram_test.py) @@ -207,13 +207,13 @@ The first step of using image component is initializing the member function imag * The member function image() of LogWriter instance : ```python -def image(tag, num_samples, step_cycle) +def image(tag, num_samples, step_cycle) ``` -> :param tag : The image writer will label the image with tag. -> :param num_samples : Appoint the number of samples to take in a step. -> :param step_cycle : Store every `step_cycle` as a record, the default value is 1. -> :return: A ImageWriter instance to sample images. +> :param tag : The image writer will label the image with tag. +> :param num_samples : Appoint the number of samples to take in a step. +> :param step_cycle : Store every `step_cycle` as a record, the default value is 1. +> :return: A ImageWriter instance to sample images. * Start a new sampling cycle, allocate memory space for the sampled data @@ -230,17 +230,17 @@ def is_sample_taken() * Add image data : ```python -def set_sample(index, image_shape, image_data) -``` +def set_sample(index, image_shape, image_data) +``` -> :param index : Combined with tag, used to determine the sub-frame of the image display. -> :param image_shape : The shape of image, [weight, height, channel(RGB is 3, GrayScale is 1)]. -> :param image_data : Image data with type numpy.ndarray, member function flatten() can turn the shape to row vector. +> :param index : Combined with tag, used to determine the sub-frame of the image display. +> :param image_shape : The shape of image, [weight, height, channel(RGB is 3, GrayScale is 1)]. 
+> :param image_data : Image data with type numpy.ndarray, member function flatten() can turn the shape to row vector. * End the current sampling period, load the sampled data into disk, and release the memory space : ```python -def finish_sample() +def finish_sample() ``` Demo 5. image demo program [GitHub](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/image_test.py) @@ -317,22 +317,22 @@ The text component is used to visualize the text data. By passin The first step of using text component is initializing the member function text() of LogWriter instance, then you can add data through the member function add_record() of TextWriter instance. -* The member function text() of LogWriter instance : +* The member function text() of LogWriter instance : ```python def text(tag) ``` -> :param tag : Combined with tag, used to determine the sub-frame of the image display. +> :param tag : Combined with tag, used to determine the sub-frame of the image display. -* The member function add_record() of TextWriter instance : +* The member function add_record() of TextWriter instance : ```python def add_record(step, str) ``` -> :param step : Step number. -> :param value : Input data, type is string. +> :param step : Step number. +> :param value : Input data, type is string. Demo 6. text demo program [GitHub](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/pr_curve_test.py) @@ -370,20 +370,20 @@ Each subgraph has a horizontal axis which can be dragged to display text of diff ### audio -- component to play audio -The audio component is used to play audio. By passing the audio data (type numpy.ndarray) into the audio() function, users can play audio directly, or choose to download. +The audio component is used to play audio. By passing the audio data (type numpy.ndarray) into the audio() function, users can play audio directly, or choose to download. 
The first step of using audio component is initializing the member function audio() of LogWriter instance. Then you can add data through the member functions start_sampling(), is_sample_taken(), set_sample(), and finish_sample() of AudioWriter instance. * The member function audio() of LogWriter instance : -```python -def audio(tag, num_samples, step_cycle) +```python +def audio(tag, num_samples, step_cycle) ``` -> :param tag : The audio writer will label the audio with tag. -> :param num_samples : Appoint the number of samples to take in a step. -> :param step_cycle : Store every `step_cycle` as a record, the default value is 1. -> :return: An AudioWriter instance to sample images. +> :param tag : The audio writer will label the audio with tag. +> :param num_samples : Appoint the number of samples to take in a step. +> :param step_cycle : Store every `step_cycle` as a record, the default value is 1. +> :return: An AudioWriter instance to sample images. * Start a new sampling cycle, allocate memory space for the sampled data : @@ -403,14 +403,14 @@ def is_sample_taken() def set_sample(index, audio_params, audio_data) ``` -> :param index : Combined with tag, used to determine the sub-frame of the audio. -> :param audio_params : The parameters of audio, [sample rate, sample width, channels]. -> :param audio_data : Audio data with type numpy.ndarray, member function flatten() can turn the shape to row vector. +> :param index : Combined with tag, used to determine the sub-frame of the audio. +> :param audio_params : The parameters of audio, [sample rate, sample width, channels]. +> :param audio_data : Audio data with type numpy.ndarray, member function flatten() can turn the shape to row vector. * End the current sampling period, load the sampled data into disk, and release the memory space : ```python -def finish_sample() +def finish_sample() ``` Demo 7. 
audio demo program [GitHub](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/audio_test.py) @@ -497,8 +497,8 @@ The role of high dimensional component is to map data into 2D or The high dimensional component supports the following two dimensionality reduction algorithms : -* PCA : Principle Component Analysis -* [t-SNE](https://lvdmaaten.github.io/tsne/) : t-distributed stochastic neighbor embedding +* PCA : Principle Component Analysis +* [t-SNE](https://lvdmaaten.github.io/tsne/) : t-distributed stochastic neighbor embedding The first step of using audio component is initializing the member function embedding() of LogWriter instance. Then you can add data through the member functions add_embeddings_with_word_dict() of EmbeddingWriter instance. @@ -506,17 +506,17 @@ The first step of using audio component is initializing the member function embe * The member function embedding() of LogWriter instance ```python -def embedding() +def embedding() ``` * The member function add_embeddings_with_word_dict() of EmbeddingWriter instance : ```python -def add_embeddings_with_word_dict(data, Dict) +def add_embeddings_with_word_dict(data, Dict) ``` -> :param data : input data , type List[List(float)]. -> :param Dict : dictionary, type Dict[str, int]. +> :param data : input data , type List[List(float)]. +> :param Dict : dictionary, type Dict[str, int]. Demo 8. 
high dimensional demo program [GitHub](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/high_dimensional_test.py) @@ -532,8 +532,8 @@ log_writer = LogWriter("./log", sync_cycle=10) with log_writer.mode("train") as logger: train_embedding = logger.embedding() -# Initialize data List[List(float)] -hot_vectors = np.random.uniform(1, 2, size=(10, 3)) +# Initialize data List[List(float)] +hot_vectors = np.random.uniform(1, 2, size=(10, 3)) word_dict = { "label_1": 5, "label_2": 4, @@ -619,7 +619,7 @@ fluid.io.save_inference_model( executor=exe) ``` -After running the demo program above, you can start the flask server with command ``visualdl`` : +After running the demo program above, you can start the flask server with command ``visualdl`` : ```shell visualdl --logdir ./log --host 0.0.0.0 --port 8080 --model_pb paddle_lenet_5_model diff --git a/docs/guides/beginner/model_save_load_cn.rst b/docs/guides/beginner/model_save_load_cn.rst index e2d1f831523..aaadeffb654 100644 --- a/docs/guides/beginner/model_save_load_cn.rst +++ b/docs/guides/beginner/model_save_load_cn.rst @@ -47,7 +47,7 @@ 本教程着重介绍飞桨框架2.1的各个保存载入接口的关系及各种使用场景,不对接口参数进行详细介绍,如果需要了解具体接口参数的含义,请直接阅读对应API文档。 -`模型保存常见问题 <./../../faq/save_cn.html>`_ +`模型保存常见问题 <./../../faq/save_cn.html>`_ 二、训练调优场景的模型&参数保存载入 @@ -179,9 +179,9 @@ 参数保存时,先获取Program的state_dict,然后将state_dict保存至磁盘,示例如下(接前述示例): .. code-block:: python - + paddle.save(prog.state_dict(), "temp/model.pdparams") - + 如果想要保存整个静态图模型,除了state_dict还需要保存Program @@ -376,7 +376,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 return out, avg_loss else: return out - + 正确示例如下: @@ -541,7 +541,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 # save path = "example.dy_model/linear" paddle.jit.save( - layer=layer, + layer=layer, path=path, input_spec=[InputSpec(shape=[None, 784], dtype='float32')]) @@ -560,7 +560,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 .. 
code-block:: python paddle.jit.save( - layer=layer, + layer=layer, path=path, input_spec=[InputSpec(shape=[None, 784], dtype='float32')]) @@ -571,7 +571,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 .. code-block:: python paddle.jit.save( - layer=layer, + layer=layer, path=path, input_spec=[image]) diff --git a/docs/guides/beginner/tensor_cn.md b/docs/guides/beginner/tensor_cn.md index 571c9d4b676..5cd8bfc4523 100644 --- a/docs/guides/beginner/tensor_cn.md +++ b/docs/guides/beginner/tensor_cn.md @@ -382,7 +382,7 @@ Tensor dtype from Python floating point: paddle.float32 ```python ndim_2_Tensor = paddle.to_tensor([[(1+1j), (2+2j)], [(3+3j), (4+4j)]]) -print(ndim_2_Tensor) +print(ndim_2_Tensor) ``` ```text diff --git a/docs/guides/beginner/tensor_en.md b/docs/guides/beginner/tensor_en.md index d192cd76c58..4e966fe1af0 100644 --- a/docs/guides/beginner/tensor_en.md +++ b/docs/guides/beginner/tensor_en.md @@ -510,7 +510,7 @@ The created **Tensor** will have the same shape and dtype with the original Nump PaddlePaddle provides broadcasting semantics in some APIs like other deep learning frameworks, which allows using tensors with different shapes while operating. In General, broadcast is the rule how the smaller tensor is “broadcast” across the larger tsnsor so that they have same shapes. -Note that no copies happened while broadcasting. +Note that no copies happened while broadcasting. 
In PaddlePaddle, tensors are broadcastable when following rulrs hold(ref [Numpy Broadcasting](https://numpy.org/doc/stable/user/basics.broadcasting.html#module-numpy.doc.broadcasting)): diff --git a/docs/guides/custom_op/new_python_op_cn.md b/docs/guides/custom_op/new_python_op_cn.md index 59e102d1a4d..1466354e301 100644 --- a/docs/guides/custom_op/new_python_op_cn.md +++ b/docs/guides/custom_op/new_python_op_cn.md @@ -272,7 +272,7 @@ def tanh(x): # 前向函数2:将两个2-D Tenosr相加,输入多个Tensor以list[Tensor]或tuple(Tensor)形式 def element_wise_add(x, y): # 必须先手动将Tensor转换为numpy数组,否则无法支持numpy的shape操作 - x = np.array(x) + x = np.array(x) y = np.array(y) if x.shape != y.shape: diff --git a/docs/guides/flags/cudnn_cn.rst b/docs/guides/flags/cudnn_cn.rst index 23ce85c89c5..26d259d4ab0 100644 --- a/docs/guides/flags/cudnn_cn.rst +++ b/docs/guides/flags/cudnn_cn.rst @@ -41,7 +41,7 @@ FLAGS_cudnn_deterministic ******************************************* (始于0.13.0) -cuDNN对于同一操作有几种算法,一些算法结果是非确定性的,如卷积算法。该flag用于调试。它表示是否选择cuDNN中的确定性函数。 +cuDNN对于同一操作有几种算法,一些算法结果是非确定性的,如卷积算法。该flag用于调试。它表示是否选择cuDNN中的确定性函数。 取值范围 --------------- diff --git a/docs/guides/flags/data_en.rst b/docs/guides/flags/data_en.rst index c156a37dd04..9aa5f8c5138 100644 --- a/docs/guides/flags/data_en.rst +++ b/docs/guides/flags/data_en.rst @@ -6,7 +6,7 @@ FLAGS_enable_cublas_tensor_op_math ******************************************* (since 1.2.0) -This Flag indicates whether to use Tensor Core, but it may lose some precision. +This Flag indicates whether to use Tensor Core, but it may lose some precision. 
Values accepted --------------- diff --git a/docs/guides/flags/distributed_cn.rst b/docs/guides/flags/distributed_cn.rst index 8b9dfcc1fee..e8e1697f59a 100644 --- a/docs/guides/flags/distributed_cn.rst +++ b/docs/guides/flags/distributed_cn.rst @@ -230,7 +230,7 @@ FLAGS_rpc_server_profile_path ******************************************* since(v0.15.0) -设置分析器输出日志文件路径前缀。完整路径为FLAGS_rpc_server_profile_path_listener_id,其中listener_id为随机数。 +设置分析器输出日志文件路径前缀。完整路径为FLAGS_rpc_server_profile_path_listener_id,其中listener_id为随机数。 取值范围 --------------- @@ -253,7 +253,7 @@ Bool型,缺省值为false。 示例 ------- -FLAGS_apply_pass_to_program=true - 当使用Fleet API时,在Program上使用IR Pass优化。 +FLAGS_apply_pass_to_program=true - 当使用Fleet API时,在Program上使用IR Pass优化。 FLAGS_allreduce_record_one_event diff --git a/docs/guides/flags/memory_cn.rst b/docs/guides/flags/memory_cn.rst index 94676721c2d..42ae8f41d5d 100644 --- a/docs/guides/flags/memory_cn.rst +++ b/docs/guides/flags/memory_cn.rst @@ -52,7 +52,7 @@ FLAGS_eager_delete_tensor_gb=0.0 - 垃圾占用大小达到0.0GB时释放内存 FLAGS_eager_delete_tensor_gb=1.0 - 垃圾占用内存大小达到1.0GB时释放内存垃圾。 -FLAGS_eager_delete_tensor_gb=-1.0 - 禁用垃圾回收策略。 +FLAGS_eager_delete_tensor_gb=-1.0 - 禁用垃圾回收策略。 注意 ------- @@ -222,7 +222,7 @@ Double型,范围为[0.0, 1.0],缺省值为1.0。 ------- FLAGS_memory_fraction_of_eager_deletion=0 - 保留所有临时变量,也就是禁用垃圾回收策略。 -FLAGS_memory_fraction_of_eager_deletion=1 - 释放所有临时变量。 +FLAGS_memory_fraction_of_eager_deletion=1 - 释放所有临时变量。 FLAGS_memory_fraction_of_eager_deletion=0.5 - 仅释放50%比例的占用内存最多的变量。 diff --git a/docs/guides/flags/memory_en.rst b/docs/guides/flags/memory_en.rst index 0e630e7d93d..74ad5a3910c 100644 --- a/docs/guides/flags/memory_en.rst +++ b/docs/guides/flags/memory_en.rst @@ -50,7 +50,7 @@ Example ------- FLAGS_eager_delete_tensor_gb=0.0 would make memory garbage release till the memory size of garbages reaches 0.0GB, i.e., release immediately once there is any garbage. 
-FLAGS_eager_delete_tensor_gb=1.0 would make memory garbage release till the memory size of garbages reaches 1.0GB. +FLAGS_eager_delete_tensor_gb=1.0 would make memory garbage release till the memory size of garbages reaches 1.0GB. FLAGS_eager_delete_tensor_gb=-1.0 would disable garbage collection strategy. @@ -71,7 +71,7 @@ Bool. The default value is True. Example ------- -FLAGS_fast_eager_deletion_mode=True would turn on fast garbage collection strategy. +FLAGS_fast_eager_deletion_mode=True would turn on fast garbage collection strategy. FLAGS_fast_eager_deletion_mode=False would turn off fast garbage collection strategy. @@ -168,7 +168,7 @@ Bool. The default value is False. Example ------- -FLAGS_init_allocated_mem=True will make the allocated memory initialize as a non-zero value. +FLAGS_init_allocated_mem=True will make the allocated memory initialize as a non-zero value. FLAGS_init_allocated_mem=False will not initialize the allocated memory. @@ -196,7 +196,7 @@ Allocate a chunk of GPU memory whose byte size is specified by the flag. Future Values accepted --------------- -Uint64 value greater than 0 which is the initial GPU memory size in MB. +Uint64 value greater than 0 which is the initial GPU memory size in MB. Example ------- @@ -213,7 +213,7 @@ FLAGS_memory_fraction_of_eager_deletion ******************************************* (since 1.4) -A memory size percentage when garbage collection strategy decides which variables should be released. If FLAGS_memory_fraction_of_eager_deletion=1.0, all temporary variables in the network would be released. If FLAGS_memory_fraction_of_eager_deletion=0.0, all temporary variables in the network would not be released. 
If 0.0`_ : 飞桨支持昆仑2代芯片(R200、R300)运行 - `飞桨框架昆仑2代芯片安装说明 <./paddle_install_cn.html>`_ : 飞桨框架昆仑2代芯片(R200、R300)安装说明 - - `飞桨框架昆仑2代芯片训练示例 <./train_example_cn.html>`_ : 飞桨框架昆仑2代芯片(R200、R300)训练示例 + - `飞桨框架昆仑2代芯片训练示例 <./train_example_cn.html>`_ : 飞桨框架昆仑2代芯片(R200、R300)训练示例 昆仑芯1代芯片: - `飞桨对昆仑1代芯片的支持 <./paddle_2.0_xpu_cn.html>`_ : 飞桨支持昆仑1代芯片(K100、K200)运行 - `飞桨框架昆仑1代芯片安装说明 <./paddle_install_cn.html>`_ : 飞桨框架昆仑1代芯片(K100、K200)安装说明 - `飞桨框架昆仑1代芯片训练示例 <./train_example_cn.html>`_ : 飞桨框架昆仑1代芯片(K100、K200)训练示例 - - `飞桨预测库昆仑1代芯片安装及使用 <./inference_install_example_cn.html>`_ : 飞桨预测库昆仑1代芯片(K100、K200)版安装及使用示例 + - `飞桨预测库昆仑1代芯片安装及使用 <./inference_install_example_cn.html>`_ : 飞桨预测库昆仑1代芯片(K100、K200)版安装及使用示例 .. toctree:: :hidden: diff --git a/docs/guides/index_cn.rst b/docs/guides/index_cn.rst index 0ef0a570059..1f5cba841c5 100644 --- a/docs/guides/index_cn.rst +++ b/docs/guides/index_cn.rst @@ -11,7 +11,7 @@ - `模型开发入门 <./beginner/index_cn.html>`_ - `模型开发更多用法 <./advanced/index_cn.html>`_ - `动态图转静态图 <./jit/index_cn.html>`_ -- `预测部署 <./infer/index_cn.html>`_ +- `预测部署 <./infer/index_cn.html>`_ - `分布式训练 <./06_distributed_training/index_cn.html>`_ - `性能调优 <./performance_improving/index_cn.html>`_ - `模型迁移 <./model_convert/index_cn.html>`_ diff --git a/docs/guides/index_en.rst b/docs/guides/index_en.rst index 3f84a2970c7..05374fc4773 100644 --- a/docs/guides/index_en.rst +++ b/docs/guides/index_en.rst @@ -20,7 +20,7 @@ Let's start with studying basic concept of PaddlePaddle: .. 
toctree:: :hidden: - + beginner/index_en.rst advanced/index_en.rst jit/index_en.rst diff --git a/docs/guides/infer/paddleslim/paddle_slim_en.rst b/docs/guides/infer/paddleslim/paddle_slim_en.rst index bb63edb98d0..734e5f4128d 100755 --- a/docs/guides/infer/paddleslim/paddle_slim_en.rst +++ b/docs/guides/infer/paddleslim/paddle_slim_en.rst @@ -86,7 +86,7 @@ Method Accuracy(baseline: 70.91% Knowledge Distillation(ResNet50) +1.06% - Knowledge Distillation(ResNet50) + int8 quantization +1.10% -71.76% Pruning(FLOPs-50%) + int8 quantization -1.71% -86.47% -===================================================== =========================== ============================ +===================================================== =========================== ============================ Object Detection +++++++++++++++++ @@ -95,7 +95,7 @@ Dataset: Pascal VOC; Model: MobileNet-V1-YOLOv3 ============================================================== ===================== =========================== -Method mAP(baseline: 76.2%) Model Size(baseline: 94MB) +Method mAP(baseline: 76.2%) Model Size(baseline: 94MB) ============================================================== ===================== =========================== Knowledge Distillation(ResNet34-YOLOv3) +2.8% - Pruning(FLOPs -52.88%) +1.4% -67.76% diff --git a/docs/guides/jit/case_analysis_cn.md b/docs/guides/jit/case_analysis_cn.md index 3307e753f6f..a745f9255b5 100644 --- a/docs/guides/jit/case_analysis_cn.md +++ b/docs/guides/jit/case_analysis_cn.md @@ -159,7 +159,7 @@ class SimpleNet(paddle.nn.Layer): out = out + y mask = paddle.to_tensor(self.mask) # <---- 每次都会调用 assign_op - out = out * mask + out = out * mask return out ``` @@ -209,7 +209,7 @@ class SimpleNet(object): # <---- 继承 Object def forward(self, x, y): out = self.linear(x) out = out + y - out = out * self.mask + out = out * self.mask return out ``` @@ -238,7 +238,7 @@ class SimpleNet(paddle.nn.Layer): out = self.linear(x) out = out + y # .... 
(略) - out = out * self.mask + out = out * self.mask return out ``` diff --git a/docs/guides/jit/grammar_list_cn.md b/docs/guides/jit/grammar_list_cn.md index bc9e73f7fdf..7e7d1b7376d 100644 --- a/docs/guides/jit/grammar_list_cn.md +++ b/docs/guides/jit/grammar_list_cn.md @@ -128,7 +128,7 @@ def ForTensor(x): ```python # break 的使用样例 def break_usage(x): - tensor_idx = -1 + tensor_idx = -1 for idx, val in enumerate(x) : if val == 2.0 : tensor_idx = idx diff --git a/docs/guides/jit/grammar_list_en.md b/docs/guides/jit/grammar_list_en.md index 88d3099f664..26e5ea8f357 100644 --- a/docs/guides/jit/grammar_list_en.md +++ b/docs/guides/jit/grammar_list_en.md @@ -52,7 +52,7 @@ While In the dynamic diagram, the code is interpreted and executed line by line, However, In the static graph, the control flow is realized through the `cond` operators. Each branch is represented by `true_fn` and `false_fn` respectively . Under this circumstance, the `false_fn` will be executed to build the computation graph. -When the condition variables in `If` are `Tensor`, `if-else` will be transformed to a `cond` operators. +When the condition variables in `If` are `Tensor`, `if-else` will be transformed to a `cond` operators. When the condition variables in `If` aren't `Tensor`, `if-else` will be executed as a python `if-else` code. @@ -117,7 +117,7 @@ The current dynamic-to-static supports adding break and continue statements in f ```python # break usage example : def break_usage(x): - tensor_idx = -1 + tensor_idx = -1 for idx, val in enumerate(x) : if val == 2.0 : tensor_idx = idx diff --git a/docs/guides/jit/index_cn.rst b/docs/guides/jit/index_cn.rst index 7931a16caa7..c82d404390a 100644 --- a/docs/guides/jit/index_cn.rst +++ b/docs/guides/jit/index_cn.rst @@ -20,7 +20,7 @@ .. 
toctree:: :hidden: - basic_usage_cn.rst + basic_usage_cn.rst principle_cn.md grammar_list_cn.md case_analysis_cn.md diff --git a/docs/guides/model_convert/index_en.rst b/docs/guides/model_convert/index_en.rst index e1452214534..b8cf6edf89e 100644 --- a/docs/guides/model_convert/index_en.rst +++ b/docs/guides/model_convert/index_en.rst @@ -8,5 +8,5 @@ Introduction of how to convert your model to Paddle 2.X. .. toctree:: :hidden: - + update_en.md diff --git a/docs/guides/model_convert/load_old_format_model_cn.rst b/docs/guides/model_convert/load_old_format_model_cn.rst index 34ade22a6bf..1651065b262 100644 --- a/docs/guides/model_convert/load_old_format_model_cn.rst +++ b/docs/guides/model_convert/load_old_format_model_cn.rst @@ -57,7 +57,7 @@ loader = paddle.io.DataLoader(dataset, feed_list=[image, label], places=place, - batch_size=BATCH_SIZE, + batch_size=BATCH_SIZE, shuffle=True, drop_last=True, num_workers=2) @@ -66,7 +66,7 @@ for data in loader(): exe.run( fluid.default_main_program(), - feed=data, + feed=data, fetch_list=[avg_loss]) @@ -214,7 +214,7 @@ model_path = "fc.example.model.save_params" fluid.io.save_params(exe, model_path) - # load + # load state_dict = paddle.load(model_path) print(state_dict) @@ -230,7 +230,7 @@ model_path = "fc.example.model.save_params_with_filename" fluid.io.save_params(exe, model_path, filename="__params__") - # load + # load import os params_file_path = os.path.join(model_path, "__params__") var_list = fluid.default_main_program().all_parameters() @@ -261,7 +261,7 @@ path = os.path.join("test_static_save_load", "model") paddle.static.save(prog, path) - # load program + # load program program=paddle.load(path + '.pdmodel') state_dict_param = paddle.load(path + '.pdparams') diff --git a/docs/guides/model_convert/migration_cn.rst b/docs/guides/model_convert/migration_cn.rst index 94f9e2ee60d..7702753c08e 100644 --- a/docs/guides/model_convert/migration_cn.rst +++ b/docs/guides/model_convert/migration_cn.rst @@ -157,14 +157,14 @@ 
Paddle 1.x的例子 np.random.seed(seed) fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - + if args.use_data_parallel: strategy = fluid.dygraph.parallel.prepare_context() mnist = MNIST() adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters()) if args.use_data_parallel: mnist = fluid.dygraph.parallel.DataParallel(mnist, strategy) - + train_reader = paddle.batch( paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True) if args.use_data_parallel: @@ -186,7 +186,7 @@ paddle_upgrade_tool支持单文件的转化,你可以通过下方的命令直 .. code:: ipython3 - $ paddle_upgrade_tool --inpath models/dygraph/mnist/train.py --write + $ paddle_upgrade_tool --inpath models/dygraph/mnist/train.py --write 此时,命令行会弹出下方的提示: diff --git a/docs/guides/model_convert/paddle_api_mapping_cn.rst b/docs/guides/model_convert/paddle_api_mapping_cn.rst index ae7e5fee3ae..f72c97dcc01 100644 --- a/docs/guides/model_convert/paddle_api_mapping_cn.rst +++ b/docs/guides/model_convert/paddle_api_mapping_cn.rst @@ -6,13 +6,13 @@ Paddle 1.8 与 Paddle 2.0 API映射表 本文档基于Paddle 1.8 梳理了常用API与Paddle 2.0对应关系。你可以根据对应关系,快速熟悉Paddle 2.0的接口使用。 .. note:: - + - 2.0版本将会是一个长期维护的版本,我们将会发布新增二位版本号版本进行功能增强、以及性能优化,通过发布新增三位版本号版本进行bugfix。 - 我们还会继续维护1.8版本,但仅限于严重的bugfix。 .. note:: - 其中,迁移工具能否转换,是指使用迁移工具能否直接对PaddlePaddle 1.8的API进行迁移,了解更多关于迁移工具的内容,请参考 :ref:`版本迁移工具 ` + 其中,迁移工具能否转换,是指使用迁移工具能否直接对PaddlePaddle 1.8的API进行迁移,了解更多关于迁移工具的内容,请参考 :ref:`版本迁移工具 ` .. csv-table:: :header: "序号", "PaddlePaddle 1.8 API", "PaddlePaddle 2.0 API", "迁移工具能否转换" diff --git a/docs/guides/model_convert/update_en.md b/docs/guides/model_convert/update_en.md index 43419674e59..b702d9fae8b 100644 --- a/docs/guides/model_convert/update_en.md +++ b/docs/guides/model_convert/update_en.md @@ -503,11 +503,11 @@ Important changes: - `PaddleTensor`, `PaddleBuf`, etc. are deprecated, `ZeroCopyTensor` becomes the default Tensor type and renamed to `Tensor`. 
-- New `PredictorPool` tool class to simplify the creation of multi-threaded predictors, and more peripheral tools will be added +- New `PredictorPool` tool class to simplify the creation of multi-threaded predictors, and more peripheral tools will be added - Return value of `CreatePredictor` (formerly `CreatePaddlePredictor`) changed from `unique_ptr` to `shared_ptr` to avoid the problem of wrong decomposition order after Clone - + API changes diff --git a/docs/guides/performance_improving/amp_cn.md b/docs/guides/performance_improving/amp_cn.md index 18dcc48c005..09910ae10e3 100644 --- a/docs/guides/performance_improving/amp_cn.md +++ b/docs/guides/performance_improving/amp_cn.md @@ -62,7 +62,7 @@ - + @@ -75,15 +75,15 @@ - + - + - +
硬件支持的混合精度支持的混合精度
Nvidia GPU
华为 NPUfloat16float16
昆仑芯 XPUfloat16float16
寒武纪 MLUfloat16float16
diff --git a/docs/guides/performance_improving/amp_en.md b/docs/guides/performance_improving/amp_en.md index f30484d3f27..7403e70f215 100644 --- a/docs/guides/performance_improving/amp_en.md +++ b/docs/guides/performance_improving/amp_en.md @@ -2,7 +2,7 @@ In general, the default datatype (dtype) of training deep learning model is float32, and each data occupies 32 bits of storage space. In order to save the consumption of memory, the industry has proposed 16 bit data types (such as float16 and bfloat16 supported by GPU). Each data only needs 16 bits storage space, saving half the storage space compared with float32. Some chips can obtain faster computing speed on 16 bit data. For example, according to the data of NVIDIA, On a V100 GPU, matrix multiply and convolution operations can be speeded up to 8x in float16 over their float32 equivalents. -Considering that some operators (OPS) require high data accuracy (such as softmax and cross_entropy), this kind of operator still needs to be calculated with float32. Some operators (such as conv2d and matmul) are not sensitive to data accuracy, float16 / bfloat16 can be used to improve the calculation speed and reduce the storage space, Paddle provides **Automatic Mixed Precision (AMP)**, during model training, the appropriate data calculation accuracy (float32 or float16 / bfloat16) is automatically selected for the operator, which can accelerate the training without losing the training accuracy. Please refer to the papers jointly released by Baidu and NVIDIA in 2018: [MIXED PRECISION TRAINING](https://arxiv.org/pdf/1710.03740.pdf). This tutorial will introduce how to use automatic mixed precision training with PaddlePaddle. +Considering that some operators (OPS) require high data accuracy (such as softmax and cross_entropy), this kind of operator still needs to be calculated with float32. 
Some operators (such as conv2d and matmul) are not sensitive to data accuracy, float16 / bfloat16 can be used to improve the calculation speed and reduce the storage space, Paddle provides **Automatic Mixed Precision (AMP)**, during model training, the appropriate data calculation accuracy (float32 or float16 / bfloat16) is automatically selected for the operator, which can accelerate the training without losing the training accuracy. Please refer to the papers jointly released by Baidu and NVIDIA in 2018: [MIXED PRECISION TRAINING](https://arxiv.org/pdf/1710.03740.pdf). This tutorial will introduce how to use automatic mixed precision training with PaddlePaddle. ## I. overview @@ -61,7 +61,7 @@ Paddle AMP supports following hardware, and the data type supported by different - + @@ -74,15 +74,15 @@ Paddle AMP supports following hardware, and the data type supported by different - + - + - +
硬件支持的混合精度支持的混合精度
Nvidia GPU
华为 NPUfloat16float16
昆仑芯 XPUfloat16float16
寒武纪 MLUfloat16float16
diff --git a/docs/guides/performance_improving/analysis_tools/benchmark_cn.md b/docs/guides/performance_improving/analysis_tools/benchmark_cn.md index dfbea607be4..5f6b23f3ae0 100644 --- a/docs/guides/performance_improving/analysis_tools/benchmark_cn.md +++ b/docs/guides/performance_improving/analysis_tools/benchmark_cn.md @@ -25,7 +25,7 @@ 对框架做基准测试,需要覆盖不同训练任务和不同大小的模型,本文中选取了图像和NLP的最为常用的5个模型。 -任务种类| 模型名称| 网络结构| 数据集 +任务种类| 模型名称| 网络结构| 数据集 :---:|:--:|:---:|:---: 图像生成| CycleGAN| GAN| horse2zebra 图像分类| SE-ResNeXt50| Resnet-50| image-net diff --git a/docs/guides/performance_improving/memory_optimize_en.rst b/docs/guides/performance_improving/memory_optimize_en.rst index 2a1e3ecb0c5..220703db90f 100644 --- a/docs/guides/performance_improving/memory_optimize_en.rst +++ b/docs/guides/performance_improving/memory_optimize_en.rst @@ -11,24 +11,24 @@ Memory Allocation and Optimization -------------------------- Since version 1.6+, PaddlePaddle supports the AutoGrowth strategy, which allocates memory on demand. -AutoGrowth strategy has been enabled by default in version 1.7+, making it convenient for users to +AutoGrowth strategy has been enabled by default in version 1.7+, making it convenient for users to run multiple tasks on the same GPU card at the same time. -Because the native CUDA system calls :code:`cudaMalloc` and :code:`cudaFree` are synchronous operations, -which are very time-consuming, the AutoGrowth strategy will cache the allocated memory for subsequent allocation. +Because the native CUDA system calls :code:`cudaMalloc` and :code:`cudaFree` are synchronous operations, +which are very time-consuming, the AutoGrowth strategy will cache the allocated memory for subsequent allocation. The specific methods are as follows: - In the first few memory allocations, PaddlePaddle framework will call :code:`cudaMalloc` and allocate memory on demand. 
When releasing the allocated memory, it will not call :code:`cudaFree` to return the memory to GPU, but cache the memory inside the framework. - In the subsequent allocations, PaddlePaddle framework will first check if there is a fit block (block size larger than the required memory size) in the cached memory. If there is, it will split the required memory from the fit block and return. Otherwise, it will call :code:`cudaMalloc` to allocate memory from GPU. The allocated memory are also cached when being released for subsequent allocation. -Therefore, the AutoGrowth strategy may slow the speed in the first few batches of model training, +Therefore, the AutoGrowth strategy may slow the speed in the first few batches of model training, but will not affect the speed in the subsequent training process. 1.2. Pre-Allocation Strategy ---------------- -In addition to the AutoGrowth strategy, paddlepaddle also provides a Pre-Allocation strategy, +In addition to the AutoGrowth strategy, paddlepaddle also provides a Pre-Allocation strategy, which is the default memory allocation strategy before paddlepaddle 1.7. The Pre-Allocation strategy allocates a large size chunk at the first allocation, and the subsequent memory allocation is mostly obtained from the pre allocated memory chunk. @@ -38,20 +38,20 @@ Among them, the chunk size is determined by the environment variable :code:`FLAG chunk_size = FLAGS_fraction_of_gpu_memory_to_use * number of current available memory of a single GPU card -The default value of :code:`FLAGS_fraction_of_gpu_memory_to_use` is 0.92, that is, the framework will pre allocates +The default value of :code:`FLAGS_fraction_of_gpu_memory_to_use` is 0.92, that is, the framework will pre allocates 92% of the currently available memory of the GPU card. 
The specific way of Pre-Allocation strategy to allocate GPU memory is: -- When allocating memory of requested_size, +- When allocating memory of requested_size, - If requested_size <= chunk_size, the framework will first allocate a memory chunk of chunk_size, then split a block of requested_size and return the block. Every subsequent memory allocation will be performed on the chunk. - If requested_size > chunk_size, the framework will call :code:`cudaMalloc` to allocate memory block of requested_size and return. -- When freeing memory of requested_size, +- When freeing memory of requested_size, - If free_size <= chunk_size, the framework will put the memory block back into the pre-allocated chunk, instead of returning back to GPU. - If free_size > chunk_size, the framework will call :code:`cudaFree` and return the memory back to GPU. -If there are other tasks on your GPU card that occupy the memory, you can appropriately decrease :code:`FLAGS_fraction_of_gpu_memory_to_use` +If there are other tasks on your GPU card that occupy the memory, you can appropriately decrease :code:`FLAGS_fraction_of_gpu_memory_to_use` to ensure that the framework can pre-allocate the memory block of appropriate size, for example .. code-block:: shell @@ -62,7 +62,7 @@ If :code:`FLAGS_fraction_of_gpu_memory_to_use` is set to 0, the framework will c 1.3. Configuration of memory allocation strategy ----------------------- -Since version 1.6+, PaddlePaddle supports both the AutoGrowth strategy and the Pre-Allocation Strategy, and control the strategy used in framework by +Since version 1.6+, PaddlePaddle supports both the AutoGrowth strategy and the Pre-Allocation Strategy, and control the strategy used in framework by the environment variable :code:`FLAGS_allocator_strategy`. 
Use AutoGrowth strategy: @@ -78,7 +78,7 @@ Use Pre-Allocation strategy: export FLAGS_allocator_strategy=naive_best_fit # Use Pre-Allocation strategy Plus, since version 1.7.2+, PaddlePaddle provides an environment variable :code:`FLAGS_gpu_memory_limit_mb`, which controls the maximum gpu memory limit that the process can allocate. -If it is equal to 0, there would be no limit and all gpu memory would be available to the process. If it is larger than 0, the process would raise out of memory error if the allocated +If it is equal to 0, there would be no limit and all gpu memory would be available to the process. If it is larger than 0, the process would raise out of memory error if the allocated memory exceeds the limit even though there is available memory on the gpu card. The unit is MB and default value is 0. 2. Memory Optimization Strategy @@ -89,7 +89,7 @@ Paddlepaddle provides several general memory optimization methods to optimize th 2.1. GC Strategy: memory garbage eager collection ------------------------- -The principle of GC(Garbage Collection)is to release the memory space of useless variables eagerly during network running, +The principle of GC(Garbage Collection)is to release the memory space of useless variables eagerly during network running, in order to save memory space. GC is suitable for training and inference using Executor or ParallelExecutor, but it is not suitable for C++ inference library. **Since version 1.6+, GC Strategy is enabled by default.** @@ -99,8 +99,8 @@ GC Strategy is controlled by 3 environment variable: - :code:`FLAGS_eager_delete_tensor_gb` -Variable to enable GC, its data type is double. The default value is -1 in PaddlePaddle with version < 1.6, -and is 0 in PaddlePaddle with version >= 1.6. GC Strategy will cache a certain amount of memory garbage and release it uniformly. +Variable to enable GC, its data type is double. 
The default value is -1 in PaddlePaddle with version < 1.6, +and is 0 in PaddlePaddle with version >= 1.6. GC Strategy will cache a certain amount of memory garbage and release it uniformly. :code:`FLAGS_eager_delete_tensor_gb` means the threshold of cached memory garbage, the unit of which is GB. **It is recommended to set** :code:`FLAGS_eager_delete_tensor_gb=0`. If :code:`FLAGS_eager_delete_tensor_gb=0`, once there is memory garbage, it will be collected immediately to save memory. @@ -113,8 +113,8 @@ If :code:`FLAGS_eager_delete_tensor_gb<0`, GC Strategy is disabled. - :code:`FLAGS_memory_fraction_of_eager_deletion` Variable to control GC Strategy, its data type is double. The default value is 1, range [0,1]. It is only suitable for ParallelExecutor or CompiledProgram+with_data_parallel. -GC will sort the variables in descending order according to the memory space occupied by the variables, -and only collect the memory space of top :code:`FLAGS_memory_fraction_of_eager_deletion` variables. +GC will sort the variables in descending order according to the memory space occupied by the variables, +and only collect the memory space of top :code:`FLAGS_memory_fraction_of_eager_deletion` variables. **It is recommended to remain default value**, that is :code:`FLAGS_memory_fraction_of_eager_deletion=1`. If :code:`FLAGS_memory_fraction_of_eager_deletion=0.6`, top 60% variables will be collected. @@ -126,17 +126,17 @@ If :code:`FLAGS_memory_fraction_of_eager_deletion=1`, all variables will be coll - :code:`FLAGS_fast_eager_deletion_mode` -Variable to enable fast GC Strategy, its type is bool. The default value is True, which means use fast GC Strategy. +Variable to enable fast GC Strategy, its type is bool. The default value is True, which means use fast GC Strategy. Fast GC Strategy will collect the memory garbage immediately instead of waiting for CUDA Kernel finish. **It is recommended to remain default value**, that is :code:`FLAGS_fast_eager_deletion_mode=True`. 
2.2. Inplace Strategy: output reuses input inside operator ---------------------------------- -The principle of Inplace strategy is that the output of some operators can reuses the memory space of input. +The principle of Inplace strategy is that the output of some operators can reuses the memory space of input. For example, the output and input of operator :code:`reshape` can reuse the same memory space. -Inplace Strategy is suitable for ParallelExecutor or CompiledProgram+with_data_parallel, which can be set through :code:`BuildStrategy`. +Inplace Strategy is suitable for ParallelExecutor or CompiledProgram+with_data_parallel, which can be set through :code:`BuildStrategy`. The Strategy is not suitable for Executor+Program or C++ inference library. **Since version 1.6+, Inplace Strategy is enabled by default.** @@ -152,8 +152,8 @@ The specific way of Inplace strategy is: .with_data_parallel(loss_name=loss.name, build_strategy=build_strategy) -In PaddlePaddle with version < 1.6, due to of some design problems, when the Inplace Strategy is enabled, -the variable in fetch_list in the subsequent :code:`exe.run` must be persistent. +In PaddlePaddle with version < 1.6, due to of some design problems, when the Inplace Strategy is enabled, +the variable in fetch_list in the subsequent :code:`exe.run` must be persistent. That is, if you the variables you want to fetch are loss and acc, you must set: .. 
code-block:: python diff --git a/docs/guides/performance_improving/paddle_tensorrt_infer.md b/docs/guides/performance_improving/paddle_tensorrt_infer.md index 60c5d9ae264..2877b064d38 100644 --- a/docs/guides/performance_improving/paddle_tensorrt_infer.md +++ b/docs/guides/performance_improving/paddle_tensorrt_infer.md @@ -38,13 +38,13 @@ NVIDIA TensorRT 是一个高性能的深度学习预测库,可为深度学习 在使用AnalysisPredictor时,我们通过配置AnalysisConfig中的接口 ``` c++ -config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, - batch_size /* max_batch_size*/, +config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, + batch_size /* max_batch_size*/, 3 /* min_subgraph_size*/, AnalysisConfig::Precision::kFloat32 /* precision*/, false /* use_static*/, false /* use_calib_mode*/); -``` +``` 的方式来指定使用Paddle-TRT子图方式来运行。 该接口中的参数的详细介绍如下: @@ -106,12 +106,12 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, 按照实际运行环境配置`run.sh`中的选项开关和所需lib路径。 -5. 编译与运行样例 +5. 编译与运行样例 ## Paddle-TRT INT8使用 -1. Paddle-TRT INT8 简介 +1. Paddle-TRT INT8 简介 神经网络的参数在一定程度上是冗余的,在很多任务上,我们可以在保证模型精度的前提下,将Float32的模型转换成Int8的模型。目前,Paddle-TRT支持离线将预训练好的Float32模型转换成Int8的模型,具体的流程如下: 1) **生成校准表**(Calibration table):我们准备500张左右的真实输入数据,并将数据输入到模型中去,Paddle-TRT会统计模型中每个op输入和输出值的范围信息,并将其记录到校准表中,这些信息有效减少了模型转换时的信息损失。 @@ -121,31 +121,31 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, 2. 
编译测试INT8样例 将`run.sh`文件中的`mobilenet_test`改为`fluid_generate_calib_test`,运行 - ``` shell - sh run.sh + ``` shell + sh run.sh ``` 即可执行生成校准表样例,在该样例中,我们随机生成了500个输入来模拟这一过程,在实际业务中,建议大家使用真实样例。运行结束后,在 `SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/_opt_cache` 模型目录下会多出一个名字为trt_calib_*的文件,即校准表。 生成校准表后,将带校准表的模型文件拷贝到特定地址 - ``` shell - cp -rf SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/ SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib + ``` shell + cp -rf SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/ SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib ``` 将`run.sh`文件中的`fluid_generate_calib_test`改为`fluid_int8_test`,将模型路径改为`SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib`,运行 - ``` shell - sh run.sh + ``` shell + sh run.sh ``` 即可执行int8预测样例。 ## Paddle-TRT子图运行原理 - PaddlePaddle采用子图的形式对TensorRT进行集成,当模型加载后,神经网络可以表示为由变量和运算节点组成的计算图。Paddle TensorRT实现的功能是对整个图进行扫描,发现图中可以使用TensorRT优化的子图,并使用TensorRT节点替换它们。在模型的推断期间,如果遇到TensorRT节点,Paddle会调用TensorRT库对该节点进行优化,其他的节点调用Paddle的原生实现。TensorRT在推断期间能够进行Op的横向和纵向融合,过滤掉冗余的Op,并对特定平台下的特定的Op选择合适的kernel等进行优化,能够加快模型的预测速度。 + PaddlePaddle采用子图的形式对TensorRT进行集成,当模型加载后,神经网络可以表示为由变量和运算节点组成的计算图。Paddle TensorRT实现的功能是对整个图进行扫描,发现图中可以使用TensorRT优化的子图,并使用TensorRT节点替换它们。在模型的推断期间,如果遇到TensorRT节点,Paddle会调用TensorRT库对该节点进行优化,其他的节点调用Paddle的原生实现。TensorRT在推断期间能够进行Op的横向和纵向融合,过滤掉冗余的Op,并对特定平台下的特定的Op选择合适的kernel等进行优化,能够加快模型的预测速度。 -下图使用一个简单的模型展示了这个过程: +下图使用一个简单的模型展示了这个过程: **原始网络**

diff --git a/docs/guides/performance_improving/paddle_tensorrt_infer_en.md b/docs/guides/performance_improving/paddle_tensorrt_infer_en.md index bb30113f997..f013e12a6f8 100644 --- a/docs/guides/performance_improving/paddle_tensorrt_infer_en.md +++ b/docs/guides/performance_improving/paddle_tensorrt_infer_en.md @@ -32,13 +32,13 @@ We will introduce the obtaining, usage and theory of Paddle-TensorRT library in When using AnalysisPredictor, we enable Paddle-TRT by setting ``` c++ -config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, - batch_size /* max_batch_size*/, +config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, + batch_size /* max_batch_size*/, 3 /* min_subgraph_size*/, AnalysisConfig::Precision::kFloat32 /* precision*/, false /* use_static*/, false /* use_calib_mode*/); -``` +``` The details of this interface is as following: - **`workspace_size`**: type:int, default is 1 << 20. Sets the max workspace size of TRT. TensorRT will choose kernels under this constraint. @@ -95,7 +95,7 @@ The details of this interface is as following: Please configure `run.sh` depending on your environment. -4. Build and run the sample. +4. Build and run the sample. ``` shell sh run.sh @@ -103,7 +103,7 @@ The details of this interface is as following: ## Paddle-TRT INT8 usage -1. Paddle-TRT INT8 introduction +1. Paddle-TRT INT8 introduction The parameters of the neural network are redundant to some extent. In many tasks, we can turn the Float32 model into Int8 model on the premise of precision. At present, Paddle-TRT supports to turn the trained Float32 model into Int8 model off line. The specific processes are as follows: 1)**Create the calibration table**. We prepare about 500 real input data, and input the data to the model. Paddle-TRT will count the range information of each op input and output value in the model, and record in the calibration table. The information can reduce the information loss during model transformation. 
@@ -114,22 +114,22 @@ The details of this interface is as following: change the `mobilenet_test` in `run.sh` to `fluid_generate_calib_test` and run - ``` shell - sh run.sh + ``` shell + sh run.sh ``` We generate 500 input data to simulate the process, and it's suggested that you use real example for experiment. After the running period, there will be a new file named trt_calib_* under the `SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/_opt_cache` model directory, which is the calibration table. Then copy the model dir with calibration infomation to path - ``` shell - cp -rf SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/ SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib + ``` shell + cp -rf SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/ SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib ``` change `fluid_generate_calib_test` in `run.sh` to `fluid_int8_test`, and change model dir path to `SAMPLE_BASE_DIR/sample/paddle-TRT/mobilenetv1_calib` and run - ``` shell - sh run.sh + ``` shell + sh run.sh ``` ## Paddle-TRT subgraph operation principle diff --git a/docs/guides/performance_improving/profiling_model.md b/docs/guides/performance_improving/profiling_model.md index 2d46e853352..cb8f746676c 100644 --- a/docs/guides/performance_improving/profiling_model.md +++ b/docs/guides/performance_improving/profiling_model.md @@ -118,16 +118,16 @@ p = profiler.Profiler(scheduler = [3,14], on_trace_ready=my_on_trace_ready, time ```text -----------------------------------------------Model Summary----------------------------------------------- Time unit: ms ---------------- ------ ---------------------------------------- ---------------------------------------- -Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) ---------------- ------ ---------------------------------------- ---------------------------------------- -ProfileStep 11 294.53 / 26.78 / 35.28 / 24.56 / 100.00 13.22 / 1.20 / 1.20 / 1.20 / 100.00 - Dataloader 11 
141.49 / 12.86 / 17.53 / 10.34 / 48.04 0.00 / 0.00 / 0.00 / 0.00 / 0.00 - Forward 11 51.41 / 4.67 / 6.18 / 3.93 / 17.45 3.92 / 0.36 / 0.36 / 0.35 / 29.50 - Backward 11 21.23 / 1.93 / 2.61 / 1.70 / 7.21 8.14 / 0.74 / 0.74 / 0.74 / 61.51 - Optimization 11 34.74 / 3.16 / 3.65 / 2.41 / 11.79 0.67 / 0.06 / 0.06 / 0.06 / 5.03 - Others - 45.66 / - / - / - / 15.50 0.53 / - / - / - / 3.96 ---------------- ------ ---------------------------------------- ---------------------------------------- +--------------- ------ ---------------------------------------- ---------------------------------------- +Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) +--------------- ------ ---------------------------------------- ---------------------------------------- +ProfileStep 11 294.53 / 26.78 / 35.28 / 24.56 / 100.00 13.22 / 1.20 / 1.20 / 1.20 / 100.00 + Dataloader 11 141.49 / 12.86 / 17.53 / 10.34 / 48.04 0.00 / 0.00 / 0.00 / 0.00 / 0.00 + Forward 11 51.41 / 4.67 / 6.18 / 3.93 / 17.45 3.92 / 0.36 / 0.36 / 0.35 / 29.50 + Backward 11 21.23 / 1.93 / 2.61 / 1.70 / 7.21 8.14 / 0.74 / 0.74 / 0.74 / 61.51 + Optimization 11 34.74 / 3.16 / 3.65 / 2.41 / 11.79 0.67 / 0.06 / 0.06 / 0.06 / 5.03 + Others - 45.66 / - / - / - / 15.50 0.53 / - / - / - / 3.96 +--------------- ------ ---------------------------------------- ---------------------------------------- ``` 其中ProfileStep表示训练batch的迭代step过程,对应代码中每两次调用`p.step()`的间隔时间;Dataloader表示数据读取的时间,即`for batch_id, data in enumerate(train_loader())`的执行时间;Forward表示模型前向的时间,即`logits = model(x_data)`的执行时间,Backward表示反向传播的时间,即`loss.backward()`的执行时间;Optimization表示优化器的时间,即`opt.step()`的执行时间。 通过timeline可以看到,Dataloader占了执行过程的很大比重,Model Summary显示其甚至接近了50%。分析程序发现,这是由于模型本身比较简单,需要的计算量小,再加上Dataloader @@ -152,16 +152,16 @@ train_loader = paddle.io.DataLoader(cifar10_train, ```text -----------------------------------------------Model Summary----------------------------------------------- Time unit: ms ---------------- ------ 
---------------------------------------- ---------------------------------------- -Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) ---------------- ------ ---------------------------------------- ---------------------------------------- -ProfileStep 11 90.94 / 8.27 / 11.82 / 7.85 / 100.00 13.27 / 1.21 / 1.22 / 1.19 / 100.00 - Dataloader 11 1.82 / 0.17 / 0.67 / 0.11 / 2.00 0.00 / 0.00 / 0.00 / 0.00 / 0.00 - Forward 11 29.58 / 2.69 / 3.53 / 2.52 / 32.52 3.82 / 0.35 / 0.35 / 0.34 / 30.67 - Backward 11 15.21 / 1.38 / 1.95 / 1.31 / 16.72 8.30 / 0.75 / 0.77 / 0.74 / 60.71 - Optimization 11 17.55 / 1.60 / 1.92 / 1.55 / 19.30 0.66 / 0.06 / 0.06 / 0.06 / 4.82 - Others - 26.79 / - / - / - / 29.46 0.52 / - / - / - / 3.80 ---------------- ------ ---------------------------------------- ---------------------------------------- +--------------- ------ ---------------------------------------- ---------------------------------------- +Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) +--------------- ------ ---------------------------------------- ---------------------------------------- +ProfileStep 11 90.94 / 8.27 / 11.82 / 7.85 / 100.00 13.27 / 1.21 / 1.22 / 1.19 / 100.00 + Dataloader 11 1.82 / 0.17 / 0.67 / 0.11 / 2.00 0.00 / 0.00 / 0.00 / 0.00 / 0.00 + Forward 11 29.58 / 2.69 / 3.53 / 2.52 / 32.52 3.82 / 0.35 / 0.35 / 0.34 / 30.67 + Backward 11 15.21 / 1.38 / 1.95 / 1.31 / 16.72 8.30 / 0.75 / 0.77 / 0.74 / 60.71 + Optimization 11 17.55 / 1.60 / 1.92 / 1.55 / 19.30 0.66 / 0.06 / 0.06 / 0.06 / 4.82 + Others - 26.79 / - / - / - / 29.46 0.52 / - / - / - / 3.80 +--------------- ------ ---------------------------------------- ---------------------------------------- ``` 可以看到,从Dataloader中取数据的时间大大减少,变成了平均只占一个step的2%,并且平均一个step所需要的时间也相应减少了。 @@ -210,13 +210,13 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 - Device Summary ```text -------------------Device Summary------------------- - 
------------------------------ -------------------- - Device Utilization (%) - ------------------------------ -------------------- - CPU(Process) 77.13 - CPU(System) 25.99 - GPU2 55.50 - ------------------------------ -------------------- + ------------------------------ -------------------- + Device Utilization (%) + ------------------------------ -------------------- + CPU(Process) 77.13 + CPU(System) 25.99 + GPU2 55.50 + ------------------------------ -------------------- Note: CPU(Process) Utilization = Current process CPU time over all cpu cores / elapsed time, so max utilization can be reached 100% * number of cpu cores. CPU(System) Utilization = All processes CPU time over all cpu cores(busy time) / (busy time + idle time). @@ -235,26 +235,26 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 ```text ---------------------------------------------Overview Summary--------------------------------------------- Time unit: ms - ------------------------- ------------------------- ------------------------- ------------------------- - Event Type Calls CPU Time Ratio (%) - ------------------------- ------------------------- ------------------------- ------------------------- - ProfileStep 8 4945.15 100.00 - CudaRuntime 28336 2435.63 49.25 - UserDefined 486 2280.54 46.12 - Dataloader 8 1819.15 36.79 - Forward 8 1282.64 25.94 - Operator 8056 1244.41 25.16 - OperatorInner 21880 374.18 7.57 - Backward 8 160.43 3.24 - Optimization 8 102.34 2.07 - ------------------------- ------------------------- ------------------------- ------------------------- - Calls GPU Time Ratio (%) - ------------------------- ------------------------- ------------------------- ------------------------- - Kernel 13688 2744.61 55.50 - Memcpy 496 29.82 0.60 - Memset 104 0.12 0.00 - Communication 784 257.23 5.20 - ------------------------- ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- 
------------------------- + Event Type Calls CPU Time Ratio (%) + ------------------------- ------------------------- ------------------------- ------------------------- + ProfileStep 8 4945.15 100.00 + CudaRuntime 28336 2435.63 49.25 + UserDefined 486 2280.54 46.12 + Dataloader 8 1819.15 36.79 + Forward 8 1282.64 25.94 + Operator 8056 1244.41 25.16 + OperatorInner 21880 374.18 7.57 + Backward 8 160.43 3.24 + Optimization 8 102.34 2.07 + ------------------------- ------------------------- ------------------------- ------------------------- + Calls GPU Time Ratio (%) + ------------------------- ------------------------- ------------------------- ------------------------- + Kernel 13688 2744.61 55.50 + Memcpy 496 29.82 0.60 + Memset 104 0.12 0.00 + Communication 784 257.23 5.20 + ------------------------- ------------------------- ------------------------- ------------------------- Note: In this table, We sum up all collected events in terms of event type. The time of events collected on host are presented as CPU Time, and as GPU Time if on device. 
@@ -277,16 +277,16 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 ```text --------------------------------------------------Model Summary-------------------------------------------------- Time unit: ms - --------------- ------ ------------------------------------------- ------------------------------------------- - Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) - --------------- ------ ------------------------------------------- ------------------------------------------- - ProfileStep 8 4945.15 / 618.14 / 839.15 / 386.34 / 100.00 2790.80 / 348.85 / 372.39 / 344.60 / 100.00 - Dataloader 8 1819.15 / 227.39 / 451.69 / 0.32 / 36.79 0.00 / 0.00 / 0.00 / 0.00 / 0.00 - Forward 8 1282.64 / 160.33 / 161.49 / 159.19 / 25.94 1007.64 / 125.96 / 126.13 / 125.58 / 35.90 - Backward 8 160.43 / 20.05 / 21.00 / 19.21 / 3.24 1762.11 / 220.26 / 243.83 / 216.05 / 62.49 - Optimization 8 102.34 / 12.79 / 13.42 / 12.47 / 2.07 17.03 / 2.13 / 2.13 / 2.13 / 0.60 - Others - 1580.59 / - / - / - / 31.96 28.22 / - / - / - / 1.00 - --------------- ------ ------------------------------------------- ------------------------------------------- + --------------- ------ ------------------------------------------- ------------------------------------------- + Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) + --------------- ------ ------------------------------------------- ------------------------------------------- + ProfileStep 8 4945.15 / 618.14 / 839.15 / 386.34 / 100.00 2790.80 / 348.85 / 372.39 / 344.60 / 100.00 + Dataloader 8 1819.15 / 227.39 / 451.69 / 0.32 / 36.79 0.00 / 0.00 / 0.00 / 0.00 / 0.00 + Forward 8 1282.64 / 160.33 / 161.49 / 159.19 / 25.94 1007.64 / 125.96 / 126.13 / 125.58 / 35.90 + Backward 8 160.43 / 20.05 / 21.00 / 19.21 / 3.24 1762.11 / 220.26 / 243.83 / 216.05 / 62.49 + Optimization 8 102.34 / 12.79 / 13.42 / 12.47 / 2.07 17.03 / 2.13 / 2.13 / 2.13 / 0.60 + Others - 1580.59 / - / - / - / 
31.96 28.22 / - / - / - / 1.00 + --------------- ------ ------------------------------------------- ------------------------------------------- ``` Model Summary用于展示模型训练或者推理过程中,dataloader、forward、backward、optimization所消耗的时间。其中GPU Time对应着在该段过程内所发起的GPU侧活动的时间。 @@ -297,14 +297,14 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 ```text -----------------------------Distribution Summary------------------------------ Time unit: ms - ------------------------- ------------------------- ------------------------- - Name Total Time Ratio (%) - ------------------------- ------------------------- ------------------------- - ProfileStep 4945.15 100.00 - Communication 257.23 5.20 - Computation 2526.52 51.09 - Overlap 39.13 0.79 - ------------------------- ------------------------- ------------------------- + ------------------------- ------------------------- ------------------------- + Name Total Time Ratio (%) + ------------------------- ------------------------- ------------------------- + ProfileStep 4945.15 100.00 + Communication 257.23 5.20 + Computation 2526.52 51.09 + Overlap 39.13 0.79 + ------------------------- ------------------------- ------------------------- ``` Distribution Summary用于展示分布式训练中通信(Communication)、计算(Computation)以及这两者Overlap的时间。 @@ -320,53 +320,53 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 (由于原始表单较长,这里截取一部分进行展示) ----------------------------------------------------------------Operator Summary---------------------------------------------------------------- Time unit: ms - ---------------------------------------------------- ------ ---------------------------------------- ---------------------------------------- - Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) - ---------------------------------------------------- ------ ---------------------------------------- ---------------------------------------- + ---------------------------------------------------- ------ ---------------------------------------- 
---------------------------------------- + Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) + ---------------------------------------------------- ------ ---------------------------------------- ---------------------------------------- -----------------------------------------------------------Thread: All threads merged----------------------------------------------------------- - conv2d_grad grad_node 296 53.70 / 0.18 / 0.40 / 0.14 / 4.34 679.11 / 2.29 / 5.75 / 0.24 / 24.11 - conv2d_grad::infer_shape 296 0.44 / 0.00 / 0.00 / 0.00 / 0.81 0.00 / 0.00 / 0.00 / 0.00 / 0.00 - conv2d_grad::compute 296 44.09 / 0.15 / 0.31 / 0.10 / 82.10 644.39 / 2.18 / 5.75 / 0.24 / 94.89 - cudnn::maxwell::gemm::computeWgradOffsetsKern... 224 - / - / - / - / - 0.50 / 0.00 / 0.00 / 0.00 / 0.08 - void scalePackedTensor_kernel(c... 224 - / - / - / - / - 0.79 / 0.00 / 0.01 / 0.00 / 0.12 - cudnn::maxwell::gemm::computeBOffsetsKernel(c... 464 - / - / - / - / - 0.95 / 0.00 / 0.01 / 0.00 / 0.15 - maxwell_scudnn_128x32_stridedB_splitK_large_nn 8 - / - / - / - / - 15.70 / 1.96 / 1.97 / 1.96 / 2.44 - cudnn::maxwell::gemm::computeOffsetsKernel(cu... 240 - / - / - / - / - 0.54 / 0.00 / 0.00 / 0.00 / 0.08 - maxwell_scudnn_128x32_stridedB_interior_nn 8 - / - / - / - / - 9.53 / 1.19 / 1.19 / 1.19 / 1.48 - maxwell_scudnn_128x64_stridedB_splitK_interio... 8 - / - / - / - / - 28.67 / 3.58 / 3.59 / 3.58 / 4.45 - maxwell_scudnn_128x64_stridedB_interior_nn 8 - / - / - / - / - 5.53 / 0.69 / 0.70 / 0.69 / 0.86 - maxwell_scudnn_128x128_stridedB_splitK_interi... 184 - / - / - / - / - 167.03 / 0.91 / 2.28 / 0.19 / 25.92 - maxwell_scudnn_128x128_stridedB_interior_nn 200 - / - / - / - / - 105.10 / 0.53 / 0.97 / 0.09 / 16.31 - MEMSET 104 - / - / - / - / - 0.12 / 0.00 / 0.00 / 0.00 / 0.02 - maxwell_scudnn_128x128_stridedB_small_nn 24 - / - / - / - / - 87.58 / 3.65 / 4.00 / 3.53 / 13.59 - void cudnn::winograd_nonfused::winogradWgradD... 
72 - / - / - / - / - 15.66 / 0.22 / 0.36 / 0.09 / 2.43 - void cudnn::winograd_nonfused::winogradWgradD... 72 - / - / - / - / - 31.64 / 0.44 / 0.75 / 0.19 / 4.91 - maxwell_sgemm_128x64_nt 72 - / - / - / - / - 62.03 / 0.86 / 1.09 / 0.75 / 9.63 - void cudnn::winograd_nonfused::winogradWgradO... 72 - / - / - / - / - 14.45 / 0.20 / 0.49 / 0.04 / 2.24 - void cudnn::winograd::generateWinogradTilesKe... 48 - / - / - / - / - 1.78 / 0.04 / 0.06 / 0.02 / 0.28 - maxwell_scudnn_winograd_128x128_ldg1_ldg4_til... 24 - / - / - / - / - 45.94 / 1.91 / 1.93 / 1.90 / 7.13 - maxwell_scudnn_winograd_128x128_ldg1_ldg4_til... 24 - / - / - / - / - 40.93 / 1.71 / 1.72 / 1.69 / 6.35 - maxwell_scudnn_128x32_stridedB_splitK_interio... 24 - / - / - / - / - 9.91 / 0.41 / 0.77 / 0.15 / 1.54 - GpuMemcpyAsync:CPU->GPU 64 0.68 / 0.01 / 0.02 / 0.01 / 1.27 0.09 / 0.00 / 0.00 / 0.00 / 0.01 - MEMCPY_HtoD 64 - / - / - / - / - 0.09 / 0.00 / 0.00 / 0.00 / 100.00 - void phi::funcs::ConcatKernel_(float con... 16 - / - / - / - / - 2.84 / 0.18 / 0.36 / 0.06 / 0.42 - void phi::funcs::ForRangeElemwiseOp(float cons... 16 - / - / - / - / - 2.49 / 0.16 / 0.37 / 0.06 / 0.37 - void axpy_kernel_val(cublasAxpyPa... 16 - / - / - / - / - 1.63 / 0.10 / 0.14 / 0.07 / 0.24 - sync_batch_norm_grad grad_node 376 37.90 / 0.10 / 0.31 / 0.08 / 3.07 670.62 / 1.78 / 39.29 / 0.13 / 23.81 - sync_batch_norm_grad::infer_shape 376 1.60 / 0.00 / 0.01 / 0.00 / 4.22 0.00 / 0.00 / 0.00 / 0.00 / 0.00 - sync_batch_norm_grad::compute 376 23.26 / 0.06 / 0.10 / 0.06 / 61.37 555.96 / 1.48 / 39.29 / 0.13 / 82.90 - void paddle::operators::KeBackwardLocalStats<... 376 - / - / - / - / - 129.62 / 0.34 / 1.83 / 0.04 / 23.32 - ncclAllReduceRingLLKernel_sum_f32(ncclColl) 376 - / - / - / - / - 128.00 / 0.34 / 37.70 / 0.01 / 23.02 - void paddle::operators::KeBNBackwardScaleBias... 
376 - / - / - / - / - 126.37 / 0.34 / 1.84 / 0.03 / 22.73 - void paddle::operators::KeBNBackwardDataGPU 64 0.71 / 0.01 / 0.02 / 0.01 / 1.88 0.08 / 0.00 / 0.00 / 0.00 / 0.01 - MEMCPY_HtoD 64 - / - / - / - / - 0.08 / 0.00 / 0.00 / 0.00 / 100.00 - void phi::funcs::ConcatKernel_(float con... 16 - / - / - / - / - 6.40 / 0.40 / 0.53 / 0.34 / 0.95 - void phi::funcs::ForRangeElemwiseOp(float cons... 16 - / - / - / - / - 6.93 / 0.43 / 0.76 / 0.34 / 1.03 + conv2d_grad grad_node 296 53.70 / 0.18 / 0.40 / 0.14 / 4.34 679.11 / 2.29 / 5.75 / 0.24 / 24.11 + conv2d_grad::infer_shape 296 0.44 / 0.00 / 0.00 / 0.00 / 0.81 0.00 / 0.00 / 0.00 / 0.00 / 0.00 + conv2d_grad::compute 296 44.09 / 0.15 / 0.31 / 0.10 / 82.10 644.39 / 2.18 / 5.75 / 0.24 / 94.89 + cudnn::maxwell::gemm::computeWgradOffsetsKern... 224 - / - / - / - / - 0.50 / 0.00 / 0.00 / 0.00 / 0.08 + void scalePackedTensor_kernel(c... 224 - / - / - / - / - 0.79 / 0.00 / 0.01 / 0.00 / 0.12 + cudnn::maxwell::gemm::computeBOffsetsKernel(c... 464 - / - / - / - / - 0.95 / 0.00 / 0.01 / 0.00 / 0.15 + maxwell_scudnn_128x32_stridedB_splitK_large_nn 8 - / - / - / - / - 15.70 / 1.96 / 1.97 / 1.96 / 2.44 + cudnn::maxwell::gemm::computeOffsetsKernel(cu... 240 - / - / - / - / - 0.54 / 0.00 / 0.00 / 0.00 / 0.08 + maxwell_scudnn_128x32_stridedB_interior_nn 8 - / - / - / - / - 9.53 / 1.19 / 1.19 / 1.19 / 1.48 + maxwell_scudnn_128x64_stridedB_splitK_interio... 8 - / - / - / - / - 28.67 / 3.58 / 3.59 / 3.58 / 4.45 + maxwell_scudnn_128x64_stridedB_interior_nn 8 - / - / - / - / - 5.53 / 0.69 / 0.70 / 0.69 / 0.86 + maxwell_scudnn_128x128_stridedB_splitK_interi... 184 - / - / - / - / - 167.03 / 0.91 / 2.28 / 0.19 / 25.92 + maxwell_scudnn_128x128_stridedB_interior_nn 200 - / - / - / - / - 105.10 / 0.53 / 0.97 / 0.09 / 16.31 + MEMSET 104 - / - / - / - / - 0.12 / 0.00 / 0.00 / 0.00 / 0.02 + maxwell_scudnn_128x128_stridedB_small_nn 24 - / - / - / - / - 87.58 / 3.65 / 4.00 / 3.53 / 13.59 + void cudnn::winograd_nonfused::winogradWgradD... 
72 - / - / - / - / - 15.66 / 0.22 / 0.36 / 0.09 / 2.43 + void cudnn::winograd_nonfused::winogradWgradD... 72 - / - / - / - / - 31.64 / 0.44 / 0.75 / 0.19 / 4.91 + maxwell_sgemm_128x64_nt 72 - / - / - / - / - 62.03 / 0.86 / 1.09 / 0.75 / 9.63 + void cudnn::winograd_nonfused::winogradWgradO... 72 - / - / - / - / - 14.45 / 0.20 / 0.49 / 0.04 / 2.24 + void cudnn::winograd::generateWinogradTilesKe... 48 - / - / - / - / - 1.78 / 0.04 / 0.06 / 0.02 / 0.28 + maxwell_scudnn_winograd_128x128_ldg1_ldg4_til... 24 - / - / - / - / - 45.94 / 1.91 / 1.93 / 1.90 / 7.13 + maxwell_scudnn_winograd_128x128_ldg1_ldg4_til... 24 - / - / - / - / - 40.93 / 1.71 / 1.72 / 1.69 / 6.35 + maxwell_scudnn_128x32_stridedB_splitK_interio... 24 - / - / - / - / - 9.91 / 0.41 / 0.77 / 0.15 / 1.54 + GpuMemcpyAsync:CPU->GPU 64 0.68 / 0.01 / 0.02 / 0.01 / 1.27 0.09 / 0.00 / 0.00 / 0.00 / 0.01 + MEMCPY_HtoD 64 - / - / - / - / - 0.09 / 0.00 / 0.00 / 0.00 / 100.00 + void phi::funcs::ConcatKernel_(float con... 16 - / - / - / - / - 2.84 / 0.18 / 0.36 / 0.06 / 0.42 + void phi::funcs::ForRangeElemwiseOp(float cons... 16 - / - / - / - / - 2.49 / 0.16 / 0.37 / 0.06 / 0.37 + void axpy_kernel_val(cublasAxpyPa... 16 - / - / - / - / - 1.63 / 0.10 / 0.14 / 0.07 / 0.24 + sync_batch_norm_grad grad_node 376 37.90 / 0.10 / 0.31 / 0.08 / 3.07 670.62 / 1.78 / 39.29 / 0.13 / 23.81 + sync_batch_norm_grad::infer_shape 376 1.60 / 0.00 / 0.01 / 0.00 / 4.22 0.00 / 0.00 / 0.00 / 0.00 / 0.00 + sync_batch_norm_grad::compute 376 23.26 / 0.06 / 0.10 / 0.06 / 61.37 555.96 / 1.48 / 39.29 / 0.13 / 82.90 + void paddle::operators::KeBackwardLocalStats<... 376 - / - / - / - / - 129.62 / 0.34 / 1.83 / 0.04 / 23.32 + ncclAllReduceRingLLKernel_sum_f32(ncclColl) 376 - / - / - / - / - 128.00 / 0.34 / 37.70 / 0.01 / 23.02 + void paddle::operators::KeBNBackwardScaleBias... 
376 - / - / - / - / - 126.37 / 0.34 / 1.84 / 0.03 / 22.73 + void paddle::operators::KeBNBackwardDataGPU 64 0.71 / 0.01 / 0.02 / 0.01 / 1.88 0.08 / 0.00 / 0.00 / 0.00 / 0.01 + MEMCPY_HtoD 64 - / - / - / - / - 0.08 / 0.00 / 0.00 / 0.00 / 100.00 + void phi::funcs::ConcatKernel_(float con... 16 - / - / - / - / - 6.40 / 0.40 / 0.53 / 0.34 / 0.95 + void phi::funcs::ForRangeElemwiseOp(float cons... 16 - / - / - / - / - 6.93 / 0.43 / 0.76 / 0.34 / 1.03 ``` Operator Summary用于展示框架中算子(op)的执行信息。对于每一个Op,可以通过打印表单时候的op_detail选项控制是否打印出Op执行过程里面的子过程。同时展示每个子过程中的GPU上的活动,且子过程的活动算时间占比时以上层的时间为总时间。 @@ -376,22 +376,22 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 (由于原始表单较长,这里截取一部分进行展示) ---------------------------------------------------------------Kernel Summary--------------------------------------------------------------- Time unit: ms - ------------------------------------------------------------------------------------------ ------ ---------------------------------------- - Name Calls GPU Total / Avg / Max / Min / Ratio(%) - ------------------------------------------------------------------------------------------ ------ ---------------------------------------- - void paddle::operators::KeNormAffine 376 362.11 / 0.96 / 5.43 / 0.09 / 12.97 - ncclAllReduceRingLLKernel_sum_f32(ncclColl) 784 257.23 / 0.33 / 37.70 / 0.01 / 9.22 - maxwell_scudnn_winograd_128x128_ldg1_ldg4_tile418n_nt 72 176.84 / 2.46 / 3.35 / 1.90 / 6.34 - void paddle::operators::KeBNBackwardData 376 171.97 / 0.46 / 2.58 / 0.04 / 6.16 - maxwell_scudnn_128x128_stridedB_splitK_interior_nn 184 167.03 / 0.91 / 2.28 / 0.19 / 5.99 - void paddle::operators::KeBackwardLocalStats 376 362.11 / 0.96 / 5.43 / 0.09 / 12.97 + ncclAllReduceRingLLKernel_sum_f32(ncclColl) 784 257.23 / 0.33 / 37.70 / 0.01 / 9.22 + maxwell_scudnn_winograd_128x128_ldg1_ldg4_tile418n_nt 72 176.84 / 2.46 / 3.35 / 1.90 / 6.34 + void paddle::operators::KeBNBackwardData 376 171.97 / 0.46 / 2.58 / 0.04 / 6.16 + maxwell_scudnn_128x128_stridedB_splitK_interior_nn 
184 167.03 / 0.91 / 2.28 / 0.19 / 5.99 + void paddle::operators::KeBackwardLocalStatsCPU 48 1519.87 / 31.66 / 213.82 / 0.02 / 30.73 0.07 / 0.00 / 0.00 / 0.00 / 0.00 - GpuMemcpyAsync:CPU->GPU 216 2.85 / 0.01 / 0.04 / 0.01 / 0.06 0.29 / 0.00 / 0.00 / 0.00 / 0.01 - GpuMemcpyAsync(same_gpu):GPU->GPU 168 3.61 / 0.02 / 0.05 / 0.01 / 0.07 0.33 / 0.00 / 0.01 / 0.00 / 0.01 - GpuMemcpySync:CUDAPinned->GPU 40 713.89 / 17.85 / 85.79 / 0.04 / 14.44 29.11 / 0.73 / 3.02 / 0.00 / 1.03 - BufferedReader:MemoryCopy 6 40.17 / 6.69 / 7.62 / 5.87 / 0.81 0.00 / 0.00 / 0.00 / 0.00 / 0.00 + --------------------------------- ------ ---------------------------------------- ---------------------------------------- + Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) + --------------------------------- ------ ---------------------------------------- ---------------------------------------- + GpuMemcpySync:GPU->CPU 48 1519.87 / 31.66 / 213.82 / 0.02 / 30.73 0.07 / 0.00 / 0.00 / 0.00 / 0.00 + GpuMemcpyAsync:CPU->GPU 216 2.85 / 0.01 / 0.04 / 0.01 / 0.06 0.29 / 0.00 / 0.00 / 0.00 / 0.01 + GpuMemcpyAsync(same_gpu):GPU->GPU 168 3.61 / 0.02 / 0.05 / 0.01 / 0.07 0.33 / 0.00 / 0.01 / 0.00 / 0.01 + GpuMemcpySync:CUDAPinned->GPU 40 713.89 / 17.85 / 85.79 / 0.04 / 14.44 29.11 / 0.73 / 3.02 / 0.00 / 1.03 + BufferedReader:MemoryCopy 6 40.17 / 6.69 / 7.62 / 5.87 / 0.81 0.00 / 0.00 / 0.00 / 0.00 / 0.00 --------------------------------- ------ ---------------------------------------- ---------------------------------------- ``` @@ -417,12 +417,12 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 ```text ------------------------------------------UserDefined Summary------------------------------------------ Time unit: ms - ----------- ------ ---------------------------------------- ---------------------------------------- - Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) - ----------- ------ ---------------------------------------- 
---------------------------------------- + ----------- ------ ---------------------------------------- ---------------------------------------- + Name Calls CPU Total / Avg / Max / Min / Ratio(%) GPU Total / Avg / Max / Min / Ratio(%) + ----------- ------ ---------------------------------------- ---------------------------------------- --------------------------------------Thread: All threads merged--------------------------------------- - MyRecord 8 0.15 / 0.02 / 0.02 / 0.02 / 0.00 0.00 / 0.00 / 0.00 / 0.00 / 0.00 - ----------- ------ ---------------------------------------- ---------------------------------------- + MyRecord 8 0.15 / 0.02 / 0.02 / 0.02 / 0.00 0.00 / 0.00 / 0.00 / 0.00 / 0.00 + ----------- ------ ---------------------------------------- ---------------------------------------- ``` diff --git a/docs/install/compile/fromsource_en.rst b/docs/install/compile/fromsource_en.rst index b69144e1996..3e1592dc791 100644 --- a/docs/install/compile/fromsource_en.rst +++ b/docs/install/compile/fromsource_en.rst @@ -5,7 +5,7 @@ You can also choose to compile and install PaddlePaddle in the way of source code compilation. However, due to the diversity of the native environment, complicated problems may occur when compiling the source code, which may cause your installation to fail. In order to ensure your smooth installation, it is recommended that you prefer the normal installation method. .. toctree:: - + linux-compile_en.md macos-compile_en.md diff --git a/docs/install/compile/linux-compile.md b/docs/install/compile/linux-compile.md index 842f0876ea7..eb514ab975d 100644 --- a/docs/install/compile/linux-compile.md +++ b/docs/install/compile/linux-compile.md @@ -197,7 +197,7 @@ cd /paddle/build/python/dist #### 12. 
在当前机器或目标机器安装编译好的`.whl`包: -For Python3: +For Python3: ``` pip3.7 install -U [whl包的名字] ``` diff --git a/docs/install/compile/linux-compile_en.md b/docs/install/compile/linux-compile_en.md index c03ab80315d..193d8546ecc 100644 --- a/docs/install/compile/linux-compile_en.md +++ b/docs/install/compile/linux-compile_en.md @@ -549,7 +549,7 @@ After the installation is complete, you can use `python` or `python3` to enter t ``` import paddle ``` -and then +and then ``` paddle.utils.run_check() ``` diff --git a/docs/install/conda/fromconda_en.rst b/docs/install/conda/fromconda_en.rst index ac3878e69fb..fb1eb259379 100644 --- a/docs/install/conda/fromconda_en.rst +++ b/docs/install/conda/fromconda_en.rst @@ -3,7 +3,7 @@ ============================== .. toctree:: - + linux-conda_en.md macos-conda_en.md diff --git a/docs/install/docker/fromdocker_en.rst b/docs/install/docker/fromdocker_en.rst index 1fd18637fd0..d44f176367f 100644 --- a/docs/install/docker/fromdocker_en.rst +++ b/docs/install/docker/fromdocker_en.rst @@ -3,6 +3,6 @@ ============================== .. toctree:: - + macos-docker_en.md diff --git a/docs/install/index_cn.rst b/docs/install/index_cn.rst index e46ad856ed2..d5ca48eb684 100644 --- a/docs/install/index_cn.rst +++ b/docs/install/index_cn.rst @@ -53,7 +53,7 @@ * Windows 安装 GPU 版本 - * Windows 7/8/10 支持 CUDA 10.1/10.2/11.2 单卡模式 + * Windows 7/8/10 支持 CUDA 10.1/10.2/11.2 单卡模式 * 不支持 **nvidia-docker** 方式安装 * Ubuntu 安装 GPU 版本 @@ -110,9 +110,9 @@ 3. 确认您需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python 使用以下命令输出 Python 路径,根据您的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径 - + 在 Windows 环境下,输出 Python 路径的命令为: - + :: where python @@ -128,14 +128,14 @@ 使用以下命令确认是 3.6/3.7/3.8/3.9 :: - + python --version -5. 检查 pip 的版本,确认是 20.2.2+ +5. 检查 pip 的版本,确认是 20.2.2+ :: - - python -m ensurepip + + python -m ensurepip python -m pip --version @@ -148,11 +148,11 @@ 7. 如果您希望使用 `pip `_ 进行安装PaddlePaddle可以直接使用以下命令: - (1). **CPU版本** :如果您只是想安装CPU版本请参考如下命令安装 + (1). 
**CPU版本** :如果您只是想安装CPU版本请参考如下命令安装 安装CPU版本的命令为: :: - + python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple 或 @@ -160,7 +160,7 @@ python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple - (2). **GPU版本** :如果您想使用GPU版本请参考如下命令安装 + (2). **GPU版本** :如果您想使用GPU版本请参考如下命令安装 注意: @@ -175,7 +175,7 @@ python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple - + 请确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python。根据您的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径。 8. 验证安装 diff --git a/docs/install/index_en.rst b/docs/install/index_en.rst index 08d97c18c09..5212fc609c8 100644 --- a/docs/install/index_en.rst +++ b/docs/install/index_en.rst @@ -14,7 +14,7 @@ * Add support for ROCm platform (2.1 Paddle's support for ROCm platform is experimental) * Linux system-related packages have been split into two types of packages, avx and noavx (Most machines use the avx instruction set. You can check whether your machine supports it through commands on the `PIP installation under Linux `_ page ) * Add a CPU image with jupyter pre-installed. Jupyter service will be started after starting the image -* Added support for Windows Visual Studio 2017 compilation, fully upgraded from VS2015 to VS2017 +* Added support for Windows Visual Studio 2017 compilation, fully upgraded from VS2015 to VS2017 ------------------------ @@ -50,7 +50,7 @@ The manuals will guide you to build and install PaddlePaddle on your 64-bit desk >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> * Currently, **PaddlePaddle** supports **CUDA** driver of **NVIDIA** graphics card and **ROCm** driver of **AMD** card. 
-* You need to install `cuDNN `_ , and version 7.6+ is required(For CUDA10.1/10.2) +* You need to install `cuDNN `_ , and version 7.6+ is required(For CUDA10.1/10.2) * If you need GPU multi-card mode, you need to install `NCCL 2 `_ * Only Ubuntu/CentOS support NCCL 2 @@ -58,8 +58,8 @@ The manuals will guide you to build and install PaddlePaddle on your 64-bit desk * Windows install GPU version - * Windows 7 / 8 / 10 support CUDA 10.1/10.2/11.2 single-card mode, but don't support CUDA 9.1/9.2/10.1 - * don't support install using **nvidia-docker** + * Windows 7 / 8 / 10 support CUDA 10.1/10.2/11.2 single-card mode, but don't support CUDA 9.1/9.2/10.1 + * don't support install using **nvidia-docker** * Ubuntu install GPU version * Ubuntu 16.04 supports CUDA 10.1/10.2/11.2 @@ -91,13 +91,13 @@ Please make sure your environment meets the above conditions. If you have other * support NCCL v2.4.2-v2.4.8 under CUDA10.1 * Ubuntu 18.04: - * support v2.4.2-v2.4.8 under CUDA10.1 + * support v2.4.2-v2.4.8 under CUDA10.1 * Support for CentOS * CentOS 6: not support NCCL * CentOS 7: - * support NCCL v2.4.2-v2.4.8 under CUDA10.1 + * support NCCL v2.4.2-v2.4.8 under CUDA10.1 * Support for MacOS * not support NCCL @@ -117,9 +117,9 @@ This section describes how to use pip to install. 3. Confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python Use the following command to output Python path. Depending on your environment, you may need to replace Python in all command lines in the description with specific Python path - + In the Windows environment, the command to output Python path is: - + :: where python @@ -135,14 +135,14 @@ This section describes how to use pip to install. Confirm the Python is 3.6/3.7/3.8/3.9 using command :: - + python --version - -5. Check the version of pip and confirm it is 20.2.2 or above + +5. 
Check the version of pip and confirm it is 20.2.2 or above :: - - python -m ensurepip + + python -m ensurepip python -m pip --version @@ -159,13 +159,13 @@ This section describes how to use pip to install. Command to install CPU version is: :: - + python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple or python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple - + (2). **GPU version** : If you only want to install GPU version, please refer to command below @@ -183,7 +183,7 @@ This section describes how to use pip to install. python -m pip install paddlepaddle-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple - + Please confirm that the Python where you need to install PaddlePaddle is your expected location, because your computer may have multiple Python. Depending on the environment, you may need to replace Python in all command lines in the instructions with Python 3 or specific Python path. 8. Verify installation diff --git a/docs/install/install_Kunlun_en.md b/docs/install/install_Kunlun_en.md index 68b7ad0aedc..e908595c477 100644 --- a/docs/install/install_Kunlun_en.md +++ b/docs/install/install_Kunlun_en.md @@ -157,38 +157,38 @@ Execute cmake : For Python3 ``` -cmake .. -DPY_VERSION=3.6 \ - -DCMAKE_BUILD_TYPE=Release \ - -DWITH_GPU=OFF \ - -DWITH_XPU=ON \ - -DON_INFER=ON \ - -DWITH_PYTHON=ON \ - -DWITH_AVX=ON \ - -DWITH_MKL=ON \ - -DWITH_MKLDNN=ON \ - -DWITH_XPU_BKCL=ON \ - -DWITH_DISTRIBUTE=ON \ +cmake .. -DPY_VERSION=3.6 \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_GPU=OFF \ + -DWITH_XPU=ON \ + -DON_INFER=ON \ + -DWITH_PYTHON=ON \ + -DWITH_AVX=ON \ + -DWITH_MKL=ON \ + -DWITH_MKLDNN=ON \ + -DWITH_XPU_BKCL=ON \ + -DWITH_DISTRIBUTE=ON \ -DWITH_NCCL=OFF - + make -j20 ``` For Python2 ``` -cmake .. 
-DPY_VERSION=2.7 \ - -DCMAKE_BUILD_TYPE=Release \ - -DWITH_GPU=OFF \ - -DWITH_XPU=ON \ - -DON_INFER=ON \ - -DWITH_PYTHON=ON \ - -DWITH_AVX=ON \ - -DWITH_MKL=ON \ - -DWITH_MKLDNN=ON \ - -DWITH_XPU_BKCL=ON \ - -DWITH_DISTRIBUTE=ON \ +cmake .. -DPY_VERSION=2.7 \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_GPU=OFF \ + -DWITH_XPU=ON \ + -DON_INFER=ON \ + -DWITH_PYTHON=ON \ + -DWITH_AVX=ON \ + -DWITH_MKL=ON \ + -DWITH_MKLDNN=ON \ + -DWITH_XPU_BKCL=ON \ + -DWITH_DISTRIBUTE=ON \ -DWITH_NCCL=OFF - + make -j20 ``` @@ -207,20 +207,20 @@ ulimit -n 4096 python_exe="/usr/bin/python3.7" export XPU_SDK_ROOT=$PWD/xpu_sdk -cmake .. -DPY_VERSION=3.7 \ - -DPYTHON_EXECUTABLE=$python_exe \ - -DWITH_ARM=ON \ - -DWITH_AARCH64=ON \ - -DWITH_TESTING=OFF \ - -DCMAKE_BUILD_TYPE=Release \ - -DON_INFER=ON \ - -DWITH_XBYAK=OFF \ - -DWITH_XPU=ON \ - -DWITH_GPU=OFF \ - -DWITH_LITE=ON \ - -DLITE_GIT_TAG=release/v2.9 \ +cmake .. -DPY_VERSION=3.7 \ + -DPYTHON_EXECUTABLE=$python_exe \ + -DWITH_ARM=ON \ + -DWITH_AARCH64=ON \ + -DWITH_TESTING=OFF \ + -DCMAKE_BUILD_TYPE=Release \ + -DON_INFER=ON \ + -DWITH_XBYAK=OFF \ + -DWITH_XPU=ON \ + -DWITH_GPU=OFF \ + -DWITH_LITE=ON \ + -DLITE_GIT_TAG=release/v2.9 \ -DXPU_SDK_ROOT=${XPU_SDK_ROOT} - + make VERBOSE=1 TARGET=ARMV8 -j32 ``` diff --git a/docs/install/pip/frompip_en.rst b/docs/install/pip/frompip_en.rst index 77cd91b4683..7706c500279 100644 --- a/docs/install/pip/frompip_en.rst +++ b/docs/install/pip/frompip_en.rst @@ -3,7 +3,7 @@ ============================== .. 
toctree:: - + linux-pip_en.md macos-pip_en.md diff --git a/docs/practices/cv/index_cn.rst b/docs/practices/cv/index_cn.rst index a432fec5e74..e28fd70d3a4 100644 --- a/docs/practices/cv/index_cn.rst +++ b/docs/practices/cv/index_cn.rst @@ -4,9 +4,9 @@ 这里提供了一些计算机视觉的案例: - + - `图像分类 <./image_classification.html>`_ :介绍使用 PaddlePaddle 在MNIST数据集上完成图像分类。 - - `图像分类 <./convnet_image_classification.html>`_ :介绍使用 PaddlePaddle 在Cifar10数据集上完成图像分类。 + - `图像分类 <./convnet_image_classification.html>`_ :介绍使用 PaddlePaddle 在Cifar10数据集上完成图像分类。 - `以图搜图 <./image_search.html>`_ : 介绍使用 PaddlePaddle 实现以图搜图。 - `图像分割 <./image_segmentation.html>`_ : 介绍使用 PaddlePaddle 实现U-Net模型完成图像分割。 - `OCR <./image_ocr.html>`_ : 介绍使用 PaddlePaddle 实现 OCR。 @@ -21,7 +21,7 @@ image_classification.ipynb convnet_image_classification.ipynb - image_search.ipynb + image_search.ipynb image_segmentation.ipynb image_ocr.ipynb super_resolution_sub_pixel.ipynb diff --git a/docs/practices/index_cn.rst b/docs/practices/index_cn.rst index 992bb490f53..650f84f6217 100644 --- a/docs/practices/index_cn.rst +++ b/docs/practices/index_cn.rst @@ -8,7 +8,7 @@ 快速上手: - `hello paddle <./quick_start/hello_paddle.html>`_ :简单介绍 PaddlePaddle,完成你的第一个 PaddlePaddle 项目。 - - `动态图 <./quick_start/dynamic_graph.html>`_ :介绍使用 PaddlePaddle 动态图。 + - `动态图 <./quick_start/dynamic_graph.html>`_ :介绍使用 PaddlePaddle 动态图。 - `高层API详细介绍 <./quick_start/high_level_api.html>`_ :详细介绍 PaddlePaddle 高层API。 - `模型加载与保存 <./quick_start/save_model.html>`_ :介绍 PaddlePaddle 模型的加载与保存。 - `线性回归 <./quick_start/linear_regression.html>`_ :介绍使用 PaddlePaddle 实现线性回归任务。 @@ -16,7 +16,7 @@ 计算机视觉: - `图像分类 <./cv/image_classification.html>`_ :介绍使用 PaddlePaddle 在MNIST数据集上完成图像分类。 - - `图像分类 <./cv/convnet_image_classification.html>`_ :介绍使用 PaddlePaddle 在Cifar10数据集上完成图像分类。 + - `图像分类 <./cv/convnet_image_classification.html>`_ :介绍使用 PaddlePaddle 在Cifar10数据集上完成图像分类。 - `以图搜图 <./cv/image_search.html>`_ : 介绍使用 PaddlePaddle 实现以图搜图。 - `图像分割 <./cv/image_segmentation.html>`_ : 介绍使用 PaddlePaddle 
实现U-Net模型完成图像分割。 - `OCR <./cv/image_ocr.html>`_ : 介绍使用 PaddlePaddle 实现 OCR。 diff --git a/docs/practices/jit/index_cn.rst b/docs/practices/jit/index_cn.rst index be6d4b4afc5..30bd40ab0c0 100644 --- a/docs/practices/jit/index_cn.rst +++ b/docs/practices/jit/index_cn.rst @@ -3,7 +3,7 @@ ################ 这里提供了一篇动转静的示例: - + - `使用动转静完成以图搜图 <./image_search_with_jit.html>`_ : 介绍使用 PaddlePaddle 通过动转静完成以图搜图。 .. toctree:: diff --git a/docs/practices/nlp/index_cn.rst b/docs/practices/nlp/index_cn.rst index 51e27c6d42a..a8d01d80289 100644 --- a/docs/practices/nlp/index_cn.rst +++ b/docs/practices/nlp/index_cn.rst @@ -4,7 +4,7 @@ 这里提供了一些自然语言处理的示例: - + - `N-Gram <./n_gram_model.html>`_ :介绍使用 PaddlePaddle 实现N-Gram 模型。 - `文本分类 <./imdb_bow_classification.html>`_ :介绍使用 PaddlePaddle 在IMDB数据集上完成文本分类。 - `情感分类 <./pretrained_word_embeddings.html>`_ :介绍使用预训练词向量完成情感分类。 diff --git a/docs/practices/quick_start/index_cn.rst b/docs/practices/quick_start/index_cn.rst index 9343b583398..28b6acd8696 100644 --- a/docs/practices/quick_start/index_cn.rst +++ b/docs/practices/quick_start/index_cn.rst @@ -4,10 +4,10 @@ 这里提供了一些简单的案例,可以帮助你快速上手 PaddlePaddle : - + - `hello paddle <./hello_paddle.html>`_ :简单介绍 PaddlePaddle,完成你的第一个 PaddlePaddle 项目。 - - `动态图 <./dynamic_graph.html>`_ :介绍使用 PaddlePaddle 动态图。 + - `动态图 <./dynamic_graph.html>`_ :介绍使用 PaddlePaddle 动态图。 - `高层API详细介绍 <./high_level_api.html>`_ :详细介绍 PaddlePaddle 高层API。 - `模型加载与保存 <./save_model.html>`_ :介绍 PaddlePaddle 模型的加载与保存。 - `线性回归 <./linear_regression.html>`_ :介绍使用 PaddlePaddle 实现线性回归任务。 diff --git a/docs/practices/recommendations/index_cn.rst b/docs/practices/recommendations/index_cn.rst index e86965406f9..7d9db330979 100644 --- a/docs/practices/recommendations/index_cn.rst +++ b/docs/practices/recommendations/index_cn.rst @@ -4,7 +4,7 @@ 这里提供了一篇推荐的示例: - + - `电影推荐 <./collaborative_filtering.html>`_ : 介绍使用 PaddlePaddle 实现协同过滤完成电影推荐。 diff --git a/docs/practices/reinforcement_learning/index_cn.rst 
b/docs/practices/reinforcement_learning/index_cn.rst index fb6ef38ca99..0aac4a2f831 100644 --- a/docs/practices/reinforcement_learning/index_cn.rst +++ b/docs/practices/reinforcement_learning/index_cn.rst @@ -4,7 +4,7 @@ 这里提供了一些强化学习的示例: - + - `演员-评论家算法 <./actor_critic_method.html>`_ : 介绍使用 PaddlePaddle 实现演员-评论家算法。 - `深度确定梯度策略(DDPG) <./deep_deterministic_policy_gradient.html>`_ : 介绍使用 PaddlePaddle 实现 DDPG 算法。 diff --git a/docs/practices/time_series/index_cn.rst b/docs/practices/time_series/index_cn.rst index dd594eb143c..c973e45b796 100644 --- a/docs/practices/time_series/index_cn.rst +++ b/docs/practices/time_series/index_cn.rst @@ -4,7 +4,7 @@ 这里提供了一篇时序数据的示例: - + - `异常数据检测 <./autoencoder.html>`_ : 介绍使用 PaddlePaddle 完成时序数据异常点检测。 diff --git a/docs/release_note_cn.md b/docs/release_note_cn.md index 676bcdd8169..3b2dcd3cb0c 100644 --- a/docs/release_note_cn.md +++ b/docs/release_note_cn.md @@ -171,9 +171,9 @@ - 预编译安装包中移除CUDA sm35 ARCH: 受到包体积大小的影响,在预编译的安装包中移除了 CUDA sm35 架构。 ([#41754](https://github.com/PaddlePaddle/Paddle/pull/41754)) -- `paddle.to_tensor` 将一个 python int scalar 转换为 Tensor 时,在 Windows 上的默认数据类型由 int32 变为 int64,从而与 Linux/Mac 保持对齐。([#39662](https://github.com/PaddlePaddle/Paddle/pull/39662)) +- `paddle.to_tensor` 将一个 python int scalar 转换为 Tensor 时,在 Windows 上的默认数据类型由 int32 变为 int64,从而与 Linux/Mac 保持对齐。([#39662](https://github.com/PaddlePaddle/Paddle/pull/39662)) -- 为了与 python3 下的除法行为保持一致,除法符号 `/` 从 rounding divide 变成 true divide,计算输出结果的数据类型从 int 切换成 float。 ([#40890](https://github.com/PaddlePaddle/Paddle/pull/40890)) +- 为了与 python3 下的除法行为保持一致,除法符号 `/` 从 rounding divide 变成 true divide,计算输出结果的数据类型从 int 切换成 float。 ([#40890](https://github.com/PaddlePaddle/Paddle/pull/40890)) @@ -285,254 +285,254 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### API - 新增4个自动微分类 API,支持科学计算需求,具体列表如下:([#40692](https://github.com/PaddlePaddle/Paddle/pull/40692)) - + - `paddle.incubate.autograd.vjp`,计算向量-雅可比矩阵乘积。 - + - `paddle.incubate.autograd.jvp`,计算雅可比矩阵-向量乘积。 - + - `paddle.incubate.autograd.Jacobian`,计算雅可比矩阵。 - + - `paddle.incubate.autograd.Hessian`,计算海森矩阵。 - 新增线性代数类 API - - - 新增 `paddle.linalg.triangular_solve`,计算具有唯一解的三角系数线性方程组。([#36714](https://github.com/PaddlePaddle/Paddle/pull/36714)) - - - 新增 `paddle.linalg.eig`,计算一般方阵的特征分解。([#35764](https://github.com/PaddlePaddle/Paddle/pull/35764)) - + + - 新增 `paddle.linalg.triangular_solve`,计算具有唯一解的三角系数线性方程组。([#36714](https://github.com/PaddlePaddle/Paddle/pull/36714)) + + - 新增 `paddle.linalg.eig`,计算一般方阵的特征分解。([#35764](https://github.com/PaddlePaddle/Paddle/pull/35764)) + - 新增 `paddle.linalg.sovle`,计算线性方程组的解。([#35715](https://github.com/PaddlePaddle/Paddle/pull/35715)) - - - 新增 `paddle.linalg.lstsq`,计算线性方程组的最小二乘解。([#38585](https://github.com/PaddlePaddle/Paddle/pull/38585), [#38621](https://github.com/PaddlePaddle/Paddle/pull/38621)) - + + - 新增 `paddle.linalg.lstsq`,计算线性方程组的最小二乘解。([#38585](https://github.com/PaddlePaddle/Paddle/pull/38585), [#38621](https://github.com/PaddlePaddle/Paddle/pull/38621)) + - 新增 `paddle.linalg.qr`,计算矩阵的 QR 分解。([#35742](https://github.com/PaddlePaddle/Paddle/pull/35742), [#38824](https://github.com/PaddlePaddle/Paddle/pull/38824)) - - - 新增 `paddle.inner`,计算矩阵内积。([#37706](https://github.com/PaddlePaddle/Paddle/pull/37706)) - - - 新增 `paddle.outer`,计算矩阵外积。([#37706](https://github.com/PaddlePaddle/Paddle/pull/37706)) - - - 新增 `paddle.linalg.cov`,计算向量间协方差。([#38392](https://github.com/PaddlePaddle/Paddle/pull/38392)) - - - 新增 `paddle.linalg.cholesky_sovle`,计算方程 cholesky 解。([#38167](https://github.com/PaddlePaddle/Paddle/pull/38167)) - - - 新增 `paddle.linalg.lu`、 `paddle.linalg.lu_unpack`,计算矩阵 lu 分解、解压缩 lu 矩阵。([#38617](https://github.com/PaddlePaddle/Paddle/pull/38617), [#38559](https://github.com/PaddlePaddle/Paddle/pull/38559), 
[#38616](https://github.com/PaddlePaddle/Paddle/pull/38616)) + + - 新增 `paddle.inner`,计算矩阵内积。([#37706](https://github.com/PaddlePaddle/Paddle/pull/37706)) + + - 新增 `paddle.outer`,计算矩阵外积。([#37706](https://github.com/PaddlePaddle/Paddle/pull/37706)) + + - 新增 `paddle.linalg.cov`,计算向量间协方差。([#38392](https://github.com/PaddlePaddle/Paddle/pull/38392)) + + - 新增 `paddle.linalg.cholesky_sovle`,计算方程 cholesky 解。([#38167](https://github.com/PaddlePaddle/Paddle/pull/38167)) + + - 新增 `paddle.linalg.lu`、 `paddle.linalg.lu_unpack`,计算矩阵 lu 分解、解压缩 lu 矩阵。([#38617](https://github.com/PaddlePaddle/Paddle/pull/38617), [#38559](https://github.com/PaddlePaddle/Paddle/pull/38559), [#38616](https://github.com/PaddlePaddle/Paddle/pull/38616)) - 新增21个概率分布类 API,包括6个随机变量分布,13个随机变量变换,2个 KL 散度计算,用于强化学习、变分推断、科学计算等场景,具体列表如下:([#40536](https://github.com/PaddlePaddle/Paddle/pull/40536), [#38820](https://github.com/PaddlePaddle/Paddle/pull/38820), [#38558](https://github.com/PaddlePaddle/Paddle/pull/38558/files), [#38445](https://github.com/PaddlePaddle/Paddle/pull/38445), [#38244](https://github.com/PaddlePaddle/Paddle/pull/38244), [#38047](https://github.com/PaddlePaddle/Paddle/pull/38047)) - + - `paddle.distribution.ExponentialFamily`,指数分布族基类。 - + - `paddle.distribution.Beta`,`Beta` 分布。 - + - `paddle.distribution.Dirichlet`,`Dirichlet` 分布。 - + - `paddle.distribution.Independent`,独立分布,用于创建高阶分布。 - + - `paddle.distribution.TransformedDistribution`,变换分布,用于通过基础分布及一系列变换生成高阶分布。 - + - `paddle.distribution.Multionmial`,多项分布。 - + - `paddle.distribution.Transform`,随机变量变换的基类。 - + - `paddle.distribution.AbsTransform`,取绝对值变换。 - + - `paddle.distribution.AffineTransform`,仿射变换。 - + - `paddle.distribution.ChainTransform`,变换的链式组合。 - + - `paddle.distribution.ExpTransform`,指数变换。 - + - `paddle.distribution.IndependentTransform`,独立变换,用于扩展变换定义域的 `event_dim`。 - + - `paddle.distribution.PowerTransform`,幂变换。 - + - `paddle.distribution.ReshapeTransform`,`reshape` 变换。 - + - `paddle.distribution.SigmoidTransform`,`sigmoid` 变换。 - 
+ - `paddle.distribution.SoftmaxTransform`,`softmax` 变换。 - + - `paddle.distribution.StackTransform`,`stack` 变换,用于以 `stack` 方式组合多个变换。 - + - `paddle.distribution.StickBreakingTransform` , `stickbreaking` 变换。 - + - `paddle.distribution.TanhTransform`,`tanh` 变换。 - + - `paddle.distribution.kl_divergence`,计算 KL 散度。 - + - `paddle.distribution.register_kl`,注册用户自定义 KL 散度计算函数。 - 新增高层 API - - - 新增 `paddle.vision.models.AlexNet`、`paddle.vision.models.alexnet`,支持直接使用 AlexNet 模型。([#36058](https://github.com/PaddlePaddle/Paddle/pull/36058)) - - - 新增 `paddle.vision.models.DenseNet`、 `paddle.vision.models.densenet121`、 `paddle.vision.models.densenet161`、 `paddle.vision.models.densenet169`、 `paddle.vision.models.densenet201`、 `paddle.vision.models.densenet264`,支持直接使用 DenseNet 模型。([#36069](https://github.com/PaddlePaddle/Paddle/pull/36069)) - - - 新增 `paddle.vision.models.GoogLeNet`、`paddle.vision.models.googlenet`,支持直接使用 GoogLeNet 模型。([#36034](https://github.com/PaddlePaddle/Paddle/pull/36034)) - - - 新增 `paddle.vision.models.InceptionV3`、`paddle.vision.models.inception_v3`,支持直接使用 InceptionV3 模型。([#36064](https://github.com/PaddlePaddle/Paddle/pull/36064)) - - - 新增 `paddle.vision.models.MobileNetV3Small`、 `paddle.vision.models.MobileNetV3Large`、`paddle.vision.models.mobilenet_v3_small`、`paddle.vision.models.mobilenet_v3_large`,支持直接使用 MobileNetV3 模型。([#38653](https://github.com/PaddlePaddle/Paddle/pull/38653)) - - - 新增 `paddle.vision.models.resnext50_32x4d`、 `paddle.vision.models.resnext50_64x4d`、`paddle.vision.models.resnext101_32x4d`、`paddle.vision.models.resnext101_64x4d`、`paddle.vision.models.resnext152_32x4d`、`paddle.vision.models.resnext152_64x4d`,支持直接使用 ResNeXt 模型。([#36070](https://github.com/PaddlePaddle/Paddle/pull/36070)) - - - 新增 `paddle.vision.models.ShuffleNetV2`、 
`paddle.vision.models.shufflenet_v2_x0_25`、`paddle.vision.models.shufflenet_v2_x0_33`、`paddle.vision.models.shufflenet_v2_x0_5`、`paddle.vision.models.shufflenet_v2_x1_0`、`paddle.vision.models.shufflenet_v2_x1_5`、`paddle.vision.models.shufflenet_v2_x2_0`、`paddle.vision.models.shufflenet_v2_swish`,支持直接使用 ShuffleNetV2 模型。([#36067](https://github.com/PaddlePaddle/Paddle/pull/36067)) - - - 新增 `paddle.vision.models.SqueezeNet`、 `paddle.vision.models.squeezenet1_0`、`paddle.vision.models.squeezenet1_1`,支持直接使用 SqueezeNet 模型。([#36066](https://github.com/PaddlePaddle/Paddle/pull/36066)) - + + - 新增 `paddle.vision.models.AlexNet`、`paddle.vision.models.alexnet`,支持直接使用 AlexNet 模型。([#36058](https://github.com/PaddlePaddle/Paddle/pull/36058)) + + - 新增 `paddle.vision.models.DenseNet`、 `paddle.vision.models.densenet121`、 `paddle.vision.models.densenet161`、 `paddle.vision.models.densenet169`、 `paddle.vision.models.densenet201`、 `paddle.vision.models.densenet264`,支持直接使用 DenseNet 模型。([#36069](https://github.com/PaddlePaddle/Paddle/pull/36069)) + + - 新增 `paddle.vision.models.GoogLeNet`、`paddle.vision.models.googlenet`,支持直接使用 GoogLeNet 模型。([#36034](https://github.com/PaddlePaddle/Paddle/pull/36034)) + + - 新增 `paddle.vision.models.InceptionV3`、`paddle.vision.models.inception_v3`,支持直接使用 InceptionV3 模型。([#36064](https://github.com/PaddlePaddle/Paddle/pull/36064)) + + - 新增 `paddle.vision.models.MobileNetV3Small`、 `paddle.vision.models.MobileNetV3Large`、`paddle.vision.models.mobilenet_v3_small`、`paddle.vision.models.mobilenet_v3_large`,支持直接使用 MobileNetV3 模型。([#38653](https://github.com/PaddlePaddle/Paddle/pull/38653)) + + - 新增 `paddle.vision.models.resnext50_32x4d`、 `paddle.vision.models.resnext50_64x4d`、`paddle.vision.models.resnext101_32x4d`、`paddle.vision.models.resnext101_64x4d`、`paddle.vision.models.resnext152_32x4d`、`paddle.vision.models.resnext152_64x4d`,支持直接使用 ResNeXt 模型。([#36070](https://github.com/PaddlePaddle/Paddle/pull/36070)) + + - 新增 `paddle.vision.models.ShuffleNetV2`、 
`paddle.vision.models.shufflenet_v2_x0_25`、`paddle.vision.models.shufflenet_v2_x0_33`、`paddle.vision.models.shufflenet_v2_x0_5`、`paddle.vision.models.shufflenet_v2_x1_0`、`paddle.vision.models.shufflenet_v2_x1_5`、`paddle.vision.models.shufflenet_v2_x2_0`、`paddle.vision.models.shufflenet_v2_swish`,支持直接使用 ShuffleNetV2 模型。([#36067](https://github.com/PaddlePaddle/Paddle/pull/36067)) + + - 新增 `paddle.vision.models.SqueezeNet`、 `paddle.vision.models.squeezenet1_0`、`paddle.vision.models.squeezenet1_1`,支持直接使用 SqueezeNet 模型。([#36066](https://github.com/PaddlePaddle/Paddle/pull/36066)) + - 新增 `paddle.vision.models.wide_resnet50_2`、`paddle.vision.models.wide_resnet101_2`,支持直接使用 WideResNet 模型。([#36952](https://github.com/PaddlePaddle/Paddle/pull/36952)) - - - 新增`paddle.vision.ops.nms` API,支持单类别和多类别非极大抑制(non-maximum supression, nms)算法,用于目标检测预测任务加速。([#40962](https://github.com/PaddlePaddle/Paddle/pull/40962)) - - - 新增`paddle.vision.ops.roi_pool` 和 `paddle.vision.ops.RoIPool`,支持检测任务中 RoI 区域池化操作。 ([#36154](https://github.com/PaddlePaddle/Paddle/pull/36154)) - - - 新增`paddle.vision.ops.roi_align` 和 `paddle.vision.ops.RoIAlign`,支持检测任务中 RoI Align 操作。 ([#35102](https://github.com/PaddlePaddle/Paddle/pull/36154)) - - - 新增 `paddle.text.ViterbiDecoder`、`paddle.text.viterbi_decode` Viterbi 解码 API,主要用于序列标注模型的预测。 ([#35778](https://github.com/PaddlePaddle/Paddle/pull/35778)) - -- 新增 11 个 Sparse 类 API,支持创建 COO、CSR 格式的Sparse Tensor,与 Tensor 互相转换等基础功能: - + + - 新增`paddle.vision.ops.nms` API,支持单类别和多类别非极大抑制(non-maximum supression, nms)算法,用于目标检测预测任务加速。([#40962](https://github.com/PaddlePaddle/Paddle/pull/40962)) + + - 新增`paddle.vision.ops.roi_pool` 和 `paddle.vision.ops.RoIPool`,支持检测任务中 RoI 区域池化操作。 ([#36154](https://github.com/PaddlePaddle/Paddle/pull/36154)) + + - 新增`paddle.vision.ops.roi_align` 和 `paddle.vision.ops.RoIAlign`,支持检测任务中 RoI Align 操作。 ([#35102](https://github.com/PaddlePaddle/Paddle/pull/36154)) + + - 新增 `paddle.text.ViterbiDecoder`、`paddle.text.viterbi_decode` Viterbi 解码 
API,主要用于序列标注模型的预测。 ([#35778](https://github.com/PaddlePaddle/Paddle/pull/35778)) + +- 新增 11 个 Sparse 类 API,支持创建 COO、CSR 格式的Sparse Tensor,与 Tensor 互相转换等基础功能: + - `paddle.sparse.sparse_coo_tensor`,创建 COO 格式的 Sparse Tensor。 ([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - + - `paddle.sparse.sparse_csr_tensor`,创建 CSR 格式的 Sparse Tensor。 ([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - - - `paddle.sparse.ReLU`,支持 SparseCooTensor 的 ReLU 激活层。([#40959](https://github.com/PaddlePaddle/Paddle/pull/40959)) - - - `paddle.sparse.functional.relu`,支持 SparseCooTensor 的 ReLU 函数。([#40959](https://github.com/PaddlePaddle/Paddle/pull/40959)) - + + - `paddle.sparse.ReLU`,支持 SparseCooTensor 的 ReLU 激活层。([#40959](https://github.com/PaddlePaddle/Paddle/pull/40959)) + + - `paddle.sparse.functional.relu`,支持 SparseCooTensor 的 ReLU 函数。([#40959](https://github.com/PaddlePaddle/Paddle/pull/40959)) + - `Tensor.values()`,获取 SparseCooTensor 或者 SparseCsrTensor 的非零元素方法。([#40608](https://github.com/PaddlePaddle/Paddle/pull/40608)) - + - `Tensor.indices()`,获取 SparseCooTensor 的坐标信息的方法。([#40608](https://github.com/PaddlePaddle/Paddle/pull/40608)) - + - `Tensor.crows()`,获取 SparseCsrTensor 的压缩行信息的方法。([#40608](https://github.com/PaddlePaddle/Paddle/pull/40608)) - + - `Tensor.cols()`,获取 SparseCsrTensor 的列信息的方法。([#40608](https://github.com/PaddlePaddle/Paddle/pull/40608)) - + - `Tensor.to_sparse_coo()`,将 DenseTensor 或者 SparseCsrTensor 转换为 SparseCooTensor。 ([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - + - `Tensor.to_sparse_csr()`,将 DenseTensor 或者 SparseCooTensor 转换为 SparseCsrTensor。([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - + - `Tensor.to_dense()`,将 SparseCooTensor 或者 SparseCsrTensor 转换为 DenseTensor。([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - 新增硬件相关 API - - - 新增 `paddle.device.cuda.max_memory_allocated`、`paddle.device.cuda.max_memory_reserved`、 `paddle.device.cuda.memory_allocated` 和 `paddle.device.cuda.memory_reserved` 四个 
GPU 显存监测相关 API,方便实时查看和分析模型显存占用指标。([#38657](https://github.com/PaddlePaddle/Paddle/pull/38657)) - - - 新增 `paddle.device.cuda.get_device_properties`,支持返回 CUDA 设备属性信息。([#35661](https://github.com/PaddlePaddle/Paddle/pull/35661)) - - - 新增 `paddle.device.cuda.get_device_name` 和 `paddle.device.cuda.get_device_capability`,支持返回 GPU 设备名称信息和计算能力的主要和次要修订号。([#35672](https://github.com/PaddlePaddle/Paddle/pull/35672)) + + - 新增 `paddle.device.cuda.max_memory_allocated`、`paddle.device.cuda.max_memory_reserved`、 `paddle.device.cuda.memory_allocated` 和 `paddle.device.cuda.memory_reserved` 四个 GPU 显存监测相关 API,方便实时查看和分析模型显存占用指标。([#38657](https://github.com/PaddlePaddle/Paddle/pull/38657)) + + - 新增 `paddle.device.cuda.get_device_properties`,支持返回 CUDA 设备属性信息。([#35661](https://github.com/PaddlePaddle/Paddle/pull/35661)) + + - 新增 `paddle.device.cuda.get_device_name` 和 `paddle.device.cuda.get_device_capability`,支持返回 GPU 设备名称信息和计算能力的主要和次要修订号。([#35672](https://github.com/PaddlePaddle/Paddle/pull/35672)) - 新增 Tensor 操作 API - - - 新增 `paddle.nansum`,沿 `axis` 对输入 Tensor 求和,且忽略掉 `NaNs` 值。([#38137](https://github.com/PaddlePaddle/Paddle/pull/38137)) - + + - 新增 `paddle.nansum`,沿 `axis` 对输入 Tensor 求和,且忽略掉 `NaNs` 值。([#38137](https://github.com/PaddlePaddle/Paddle/pull/38137)) + - 新增 `paddle.nanmean`,沿 `axis`对输入 Tensor 求平均,且忽略掉 `NaNs` 值。([#40472](https://github.com/PaddlePaddle/Paddle/pull/40472)) - - - 新增 `paddle.clone`,返回输入 Tensor 的拷贝,并且提供梯度计算。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - - - 新增 `paddle.Tensor.element_size`,返回 Tensor 中的单个元素在计算机中所分配的 bytes 数量。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - - - 新增 `paddle.Tensor.to_uva_tensor`,支持将 numpy 对象转换为实际存储在 CPU,但可作为 CUDA 对象进行虚拟地址访问的功能。([#39146](https://github.com/PaddlePaddle/Paddle/pull/39146), [#38950](https://github.com/PaddlePaddle/Paddle/pull/38950)) - - - 新增`paddle.rot90`,沿 `axes` 指定的平面将 n 维 Tensor 旋转90度。([#37634](https://github.com/PaddlePaddle/Paddle/pull/37634)) - - - 新增`paddle.logit` 和 
`paddle.Tensor.logit`,计算输入 Tensor 的 logit 函数值。([#37844](https://github.com/PaddlePaddle/Paddle/pull/37844)) - - - 新增 `paddle.repeat_interleave`,沿着指定轴对输入进行复制,创建并返回到一个新的 Tensor。([#37981](https://github.com/PaddlePaddle/Paddle/pull/37981)) - - - 新增 `paddle.renorm`,把 Tensor 在指定的 `axis` 切分成多块后分别进行 p norm 操作。([#38130](https://github.com/PaddlePaddle/Paddle/pull/38130), [#38459](https://github.com/PaddlePaddle/Paddle/pull/38459)) - - - 新增 `paddle.mode` 和 `paddle.Tensor.mode`,沿指定轴查找输入 Tensor 的众数及对应的索引。([#38446](https://github.com/PaddlePaddle/Paddle/pull/38446)) - - - 新增 `paddle.quantile` 和 `paddle.Tensor.quantile`,沿指定轴计算 Tensor 的 q 分位数。([#38567](https://github.com/PaddlePaddle/Paddle/pull/38567)) - - - 新增 `paddle.kthvalue` 和 `paddle.Tensor.kthvalue`,查找 Tensor 中指定轴上第 k 小的数及对应的索引。([#38386](https://github.com/PaddlePaddle/Paddle/pull/38386)) - - - 新增 `paddle.is_floating_point` 和 `paddle.Tensor.is_floating_point`,判断输入 Tensor 是否为浮点类型。([#37885](https://github.com/PaddlePaddle/Paddle/pull/37885)) - - - 新增 `paddle.erfinv` 和 `paddle.Tensor.erfinv`,计算输入 Tensor 的逆误差函数。([#38295](https://github.com/PaddlePaddle/Paddle/pull/38295)) - - - 新增 `paddle.lerp` 和 `paddle.Tensor.lerp`,根据给定权重计算输入Tensor间的线性插值。([#37253](https://github.com/PaddlePaddle/Paddle/pull/37253)) - - - 新增 `paddle.angle`,用于计算复数 Tensor 的相位角。 ([#37689](https://github.com/PaddlePaddle/Paddle/pull/37689)) - - - 新增`paddle.rad2deg`和`paddle.Tensor.rad2deg`,将元素从弧度的角度转换为度。([#37598](https://github.com/PaddlePaddle/Paddle/pull/37598)) - - - 新增`paddle.deg2rad`和`paddle.Tensor.deg2rad`,将元素从度的角度转换为弧度。([#37598](https://github.com/PaddlePaddle/Paddle/pull/37598)) - - - 新增`paddle.gcd`和`paddle.Tensor.gcd`,计算两个输入的按元素绝对值的最大公约数。([#37819](https://github.com/PaddlePaddle/Paddle/pull/37819)) - - - 新增`paddle.lcm`和`paddle.Tensor.lcm`,计算两个输入的按元素绝对值的最小公倍数。([#37819](https://github.com/PaddlePaddle/Paddle/pull/37819)) - - - 新增`paddle.amax`和`paddle.Tensor.amax`,对指定维度上的 Tensor 元素求最大值,正向结果和 max 
一样,有多个相等的最大值时,反向的梯度平均分到这多个值的位置上。([#38417](https://github.com/PaddlePaddle/Paddle/pull/38417)) - - - 新增`paddle.amin`和`paddle.Tensor.amin`,对指定维度上的 Tensor 元素求最小值,正向结果和 min 一样,有多个相等的最小值时,反向的梯度平均分到这多个值的位置上。([#38417](https://github.com/PaddlePaddle/Paddle/pull/38417)) - - - 新增`paddle.isclose`,用于判断两个 Tensor 的每个元素是否接近。([#37135](https://github.com/PaddlePaddle/Paddle/pull/37135)) - - - 新增`paddle.put_along_axis` 和`paddle.take_along_axis`,用于提取或放置指定索引下标的元素。([#38608](https://github.com/PaddlePaddle/Paddle/pull/38608)) - - - 新增 `paddle.bincount` 和 `paddle.Tensor.bincount`,用于统计 Tensor 中每个元素出现的次数。([#36317](https://github.com/PaddlePaddle/Paddle/pull/36317)) - - - 新增 `paddle.fmax`、 `paddle.fmin`,扩展了max/min的功能,支持比较的两个 Tensor 中有 NaN 值的情况,即如果对应位置上有1个 NaN 值,则返回那个非 NaN 值;如果对应位置上有2个 NaN 值,则返回 NaN 值。([#37826](https://github.com/PaddlePaddle/Paddle/pull/37826)) - - - 新增 `paddle.diff`,用于计算沿给定维度的第 n 个前向差值,目前支持 n=1。([#37441](https://github.com/PaddlePaddle/Paddle/pull/37441)) - - - 新增 `paddle.asinh`、`paddle.acosh`、`paddle.atanh` 反双曲函数类 API。 ([#37076](https://github.com/PaddlePaddle/Paddle/pull/37076)) - - - 新增 `paddle.as_real`,`paddle.as_complex` 用于实数 Tensor 和复数 Tensor 之间的转换。 ([#37784](https://github.com/PaddlePaddle/Paddle/pull/37784)) - - - 新增 `paddle.complex` 用于给定实部和虚部构造复数 Tensor。 ([#37918](https://github.com/PaddlePaddle/Paddle/pull/37918), [#38272](https://github.com/PaddlePaddle/Paddle/pull/38272)) - - - 新增 `paddle.det` 与 `paddle.slogdet`,用于计算矩阵的行列式和行列式的自然对数。 ([#34992](https://github.com/PaddlePaddle/Paddle/pull/34992)) - - - 新增`paddle.nn.utils.parameters_to_vector`,可以将输入的多个 parameter 展平并连接为1个1-D Tensor。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - - - 新增`paddle.nn.utils.vector_to_parameters`,将1个1-D Tensor按顺序切分给输入的多个 parameter。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) + + - 新增 `paddle.clone`,返回输入 Tensor 的拷贝,并且提供梯度计算。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) + + - 新增 `paddle.Tensor.element_size`,返回 Tensor 中的单个元素在计算机中所分配的 bytes 
数量。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) + + - 新增 `paddle.Tensor.to_uva_tensor`,支持将 numpy 对象转换为实际存储在 CPU,但可作为 CUDA 对象进行虚拟地址访问的功能。([#39146](https://github.com/PaddlePaddle/Paddle/pull/39146), [#38950](https://github.com/PaddlePaddle/Paddle/pull/38950)) + + - 新增`paddle.rot90`,沿 `axes` 指定的平面将 n 维 Tensor 旋转90度。([#37634](https://github.com/PaddlePaddle/Paddle/pull/37634)) + + - 新增`paddle.logit` 和 `paddle.Tensor.logit`,计算输入 Tensor 的 logit 函数值。([#37844](https://github.com/PaddlePaddle/Paddle/pull/37844)) + + - 新增 `paddle.repeat_interleave`,沿着指定轴对输入进行复制,创建并返回到一个新的 Tensor。([#37981](https://github.com/PaddlePaddle/Paddle/pull/37981)) + + - 新增 `paddle.renorm`,把 Tensor 在指定的 `axis` 切分成多块后分别进行 p norm 操作。([#38130](https://github.com/PaddlePaddle/Paddle/pull/38130), [#38459](https://github.com/PaddlePaddle/Paddle/pull/38459)) + + - 新增 `paddle.mode` 和 `paddle.Tensor.mode`,沿指定轴查找输入 Tensor 的众数及对应的索引。([#38446](https://github.com/PaddlePaddle/Paddle/pull/38446)) + + - 新增 `paddle.quantile` 和 `paddle.Tensor.quantile`,沿指定轴计算 Tensor 的 q 分位数。([#38567](https://github.com/PaddlePaddle/Paddle/pull/38567)) + + - 新增 `paddle.kthvalue` 和 `paddle.Tensor.kthvalue`,查找 Tensor 中指定轴上第 k 小的数及对应的索引。([#38386](https://github.com/PaddlePaddle/Paddle/pull/38386)) + + - 新增 `paddle.is_floating_point` 和 `paddle.Tensor.is_floating_point`,判断输入 Tensor 是否为浮点类型。([#37885](https://github.com/PaddlePaddle/Paddle/pull/37885)) + + - 新增 `paddle.erfinv` 和 `paddle.Tensor.erfinv`,计算输入 Tensor 的逆误差函数。([#38295](https://github.com/PaddlePaddle/Paddle/pull/38295)) + + - 新增 `paddle.lerp` 和 `paddle.Tensor.lerp`,根据给定权重计算输入Tensor间的线性插值。([#37253](https://github.com/PaddlePaddle/Paddle/pull/37253)) + + - 新增 `paddle.angle`,用于计算复数 Tensor 的相位角。 ([#37689](https://github.com/PaddlePaddle/Paddle/pull/37689)) + + - 新增`paddle.rad2deg`和`paddle.Tensor.rad2deg`,将元素从弧度的角度转换为度。([#37598](https://github.com/PaddlePaddle/Paddle/pull/37598)) + + - 
新增`paddle.deg2rad`和`paddle.Tensor.deg2rad`,将元素从度的角度转换为弧度。([#37598](https://github.com/PaddlePaddle/Paddle/pull/37598)) + + - 新增`paddle.gcd`和`paddle.Tensor.gcd`,计算两个输入的按元素绝对值的最大公约数。([#37819](https://github.com/PaddlePaddle/Paddle/pull/37819)) + + - 新增`paddle.lcm`和`paddle.Tensor.lcm`,计算两个输入的按元素绝对值的最小公倍数。([#37819](https://github.com/PaddlePaddle/Paddle/pull/37819)) + + - 新增`paddle.amax`和`paddle.Tensor.amax`,对指定维度上的 Tensor 元素求最大值,正向结果和 max 一样,有多个相等的最大值时,反向的梯度平均分到这多个值的位置上。([#38417](https://github.com/PaddlePaddle/Paddle/pull/38417)) + + - 新增`paddle.amin`和`paddle.Tensor.amin`,对指定维度上的 Tensor 元素求最小值,正向结果和 min 一样,有多个相等的最小值时,反向的梯度平均分到这多个值的位置上。([#38417](https://github.com/PaddlePaddle/Paddle/pull/38417)) + + - 新增`paddle.isclose`,用于判断两个 Tensor 的每个元素是否接近。([#37135](https://github.com/PaddlePaddle/Paddle/pull/37135)) + + - 新增`paddle.put_along_axis` 和`paddle.take_along_axis`,用于提取或放置指定索引下标的元素。([#38608](https://github.com/PaddlePaddle/Paddle/pull/38608)) + + - 新增 `paddle.bincount` 和 `paddle.Tensor.bincount`,用于统计 Tensor 中每个元素出现的次数。([#36317](https://github.com/PaddlePaddle/Paddle/pull/36317)) + + - 新增 `paddle.fmax`、 `paddle.fmin`,扩展了max/min的功能,支持比较的两个 Tensor 中有 NaN 值的情况,即如果对应位置上有1个 NaN 值,则返回那个非 NaN 值;如果对应位置上有2个 NaN 值,则返回 NaN 值。([#37826](https://github.com/PaddlePaddle/Paddle/pull/37826)) + + - 新增 `paddle.diff`,用于计算沿给定维度的第 n 个前向差值,目前支持 n=1。([#37441](https://github.com/PaddlePaddle/Paddle/pull/37441)) + + - 新增 `paddle.asinh`、`paddle.acosh`、`paddle.atanh` 反双曲函数类 API。 ([#37076](https://github.com/PaddlePaddle/Paddle/pull/37076)) + + - 新增 `paddle.as_real`,`paddle.as_complex` 用于实数 Tensor 和复数 Tensor 之间的转换。 ([#37784](https://github.com/PaddlePaddle/Paddle/pull/37784)) + + - 新增 `paddle.complex` 用于给定实部和虚部构造复数 Tensor。 ([#37918](https://github.com/PaddlePaddle/Paddle/pull/37918), [#38272](https://github.com/PaddlePaddle/Paddle/pull/38272)) + + - 新增 `paddle.det` 与 `paddle.slogdet`,用于计算矩阵的行列式和行列式的自然对数。 ([#34992](https://github.com/PaddlePaddle/Paddle/pull/34992)) + + - 
新增`paddle.nn.utils.parameters_to_vector`,可以将输入的多个 parameter 展平并连接为1个1-D Tensor。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) + + - 新增`paddle.nn.utils.vector_to_parameters`,将1个1-D Tensor按顺序切分给输入的多个 parameter。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - 新增组网类 API - - - 新增 `paddle.nn.Fold`、`paddle.nn.functional.fold`,支持将提取出的滑动局部区域块还原成 batch 的 Tensor。([#38613](https://github.com/PaddlePaddle/Paddle/pull/38613)) - - - 新增 `paddle.nn.CELU`、`paddle.nn.functional.celu`,支持 CELU 激活层。([#36088](https://github.com/PaddlePaddle/Paddle/pull/36088)) - + + - 新增 `paddle.nn.Fold`、`paddle.nn.functional.fold`,支持将提取出的滑动局部区域块还原成 batch 的 Tensor。([#38613](https://github.com/PaddlePaddle/Paddle/pull/38613)) + + - 新增 `paddle.nn.CELU`、`paddle.nn.functional.celu`,支持 CELU 激活层。([#36088](https://github.com/PaddlePaddle/Paddle/pull/36088)) + - 新增 `paddle.nn.HingeEmbeddingLoss`,增加计算 hinge embedding 损失的方式,通常用于学习 nonlinear embedding 或半监督学习。([#37540](https://github.com/PaddlePaddle/Paddle/pull/37540)) - + - 新增 `paddle.nn.ZeroPad2D` API,按照 padding 属性对输入进行零填充。([#37151](https://github.com/PaddlePaddle/Paddle/pull/37151)) - - - 新增 `paddle.nn.MaxUnPool3D` 和 `paddle.nn.MaxUnPool1D`,用于计算 3D 最大反池化和 1D 最大反池化。([#38716](https://github.com/PaddlePaddle/Paddle/pull/38716)) - - - 新增 `paddle.incubate.graph_khop_sampler`、`paddle.incubate.graph_sample_neighbors`、 `paddle.incubate.graph_reindex` API,支持图多阶邻居采样和图编号重索引操作,主要用于图神经网络模型训练。([#39146](https://github.com/PaddlePaddle/Paddle/pull/39146), [#40809](https://github.com/PaddlePaddle/Paddle/pull/40809)) + + - 新增 `paddle.nn.MaxUnPool3D` 和 `paddle.nn.MaxUnPool1D`,用于计算 3D 最大反池化和 1D 最大反池化。([#38716](https://github.com/PaddlePaddle/Paddle/pull/38716)) + + - 新增 `paddle.incubate.graph_khop_sampler`、`paddle.incubate.graph_sample_neighbors`、 `paddle.incubate.graph_reindex` API,支持图多阶邻居采样和图编号重索引操作,主要用于图神经网络模型训练。([#39146](https://github.com/PaddlePaddle/Paddle/pull/39146), [#40809](https://github.com/PaddlePaddle/Paddle/pull/40809)) - 新增随机数类 API - - 
- 新增 `paddle.poisson`,以输入 Tensor 为泊松分布的 lambda 参数,生成一个泊松分布的随机数 Tensor。([#38117](https://github.com/PaddlePaddle/Paddle/pull/38117)) - - - 新增 `paddle.randint_like` API,支持新建服从均匀分布的、范围在[low, high) 的随机 Tensor,输出的形状与输入的形状一致。([#36169](https://github.com/PaddlePaddle/Paddle/pull/36169)) - - - 新增 `paddle.Tensor.exponential_`,为 inplace 式 API,通过指数分布随机数来填充输入 Tensor。([#38256](https://github.com/PaddlePaddle/Paddle/pull/38256)) + + - 新增 `paddle.poisson`,以输入 Tensor 为泊松分布的 lambda 参数,生成一个泊松分布的随机数 Tensor。([#38117](https://github.com/PaddlePaddle/Paddle/pull/38117)) + + - 新增 `paddle.randint_like` API,支持新建服从均匀分布的、范围在[low, high) 的随机 Tensor,输出的形状与输入的形状一致。([#36169](https://github.com/PaddlePaddle/Paddle/pull/36169)) + + - 新增 `paddle.Tensor.exponential_`,为 inplace 式 API,通过指数分布随机数来填充输入 Tensor。([#38256](https://github.com/PaddlePaddle/Paddle/pull/38256)) - 新增参数初始化类 API - - - 新增`paddle.nn.initializer.Dirac`,通过迪拉克 delta 函数来初始化 3D/4D/5D 参数,其常用于卷积层 Conv1D/Conv2D/Conv3D 的参数初始化。([#37389](https://github.com/PaddlePaddle/Paddle/pull/37389)) - - - 新增`paddle.nn.initializer.Orthogonal`,正交矩阵初始化,被初始化后的参数是(半)正交向量。([#37163](https://github.com/PaddlePaddle/Paddle/pull/37163)) - - - 新增`paddle.nn.initializer.calculate_gain`,获取激活函数的推荐增益值,增益值可用于设置某些初始化 API,以调整初始化范围。([#37163](https://github.com/PaddlePaddle/Paddle/pull/37163)) + + - 新增`paddle.nn.initializer.Dirac`,通过迪拉克 delta 函数来初始化 3D/4D/5D 参数,其常用于卷积层 Conv1D/Conv2D/Conv3D 的参数初始化。([#37389](https://github.com/PaddlePaddle/Paddle/pull/37389)) + + - 新增`paddle.nn.initializer.Orthogonal`,正交矩阵初始化,被初始化后的参数是(半)正交向量。([#37163](https://github.com/PaddlePaddle/Paddle/pull/37163)) + + - 新增`paddle.nn.initializer.calculate_gain`,获取激活函数的推荐增益值,增益值可用于设置某些初始化 API,以调整初始化范围。([#37163](https://github.com/PaddlePaddle/Paddle/pull/37163)) - 新增学习率类 API - + - 新增 `paddle.optimizer.lr.MultiplicativeDecay`,提供 `lambda` 函数设置学习率的策略。([#38250](https://github.com/PaddlePaddle/Paddle/pull/38250)) - 新增分布式相关 API - + - 新增 `paddle.incubate.optimizer.DistributedFusedLamb`,使得 Lamb 
优化器可分布式更新参数。([#40011](https://github.com/PaddlePaddle/Paddle/pull/40011), [#39972](https://github.com/PaddlePaddle/Paddle/pull/39972), [#39900](https://github.com/PaddlePaddle/Paddle/pull/39900), [#39747](https://github.com/PaddlePaddle/Paddle/pull/39747), [#39148](https://github.com/PaddlePaddle/Paddle/pull/39148), [#39416](https://github.com/PaddlePaddle/Paddle/pull/39416)) - 新增优化器相关 API([#40710](https://github.com/PaddlePaddle/Paddle/pull/40710)) - + - `paddle.incubate.optimizer.functional.minimize_bfgs`,增加二阶优化器 BFGS。 - + - `paddle.incubate.optimizer.functional.minimize_lbfgs`,增加二阶优化器 L-BFGS。 -- 新增 `paddle.incubate.multiprocessing`模块,支持 Tensor(CPU/GPU)在 python 进程间传输。([#37302](https://github.com/PaddlePaddle/Paddle/pull/37302), [#41339](https://github.com/PaddlePaddle/Paddle/pull/41339)) +- 新增 `paddle.incubate.multiprocessing`模块,支持 Tensor(CPU/GPU)在 python 进程间传输。([#37302](https://github.com/PaddlePaddle/Paddle/pull/37302), [#41339](https://github.com/PaddlePaddle/Paddle/pull/41339)) - 新增 `paddle.incubate.autotune.set_config` API,支持多版本 Kernel 自动选择、混合精度数据布局自动转换、DataLoader 的 num_workers 自动选择,以自动提升模型性能。([#42301](https://github.com/PaddlePaddle/Paddle/pull/42301)) @@ -543,66 +543,66 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### IR(Intermediate Representation) - 动态图转静态图 - - - 变量类型 StaticAnalysis 模块新增支持类似 `a, b = paddle.shape(x)` 的类型标记。([#39245](https://github.com/PaddlePaddle/Paddle/pull/39245)) - - - 新增支持 `InputSpec.name` 作为 Program 缓存 hash key 的计算字段。([#38273](https://github.com/PaddlePaddle/Paddle/pull/38273)) - - - 新增支持 `dict['key'] = x.shape` 语法。([#40611](https://github.com/PaddlePaddle/Paddle/pull/40611)) - - - 新增支持 Pure FP16 训练。([#36944](https://github.com/PaddlePaddle/Paddle/pull/36944)) - - - 新增支持 `for i in [x,y,z]` 语法。([#37259](https://github.com/PaddlePaddle/Paddle/pull/37259)) - - - 新增支持 python3 的 type hint 语法。([#36544](https://github.com/PaddlePaddle/Paddle/pull/36544)) + + - 变量类型 StaticAnalysis 模块新增支持类似 `a, b = paddle.shape(x)` 的类型标记。([#39245](https://github.com/PaddlePaddle/Paddle/pull/39245)) + + - 新增支持 `InputSpec.name` 作为 Program 缓存 hash key 的计算字段。([#38273](https://github.com/PaddlePaddle/Paddle/pull/38273)) + + - 新增支持 `dict['key'] = x.shape` 语法。([#40611](https://github.com/PaddlePaddle/Paddle/pull/40611)) + + - 新增支持 Pure FP16 训练。([#36944](https://github.com/PaddlePaddle/Paddle/pull/36944)) + + - 新增支持 `for i in [x,y,z]` 语法。([#37259](https://github.com/PaddlePaddle/Paddle/pull/37259)) + + - 新增支持 python3 的 type hint 语法。([#36544](https://github.com/PaddlePaddle/Paddle/pull/36544)) - Pass开发 - + - 新增基于 NVIDIA cuBlasLt Epilogue 的 FC + [relu|gelu] 的前向与反向融合。([#39437](https://github.com/PaddlePaddle/Paddle/pull/39437)) - Kernel Primitive API - - - 新增 GPU 平台 KP 算子,包括 cast、scale、clip、bce_loss、abs_grad、reduce_sum_grad、reduce_mean_grad、clip、bce_loss、full、full_like、distribution、 random、masked_select_kernel、where_index、masked_select_grad、dropout、sigmoid、where、abs_grad。 ([#36203](https://github.com/PaddlePaddle/Paddle/pull/36203), [#36423](https://github.com/PaddlePaddle/Paddle/pull/36423), [#39390](https://github.com/PaddlePaddle/Paddle/pull/39390), [#39734](https://github.com/PaddlePaddle/Paddle/pull/39734), [#38500](https://github.com/PaddlePaddle/Paddle/pull/38500), 
[#38959](https://github.com/PaddlePaddle/Paddle/pull/38959), [#39197](https://github.com/PaddlePaddle/Paddle/pull/39197/), [#39563](https://github.com/PaddlePaddle/Paddle/pull/39563), [#39666](https://github.com/PaddlePaddle/Paddle/pull/39666), [#40517](https://github.com/PaddlePaddle/Paddle/pull/40517), [#40617](https://github.com/PaddlePaddle/Paddle/pull/40617), [#40766](https://github.com/PaddlePaddle/Paddle/pull/40766), [#39898](https://github.com/PaddlePaddle/Paddle/pull/39898), [#39609](https://github.com/PaddlePaddle/Paddle/pull/39609)) - - - 新增支持 XPU2 源码编译模式。([#37254](https://github.com/PaddlePaddle/Paddle/pull/37254), [#40397](https://github.com/PaddlePaddle/Paddle/pull/40397), [#38455](https://github.com/PaddlePaddle/Paddle/pull/38455)) - + + - 新增 GPU 平台 KP 算子,包括 cast、scale、clip、bce_loss、abs_grad、reduce_sum_grad、reduce_mean_grad、clip、bce_loss、full、full_like、distribution、 random、masked_select_kernel、where_index、masked_select_grad、dropout、sigmoid、where、abs_grad。 ([#36203](https://github.com/PaddlePaddle/Paddle/pull/36203), [#36423](https://github.com/PaddlePaddle/Paddle/pull/36423), [#39390](https://github.com/PaddlePaddle/Paddle/pull/39390), [#39734](https://github.com/PaddlePaddle/Paddle/pull/39734), [#38500](https://github.com/PaddlePaddle/Paddle/pull/38500), [#38959](https://github.com/PaddlePaddle/Paddle/pull/38959), [#39197](https://github.com/PaddlePaddle/Paddle/pull/39197/), [#39563](https://github.com/PaddlePaddle/Paddle/pull/39563), [#39666](https://github.com/PaddlePaddle/Paddle/pull/39666), [#40517](https://github.com/PaddlePaddle/Paddle/pull/40517), [#40617](https://github.com/PaddlePaddle/Paddle/pull/40617), [#40766](https://github.com/PaddlePaddle/Paddle/pull/40766), [#39898](https://github.com/PaddlePaddle/Paddle/pull/39898), [#39609](https://github.com/PaddlePaddle/Paddle/pull/39609)) + + - 新增支持 XPU2 源码编译模式。([#37254](https://github.com/PaddlePaddle/Paddle/pull/37254), [#40397](https://github.com/PaddlePaddle/Paddle/pull/40397), 
[#38455](https://github.com/PaddlePaddle/Paddle/pull/38455)) + - 新增支持 KP 算子在 XPU2 和 GPU 中复用,包括 reduce、broadcast、elementwise_add、`exp、log、relu、sigmoid、leaky_relu、softplus、hard_swish、reciprocal`。([#36904](https://github.com/PaddlePaddle/Paddle/pull/36904), [#37226](https://github.com/PaddlePaddle/Paddle/pull/37226), [#38918](https://github.com/PaddlePaddle/Paddle/pull/38918), [#40560](https://github.com/PaddlePaddle/Paddle/pull/40560/), [#39787](https://github.com/PaddlePaddle/Paddle/pull/39787), [#39917](https://github.com/PaddlePaddle/Paddle/pull/39917), [#40002](https://github.com/PaddlePaddle/Paddle/pull/40002), [#40364](https://github.com/PaddlePaddle/Paddle/pull/40364)) - - - 新增 XPU2 平台 KP 算子单测,包括 `brelu、ceil、celu、elu、floor、hard_shrink、hard_sigmoid、log1p、logsigmoid、relu6、silu、soft_relu、softsign、sqrt、square、swish、thresholded_relu、softshrink`。([#40448](https://github.com/PaddlePaddle/Paddle/pull/40448), [#40524](https://github.com/PaddlePaddle/Paddle/pull/40524)) - + + - 新增 XPU2 平台 KP 算子单测,包括 `brelu、ceil、celu、elu、floor、hard_shrink、hard_sigmoid、log1p、logsigmoid、relu6、silu、soft_relu、softsign、sqrt、square、swish、thresholded_relu、softshrink`。([#40448](https://github.com/PaddlePaddle/Paddle/pull/40448), [#40524](https://github.com/PaddlePaddle/Paddle/pull/40524)) + - 新增 XPU2 KP 模型支持,包括 resnet50、deepfm、wide_deep、yolov3-darknet53、det_mv3_db、bert、transformer、mobilenet_v3、GPT2。 #### 混合精度训练 -- 从混合精度训练 `paddle.amp.GradScaler` 的 `minimize` 中拆分出 `paddle.amp.Gradscaler.unscale_` 方法,提供恢复 loss 的独立接口。([#35825](https://github.com/PaddlePaddle/Paddle/pull/35825)) +- 从混合精度训练 `paddle.amp.GradScaler` 的 `minimize` 中拆分出 `paddle.amp.Gradscaler.unscale_` 方法,提供恢复 loss 的独立接口。([#35825](https://github.com/PaddlePaddle/Paddle/pull/35825)) - 为 `paddle.nn.ClipByGlobalNorm` 动态图模式添加 FP16 支持,为clip op 添加 FP16 Kernel,使`clip`相关操作支持 FP16。([#36198](https://github.com/PaddlePaddle/Paddle/pull/36198), [#36577](https://github.com/PaddlePaddle/Paddle/pull/36577)) -- 支持 `paddle.amp.decorate` 传入的`optimizer`参数为 
None。([#37541](https://github.com/PaddlePaddle/Paddle/pull/37541)) +- 支持 `paddle.amp.decorate` 传入的`optimizer`参数为 None。([#37541](https://github.com/PaddlePaddle/Paddle/pull/37541)) - 为 merged_momentum op 添加支持输入多学习率、支持 use_nesterov 策略的计算、支持 regularization 计算。([#37527](https://github.com/PaddlePaddle/Paddle/pull/37527)) -- 为`paddle.optimizer.Momentum`优化器添加 multi_tensor 策略、为`Optimzizer`类的`clear_grad`添加`set_to_zero`分支。([#37564](https://github.com/PaddlePaddle/Paddle/pull/37564)) +- 为`paddle.optimizer.Momentum`优化器添加 multi_tensor 策略、为`Optimzizer`类的`clear_grad`添加`set_to_zero`分支。([#37564](https://github.com/PaddlePaddle/Paddle/pull/37564)) -- 为`paddle.optimizer.Adam`优化器添加 multi_tensor 策略。([#38010](https://github.com/PaddlePaddle/Paddle/pull/38010)) +- 为`paddle.optimizer.Adam`优化器添加 multi_tensor 策略。([#38010](https://github.com/PaddlePaddle/Paddle/pull/38010)) -- 为`paddle.optimizer.SGD`优化器添加 multi_precision 策略。([#38231](https://github.com/PaddlePaddle/Paddle/pull/38231)) +- 为`paddle.optimizer.SGD`优化器添加 multi_precision 策略。([#38231](https://github.com/PaddlePaddle/Paddle/pull/38231)) -- 为优化器 `state_dict` 方法添加存储 `master weight` 参数。([#39121](https://github.com/PaddlePaddle/Paddle/pull/39121)) +- 为优化器 `state_dict` 方法添加存储 `master weight` 参数。([#39121](https://github.com/PaddlePaddle/Paddle/pull/39121)) - 添加支持 op CUDA bfloat16 混合精度训练,支持 O1、O2 模式,通过 `paddle.amp.auto_cast`可开启上述训练模式。([#39029](https://github.com/PaddlePaddle/Paddle/pull/39029), [#39815](https://github.com/PaddlePaddle/Paddle/pull/39815)) -- 为如下 ops 添加 bfloat16 CUDA Kernel:matmul、concat、split、dropout、reshape、slice、squeeze、stack、transpose、unbind、elementwize_max、elementwize_add、elementwize_mul、elementwize_sub、scale、sum、layer_norm、p_norm、reduce_sum、softmax、log_softmax、sigmoid、sqrt、softplus、square、gaussian_random、fill_constant、fill_any_like。([#39485](https://github.com/PaddlePaddle/Paddle/pull/39485), [#39380](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39395](https://github.com/PaddlePaddle/Paddle/pull/39380), 
[#39402](https://github.com/PaddlePaddle/Paddle/pull/39402), [#39457](https://github.com/PaddlePaddle/Paddle/pull/39457), [#39461](https://github.com/PaddlePaddle/Paddle/pull/39461), [#39602](https://github.com/PaddlePaddle/Paddle/pull/39602), [#39716](https://github.com/PaddlePaddle/Paddle/pull/39716), [#39683](https://github.com/PaddlePaddle/Paddle/pull/39683), [#39843](https://github.com/PaddlePaddle/Paddle/pull/39843), [#39999](https://github.com/PaddlePaddle/Paddle/pull/39999), [#40004](https://github.com/PaddlePaddle/Paddle/pull/40004), [#40027](https://github.com/PaddlePaddle/Paddle/pull/40027)) +- 为如下 ops 添加 bfloat16 CUDA Kernel:matmul、concat、split、dropout、reshape、slice、squeeze、stack、transpose、unbind、elementwize_max、elementwize_add、elementwize_mul、elementwize_sub、scale、sum、layer_norm、p_norm、reduce_sum、softmax、log_softmax、sigmoid、sqrt、softplus、square、gaussian_random、fill_constant、fill_any_like。([#39485](https://github.com/PaddlePaddle/Paddle/pull/39485), [#39380](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39395](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39402](https://github.com/PaddlePaddle/Paddle/pull/39402), [#39457](https://github.com/PaddlePaddle/Paddle/pull/39457), [#39461](https://github.com/PaddlePaddle/Paddle/pull/39461), [#39602](https://github.com/PaddlePaddle/Paddle/pull/39602), [#39716](https://github.com/PaddlePaddle/Paddle/pull/39716), [#39683](https://github.com/PaddlePaddle/Paddle/pull/39683), [#39843](https://github.com/PaddlePaddle/Paddle/pull/39843), [#39999](https://github.com/PaddlePaddle/Paddle/pull/39999), [#40004](https://github.com/PaddlePaddle/Paddle/pull/40004), [#40027](https://github.com/PaddlePaddle/Paddle/pull/40027)) -- 为如下 ops 添加 bfloat16 CPU Kernel:dropout、reshape、slice、squeeze、unsqueeze、stack、transpose、unbind、elementwize_max、elementwise_mul、elementwise_sub、gather。 ([#39380](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39395](https://github.com/PaddlePaddle/Paddle/pull/39380), 
[#39402](https://github.com/PaddlePaddle/Paddle/pull/39402), [#39457](https://github.com/PaddlePaddle/Paddle/pull/39457), [#39461](https://github.com/PaddlePaddle/Paddle/pull/39461), [#39602](https://github.com/PaddlePaddle/Paddle/pull/39602), [#39716](https://github.com/PaddlePaddle/Paddle/pull/39716), [#39683](https://github.com/PaddlePaddle/Paddle/pull/39683)) +- 为如下 ops 添加 bfloat16 CPU Kernel:dropout、reshape、slice、squeeze、unsqueeze、stack、transpose、unbind、elementwize_max、elementwise_mul、elementwise_sub、gather。 ([#39380](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39395](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39402](https://github.com/PaddlePaddle/Paddle/pull/39402), [#39457](https://github.com/PaddlePaddle/Paddle/pull/39457), [#39461](https://github.com/PaddlePaddle/Paddle/pull/39461), [#39602](https://github.com/PaddlePaddle/Paddle/pull/39602), [#39716](https://github.com/PaddlePaddle/Paddle/pull/39716), [#39683](https://github.com/PaddlePaddle/Paddle/pull/39683)) - 支持打印 bfloat16 类型的 Tensor。([#39375](https://github.com/PaddlePaddle/Paddle/pull/39375), [#39370](https://github.com/PaddlePaddle/Paddle/pull/39370)) - 为`p_norm`、`elementwise_max` 、`fill_constant_batch_size_like``scatter`增加 FP16 计算支持。([#35888](https://github.com/PaddlePaddle/Paddle/pull/35888), [#39907](https://github.com/PaddlePaddle/Paddle/pull/39907), [#38136](https://github.com/PaddlePaddle/Paddle/pull/38136), [#38499](https://github.com/PaddlePaddle/Paddle/pull/38499)) -- 为如下 ops 增加 int16_t 支持:cumsum、less_than、less_equal、greater_than、greater_equal、equal、not_equal、fill_any_like、grather_nd、reduce_sum、where_index、reshape、unsqueeze。([#39636](https://github.com/PaddlePaddle/Paddle/pull/39636)) +- 为如下 ops 增加 int16_t 支持:cumsum、less_than、less_equal、greater_than、greater_equal、equal、not_equal、fill_any_like、grather_nd、reduce_sum、where_index、reshape、unsqueeze。([#39636](https://github.com/PaddlePaddle/Paddle/pull/39636)) -- 为 cross_entropy op 增加 int16_t label 
类型的支持。([#39409](https://github.com/PaddlePaddle/Paddle/pull/39409)) +- 为 cross_entropy op 增加 int16_t label 类型的支持。([#39409](https://github.com/PaddlePaddle/Paddle/pull/39409)) - 为 embedding op 增加 int16_t id 类型的支持。([#39381](https://github.com/PaddlePaddle/Paddle/pull/39381)) @@ -610,7 +610,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 为 elementwise_min op 增加 FP16 类型的支持。([#38123](https://github.com/PaddlePaddle/Paddle/pull/38123)) -- 更新 bfloat16 AMP oneDNN 默认支持列表。([#39304](https://github.com/PaddlePaddle/Paddle/pull/39304)) +- 更新 bfloat16 AMP oneDNN 默认支持列表。([#39304](https://github.com/PaddlePaddle/Paddle/pull/39304)) #### 飞桨高可复用算子库 PHI @@ -629,505 +629,505 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - **算子规模化迁移改写**:迁移了约250个高频算子的前、反向算子内核 Kernel 至新算子库,改写为函数式,支持在 C++端通过调用多个基础 Kernel 函数封装,快速组合实现高性能算子;同时,添加相应的 yaml 算子定义,并接入新动态图执行体系,提升 python API 调度性能。迁移改写的算子包括: - sqrt ([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - square([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - sin ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - sinh ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - elementwise_fmax([#40140](https://github.com/PaddlePaddle/Paddle/pull/40140)) - + - elementwise_fmin([#40140](https://github.com/PaddlePaddle/Paddle/pull/40140)) - + - pool2d([#40208](https://github.com/PaddlePaddle/Paddle/pull/40208), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - max_pool2d_with_index([#40208](https://github.com/PaddlePaddle/Paddle/pull/40208), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - pool3d([#40208](https://github.com/PaddlePaddle/Paddle/pull/40208), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - max_pool3d_with_index([#40208](https://github.com/PaddlePaddle/Paddle/pull/40208), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - fill_constant 
([#36930](https://github.com/PaddlePaddle/Paddle/pull/36930), [#39465](https://github.com/PaddlePaddle/Paddle/pull/39465)) - + - p_norm ([#40819](https://github.com/PaddlePaddle/Paddle/pull/40819)) - + - fill_constant_batch_size_like ([#40784](https://github.com/PaddlePaddle/Paddle/pull/40784)) - + - conv2d([#39354](https://github.com/PaddlePaddle/Paddle/pull/39354)) - + - conv2d_transpose([#40675](https://github.com/PaddlePaddle/Paddle/pull/40675), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - conv3d([#39354](https://github.com/PaddlePaddle/Paddle/pull/39354)) - + - conv3d_transpose([#40675](https://github.com/PaddlePaddle/Paddle/pull/40675), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - mish([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - gather_nd ([#40090](https://github.com/PaddlePaddle/Paddle/pull/40090), [#40043](https://github.com/PaddlePaddle/Paddle/pull/40043)) - + - gather ([#40500](https://github.com/PaddlePaddle/Paddle/pull/40500)) - + - scatter ([#40090](https://github.com/PaddlePaddle/Paddle/pull/40090), [#40043](https://github.com/PaddlePaddle/Paddle/pull/40043)) - + - scatter_nd_add ([#40090](https://github.com/PaddlePaddle/Paddle/pull/40090), [#40043](https://github.com/PaddlePaddle/Paddle/pull/40043)) - + - sgd([40045](https://github.com/PaddlePaddle/Paddle/pull/40045)) - + - momentum ([#41319](https://github.com/PaddlePaddle/Paddle/pull/41319)) - + - rmsprop([#40994](https://github.com/PaddlePaddle/Paddle/pull/40994)) - + - index_sample([#38130](https://github.com/PaddlePaddle/Paddle/pull/38130), [#38459](https://github.com/PaddlePaddle/Paddle/pull/38459),[#39905](https://github.com/PaddlePaddle/Paddle/pull/39905)) - + - adam ([#40351](https://github.com/PaddlePaddle/Paddle/pull/40351)) - + - layer_norm([#40193](https://github.com/PaddlePaddle/Paddle/pull/40193)) - + - adagrad([#40994](https://github.com/PaddlePaddle/Paddle/pull/40994/)) - + - adamax 
([#40173](https://github.com/PaddlePaddle/Paddle/pull/40173)) - + - adadelta ([#40173](https://github.com/PaddlePaddle/Paddle/pull/40173)) - + - clip([#40602](https://github.com/PaddlePaddle/Paddle/pull/40602), [#41661](https://github.com/PaddlePaddle/Paddle/pull/41661), [#41675](https://github.com/PaddlePaddle/Paddle/pull/41675)) - + - ceil ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - cos ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - atan ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - cosh ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - erf([#40388](https://github.com/PaddlePaddle/Paddle/pull/40388)) - + - asin ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - acos ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - scale ([#39278](https://github.com/PaddlePaddle/Paddle/pull/39278)) - + - elementwise_pow ([#40993](https://github.com/PaddlePaddle/Paddle/pull/40993)) - + - elementwise_sub ([#39225](https://github.com/PaddlePaddle/Paddle/pull/39225), [#37260](https://github.com/PaddlePaddle/Paddle/pull/37260)) - + - round ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - floor ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - pow ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - elementwise_floordiv ([#40993](https://github.com/PaddlePaddle/Paddle/pull/40993)) - + - reciprocal([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - log1p ([#40785](https://github.com/PaddlePaddle/Paddle/pull/40785)) - + - allclose ([#40469](https://github.com/PaddlePaddle/Paddle/pull/40469)) - + - mul ([#40833](https://github.com/PaddlePaddle/Paddle/pull/40833)) - + - elementwise_max ([#40590](https://github.com/PaddlePaddle/Paddle/pull/40590)) - + - elementwise_min ([#40590](https://github.com/PaddlePaddle/Paddle/pull/40590)) - + - elementwise_mod 
([#40590](https://github.com/PaddlePaddle/Paddle/pull/40590)) - + - elementwise_add ([#39048](https://github.com/PaddlePaddle/Paddle/pull/39048), [#37043](https://github.com/PaddlePaddle/Paddle/pull/37043)) - + - matmul_v2 ([#36844](https://github.com/PaddlePaddle/Paddle/pull/36844), [#38713](https://github.com/PaddlePaddle/Paddle/pull/38713)) - + - elementwise_mul ([#41042](https://github.com/PaddlePaddle/Paddle/pull/41042), [#40252](https://github.com/PaddlePaddle/Paddle/pull/40252), [#37471](https://github.com/PaddlePaddle/Paddle/pull/37471)) - + - elementwise_div ([#40172](https://github.com/PaddlePaddle/Paddle/pull/40172), [#40039](https://github.com/PaddlePaddle/Paddle/pull/40039), [#37418](https://github.com/PaddlePaddle/Paddle/pull/37418)) - - - SelectedRows ([#39037](https://github.com/PaddlePaddle/Paddle/pull/39037), [#39087](https://github.com/PaddlePaddle/Paddle/pull/39087), [#39128](https://github.com/PaddlePaddle/Paddle/pull/39128), [#39162](https://github.com/PaddlePaddle/Paddle/pull/39162), [#39236](https://github.com/PaddlePaddle/Paddle/pull/39236)) - + + - SelectedRows ([#39037](https://github.com/PaddlePaddle/Paddle/pull/39037), [#39087](https://github.com/PaddlePaddle/Paddle/pull/39087), [#39128](https://github.com/PaddlePaddle/Paddle/pull/39128), [#39162](https://github.com/PaddlePaddle/Paddle/pull/39162), [#39236](https://github.com/PaddlePaddle/Paddle/pull/39236)) + - fill_any_like ([#39807](https://github.com/PaddlePaddle/Paddle/pull/39807)) - + - dot([#38359](https://github.com/PaddlePaddle/Paddle/pull/38359)) - + - sum ([#40873](https://github.com/PaddlePaddle/Paddle/pull/40873)) - + - cumsum ([#39976](https://github.com/PaddlePaddle/Paddle/pull/39976), [#40200](https://github.com/PaddlePaddle/Paddle/pull/40200)) - + - diag_v2 ([#39914](https://github.com/PaddlePaddle/Paddle/pull/39914)) - + - auc ([#39976](https://github.com/PaddlePaddle/Paddle/pull/39976), [#40200](https://github.com/PaddlePaddle/Paddle/pull/40200)) - + - log_loss 
([#39976](https://github.com/PaddlePaddle/Paddle/pull/39976), [#40200](https://github.com/PaddlePaddle/Paddle/pull/40200)) - + - one_hot_v2([39876](https://github.com/PaddlePaddle/Paddle/pull/39876)) - + - sigmoid_cross_entropy_with_logits ([#39976](https://github.com/PaddlePaddle/Paddle/pull/39976), [#40200](https://github.com/PaddlePaddle/Paddle/pull/40200)) - + - bce_loss ([#39868](https://github.com/PaddlePaddle/Paddle/pull/39868)) - + - argsort ([#40151](https://github.com/PaddlePaddle/Paddle/pull/40151)) - + - arg_max ([#40222](https://github.com/PaddlePaddle/Paddle/pull/40222)) - + - arg_min ([#40222](https://github.com/PaddlePaddle/Paddle/pull/40222)) - + - segment_pool ([#40099](https://github.com/PaddlePaddle/Paddle/pull/40099)) - + - frobenius_norm([#40707](https://github.com/PaddlePaddle/Paddle/pull/40707), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - dist ([#40178](https://github.com/PaddlePaddle/Paddle/pull/40178)) - + - isnan_v2 ([#40076](https://github.com/PaddlePaddle/Paddle/pull/40076)) - + - logical_and ([#39942](https://github.com/PaddlePaddle/Paddle/pull/39942)) - + - logical_not ([#39942](https://github.com/PaddlePaddle/Paddle/pull/39942)) - + - isfinite_v2 ([#40076](https://github.com/PaddlePaddle/Paddle/pull/40076)) - + - logical_or ([#39942](https://github.com/PaddlePaddle/Paddle/pull/39942)) - + - isinf_v2 ([#40076](https://github.com/PaddlePaddle/Paddle/pull/40076)) - + - is_empty ([#39919](https://github.com/PaddlePaddle/Paddle/pull/39919)) - + - logical_xor ([#39942](https://github.com/PaddlePaddle/Paddle/pull/39942)) - + - less_than([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - not_equal([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - equal([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - less_equal([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - equal_all([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - uniform_random 
([#39937](https://github.com/PaddlePaddle/Paddle/pull/39937)) - + - randint ([#39876](https://github.com/PaddlePaddle/Paddle/pull/39876), [#41375](https://github.com/PaddlePaddle/Paddle/pull/41375)) - + - randperm ([#41265](https://github.com/PaddlePaddle/Paddle/pull/41265)) - + - unbind ([#39789](https://github.com/PaddlePaddle/Paddle/pull/39789)) - + - bernoulli ([#39590](https://github.com/PaddlePaddle/Paddle/pull/39590)) - + - increment ([#39858](https://github.com/PaddlePaddle/Paddle/pull/39858), [#39913](https://github.com/PaddlePaddle/Paddle/pull/39913)) - + - multinomial ([#39858](https://github.com/PaddlePaddle/Paddle/pull/39858), [#39913](https://github.com/PaddlePaddle/Paddle/pull/39913)) - + - addmm ([#39858](https://github.com/PaddlePaddle/Paddle/pull/39858), [#39913](https://github.com/PaddlePaddle/Paddle/pull/39913)) - + - cholesky ([#39858](https://github.com/PaddlePaddle/Paddle/pull/39858), [#39913](https://github.com/PaddlePaddle/Paddle/pull/39913)) - + - where ([#39811](https://github.com/PaddlePaddle/Paddle/pull/39811)) - + - log10 ([#40785](https://github.com/PaddlePaddle/Paddle/pull/40785)) - + - log2 ([#40785](https://github.com/PaddlePaddle/Paddle/pull/40785)) - + - expm1([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - atan2 ([#39806](https://github.com/PaddlePaddle/Paddle/pull/39806)) - + - gaussian_random ([#39932](https://github.com/PaddlePaddle/Paddle/pull/39932), [#40122](https://github.com/PaddlePaddle/Paddle/pull/40122), [#40191](https://github.com/PaddlePaddle/Paddle/pull/40191)) - + - empty ([#38334](https://github.com/PaddlePaddle/Paddle/pull/38334)) - + - truncated_gaussian_random ([#39971](https://github.com/PaddlePaddle/Paddle/pull/39971), [#40191](https://github.com/PaddlePaddle/Paddle/pull/40191)) - + - mv ([#39861](https://github.com/PaddlePaddle/Paddle/pull/39861), [#39954](https://github.com/PaddlePaddle/Paddle/pull/39954)) - + - tan ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - 
set_value ([#40195](https://github.com/PaddlePaddle/Paddle/pull/40195), [#40478](https://github.com/PaddlePaddle/Paddle/pull/40478), [#40636](https://github.com/PaddlePaddle/Paddle/pull/40636)) - + - bitwise_and ([#40031](https://github.com/PaddlePaddle/Paddle/pull/40031)) - + - bitwise_not([#40031](https://github.com/PaddlePaddle/Paddle/pull/40031)) - + - bitwise_or([#40031](https://github.com/PaddlePaddle/Paddle/pull/40031)) - + - poisson([#39814](https://github.com/PaddlePaddle/Paddle/pull/39814)) - + - cholesky_solve([#40387](https://github.com/PaddlePaddle/Paddle/pull/40387)) - + - bitwise_xor([#40031](https://github.com/PaddlePaddle/Paddle/pull/40031)) - + - triangular_solve([#40417](https://github.com/PaddlePaddle/Paddle/pull/40417)) - + - sigmoid ([#40626](https://github.com/PaddlePaddle/Paddle/pull/40626)) - + - atanh ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - softsign([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - thresholded_relu ([#40385](https://github.com/PaddlePaddle/Paddle/pull/40385)) - + - tanh_shrink ([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - stanh([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - reduce_mean ([#37559](https://github.com/PaddlePaddle/Paddle/pull/37559)) - + - reduce_max([#40225](https://github.com/PaddlePaddle/Paddle/pull/40225)) - + - reduce_min ([#40374](https://github.com/PaddlePaddle/Paddle/pull/40374)) - + - mean ([#40872](https://github.com/PaddlePaddle/Paddle/pull/40872), [#41319](https://github.com/PaddlePaddle/Paddle/pull/41319)) - + - reduce_all ([#40374](https://github.com/PaddlePaddle/Paddle/pull/40374)) - + - reduce_any ([#40374](https://github.com/PaddlePaddle/Paddle/pull/40374)) - + - logsumexp ([#40790](https://github.com/PaddlePaddle/Paddle/pull/40790)) - + - softshrink([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - range ([#41265](https://github.com/PaddlePaddle/Paddle/pull/41265), 
[#40581](https://github.com/PaddlePaddle/Paddle/pull/40851)) - + - stack([#40581](https://github.com/PaddlePaddle/Paddle/pull/40851)) - + - tile ([#40371](https://github.com/PaddlePaddle/Paddle/pull/40371)) - + - unique([#40581](https://github.com/PaddlePaddle/Paddle/pull/40851)) - + - unstack([#40581](https://github.com/PaddlePaddle/Paddle/pull/40851)) - + - slice([#40736](https://github.com/PaddlePaddle/Paddle/pull/40736)) - + - transpose2([#39327](https://github.com/PaddlePaddle/Paddle/pull/39327)) - + - unsqueeze2( [#40596](https://github.com/PaddlePaddle/Paddle/pull/40596)) - + - squeeze2( [#40596](https://github.com/PaddlePaddle/Paddle/pull/40596)) - + - strided_slice ([#40708](https://github.com/PaddlePaddle/Paddle/pull/40708)) - + - softmax ([#39547](https://github.com/PaddlePaddle/Paddle/pull/39547)) - + - leaky_relu ([#40385](https://github.com/PaddlePaddle/Paddle/pull/40385)) - + - gelu ([#40393](https://github.com/PaddlePaddle/Paddle/pull/40393)) - + - prelu ([#40393](https://github.com/PaddlePaddle/Paddle/pull/40393)) - + - log_softmax ([#40393](https://github.com/PaddlePaddle/Paddle/pull/40393)) - + - elu ([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - logsigmoid ([#40626](https://github.com/PaddlePaddle/Paddle/pull/40626)) - + - psroi_pool ([#40353](https://github.com/PaddlePaddle/Paddle/pull/40353), [#41173](https://github.com/PaddlePaddle/Paddle/pull/41173)) - + - kthvalue([#40575](https://github.com/PaddlePaddle/Paddle/pull/40575)) - + - mode ([#40571](https://github.com/PaddlePaddle/Paddle/pull/40571)) - + - yolo_box([#40112](https://github.com/PaddlePaddle/Paddle/pull/40112)) - + - yolov3_loss ([#40944](https://github.com/PaddlePaddle/Paddle/pull/40944)) - + - temporal_shift([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - depthwise_conv2d([#39354](https://github.com/PaddlePaddle/Paddle/pull/39354)) - + - pad3d ([#40701](https://github.com/PaddlePaddle/Paddle/pull/40701)) - + - pad( 
[#40012](https://github.com/PaddlePaddle/Paddle/pull/40012)) - + - greater_equal([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - kldiv_loss ([#39770](https://github.com/PaddlePaddle/Paddle/pull/39770)) - + - isclose ([#39770](https://github.com/PaddlePaddle/Paddle/pull/39770)) - + - silu ([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - unfold ([#39778](https://github.com/PaddlePaddle/Paddle/pull/39778)) - + - batch_norm([39347](https://github.com/PaddlePaddle/Paddle/pull/39347)) - + - norm([#39324](https://github.com/PaddlePaddle/Paddle/pull/39324)) - + - roi_pool ([#40574](https://github.com/PaddlePaddle/Paddle/pull/40574), [#40682](https://github.com/PaddlePaddle/Paddle/pull/40682), [#41173](https://github.com/PaddlePaddle/Paddle/pull/41173)) - + - roi_align ([#40382](https://github.com/PaddlePaddle/Paddle/pull/40382), [#40556](https://github.com/PaddlePaddle/Paddle/pull/40556), [#41402](https://github.com/PaddlePaddle/Paddle/pull/41402)) - + - deformable_conv ([#40700](https://github.com/PaddlePaddle/Paddle/pull/40700), [#40794](https://github.com/PaddlePaddle/Paddle/pull/40794), [#41644](https://github.com/PaddlePaddle/Paddle/pull/41644)) - + - deformable_conv_v1 ([#40794](https://github.com/PaddlePaddle/Paddle/pull/40794), [#41644](https://github.com/PaddlePaddle/Paddle/pull/41644)) - + - label_smooth ([#39796](https://github.com/PaddlePaddle/Paddle/pull/39796)) - + - grid_sampler ([#40585](https://github.com/PaddlePaddle/Paddle/pull/40585)) - + - greater_than([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - pixel_shuffle ([#39949](https://github.com/PaddlePaddle/Paddle/pull/39949), [#39712](https://github.com/PaddlePaddle/Paddle/pull/39712)) - + - nearest_interp_v2 ([#40855](https://github.com/PaddlePaddle/Paddle/pull/40855)) - + - bilinear_interp_v2 ([#40855](https://github.com/PaddlePaddle/Paddle/pull/40855)) - + - softmax_with_cross_entropy ([#40832](https://github.com/PaddlePaddle/Paddle/pull/40832)) 
- + - rnn ([#41007](https://github.com/PaddlePaddle/Paddle/pull/41007)) - + - reverse ([#40791](https://github.com/PaddlePaddle/Paddle/pull/40791)) - + - trace ([#39510](https://github.com/PaddlePaddle/Paddle/pull/39510)) - + - kron([#40427](https://github.com/PaddlePaddle/Paddle/pull/40427)) - + - accuracy([#39982](https://github.com/PaddlePaddle/Paddle/pull/39982)) - + - gather_tree ([#40082](https://github.com/PaddlePaddle/Paddle/pull/40082), [#39844](https://github.com/PaddlePaddle/Paddle/pull/39844)) - + - dropout([#40148](https://github.com/PaddlePaddle/Paddle/pull/40148)) - + - bincount ([#39947](https://github.com/PaddlePaddle/Paddle/pull/39947)) - + - warpctc ([#41389](https://github.com/PaddlePaddle/Paddle/pull/41389), [#40023](https://github.com/PaddlePaddle/Paddle/pull/40023)) - + - multiplex([#40007](https://github.com/PaddlePaddle/Paddle/pull/40007), [#40102](https://github.com/PaddlePaddle/Paddle/pull/40102)) - + - qr([#40007](https://github.com/PaddlePaddle/Paddle/pull/40007), [#40007](https://github.com/PaddlePaddle/Paddle/pull/40007)) - + - assign_value ([#40967](https://github.com/PaddlePaddle/Paddle/pull/40967)) - + - assign ([#40022](https://github.com/PaddlePaddle/Paddle/pull/40022)) - + - cast ([#37610](https://github.com/PaddlePaddle/Paddle/pull/37610)) - + - tril_triu([#40007](https://github.com/PaddlePaddle/Paddle/pull/40007), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - where_index ([#40255](https://github.com/PaddlePaddle/Paddle/pull/40255)) - + - index_select ([#40260](https://github.com/PaddlePaddle/Paddle/pull/40260), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - roll ([#40257](https://github.com/PaddlePaddle/Paddle/pull/40257), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - cumprod (熊昆 [#39770](https://github.com/PaddlePaddle/Paddle/pull/39770)) - + - shard_index ([#40254](https://github.com/PaddlePaddle/Paddle/pull/40254)) - + 
- reshape2 ([#40914](https://github.com/PaddlePaddle/Paddle/pull/40914), [#39631](https://github.com/PaddlePaddle/Paddle/pull/39631), [#38833](https://github.com/PaddlePaddle/Paddle/pull/38833), [#37164](https://github.com/PaddlePaddle/Paddle/pull/37164)) - + - flip ([#39822](https://github.com/PaddlePaddle/Paddle/pull/39822), [#40974](https://github.com/PaddlePaddle/Paddle/pull/40974)) - + - eye ([#39712](https://github.com/PaddlePaddle/Paddle/pull/39712), [#40105](https://github.com/PaddlePaddle/Paddle/pull/40105), [#41476](https://github.com/PaddlePaddle/Paddle/pull/41476)) - + - lookup_table_v2([#39901](https://github.com/PaddlePaddle/Paddle/pull/39901)) - + - searchsorted([#40520](https://github.com/PaddlePaddle/Paddle/pull/40520), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - adamw ([#40351](https://github.com/PaddlePaddle/Paddle/pull/40351)) - + - tanh ([#40385](https://github.com/PaddlePaddle/Paddle/pull/40385)) - + - cross ([#39829](https://github.com/PaddlePaddle/Paddle/pull/39829)) - + - concat ([#38955](https://github.com/PaddlePaddle/Paddle/pull/38955), [#41112](https://github.com/PaddlePaddle/Paddle/pull/41112)) - + - split ([#39060](https://github.com/PaddlePaddle/Paddle/pull/39060)) - + - linspace ([#40124](https://github.com/PaddlePaddle/Paddle/pull/40124)) - + - huber_loss ([#39761](https://github.com/PaddlePaddle/Paddle/pull/39761)) - + - hierarchical_sigmoid([#40553](https://github.com/PaddlePaddle/Paddle/pull/40553)) - + - nll_loss ([#39936](https://github.com/PaddlePaddle/Paddle/pull/39936)) - + - graph_send_recv ([#40092](https://github.com/PaddlePaddle/Paddle/pull/40092), [#40320](https://github.com/PaddlePaddle/Paddle/pull/40320)) - + - abs([#39492](https://github.com/PaddlePaddle/Paddle/pull/39492), [#39762](https://github.com/PaddlePaddle/Paddle/pull/39762)) - + - exp([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - 
rsqrt([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - viterbi_decode ([#40186](https://github.com/PaddlePaddle/Paddle/pull/40186)) - + - conj ([#38247](https://github.com/PaddlePaddle/Paddle/pull/38247)) - + - real ([#39777](https://github.com/PaddlePaddle/Paddle/pull/39777), [#41173](https://github.com/PaddlePaddle/Paddle/pull/41173)) - + - imag ([#39777](https://github.com/PaddlePaddle/Paddle/pull/39777), [#41173](https://github.com/PaddlePaddle/Paddle/pull/41173)) - + - take_along_axis ([#39959](https://github.com/PaddlePaddle/Paddle/pull/39959), [#40270](https://github.com/PaddlePaddle/Paddle/pull/40270), [#40974](https://github.com/PaddlePaddle/Paddle/pull/40974)) - + - put_along_axis ([#39959](https://github.com/PaddlePaddle/Paddle/pull/39959), [#40974](https://github.com/PaddlePaddle/Paddle/pull/40974)) - + - lgamma ([#39770](https://github.com/PaddlePaddle/Paddle/pull/39770)) - + - relu ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - maxout ([#39959](https://github.com/PaddlePaddle/Paddle/pull/39959), [#40974](https://github.com/PaddlePaddle/Paddle/pull/40974)) - + - log ([#40785](https://github.com/PaddlePaddle/Paddle/pull/40785)) - + - bilinear_tensor_product([#39903](https://github.com/PaddlePaddle/Paddle/pull/39903)) - + - flatten_contiguous_range ([#38712](https://github.com/PaddlePaddle/Paddle/pull/38712), [#36957](https://github.com/PaddlePaddle/Paddle/pull/36957), [#41345](https://github.com/PaddlePaddle/Paddle/pull/41345)) - + - matrix_rank ([#40074](https://github.com/PaddlePaddle/Paddle/pull/40074), [#40519](https://github.com/PaddlePaddle/Paddle/pull/40519), [#41466](https://github.com/PaddlePaddle/Paddle/pull/41466)) - + - logit ([#37844](https://github.com/PaddlePaddle/Paddle/pull/37844)) - + - lerp ([#40105](https://github.com/PaddlePaddle/Paddle/pull/40105), [#39524](https://github.com/PaddlePaddle/Paddle/pull/39524)) - + - erfinv ([#39949](https://github.com/PaddlePaddle/Paddle/pull/39949), 
[#39712](https://github.com/PaddlePaddle/Paddle/pull/39712)) - + - broadcast_tensors([#40047](https://github.com/PaddlePaddle/Paddle/pull/40047)) - + - gumbel_softmax([#39873](https://github.com/PaddlePaddle/Paddle/pull/39873)) - + - diagonal ([#39575](https://github.com/PaddlePaddle/Paddle/pull/39575)) - + - trunc ([#39543](https://github.com/PaddlePaddle/Paddle/pull/39543), [#39772](https://github.com/PaddlePaddle/Paddle/pull/39772)) - + - multi_dot ([#40038](https://github.com/PaddlePaddle/Paddle/pull/40038)) - + - matrix_power ([#40231](https://github.com/PaddlePaddle/Paddle/pull/40231)) - + - digamma([#39240](https://github.com/PaddlePaddle/Paddle/pull/39240)) - + - masked_select([#39193](https://github.com/PaddlePaddle/Paddle/pull/39193)) - + - determinant ([#40539](https://github.com/PaddlePaddle/Paddle/pull/40539)) - + - eigh ([#40213](https://github.com/PaddlePaddle/Paddle/pull/40213)) - + - size ([#39949](https://github.com/PaddlePaddle/Paddle/pull/39949), [#39712](https://github.com/PaddlePaddle/Paddle/pull/39712)) - + - shape ([#40248](https://github.com/PaddlePaddle/Paddle/pull/40248)) - + - reduce_sum([#37559](https://github.com/PaddlePaddle/Paddle/pull/37559), [#41295](https://github.com/PaddlePaddle/Paddle/pull/41295)) - + - reduce_prod ([#39844](https://github.com/PaddlePaddle/Paddle/pull/39844)) - + - histogram([#39496](https://github.com/PaddlePaddle/Paddle/pull/39496)) - + - meshgrid ([#41411](https://github.com/PaddlePaddle/Paddle/pull/41411)) - + - brelu ([#40385](https://github.com/PaddlePaddle/Paddle/pull/40385)) - + - hard_swish ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - hard_shrink ([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - selu (熊昆 [#39819](https://github.com/PaddlePaddle/Paddle/pull/39819)) - + - expand_v2 ([#39471](https://github.com/PaddlePaddle/Paddle/pull/39471)) - + - top_k_v2([#40064](https://github.com/PaddlePaddle/Paddle/pull/40064)) - + - 
expand_as_v2([#40373](https://github.com/PaddlePaddle/Paddle/pull/40373)) - + - swish ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - hard_sigmoid ([#40626](https://github.com/PaddlePaddle/Paddle/pull/40626)) - exp, det, assign, gaussian_random, matrix_rank, eye, deformable_conv。([#41755](https://github.com/PaddlePaddle/Paddle/pull/41755), [#41737](https://github.com/PaddlePaddle/Paddle/pull/41737)) @@ -1145,23 +1145,23 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - **新动态图执行机制接入主框架,联合调试**:我们目前利用一些环境变量区分静态图模式和动态图模式(含新动态图和老动态图模式),这些模式下我们已经适配了大部分的动态图的逻辑,但是仍有大量问题正在修复中。([#37638](https://github.com/PaddlePaddle/Paddle/pull/37638), [#37643](https://github.com/PaddlePaddle/Paddle/pull/37643), [#37653](https://github.com/PaddlePaddle/Paddle/pull/37653), [#38314](https://github.com/PaddlePaddle/Paddle/pull/38314), [#38337](https://github.com/PaddlePaddle/Paddle/pull/38337), [#38338](https://github.com/PaddlePaddle/Paddle/pull/38338), [#39164](https://github.com/PaddlePaddle/Paddle/pull/39164), [#39326](https://github.com/PaddlePaddle/Paddle/pull/39326), [#40391](https://github.com/PaddlePaddle/Paddle/pull/40391), [#40201](https://github.com/PaddlePaddle/Paddle/pull/40201), [#40854](https://github.com/PaddlePaddle/Paddle/pull/40854), [#40887](https://github.com/PaddlePaddle/Paddle/pull/40887)) - **更新了动态图下的一些判断逻辑,支持兼容形态下的动态图快速执行路径**:([#40786](https://github.com/PaddlePaddle/Paddle/pull/40786)) - + - 非静态图模式(目前的过渡方案):`_non_static_mode()`。 - + - 在动态图模式下且判断在新动态图(推荐的判断逻辑):`_in_dygrah_mode()`。 - + - 在动态图模式下且判断在老动态图(不推荐的判断逻辑,在将来的版本中将废弃):`_in_legacy_dygraph()`。 - + - 在动态图模式下开启老动态图并关闭新动态图:`_enable_legacy_dygraph()` 或者退出 `_test_eager_guard()`。 - + - 在动态图模式下开启新动态图并关闭老动态图:`_disable_legacy_dygraph()` 或者 `with _test_eager_guard()`。 - + - 在静态图或者动态图模式下判断在新动态图:`_in_eager_without_dygraph_check()`。 - **动态图重构后支持 inplace 策略**:输入与输出为同一个 Tensor。 - + - 为动态图重构中间态适配 inplace 
策略。([#40400](https://github.com/PaddlePaddle/Paddle/pull/40400)) - + - 为动态图重构最终态适配 inplace 策略。([#40695](https://github.com/PaddlePaddle/Paddle/pull/40695)) - 动态图重构后,为 PyLayer 功能添加 inplace 策略。([#41043](https://github.com/PaddlePaddle/Paddle/pull/41043)) @@ -1175,9 +1175,9 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 统一动态图重构后与重构前对 inplace version 检查的报错信息。([#41209](https://github.com/PaddlePaddle/Paddle/pull/41209)) - **动态图重构后支持 view 策略**:输入与输出 Tensor 共享底层数据。 - + - 为动态图重构中间态适配 view 机制。包括`reshape`、`squeeze`、`unsqueeze`、`flatten` API。([#40830](https://github.com/PaddlePaddle/Paddle/pull/40830)) - + - 为动态图重构最终态适配 view 机制。包括`reshape` API。([#40891](https://github.com/PaddlePaddle/Paddle/pull/40891)) - **添加支持新动态图 eager Tensor 在 python 端的 weakref**。([#41797](https://github.com/PaddlePaddle/Paddle/pull/41797)) @@ -1210,119 +1210,119 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. #### 分布式训练 - 集合通信多机多卡训练基础功能 - - - 新增弹性功能(含节点故障、扩容、缩容),提升分布式的容错能力。 ([#36684](https://github.com/PaddlePaddle/Paddle/pull/36684), [#37177](https://github.com/PaddlePaddle/Paddle/pull/37177), [#37781](https://github.com/PaddlePaddle/Paddle/pull/37781)) - - - Launch启动模块,重构并新增 `master` 协同和节点个数 `nnodes` 定义 ,提升分布式启动易用性。 ([#40086](https://github.com/PaddlePaddle/Paddle/pull/40086), [#40568](https://github.com/PaddlePaddle/Paddle/pull/40568), [#40782](https://github.com/PaddlePaddle/Paddle/pull/40782), [#40844](https://github.com/PaddlePaddle/Paddle/pull/40844), [#40936](https://github.com/PaddlePaddle/Paddle/pull/40936), [#41190](https://github.com/PaddlePaddle/Paddle/pull/41190), [#41314](https://github.com/PaddlePaddle/Paddle/pull/41314)) - - - 新增对 GPU/NPU/XPU 多种硬件的异构训练的支持。([#37613](https://github.com/PaddlePaddle/Paddle/pull/37613), [#37998](https://github.com/PaddlePaddle/Paddle/pull/37998)) - + + - 新增弹性功能(含节点故障、扩容、缩容),提升分布式的容错能力。 ([#36684](https://github.com/PaddlePaddle/Paddle/pull/36684), 
[#37177](https://github.com/PaddlePaddle/Paddle/pull/37177), [#37781](https://github.com/PaddlePaddle/Paddle/pull/37781)) + + - Launch启动模块,重构并新增 `master` 协同和节点个数 `nnodes` 定义 ,提升分布式启动易用性。 ([#40086](https://github.com/PaddlePaddle/Paddle/pull/40086), [#40568](https://github.com/PaddlePaddle/Paddle/pull/40568), [#40782](https://github.com/PaddlePaddle/Paddle/pull/40782), [#40844](https://github.com/PaddlePaddle/Paddle/pull/40844), [#40936](https://github.com/PaddlePaddle/Paddle/pull/40936), [#41190](https://github.com/PaddlePaddle/Paddle/pull/41190), [#41314](https://github.com/PaddlePaddle/Paddle/pull/41314)) + + - 新增对 GPU/NPU/XPU 多种硬件的异构训练的支持。([#37613](https://github.com/PaddlePaddle/Paddle/pull/37613), [#37998](https://github.com/PaddlePaddle/Paddle/pull/37998)) + - 新增 fleet_executor 异步流水执行器。([#36966](https://github.com/PaddlePaddle/Paddle/pull/36966), [#37049](https://github.com/PaddlePaddle/Paddle/pull/37049), [#37087](https://github.com/PaddlePaddle/Paddle/pull/37087), [#37126](https://github.com/PaddlePaddle/Paddle/pull/37126), [#37150](https://github.com/PaddlePaddle/Paddle/pull/37150), [#37203](https://github.com/PaddlePaddle/Paddle/pull/37203), [#37167](https://github.com/PaddlePaddle/Paddle/pull/37167), [#37282](https://github.com/PaddlePaddle/Paddle/pull/37282), [#37319](https://github.com/PaddlePaddle/Paddle/pull/37319), [#37462](https://github.com/PaddlePaddle/Paddle/pull/37462), [#37507](https://github.com/PaddlePaddle/Paddle/pull/37507), [#37533](https://github.com/PaddlePaddle/Paddle/pull/37533), [#37576](https://github.com/PaddlePaddle/Paddle/pull/37576), [#37605](https://github.com/PaddlePaddle/Paddle/pull/37605), [#37691](https://github.com/PaddlePaddle/Paddle/pull/37691), [#37742](https://github.com/PaddlePaddle/Paddle/pull/37742), [#37783](https://github.com/PaddlePaddle/Paddle/pull/37783), [#37809](https://github.com/PaddlePaddle/Paddle/pull/37809), [#37862](https://github.com/PaddlePaddle/Paddle/pull/37862), 
[#37882](https://github.com/PaddlePaddle/Paddle/pull/37882), [#37934](https://github.com/PaddlePaddle/Paddle/pull/37934), [#38024](https://github.com/PaddlePaddle/Paddle/pull/38024), [#38083](https://github.com/PaddlePaddle/Paddle/pull/38083), [#38164](https://github.com/PaddlePaddle/Paddle/pull/38164), [#38261](https://github.com/PaddlePaddle/Paddle/pull/38261), [#38290](https://github.com/PaddlePaddle/Paddle/pull/38290), [#40607](https://github.com/PaddlePaddle/Paddle/pull/40607), [#37093](https://github.com/PaddlePaddle/Paddle/pull/37093), [#37106](https://github.com/PaddlePaddle/Paddle/pull/37106), [#37143](https://github.com/PaddlePaddle/Paddle/pull/37143), [#37338](https://github.com/PaddlePaddle/Paddle/pull/37338), [#37376](https://github.com/PaddlePaddle/Paddle/pull/37376), [#37485](https://github.com/PaddlePaddle/Paddle/pull/37485), [#37531](https://github.com/PaddlePaddle/Paddle/pull/37531), [#37623](https://github.com/PaddlePaddle/Paddle/pull/37623), [#37693](https://github.com/PaddlePaddle/Paddle/pull/37693), [#37755](https://github.com/PaddlePaddle/Paddle/pull/37755), [#37807](https://github.com/PaddlePaddle/Paddle/pull/37807), [#37889](https://github.com/PaddlePaddle/Paddle/pull/37889), [#38420](https://github.com/PaddlePaddle/Paddle/pull/38420), [#38539](https://github.com/PaddlePaddle/Paddle/pull/38539), [#36892](https://github.com/PaddlePaddle/Paddle/pull/36892), [#37084](https://github.com/PaddlePaddle/Paddle/pull/37084), [#37158](https://github.com/PaddlePaddle/Paddle/pull/37158), [#37361](https://github.com/PaddlePaddle/Paddle/pull/37361), [#37509](https://github.com/PaddlePaddle/Paddle/pull/37509), [#37603](https://github.com/PaddlePaddle/Paddle/pull/37603), [#37703](https://github.com/PaddlePaddle/Paddle/pull/37703), [#37824](https://github.com/PaddlePaddle/Paddle/pull/37824), [#38114](https://github.com/PaddlePaddle/Paddle/pull/38114), [#38322](https://github.com/PaddlePaddle/Paddle/pull/38322), 
[#38535](https://github.com/PaddlePaddle/Paddle/pull/38535), [#38650](https://github.com/PaddlePaddle/Paddle/pull/38650), [#38709](https://github.com/PaddlePaddle/Paddle/pull/38709), [#38799](https://github.com/PaddlePaddle/Paddle/pull/38799), [#38839](https://github.com/PaddlePaddle/Paddle/pull/38839), [#38904](https://github.com/PaddlePaddle/Paddle/pull/38904)) - - - 新增分布式大模型推理功能。([#38795](https://github.com/PaddlePaddle/Paddle/pull/38795), [#39012](https://github.com/PaddlePaddle/Paddle/pull/39012), [#39032](https://github.com/PaddlePaddle/Paddle/pull/39032), [#39076](https://github.com/PaddlePaddle/Paddle/pull/39076), [#39194](https://github.com/PaddlePaddle/Paddle/pull/39194), [#39207](https://github.com/PaddlePaddle/Paddle/pull/39207), [#39241](https://github.com/PaddlePaddle/Paddle/pull/39241), [#39603](https://github.com/PaddlePaddle/Paddle/pull/39603), [#39758](https://github.com/PaddlePaddle/Paddle/pull/39758), [#39992](https://github.com/PaddlePaddle/Paddle/pull/39992)) + + - 新增分布式大模型推理功能。([#38795](https://github.com/PaddlePaddle/Paddle/pull/38795), [#39012](https://github.com/PaddlePaddle/Paddle/pull/39012), [#39032](https://github.com/PaddlePaddle/Paddle/pull/39032), [#39076](https://github.com/PaddlePaddle/Paddle/pull/39076), [#39194](https://github.com/PaddlePaddle/Paddle/pull/39194), [#39207](https://github.com/PaddlePaddle/Paddle/pull/39207), [#39241](https://github.com/PaddlePaddle/Paddle/pull/39241), [#39603](https://github.com/PaddlePaddle/Paddle/pull/39603), [#39758](https://github.com/PaddlePaddle/Paddle/pull/39758), [#39992](https://github.com/PaddlePaddle/Paddle/pull/39992)) - 动态图混合并行 - + - 重构 `paddle.distributed.fleet.utils.recompute`,支持新动态图。 ([#41396](https://github.com/PaddlePaddle/Paddle/pull/41396)) - - - 支持 Pure FP16 训练。([#36420](https://github.com/PaddlePaddle/Paddle/pull/36420)) - + + - 支持 Pure FP16 训练。([#36420](https://github.com/PaddlePaddle/Paddle/pull/36420)) + - 新增 MoE(Mixture of Experts)并行策略, 支持超大 MoE 
模型训练。([#41092](https://github.com/PaddlePaddle/Paddle/pull/41092), [#40895](https://github.com/PaddlePaddle/Paddle/pull/40895), [#40850](https://github.com/PaddlePaddle/Paddle/pull/40580), [#39224](https://github.com/PaddlePaddle/Paddle/pull/39224)) - + - 新增 GroupSharded 并行策略,支持 stage1、stage2、stage3三个阶段模型状态分组切片训练策略,支持同、异步通信,并可与 Recompute、AMP O1\O2、Offload、GroupShardedClipGrad、GroupShardedScaler 等基础功能组合使用。([#37489](https://github.com/PaddlePaddle/Paddle/pull/37489), [#37568](https://github.com/PaddlePaddle/Paddle/pull/37568), [#37707](https://github.com/PaddlePaddle/Paddle/pull/37707), [#37836](https://github.com/PaddlePaddle/Paddle/pull/37836), [#37947](https://github.com/PaddlePaddle/Paddle/pull/37947), [#38151](https://github.com/PaddlePaddle/Paddle/pull/38151), [#38407](https://github.com/PaddlePaddle/Paddle/pull/38407), [#38052](https://github.com/PaddlePaddle/Paddle/pull/38052), [#39112](https://github.com/PaddlePaddle/Paddle/pull/39112), [#38989](https://github.com/PaddlePaddle/Paddle/pull/38989), [#39171](https://github.com/PaddlePaddle/Paddle/pull/39171), [#39285](https://github.com/PaddlePaddle/Paddle/pull/39285), [#39334](https://github.com/PaddlePaddle/Paddle/pull/39334), [#39397](https://github.com/PaddlePaddle/Paddle/pull/39397), [#39581](https://github.com/PaddlePaddle/Paddle/pull/39581), [#39668](https://github.com/PaddlePaddle/Paddle/pull/39668), [#40129](https://github.com/PaddlePaddle/Paddle/pull/40129), [#40396](https://github.com/PaddlePaddle/Paddle/pull/40396), [#40488](https://github.com/PaddlePaddle/Paddle/pull/40488), [#40601](https://github.com/PaddlePaddle/Paddle/pull/40601),[#37725](https://github.com/PaddlePaddle/Paddle/pull/37725),[#37904](https://github.com/PaddlePaddle/Paddle/pull/37904), [#38064](https://github.com/PaddlePaddle/Paddle/pull/38064)) - 静态图混合并行 - - - 新增`scale_gradient`标志位至`gradient_scale_configs`,用于控制流水线并行下梯度聚合运算对梯度进行求平均运算的位置。([#36384](https://github.com/PaddlePaddle/Paddle/pull/36384)) - - - 张量模型并行下,dropout 
支持设置确定性随机种子生成器,以确保非分布式变量的随机一致性和分布式变量的随机性。([#36228](https://github.com/PaddlePaddle/Paddle/pull/36228)) - + + - 新增`scale_gradient`标志位至`gradient_scale_configs`,用于控制流水线并行下梯度聚合运算对梯度进行求平均运算的位置。([#36384](https://github.com/PaddlePaddle/Paddle/pull/36384)) + + - 张量模型并行下,dropout 支持设置确定性随机种子生成器,以确保非分布式变量的随机一致性和分布式变量的随机性。([#36228](https://github.com/PaddlePaddle/Paddle/pull/36228)) + - NPU 混合并行支持 Offload,可节约40%显存。([#37224](https://github.com/PaddlePaddle/Paddle/pull/37224)) - - - 为 seed op 增加 `force_cpu` 可选参数,使 dropout 可以直接从 CPU 读取 seed 的值。([#35820](https://github.com/PaddlePaddle/Paddle/pull/35820)) - + + - 为 seed op 增加 `force_cpu` 可选参数,使 dropout 可以直接从 CPU 读取 seed 的值。([#35820](https://github.com/PaddlePaddle/Paddle/pull/35820)) + - 完善Automatic Sparsity (ASP)sharding策略,支持根据program选择sharding策略。(#[#40028](https://github.com/PaddlePaddle/Paddle/pull/40028)) - 自动并行 - - - 新增逻辑进程与物理设备自动映射后的进程重新启动(relaunch)。([#37523](https://github.com/PaddlePaddle/Paddle/pull/37523), [#37326](https://github.com/PaddlePaddle/Paddle/pull/37326)) - - - 完善自动并行底层机制和接口,利于各个模块统一和添加优化 pass。([#36617](https://github.com/PaddlePaddle/Paddle/pull/36617), [#38132](https://github.com/PaddlePaddle/Paddle/pull/38132)) - - - 新增统一资源表示,支持逻辑进程与物理设备自动映射功能。([#37091](https://github.com/PaddlePaddle/Paddle/pull/37091), [#37482](https://github.com/PaddlePaddle/Paddle/pull/37482), [#37094](https://github.com/PaddlePaddle/Paddle/pull/37094)) - + + - 新增逻辑进程与物理设备自动映射后的进程重新启动(relaunch)。([#37523](https://github.com/PaddlePaddle/Paddle/pull/37523), [#37326](https://github.com/PaddlePaddle/Paddle/pull/37326)) + + - 完善自动并行底层机制和接口,利于各个模块统一和添加优化 pass。([#36617](https://github.com/PaddlePaddle/Paddle/pull/36617), [#38132](https://github.com/PaddlePaddle/Paddle/pull/38132)) + + - 新增统一资源表示,支持逻辑进程与物理设备自动映射功能。([#37091](https://github.com/PaddlePaddle/Paddle/pull/37091), [#37482](https://github.com/PaddlePaddle/Paddle/pull/37482), [#37094](https://github.com/PaddlePaddle/Paddle/pull/37094)) + - 
完善自动并行计算图反向和更新部分的分布式属性补全功能。([#36744](https://github.com/PaddlePaddle/Paddle/pull/36744)) - - - 新增数据切分功能。([#36055](https://github.com/PaddlePaddle/Paddle/pull/36055)) - + + - 新增数据切分功能。([#36055](https://github.com/PaddlePaddle/Paddle/pull/36055)) + - 新增张量重切分功能,根据张量和算子的分布式属性对张量进行重新切分。([#40865](https://github.com/PaddlePaddle/Paddle/pull/40865), [#41106](https://github.com/PaddlePaddle/Paddle/pull/41106)) - + - 新增资源数量或并行策略变化时分布式参数的自动转换功能。([#40434](https://github.com/PaddlePaddle/Paddle/pull/40434)) - - - 新增梯度累加功能(GradientMerge),减少通信次数,提升训练效率。([#38259](https://github.com/PaddlePaddle/Paddle/pull/38259), [#40737](https://github.com/PaddlePaddle/Paddle/pull/40737)) - - - 新增重计算功能(Recompute),优化显存。([#38920](https://github.com/PaddlePaddle/Paddle/pull/38920)) - + + - 新增梯度累加功能(GradientMerge),减少通信次数,提升训练效率。([#38259](https://github.com/PaddlePaddle/Paddle/pull/38259), [#40737](https://github.com/PaddlePaddle/Paddle/pull/40737)) + + - 新增重计算功能(Recompute),优化显存。([#38920](https://github.com/PaddlePaddle/Paddle/pull/38920)) + - 新增 Sharding 优化 pass, 支持 p-g-os 3 个stage 的切分优化。([#38502](https://github.com/PaddlePaddle/Paddle/pull/38502)) - + - 新增 AMP + FP16 优化 pass。([#38764](https://github.com/PaddlePaddle/Paddle/pull/38764), [#40615](https://github.com/PaddlePaddle/Paddle/pull/40615)) - + - 新增 Transformer 类模型的 QKV fuse 切分。([#39080](https://github.com/PaddlePaddle/Paddle/pull/39080)) - - - 新增 while op 的分布式属性推导功能,确保迭代推导算法能收敛。([#39939](https://github.com/PaddlePaddle/Paddle/pull/39939), [#39086](https://github.com/PaddlePaddle/Paddle/pull/39086), [#39014](https://github.com/PaddlePaddle/Paddle/pull/39014)) - + + - 新增 while op 的分布式属性推导功能,确保迭代推导算法能收敛。([#39939](https://github.com/PaddlePaddle/Paddle/pull/39939), [#39086](https://github.com/PaddlePaddle/Paddle/pull/39086), [#39014](https://github.com/PaddlePaddle/Paddle/pull/39014)) + - 支持子 block 和 while op 控制流的训练和推理。([#39612](https://github.com/PaddlePaddle/Paddle/pull/39612), [#39895](https://github.com/PaddlePaddle/Paddle/pull/39895), 
[#40077](https://github.com/PaddlePaddle/Paddle/pull/40077)) - 参数服务器 - + - GPUPS 下,新增 NAN/INF 值检查工具。 ([#38131](https://github.com/PaddlePaddle/Paddle/pull/38131)) - - - GPUPS 下,新增 set_date 接口,适配增量训练。([#36194](https://github.com/PaddlePaddle/Paddle/pull/36194)) - + + - GPUPS 下,新增 set_date 接口,适配增量训练。([#36194](https://github.com/PaddlePaddle/Paddle/pull/36194)) + - GPUPS 下,新增异步 release dataset 功能。 ([#37790](https://github.com/PaddlePaddle/Paddle/pull/37790)) - + - GPUPS 下,支持 Dump 参数和中间层([#36157](https://github.com/PaddlePaddle/Paddle/pull/36157)); - + - GPUPS 下,支持优化器参数配置。([#39783](https://github.com/PaddlePaddle/Paddle/pull/39783), [#39849](https://github.com/PaddlePaddle/Paddle/pull/39849)) - + - 统一参数服务器下,重构通信、存储等各个模块基类,提升各个模块的易二次开发性。([#41207](https://github.com/PaddlePaddle/Paddle/pull/41207), [#41022](https://github.com/PaddlePaddle/Paddle/pull/41022), [#40702](https://github.com/PaddlePaddle/Paddle/pull/40702), [#39341](https://github.com/PaddlePaddle/Paddle/pull/39341) [#39377](https://github.com/PaddlePaddle/Paddle/pull/39377), [#39191](https://github.com/PaddlePaddle/Paddle/pull/39191), [#39064](https://github.com/PaddlePaddle/Paddle/pull/39064)) - - - 统一参数服务器下,新增评估指标模块,支持 AUC/WuAUC/MaskAuc 等评估指标计算及可自定义扩展。 ([#38789](https://github.com/PaddlePaddle/Paddle/pull/38789)) - + + - 统一参数服务器下,新增评估指标模块,支持 AUC/WuAUC/MaskAuc 等评估指标计算及可自定义扩展。 ([#38789](https://github.com/PaddlePaddle/Paddle/pull/38789)) + - 支持在昆仑2芯片上的 XPU 参数服务器训练。 ([#41917](https://github.com/PaddlePaddle/Paddle/pull/41917), [#42266](https://github.com/PaddlePaddle/Paddle/pull/42266), [#41916](https://github.com/PaddlePaddle/Paddle/pull/41916)) #### Profiler -- Python 层新增性能分析模块 `paddle.profiler`: 提供对训推过程中性能数据的收集,导出和统计的功能。 ([#40065](https://github.com/PaddlePaddle/Paddle/pull/40065), [#40357](https://github.com/PaddlePaddle/Paddle/pull/40357), [#40888](https://github.com/PaddlePaddle/Paddle/pull/40888)) - +- Python 层新增性能分析模块 `paddle.profiler`: 提供对训推过程中性能数据的收集,导出和统计的功能。 
([#40065](https://github.com/PaddlePaddle/Paddle/pull/40065), [#40357](https://github.com/PaddlePaddle/Paddle/pull/40357), [#40888](https://github.com/PaddlePaddle/Paddle/pull/40888)) + - `paddle.profiler.Profiler`,性能分析器,用户交互的接口。([#41029](https://github.com/PaddlePaddle/Paddle/pull/41029), [#41524](https://github.com/PaddlePaddle/Paddle/pull/41524), [#41157](https://github.com/PaddlePaddle/Paddle/pull/41157), [#40249](https://github.com/PaddlePaddle/Paddle/pull/40249), [#40111](https://github.com/PaddlePaddle/Paddle/pull/40111), [#39964](https://github.com/PaddlePaddle/Paddle/pull/39964), [#40133](https://github.com/PaddlePaddle/Paddle/pull/40133)) - + - `paddle.profiler.RecordEvent`,提供自定义打点来记录时间的功能。 ([#39693](https://github.com/PaddlePaddle/Paddle/pull/39693), [#39694](https://github.com/PaddlePaddle/Paddle/pull/39694), [#39695](https://github.com/PaddlePaddle/Paddle/pull/39695), [#39675](https://github.com/PaddlePaddle/Paddle/pull/39675),[#41445](https://github.com/PaddlePaddle/Paddle/pull/41445), [#41132](https://github.com/PaddlePaddle/Paddle/pull/41132)) - + - `paddle.profiler.ProfilerTarget`,指定性能分析的目标设备。 - + - `paddle.profiler.ProfilerState`,表示性能分析器的状态。 - + - `paddle.profiler.SortedKeys`,指定统计表单内数据的排序方式。 - + - `paddle.profiler.make_scheduler`,生成性能分析器状态的调度器,实现采集范围的周期性控制。 - + - `paddle.profiler.export_chrome_tracing`,将性能数据保存到可供 chrome://tracing 插件查看的 google chrome tracing 文件。 ([#39316](https://github.com/PaddlePaddle/Paddle/pull/39316), [#39984](https://github.com/PaddlePaddle/Paddle/pull/39984), [#41029](https://github.com/PaddlePaddle/Paddle/pull/41029)) - + - `paddle.profiler.export_protobuf`,将性能数据保存到内部结构表示的 protobuf 文件。 ([#39519](https://github.com/PaddlePaddle/Paddle/pull/39519), [#39109](https://github.com/PaddlePaddle/Paddle/pull/39109), [#39474](https://github.com/PaddlePaddle/Paddle/pull/39474)) - + - `paddle.profiler.load_profiler_result`,载入所保存到 protobuf 文件的性能数据。 - - - `paddle.profiler.Profiler`通过指定 `timer_only` 参数,对模型进行数据读取、step 
开销和吞吐量的统计。([#40386](https://github.com/PaddlePaddle/Paddle/pull/40386)) -- C++层重构 Profiler 底层基础设施 - + - `paddle.profiler.Profiler`通过指定 `timer_only` 参数,对模型进行数据读取、step 开销和吞吐量的统计。([#40386](https://github.com/PaddlePaddle/Paddle/pull/40386)) + +- C++层重构 Profiler 底层基础设施 + - 重构 Profiler 的控制器架构。([#38826](https://github.com/PaddlePaddle/Paddle/pull/38826), [#39230](https://github.com/PaddlePaddle/Paddle/pull/39230), [#39779](https://github.com/PaddlePaddle/Paddle/pull/39779) ) - + - 新增 Host Tracer,收集主机侧性能指标。([#37629](https://github.com/PaddlePaddle/Paddle/pull/39629), [#37766](https://github.com/PaddlePaddle/Paddle/pull/37766), [#37944](https://github.com/PaddlePaddle/Paddle/pull/37944), [#38280](https://github.com/PaddlePaddle/Paddle/pull/38280), [#39975](https://github.com/PaddlePaddle/Paddle/pull/39975), [#40460](https://github.com/PaddlePaddle/Paddle/pull/40460)) - + - 新增 CUDA Tracer,收集设备侧性能指标。([#39488](https://github.com/PaddlePaddle/Paddle/pull/39488)) - + - Profiler 支持分级。([#39926](https://github.com/PaddlePaddle/Paddle/pull/39926)) - 修改新动态图下 op 的打点名称和类型。([#41771](https://github.com/PaddlePaddle/Paddle/pull/41771/) @@ -1336,169 +1336,169 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
飞桨的编译器功能在逐步丰富中,针对 CINN([GitHub - PaddlePaddle/CINN: Compiler Infrastructure for Neural Networks](https://github.com/PaddlePaddle/CINN)) 的变更,Paddle 侧接入也进行了相对应的更改,以适配编译器 CINN 的功能。其中主要包括增加Paddle-CINN 运行流程的子图管理相关功能,显存和速度性能的优化、开发过程发现的 bug 修复。 - 功能开发: - + - 子图 op 相关: - + - 添加从计算图中找到并生成 CINN 子图的功能。([#36345](https://github.com/PaddlePaddle/Paddle/pull/36345)) - + - 新增 cinn_launch op 作为运行时接入 CINN 的入口,负责调度 CINN 对子图进行编译、初始化数据空间、调度生成 Kernel 的执行。([#36600](https://github.com/PaddlePaddle/Paddle/pull/36600)) - + - 为 cinn_launch op 的 Kernel 实现添加辅助类 CinnLaunchContext 管理子图编译、运行的中间数据,提升可扩展性和代码可读性。([#37938](https://github.com/PaddlePaddle/Paddle/pull/37938)) - + - 为 CINN 子图添加额外的 fetch 结点,从而保证 CINN 外部结点能取到待fetch变量的值。([#37172](https://github.com/PaddlePaddle/Paddle/pull/37172), [#37190](https://github.com/PaddlePaddle/Paddle/pull/37190)) - + - 添加对 CINN 子图符号化的功能,符号化用于拓扑排序子图并返回 CINN 执行序列。([#36417](https://github.com/PaddlePaddle/Paddle/pull/36417) - + - 新增 CinnCompiler 类,用于调用 CINN 编译模型中可使用 CINN 算子替换的子图。 ([#36562](https://github.com/PaddlePaddle/Paddle/pull/36562), [#36975](https://github.com/PaddlePaddle/Paddle/pull/36975)) - + - 为 CINN 符号化类新增获取子图 fetch 变量名的接口,防止编译优化中将 fetch 变量融合消除。([#37218](https://github.com/PaddlePaddle/Paddle/pull/37218)) - + - 程序开发检查、debug、API 变更相关: - + - 同步更新 CINN 中 NetBuilder API 名称的变化。([#40392](https://github.com/PaddlePaddle/Paddle/pull/40392)) - + - 为 Paddle-CINN 添加必要的用于 debug 的日志信息。([#36867](https://github.com/PaddlePaddle/Paddle/pull/36867)) - + - 添加 Paddle desc 与 CINN desc 互转函数。([#36100](https://github.com/PaddlePaddle/Paddle/pull/36100)) - + - 相比 Paddle,CINN 中实现的算子可能存在未使用到某些输入变量,因此在 cinn_launch op 中去除对输入变量必须被使用的检查。([#37119](https://github.com/PaddlePaddle/Paddle/pull/37119)) - + - 新增 cinn_instruction_run op 用于调用 CINN 执行单个生成指令,便于 Paddle 侧构建 Graph 调度运行子图。([#39435](https://github.com/PaddlePaddle/Paddle/pull/39435), [#39576](https://github.com/PaddlePaddle/Paddle/pull/39576)) - + - 在 Paddle 中添加编译 CINN 所需的 CUDA/CUBLAS/MKL/CINN pass 
应用等控制宏。([#37066](https://github.com/PaddlePaddle/Paddle/pull/37066), [#36660](https://github.com/PaddlePaddle/Paddle/pull/36660)) - + - 增加 FLAGS_allow_cinn_ops 和 FLAGS_deny_cinn_ops 两个控制标记,用于控制 Paddle 训练中使用 CINN 算子代替原生算子的种类。([#36842](https://github.com/PaddlePaddle/Paddle/pull/36842)) - 性能优化: - + - 速度优化 - + - 优化 CinnCacheKey 的计算耗时。([#37786](https://github.com/PaddlePaddle/Paddle/pull/37786), [#37317](https://github.com/PaddlePaddle/Paddle/pull/37317)) - + - 缓存 CINN 编译子图的变量 scope,降低运行参数构造开销。([#37983](https://github.com/PaddlePaddle/Paddle/pull/37983)) - + - 子图编译时接入 CINN 自动调优,支持通过 flag 启用,便于后续进一步调优训练性能。([#41795](https://github.com/PaddlePaddle/Paddle/pull/41795)) - + - 重构子图编译时对编译结果的正确性校验,避免运行时重复检查,降低调度开销。([#41777](https://github.com/PaddlePaddle/Paddle/pull/41777)) - + - 在 Paddle-CINN 训练功能中默认启用 TransposeFolding 和 GemmRewriter 优化 pass。([#41084](https://github.com/PaddlePaddle/Paddle/pull/41084)) - + - 将 Paddle 中创建的 cuda stream 传入 CINN,使得 Paddle 和 CINN 执行计算时共用同一个 CUDA stream。([#37337](https://github.com/PaddlePaddle/Paddle/pull/37337)) - + - 将 CINN 优化 pass 应用逻辑从 Paddle 中移动到 CINN 中。([#42047](https://github.com/PaddlePaddle/Paddle/pull/42047), [#42070](https://github.com/PaddlePaddle/Paddle/pull/42070)) - + - 显存优化 - + - 为 cinn_launch op 添加 NoNeedBufferVars 声明无须 buffer 的输入变量列表,以便显存优化提前释放无效空间。([#38367](https://github.com/PaddlePaddle/Paddle/pull/38367)) - + - 传入子图外部变量的引用计数信息,便于 cinn_launch 内子图复用显存优化 pass,降低使用 CINN 的显存开销。([#39209](https://github.com/PaddlePaddle/Paddle/pull/39209), [#39622](https://github.com/PaddlePaddle/Paddle/pull/39622)) - + - 添加 CINN 编译生成的可执行指令集合转换为 Paddle Graph 的功能,支持复用 Paddle 调度器及显存优化 pass,进一步降低使用 CINN 的显存开销。([#39724](https://github.com/PaddlePaddle/Paddle/pull/39724), [#39911](https://github.com/PaddlePaddle/Paddle/pull/39911)) - + - 添加 cinn_instruction_run op 的 Kernel 支持根据编译结果推断的数据类型动态申请空间。([#40920](https://github.com/PaddlePaddle/Paddle/pull/40920)) - 问题修复: - + - 修复并优化 CINN 子图的生成逻辑。([#36503](https://github.com/PaddlePaddle/Paddle/pull/36503)) - + 
- 修复 Paddle-CINN 不支持无输入子图的问题。([#40814](https://github.com/PaddlePaddle/Paddle/pull/40814)) - + - 修复由于 CINN 无法处理 batch_norm 等算子中存在的无用输出而报错的问题。([#36996](https://github.com/PaddlePaddle/Paddle/pull/36996)) - + - 修复若干 CINN 子图划分以及符号化中存在的 bug,解决 Paddle 训练接入 CINN 全流程打通过程中遇到的问题。 ([#36739](https://github.com/PaddlePaddle/Paddle/pull/36739), [#36698](https://github.com/PaddlePaddle/Paddle/pull/36698) ) - + - CINN 尚不支持控制流,添加遇控制流跳过的逻辑。([#40812](https://github.com/PaddlePaddle/Paddle/pull/40812)) #### 其他 - 模型量化 - + - 升级量化存储格式,并统一动、静态图量化格式。([#41041](https://github.com/PaddlePaddle/Paddle/pull/41041)) - + - 新增离线量化方法: EMD、Adaround。([#40421](https://github.com/PaddlePaddle/Paddle/pull/40421), [#38460](https://github.com/PaddlePaddle/Paddle/pull/38460)) - + - 支持更多 op 适配模 op 量化。([#40083](https://github.com/PaddlePaddle/Paddle/pull/40083)) - - - 支持控制流中的OP量化。([#37498](https://github.com/PaddlePaddle/Paddle/pull/37498)) - + + - 支持控制流中的OP量化。([#37498](https://github.com/PaddlePaddle/Paddle/pull/37498)) + - 新增支持matmul_v2 OP的量化。([#36469](https://github.com/PaddlePaddle/Paddle/pull/36469)) - + - 新增支持量化后的 matmul_v2 在 TensorRT 上的推理。([#36594](https://github.com/PaddlePaddle/Paddle/pull/36594)) - 显存优化 - - - 实现多 stream 安全 Allocator,支持在多 stream 异步计算场景下安全高效地使用显存。([#37290](https://github.com/PaddlePaddle/Paddle/pull/37290)) - - - 新增运行时显存监控模块(paddle.device.cuda.max_memory_allocated, paddle.device.cuda.max_memory_reserved, paddle.device.cuda.memory_allocated and paddle.device.cuda.memory_reserved),支持高性能地实时统计显存数据。([#38657](https://github.com/PaddlePaddle/Paddle/pull/38657)) - - - 实现 CPU-GPU 统一内存寻址(CUDA Managed Memory),支持在显存受限场景下训练超大模型。([#39075](https://github.com/PaddlePaddle/Paddle/pull/39075)) - - - C++底层新增GetBasePtr接口,用来获取设备接口CUDAMalloc创建的设备地址。([#37978](https://github.com/PaddlePaddle/Paddle/pull/37978)) - - - 减少AutoGrowth Allocator 中 free blocks 的数量,提升显存分配性能。([#35732](https://github.com/PaddlePaddle/Paddle/pull/35732)) - - - 对于 `initializer.Normal` 和 `initializer.Constant` 数据类型是 FP16 的 Tensor 去除多余的 
float32 临时 Tensor 以及 cast,节省2倍显存。 ([#38818](https://github.com/PaddlePaddle/Paddle/pull/38818)) - -- 动态图高阶导数组网测试 - + + - 实现多 stream 安全 Allocator,支持在多 stream 异步计算场景下安全高效地使用显存。([#37290](https://github.com/PaddlePaddle/Paddle/pull/37290)) + + - 新增运行时显存监控模块(paddle.device.cuda.max_memory_allocated, paddle.device.cuda.max_memory_reserved, paddle.device.cuda.memory_allocated and paddle.device.cuda.memory_reserved),支持高性能地实时统计显存数据。([#38657](https://github.com/PaddlePaddle/Paddle/pull/38657)) + + - 实现 CPU-GPU 统一内存寻址(CUDA Managed Memory),支持在显存受限场景下训练超大模型。([#39075](https://github.com/PaddlePaddle/Paddle/pull/39075)) + + - C++底层新增GetBasePtr接口,用来获取设备接口CUDAMalloc创建的设备地址。([#37978](https://github.com/PaddlePaddle/Paddle/pull/37978)) + + - 减少AutoGrowth Allocator 中 free blocks 的数量,提升显存分配性能。([#35732](https://github.com/PaddlePaddle/Paddle/pull/35732)) + + - 对于 `initializer.Normal` 和 `initializer.Constant` 数据类型是 FP16 的 Tensor 去除多余的 float32 临时 Tensor 以及 cast,节省2倍显存。 ([#38818](https://github.com/PaddlePaddle/Paddle/pull/38818)) + +- 动态图高阶导数组网测试 + - 为动态图增加三阶导数组网测试,以及Broadcast情况的测试。 ([#36814](https://github.com/PaddlePaddle/Paddle/pull/36814) , [#37377](https://github.com/PaddlePaddle/Paddle/pull/37377)) -- 自定义 op:支持 ROCm(HIP) 平台进行自定义 op 注册。 ([#36771](https://github.com/PaddlePaddle/Paddle/pull/36771)) +- 自定义 op:支持 ROCm(HIP) 平台进行自定义 op 注册。 ([#36771](https://github.com/PaddlePaddle/Paddle/pull/36771)) - Cost Model:增加基于运行 Profile 的 Cost Model。 ([#35774](https://github.com/PaddlePaddle/Paddle/pull/35774)) -- 提供定制化层 (nn.Layer)的自动稀疏训练支持,让用戶可根据自定义的Prune函数来对其设计的层进行稀疏剪枝。([#40253](https://github.com/PaddlePaddle/Paddle/pull/40253)) +- 提供定制化层 (nn.Layer)的自动稀疏训练支持,让用戶可根据自定义的Prune函数来对其设计的层进行稀疏剪枝。([#40253](https://github.com/PaddlePaddle/Paddle/pull/40253)) -- 新增字符串张量底层数据结构表示,使框架具备字符串张量表示和计算的能力。([#39830](https://github.com/PaddlePaddle/Paddle/pull/39830), [#40992](https://github.com/PaddlePaddle/Paddle/pull/40992)) +- 
新增字符串张量底层数据结构表示,使框架具备字符串张量表示和计算的能力。([#39830](https://github.com/PaddlePaddle/Paddle/pull/39830), [#40992](https://github.com/PaddlePaddle/Paddle/pull/40992)) - 新增或者升级 oneDNN FP32/int8/bfloat16 Kernel,包括: - + - ELU ([#37149](https://github.com/PaddlePaddle/Paddle/pull/37149)) - + - exp ([#38624](https://github.com/PaddlePaddle/Paddle/pull/38624)) - + - stack ([#37002](https://github.com/PaddlePaddle/Paddle/pull/37002)) - + - softplus ([#36382](https://github.com/PaddlePaddle/Paddle/pull/36382)) - + - round ([#39653](https://github.com/PaddlePaddle/Paddle/pull/39653)) - + - shape ([#36033](https://github.com/PaddlePaddle/Paddle/pull/36033)) - + - flatten and flatten2 ([#35892](https://github.com/PaddlePaddle/Paddle/pull/35892)) - + - slice ([#37630](https://github.com/PaddlePaddle/Paddle/pull/37630)) - + - elementwise_mul ([#40546](https://github.com/PaddlePaddle/Paddle/pull/40546)) - + - elementwise_add ([#38176](https://github.com/PaddlePaddle/Paddle/pull/38176)) - + - ementwise_div ([#36158](https://github.com/PaddlePaddle/Paddle/pull/36158)) - + - elementwise_sub ([#35662](https://github.com/PaddlePaddle/Paddle/pull/35662)) - + - roi_align ([#37848](https://github.com/PaddlePaddle/Paddle/pull/37848)) - + - nearest_interp and nearest_interp_v2 ([#37985](https://github.com/PaddlePaddle/Paddle/pull/37985),[#38622](https://github.com/PaddlePaddle/Paddle/pull/38622),[#39490](https://github.com/PaddlePaddle/Paddle/pull/39490)) - + - assembly optimized Adam ([#39158](https://github.com/PaddlePaddle/Paddle/pull/39158)) - + - logsoftmax ([#39793](https://github.com/PaddlePaddle/Paddle/pull/39793)) - + - activation ([#40721](https://github.com/PaddlePaddle/Paddle/pull/40721)) - + - mul ([#38552](https://github.com/PaddlePaddle/Paddle/pull/38552)) - + - mean ([#37104](https://github.com/PaddlePaddle/Paddle/pull/37104)) - + - relu ([#36265](https://github.com/PaddlePaddle/Paddle/pull/36265)) - + - pool2d ([#37081](https://github.com/PaddlePaddle/Paddle/pull/37081)) - + - 
concat ([#35889](https://github.com/PaddlePaddle/Paddle/pull/35889)) - + - conv2d ([#38507](https://github.com/PaddlePaddle/Paddle/pull/38507),[#38938](https://github.com/PaddlePaddle/Paddle/pull/38938),[#36284](https://github.com/PaddlePaddle/Paddle/pull/36284)) - + - LayerNorm ([#40418](https://github.com/PaddlePaddle/Paddle/pull/40418)) - 增加基于 SSD-内存-GPU显存 的3级存储图检索引擎,支持大规模图神经网络训练。([#42472](https://github.com/PaddlePaddle/Paddle/pull/42472), [#42321](https://github.com/PaddlePaddle/Paddle/pull/42321), [#42027](https://github.com/PaddlePaddle/Paddle/pull/42027)) @@ -1509,45 +1509,45 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. #### API -- 为 `paddle.Model`新增支持混合精度训练 O2 模式,即支持原来动/静态图的 Pure FP16 训练模式。([#36441](https://github.com/PaddlePaddle/Paddle/pull/40962441)) +- 为 `paddle.Model`新增支持混合精度训练 O2 模式,即支持原来动/静态图的 Pure FP16 训练模式。([#36441](https://github.com/PaddlePaddle/Paddle/pull/40962441)) -- 为 `paddle.nn.Layer` 支持 self chain 调用。([#36609](https://github.com/PaddlePaddle/Paddle/pull/36609)) +- 为 `paddle.nn.Layer` 支持 self chain 调用。([#36609](https://github.com/PaddlePaddle/Paddle/pull/36609)) -- 为 `paddle.nn.Layer`的`to`方法添加`is_distributed`属性的设置,保证网络参数转换前后分布式属性保持一致。([#36221](https://github.com/PaddlePaddle/Paddle/pull/36221)) +- 为 `paddle.nn.Layer`的`to`方法添加`is_distributed`属性的设置,保证网络参数转换前后分布式属性保持一致。([#36221](https://github.com/PaddlePaddle/Paddle/pull/36221)) -- 完善 `paddle.nn.Layer`的`to` 方法的参数转换逻辑,降低转换过程占用的峰值显存,提高转换成功率。([#36862](https://github.com/PaddlePaddle/Paddle/pull/36862)) +- 完善 `paddle.nn.Layer`的`to` 方法的参数转换逻辑,降低转换过程占用的峰值显存,提高转换成功率。([#36862](https://github.com/PaddlePaddle/Paddle/pull/36862)) -- 为 `paddle.incubate.graph_send_recv`支持设置输出 Tensor 的 shape,有利于减少实际计算过程的显存占用。([#40509](https://github.com/PaddlePaddle/Paddle/pull/40509)) +- 为 `paddle.incubate.graph_send_recv`支持设置输出 Tensor 的 shape,有利于减少实际计算过程的显存占用。([#40509](https://github.com/PaddlePaddle/Paddle/pull/40509)) -- 为 
`paddle.incubate.segment_sum`、`segment_mean`、`segment_max`、`segment_min` 新增 int32、int64 数据类型支持。([#40577](https://github.com/PaddlePaddle/Paddle/pull/40577)) +- 为 `paddle.incubate.segment_sum`、`segment_mean`、`segment_max`、`segment_min` 新增 int32、int64 数据类型支持。([#40577](https://github.com/PaddlePaddle/Paddle/pull/40577)) -- 为 transpose op 新增 bool 类型支持。([#35886](https://github.com/PaddlePaddle/Paddle/pull/35886)) +- 为 transpose op 新增 bool 类型支持。([#35886](https://github.com/PaddlePaddle/Paddle/pull/35886)) -- 将 `paddle.mm` 底层算子从 matmul 切换到matmul_v2。 ([#35770](https://github.com/PaddlePaddle/Paddle/pull/35770)) +- 将 `paddle.mm` 底层算子从 matmul 切换到matmul_v2。 ([#35770](https://github.com/PaddlePaddle/Paddle/pull/35770)) -- 为 `paddle.einsum` 支持静态图模式调用,支持未知 shape。 ([#40360](https://github.com/PaddlePaddle/Paddle/pull/40360)) +- 为 `paddle.einsum` 支持静态图模式调用,支持未知 shape。 ([#40360](https://github.com/PaddlePaddle/Paddle/pull/40360)) -- 为 `paddle.nn.functional.margin_cross_entropy` 和 `paddle.nn.functional.class_center_sample` 支持数据并行。([#39852](https://github.com/PaddlePaddle/Paddle/pull/39852)) +- 为 `paddle.nn.functional.margin_cross_entropy` 和 `paddle.nn.functional.class_center_sample` 支持数据并行。([#39852](https://github.com/PaddlePaddle/Paddle/pull/39852)) - 为 `paddle.nn.functional.grid_sample`支持形状为[1]的输入。([#36183](https://github.com/PaddlePaddle/Paddle/pull/36183)) -- 为 `paddle.nn.PRelu` 支持 `NHWC` 数据格式。([#37019](https://github.com/PaddlePaddle/Paddle/pull/37019)) +- 为 `paddle.nn.PRelu` 支持 `NHWC` 数据格式。([#37019](https://github.com/PaddlePaddle/Paddle/pull/37019)) -- 为 `paddle.nn.functional.class_center_sample` 支持使用 `paddle.seed` 固定随机状态。([#38248](https://github.com/PaddlePaddle/Paddle/pull/38248)) +- 为 `paddle.nn.functional.class_center_sample` 支持使用 `paddle.seed` 固定随机状态。([#38248](https://github.com/PaddlePaddle/Paddle/pull/38248)) - 为 `paddle.fft` 下所有 API 新增 ROCM 后端支持,并优化 CUFFT 后端报错信息。([#36415](https://github.com/PaddlePaddle/Paddle/pull/36415), 
[#36114](https://github.com/PaddlePaddle/Paddle/pull/36114/files)) -- 为 `Tensor.getitem` 增加对切片部分维度为0的功能支持,即允许切片索引结果为空。([#37313](https://github.com/PaddlePaddle/Paddle/pull/37313)) +- 为 `Tensor.getitem` 增加对切片部分维度为0的功能支持,即允许切片索引结果为空。([#37313](https://github.com/PaddlePaddle/Paddle/pull/37313)) -- 为 `Tensor.setitem` 支持 int 和 bool 类型 Tensor 使用 bool 索引。([#37761](https://github.com/PaddlePaddle/Paddle/pull/37761)) +- 为 `Tensor.setitem` 支持 int 和 bool 类型 Tensor 使用 bool 索引。([#37761](https://github.com/PaddlePaddle/Paddle/pull/37761)) -- 为 `paddle.nn.functional.interpolate` 支持 nearest 模式时输入 shape 为 5D。([#38868](https://github.com/PaddlePaddle/Paddle/pull/38868)) +- 为 `paddle.nn.functional.interpolate` 支持 nearest 模式时输入 shape 为 5D。([#38868](https://github.com/PaddlePaddle/Paddle/pull/38868)) -- 为 `paddle.nn.Embedding`、`paddle.gather` 增加 int16 支持。([#40964](https://github.com/PaddlePaddle/Paddle/pull/40964), [#40052](https://github.com/PaddlePaddle/Paddle/pull/40052)) +- 为 `paddle.nn.Embedding`、`paddle.gather` 增加 int16 支持。([#40964](https://github.com/PaddlePaddle/Paddle/pull/40964), [#40052](https://github.com/PaddlePaddle/Paddle/pull/40052)) -- 为 `paddle.distributed.spawn`添加 CPU 单机数据并行。 ([#35745](https://github.com/PaddlePaddle/Paddle/pull/35745), [#36758](https://github.com/PaddlePaddle/Paddle/pull/36758), [#36637](https://github.com/PaddlePaddle/Paddle/pull/36637)) +- 为 `paddle.distributed.spawn`添加 CPU 单机数据并行。 ([#35745](https://github.com/PaddlePaddle/Paddle/pull/35745), [#36758](https://github.com/PaddlePaddle/Paddle/pull/36758), [#36637](https://github.com/PaddlePaddle/Paddle/pull/36637)) -- 新增`depthwise_conv2d`MKLDNN 算子。([#38484](https://github.com/PaddlePaddle/Paddle/pull/38484)) +- 新增`depthwise_conv2d`MKLDNN 算子。([#38484](https://github.com/PaddlePaddle/Paddle/pull/38484)) - 为`paddle.abs`、`paddle.transpose`、`paddle.squeeze`、`paddle.unsqueeze`、 `paddle.matmul`、`paddle.full` 静态图数据类型检测中增加复数类型。([#40113](https://github.com/PaddlePaddle/Paddle/pull/40113)) @@ -1555,7 +1555,7 @@ 
AssertionError: elu_ only support alpha >= 0, please use elu instead. - 为 `paddle.autograd.PyLayer` 增加检查 inplace 策略下,输入叶子节点的 Tensor 的检查报错机制。([#37931](https://github.com/PaddlePaddle/Paddle/pull/37931)) -- 为 `paddle.autograd.PyLayer` 支持 HIP 库。([#38184](https://github.com/PaddlePaddle/Paddle/pull/38184)) +- 为 `paddle.autograd.PyLayer` 支持 HIP 库。([#38184](https://github.com/PaddlePaddle/Paddle/pull/38184)) - 为 `paddle.take_along_axis`、`paddle.put_along_axis` 支持更多 size 的输入,允许 index 矩阵的 shape size 大于 arr 矩阵的 shape size。 ([#39072](https://github.com/PaddlePaddle/Paddle/pull/39072)) @@ -1563,17 +1563,17 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 支持 API `paddle.nn.Pad2D`在 tuple 格式的 pad 输入。([#35985](https://github.com/PaddlePaddle/Paddle/pull/35985/files)) -- 新增 `paddle.distributed.InMemoryDataset` 中 tdm_sample API 以支持 TDM 算法中的采样操作。([#37044](https://github.com/PaddlePaddle/Paddle/pull/37044)) +- 新增 `paddle.distributed.InMemoryDataset` 中 tdm_sample API 以支持 TDM 算法中的采样操作。([#37044](https://github.com/PaddlePaddle/Paddle/pull/37044)) - 新增对于`paddle.jit.save`的 Pre-saving Hooks 机制。([#38186](https://github.com/PaddlePaddle/Paddle/pull/38186)) - 新增高阶微分相关 API: - - - `elementwise_add` 增加三阶 Kernel,支持三阶微分的计算。([#36508](https://github.com/PaddlePaddle/Paddle/pull/36508), [#36618](https://github.com/PaddlePaddle/Paddle/pull/36618)) - + + - `elementwise_add` 增加三阶 Kernel,支持三阶微分的计算。([#36508](https://github.com/PaddlePaddle/Paddle/pull/36508), [#36618](https://github.com/PaddlePaddle/Paddle/pull/36618)) + - `matmul_v2` 增加三阶 Kernel,支持三阶微分的计算。([#36459](https://github.com/PaddlePaddle/Paddle/pull/36459)) - - - `elementwise_mul` 增加三阶 Kernel,支持三阶微分的计算。 ([#37152](https://github.com/PaddlePaddle/Paddle/pull/37547)) + + - `elementwise_mul` 增加三阶 Kernel,支持三阶微分的计算。 ([#37152](https://github.com/PaddlePaddle/Paddle/pull/37547)) - 完善`paddle.amp.GradScaler`调用 check_finite_and_unscale op 的逻辑,消除该处创建 bool 变量所引入的 
cudaMemcpy。([#37770](https://github.com/PaddlePaddle/Paddle/pull/37770)) @@ -1588,62 +1588,62 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. #### IR(Intermediate Representation) - 动态图转静态图 - - - 优化动转静下 `ProgramCache.last` 接口行为,使其返回最近使用的 Program,而非最后生成的Program。([#39541](https://github.com/PaddlePaddle/Paddle/pull/39541)) - - - 优化动转静下 `paddle.reshape` API 的报错信息,新增推荐用法提示。([#40599](https://github.com/PaddlePaddle/Paddle/pull/40599)) - - - 优化动转静代码转写时 `is_api_in_module` 函数中异常捕获类型。([#40243](https://github.com/PaddlePaddle/Paddle/pull/40243)) - - - 优化动转静模块报错提示,默认隐藏warning信息。([#39730](https://github.com/PaddlePaddle/Paddle/pull/https://github.com/PaddlePaddle/Paddle/pull/39730)) - - - 增加动转静对于type hint语法的支持,提高变量类型分析的准确性。([#39572](https://github.com/PaddlePaddle/Paddle/pull/39572)) - - - 优化 `paddle.cond` 功能,允许bool、int等基本类型支持值相等。([#37888](https://github.com/PaddlePaddle/Paddle/pull/37888)) - - - 优化动转静`@to_static` 装饰普通函数时,允许切换train/eval模式。([#37383](https://github.com/PaddlePaddle/Paddle/pull/37383)) - - - 优化动转静报错栈,突出用户相关代码,减少框架冗余报错栈。([#36741](https://github.com/PaddlePaddle/Paddle/pull/36741)) - - - 移除`paddle.cond` 返回值中 `no_value` 占位符。([#36513](https://github.com/PaddlePaddle/Paddle/pull/36513)、[#36826](https://github.com/PaddlePaddle/Paddle/pull/36826)) - - - 为动转静 run_program op 适配新动态图模式。([#40198](https://github.com/PaddlePaddle/Paddle/pull/40198), [#40355](https://github.com/PaddlePaddle/Paddle/pull/40355)) - - - 新增对于 zip 语法的检查。 ([#37846](https://github.com/PaddlePaddle/Paddle/pull/https://github.com/PaddlePaddle/Paddle/pull/37846)) - + + - 优化动转静下 `ProgramCache.last` 接口行为,使其返回最近使用的 Program,而非最后生成的Program。([#39541](https://github.com/PaddlePaddle/Paddle/pull/39541)) + + - 优化动转静下 `paddle.reshape` API 的报错信息,新增推荐用法提示。([#40599](https://github.com/PaddlePaddle/Paddle/pull/40599)) + + - 优化动转静代码转写时 `is_api_in_module` 函数中异常捕获类型。([#40243](https://github.com/PaddlePaddle/Paddle/pull/40243)) + + - 
优化动转静模块报错提示,默认隐藏warning信息。([#39730](https://github.com/PaddlePaddle/Paddle/pull/https://github.com/PaddlePaddle/Paddle/pull/39730)) + + - 增加动转静对于type hint语法的支持,提高变量类型分析的准确性。([#39572](https://github.com/PaddlePaddle/Paddle/pull/39572)) + + - 优化 `paddle.cond` 功能,允许bool、int等基本类型支持值相等。([#37888](https://github.com/PaddlePaddle/Paddle/pull/37888)) + + - 优化动转静`@to_static` 装饰普通函数时,允许切换train/eval模式。([#37383](https://github.com/PaddlePaddle/Paddle/pull/37383)) + + - 优化动转静报错栈,突出用户相关代码,减少框架冗余报错栈。([#36741](https://github.com/PaddlePaddle/Paddle/pull/36741)) + + - 移除`paddle.cond` 返回值中 `no_value` 占位符。([#36513](https://github.com/PaddlePaddle/Paddle/pull/36513)、[#36826](https://github.com/PaddlePaddle/Paddle/pull/36826)) + + - 为动转静 run_program op 适配新动态图模式。([#40198](https://github.com/PaddlePaddle/Paddle/pull/40198), [#40355](https://github.com/PaddlePaddle/Paddle/pull/40355)) + + - 新增对于 zip 语法的检查。 ([#37846](https://github.com/PaddlePaddle/Paddle/pull/https://github.com/PaddlePaddle/Paddle/pull/37846)) + - 修复 `paddle.signal.frame`、`paddle.signal.stft`、`paddle.signal.istft` 因维度和类型判断错误导致的动转静失败问题。([#40113](https://github.com/PaddlePaddle/Paddle/pull/40113)) - + - 为 mean、pad3d ops 新增注册复数类型 Kernel。([#40113](https://github.com/PaddlePaddle/Paddle/pull/40113)) #### 混合精度训练 -- 为 amp 添加 GPU Compute Capability 环境检查,对无法产生训练加速效果的 GPU 环境添加使用警告。([#38086](https://github.com/PaddlePaddle/Paddle/pull/38086)) +- 为 amp 添加 GPU Compute Capability 环境检查,对无法产生训练加速效果的 GPU 环境添加使用警告。([#38086](https://github.com/PaddlePaddle/Paddle/pull/38086)) -- 添加`paddle.amp.decorate`与`paddle.DataParallel`同时使用时调用顺序的检查。([#38785](https://github.com/PaddlePaddle/Paddle/pull/38785)) +- 添加`paddle.amp.decorate`与`paddle.DataParallel`同时使用时调用顺序的检查。([#38785](https://github.com/PaddlePaddle/Paddle/pull/38785)) #### 分布式训练 - 分布式训练基础功能 - + - 优化 Fleet API 和 DistributedStrategy 配置以使用动态图并行功能,提升动态图易用性。([#40408](https://github.com/PaddlePaddle/Paddle/pull/40408)) - - - 优化动态图混合并行 HybridParallelClipGrad 策略,支持4D混合并行 + Pure FP16 
训练。([#36237](https://github.com/PaddlePaddle/Paddle/pull/36237), [#36555](https://github.com/PaddlePaddle/Paddle/pull/36555)) - - - 重构动态图数据并行策略,以支持新动态图和新通信库功能。([#40389](https://github.com/PaddlePaddle/Paddle/pull/40389), [#40593](https://github.com/PaddlePaddle/Paddle/pull/40593), [#40836](https://github.com/PaddlePaddle/Paddle/pull/40836), [#41119](https://github.com/PaddlePaddle/Paddle/pull/41119), [#41413](https://github.com/PaddlePaddle/Paddle/pull/41413), [#39987](https://github.com/PaddlePaddle/Paddle/pull/39987)) - - - 为 fused_attention op 支持分布式张量模型并行。([#40101](https://github.com/PaddlePaddle/Paddle/pull/40101)) - - - 为 fused_feedforward op 支持分布式张量模型并行。([#40160](https://github.com/PaddlePaddle/Paddle/pull/40160)) + + - 优化动态图混合并行 HybridParallelClipGrad 策略,支持4D混合并行 + Pure FP16 训练。([#36237](https://github.com/PaddlePaddle/Paddle/pull/36237), [#36555](https://github.com/PaddlePaddle/Paddle/pull/36555)) + + - 重构动态图数据并行策略,以支持新动态图和新通信库功能。([#40389](https://github.com/PaddlePaddle/Paddle/pull/40389), [#40593](https://github.com/PaddlePaddle/Paddle/pull/40593), [#40836](https://github.com/PaddlePaddle/Paddle/pull/40836), [#41119](https://github.com/PaddlePaddle/Paddle/pull/41119), [#41413](https://github.com/PaddlePaddle/Paddle/pull/41413), [#39987](https://github.com/PaddlePaddle/Paddle/pull/39987)) + + - 为 fused_attention op 支持分布式张量模型并行。([#40101](https://github.com/PaddlePaddle/Paddle/pull/40101)) + + - 为 fused_feedforward op 支持分布式张量模型并行。([#40160](https://github.com/PaddlePaddle/Paddle/pull/40160)) - 图检索引擎 - - - 优化图引擎的图采样接口返回的数据格式,采样速度提升3倍。([#37315](https://github.com/PaddlePaddle/Paddle/pull/37315)) - - - 减少图引擎线程量以提升性能。([#37098](https://github.com/PaddlePaddle/Paddle/pull/37098)) - + + - 优化图引擎的图采样接口返回的数据格式,采样速度提升3倍。([#37315](https://github.com/PaddlePaddle/Paddle/pull/37315)) + + - 减少图引擎线程量以提升性能。([#37098](https://github.com/PaddlePaddle/Paddle/pull/37098)) + - 优化图引擎数据传输以提升性能。([#37341](https://github.com/PaddlePaddle/Paddle/pull/37341)) - - - 利用模型中 embedding op 
的拓扑关系,优化 embedding op 的合并逻辑以提升性能。[(#35942)](https://github.com/PaddlePaddle/Paddle/pull/35942) + + - 利用模型中 embedding op 的拓扑关系,优化 embedding op 的合并逻辑以提升性能。[(#35942)](https://github.com/PaddlePaddle/Paddle/pull/35942) - 通信库:重构通信库,提升通信库的易扩展性和二次开发性,支持异构通信。 ([#41398](https://github.com/PaddlePaddle/Paddle/pull/41398), [#39720](https://github.com/PaddlePaddle/Paddle/pull/39720), [#40911](https://github.com/PaddlePaddle/Paddle/pull/40911), [#40579](https://github.com/PaddlePaddle/Paddle/pull/40579), [#40629](https://github.com/PaddlePaddle/Paddle/pull/40629), [#40437](https://github.com/PaddlePaddle/Paddle/pull/40437), [#40430](https://github.com/PaddlePaddle/Paddle/pull/40430), [#40228](https://github.com/PaddlePaddle/Paddle/pull/40228), [#40181](https://github.com/PaddlePaddle/Paddle/pull/40181), [#40100](https://github.com/PaddlePaddle/Paddle/pull/40100), [#40097](https://github.com/PaddlePaddle/Paddle/pull/40097), [#39892](https://github.com/PaddlePaddle/Paddle/pull/39892), [#39384](https://github.com/PaddlePaddle/Paddle/pull/39384), [#39737](https://github.com/PaddlePaddle/Paddle/pull/39737), [#40040](https://github.com/PaddlePaddle/Paddle/pull/40040)) @@ -1657,22 +1657,22 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### 自定义算子 -- 增强 C++自定义算子机制对二阶反向算子编写功能,支持为二阶反向算子的梯度输入变量添加后缀作为输出使用。([#41781](https://github.com/PaddlePaddle/Paddle/pull/41781)) +- 增强 C++自定义算子机制对二阶反向算子编写功能,支持为二阶反向算子的梯度输入变量添加后缀作为输出使用。([#41781](https://github.com/PaddlePaddle/Paddle/pull/41781)) -- 移除 Tensor API 成员方法中对废弃的枚举类型 PlaceType 的使用,进行相应兼容处理,并添加 deprecated warning 提示。([#41882](https://github.com/PaddlePaddle/Paddle/pull/41882)) +- 移除 Tensor API 成员方法中对废弃的枚举类型 PlaceType 的使用,进行相应兼容处理,并添加 deprecated warning 提示。([#41882](https://github.com/PaddlePaddle/Paddle/pull/41882)) -- 为原 Tensor API 的一系列废弃接口,包括不完整构造函数、reshape、mutable_data、copy_to 方法添加 deprecated warning 提示。([#41882](https://github.com/PaddlePaddle/Paddle/pull/41882)) +- 为原 Tensor API 的一系列废弃接口,包括不完整构造函数、reshape、mutable_data、copy_to 方法添加 deprecated warning 提示。([#41882](https://github.com/PaddlePaddle/Paddle/pull/41882)) #### 其他 - 报错调试优化 - - - 优化 cross_entropy op 对 `label` 的边界检查报错信息。([#40001](https://github.com/PaddlePaddle/Paddle/pull/40001)) - - - 为动态图添加 op 执行时`infer_shape`和`compute`方法的 profile record,用于在 timeline 中展示其开销。([#39023](https://github.com/PaddlePaddle/Paddle/pull/39023)) - - - 替换了 Windows 下容易出现未知异常的 `pybind::index_error` 报错提示。([#40538](https://github.com/PaddlePaddle/Paddle/pull/40538)) - + + - 优化 cross_entropy op 对 `label` 的边界检查报错信息。([#40001](https://github.com/PaddlePaddle/Paddle/pull/40001)) + + - 为动态图添加 op 执行时`infer_shape`和`compute`方法的 profile record,用于在 timeline 中展示其开销。([#39023](https://github.com/PaddlePaddle/Paddle/pull/39023)) + + - 替换了 Windows 下容易出现未知异常的 `pybind::index_error` 报错提示。([#40538](https://github.com/PaddlePaddle/Paddle/pull/40538)) + - 添加用户 scatter op 越界检查的报错信息。([#37429](https://github.com/PaddlePaddle/Paddle/pull/37429)) - 下载工具:针对`paddle.utils.download.get_path_from_url`中解压含多文件目录速度慢的问题,将原先循环遍历目录下文件逐一解压的方式替换为在目录上调用 extractall 一次解压的方式,解压速度大幅提升。([#37311](https://github.com/PaddlePaddle/Paddle/pull/37311)) @@ -1683,53 +1683,53 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### 分布式训练 -- 混合并行优化器 sharding 支持 optimize_cast 优化,将前反向参数 cast 移到优化器阶段,性能提升7%。([#35878](https://github.com/PaddlePaddle/Paddle/pull/35878)) +- 混合并行优化器 sharding 支持 optimize_cast 优化,将前反向参数 cast 移到优化器阶段,性能提升7%。([#35878](https://github.com/PaddlePaddle/Paddle/pull/35878)) - GPUPS 优化:支持梯度 fuse allreduce 训练,训练提升20%。 ([#35131](https://github.com/PaddlePaddle/Paddle/pull/35131)) -- GPUPS 优化:dump CPU 优化提速3.21倍。 ([#40068](https://github.com/PaddlePaddle/Paddle/pull/40068)) +- GPUPS 优化:dump CPU 优化提速3.21倍。 ([#40068](https://github.com/PaddlePaddle/Paddle/pull/40068)) -- CPU 参数服务器流式训练优化:支持稀疏参数统计量自动统计、稀疏参数增量保存等功能,训练性能提升20%。([#36465](https://github.com/PaddlePaddle/Paddle/pull/36465), [#36601](https://github.com/PaddlePaddle/Paddle/pull/36601), [#36734](https://github.com/PaddlePaddle/Paddle/pull/36734), [#36909](https://github.com/PaddlePaddle/Paddle/pull/36909), [#36943](https://github.com/PaddlePaddle/Paddle/pull/36943), [#37181](https://github.com/PaddlePaddle/Paddle/pull/37181), [#37194](https://github.com/PaddlePaddle/Paddle/pull/37194), [#37515](https://github.com/PaddlePaddle/Paddle/pull/37515), [#37626](https://github.com/PaddlePaddle/Paddle/pull/37626), [#37995](https://github.com/PaddlePaddle/Paddle/pull/37995), [#38582](https://github.com/PaddlePaddle/Paddle/pull/38582), [#39250](https://github.com/PaddlePaddle/Paddle/pull/39250), [#40762](https://github.com/PaddlePaddle/Paddle/pull/40762), [#41234](https://github.com/PaddlePaddle/Paddle/pull/41234), [#41320](https://github.com/PaddlePaddle/Paddle/pull/41320), [#41400](https://github.com/PaddlePaddle/Paddle/pull/41400)) +- CPU 参数服务器流式训练优化:支持稀疏参数统计量自动统计、稀疏参数增量保存等功能,训练性能提升20%。([#36465](https://github.com/PaddlePaddle/Paddle/pull/36465), [#36601](https://github.com/PaddlePaddle/Paddle/pull/36601), [#36734](https://github.com/PaddlePaddle/Paddle/pull/36734), [#36909](https://github.com/PaddlePaddle/Paddle/pull/36909), [#36943](https://github.com/PaddlePaddle/Paddle/pull/36943), 
[#37181](https://github.com/PaddlePaddle/Paddle/pull/37181), [#37194](https://github.com/PaddlePaddle/Paddle/pull/37194), [#37515](https://github.com/PaddlePaddle/Paddle/pull/37515), [#37626](https://github.com/PaddlePaddle/Paddle/pull/37626), [#37995](https://github.com/PaddlePaddle/Paddle/pull/37995), [#38582](https://github.com/PaddlePaddle/Paddle/pull/38582), [#39250](https://github.com/PaddlePaddle/Paddle/pull/39250), [#40762](https://github.com/PaddlePaddle/Paddle/pull/40762), [#41234](https://github.com/PaddlePaddle/Paddle/pull/41234), [#41320](https://github.com/PaddlePaddle/Paddle/pull/41320), [#41400](https://github.com/PaddlePaddle/Paddle/pull/41400)) #### 算子优化 -- 优化 `FasterTokenizer` 性能,性能与优化前相比提升10%。 ([#36701](https://github.com/PaddlePaddle/Paddle/pull/36701)) +- 优化 `FasterTokenizer` 性能,性能与优化前相比提升10%。 ([#36701](https://github.com/PaddlePaddle/Paddle/pull/36701)) -- 优化 `index_select` 反向计算,性能较优化前有3.7~25.2倍提升。([#37055](https://github.com/PaddlePaddle/Paddle/pull/37055)) +- 优化 `index_select` 反向计算,性能较优化前有3.7~25.2倍提升。([#37055](https://github.com/PaddlePaddle/Paddle/pull/37055)) -- 优化 `paddle.nn.ClipByGlobalNorm` 的性能,以10*10的 `paddle.nn.Linear` 为例,性能与优化前相比提升30%左右。 ([#38209](https://github.com/PaddlePaddle/Paddle/pull/38209)) +- 优化 `paddle.nn.ClipByGlobalNorm` 的性能,以10*10的 `paddle.nn.Linear` 为例,性能与优化前相比提升30%左右。 ([#38209](https://github.com/PaddlePaddle/Paddle/pull/38209)) -- 优化 `pnorm` 在 `axis` 维度极大或极小情况下的性能,前向速度提升31~96倍,反向速度提升1.1~19倍。([#37685](https://github.com/PaddlePaddle/Paddle/pull/37685), [#38215](https://github.com/PaddlePaddle/Paddle/pull/38215), [#39011](https://github.com/PaddlePaddle/Paddle/pull/39011)) +- 优化 `pnorm` 在 `axis` 维度极大或极小情况下的性能,前向速度提升31~96倍,反向速度提升1.1~19倍。([#37685](https://github.com/PaddlePaddle/Paddle/pull/37685), [#38215](https://github.com/PaddlePaddle/Paddle/pull/38215), [#39011](https://github.com/PaddlePaddle/Paddle/pull/39011)) - 优化 `softmax` 前、反向性能,对于 `axis!=-1` 
的配置加速比为2倍左右。([#38602](https://github.com/PaddlePaddle/Paddle/pull/38602), [#38609](https://github.com/PaddlePaddle/Paddle/pull/38609), [#32387](https://github.com/PaddlePaddle/Paddle/pull/32387), [#37927](https://github.com/PaddlePaddle/Paddle/pull/37927/files)) -- 优化 `log_softmax` 前、反向性能,对于 `axis!=-1`的配置加速比为6~20倍左右。([#38992](https://github.com/PaddlePaddle/Paddle/pull/38992), [#40612](https://github.com/PaddlePaddle/Paddle/pull/40612)) +- 优化 `log_softmax` 前、反向性能,对于 `axis!=-1`的配置加速比为6~20倍左右。([#38992](https://github.com/PaddlePaddle/Paddle/pull/38992), [#40612](https://github.com/PaddlePaddle/Paddle/pull/40612)) -- 优化 `softmax_with_cross_entropy` 前、反向性能,对于 `hard_label` 的配置加速比为1.3倍左右。([#39553](https://github.com/PaddlePaddle/Paddle/pull/39553), [#40424](https://github.com/PaddlePaddle/Paddle/pull/40424), [#40643](https://github.com/PaddlePaddle/Paddle/pull/40643)) +- 优化 `softmax_with_cross_entropy` 前、反向性能,对于 `hard_label` 的配置加速比为1.3倍左右。([#39553](https://github.com/PaddlePaddle/Paddle/pull/39553), [#40424](https://github.com/PaddlePaddle/Paddle/pull/40424), [#40643](https://github.com/PaddlePaddle/Paddle/pull/40643)) -- 优化 `top_k` 性能,对于一维且 `k` 较大时(k=5000)的配置加速比为22倍以上。([#40941](https://github.com/PaddlePaddle/Paddle/pull/40941)) +- 优化 `top_k` 性能,对于一维且 `k` 较大时(k=5000)的配置加速比为22倍以上。([#40941](https://github.com/PaddlePaddle/Paddle/pull/40941)) -- 优化 `elementwise_mul` 反向计算,较优化前有1.85~12.16倍性能提升。([#37728](https://github.com/PaddlePaddle/Paddle/pull/37728)) +- 优化 `elementwise_mul` 反向计算,较优化前有1.85~12.16倍性能提升。([#37728](https://github.com/PaddlePaddle/Paddle/pull/37728)) -- 优化 `elementwise_min` 反向和 `elementwise_max` 反向,较优化前打平或有1.05~18.75倍性能提升。([#38236](https://github.com/PaddlePaddle/Paddle/pull/38236), [#37906](https://github.com/PaddlePaddle/Paddle/pull/37906)) +- 优化 `elementwise_min` 反向和 `elementwise_max` 反向,较优化前打平或有1.05~18.75倍性能提升。([#38236](https://github.com/PaddlePaddle/Paddle/pull/38236), [#37906](https://github.com/PaddlePaddle/Paddle/pull/37906)) -- 优化 `nearest_interp` 
前向和反向计算,前向较优化前性能有1.5~2.3倍提升;反向性能较优化前有60%~1.8倍提升。([#38528](https://github.com/PaddlePaddle/Paddle/pull/38528), [#39067](https://github.com/PaddlePaddle/Paddle/pull/39067)) +- 优化 `nearest_interp` 前向和反向计算,前向较优化前性能有1.5~2.3倍提升;反向性能较优化前有60%~1.8倍提升。([#38528](https://github.com/PaddlePaddle/Paddle/pull/38528), [#39067](https://github.com/PaddlePaddle/Paddle/pull/39067)) -- 优化 `bilinear_interp` 前向和反向计算,前向较优化前性能有0.4~2.3倍提升;反向性能较优化前有10%~30%提升。([#39243](https://github.com/PaddlePaddle/Paddle/pull/39243), [#39423](https://github.com/PaddlePaddle/Paddle/pull/39423)) +- 优化 `bilinear_interp` 前向和反向计算,前向较优化前性能有0.4~2.3倍提升;反向性能较优化前有10%~30%提升。([#39243](https://github.com/PaddlePaddle/Paddle/pull/39243), [#39423](https://github.com/PaddlePaddle/Paddle/pull/39423)) -- 优化 `dropout` 前向和反向计算,性能提升约20%。([#39795](https://github.com/PaddlePaddle/Paddle/pull/39795), [#38859](https://github.com/PaddlePaddle/Paddle/pull/38859), [#38279](https://github.com/PaddlePaddle/Paddle/pull/38279), [#40053](https://github.com/PaddlePaddle/Paddle/pull/40053)) +- 优化 `dropout` 前向和反向计算,性能提升约20%。([#39795](https://github.com/PaddlePaddle/Paddle/pull/39795), [#38859](https://github.com/PaddlePaddle/Paddle/pull/38859), [#38279](https://github.com/PaddlePaddle/Paddle/pull/38279), [#40053](https://github.com/PaddlePaddle/Paddle/pull/40053)) -- 优化 `grid_sampler`前向和反向计算,前向较优化前性能有10%~30%提升;反向性能较优化前有10%~60%提升。([#39751](https://github.com/PaddlePaddle/Paddle/pull/39751)) +- 优化 `grid_sampler`前向和反向计算,前向较优化前性能有10%~30%提升;反向性能较优化前有10%~60%提升。([#39751](https://github.com/PaddlePaddle/Paddle/pull/39751)) - 优化 `group_norm` 前向和反向计算,前向性能提升1.04~2.35倍,反向性能提升1.12~1.18倍。([#39944](https://github.com/PaddlePaddle/Paddle/pull/39944), [#40657](https://github.com/PaddlePaddle/Paddle/pull/40657), [#39596](https://github.com/PaddlePaddle/Paddle/pull/39596)) -- 优化 `conv1d` 前向和反向计算,前向性能提升1.00~2.01倍,反向性能提升1.01~474.56倍。([#38425](https://github.com/PaddlePaddle/Paddle/pull/38425)) +- 优化 `conv1d` 
前向和反向计算,前向性能提升1.00~2.01倍,反向性能提升1.01~474.56倍。([#38425](https://github.com/PaddlePaddle/Paddle/pull/38425)) -- 优化 `elementwise_div` 反向计算,反向性能提升1.02~29.25倍。([#38044](https://github.com/PaddlePaddle/Paddle/pull/38044)) +- 优化 `elementwise_div` 反向计算,反向性能提升1.02~29.25倍。([#38044](https://github.com/PaddlePaddle/Paddle/pull/38044)) -- 优化 `gelu` 前向和反向计算,前向性能提升1.13~1.43倍,反向性能提升1.10~1.55倍。([#38188](https://github.com/PaddlePaddle/Paddle/pull/38188), [#38263](https://github.com/PaddlePaddle/Paddle/pull/38263)) +- 优化 `gelu` 前向和反向计算,前向性能提升1.13~1.43倍,反向性能提升1.10~1.55倍。([#38188](https://github.com/PaddlePaddle/Paddle/pull/38188), [#38263](https://github.com/PaddlePaddle/Paddle/pull/38263)) -- 优化 `elementwise_sub` 反向计算,反向性能提升1.04~15.64倍。([#37754](https://github.com/PaddlePaddle/Paddle/pull/37754)) +- 优化 `elementwise_sub` 反向计算,反向性能提升1.04~15.64倍。([#37754](https://github.com/PaddlePaddle/Paddle/pull/37754)) - 优化 `flip` 在输入一维数据时前向性能,性能提升100%。([#37825](https://github.com/PaddlePaddle/Paddle/pull/37825)) @@ -1739,15 +1739,15 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 优化 `gelu` FP16 前向和反向计算,前向较优化前提升9%~12%,反向较优化前提升2%~9%。([#38980](https://github.com/PaddlePaddle/Paddle/pull/38980)) -- 移除 `gather_nd`前反向算子中的 CPU -> GPU 显式数据传输操作,移除 `index_select` 前反向算子中的显式同步操作,将 `scatter_nd` 中的 GPU -> GPU 数据传输由同步操作改成异步操作。([#40933](https://github.com/PaddlePaddle/Paddle/pull/40933)) +- 移除 `gather_nd`前反向算子中的 CPU -> GPU 显式数据传输操作,移除 `index_select` 前反向算子中的显式同步操作,将 `scatter_nd` 中的 GPU -> GPU 数据传输由同步操作改成异步操作。([#40933](https://github.com/PaddlePaddle/Paddle/pull/40933)) -- 优化 `Lars optimzier` 计算,优化后 Resnet50 PF16 模型训练性能较优化前提升5.1%。 ([#35652](https://github.com/PaddlePaddle/Paddle/pull/35652), [#35476](https://github.com/PaddlePaddle/Paddle/pull/35476)) +- 优化 `Lars optimzier` 计算,优化后 Resnet50 PF16 模型训练性能较优化前提升5.1%。 ([#35652](https://github.com/PaddlePaddle/Paddle/pull/35652), [#35476](https://github.com/PaddlePaddle/Paddle/pull/35476)) -- 优化 `AvgPool2dGrad` 计算,优化后性能较优化前提升2.6倍。 ([#35389](https://github.com/PaddlePaddle/Paddle/pull/35389)) +- 优化 `AvgPool2dGrad` 计算,优化后性能较优化前提升2.6倍。 ([#35389](https://github.com/PaddlePaddle/Paddle/pull/35389)) -- 优化 `Elementwise` 类计算对于多元输出的功能支持,优化后计算性能较优化前提升最多可达15% 。([#38329](https://github.com/PaddlePaddle/Paddle/pull/38329), [#38410](https://github.com/PaddlePaddle/Paddle/pull/38410)) +- 优化 `Elementwise` 类计算对于多元输出的功能支持,优化后计算性能较优化前提升最多可达15% 。([#38329](https://github.com/PaddlePaddle/Paddle/pull/38329), [#38410](https://github.com/PaddlePaddle/Paddle/pull/38410)) -- 优化 `Categorical`的 `probs`计算,简化计算逻辑,性能提升 4 ~ 5 倍。([#42178](https://github.com/PaddlePaddle/Paddle/pull/42178)) +- 优化 `Categorical`的 `probs`计算,简化计算逻辑,性能提升 4 ~ 5 倍。([#42178](https://github.com/PaddlePaddle/Paddle/pull/42178)) - `paddle.sum` 性能优化,性能相比优化前提升约20%。([#42309](https://github.com/PaddlePaddle/Paddle/pull/42309)) @@ -1773,81 +1773,81 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### API -- 修复 `paddle.sum` 输入参数类型和输出参数类型不一致且 `axis` 轴对应的 reduce 元素个数为1时,输出类型错误问题。([#36123](https://github.com/PaddlePaddle/Paddle/pull/36123)) +- 修复 `paddle.sum` 输入参数类型和输出参数类型不一致且 `axis` 轴对应的 reduce 元素个数为1时,输出类型错误问题。([#36123](https://github.com/PaddlePaddle/Paddle/pull/36123)) - 修复 `paddle.flops` 在 layer 输出类型为 tuple 时的 `AttributeError`。([#38850](https://github.com/PaddlePaddle/Paddle/pull/38850)) -- 修复 `paddle.diag` 因为没有反向 Kernel 而无法传播梯度的问题。([#40447](https://github.com/PaddlePaddle/Paddle/pull/40447)) +- 修复 `paddle.diag` 因为没有反向 Kernel 而无法传播梯度的问题。([#40447](https://github.com/PaddlePaddle/Paddle/pull/40447)) -- 修复 `paddle.sort` 输入存在 NaN 值排序错误。 ([#41070](https://github.com/PaddlePaddle/Paddle/pull/41070)) +- 修复 `paddle.sort` 输入存在 NaN 值排序错误。 ([#41070](https://github.com/PaddlePaddle/Paddle/pull/41070)) -- 修复 `paddle.full_like` 输入存在 Inf 值构建 Tensor 错误。 ([#40232](https://github.com/PaddlePaddle/Paddle/pull/40232)) +- 修复 `paddle.full_like` 输入存在 Inf 值构建 Tensor 错误。 ([#40232](https://github.com/PaddlePaddle/Paddle/pull/40232)) -- 修复 `paddle.strided_slice` 在输入 starts 中数据小于 -rank 时,strided_slice 结果与 slice 不一致的 bug。 ([#39066](https://github.com/PaddlePaddle/Paddle/pull/39066)) +- 修复 `paddle.strided_slice` 在输入 starts 中数据小于 -rank 时,strided_slice 结果与 slice 不一致的 bug。 ([#39066](https://github.com/PaddlePaddle/Paddle/pull/39066)) - 修复 `max_pool` 系列算子在返回 index 时 infer_shape 计算错误的问题,受影响的 API 有 `paddle.nn.functional.max_pool1d/2d/3d`, `paddle.nn.functional.adaptive_max_pool1d/2d/3d`, `paddle.nn.MaxPool1D/2D/3D`, `paddle.nn.AdaptiveMaxPool1D/2D/3D`。([#40139](https://github.com/PaddlePaddle/Paddle/pull/40139)) - 修复 `max_pool` 系列算子返回的 pooling_mask 的 dtype 错误的问题,现在 pooling_mask 的 dtype 为 int32,受影响的 API 有 `paddle.nn.functional.max_pool1d/2d/3d`, `paddle.nn.functional.adaptive_max_pool1d/2d/3d`, `paddle.nn.MaxPool1D/2D/3D`, `paddle.nn.AdaptiveMaxPool1D/2D/3D`。([#39314](https://github.com/PaddlePaddle/Paddle/pull/39314)) -- 修复 `paddle.shape` 
默认存在反向梯度导致计算错误的问题。([#37340](https://github.com/PaddlePaddle/Paddle/pull/37340)) +- 修复 `paddle.shape` 默认存在反向梯度导致计算错误的问题。([#37340](https://github.com/PaddlePaddle/Paddle/pull/37340)) -- 修复 `paddle.nn.Layer` 的 `to` 方法同时转换 dtype 和 place 存在的 bug。([#37007](https://github.com/PaddlePaddle/Paddle/pull/38007)) +- 修复 `paddle.nn.Layer` 的 `to` 方法同时转换 dtype 和 place 存在的 bug。([#37007](https://github.com/PaddlePaddle/Paddle/pull/38007)) -- 修复 `paddle.amp.decorate` 无法对非叶子网络层的参数改写为 FP16 的 bug。([#38402](https://github.com/PaddlePaddle/Paddle/pull/38402)) +- 修复 `paddle.amp.decorate` 无法对非叶子网络层的参数改写为 FP16 的 bug。([#38402](https://github.com/PaddlePaddle/Paddle/pull/38402)) -- 修复 `paddle.amp.decorate` 将 `paddle.nn.BatchNorm1D`、`paddle.nn.BatchNorm2D`、`paddle.nn.BatchNorm3D` 非输入参数改写为 FP16 的 bug。([#38541](https://github.com/PaddlePaddle/Paddle/pull/38541)) +- 修复 `paddle.amp.decorate` 将 `paddle.nn.BatchNorm1D`、`paddle.nn.BatchNorm2D`、`paddle.nn.BatchNorm3D` 非输入参数改写为 FP16 的 bug。([#38541](https://github.com/PaddlePaddle/Paddle/pull/38541)) -- 修复 `paddle.amp.decorate` 将 `paddle.nn.SyncBatchNorm` 非输入参数改写为 FP16 的 bug。([#40943](https://github.com/PaddlePaddle/Paddle/pull/40943)) +- 修复 `paddle.amp.decorate` 将 `paddle.nn.SyncBatchNorm` 非输入参数改写为 FP16 的 bug。([#40943](https://github.com/PaddlePaddle/Paddle/pull/40943)) - 修复 `paddle.nn.Layer.to` 当中多余的 warning。([#36700](https://github.com/PaddlePaddle/Paddle/pull/36700)) -- 修复 `paddle.nn.RNN` 在控制流下使用报错的问题。([#41162](https://github.com/PaddlePaddle/Paddle/pull/41162)) +- 修复 `paddle.nn.RNN` 在控制流下使用报错的问题。([#41162](https://github.com/PaddlePaddle/Paddle/pull/41162)) -- 修复 `paddle.to_tensor` 无法指定 Tensor 的 CUDA Place 的问题。([#39662](https://github.com/PaddlePaddle/Paddle/pull/39662)) +- 修复 `paddle.to_tensor` 无法指定 Tensor 的 CUDA Place 的问题。([#39662](https://github.com/PaddlePaddle/Paddle/pull/39662)) - 修复 `paddle.nn.Identity` 没有公开的问题。([#39615](https://github.com/PaddlePaddle/Paddle/pull/39615)) -- 修复动态图重构后,`fill_` 和 `zero_` inplace API的输入在 CUDAPinned Place上时,输出值不正确的 
bug。([#41229](https://github.com/PaddlePaddle/Paddle/pull/41229)) +- 修复动态图重构后,`fill_` 和 `zero_` inplace API的输入在 CUDAPinned Place上时,输出值不正确的 bug。([#41229](https://github.com/PaddlePaddle/Paddle/pull/41229)) -- 动态图重构后,修复使用 append op 的方式调用 assign op 导致输出 Tensor 的 inplace version 值不正确的bug,修改为使用 `_C_ops` 的方式调用 assign op。([#41118](https://github.com/PaddlePaddle/Paddle/pull/41118)) +- 动态图重构后,修复使用 append op 的方式调用 assign op 导致输出 Tensor 的 inplace version 值不正确的bug,修改为使用 `_C_ops` 的方式调用 assign op。([#41118](https://github.com/PaddlePaddle/Paddle/pull/41118)) -- 移除 `elementwise_add` 三阶 Kernel 中不合理的代码,修复组网过程未初始化问题。 ([#36618](https://github.com/PaddlePaddle/Paddle/pull/36618)) +- 移除 `elementwise_add` 三阶 Kernel 中不合理的代码,修复组网过程未初始化问题。 ([#36618](https://github.com/PaddlePaddle/Paddle/pull/36618)) -- 修复 `conv2d` 执行 cuDNN Kernel 时属性缺失的问题。([#38827](https://github.com/PaddlePaddle/Paddle/pull/38827)) +- 修复 `conv2d` 执行 cuDNN Kernel 时属性缺失的问题。([#38827](https://github.com/PaddlePaddle/Paddle/pull/38827)) -- 修复 `multiclass_nms3` 输出 shape 不正确的问题。([#40059](https://github.com/PaddlePaddle/Paddle/pull/40059)) +- 修复 `multiclass_nms3` 输出 shape 不正确的问题。([#40059](https://github.com/PaddlePaddle/Paddle/pull/40059)) -- 修复 `yolo_box` 输出 shape 不正确的问题。([#40056](https://github.com/PaddlePaddle/Paddle/pull/40056)) +- 修复 `yolo_box` 输出 shape 不正确的问题。([#40056](https://github.com/PaddlePaddle/Paddle/pull/40056)) -- 修复高阶微分 `gradients` 接口在指定 target_grad 时未按预期生效的问题。([#40940](https://github.com/PaddlePaddle/Paddle/pull/40940/)) +- 修复高阶微分 `gradients` 接口在指定 target_grad 时未按预期生效的问题。([#40940](https://github.com/PaddlePaddle/Paddle/pull/40940/)) -- 修复动态图 op`_BatchNormBase` 基类中修改了 default_dtype,导致后续组网参数类型错误的问题,受影响的API有 `paddle.nn.BatchNorm1D`,`paddle.nn.BatchNorm2D`,`paddle.nn.BatchNorm3D`,`paddle.nn.SyncBatchNorm`。具体原因是当 `get_default_dtype() == 'float16'` 时,通过 `set_default_dtype('float32')`修改默认参数数据类型,动态图组网的参数类型是通过 default_dtype 来创建的,因此当默认参数类型被修改后导致后续的组网参数类型错误。 ([#36376](https://github.com/PaddlePaddle/Paddle/pull/36376)) +- 
修复动态图 op`_BatchNormBase` 基类中修改了 default_dtype,导致后续组网参数类型错误的问题,受影响的API有 `paddle.nn.BatchNorm1D`,`paddle.nn.BatchNorm2D`,`paddle.nn.BatchNorm3D`,`paddle.nn.SyncBatchNorm`。具体原因是当 `get_default_dtype() == 'float16'` 时,通过 `set_default_dtype('float32')`修改默认参数数据类型,动态图组网的参数类型是通过 default_dtype 来创建的,因此当默认参数类型被修改后导致后续的组网参数类型错误。 ([#36376](https://github.com/PaddlePaddle/Paddle/pull/36376)) -- 修复 batchnorm op 中,当数据类型为 FP32 ,且数据维度 `dims = 2,data_layout = NHWC` 时,反向 op 内中间变量未定义问题。 ([#37020](https://github.com/PaddlePaddle/Paddle/pull/37020)) +- 修复 batchnorm op 中,当数据类型为 FP32 ,且数据维度 `dims = 2,data_layout = NHWC` 时,反向 op 内中间变量未定义问题。 ([#37020](https://github.com/PaddlePaddle/Paddle/pull/37020)) -- 修复静态图模式下,`paddle.static.nn.prelu` 对于 `NHWC` 输入格式且 `mode==channel` 权重的 shape 错误问题。([#38310](https://github.com/PaddlePaddle/Paddle/pull/38310)) +- 修复静态图模式下,`paddle.static.nn.prelu` 对于 `NHWC` 输入格式且 `mode==channel` 权重的 shape 错误问题。([#38310](https://github.com/PaddlePaddle/Paddle/pull/38310)) -- 修复多机情况下,`paddle.nn.functional.class_center_sample` CUDA 种子设置 bug。([#38815](https://github.com/PaddlePaddle/Paddle/pull/38815)) +- 修复多机情况下,`paddle.nn.functional.class_center_sample` CUDA 种子设置 bug。([#38815](https://github.com/PaddlePaddle/Paddle/pull/38815)) -- 修复 `paddle.nn.functional.one_hot` 在输入不正确参数时,CUDA 版本无法正确报错的问题。([#41335](https://github.com/PaddlePaddle/Paddle/pull/41335)) +- 修复 `paddle.nn.functional.one_hot` 在输入不正确参数时,CUDA 版本无法正确报错的问题。([#41335](https://github.com/PaddlePaddle/Paddle/pull/41335)) -- 修复 DCU 设备上回收显存的 callback 未及时触发导致显存 OOM 的问题。([#40445](https://github.com/PaddlePaddle/Paddle/pull/40445)) +- 修复 DCU 设备上回收显存的 callback 未及时触发导致显存 OOM 的问题。([#40445](https://github.com/PaddlePaddle/Paddle/pull/40445)) -- 修复 `setitem` 索引赋值反向梯度传递异常以及动态图部分场景下 inplace 逻辑处理异常的问题。 ([#37023](https://github.com/PaddlePaddle/Paddle/pull/37023), [#38298](https://github.com/PaddlePaddle/Paddle/pull/38298)) +- 修复 `setitem` 索引赋值反向梯度传递异常以及动态图部分场景下 inplace 逻辑处理异常的问题。 
([#37023](https://github.com/PaddlePaddle/Paddle/pull/37023), [#38298](https://github.com/PaddlePaddle/Paddle/pull/38298)) -- 修复动转静下 Tensor array 使用 Slice 索引异常的问题。([#39251](https://github.com/PaddlePaddle/Paddle/pull/39251)) +- 修复动转静下 Tensor array 使用 Slice 索引异常的问题。([#39251](https://github.com/PaddlePaddle/Paddle/pull/39251)) - 修复 `paddle.Tensor.register_hook` 接口使用时临时变量未析构,从而导致内存或显存泄漏的问题。([#40716](https://github.com/PaddlePaddle/Paddle/pull/40716)) -- 修复 `Tensor.getitem` 当索引是全为 False 的 bool Tensor 时无法取值的问题。([#41297](https://github.com/PaddlePaddle/Paddle/pull/41297)) +- 修复 `Tensor.getitem` 当索引是全为 False 的 bool Tensor 时无法取值的问题。([#41297](https://github.com/PaddlePaddle/Paddle/pull/41297)) -- 修复 `Tensor.getitem` 当索引是 bool scalar Tensor 时无法取值的问题。([#40829](https://github.com/PaddlePaddle/Paddle/pull/40829)) +- 修复 `Tensor.getitem` 当索引是 bool scalar Tensor 时无法取值的问题。([#40829](https://github.com/PaddlePaddle/Paddle/pull/40829)) -- 修复 `paddle.index_select` 在 index 为 0-shape Tensor 时报错的问题。([#41383](https://github.com/PaddlePaddle/Paddle/pull/41383)) +- 修复 `paddle.index_select` 在 index 为 0-shape Tensor 时报错的问题。([#41383](https://github.com/PaddlePaddle/Paddle/pull/41383)) -- 修复 `paddle.index_select`,`paddle.index_sample` 申请的 GPU 线程数超过有限机器资源时报错的问题。([#41127](https://github.com/PaddlePaddle/Paddle/pull/41127), [#37816](https://github.com/PaddlePaddle/Paddle/pull/37816), [#39736](https://github.com/PaddlePaddle/Paddle/pull/39736), [#41563](https://github.com/PaddlePaddle/Paddle/pull/41563)) +- 修复 `paddle.index_select`,`paddle.index_sample` 申请的 GPU 线程数超过有限机器资源时报错的问题。([#41127](https://github.com/PaddlePaddle/Paddle/pull/41127), [#37816](https://github.com/PaddlePaddle/Paddle/pull/37816), [#39736](https://github.com/PaddlePaddle/Paddle/pull/39736), [#41563](https://github.com/PaddlePaddle/Paddle/pull/41563)) -- 修复 ReduceConfig、elemwise_grad、gather、gather_nd、scatter ops 申请 GPU 线程数超过有限机器资源时报错的问题。([#40813](https://github.com/PaddlePaddle/Paddle/pull/40813), 
[#41127](https://github.com/PaddlePaddle/Paddle/pull/41127)) +- 修复 ReduceConfig、elemwise_grad、gather、gather_nd、scatter ops 申请 GPU 线程数超过有限机器资源时报错的问题。([#40813](https://github.com/PaddlePaddle/Paddle/pull/40813), [#41127](https://github.com/PaddlePaddle/Paddle/pull/41127)) - 修复 Kernel Primitive API 中 ReadData,ReadDataBc,ReadDataReduce 在 NX != 1 时访存越界的问题。([#36373](https://github.com/PaddlePaddle/Paddle/pull/36373)) @@ -1863,7 +1863,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 `paddle.nn.Sequential` 在 for 循环遍历 sublayers 时会报 KeyError 错误的 bug。([#39372](https://github.com/PaddlePaddle/Paddle/pull/39372)) -- 修复 `paddle.nn.functional.unfold` 在静态图下编译时检查 shape 错误的 bug。([#38907](https://github.com/PaddlePaddle/Paddle/pull/38907), [#38819](https://github.com/PaddlePaddle/Paddle/pull/38819)) +- 修复 `paddle.nn.functional.unfold` 在静态图下编译时检查 shape 错误的 bug。([#38907](https://github.com/PaddlePaddle/Paddle/pull/38907), [#38819](https://github.com/PaddlePaddle/Paddle/pull/38819)) - 修复静态图使用 dropout 时如果指定了 `axis` 后会报错的问题。([#37223](https://github.com/PaddlePaddle/Paddle/pull/37223)) @@ -1871,19 +1871,19 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 `paddle.nn.functional.label_smooth`在输入为空 Tensor 时抛出 FPE 的问题。([#35861](https://github.com/PaddlePaddle/Paddle/pull/35861)) -- 修复 reshape op 空 Tensor 形变问题, 支持将空 Tensor rehape 成[-1]。 ([#36087](https://github.com/PaddlePaddle/Paddle/pull/36087)) +- 修复 reshape op 空 Tensor 形变问题, 支持将空 Tensor rehape 成[-1]。 ([#36087](https://github.com/PaddlePaddle/Paddle/pull/36087)) - 修复 `fill_diagonal`参数 offset 非零时会造成修改值跨行问题。([#36212](https://github.com/PaddlePaddle/Paddle/pull/36212)) -- 修改动态图模式下 range op 返回 stop gradient 设置成 True。([#37486](https://github.com/PaddlePaddle/Paddle/pull/37486)) +- 修改动态图模式下 range op 返回 stop gradient 设置成 True。([#37486](https://github.com/PaddlePaddle/Paddle/pull/37486)) - 修复 Lamb 优化器当 Beta1Pow 和 Beta2Pow 在 GPU 上时更新错误的 bug。([#38518](https://github.com/PaddlePaddle/Paddle/pull/38518)) -- 修复 conv2d 算子 FLAGS_cudnn_deterministic 设置不生效的问题。([#37173](https://github.com/PaddlePaddle/Paddle/pull/37173)) +- 修复 conv2d 算子 FLAGS_cudnn_deterministic 设置不生效的问题。([#37173](https://github.com/PaddlePaddle/Paddle/pull/37173)) -- 修复因早期版本的 cufft 没有定义 CUFFT_VERSION 引发的问题。([#37312](https://github.com/PaddlePaddle/Paddle/pull/37312)) +- 修复因早期版本的 cufft 没有定义 CUFFT_VERSION 引发的问题。([#37312](https://github.com/PaddlePaddle/Paddle/pull/37312)) -- 修复 `paddle.ifftshit` , `paddle.fftshift` 计算错误问题。([#36834](https://github.com/PaddlePaddle/Paddle/pull/36834), [#36748](https://github.com/PaddlePaddle/Paddle/pull/36748)) +- 修复 `paddle.ifftshit` , `paddle.fftshift` 计算错误问题。([#36834](https://github.com/PaddlePaddle/Paddle/pull/36834), [#36748](https://github.com/PaddlePaddle/Paddle/pull/36748)) - 修复 `paddle.fft` 系列 API 中的 `axis` 计算错误。 ([#36321](https://github.com/PaddlePaddle/Paddle/pull/36321)) @@ -1897,9 +1897,9 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 fused_attention op 中 FusedResidualDropoutBias 在V100上计算出 nan/inf 问题。([#42398](https://github.com/PaddlePaddle/Paddle/pull/42398)) -- 修复 full_like op 在执行时引入的多余的 data transform 问题。([#41973](https://github.com/PaddlePaddle/Paddle/pull/41973)) +- 修复 full_like op 在执行时引入的多余的 data transform 问题。([#41973](https://github.com/PaddlePaddle/Paddle/pull/41973)) -- 修复 p_norm op 在 GPU 环境上计算 nan 的问题。([#41804](https://github.com/PaddlePaddle/Paddle/pull/41804)) +- 修复 p_norm op 在 GPU 环境上计算 nan 的问题。([#41804](https://github.com/PaddlePaddle/Paddle/pull/41804)) - 修复 split op 在参数 sections 存在为0的 size 情况下,段错误的问题。([#41755](https://github.com/PaddlePaddle/Paddle/pull/41755)) @@ -1907,34 +1907,34 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 import paddle 时由于 PIL 版本升级导致的废弃接口报 warning 的问题。([#42307](https://github.com/PaddlePaddle/Paddle/pull/42307)) -- 修复静态图下 `paddle.linalg.matrix_rank`不支持 tol 为 FP64 Tensor 的问题。([#42085](https://github.com/PaddlePaddle/Paddle/pull/42085)) +- 修复静态图下 `paddle.linalg.matrix_rank`不支持 tol 为 FP64 Tensor 的问题。([#42085](https://github.com/PaddlePaddle/Paddle/pull/42085)) #### IR(Intermediate Representation) - 动态图转静态图 - - - 修复 `tensor_array` 搭配控制流使用时,在反向梯度累加时存在的类型推导错误问题。([#39585](https://github.com/PaddlePaddle/Paddle/pull/39585), [#39689](https://github.com/PaddlePaddle/Paddle/pull/39689)) - - - 修复动转静 AMP 训练时参数梯度类型未被正确设置的问题。([#40938](https://github.com/PaddlePaddle/Paddle/pull/40938)) - - - 修复代码中存在错位注释时,动转静代码解析报错的问题。([#39035](https://github.com/PaddlePaddle/Paddle/pull/39035), [#38003](https://github.com/PaddlePaddle/Paddle/pull/38003)) - - - 修复动转静代码中调用非 forward 函数时,Tensor 未被正确转化为 Variable 的问题。([#37296](https://github.com/PaddlePaddle/Paddle/pull/37296), [#38540](https://github.com/PaddlePaddle/Paddle/pull/38540)) - - - 修复动转静代码转写时 `paddle` 被错误地作为变量传递的问题。([#37999](https://github.com/PaddlePaddle/Paddle/pull/37999)) - - - 修复模型动转静后调用 `paddle.flops` 时模型参数统计错误的问题。([#36852](https://github.com/PaddlePaddle/Paddle/pull/36852)) - - - 修复使用 
`paddle.jit.save/load` 接口加载模型后,在 train 模式和 no_grad 上下文中,显存会一直增长的问题。([#36434](https://github.com/PaddlePaddle/Paddle/pull/36434)) - - - 添加在 convert_call 对 generator function 转换时的警告。([#35369](https://github.com/PaddlePaddle/Paddle/pull/35369)) - - - 修复 run_program op 依赖分析的问题。 ([#38470](https://github.com/PaddlePaddle/Paddle/pull/38470)) - - - 修复控制流 For 中返回单值时代码转换的问题。([#40683](https://github.com/PaddlePaddle/Paddle/pull/40683)) - + + - 修复 `tensor_array` 搭配控制流使用时,在反向梯度累加时存在的类型推导错误问题。([#39585](https://github.com/PaddlePaddle/Paddle/pull/39585), [#39689](https://github.com/PaddlePaddle/Paddle/pull/39689)) + + - 修复动转静 AMP 训练时参数梯度类型未被正确设置的问题。([#40938](https://github.com/PaddlePaddle/Paddle/pull/40938)) + + - 修复代码中存在错位注释时,动转静代码解析报错的问题。([#39035](https://github.com/PaddlePaddle/Paddle/pull/39035), [#38003](https://github.com/PaddlePaddle/Paddle/pull/38003)) + + - 修复动转静代码中调用非 forward 函数时,Tensor 未被正确转化为 Variable 的问题。([#37296](https://github.com/PaddlePaddle/Paddle/pull/37296), [#38540](https://github.com/PaddlePaddle/Paddle/pull/38540)) + + - 修复动转静代码转写时 `paddle` 被错误地作为变量传递的问题。([#37999](https://github.com/PaddlePaddle/Paddle/pull/37999)) + + - 修复模型动转静后调用 `paddle.flops` 时模型参数统计错误的问题。([#36852](https://github.com/PaddlePaddle/Paddle/pull/36852)) + + - 修复使用 `paddle.jit.save/load` 接口加载模型后,在 train 模式和 no_grad 上下文中,显存会一直增长的问题。([#36434](https://github.com/PaddlePaddle/Paddle/pull/36434)) + + - 添加在 convert_call 对 generator function 转换时的警告。([#35369](https://github.com/PaddlePaddle/Paddle/pull/35369)) + + - 修复 run_program op 依赖分析的问题。 ([#38470](https://github.com/PaddlePaddle/Paddle/pull/38470)) + + - 修复控制流 For 中返回单值时代码转换的问题。([#40683](https://github.com/PaddlePaddle/Paddle/pull/40683)) + - 修复控制流 cond 的输入包含 LoDTensorArray 时,生成反向 op 会报错的问题。([#39585](https://github.com/PaddlePaddle/Paddle/pull/39585)) - + - 修复 `padddle.jit.save`在导出动转静模型时丢失顶层 Layer 的 forward_pre_hook 和 forward_post_hook 的问题。([#42273](https://github.com/PaddlePaddle/Paddle/pull/42273)) - 修复 `paddle.expand`中 shape 参数包含 Tensor 
在动转静时会转换报错的问题。([#41973](https://github.com/PaddlePaddle/Paddle/pull/41973)) @@ -1942,47 +1942,47 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. #### 分布式训练 - 分布式训练基础功能 - - - 修复分布式多机训练时,端口报错的问题。([#37274](https://github.com/PaddlePaddle/Paddle/pull/37274)) - - - 修复 brpc 编译依赖问题。([#37064](https://github.com/PaddlePaddle/Paddle/pull/37064)) - - - 修复 Fleet 启动时,由于 tcp 自连接产生的端口被占用的问题。([#38174](https://github.com/PaddlePaddle/Paddle/pull/38174)) - + + - 修复分布式多机训练时,端口报错的问题。([#37274](https://github.com/PaddlePaddle/Paddle/pull/37274)) + + - 修复 brpc 编译依赖问题。([#37064](https://github.com/PaddlePaddle/Paddle/pull/37064)) + + - 修复 Fleet 启动时,由于 tcp 自连接产生的端口被占用的问题。([#38174](https://github.com/PaddlePaddle/Paddle/pull/38174)) + - 修复数据并行下,由于 FP16 参数在多卡下初始化不一致,导致精度下降的问题。([#38838](https://github.com/PaddlePaddle/Paddle/pull/38838), [#38563](https://github.com/PaddlePaddle/Paddle/pull/38563), [#38405](https://github.com/PaddlePaddle/Paddle/pull/38405)) - + - 修复数据并行下,由于 FP16 梯度同步时,没有除以卡数,导致精度下降的问题。([#38378](https://github.com/PaddlePaddle/Paddle/pull/38378)) - 动态图混合并行 - + - 修复在混合并行下,通过使用新 update 接口,FP16 模式不更新参数的问题。([#36017](https://github.com/PaddlePaddle/Paddle/pull/36017)) - 静态图混合并行 - - - 修复分布式 dp 模式下 grad merge 与 ClipGradientByGlobalNorm 不兼容的问题。([#36334](https://github.com/PaddlePaddle/Paddle/pull/36334)) - - - 修复混合并行下,张量模型并行的非分布式参数在初始化阶段未被广播,导致各卡非分布式参数不一致的问题。([#36186](https://github.com/PaddlePaddle/Paddle/pull/36186)) - - - 修复 sharding 开启 offload 时,sharding 的 save_persistables 接口未保存 FP16 参数和 offload 持久化变量的问题。([#40477](https://github.com/PaddlePaddle/Paddle/pull/40477)) - + + - 修复分布式 dp 模式下 grad merge 与 ClipGradientByGlobalNorm 不兼容的问题。([#36334](https://github.com/PaddlePaddle/Paddle/pull/36334)) + + - 修复混合并行下,张量模型并行的非分布式参数在初始化阶段未被广播,导致各卡非分布式参数不一致的问题。([#36186](https://github.com/PaddlePaddle/Paddle/pull/36186)) + + - 修复 sharding 开启 offload 时,sharding 的 save_persistables 接口未保存 FP16 参数和 offload 持久化变量的问题。([#40477](https://github.com/PaddlePaddle/Paddle/pull/40477)) 
+ - 修复开启 sharding 训练时,ema 参数在非0号卡上无法保存的问题。([#39860](https://github.com/PaddlePaddle/Paddle/pull/39860)) - + - 修复 FC 按照列切分梯度计算错误的问题。([#38724](https://github.com/PaddlePaddle/Paddle/pull/38724)) - - - 修复 DistributedStrategy 设置为 without_graph_optimizer 时和 rnn 一起使用报错的问题。 ([#36176](https://github.com/PaddlePaddle/Paddle/pull/36176)) + + - 修复 DistributedStrategy 设置为 without_graph_optimizer 时和 rnn 一起使用报错的问题。 ([#36176](https://github.com/PaddlePaddle/Paddle/pull/36176)) - GPUPS 参数服务器训练 - + - 修复 GPUPS 宏定义触发 CPU 分支编译问题。([#37248](https://github.com/PaddlePaddle/Paddle/pull/37248)) - + - 修复 GPUPS 流水线训练时在保存 delta 和 pullsparse 并发时引发的偶发报错问题。([#37233](https://github.com/PaddlePaddle/Paddle/pull/37233)) - + - 修复 HDFSClient 查询目录未返回全路径,引发下载报错问题。 ([#36590](https://github.com/PaddlePaddle/Paddle/pull/36590)) - - - 修复 GPUPS 流水线训练时拉取老参数问题。([#36512](https://github.com/PaddlePaddle/Paddle/pull/36512)) - + + - 修复 GPUPS 流水线训练时拉取老参数问题。([#36512](https://github.com/PaddlePaddle/Paddle/pull/36512)) + - 修复 GPUPS 多流 allocation 问题。([#37476](https://github.com/PaddlePaddle/Paddle/pull/37476)) - + - 修复 GPUPS pybind 出 core 的问题。([#37287](https://github.com/PaddlePaddle/Paddle/pull/37287)) #### 其他 @@ -1993,89 +1993,89 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复动态图量化训练保存模型节点异常的问题。([#38102](https://github.com/PaddlePaddle/Paddle/pull/38102), [#38012](https://github.com/PaddlePaddle/Paddle/pull/38012)) -- 修复离线量化 flatten op 输出错误问题。([#37722](https://github.com/PaddlePaddle/Paddle/pull/37722)) +- 修复离线量化 flatten op 输出错误问题。([#37722](https://github.com/PaddlePaddle/Paddle/pull/37722)) - 修复了反量化 matmul op 时,维度对不上的问题。([#36982](https://github.com/PaddlePaddle/Paddle/pull/36982)) - 修复了量化无权重的 matmul_v2 时,错误添加量化 op 的问题。([#36593](https://github.com/PaddlePaddle/Paddle/pull/36593)) -- 修复 conv op channel wise 量化在保存模型时 quant_axis 属性保存错误。([#39054](https://github.com/PaddlePaddle/Paddle/pull/39054)) +- 修复 conv op channel wise 量化在保存模型时 quant_axis 属性保存错误。([#39054](https://github.com/PaddlePaddle/Paddle/pull/39054)) - 修复 ChannelWise 量化训练速度慢的问题。([#40772](https://github.com/PaddlePaddle/Paddle/pull/40772)) - 修复量化训练初始化为0的 Tensor 出 NAN 的问题。([#36762](https://github.com/PaddlePaddle/Paddle/pull/36762)) -- 修复多线程场景下混合精度 amp_level 设置错误问题。([#39198](https://github.com/PaddlePaddle/Paddle/pull/39198)) +- 修复多线程场景下混合精度 amp_level 设置错误问题。([#39198](https://github.com/PaddlePaddle/Paddle/pull/39198)) -- 修复混合精度训练与 PyLayer,Recompute 等一起使用时,PyLayer 和 Recompute 中未正确设置混合精度的问题。([#39950](https://github.com/PaddlePaddle/Paddle/pull/39950), [#40042](https://github.com/PaddlePaddle/Paddle/pull/40042)) +- 修复混合精度训练与 PyLayer,Recompute 等一起使用时,PyLayer 和 Recompute 中未正确设置混合精度的问题。([#39950](https://github.com/PaddlePaddle/Paddle/pull/39950), [#40042](https://github.com/PaddlePaddle/Paddle/pull/40042)) -- 修复了 Mac 下编译自定义算子时 `D_GLIBCXX_USE_CXX11_ABI` 未生效的问题。([#37878](https://github.com/PaddlePaddle/Paddle/pull/37878)) +- 修复了 Mac 下编译自定义算子时 `D_GLIBCXX_USE_CXX11_ABI` 未生效的问题。([#37878](https://github.com/PaddlePaddle/Paddle/pull/37878)) -- 修复 initializer 相关 API 在 block=None 时动静行为不统一的问题。([#37827](https://github.com/PaddlePaddle/Paddle/pull/37827)) +- 修复 initializer 相关 API 在 block=None 时动静行为不统一的问题。([#37827](https://github.com/PaddlePaddle/Paddle/pull/37827)) -- 修复 python3.6 环境下没有 fluid 
模块的 bug。([#35862](https://github.com/PaddlePaddle/Paddle/pull/35862)) +- 修复 python3.6 环境下没有 fluid 模块的 bug。([#35862](https://github.com/PaddlePaddle/Paddle/pull/35862)) -- 修复优化器 `paddle.optimizer.Adamw` 错误调用 adam op 的 bug。([#36028](https://github.com/PaddlePaddle/Paddle/pull/36028)) +- 修复优化器 `paddle.optimizer.Adamw` 错误调用 adam op 的 bug。([#36028](https://github.com/PaddlePaddle/Paddle/pull/36028)) -- 修复 multi tensor 策略下 `paddle.optimizer.Momentum` 优化器参数 `regularizer` 属性为 None 时的逻辑错误。([#38344](https://github.com/PaddlePaddle/Paddle/pull/38344)) +- 修复 multi tensor 策略下 `paddle.optimizer.Momentum` 优化器参数 `regularizer` 属性为 None 时的逻辑错误。([#38344](https://github.com/PaddlePaddle/Paddle/pull/38344)) -- 修复 multi tensor 策略下 `paddle.optimizer.Momentum`、`paddle.optimizer.Adam` 优化器会对 `multi_precision` 属性进行修改的错误。([#38991](https://github.com/PaddlePaddle/Paddle/pull/38991)) +- 修复 multi tensor 策略下 `paddle.optimizer.Momentum`、`paddle.optimizer.Adam` 优化器会对 `multi_precision` 属性进行修改的错误。([#38991](https://github.com/PaddlePaddle/Paddle/pull/38991)) -- 修复最终态 API amp 与 optional 类型 Tensor 组合使用的代码编译错误。([#40980](https://github.com/PaddlePaddle/Paddle/pull/40980)) +- 修复最终态 API amp 与 optional 类型 Tensor 组合使用的代码编译错误。([#40980](https://github.com/PaddlePaddle/Paddle/pull/40980)) -- 修复 paddle+lite+xpu 预测库调用 lite CPU 预测时会报错的 bug,修复 paddle+lite(without NNAdapter) 编译时会报错的 bug。 ([#37449](https://github.com/PaddlePaddle/Paddle/pull/37449)) +- 修复 paddle+lite+xpu 预测库调用 lite CPU 预测时会报错的 bug,修复 paddle+lite(without NNAdapter) 编译时会报错的 bug。 ([#37449](https://github.com/PaddlePaddle/Paddle/pull/37449)) -- 修复 Debug 编译模式下 LoDTensorArray 因 Pybind11 绑定不一致导致 crash 的 bug。([#37954](https://github.com/PaddlePaddle/Paddle/pull/37954)) +- 修复 Debug 编译模式下 LoDTensorArray 因 Pybind11 绑定不一致导致 crash 的 bug。([#37954](https://github.com/PaddlePaddle/Paddle/pull/37954)) -- 修复 shape 参数为 Tensor 和 int 构成列表的极端情况下,无法正确构建 Tensor 的 bug。([#38284](https://github.com/PaddlePaddle/Paddle/pull/38284)) +- 修复 shape 参数为 Tensor 和 int 构成列表的极端情况下,无法正确构建 
Tensor 的 bug。([#38284](https://github.com/PaddlePaddle/Paddle/pull/38284)) -- 修复 `paddle.optimizer.AdamW` API 兼容性问题。([#37905](https://github.com/PaddlePaddle/Paddle/pull/37905)) +- 修复 `paddle.optimizer.AdamW` API 兼容性问题。([#37905](https://github.com/PaddlePaddle/Paddle/pull/37905)) -- 修复 _InstanceNormBase 中 extra_repr 的返回错误。([#38537](https://github.com/PaddlePaddle/Paddle/pull/38537)) +- 修复 _InstanceNormBase 中 extra_repr 的返回错误。([#38537](https://github.com/PaddlePaddle/Paddle/pull/38537)) -- 修复联编开启 -DWITH_DISTRIBUTED 生成 Paddle Inference 缺少符号 `paddle::distributed::TensorTable` 的问题。 ([#41128](https://github.com/PaddlePaddle/Paddle/pull/41128)) +- 修复联编开启 -DWITH_DISTRIBUTED 生成 Paddle Inference 缺少符号 `paddle::distributed::TensorTable` 的问题。 ([#41128](https://github.com/PaddlePaddle/Paddle/pull/41128)) -- matmul_v2 op 新增 shape check,在 shape 中存在0值进行信息报错。 ([#35791](https://github.com/PaddlePaddle/Paddle/pull/35791)) +- matmul_v2 op 新增 shape check,在 shape 中存在0值进行信息报错。 ([#35791](https://github.com/PaddlePaddle/Paddle/pull/35791)) -- 修复动态图 recompute 对于没有梯度输入提示信息反复打印,改成用 warning 只打印一次的方式。([#38293](https://github.com/PaddlePaddle/Paddle/pull/38293)) +- 修复动态图 recompute 对于没有梯度输入提示信息反复打印,改成用 warning 只打印一次的方式。([#38293](https://github.com/PaddlePaddle/Paddle/pull/38293)) -- 修复 gelu op 在视觉模型中训练后期在验证集上精度低的问题。([#38450](https://github.com/PaddlePaddle/Paddle/pull/38450)) +- 修复 gelu op 在视觉模型中训练后期在验证集上精度低的问题。([#38450](https://github.com/PaddlePaddle/Paddle/pull/38450)) -- 修复 adamw op 在数值计算上误差问题。([#37746](https://github.com/PaddlePaddle/Paddle/pull/37746)) +- 修复 adamw op 在数值计算上误差问题。([#37746](https://github.com/PaddlePaddle/Paddle/pull/37746)) -- 补充 sparse_momentum `_C_ops` 接口 MasterParam 和 MasterParamOut 参数。([#39969](https://github.com/PaddlePaddle/Paddle/pull/39969)) +- 补充 sparse_momentum `_C_ops` 接口 MasterParam 和 MasterParamOut 参数。([#39969](https://github.com/PaddlePaddle/Paddle/pull/39969)) -- 修复 python3.6 环境下没有 `distributed` 模块的 
bug。([#35848](https://github.com/PaddlePaddle/Paddle/pull/35848)) +- 修复 python3.6 环境下没有 `distributed` 模块的 bug。([#35848](https://github.com/PaddlePaddle/Paddle/pull/35848)) -- 修复 eigh 单元测试数据初始化问题。([#39568](https://github.com/PaddlePaddle/Paddle/pull/39568)) +- 修复 eigh 单元测试数据初始化问题。([#39568](https://github.com/PaddlePaddle/Paddle/pull/39568)) -- 修复 eigvalsh 单元测试数据初始化问题。([#39841](https://github.com/PaddlePaddle/Paddle/pull/39841)) +- 修复 eigvalsh 单元测试数据初始化问题。([#39841](https://github.com/PaddlePaddle/Paddle/pull/39841)) -- 修复 segment op 在 V100上寄存器使用过多导致不能正常运行的问题。([#38113](https://github.com/PaddlePaddle/Paddle/pull/38113)) +- 修复 segment op 在 V100上寄存器使用过多导致不能正常运行的问题。([#38113](https://github.com/PaddlePaddle/Paddle/pull/38113)) -- 修复 conv 相关算子稀疏化维度错误的问题。([#36054](https://github.com/PaddlePaddle/Paddle/pull/36054)) +- 修复 conv 相关算子稀疏化维度错误的问题。([#36054](https://github.com/PaddlePaddle/Paddle/pull/36054)) -- 提供自动稀疏训练(Automatic SParsity)静态图相关功能 Alias 至 `Paddle.static.sparsity`。([#36525](https://github.com/PaddlePaddle/Paddle/pull/36525)) +- 提供自动稀疏训练(Automatic SParsity)静态图相关功能 Alias 至 `Paddle.static.sparsity`。([#36525](https://github.com/PaddlePaddle/Paddle/pull/36525)) -- 修复 divide op 整数除法还是整数的 bug。([#40890](https://github.com/PaddlePaddle/Paddle/pull/40890)) +- 修复 divide op 整数除法还是整数的 bug。([#40890](https://github.com/PaddlePaddle/Paddle/pull/40890)) -- 修复 `paddle.multiplex` 候选 Tensor 大小为0崩溃问题。([#34972](https://github.com/PaddlePaddle/Paddle/pull/34972)) +- 修复 `paddle.multiplex` 候选 Tensor 大小为0崩溃问题。([#34972](https://github.com/PaddlePaddle/Paddle/pull/34972)) -- 修复 `paddle.kl_div` 参数 `reduction` 给定情况下速度异常的问题。([#37283](https://github.com/PaddlePaddle/Paddle/pull/37283)) +- 修复 `paddle.kl_div` 参数 `reduction` 给定情况下速度异常的问题。([#37283](https://github.com/PaddlePaddle/Paddle/pull/37283)) -- 修复 Cifar 数据集加载 data source 无序的问题。 ([#37272](https://github.com/PaddlePaddle/Paddle/pull/37272)) +- 修复 Cifar 数据集加载 data source 无序的问题。 ([#37272](https://github.com/PaddlePaddle/Paddle/pull/37272)) -- 修复 
ProgressBar 类中 loss 从 uint16 到 float 的转换。([#39231](https://github.com/PaddlePaddle/Paddle/pull/39231)) +- 修复 ProgressBar 类中 loss 从 uint16 到 float 的转换。([#39231](https://github.com/PaddlePaddle/Paddle/pull/39231)) -- 修复 ShareBufferWith 共享数据类型的问题。([#37464](https://github.com/PaddlePaddle/Paddle/pull/37464), [#37247](https://github.com/PaddlePaddle/Paddle/pull/37247)) +- 修复 ShareBufferWith 共享数据类型的问题。([#37464](https://github.com/PaddlePaddle/Paddle/pull/37464), [#37247](https://github.com/PaddlePaddle/Paddle/pull/37247)) -- 修复 `paddle.io.DataLoader` 使用 IterableDataset 并且 num_workers>0 时的性能问题。([#40541](https://github.com/PaddlePaddle/Paddle/pull/40541)) +- 修复 `paddle.io.DataLoader` 使用 IterableDataset 并且 num_workers>0 时的性能问题。([#40541](https://github.com/PaddlePaddle/Paddle/pull/40541)) -- 修复 `paddle.vision.ops.yolo_loss` 动态图返回值不全的问题。([#40185](https://github.com/PaddlePaddle/Paddle/pull/40185)) +- 修复 `paddle.vision.ops.yolo_loss` 动态图返回值不全的问题。([#40185](https://github.com/PaddlePaddle/Paddle/pull/40185)) -- 移出 `paddle.io.BatchSampler` 对输入参数 dataset 需要是 `paddle.io.Dataset` 类型的限制,扩大对用户自定义数据集的支持。([#40184](https://github.com/PaddlePaddle/Paddle/pull/40184)) +- 移出 `paddle.io.BatchSampler` 对输入参数 dataset 需要是 `paddle.io.Dataset` 类型的限制,扩大对用户自定义数据集的支持。([#40184](https://github.com/PaddlePaddle/Paddle/pull/40184)) -- 修复 `paddle.summary` 报错op_flops不存在的问题。([#36489](https://github.com/PaddlePaddle/Paddle/pull/36489)) +- 修复 `paddle.summary` 报错op_flops不存在的问题。([#36489](https://github.com/PaddlePaddle/Paddle/pull/36489)) - 修复 lars_momentum op 在 lars_weight_decay=0 时公式错误的问题。([#40892](https://github.com/PaddlePaddle/Paddle/pull/40892)) @@ -2083,9 +2083,9 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 optimizer-offload 不支持 adamw op type 的问题。 ([#36432](https://github.com/PaddlePaddle/Paddle/pull/36432)) -- 修复多线程场景下,Tracer 中 enable_program_desc_tracing_数据不安全的问题。([#39776](https://github.com/PaddlePaddle/Paddle/pull/39776)) +- 修复多线程场景下,Tracer 中 enable_program_desc_tracing_数据不安全的问题。([#39776](https://github.com/PaddlePaddle/Paddle/pull/39776)) -- 修复模型读取时模型档案大小未初始化的问题。([#40518](https://github.com/PaddlePaddle/Paddle/pull/40518)) +- 修复模型读取时模型档案大小未初始化的问题。([#40518](https://github.com/PaddlePaddle/Paddle/pull/40518)) - 修复 Expand op 逻辑 bug,当输入Tensor X 的维度,小于要拓展的 shape 时,可能导致取得 Out.Shape 是错误的。([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) @@ -2093,26 +2093,26 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 Expand_As op 计算输出 shape 时逻辑的错误。([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) - + - 修复 `core.VarDesc.VarType.STRINGS` 类型的变量获取 `lod_level` 属性报错的问题,并且设置其 `lod_level` 为None。([#39077](https://github.com/PaddlePaddle/Paddle/pull/39077)) - + - 修复框架功能 `PyLayer` 不支持不同 dtype 的问题。 ([#37974](https://github.com/PaddlePaddle/Paddle/pull/37974)) -- 修复了学习率衰减 API `paddle.optimizer.lr.PolynomialDecay` 的零除问题。 ([#38782](https://github.com/PaddlePaddle/Paddle/pull/38782)) +- 修复了学习率衰减 API `paddle.optimizer.lr.PolynomialDecay` 的零除问题。 ([#38782](https://github.com/PaddlePaddle/Paddle/pull/38782)) -- 修复调用 DisableGlogInfo() 接口后依旧残留部分日志的问题。 ([#36356](https://github.com/PaddlePaddle/Paddle/pull/36356)) +- 修复调用 DisableGlogInfo() 接口后依旧残留部分日志的问题。 ([#36356](https://github.com/PaddlePaddle/Paddle/pull/36356)) -- 修复 SimpleRNN、GRU和LSTM API CPU训练时多层RNN(dropout 设置为0时)反向计算出错的问题。 ([#37080](https://github.com/PaddlePaddle/Paddle/pull/37080)) +- 修复 SimpleRNN、GRU和LSTM API CPU训练时多层RNN(dropout 设置为0时)反向计算出错的问题。 ([#37080](https://github.com/PaddlePaddle/Paddle/pull/37080)) -- 为 cufft 和 hipfft 后端的 fft 添加了 cache。 ([#36646](https://github.com/PaddlePaddle/Paddle/pull/36646)) +- 为 cufft 和 hipfft 后端的 fft 添加了 cache。 
([#36646](https://github.com/PaddlePaddle/Paddle/pull/36646)) -- 使 `paddle.roll` 的 shifts 参数支持传入 Tensor。 ([#36727](https://github.com/PaddlePaddle/Paddle/pull/36727)) +- 使 `paddle.roll` 的 shifts 参数支持传入 Tensor。 ([#36727](https://github.com/PaddlePaddle/Paddle/pull/36727)) - 为 fft 添加 onemkl 作为可选的计算后端。 ([#36414](https://github.com/PaddlePaddle/Paddle/pull/36414)) -- 修复 mamtul_v2 和 elementwise_div 两个 op 在 bfloat16 类型下的精度问题。([#42479](https://github.com/PaddlePaddle/Paddle/pull/42479)) +- 修复 mamtul_v2 和 elementwise_div 两个 op 在 bfloat16 类型下的精度问题。([#42479](https://github.com/PaddlePaddle/Paddle/pull/42479)) -- 修复显存回收时 LoDTensorArray 只清理内部 Tensor 而未清空 Array 导致的下个 step 可能出错的问题。([#42398](https://github.com/PaddlePaddle/Paddle/pull/42398)) +- 修复显存回收时 LoDTensorArray 只清理内部 Tensor 而未清空 Array 导致的下个 step 可能出错的问题。([#42398](https://github.com/PaddlePaddle/Paddle/pull/42398)) ## 4. 部署方向(Paddle Inference) @@ -2136,9 +2136,9 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - switch_ir_debug 接口增加 dump 模型的功能。([#36581](https://github.com/PaddlePaddle/Paddle/pull/36581)) -- 新增 TensorRT config 的配置接口:`void UpdateConfigInterleaved(paddle_infer::Config* c, bool with_interleaved)`,用于 int8 量化推理中特殊的数据排布。([#38884](https://github.com/PaddlePaddle/Paddle/pull/38884)) +- 新增 TensorRT config 的配置接口:`void UpdateConfigInterleaved(paddle_infer::Config* c, bool with_interleaved)`,用于 int8 量化推理中特殊的数据排布。([#38884](https://github.com/PaddlePaddle/Paddle/pull/38884)) -- log 中增加 TensorRT inspector 输出信息,仅在 TensorRT 8.2及以上版本有效。 ([#38362](https://github.com/PaddlePaddle/Paddle/pull/38362),[#38200](https://github.com/PaddlePaddle/Paddle/pull/38200))) +- log 中增加 TensorRT inspector 输出信息,仅在 TensorRT 8.2及以上版本有效。 ([#38362](https://github.com/PaddlePaddle/Paddle/pull/38362),[#38200](https://github.com/PaddlePaddle/Paddle/pull/38200))) - 增加 TensorRT ASP 稀疏推理支持。([#36413](https://github.com/PaddlePaddle/Paddle/pull/36413)) @@ -2146,11 +2146,11 @@ AssertionError: elu_ only support alpha >= 0, please use 
elu instead. #### CPU性能优化 -- 优化 MKLDNN 的缓存机制。([#38336](https://github.com/PaddlePaddle/Paddle/pull/38336), [#36980](https://github.com/PaddlePaddle/Paddle/pull/36980), [#36695](https://github.com/PaddlePaddle/Paddle/pull/36695)) +- 优化 MKLDNN 的缓存机制。([#38336](https://github.com/PaddlePaddle/Paddle/pull/38336), [#36980](https://github.com/PaddlePaddle/Paddle/pull/36980), [#36695](https://github.com/PaddlePaddle/Paddle/pull/36695)) -- 新增 matmul_scale_fuse pass。([#37962](https://github.com/PaddlePaddle/Paddle/pull/37962)) +- 新增 matmul_scale_fuse pass。([#37962](https://github.com/PaddlePaddle/Paddle/pull/37962)) -- 新增 MKLDNN reshape_transpose_matmul_v2_mkldnn_fuse_pass。([#37847](https://github.com/PaddlePaddle/Paddle/pull/37847), [#40948](https://github.com/PaddlePaddle/Paddle/pull/40948)) +- 新增 MKLDNN reshape_transpose_matmul_v2_mkldnn_fuse_pass。([#37847](https://github.com/PaddlePaddle/Paddle/pull/37847), [#40948](https://github.com/PaddlePaddle/Paddle/pull/40948)) - 新增 MKLDNN conv_hard_sigmoid_mkldnn_fuse_pass。([#36869](https://github.com/PaddlePaddle/Paddle/pull/36869)) @@ -2178,23 +2178,23 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 支持 `multiclass_nms3` 使用 TensorRT 推理。([#41181](https://github.com/PaddlePaddle/Paddle/pull/41181) [#41344](https://github.com/PaddlePaddle/Paddle/pull/41344)) -- 支持 flatten_contiguous_rang op 使用 TensorRT 推理。([#38922](https://github.com/PaddlePaddle/Paddle/pull/38922)) +- 支持 flatten_contiguous_rang op 使用 TensorRT 推理。([#38922](https://github.com/PaddlePaddle/Paddle/pull/38922)) - 支持 `pool2d` 属性 `padding` 的维度为4、`global_pooling` 和 `ceil_mode` 为 True 情况下使用 TensorRT 推理。([#39545](https://github.com/PaddlePaddle/Paddle/pull/39545)) - 支持 batch_norm 和 elementwise_add 为5维时使用 TensorRT 推理。([#36446](https://github.com/PaddlePaddle/Paddle/pull/36446)) -- 新增 pool3d 使用 TensorRT 推理。([#36545](https://github.com/PaddlePaddle/Paddle/pull/36545), [#36783](https://github.com/PaddlePaddle/Paddle/pull/36783)) +- 新增 pool3d 使用 TensorRT 推理。([#36545](https://github.com/PaddlePaddle/Paddle/pull/36545), [#36783](https://github.com/PaddlePaddle/Paddle/pull/36783)) - 增加 `reduce` int32 和 float 类型使用 TensorRT 推理,增加 `reduce_mean` GPU 算子 int32、int64 注册。([#39088](https://github.com/PaddlePaddle/Paddle/pull/39088)) -- 修改 MatmulV2ToMul pass,修改限定条件(不支持广播)和 op_teller 映射条件。([#36652](https://github.com/PaddlePaddle/Paddle/pull/36652)) +- 修改 MatmulV2ToMul pass,修改限定条件(不支持广播)和 op_teller 映射条件。([#36652](https://github.com/PaddlePaddle/Paddle/pull/36652)) -- 增加 TenorRT plugin 接口 AddPluginV2IOExt 的支持 。([#36493](https://github.com/PaddlePaddle/Paddle/pull/36493)) +- 增加 TenorRT plugin 接口 AddPluginV2IOExt 的支持 。([#36493](https://github.com/PaddlePaddle/Paddle/pull/36493)) - 增加 roi_align op 中 aligned 属性并支持 TensorRT 推理。([#38905](https://github.com/PaddlePaddle/Paddle/pull/38905)) -- 增加 concat 属性 `axis = -1` 时支持 TensorRT 推理。([#39096](https://github.com/PaddlePaddle/Paddle/pull/39096)) +- 增加 concat 属性 `axis = -1` 时支持 TensorRT 推理。([#39096](https://github.com/PaddlePaddle/Paddle/pull/39096)) - 新增 TensorRT plugin :preln_emb_eltwise_layernorm、 preln_skip_la、rnorm ops, 用于 ERNIE 
类模型性能优化。([#39570](https://github.com/PaddlePaddle/Paddle/pull/39570)) @@ -2207,25 +2207,25 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 新增 div op 对 TensorRT 的支持。([#41243](https://github.com/PaddlePaddle/Paddle/pull/41243)) - 量化支持 - + - `PostTrainingQuantization` API新增支持`paddle.io.DataLoader` 对象或者 `Python Generator`的输入。([#38686](https://github.com/PaddlePaddle/Paddle/pull/38686)) - - - ERNIE 全量化模型推理支持 interleaved 数据排布。([#39424](https://github.com/PaddlePaddle/Paddle/pull/39424)) - - - 支持 PaddleSlim 新量化模型格式推理。([#41049](https://github.com/PaddlePaddle/Paddle/pull/41049)) - + + - ERNIE 全量化模型推理支持 interleaved 数据排布。([#39424](https://github.com/PaddlePaddle/Paddle/pull/39424)) + + - 支持 PaddleSlim 新量化模型格式推理。([#41049](https://github.com/PaddlePaddle/Paddle/pull/41049)) + - 新增 matmul int8 量化的推理 op converter 和 plugin。([#37285](https://github.com/PaddlePaddle/Paddle/pull/37285)) - + - 新增判断模型所有 op 能否支持 int8 量化的 pass。([#36042](https://github.com/PaddlePaddle/Paddle/pull/36042)) - - - 支持 multihead attention 非变长分支中 FC 部分的量化推理。([#39660](https://github.com/PaddlePaddle/Paddle/pull/39660)) + + - 支持 multihead attention 非变长分支中 FC 部分的量化推理。([#39660](https://github.com/PaddlePaddle/Paddle/pull/39660)) #### 昇腾NPU 相关功能 - - 重构 shape 算子前向计算逻辑,支持在 NPU 上执行。([#39613](https://github.com/PaddlePaddle/Paddle/pull/39613)) - + - 重构 reshape 算子前向计算逻辑,支持 ShapeTensor 输入。([#38748](https://github.com/PaddlePaddle/Paddle/pull/38748)) - + - 模型权重加载时精度类型统一。([#39160](https://github.com/PaddlePaddle/Paddle/pull/39160)) ### (3)问题修复 @@ -2234,11 +2234,11 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复保存静态图时模型剪裁的问题。([#37579](https://github.com/PaddlePaddle/Paddle/pull/37579)) -- C API 增加对的字符串的封装 PD_Cstr,并提供构造和析构的方式,避免用户直接使用 C 运行时库来析构字符串。 ([#38667](https://github.com/PaddlePaddle/Paddle/pull/38667)) +- C API 增加对的字符串的封装 PD_Cstr,并提供构造和析构的方式,避免用户直接使用 C 运行时库来析构字符串。 ([#38667](https://github.com/PaddlePaddle/Paddle/pull/38667)) - 修复预测时内存复用的逻辑问题。([#37324](https://github.com/PaddlePaddle/Paddle/pull/37324)) -- 修复多线程下内存复用报错问题。([#37894](https://github.com/PaddlePaddle/Paddle/pull/37894)) +- 修复多线程下内存复用报错问题。([#37894](https://github.com/PaddlePaddle/Paddle/pull/37894)) - 在没有权重文件时,允许传递空字符串进行推理。([#38579](https://github.com/PaddlePaddle/Paddle/pull/38579)) @@ -2248,7 +2248,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 TensorRT engine 析构问题。([#35842](https://github.com/PaddlePaddle/Paddle/pull/35842), [#35938](https://github.com/PaddlePaddle/Paddle/pull/35938)) -- lite xpu 接口修复无法选择 xpu 卡的问题。([#36610](https://github.com/PaddlePaddle/Paddle/pull/36610)) +- lite xpu 接口修复无法选择 xpu 卡的问题。([#36610](https://github.com/PaddlePaddle/Paddle/pull/36610)) - TensorRT 动态 shape 参数自动生成接口增加文件存在性检查。([#36628](https://github.com/PaddlePaddle/Paddle/pull/36628)) @@ -2258,13 +2258,13 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复预测时 cuDNN 默认算法选择配置,使用非 deterministic 策略。 ([#41491](https://github.com/PaddlePaddle/Paddle/pull/41491)) -- 修复 deformable_conv op 在 TensorRT plugin 资源回收处理错误的问题。 ([#38374](https://github.com/PaddlePaddle/Paddle/pull/38374)) +- 修复 deformable_conv op 在 TensorRT plugin 资源回收处理错误的问题。 ([#38374](https://github.com/PaddlePaddle/Paddle/pull/38374)) -- 修复 deformable_conv op 在 TensorRT plugin 序列化错误问题。 ([#38057](https://github.com/PaddlePaddle/Paddle/pull/38057)) +- 修复 deformable_conv op 在 TensorRT plugin 序列化错误问题。 ([#38057](https://github.com/PaddlePaddle/Paddle/pull/38057)) -- 适配 TensorRT 8.0 新的构建引擎和系列化 API。 ([#36769](https://github.com/PaddlePaddle/Paddle/pull/36769)) +- 适配 TensorRT 8.0 新的构建引擎和系列化 API。 ([#36769](https://github.com/PaddlePaddle/Paddle/pull/36769)) -- 修复 Flatten2MatmulFusePass、Squeeze2MatmulFusePass、Reshape2MatmulFusePass 没有生效问题。([#37644](https://github.com/PaddlePaddle/Paddle/pull/37644)) +- 修复 Flatten2MatmulFusePass、Squeeze2MatmulFusePass、Reshape2MatmulFusePass 没有生效问题。([#37644](https://github.com/PaddlePaddle/Paddle/pull/37644)) - 修复 TensorRT 输入数据在上时报错的问题。([#37427](https://github.com/PaddlePaddle/Paddle/pull/37427)) @@ -2284,21 +2284,21 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 prelu 在 dynamic shape 时不支持一维输入的问题。([#39389](https://github.com/PaddlePaddle/Paddle/pull/39389)) -- 修复 slice 的 special_slice_plugin 的核函数计算错误的问题。([#39875](https://github.com/PaddlePaddle/Paddle/pull/39875)) +- 修复 slice 的 special_slice_plugin 的核函数计算错误的问题。([#39875](https://github.com/PaddlePaddle/Paddle/pull/39875)) -- 暂时禁用 skip_layernorm 变长下的 int8 分支,防止精度下降。([#39991](https://github.com/PaddlePaddle/Paddle/pull/39991)) +- 暂时禁用 skip_layernorm 变长下的 int8 分支,防止精度下降。([#39991](https://github.com/PaddlePaddle/Paddle/pull/39991)) - 修复关于支持 preln_ernie 模型的一些 bug。([#39733](https://github.com/PaddlePaddle/Paddle/pull/39733)) -- 修复 slice 在 ERNIE 中 threads 可能超过限制的 bug,修复 spacial_slice 误触的 bug。([#39096](https://github.com/PaddlePaddle/Paddle/pull/39096)) +- 修复 slice 在 ERNIE 中 threads 可能超过限制的 bug,修复 spacial_slice 误触的 bug。([#39096](https://github.com/PaddlePaddle/Paddle/pull/39096)) -- 修复 elementwise 在维度相同时不支持广播的问题。([#37908](https://github.com/PaddlePaddle/Paddle/pull/37908)) +- 修复 elementwise 在维度相同时不支持广播的问题。([#37908](https://github.com/PaddlePaddle/Paddle/pull/37908)) -- 修复 nearest_interp op 当 align_corners 为 True 时,TensorRT layer 的结果和原生 op 的结果有 diff,底层实现不一样。([#37525](https://github.com/PaddlePaddle/Paddle/pull/37525)) +- 修复 nearest_interp op 当 align_corners 为 True 时,TensorRT layer 的结果和原生 op 的结果有 diff,底层实现不一样。([#37525](https://github.com/PaddlePaddle/Paddle/pull/37525)) -- 修复qkv_plugin: 核函数计算错误。([#37096](https://github.com/PaddlePaddle/Paddle/pull/37096)) +- 修复qkv_plugin: 核函数计算错误。([#37096](https://github.com/PaddlePaddle/Paddle/pull/37096)) -- 修复动态量化的推理 pass 的问题。([#35879](https://github.com/PaddlePaddle/Paddle/pull/35879)) +- 修复动态量化的推理 pass 的问题。([#35879](https://github.com/PaddlePaddle/Paddle/pull/35879)) - 当 Tensor 请求的内存容量低于已分配的 size 时直接复用。([#37880](https://github.com/PaddlePaddle/Paddle/pull/37880)) @@ -2312,7 +2312,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复老版本模型在使用新版本 roi_align 时崩溃问题。([#38788](https://github.com/PaddlePaddle/Paddle/pull/38788)) 外部开发者 -- 修复 softmax 在 python 和 C++上性能差异较大的问题。([#37130](https://github.com/PaddlePaddle/Paddle/pull/37130)) +- 修复 softmax 在 python 和 C++上性能差异较大的问题。([#37130](https://github.com/PaddlePaddle/Paddle/pull/37130)) - 修复 matmul 在静态 shape 2维输入和动态 shape 3维输入情况下推理失败问题。([#36849](https://github.com/PaddlePaddle/Paddle/pull/36849)) @@ -2332,38 +2332,38 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 开启 TensorRT 时,conv2d 算子中 padding 方式支持 VALID 及 SAME 属性。([#38999](https://github.com/PaddlePaddle/Paddle/pull/38999)) -- 修复 MKLDNN 多输入算子量化问题。([#39593](https://github.com/PaddlePaddle/Paddle/pull/39593), [#39346](https://github.com/PaddlePaddle/Paddle/pull/39346), [#40717](https://github.com/PaddlePaddle/Paddle/pull/40717)) +- 修复 MKLDNN 多输入算子量化问题。([#39593](https://github.com/PaddlePaddle/Paddle/pull/39593), [#39346](https://github.com/PaddlePaddle/Paddle/pull/39346), [#40717](https://github.com/PaddlePaddle/Paddle/pull/40717)) -- 修复 MKLDNN 量化场景下 conv+activation 的 scale 错误问题。([#38331](https://github.com/PaddlePaddle/Paddle/pull/38331)) +- 修复 MKLDNN 量化场景下 conv+activation 的 scale 错误问题。([#38331](https://github.com/PaddlePaddle/Paddle/pull/38331)) -- 修复 MKLDNN 无参数算子量化中,根据后续算子量化情况不同需做不同处理的问题。([#39342](https://github.com/PaddlePaddle/Paddle/pull/39342)) +- 修复 MKLDNN 无参数算子量化中,根据后续算子量化情况不同需做不同处理的问题。([#39342](https://github.com/PaddlePaddle/Paddle/pull/39342)) -- 修复 MKLDNN cpu_bfloat16_placement_pass 中的数据类型相关问题。([#38702](https://github.com/PaddlePaddle/Paddle/pull/38702)) +- 修复 MKLDNN cpu_bfloat16_placement_pass 中的数据类型相关问题。([#38702](https://github.com/PaddlePaddle/Paddle/pull/38702)) -- 修复 MKLDNN bfloat16 推理中 split 算子执行问题。([#39548](https://github.com/PaddlePaddle/Paddle/pull/39548)) +- 修复 MKLDNN bfloat16 推理中 split 算子执行问题。([#39548](https://github.com/PaddlePaddle/Paddle/pull/39548)) -- 修复 MKLDNN matmul_v2 算子不支持6维问题。([#36342](https://github.com/PaddlePaddle/Paddle/pull/36342), 
[#38665](https://github.com/PaddlePaddle/Paddle/pull/38665)) +- 修复 MKLDNN matmul_v2 算子不支持6维问题。([#36342](https://github.com/PaddlePaddle/Paddle/pull/36342), [#38665](https://github.com/PaddlePaddle/Paddle/pull/38665)) -- 修复 MKLDNN matmul_v2_transpose_reshape 中的 MKLDNN DeviceContext 错误问题。([#38554](https://github.com/PaddlePaddle/Paddle/pull/38554)) +- 修复 MKLDNN matmul_v2_transpose_reshape 中的 MKLDNN DeviceContext 错误问题。([#38554](https://github.com/PaddlePaddle/Paddle/pull/38554)) -- 修复分割模型在 MKLDNN 推理场景计算结果错误问题。([#37310](https://github.com/PaddlePaddle/Paddle/pull/37310)) +- 修复分割模型在 MKLDNN 推理场景计算结果错误问题。([#37310](https://github.com/PaddlePaddle/Paddle/pull/37310)) -- 修复 MKLDNN bfloat16 placement 算子列表并添加缺失算子。([#36291](https://github.com/PaddlePaddle/Paddle/pull/36291)) +- 修复 MKLDNN bfloat16 placement 算子列表并添加缺失算子。([#36291](https://github.com/PaddlePaddle/Paddle/pull/36291)) -- 修复 MKLDNN 算子的格式问题,包括: FC、conv_transpose、6维 Tensor 报错问题、conv 对 `NHWC` 输入的输出 format 错误问题。([#38890](https://github.com/PaddlePaddle/Paddle/pull/38890), [#37344](https://github.com/PaddlePaddle/Paddle/pull/37344), [#37175](https://github.com/PaddlePaddle/Paddle/pull/37175), [#38553](https://github.com/PaddlePaddle/Paddle/pull/38553), [#40049](https://github.com/PaddlePaddle/Paddle/pull/40049), [#39097](https://github.com/PaddlePaddle/Paddle/pull/39097)) +- 修复 MKLDNN 算子的格式问题,包括: FC、conv_transpose、6维 Tensor 报错问题、conv 对 `NHWC` 输入的输出 format 错误问题。([#38890](https://github.com/PaddlePaddle/Paddle/pull/38890), [#37344](https://github.com/PaddlePaddle/Paddle/pull/37344), [#37175](https://github.com/PaddlePaddle/Paddle/pull/37175), [#38553](https://github.com/PaddlePaddle/Paddle/pull/38553), [#40049](https://github.com/PaddlePaddle/Paddle/pull/40049), [#39097](https://github.com/PaddlePaddle/Paddle/pull/39097)) -- 修复 MKLDNN 多线程推理场景因 cache 机制报错问题。([#36290](https://github.com/PaddlePaddle/Paddle/pull/36290), [#35884](https://github.com/PaddlePaddle/Paddle/pull/35884)) +- 修复 MKLDNN 多线程推理场景因 cache 
机制报错问题。([#36290](https://github.com/PaddlePaddle/Paddle/pull/36290), [#35884](https://github.com/PaddlePaddle/Paddle/pull/35884)) -- 修复 MKLDNN 因 matmul 及 FC 引起的量化模型精度异常问题。([#38023](https://github.com/PaddlePaddle/Paddle/pull/38023), [#37618](https://github.com/PaddlePaddle/Paddle/pull/37618)) +- 修复 MKLDNN 因 matmul 及 FC 引起的量化模型精度异常问题。([#38023](https://github.com/PaddlePaddle/Paddle/pull/38023), [#37618](https://github.com/PaddlePaddle/Paddle/pull/37618)) -- 修复 MKLDNN 量化转换脚本因 pass 缺少引起的量化模型精度异常问题。([#37619](https://github.com/PaddlePaddle/Paddle/pull/37619), [#40542](https://github.com/PaddlePaddle/Paddle/pull/40542), - [#38912](https://github.com/PaddlePaddle/Paddle/pull/38912)) +- 修复 MKLDNN 量化转换脚本因 pass 缺少引起的量化模型精度异常问题。([#37619](https://github.com/PaddlePaddle/Paddle/pull/37619), [#40542](https://github.com/PaddlePaddle/Paddle/pull/40542), + [#38912](https://github.com/PaddlePaddle/Paddle/pull/38912)) - 修复 MKLDNN 开启量 op 因为数据类型不匹配崩溃的问题。([#38133](https://github.com/PaddlePaddle/Paddle/pull/38133)) - 修复 MKLDNN 某些 op 修改 layout 后需要改回原 layout 的问题。([#39422](https://github.com/PaddlePaddle/Paddle/pull/39422)) -- 修复针对昇腾910推理场景下,由于未释放 GIL 锁,导致与昇腾软件栈冲突,python API 下报错的问题。 ([#38605](https://github.com/PaddlePaddle/Paddle/pull/38605)) +- 修复针对昇腾910推理场景下,由于未释放 GIL 锁,导致与昇腾软件栈冲突,python API 下报错的问题。 ([#38605](https://github.com/PaddlePaddle/Paddle/pull/38605)) ## 5. 环境适配 @@ -2374,30 +2374,30 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
备注: - PIP 源安装是指用 `pip install paddlepaddle` 或 `pip install paddlepaddle-gpu`从 PIP 官网下载安装包及依赖库的安装方式,支持架构种类少,安装包更轻量,下载源来自国外(相比bos源支持架构种类精简,安装包更轻量,只提供一种 CUDA 版本的安装包)。 - + - 2.3版本之前,飞桨 PIP 源安装包(CUDA10.2)支持的 GPU 架构为:3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5。 - + - 2.3版本之后,飞桨 PIP 源安装包(CUDA11.0)支持的 GPU 架构为:6.0, 6.1, 7.0, 7.5, 8.0 - 飞桨官网 bos 源是指从飞桨官网下载安装包及依赖库的安装方式,支持的 GPU 架构更多,下载源来自国内,速度较快。(相比PIP源支持架构种类多,提供多个 CUDA 版本的安装包): - + - 2.3版本之前,飞桨官网 bos 源安装包支持的 GPU 架构: - + - CUDA10 : 3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5; - + - CUDA11 : 5.2,6.0,6.1,7.0,7.5,8.0。 - + - 2.3版本之后,飞桨官网 bos 源安装包支持的 GPU 架构 - + - CUDA10 : 3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5; - + - CUDA11 : 3.5, 5.0, 6.0, 6.1, 7.0, 7.5, 8.0。 - 支持 Python 3.10,修复 Windows 下某些 PythonC API 变化导致的编译 bug。([#41180](https://github.com/PaddlePaddle/Paddle/pull/42180)) -- Windows 平台支持 Visual Studio 2019 编译。 ([#38719](https://github.com/PaddlePaddle/Paddle/pull/38719)) +- Windows 平台支持 Visual Studio 2019 编译。 ([#38719](https://github.com/PaddlePaddle/Paddle/pull/38719)) -- 消除 Windows 平台编译时出现的各种 warning。 ([#38034](https://github.com/PaddlePaddle/Paddle/pull/38034), [#37890](https://github.com/PaddlePaddle/Paddle/pull/37890), [#37442](https://github.com/PaddlePaddle/Paddle/pull/37442), [#37439](https://github.com/PaddlePaddle/Paddle/pull/37439), [#36857](https://github.com/PaddlePaddle/Paddle/pull/36857)) +- 消除 Windows 平台编译时出现的各种 warning。 ([#38034](https://github.com/PaddlePaddle/Paddle/pull/38034), [#37890](https://github.com/PaddlePaddle/Paddle/pull/37890), [#37442](https://github.com/PaddlePaddle/Paddle/pull/37442), [#37439](https://github.com/PaddlePaddle/Paddle/pull/37439), [#36857](https://github.com/PaddlePaddle/Paddle/pull/36857)) - 修复底层数据结构升级引入的 jetson 编译问题。 ([#39669](https://github.com/PaddlePaddle/Paddle/pull/39669), [#39441](https://github.com/PaddlePaddle/Paddle/pull/39441)) diff --git a/docs/release_note_en.md b/docs/release_note_en.md index be25861cb45..d4e43c0a4e5 100644 --- a/docs/release_note_en.md +++ b/docs/release_note_en.md @@ 
-124,17 +124,17 @@ We are excited to release the PaddlePaddle Framework V2.3.0. This version contai ### API - Added more than 100 new APIs, covering automatic differentiation, linear algebra, probability distribution, sparse tensor, framework performance analysis, hardware device management, vision domain, etc. - + - Added 4 new automatic differentiation APIs, 11 new linear algebra APIs, and 21 new probability distribution APIs to better support use cases in scientific computing, reinforcement learning, xand other application areas. - + - Added 11 new Sparse Tensor APIs including basic functions of sparse tensor construction and conversion. The COO and CSR formats are supported. - + - Added 9 new framework performance analysis APIs. The new performance profiling APIs, centered around Paddle.Profiler.Profiler, help users collect and analyze performance statistics during training and inference. - + - Added 7 APIs for device management, facilitating hardware information acquistion. - + - Added several visual and text domain APIs to facilitate ~~the~~ reusability of MobileNetV3, ResNeXt and other backbone networks, to achieve the fast networking. - + ### **Paddle** HIgh reusability operator l**ibrary** @@ -143,30 +143,30 @@ We are excited to release the PaddlePaddle Framework V2.3.0. This version contai ### **Distributed Training** - Fully upgrade the adaptive distributed training architecture, including multiple modules such as elastic resource management, asynchronous pipelined executor, heterogeneous communication, and automatic parallelism, and support the hard-aware distributed training and inference under a variety of heterogeneous hardware. - + - Add MoE parallel strategy, GroupSharded parallel strategy, and Pure FP16 under dynamic graph hybrid Parallelism, which further supports the efficient distributed training of large models under the dynamic graph. 
- + - Comprehensively upgrade and optimize the architecture of general heterogeneous parameter server, and simplify each module, such as communication and storage, to improve the secondary development experience of parameter server. The performance of GPU parameter server is improved by 2.38 times under 100 billion parameters and 10 billion data. - + ### **Compile and Install** - + - From version 2.3.0, PaddlePaddle upgrades GPU architectures supported. - + ### **Inference Deployment** - Add the Java API and ONNX Runtime CPU backend. - + - Support the TensorRT 8.0 / 8.2 and structured sparsity, with deep performance optimization for ERNIE-like structural models. - + ### **Hardware Backend Extention** - Add custom device support: provide a plug-in way to extend PaddlePaddle hardware backend. - + - Add training/inference support for multiple heterogeneous chips such as HUAWEI Ascend 910 / GraphCore IPU / Cambricon MLU / KUNLUNXIN 2. - + ### **Framework Architecture** @@ -177,9 +177,9 @@ We are excited to release the PaddlePaddle Framework V2.3.0. This version contai - Due to limitation of the binary size, sm35 CUDA ARCH is dropped in pre-compiled binaries. ([#41754](https://github.com/PaddlePaddle/Paddle/pull/41754)) - When `paddle.to_tensor` converts a python int scalar to a Tensor, the default data type on Windows changes from int32 to int64, thus alignment with Linux/Mac. ([#39662](https://github.com/PaddlePaddle/Paddle/pull/39662)) - + - To keep consistency with division behavior under python3, the division symbol `/` has been changed from “rounding divide” to “true divide”, and the data type of the computed output has been switched from int to float. ([#40890](https://github.com/PaddlePaddle/Paddle/pull/40890)) - +
@@ -291,251 +291,251 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. #### API - Add 4 new automatic differentiation APIs to support scientific computing, as listed below: ([#40692](https://github.com/PaddlePaddle/Paddle/pull/40692)) - + - `paddle.incubate.autograd.vjp`, compute vector-Jacobi matrix product. - + - `paddle.incubate.autograd.jvp`, compute Jacobi matrix-vector product. - + - `paddle.incubate.autograd.Jacobian`, compute Jacobi matrix. - + - `paddle.incubate.autograd.Hessian`, compute Hessian matrix. - + - Add linear algebra class API - + - Add `paddle.linalg.triangular_solve`, to compute a system of linear equations with unique solutions through a triangular coefficient. ([#36714](https://github.com/PaddlePaddle/Paddle/pull/36714)) - + - Add `paddle.linalg.eig`, to compute the characteristic decomposition of the general square matrix. ([#35764](https://github.com/PaddlePaddle/Paddle/pull/35764)) - + - Add `paddle.linalg.sovle`, to compute solutions to systems of linear equations. ([#35715](https://github.com/PaddlePaddle/Paddle/pull/35715)) - + - Add `paddle.linalg.lstsq`, to compute least-squares solutions to systems of linear equations. ([#38585](https://github.com/PaddlePaddle/Paddle/pull/38585), [#38621](https://github.com/PaddlePaddle/Paddle/pull/38621)) - + - Add `paddle.linalg.qr`, compute QR decomposition of matrix. ([#35742](https://github.com/PaddlePaddle/Paddle/pull/35742), [#38824](https://github.com/PaddlePaddle/Paddle/pull/38824)) - + - Add `paddle.inner`, to compute inner product of a matrix. ([#37706](https://github.com/PaddlePaddle/Paddle/pull/37706)) - + - Add `paddle.outer`, to compute outer product of a matrix. ([#37706](https://github.com/PaddlePaddle/Paddle/pull/37706)) - + - Add `paddle.linalg.cov`, to compute covariance between vectors. ([#38392](https://github.com/PaddlePaddle/Paddle/pull/38392)) - + - Add `paddle.linalg.cholesky_sovle`, to compute the cholesky solution of the equation. 
([#38167](https://github.com/PaddlePaddle/Paddle/pull/38167)) - + - Add `paddle.linalg.lu` and `paddle.linalg.lu_unpack`, to compute matrix lu decomposition, and decompress lu matrix. ([#38617](https://github.com/PaddlePaddle/Paddle/pull/38617), [#38559](https://github.com/PaddlePaddle/Paddle/pull/38559), [#38616](https://github.com/PaddlePaddle/Paddle/pull/38616)) - + - Add 21 new probability distribution class APIs for reinforcement learning, variation inference, scientific computing, and other scenarios. Including 6 random variable distributions, 13 random variable transformations, and 2 KL divergence computing. as listed below: ([#40536](https://github.com/PaddlePaddle/Paddle/pull/40536), [#38820](https://github.com/PaddlePaddle/Paddle/pull/38820), [#38558](https://github.com/PaddlePaddle/Paddle/pull/38558/files), [#38445](https://github.com/PaddlePaddle/Paddle/pull/38445), [#38244](https://github.com/PaddlePaddle/Paddle/pull/38244), [#38047](https://github.com/PaddlePaddle/Paddle/pull/38047)) - + - `paddle.distribution.ExponentialFamily`, exponential distribution family base class. - + - `paddle.distribution.Beta`, `Beta` distribution. - + - `paddle.distribution.Dirichlet`, `Dirichlet` distribution. - + - `paddle.distribution.Independent`, Independent distribution, used to create higher order distributions. - + - `paddle.distribution.TransformedDistribution`, Transform distribution, used to generate higher-order distributions through the base distribution and a series of transformations. - + - `paddle.distribution.Multionmial`, a multinomial distribution. - + - `paddle.distribution.Transform`, base class for transforming random variables. - + - `paddle.distribution.AbsTransform`, take absolute value transform. - + - `paddle.distribution.AffineTransform`, affine transform. - + - `paddle.distribution.ChainTransform`, chain combination of the transform. - + - `paddle.distribution.ExpTransform`, exponential transform. 
- + - `paddle.distribution.IndependentTransform`, independent transform, used to extend the `event_dim` of the transform definition field. - + - `paddle.distribution.PowerTransform`, power transform. - + - `paddle.distribution.ReshapeTransform`, `reshape` transform. - + - `paddle.distribution.SigmoidTransform`, `sigmoid` transform. - + - `paddle.distribution.SoftmaxTransform`, `softmax` transform. - + - `paddle.distribution.StackTransform`, `stack` transform, used to combine multiple transforms in a `stack` method. - + - `paddle.distribution.StickBreakingTransform` , `stickbreaking` transform. - + - `paddle.distribution.TanhTransform`, `tanh` transform. - + - `paddle.distribution.kl_divergence`, compute KL divergence. - + - `paddle.distribution.register_kl`, register user-defined KL divergence calculation function. - + - Add high-level API - + - Add `paddle.vision.models.AlexNet` and `paddle.vision.models.alexnet`, to use AlexNet models directly. ([#36058](https://github.com/PaddlePaddle/Paddle/pull/36058)) - + - Add `paddle.vision.models.DenseNet`, `paddle.vision.models.densenet121`, `paddle.vision.models.densenet161`, `paddle.vision.models. densenet169`, `paddle.vision.models.densenet201`, and `paddle.vision.models.densenet264`, to use DenseNet models directly. ([#36069](https://github.com/PaddlePaddle/Paddle/pull/36069)) - + - Add `paddle.vision.models.GoogLeNet` and `paddle.vision.models.googlenet`, to use GoogLeNet models directly. ([#36034](https://github.com/PaddlePaddle/Paddle/pull/36034)) - + - Add `paddle.vision.models.InceptionV3`, `paddle.vision.models.inception_v3`, to use InceptionV3 models directly. ([#36064](https://github.com/PaddlePaddle/Paddle/pull/36064)) - + - Add `paddle.vision.models.MobileNetV3Small`, `paddle.vision.models.MobileNetV3Large`, `paddle.vision.models.mobilenet_v3_small`, and `paddle.vision.models.mobilenet_v3_large`, to use MobileNetV3 models directly . 
([#38653](https://github.com/PaddlePaddle/Paddle/pull/38653)) - + - Add `paddle.vision.models.resnext50_32x4d`, `paddle.vision.models.resnext50_64x4d`, `paddle.vision.models. paddle.vision.models.resnext101_32x4d`, `paddle.vision.models.resnext101_64x4d`, `paddle.vision.models.resnext152_32x4d`, and `paddle.vision.models.resnext152_64x4d`, to use ResNeXt models directly. ([#36070](https://github.com/PaddlePaddle/Paddle/pull/36070)) - + - Add `paddle.vision.models.ShuffleNetV2`, `paddle.vision.models.shufflenet_v2_x0_25`, `paddle.vision.models.shufflenet_v2_x0_33`, `paddle.vision.models.shufflenet_v2_x0_5`, `paddle.vision.models.shufflenet_v2_x1_0`, `paddle.vision.models.shufflenet_v2_x1_5`, `paddle.vision.models.shufflenet_v2_x2_0`, and `paddle.vision.models.shufflenet_v2_swish`, to use ShuffleNetV2 models directly ([#36067](https://github.com/PaddlePaddle/Paddle/pull/36067)) - + - Add `paddle.vision.models.SqueezeNet`, `paddle.vision.models.squeezenet1_0`, and `paddle.vision.models.squeezenet1_1`, to use SqueezeNet models directly. ([#36066](https://github.com/PaddlePaddle/Paddle/pull/36066)) - + - Add `paddle.vision.models.wide_resnet50_2`, and `paddle.vision.models.wide_resnet101_2`, to use WideResNet models directly. ([#36952](https://github.com/PaddlePaddle/Paddle/pull/36952)) - + - Add `paddle.vision.ops.nms` API, to support single-category and multi-category non-maximum suppression (NMS) algorithms for target detection and prediction task acceleration ([#40962](https://github.com/PaddlePaddle/Paddle/pull/40962)) - + - Add `paddle.vision.ops.roi_pool` and `paddle.vision.ops.RoIPool`, to support RoI region pooling operations in detection tasks. ([#36154](https://github.com/PaddlePaddle/Paddle/pull/36154)) - + - Add `paddle.vision.ops.roi_align` and `paddle.vision.ops.RoIAlign`, to support RoI Align operations in detection tasks. 
([#35102](https://github.com/PaddlePaddle/Paddle/pull/36154)) - + - Add `paddle.text.ViterbiDecoder`, and `paddle.text.viterbi_decode` Viterbi decoding API, mainly for sequence tagging model prediction. ([#35778](https://github.com/PaddlePaddle/Paddle/pull/35778)) - + - Add 11 Sparse class APIs, to support basic functions, such as creating Sparse Tensor in COO and CSR formats, and add C++ inter-converting with Tensor. - + - `paddle.sparse.sparse_coo_tensor`,create Sparse Tensor in COO format. ([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - + - `paddle.sparse.sparse_csr_tensor`,create Sparse Tensor in CSR format. ([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - + - `paddle.sparse.ReLU`,support ReLU activation layer for SparseCooTensor.([#40959](https://github.com/PaddlePaddle/Paddle/pull/40959)) - + - `paddle.sparse.functional.relu`,support ReLU function of SparseCooTensor.([#40959](https://github.com/PaddlePaddle/Paddle/pull/40959)) - + - `Tensor.values()`,c++ method to get non-zero elements of a SparseCooTensor or SparseCsrTensor. ([#40608](https://github.com/PaddlePaddle/Paddle/pull/40608)) - + - `Tensor.indices()`,c++ method to get the coordinate information of a SparseCooTensor. ([#40608](https://github.com/PaddlePaddle/Paddle/pull/40608)) - + - `Tensor.crows()`,c++ method to get information about the compressed row information of the SparseCsrTensor.([#40608](https://github.com/PaddlePaddle/Paddle/pull/40608)) - + - `Tensor.cols()`,c++ method to get the column information of the SparseCsrTensor ([#40608](https://github.com/PaddlePaddle/Paddle/pull/40608)) - + - `Tensor.to_sparse_coo()`,c++ method to convert a DenseTensor or SparseCsrTensor to a SparseCooTensor. ([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - + - `Tensor.to_sparse_csr()`,c++ convert a DenseTensor or SparseCooTensor to a SparseCsrTensor. 
([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - + - `Tensor.to_dense()`,c++ convert a SparseCooTensor or SparseCsrTensor to a DenseTensor. ([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) - + - Add hardware related APIs - + - Add four GPU memory monitoring related APIs: `paddle.device.cuda.max_memory_allocated`, `paddle.device.cuda.max_memory_reserved`, `paddle.device.cuda.memory_allocated`, and `paddle.device.cuda.memory_reserved`, to view and analyze the GPU memory usage in real-time. ([#38657](https://github.com/PaddlePaddle/Paddle/pull/38657)) - + - Add `paddle.device.cuda.get_device_properties`, to return the properties of the GPU device. ([#35661](https://github.com/PaddlePaddle/Paddle/pull/35661)) - + - Add `paddle.device.cuda.get_device_name` and `paddle.device.cuda.get_device_capability`, to return the name and compute capability of the GPU device. ([#35672](https://github.com/PaddlePaddle/Paddle/pull/35672)) - + - Add Tensor operation API - + - Add `paddle.nansum`, to sum input Tensor along `axis` with ignoring the `NaNs` values. ([#38137](https://github.com/PaddlePaddle/Paddle/pull/38137)) - + - Add `paddle.nanmean`,to average input Tensor along `axis` with ignoring the `NaNs` values. ([#40472](https://github.com/PaddlePaddle/Paddle/pull/40472)) - + - Add `paddle.clone`, to return a copy of the input Tensor and provide gradient calculation. ([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - + - Add `paddle.Tensor.element_size`, to return the number of bytes allocated for a single element in a Tensor. ([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - + - Add `paddle.Tensor.to_uva_tensor`, to convert the numpy objects to be accessed by CUDA objects with virtual addresses, which are stored in CPU memory physically. 
([#39146](https://github.com/PaddlePaddle/Paddle/pull/39146), [#38950](https://github.com/PaddlePaddle/Paddle/pull/38950)) - + - Add `paddle.rot90`, to rotate the n-dimensional Tensor by 90 degrees along the plane specified by `axes`. ([#37634](https://github.com/PaddlePaddle/Paddle/pull/37634)) - + - Add `paddle.logit` and `paddle.Tensor.logit`, to compute the logit function values for input Tensor. ([#37844](https://github.com/PaddlePaddle/Paddle/pull/37844)) - + - Add `paddle.repeat_interleave`, to copy the input along the specified axis, and return a new Tensor. ([#37981](https://github.com/PaddlePaddle/Paddle/pull/37981)) - + - Add `paddle.renorm`, to split the Tensor into multiple pieces at the specified `axis` and then perform p norm operations separately. ([#38130](https://github.com/PaddlePaddle/Paddle/pull/38130), [#38459](https://github.com/PaddlePaddle/Paddle/pull/38459)) - + - Add `paddle.mode` and `paddle.Tensor.mode`, to search the values and indices of the input Tensor along the specified axis. ([#38446](https://github.com/PaddlePaddle/Paddle/pull/38446)) - + - Add `paddle.quantile` and `paddle.Tensor.quantile`, to compute the q-quantile of a Tensor along the specified axis. ([#38567](https://github.com/PaddlePaddle/Paddle/pull/38567)) - + - Add `paddle.kthvalue` and `paddle.Tensor.kthvalue`, to find the values and indices of the kth smallest at the specified axis. ([#38386](https://github.com/PaddlePaddle/Paddle/pull/38386)) - + - Add `paddle.is_floating_point` and `paddle.Tensor.is_floating_point`, to determine if the input Tensor is the floating point type. ([#37885](https://github.com/PaddlePaddle/Paddle/pull/37885)) - + - Add `paddle.erfinv` and `paddle.Tensor.erfinv`, to compute the inverse error function of the input Tensor. ([#38295](https://github.com/PaddlePaddle/Paddle/pull/38295)) - + - Add `paddle.lerp` and `paddle.Tensor.lerp`, to compute linear interpolation among the input Tensors based on the given weights. 
([#37253](https://github.com/PaddlePaddle/Paddle/pull/37253)) - + - Add `paddle.angle`, to compute the phase angle of a complex Tensor. ([#37689](https://github.com/PaddlePaddle/Paddle/pull/37689)) - + - Add `paddle.rad2deg` and `paddle.Tensor.rad2deg`, to convert each of the elements of input from the angles in radians to the degrees. ([#37598](https://github.com/PaddlePaddle/Paddle/pull/37598)) - + - Add `paddle.deg2rad` and `paddle.Tensor.deg2rad`, to convert each of the elements of input from the degrees in radians to the angles. ([#37598](https://github.com/PaddlePaddle/Paddle/pull/37598)) - + - Add `paddle.gcd` and `paddle.Tensor.gcd`, to compute the greatest common divisors of the absolute values of two inputs by element. ([#37819](https://github.com/PaddlePaddle/Paddle/pull/37819)) - + - Add `paddle.lcm` and `paddle.Tensor.lcm`, to compute the least common multiple of the absolute value of two inputs by element. ([#37819](https://github.com/PaddlePaddle/Paddle/pull/37819)) - + - Add `paddle.amax` and `paddle.Tensor.amax`, to get the maximum value of Tensor elements along the specified dimension. ([#38417](https://github.com/PaddlePaddle/Paddle/pull/38417)) - + - Add `paddle.amin` and `paddle.Tensor.amin`, to get the minimum value of Tensor elements along the specified dimension. ([#38417](https://github.com/PaddlePaddle/Paddle/pull/38417)) - + - Add `paddle.isclose`, to determine if each element of two Tensors is close to each other. ([#37135](https://github.com/PaddlePaddle/Paddle/pull/37135)) - + - Add `paddle.put_along_axis` and `paddle.take_along_axis`, for extracting or placing elements with specified index subscripts. ([#38608](https://github.com/PaddlePaddle/Paddle/pull/38608)) - + - Add `paddle.bincount` and `paddle.Tensor.bincount`, for counting the number of occurrences of each element in a Tensor. 
([#36317](https://github.com/PaddlePaddle/Paddle/pull/36317)) - + - Add `paddle.fmax` and `paddle.fmin`, to extend the max/min function to support the case of NaN values in the two Tensors. If there is one NaN value in the corresponding position, return that non-NaN value; if there are two NaN values in the corresponding position, return the NaN value. ([#37826](https://github.com/PaddlePaddle/Paddle/pull/37826)) - + - Add `paddle.diff`, for computing the nth forward difference along a given dimension. It currently supports n=1. ([#37441](https://github.com/PaddlePaddle/Paddle/pull/37441)) - + - Add inverse hyperbolic functions: `paddle.asinh`, `paddle.acosh`, and `paddle.atanh`. ([#37076](https://github.com/PaddlePaddle/Paddle/pull/37076)) - + - Add `paddle.as_real` and `paddle.as_complex` for conversion between real Tensor and complex Tensor. ([#37784](https://github.com/PaddlePaddle/Paddle/pull/37784)) - + - Add `paddle.complex`, for constructing a complex Tensor with the given real and imaginary parts. ([#37918](https://github.com/PaddlePaddle/Paddle/pull/37918), [#38272](https://github.com/PaddlePaddle/Paddle/pull/38272)) - + - Add `paddle.det` and `paddle.slogdet`, to compute the determinant of a matrix and the natural logarithm of the determinant. ([#34992](https://github.com/PaddlePaddle/Paddle/pull/34992)) - + - Add `paddle.nn.utils.parameters_to_vector`, to flatten parameters to a 1-D Tensor. ([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - + - Add `paddle.nn.utils.vector_to_parameters`, to transform a Tensor with 1-D shape to the parameters. ([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - + - Add networking class APIs - + - Add `paddle.nn.Fold` and `paddle.nn.functional.fold`, to extract sliding local area blocks for the Tensors of a batch. ([#38613](https://github.com/PaddlePaddle/Paddle/pull/38613)) - + - Add `paddle.nn.CELU` and `paddle.nn.functional.celu`, to support the CELU activation layer. 
([#36088](https://github.com/PaddlePaddle/Paddle/pull/36088)) - + - Add `paddle.nn.HingeEmbeddingLoss`. Add a way to compute hinge embedding loss. It is usually used for nonlinear embedding or semi-supervised learning. ([#37540](https://github.com/PaddlePaddle/Paddle/pull/37540)) - + - Add `paddle.nn.ZeroPad2D` API, for zero-padding according to the padding property. ([#37151](https://github.com/PaddlePaddle/Paddle/pull/37151)) - + - Add `paddle.nn.MaxUnPool3D` and `paddle.nn.MaxUnPool1D`, for computing 3D maximum inverse pooling and 1D maximum inverse pooling. ([#38716](https://github.com/PaddlePaddle/Paddle/pull/38716)) - + - Add `paddle.incubate.graph_khop_sampler`, `paddle.incubate.graph_sample_neighbors`, and `paddle.incubate.graph_reindex` APIs, to support graph multi-order neighbor sampling and graph reindexing operations. They are mainly used for graph neural network model training. ([#39146](https://github.com/PaddlePaddle/Paddle/pull/39146), [#40809](https://github.com/PaddlePaddle/Paddle/pull/40809)) - + - Add random number class APIs - + - Add `paddle.poisson`, to generate a Tensor that obeys Poisson distributed with the lambda parameter. ([#38117](https://github.com/PaddlePaddle/Paddle/pull/38117)) - + - Add `paddle.randint_like` API, to generate a new Tensor that obeys uniform distribution in the range [low, high), with the shape of the output matching the shape of the input. ([#36169](https://github.com/PaddlePaddle/Paddle/pull/36169)) - + - Add `paddle.Tensor.exponential_`. It is an inplace style API that populates the input Tensor with exponentially distributed random numbers. ([#38256](https://github.com/PaddlePaddle/Paddle/pull/38256)) - + - Add parameter initialization class APIs - + - Add `paddle.nn.initializer.Dirac`, to initialize 3D/4D/5D parameters with Dirac delta functions. It is commonly used for initialization of Conv1D/Conv2D/Conv3D parameters in the convolution layer. 
([#37389](https://github.com/PaddlePaddle/Paddle/pull/37389)) - + - Add `paddle.nn.initializer.Orthogonal` for orthogonal matrix initialization. The initialized parameter is the (semi-) orthogonal vector. ([#37163](https://github.com/PaddlePaddle/Paddle/pull/37163)) - + - Add `paddle.nn.initializer.calculate_gain`, to get the recommended gain value for the activation function. The gain value can be used to set certain initialization APIs to adjust the initialization range. ([#37163](https://github.com/PaddlePaddle/Paddle/pull/37163)) - + - Add learning rate class API - + - Add `paddle.optimizer.lr.MultiplicativeDecay`, to provide the `lambda` function to set the learning rate. ([#38250](https://github.com/PaddlePaddle/Paddle/pull/38250)) - Add distributed-related APIs - + - Add `paddle.incubate.optimizer.DistributedFusedLamb`, to allow the Lamb optimizer to update parameters distributedly. ([#40011](https://github.com/PaddlePaddle/Paddle/pull/40011), [#39972](https://github.com/PaddlePaddle/Paddle/pull/39972), [#39900](https://github.com/PaddlePaddle/Paddle/pull/39900), [#39747](https://github.com/PaddlePaddle/Paddle/pull/39747), [#39148](https://github.com/PaddlePaddle/Paddle/pull/39148), [#39416](https://github.com/PaddlePaddle/Paddle/pull/39416)) - Add new optimizer-related APIs([#40710](https://github.com/PaddlePaddle/Paddle/pull/40710)) - + - `paddle.incubate.optimizer.functional.minimize_bfgs`,add second-order optimizer BFGS. - + - `paddle.incubate.optimizer.functional.minimize_lbfgs`,add second-order optimizer L-BFGS. - + - Add `paddle.incubate.multiprocessing` module, to provide Tensor (CPU/GPU) data transfer between python processes. 
([#37302](https://github.com/PaddlePaddle/Paddle/pull/37302), [#41339](https://github.com/PaddlePaddle/Paddle/pull/41339)) - Add `paddle.incubate.autotune.set_config` API, to support multi-version Kernel auto-selection, mixed precision data layout auto-conversion, and num_workers auto-selection for DataLoader to automatically improve model performance. ([#42301](https://github.com/PaddlePaddle/Paddle/pull/42301)) @@ -543,599 +543,599 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - Add `paddle.incubate.nn.FusedMultiTransformer` and `paddle.incubate.nn.functional.fused_multi_transformer` API, to fuse multiple layers of transformers into a single op to improve model inference performance. It should be noted that only forward is supported. ([#42311](https://github.com/PaddlePaddle/Paddle/pull/42311)) - Add einsum_v2 operators for consistent interface between imperative and static mode. It is compatible with the `paddle.einsum` implementation at the original python side, while supporting dynamic to static export and more complete Infershape inference. ([#42495](https://github.com/PaddlePaddle/Paddle/pull/42495), [#42327](https://github.com/PaddlePaddle/Paddle/pull/42327), [#42397](https://github.com/PaddlePaddle/Paddle/pull/42397), [#42105](https://github.com/PaddlePaddle/Paddle/pull/42105)) - + #### IR(Intermediate Representation) - Dynamic graph to static graph - + - For the variable type StaticAnalysis module, add support for type tag similar to `a, b = paddle.shape(x)` . ([#39245](https://github.com/PaddlePaddle/Paddle/pull/39245)) - + - Add a computed field, supporting `InputSpec.name` as the Program cache hash key. ([#38273](https://github.com/PaddlePaddle/Paddle/pull/38273)) - + - Add syntax for supporting `dict['key'] = x.shape`. ([#40611](https://github.com/PaddlePaddle/Paddle/pull/40611)) - + - Add the support for Pure FP16 training. 
([#36944](https://github.com/PaddlePaddle/Paddle/pull/36944)) - + - Add the support `for i in [x,y,z]` syntax. ([#37259](https://github.com/PaddlePaddle/Paddle/pull/37259)) - + - Add the support for type hint syntax of python3. ([#36544](https://github.com/PaddlePaddle/Paddle/pull/36544)) - + - Pass development - + - Add forward and backward fusion for FC + [relu|gelu] based on NVIDIA cuBlasLt Epilogue. ([#39437](https://github.com/PaddlePaddle/Paddle/pull/39437)) - Kernel Primitive API - + - Add KP operators on GPU platform, including cast, scale, clip, bce_loss, abs_grad, reduce_sum_grad, reduce_mean_grad, clip, bce_loss, full, full_like, distribution, random , masked_select_kernel, where_index, masked_select_grad, dropout, sigmoid, where, and abs_grad. ([#36203](https://github.com/PaddlePaddle/Paddle/pull/36203), [#36423](https://github.com/PaddlePaddle/Paddle/pull/36423), [#39390](https://github.com/PaddlePaddle/Paddle/pull/39390), [#39734](https://github.com/PaddlePaddle/Paddle/pull/39734), [#38500](https://github.com/PaddlePaddle/Paddle/pull/38500), [#38959](https://github.com/PaddlePaddle/Paddle/pull/38959), [#39197](https://github.com/PaddlePaddle/Paddle/pull/39197/), [#39563](https://github.com/PaddlePaddle/Paddle/pull/39563), [#39666](https://github.com/PaddlePaddle/Paddle/pull/39666), [#40517](https://github.com/PaddlePaddle/Paddle/pull/40517), [#40617](https://github.com/PaddlePaddle/Paddle/pull/40617), [#40766](https://github.com/PaddlePaddle/Paddle/pull/40766), [#39898](https://github.com/PaddlePaddle/Paddle/pull/39898), [#39609](https://github.com/PaddlePaddle/Paddle/pull/39609)) - + - Add the support for XPU2 source code compilation mode. 
([#37254](https://github.com/PaddlePaddle/Paddle/pull/37254), [#40397](https://github.com/PaddlePaddle/Paddle/pull/40397), [#38455](https://github.com/PaddlePaddle/Paddle/pull/38455)) - + - Add the support for KP operator reuse on XPU2 and GPU, including reduce, broadcast, elementwise_add, `exp、log、relu、sigmoid、leaky_relu、softplus、hard_swish、reciprocal`。([#36904](https://github.com/PaddlePaddle/Paddle/pull/36904), [#37226](https://github.com/PaddlePaddle/Paddle/pull/37226), [#38918](https://github.com/PaddlePaddle/Paddle/pull/38918), [#40560](https://github.com/PaddlePaddle/Paddle/pull/40560/), [#39787](https://github.com/PaddlePaddle/Paddle/pull/39787), [#39917](https://github.com/PaddlePaddle/Paddle/pull/39917), [#40002](https://github.com/PaddlePaddle/Paddle/pull/40002), [#40364](https://github.com/PaddlePaddle/Paddle/pull/40364)) - + - Add unit tests of KP operators on the XPU2 platform, including `brelu、ceil、celu、elu、floor、hard_shrink、hard_sigmoid、log1p、logsigmoid、relu6、silu、soft_relu、softsign、sqrt、square、swish、thresholded_relu、softshrink`。([#40448](https://github.com/PaddlePaddle/Paddle/pull/40448), [#40524](https://github.com/PaddlePaddle/Paddle/pull/40524)) - + - Add the support for XPU2 KP models, including resnet50, deepfm, wide_deep, yolov3-darknet53, det_mv3_db, bert, transformer, mobilenet_v3, and GPT2. - + #### **Mixed Precision Training** - Split the `paddle.amp.GradScaler.unscale_` method from the `minimize` of the mixed precision training `paddle.amp.GradScaler`, to provide a separate interface for recovering the loss. ([#35825](https://github.com/PaddlePaddle/Paddle/pull/35825)) - + - Add the FP16 support for `paddle.nn.ClipByGlobalNorm` dynamic graph mode. Add FP16 Kernel for clip op to enable clip-related operations to support FP16 compute. 
([#36198](https://github.com/PaddlePaddle/Paddle/pull/36198), [#36577](https://github.com/PaddlePaddle/Paddle/pull/36577)) - + - Support the case that the `optimizer` parameter transferred from `paddle.amp.decorate` is Nan. ([#37541](https://github.com/PaddlePaddle/Paddle/pull/37541)) - + - For the merged_momentum op,add the support of input multiple learning rates , the computing for use_nesterov policy and the regularization computing . ([#37527](https://github.com/PaddlePaddle/Paddle/pull/37527)) - + - Add multi_tensor policy to `paddle.optimizer.Momentum` optimizer. Add `set_to_zero` branch to `clear_grad` of `Optimzizer` class. ([#37564](https://github.com/PaddlePaddle/Paddle/pull/37564)) - + - Add multi_tensor policy to `paddle.optimizer.Adam` . ([#38010](https://github.com/PaddlePaddle/Paddle/pull/38010)) - + - Add multi_precision policy to `paddle.optimizer.SGD` optimizer. ([#38231](https://github.com/PaddlePaddle/Paddle/pull/38231)) - + - Add the storage `master weight` parameter to the optimizer `state_dict` method. ([#39121](https://github.com/PaddlePaddle/Paddle/pull/39121)) - + - Add support for op CUDA bfloat16 mixed precision training. Support for O1 and O2 modes. Enable the above training modes via `paddle.amp.auto_cast` . ([#39029](https://github.com/PaddlePaddle/Paddle/pull/39029), [#39815](https://github.com/PaddlePaddle/Paddle/pull/39815)) - + - Add bfloat16 CUDA Kernel for the following ops: matmul, concat, split, dropout, reshape, slice, squeeze, stack, transpose, unbind, elementwize_max, elementwize_add, elementwize_mul, elementwize_sub, scale, sum, layer_norm, p_norm, reduce_sum, softmax, log_softmax, sigmoid, sqrt, softplus, square, gaussian_random, fill_constant, and fill_any_like. 
([#39485](https://github.com/PaddlePaddle/Paddle/pull/39485), [#39380](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39395](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39402](https://github.com/PaddlePaddle/Paddle/pull/39402), [#39457](https://github.com/PaddlePaddle/Paddle/pull/39457), [#39461](https://github.com/PaddlePaddle/Paddle/pull/39461), [#39602](https://github.com/PaddlePaddle/Paddle/pull/39602), [#39716](https://github.com/PaddlePaddle/Paddle/pull/39716), [#39683](https://github.com/PaddlePaddle/Paddle/pull/39683), [#39843](https://github.com/PaddlePaddle/Paddle/pull/39843), [#39999](https://github.com/PaddlePaddle/Paddle/pull/39999), [#40004](https://github.com/PaddlePaddle/Paddle/pull/40004), [#40027](https://github.com/PaddlePaddle/Paddle/pull/40027)) - + - Add bfloat16 CPU Kernel for the following ops: dropout, reshape, slice, squeeze, unsqueeze, stack, transpose, unbind, elementwize_max, elementwise_mul, elementwise_sub, and gather. ([#39380](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39395](https://github.com/PaddlePaddle/Paddle/pull/39380), [#39402](https://github.com/PaddlePaddle/Paddle/pull/39402), [#39457](https://github.com/PaddlePaddle/Paddle/pull/39457), [#39461](https://github.com/PaddlePaddle/Paddle/pull/39461), [#39602](https://github.com/PaddlePaddle/Paddle/pull/39602), [#39716](https://github.com/PaddlePaddle/Paddle/pull/39716), [#39683](https://github.com/PaddlePaddle/Paddle/pull/39683)) - + - Support printing of Tensor with data of bfloat16. ([#39375](https://github.com/PaddlePaddle/Paddle/pull/39375), [#39370](https://github.com/PaddlePaddle/Paddle/pull/39370)) - + - Add support for FP16 computation for `p_norm` , `elementwise_max` , and `fill_constant_batch_size_like ``scatter` . 
([#35888](https://github.com/PaddlePaddle/Paddle/pull/35888), [#39907](https://github.com/PaddlePaddle/Paddle/pull/39907), [#38136](https://github.com/PaddlePaddle/Paddle/pull/38136), [#38499](https://github.com/PaddlePaddle/Paddle/pull/38499)) - + - Add support for int16_t for the following ops: cumsum, less_than, less_equal, greater_than, greater_equal, equal, not_equal, fill_any_like, gather_nd, reduce_sum, where_index, reshape, and unsqueeze. ([#39636](https://github.com/PaddlePaddle/Paddle/pull/39636)) - + - Add support for int16_t label type for cross_entropy op. ([#39409](https://github.com/PaddlePaddle/Paddle/pull/39409)) - + - Add support for int16_t id type for embedding op. ([#39381](https://github.com/PaddlePaddle/Paddle/pull/39381)) - + - Add support for FP16 type for reduce_mean op. ([#38289](https://github.com/PaddlePaddle/Paddle/pull/38289)) - + - Add support for FP16 type for elementwise_min op. ([#38123](https://github.com/PaddlePaddle/Paddle/pull/38123)) - + - Update bfloat16 AMP oneDNN default support list. ([#39304](https://github.com/PaddlePaddle/Paddle/pull/39304)) - + #### **Paddle HIgh reusability operator library** We announce PHI as the new Paddle HIgh reusability operator library. PHI provides Primitive API, enabling kernel reuse for operator development. As a refactored functional operator library, PHI aims to solve legacy problems that harm the framework's performance and reusability, in particular on the operator development. Such problems include inefficient ways of cross using operators, unclear operator interfaces and lacking direct calls to the operator library in C++. With PHI, new operators can be easily implemented by composing functions available in the functional library. The library provides over 200 C++ operator class APIs and nearly 500 kernels. Composing new operators through these built-in functions can greatly reduce the user's development effort. PHI supports different types of hardware (e.g., GPU and XPU). 
In addition, PHI is extensible with plugins for accommodating third party accelerators (such as NPU) in a low cost and reusable fashion. In short, PHI supports low level operator composabilty, the reuse of kernels through Primitives, and accelerators through plugins.The main contents include six parts as below: - **The implementation of the operator library infrastructure, core components and mechanisms** : The directory structure of the new operator library is reasonably planned, design and implement the common base data structure of the new operator library, the new functional InferMeta and Kernel development paradigm and the corresponding registration and management components. Support the automated compilation object generation and compilation dependency generation of Kernel files, allowing developers to focus only on the functional Kernel implementation, and making the development paradigm clear and concise. ([#34425](https://github.com/PaddlePaddle/Paddle/pull/34425), [#37107](https://github.com/PaddlePaddle/Paddle/pull/37107), [#36946](https://github.com/PaddlePaddle/Paddle/pull/36946), [#36948](https://github.com/PaddlePaddle/Paddle/pull/36948), [#37876](https://github.com/PaddlePaddle/Paddle/pull/37876), [#37916](https://github.com/PaddlePaddle/Paddle/pull/37916), [#37977](https://github.com/PaddlePaddle/Paddle/pull/37977), [38078](https://github.com/PaddlePaddle/Paddle/pull/38078), [#38861](https://github.com/PaddlePaddle/Paddle/pull/38861), [#39123](https://github.com/PaddlePaddle/Paddle/pull/39123), [#39131](https://github.com/PaddlePaddle/Paddle/pull/39131), [#39748](https://github.com/PaddlePaddle/Paddle/pull/39748), [#39790](https://github.com/PaddlePaddle/Paddle/pull/39790), [#39941](https://github.com/PaddlePaddle/Paddle/pull/39941), [#40239](https://github.com/PaddlePaddle/Paddle/pull/40239), [#40635](https://github.com/PaddlePaddle/Paddle/pull/40635), [#41091](https://github.com/PaddlePaddle/Paddle/pull/41091), 
[#37409](https://github.com/PaddlePaddle/Paddle/pull/37409), [#37942](https://github.com/PaddlePaddle/Paddle/pull/37942), [#39002](https://github.com/PaddlePaddle/Paddle/pull/39002), [#38109](https://github.com/PaddlePaddle/Paddle/pull/38109), [#37881](https://github.com/PaddlePaddle/Paddle/pull/37881), [#37517](https://github.com/PaddlePaddle/Paddle/pull/37517), [#39870](https://github.com/PaddlePaddle/Paddle/pull/39870), [#40975](https://github.com/PaddlePaddle/Paddle/pull/40975), [#39475](https://github.com/PaddlePaddle/Paddle/pull/39475), [#37304](https://github.com/PaddlePaddle/Paddle/pull/37304), #36910, #37120, #37146, #37215, #37255, #37369, #38258, #38257, #38355, #38853, #38937, #38977, #38946, #39085, #39153, #39228, #38301, #38275, #38506, #38607, #38473, #38632, #38811, #38880, #38996, #38914, #39101) - + - **Operator library C++ API system construction**: design and implement yaml configuration file-based operator definition paradigm, to automatically generate more than 200 C++ operator class APIs for internal and external developers to reuse. This reduces the cost of repeated development of basic operators. 
([#37668](https://github.com/PaddlePaddle/Paddle/pull/37668), [#36938](https://github.com/PaddlePaddle/Paddle/pull/36938), [#38172](https://github.com/PaddlePaddle/Paddle/pull/38172), [#38182](https://github.com/PaddlePaddle/Paddle/pull/38182), [#38311](https://github.com/PaddlePaddle/Paddle/pull/38311), [#38438](https://github.com/PaddlePaddle/Paddle/pull/38438), [#39057](https://github.com/PaddlePaddle/Paddle/pull/39057), [#39229](https://github.com/PaddlePaddle/Paddle/pull/39229), [#39281](https://github.com/PaddlePaddle/Paddle/pull/39281), [#39263](https://github.com/PaddlePaddle/Paddle/pull/39263), [#39408](https://github.com/PaddlePaddle/Paddle/pull/39408), [#39436](https://github.com/PaddlePaddle/Paddle/pull/39436), [#39482](https://github.com/PaddlePaddle/Paddle/pull/39482), [#39497](https://github.com/PaddlePaddle/Paddle/pull/39497), [#39651](https://github.com/PaddlePaddle/Paddle/pull/39651), [#39521](https://github.com/PaddlePaddle/Paddle/pull/39521), [#39760](https://github.com/PaddlePaddle/Paddle/pull/39760), [#40060](https://github.com/PaddlePaddle/Paddle/pull/40060), [#40196](https://github.com/PaddlePaddle/Paddle/pull/40196), [#40218](https://github.com/PaddlePaddle/Paddle/pull/40218), [#40640](https://github.com/PaddlePaddle/Paddle/pull/40640), [#40732](https://github.com/PaddlePaddle/Paddle/pull/40732), [#40729](https://github.com/PaddlePaddle/Paddle/pull/40729), [#40840](https://github.com/PaddlePaddle/Paddle/pull/40840), [#40867](https://github.com/PaddlePaddle/Paddle/pull/40867), [#41025](https://github.com/PaddlePaddle/Paddle/pull/41025), [#41368](https://github.com/PaddlePaddle/Paddle/pull/41368)) - + - **Operator library compatible with various execution systems**: Implement new InferMeta and Kernel to access the original dynamic and static graph execution system. Support the safe removal of the original OpKernel registration and migration to the new Kernel form. 
([#34425](https://github.com/PaddlePaddle/Paddle/pull/34425), [#38825](https://github.com/PaddlePaddle/Paddle/pull/38825), [#38837](https://github.com/PaddlePaddle/Paddle/pull/38837), [#38842](https://github.com/PaddlePaddle/Paddle/pull/38842), [#38976](https://github.com/PaddlePaddle/Paddle/pull/38976), [#39134](https://github.com/PaddlePaddle/Paddle/pull/39134), [#39140](https://github.com/PaddlePaddle/Paddle/pull/39140), [#39135](https://github.com/PaddlePaddle/Paddle/pull/39135), [#39252](https://github.com/PaddlePaddle/Paddle/pull/39252), [#39222](https://github.com/PaddlePaddle/Paddle/pull/39222), [#39351](https://github.com/PaddlePaddle/Paddle/pull/39351)) - + - **Decouple the underlying data structures and tool functions of the operator library from the framework**: Relieve PHI's dependence on the framework for core data structures, lay the foundation for subsequent independent compilation of PHI, and support infrt, custom Kernel, and a series of Phi-based construction work ([#38583](https://github.com/PaddlePaddle/Paddle/pull/38583), [#39188](https://github.com/PaddlePaddle/Paddle/pull/39188), [#39560](https://github.com/PaddlePaddle/Paddle/pull/39560), [#39931](https://github.com/PaddlePaddle/Paddle/pull/39931), [#39169](https://github.com/PaddlePaddle/Paddle/pull/39169), [#38951](https://github.com/PaddlePaddle/Paddle/pull/38951), [#38898](https://github.com/PaddlePaddle/Paddle/pull/38898), [#38873](https://github.com/PaddlePaddle/Paddle/pull/38873), [#38696](https://github.com/PaddlePaddle/Paddle/pull/38696), [#38651](https://github.com/PaddlePaddle/Paddle/pull/38651), [#39359](https://github.com/PaddlePaddle/Paddle/pull/39359), [#39305](https://github.com/PaddlePaddle/Paddle/pull/39305), [#39234](https://github.com/PaddlePaddle/Paddle/pull/39234), [#39098](https://github.com/PaddlePaddle/Paddle/pull/39098), [#39120](https://github.com/PaddlePaddle/Paddle/pull/39120), [#38979](https://github.com/PaddlePaddle/Paddle/pull/38979), 
[#38899](https://github.com/PaddlePaddle/Paddle/pull/38899), [#38844](https://github.com/PaddlePaddle/Paddle/pull/38844), [#39714](https://github.com/PaddlePaddle/Paddle/pull/39714), [#39729](https://github.com/PaddlePaddle/Paddle/pull/39729), [#39889](https://github.com/PaddlePaddle/Paddle/pull/39889), [#39587](https://github.com/PaddlePaddle/Paddle/pull/39587), [#39558](https://github.com/PaddlePaddle/Paddle/pull/39558), [#39514](https://github.com/PaddlePaddle/Paddle/pull/39514), [#39502](https://github.com/PaddlePaddle/Paddle/pull/39502), [#39300](https://github.com/PaddlePaddle/Paddle/pull/39300), [#39246](https://github.com/PaddlePaddle/Paddle/pull/39246), [#39124](https://github.com/PaddlePaddle/Paddle/pull/39124)) - + - **Integration between custom operator mechanism and Phi with improvement**: support for calling over 200 C++ operator class APIs automatically generated by PHI when writing custom operators. This reduces custom operator development costs. A series of bugs are fixed. ([#37122](https://github.com/PaddlePaddle/Paddle/pull/37122), [#37276](https://github.com/PaddlePaddle/Paddle/pull/37276), [#37281](https://github.com/PaddlePaddle/Paddle/pull/37281), [#37262](https://github.com/PaddlePaddle/Paddle/pull/37281), [#37415](https://github.com/PaddlePaddle/Paddle/pull/37415), [#37423](https://github.com/PaddlePaddle/Paddle/pull/37423), [#37583](https://github.com/PaddlePaddle/Paddle/pull/37683), [#38776](https://github.com/PaddlePaddle/Paddle/pull/38776), [#39353](https://github.com/PaddlePaddle/Paddle/pull/39353), [#41072](https://github.com/PaddlePaddle/Paddle/pull/41072)) - + - **Operator scale migration and refactoring**: migrate about 250 high-frequency forward and backward operator Kernel to the new operator library and refactor them as a single function. Achieve the high-performance operator by encapsulating multiple base Kernel functions on the C++ side for the fast combination. 
Meanwhile, add the corresponding yaml operator definition, and access to the new dynamic graph execution system to improve the python API scheduling performance. The migrated and refactored operators include: - + - sqrt ([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - square([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - sin ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - sinh ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - elementwise_fmax([#40140](https://github.com/PaddlePaddle/Paddle/pull/40140)) - + - elementwise_fmin([#40140](https://github.com/PaddlePaddle/Paddle/pull/40140)) - + - pool2d([#40208](https://github.com/PaddlePaddle/Paddle/pull/40208), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - max_pool2d_with_index([#40208](https://github.com/PaddlePaddle/Paddle/pull/40208), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - pool3d([#40208](https://github.com/PaddlePaddle/Paddle/pull/40208), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - max_pool3d_with_index([#40208](https://github.com/PaddlePaddle/Paddle/pull/40208), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - fill_constant ([#36930](https://github.com/PaddlePaddle/Paddle/pull/36930), [#39465](https://github.com/PaddlePaddle/Paddle/pull/39465)) - + - p_norm ([#40819](https://github.com/PaddlePaddle/Paddle/pull/40819)) - + - fill_constant_batch_size_like ([#40784](https://github.com/PaddlePaddle/Paddle/pull/40784)) - + - conv2d([#39354](https://github.com/PaddlePaddle/Paddle/pull/39354)) - + - conv2d_transpose([#40675](https://github.com/PaddlePaddle/Paddle/pull/40675), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - conv3d([#39354](https://github.com/PaddlePaddle/Paddle/pull/39354)) - + - conv3d_transpose([#40675](https://github.com/PaddlePaddle/Paddle/pull/40675), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - 
mish([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - gather_nd ([#40090](https://github.com/PaddlePaddle/Paddle/pull/40090), [#40043](https://github.com/PaddlePaddle/Paddle/pull/40043)) - + - gather ([#40500](https://github.com/PaddlePaddle/Paddle/pull/40500)) - + - scatter ([#40090](https://github.com/PaddlePaddle/Paddle/pull/40090), [#40043](https://github.com/PaddlePaddle/Paddle/pull/40043)) - + - scatter_nd_add ([#40090](https://github.com/PaddlePaddle/Paddle/pull/40090), [#40043](https://github.com/PaddlePaddle/Paddle/pull/40043)) - + - sgd([40045](https://github.com/PaddlePaddle/Paddle/pull/40045)) - + - momentum ([#41319](https://github.com/PaddlePaddle/Paddle/pull/41319)) - + - rmsprop([#40994](https://github.com/PaddlePaddle/Paddle/pull/40994)) - + - index_sample([#38130](https://github.com/PaddlePaddle/Paddle/pull/38130), [#38459](https://github.com/PaddlePaddle/Paddle/pull/38459),[#39905](https://github.com/PaddlePaddle/Paddle/pull/39905)) - + - adam ([#40351](https://github.com/PaddlePaddle/Paddle/pull/40351)) - + - layer_norm([#40193](https://github.com/PaddlePaddle/Paddle/pull/40193)) - + - adagrad([#40994](https://github.com/PaddlePaddle/Paddle/pull/40994/)) - + - adamax ([#40173](https://github.com/PaddlePaddle/Paddle/pull/40173)) - + - adadelta ([#40173](https://github.com/PaddlePaddle/Paddle/pull/40173)) - + - clip([#40602](https://github.com/PaddlePaddle/Paddle/pull/40602), [#41661](https://github.com/PaddlePaddle/Paddle/pull/41661), [#41675](https://github.com/PaddlePaddle/Paddle/pull/41675)) - + - ceil ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - cos ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - atan ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - cosh ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - erf([#40388](https://github.com/PaddlePaddle/Paddle/pull/40388)) - + - asin ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - 
acos ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - scale ([#39278](https://github.com/PaddlePaddle/Paddle/pull/39278)) - + - elementwise_pow ([#40993](https://github.com/PaddlePaddle/Paddle/pull/40993)) - + - elementwise_sub ([#39225](https://github.com/PaddlePaddle/Paddle/pull/39225), [#37260](https://github.com/PaddlePaddle/Paddle/pull/37260)) - + - round ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - floor ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - pow ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - elementwise_floordiv ([#40993](https://github.com/PaddlePaddle/Paddle/pull/40993)) - + - reciprocal([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - log1p ([#40785](https://github.com/PaddlePaddle/Paddle/pull/40785)) - + - allclose ([#40469](https://github.com/PaddlePaddle/Paddle/pull/40469)) - + - mul ([#40833](https://github.com/PaddlePaddle/Paddle/pull/40833)) - + - elementwise_max ([#40590](https://github.com/PaddlePaddle/Paddle/pull/40590)) - + - elementwise_min ([#40590](https://github.com/PaddlePaddle/Paddle/pull/40590)) - + - elementwise_mod ([#40590](https://github.com/PaddlePaddle/Paddle/pull/40590)) - + - elementwise_add ([#39048](https://github.com/PaddlePaddle/Paddle/pull/39048), [#37043](https://github.com/PaddlePaddle/Paddle/pull/37043)) - + - matmul_v2 ([#36844](https://github.com/PaddlePaddle/Paddle/pull/36844), [#38713](https://github.com/PaddlePaddle/Paddle/pull/38713)) - + - elementwise_mul ([#41042](https://github.com/PaddlePaddle/Paddle/pull/41042), [#40252](https://github.com/PaddlePaddle/Paddle/pull/40252), [#37471](https://github.com/PaddlePaddle/Paddle/pull/37471)) - + - elementwise_div ([#40172](https://github.com/PaddlePaddle/Paddle/pull/40172), [#40039](https://github.com/PaddlePaddle/Paddle/pull/40039), [#37418](https://github.com/PaddlePaddle/Paddle/pull/37418)) - + - SelectedRows 
([#39037](https://github.com/PaddlePaddle/Paddle/pull/39037), [#39087](https://github.com/PaddlePaddle/Paddle/pull/39087), [#39128](https://github.com/PaddlePaddle/Paddle/pull/39128), [#39162](https://github.com/PaddlePaddle/Paddle/pull/39162), [#39236](https://github.com/PaddlePaddle/Paddle/pull/39236)) - + - fill_any_like ([#39807](https://github.com/PaddlePaddle/Paddle/pull/39807)) - + - dot([#38359](https://github.com/PaddlePaddle/Paddle/pull/38359)) - + - sum ([#40873](https://github.com/PaddlePaddle/Paddle/pull/40873)) - + - cumsum ([#39976](https://github.com/PaddlePaddle/Paddle/pull/39976), [#40200](https://github.com/PaddlePaddle/Paddle/pull/40200)) - + - diag_v2 ([#39914](https://github.com/PaddlePaddle/Paddle/pull/39914)) - + - auc ([#39976](https://github.com/PaddlePaddle/Paddle/pull/39976), [#40200](https://github.com/PaddlePaddle/Paddle/pull/40200)) - + - log_loss ([#39976](https://github.com/PaddlePaddle/Paddle/pull/39976), [#40200](https://github.com/PaddlePaddle/Paddle/pull/40200)) - + - one_hot_v2([39876](https://github.com/PaddlePaddle/Paddle/pull/39876)) - + - sigmoid_cross_entropy_with_logits ([#39976](https://github.com/PaddlePaddle/Paddle/pull/39976), [#40200](https://github.com/PaddlePaddle/Paddle/pull/40200)) - + - bce_loss ([#39868](https://github.com/PaddlePaddle/Paddle/pull/39868)) - + - argsort ([#40151](https://github.com/PaddlePaddle/Paddle/pull/40151)) - + - arg_max ([#40222](https://github.com/PaddlePaddle/Paddle/pull/40222)) - + - arg_min ([#40222](https://github.com/PaddlePaddle/Paddle/pull/40222)) - + - segment_pool ([#40099](https://github.com/PaddlePaddle/Paddle/pull/40099)) - + - frobenius_norm([#40707](https://github.com/PaddlePaddle/Paddle/pull/40707), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - dist ([#40178](https://github.com/PaddlePaddle/Paddle/pull/40178)) - + - isnan_v2 ([#40076](https://github.com/PaddlePaddle/Paddle/pull/40076)) - + - logical_and 
([#39942](https://github.com/PaddlePaddle/Paddle/pull/39942)) - + - logical_not ([#39942](https://github.com/PaddlePaddle/Paddle/pull/39942)) - + - isfinite_v2 ([#40076](https://github.com/PaddlePaddle/Paddle/pull/40076)) - + - logical_or ([#39942](https://github.com/PaddlePaddle/Paddle/pull/39942)) - + - isinf_v2 ([#40076](https://github.com/PaddlePaddle/Paddle/pull/40076)) - + - is_empty ([#39919](https://github.com/PaddlePaddle/Paddle/pull/39919)) - + - logical_xor ([#39942](https://github.com/PaddlePaddle/Paddle/pull/39942)) - + - less_than([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - not_equal([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - equal([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - less_equal([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - equal_all([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - uniform_random ([#39937](https://github.com/PaddlePaddle/Paddle/pull/39937)) - + - randint ([#39876](https://github.com/PaddlePaddle/Paddle/pull/39876), [#41375](https://github.com/PaddlePaddle/Paddle/pull/41375)) - + - randperm ([#41265](https://github.com/PaddlePaddle/Paddle/pull/41265)) - + - unbind ([#39789](https://github.com/PaddlePaddle/Paddle/pull/39789)) - + - bernoulli ([#39590](https://github.com/PaddlePaddle/Paddle/pull/39590)) - + - increment ([#39858](https://github.com/PaddlePaddle/Paddle/pull/39858), [#39913](https://github.com/PaddlePaddle/Paddle/pull/39913)) - + - multinomial ([#39858](https://github.com/PaddlePaddle/Paddle/pull/39858), [#39913](https://github.com/PaddlePaddle/Paddle/pull/39913)) - + - addmm ([#39858](https://github.com/PaddlePaddle/Paddle/pull/39858), [#39913](https://github.com/PaddlePaddle/Paddle/pull/39913)) - + - cholesky ([#39858](https://github.com/PaddlePaddle/Paddle/pull/39858), [#39913](https://github.com/PaddlePaddle/Paddle/pull/39913)) - + - where ([#39811](https://github.com/PaddlePaddle/Paddle/pull/39811)) - + - 
log10 ([#40785](https://github.com/PaddlePaddle/Paddle/pull/40785)) - + - log2 ([#40785](https://github.com/PaddlePaddle/Paddle/pull/40785)) - + - expm1([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - atan2 ([#39806](https://github.com/PaddlePaddle/Paddle/pull/39806)) - + - gaussian_random ([#39932](https://github.com/PaddlePaddle/Paddle/pull/39932), [#40122](https://github.com/PaddlePaddle/Paddle/pull/40122), [#40191](https://github.com/PaddlePaddle/Paddle/pull/40191)) - + - empty ([#38334](https://github.com/PaddlePaddle/Paddle/pull/38334)) - + - truncated_gaussian_random ([#39971](https://github.com/PaddlePaddle/Paddle/pull/39971), [#40191](https://github.com/PaddlePaddle/Paddle/pull/40191)) - + - mv ([#39861](https://github.com/PaddlePaddle/Paddle/pull/39861), [#39954](https://github.com/PaddlePaddle/Paddle/pull/39954)) - + - tan ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - set_value ([#40195](https://github.com/PaddlePaddle/Paddle/pull/40195), [#40478](https://github.com/PaddlePaddle/Paddle/pull/40478), [#40636](https://github.com/PaddlePaddle/Paddle/pull/40636)) - + - bitwise_and ([#40031](https://github.com/PaddlePaddle/Paddle/pull/40031)) - + - bitwise_not([#40031](https://github.com/PaddlePaddle/Paddle/pull/40031)) - + - bitwise_or([#40031](https://github.com/PaddlePaddle/Paddle/pull/40031)) - + - poisson([#39814](https://github.com/PaddlePaddle/Paddle/pull/39814)) - + - cholesky_solve([#40387](https://github.com/PaddlePaddle/Paddle/pull/40387)) - + - bitwise_xor([#40031](https://github.com/PaddlePaddle/Paddle/pull/40031)) - + - triangular_solve([#40417](https://github.com/PaddlePaddle/Paddle/pull/40417)) - + - sigmoid ([#40626](https://github.com/PaddlePaddle/Paddle/pull/40626)) - + - atanh ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - softsign([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - thresholded_relu ([#40385](https://github.com/PaddlePaddle/Paddle/pull/40385)) - + 
- tanh_shrink ([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - stanh([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - reduce_mean ([#37559](https://github.com/PaddlePaddle/Paddle/pull/37559)) - + - reduce_max([#40225](https://github.com/PaddlePaddle/Paddle/pull/40225)) - + - reduce_min ([#40374](https://github.com/PaddlePaddle/Paddle/pull/40374)) - + - mean ([#40872](https://github.com/PaddlePaddle/Paddle/pull/40872), [#41319](https://github.com/PaddlePaddle/Paddle/pull/41319)) - + - reduce_all ([#40374](https://github.com/PaddlePaddle/Paddle/pull/40374)) - + - reduce_any ([#40374](https://github.com/PaddlePaddle/Paddle/pull/40374)) - + - logsumexp ([#40790](https://github.com/PaddlePaddle/Paddle/pull/40790)) - + - softshrink([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - range ([#41265](https://github.com/PaddlePaddle/Paddle/pull/41265), [#40581](https://github.com/PaddlePaddle/Paddle/pull/40851)) - + - stack([#40581](https://github.com/PaddlePaddle/Paddle/pull/40851)) - + - tile ([#40371](https://github.com/PaddlePaddle/Paddle/pull/40371)) - + - unique([#40581](https://github.com/PaddlePaddle/Paddle/pull/40851)) - + - unstack([#40581](https://github.com/PaddlePaddle/Paddle/pull/40851)) - + - slice([#40736](https://github.com/PaddlePaddle/Paddle/pull/40736)) - + - transpose2([#39327](https://github.com/PaddlePaddle/Paddle/pull/39327)) - + - unsqueeze2( [#40596](https://github.com/PaddlePaddle/Paddle/pull/40596)) - + - squeeze2( [#40596](https://github.com/PaddlePaddle/Paddle/pull/40596)) - + - strided_slice ([#40708](https://github.com/PaddlePaddle/Paddle/pull/40708)) - + - softmax ([#39547](https://github.com/PaddlePaddle/Paddle/pull/39547)) - + - leaky_relu ([#40385](https://github.com/PaddlePaddle/Paddle/pull/40385)) - + - gelu ([#40393](https://github.com/PaddlePaddle/Paddle/pull/40393)) - + - prelu ([#40393](https://github.com/PaddlePaddle/Paddle/pull/40393)) - + - log_softmax 
([#40393](https://github.com/PaddlePaddle/Paddle/pull/40393)) - + - elu ([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - logsigmoid ([#40626](https://github.com/PaddlePaddle/Paddle/pull/40626)) - + - psroi_pool ([#40353](https://github.com/PaddlePaddle/Paddle/pull/40353), [#41173](https://github.com/PaddlePaddle/Paddle/pull/41173)) - + - kthvalue([#40575](https://github.com/PaddlePaddle/Paddle/pull/40575)) - + - mode ([#40571](https://github.com/PaddlePaddle/Paddle/pull/40571)) - + - yolo_box([#40112](https://github.com/PaddlePaddle/Paddle/pull/40112)) - + - yolov3_loss ([#40944](https://github.com/PaddlePaddle/Paddle/pull/40944)) - + - temporal_shift([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - depthwise_conv2d([#39354](https://github.com/PaddlePaddle/Paddle/pull/39354)) - + - pad3d ([#40701](https://github.com/PaddlePaddle/Paddle/pull/40701)) - + - pad( [#40012](https://github.com/PaddlePaddle/Paddle/pull/40012)) - + - greater_equal([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - kldiv_loss ([#39770](https://github.com/PaddlePaddle/Paddle/pull/39770)) - + - isclose ([#39770](https://github.com/PaddlePaddle/Paddle/pull/39770)) - + - silu ([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - unfold ([#39778](https://github.com/PaddlePaddle/Paddle/pull/39778)) - + - batch_norm([39347](https://github.com/PaddlePaddle/Paddle/pull/39347)) - + - norm([#39324](https://github.com/PaddlePaddle/Paddle/pull/39324)) - + - roi_pool ([#40574](https://github.com/PaddlePaddle/Paddle/pull/40574), [#40682](https://github.com/PaddlePaddle/Paddle/pull/40682), [#41173](https://github.com/PaddlePaddle/Paddle/pull/41173)) - + - roi_align ([#40382](https://github.com/PaddlePaddle/Paddle/pull/40382), [#40556](https://github.com/PaddlePaddle/Paddle/pull/40556), [#41402](https://github.com/PaddlePaddle/Paddle/pull/41402)) - + - deformable_conv ([#40700](https://github.com/PaddlePaddle/Paddle/pull/40700), 
[#40794](https://github.com/PaddlePaddle/Paddle/pull/40794), [#41644](https://github.com/PaddlePaddle/Paddle/pull/41644)) - + - deformable_conv_v1 ([#40794](https://github.com/PaddlePaddle/Paddle/pull/40794), [#41644](https://github.com/PaddlePaddle/Paddle/pull/41644)) - + - label_smooth ([#39796](https://github.com/PaddlePaddle/Paddle/pull/39796)) - + - grid_sampler ([#40585](https://github.com/PaddlePaddle/Paddle/pull/40585)) - + - greater_than([#39970](https://github.com/PaddlePaddle/Paddle/pull/39970)) - + - pixel_shuffle ([#39949](https://github.com/PaddlePaddle/Paddle/pull/39949), [#39712](https://github.com/PaddlePaddle/Paddle/pull/39712)) - + - nearest_interp_v2 ([#40855](https://github.com/PaddlePaddle/Paddle/pull/40855)) - + - bilinear_interp_v2 ([#40855](https://github.com/PaddlePaddle/Paddle/pull/40855)) - + - softmax_with_cross_entropy ([#40832](https://github.com/PaddlePaddle/Paddle/pull/40832)) - + - rnn ([#41007](https://github.com/PaddlePaddle/Paddle/pull/41007)) - + - reverse ([#40791](https://github.com/PaddlePaddle/Paddle/pull/40791)) - + - trace ([#39510](https://github.com/PaddlePaddle/Paddle/pull/39510)) - + - kron([#40427](https://github.com/PaddlePaddle/Paddle/pull/40427)) - + - accuracy([#39982](https://github.com/PaddlePaddle/Paddle/pull/39982)) - + - gather_tree ([#40082](https://github.com/PaddlePaddle/Paddle/pull/40082), [#39844](https://github.com/PaddlePaddle/Paddle/pull/39844)) - + - dropout([#40148](https://github.com/PaddlePaddle/Paddle/pull/40148)) - + - bincount ([#39947](https://github.com/PaddlePaddle/Paddle/pull/39947)) - + - warpctc ([#41389](https://github.com/PaddlePaddle/Paddle/pull/41389), [#40023](https://github.com/PaddlePaddle/Paddle/pull/40023)) - + - multiplex([#40007](https://github.com/PaddlePaddle/Paddle/pull/40007), [#40102](https://github.com/PaddlePaddle/Paddle/pull/40102)) - + - qr([#40007](https://github.com/PaddlePaddle/Paddle/pull/40007), 
[#40007](https://github.com/PaddlePaddle/Paddle/pull/40007)) - + - assign_value ([#40967](https://github.com/PaddlePaddle/Paddle/pull/40967)) - + - assign ([#40022](https://github.com/PaddlePaddle/Paddle/pull/40022)) - + - cast ([#37610](https://github.com/PaddlePaddle/Paddle/pull/37610)) - + - tril_triu([#40007](https://github.com/PaddlePaddle/Paddle/pull/40007), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - where_index ([#40255](https://github.com/PaddlePaddle/Paddle/pull/40255)) - + - index_select ([#40260](https://github.com/PaddlePaddle/Paddle/pull/40260), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - roll ([#40257](https://github.com/PaddlePaddle/Paddle/pull/40257), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - cumprod (Xiong Kun [#39770](https://github.com/PaddlePaddle/Paddle/pull/39770)) - + - shard_index ([#40254](https://github.com/PaddlePaddle/Paddle/pull/40254)) - + - reshape2 ([#40914](https://github.com/PaddlePaddle/Paddle/pull/40914), [#39631](https://github.com/PaddlePaddle/Paddle/pull/39631), [#38833](https://github.com/PaddlePaddle/Paddle/pull/38833), [#37164](https://github.com/PaddlePaddle/Paddle/pull/37164)) - + - flip ([#39822](https://github.com/PaddlePaddle/Paddle/pull/39822), [#40974](https://github.com/PaddlePaddle/Paddle/pull/40974)) - + - eye ([#39712](https://github.com/PaddlePaddle/Paddle/pull/39712), [#40105](https://github.com/PaddlePaddle/Paddle/pull/40105), [#41476](https://github.com/PaddlePaddle/Paddle/pull/41476)) - + - lookup_table_v2([#39901](https://github.com/PaddlePaddle/Paddle/pull/39901)) - + - searchsorted([#40520](https://github.com/PaddlePaddle/Paddle/pull/40520), [#41053](https://github.com/PaddlePaddle/Paddle/pull/41053)) - + - adamw ([#40351](https://github.com/PaddlePaddle/Paddle/pull/40351)) - + - tanh ([#40385](https://github.com/PaddlePaddle/Paddle/pull/40385)) - + - cross ([#39829](https://github.com/PaddlePaddle/Paddle/pull/39829)) - + - concat 
([#38955](https://github.com/PaddlePaddle/Paddle/pull/38955), [#41112](https://github.com/PaddlePaddle/Paddle/pull/41112)) - + - split ([#39060](https://github.com/PaddlePaddle/Paddle/pull/39060)) - + - linspace ([#40124](https://github.com/PaddlePaddle/Paddle/pull/40124)) - + - huber_loss ([#39761](https://github.com/PaddlePaddle/Paddle/pull/39761)) - + - hierarchical_sigmoid([#40553](https://github.com/PaddlePaddle/Paddle/pull/40553)) - + - nll_loss ([#39936](https://github.com/PaddlePaddle/Paddle/pull/39936)) - + - graph_send_recv ([#40092](https://github.com/PaddlePaddle/Paddle/pull/40092), [#40320](https://github.com/PaddlePaddle/Paddle/pull/40320)) - + - abs([#39492](https://github.com/PaddlePaddle/Paddle/pull/39492), [#39762](https://github.com/PaddlePaddle/Paddle/pull/39762)) - + - exp([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - rsqrt([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) - + - viterbi_decode ([#40186](https://github.com/PaddlePaddle/Paddle/pull/40186)) - + - conj ([#38247](https://github.com/PaddlePaddle/Paddle/pull/38247)) - + - real ([#39777](https://github.com/PaddlePaddle/Paddle/pull/39777), [#41173](https://github.com/PaddlePaddle/Paddle/pull/41173)) - + - imag ([#39777](https://github.com/PaddlePaddle/Paddle/pull/39777), [#41173](https://github.com/PaddlePaddle/Paddle/pull/41173)) - + - take_along_axis ([#39959](https://github.com/PaddlePaddle/Paddle/pull/39959), [#40270](https://github.com/PaddlePaddle/Paddle/pull/40270), [#40974](https://github.com/PaddlePaddle/Paddle/pull/40974)) - + - put_along_axis ([#39959](https://github.com/PaddlePaddle/Paddle/pull/39959), [#40974](https://github.com/PaddlePaddle/Paddle/pull/40974)) - + - lgamma ([#39770](https://github.com/PaddlePaddle/Paddle/pull/39770)) - + - relu ([#40175](https://github.com/PaddlePaddle/Paddle/pull/40175)) - + - maxout ([#39959](https://github.com/PaddlePaddle/Paddle/pull/39959), 
[#40974](https://github.com/PaddlePaddle/Paddle/pull/40974)) - + - log ([#40785](https://github.com/PaddlePaddle/Paddle/pull/40785)) - + - bilinear_tensor_product([#39903](https://github.com/PaddlePaddle/Paddle/pull/39903)) - + - flatten_contiguous_range ([#38712](https://github.com/PaddlePaddle/Paddle/pull/38712), [#36957](https://github.com/PaddlePaddle/Paddle/pull/36957), [#41345](https://github.com/PaddlePaddle/Paddle/pull/41345)) - + - matrix_rank ([#40074](https://github.com/PaddlePaddle/Paddle/pull/40074), [#40519](https://github.com/PaddlePaddle/Paddle/pull/40519), [#41466](https://github.com/PaddlePaddle/Paddle/pull/41466)) - + - logit ([#37844](https://github.com/PaddlePaddle/Paddle/pull/37844)) - + - lerp ([#40105](https://github.com/PaddlePaddle/Paddle/pull/40105), [#39524](https://github.com/PaddlePaddle/Paddle/pull/39524)) - + - erfinv ([#39949](https://github.com/PaddlePaddle/Paddle/pull/39949), [#39712](https://github.com/PaddlePaddle/Paddle/pull/39712)) - + - broadcast_tensors([#40047](https://github.com/PaddlePaddle/Paddle/pull/40047)) - + - gumbel_softmax([#39873](https://github.com/PaddlePaddle/Paddle/pull/39873)) - + - diagonal ([#39575](https://github.com/PaddlePaddle/Paddle/pull/39575)) - + - trunc ([#39543](https://github.com/PaddlePaddle/Paddle/pull/39543), [#39772](https://github.com/PaddlePaddle/Paddle/pull/39772)) - + - multi_dot ([#40038](https://github.com/PaddlePaddle/Paddle/pull/40038)) - + - matrix_power ([#40231](https://github.com/PaddlePaddle/Paddle/pull/40231)) - + - digamma([#39240](https://github.com/PaddlePaddle/Paddle/pull/39240)) - + - masked_select([#39193](https://github.com/PaddlePaddle/Paddle/pull/39193)) - + - determinant ([#40539](https://github.com/PaddlePaddle/Paddle/pull/40539)) - + - eigh ([#40213](https://github.com/PaddlePaddle/Paddle/pull/40213)) - + - size ([#39949](https://github.com/PaddlePaddle/Paddle/pull/39949), [#39712](https://github.com/PaddlePaddle/Paddle/pull/39712)) - + - shape 
([#40248](https://github.com/PaddlePaddle/Paddle/pull/40248)) - + - reduce_sum([#37559](https://github.com/PaddlePaddle/Paddle/pull/37559), [#41295](https://github.com/PaddlePaddle/Paddle/pull/41295)) - + - reduce_prod ([#39844](https://github.com/PaddlePaddle/Paddle/pull/39844)) - + - histogram([#39496](https://github.com/PaddlePaddle/Paddle/pull/39496)) - + - meshgrid ([#41411](https://github.com/PaddlePaddle/Paddle/pull/41411)) - + - brelu ([#40385](https://github.com/PaddlePaddle/Paddle/pull/40385)) - + - hard_swish ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - hard_shrink ([#40565](https://github.com/PaddlePaddle/Paddle/pull/40565)) - + - selu ([#39819](https://github.com/PaddlePaddle/Paddle/pull/39819)) - + - expand_v2 ([#39471](https://github.com/PaddlePaddle/Paddle/pull/39471)) - + - top_k_v2([#40064](https://github.com/PaddlePaddle/Paddle/pull/40064)) - + - expand_as_v2([#40373](https://github.com/PaddlePaddle/Paddle/pull/40373)) - + - swish ([#40913](https://github.com/PaddlePaddle/Paddle/pull/40913)) - + - hard_sigmoid ([#40626](https://github.com/PaddlePaddle/Paddle/pull/40626)) - + - exp, det, assign, gaussian_random, matrix_rank, eye, and deformable_conv. ([#41755](https://github.com/PaddlePaddle/Paddle/pull/41755), [#41737](https://github.com/PaddlePaddle/Paddle/pull/41737)) #### **New Dynamic Graph Execution Mechanism** @@ -1143,47 +1143,47 @@ We announce PHI as the new Paddle HIgh reusability operator library. PHI provide To improve scheduling performance and custom development capability of the dynamic graph execution mechanism of the PaddlePaddle, we have reconstructed the underlying execution mechanism of the dynamic graph. With the new execution method, the PHI operator library can be used for efficient runtime execution. For the operators supported by the PHI operator library, switching to the new dynamic graph mode will get a significant improvement in scheduling performance. 
However, because the upgrade of the overall framework execution mechanism requires a huge workload and this part of the work is heavily coupled with the PHI operator library, we still do not use this execution method by default in this version. If you want to try it, you can switch to it by setting the environment variable `FLAGS_enable_eager_mode=1`. The details are as follows: - **Implementation of dynamic graph execution infrastructure, core components and mechanism**: By staticizing dynamic graph-related execution codes, the original homogeneous operator-construction calls are converted to specific calls to different PHI APIs, thus greatly optimizing the scheduling overhead. ([#36059](https://github.com/PaddlePaddle/Paddle/pull/36059), [#37323](https://github.com/PaddlePaddle/Paddle/pull/37323), [#37556](https://github.com/PaddlePaddle/Paddle/pull/37556), [#37555](https://github.com/PaddlePaddle/Paddle/pull/37555), [#37478](https://github.com/PaddlePaddle/Paddle/pull/37478), [#37458](https://github.com/PaddlePaddle/Paddle/pull/37458), [#37479](https://github.com/PaddlePaddle/Paddle/pull/37479), [#37599](https://github.com/PaddlePaddle/Paddle/pull/37599), [#37659](https://github.com/PaddlePaddle/Paddle/pull/37659), [#37654](https://github.com/PaddlePaddle/Paddle/pull/37654), [#39200](https://github.com/PaddlePaddle/Paddle/pull/39200), [#39309](https://github.com/PaddlePaddle/Paddle/pull/39309), [#39319](https://github.com/PaddlePaddle/Paddle/pull/39319), [#39414](https://github.com/PaddlePaddle/Paddle/pull/39414), [#39504](https://github.com/PaddlePaddle/Paddle/pull/39504), [#39526](https://github.com/PaddlePaddle/Paddle/pull/39526), [#39878](https://github.com/PaddlePaddle/Paddle/pull/39878), [#39963](https://github.com/PaddlePaddle/Paddle/pull/39963)) - + - **New dynamic graph execution mechanism sub-function development and adaptation**: support more flexible and complete dynamic graph sub-functions such as hook, pylayer, double_grad, inplace, amp, etc. 
([#41396](https://github.com/PaddlePaddle/Paddle/pull/41396), [#40400](https://github.com/PaddlePaddle/Paddle/pull/40400), [#40695](https://github.com/PaddlePaddle/Paddle/pull/40695), [#41043](https://github.com/PaddlePaddle/Paddle/pull/41043), [#40915](https://github.com/PaddlePaddle/Paddle/pull/40915), [#41104](https://github.com/PaddlePaddle/Paddle/pull/41104), [#41350](https://github.com/PaddlePaddle/Paddle/pull/41350), [#41209](https://github.com/PaddlePaddle/Paddle/pull/41209), [#40830](https://github.com/PaddlePaddle/Paddle/pull/40830), [#40891](https://github.com/PaddlePaddle/Paddle/pull/40891), [#36814](https://github.com/PaddlePaddle/Paddle/pull/36814), [#37377](https://github.com/PaddlePaddle/Paddle/pull/37377), [#37193](https://github.com/PaddlePaddle/Paddle/pull/37193), [#36965](https://github.com/PaddlePaddle/Paddle/pull/36965), [#37810](https://github.com/PaddlePaddle/Paddle/pull/37810), [#36837](https://github.com/PaddlePaddle/Paddle/pull/36837), [#38488](https://github.com/PaddlePaddle/Paddle/pull/38488), [#39282](https://github.com/PaddlePaddle/Paddle/pull/39282), [#39449](https://github.com/PaddlePaddle/Paddle/pull/39449), [#39531](https://github.com/PaddlePaddle/Paddle/pull/39531), [#39638](https://github.com/PaddlePaddle/Paddle/pull/39638), [#39674](https://github.com/PaddlePaddle/Paddle/pull/39674), [#39893](https://github.com/PaddlePaddle/Paddle/pull/39893), [#40170](https://github.com/PaddlePaddle/Paddle/pull/40170), [#40693](https://github.com/PaddlePaddle/Paddle/pull/40693), [#40937](https://github.com/PaddlePaddle/Paddle/pull/40937), [#41016](https://github.com/PaddlePaddle/Paddle/pull/41016), [#41051](https://github.com/PaddlePaddle/Paddle/pull/41051), [#41121](https://github.com/PaddlePaddle/Paddle/pull/41121), [#41198](https://github.com/PaddlePaddle/Paddle/pull/41198), [#41287](https://github.com/PaddlePaddle/Paddle/pull/41287), [#41380](https://github.com/PaddlePaddle/Paddle/pull/41380), 
[#41306](https://github.com/PaddlePaddle/Paddle/pull/41306), [#41387](https://github.com/PaddlePaddle/Paddle/pull/41387), [#40623](https://github.com/PaddlePaddle/Paddle/pull/40623), [#40945](https://github.com/PaddlePaddle/Paddle/pull/40945), [#39282](https://github.com/PaddlePaddle/Paddle/pull/39282), [#39449](https://github.com/PaddlePaddle/Paddle/pull/39449), [#38488](https://github.com/PaddlePaddle/Paddle/pull/38488)) - + - **Automatic code generation mechanism for new dynamic graph execution**: When we are trying to split the computation and scheduling logic of a large number of homogeneous operators into different specific scheduling logics, we find that it is a huge workload. So we introduce a new automatic code generation logic to generate code and thus simplify the runtime logic of dynamic graphs. Meanwhile, in order to adapt to the various types of runtime logic in the previous framework, we also use some complicated compilation techniques to obtain information at runtime to generate more accurate scheduling code. 
([#37574](https://github.com/PaddlePaddle/Paddle/pull/37574), [#37575](https://github.com/PaddlePaddle/Paddle/pull/37575), [#37639](https://github.com/PaddlePaddle/Paddle/pull/37639), [#37723](https://github.com/PaddlePaddle/Paddle/pull/37723), [#37753](https://github.com/PaddlePaddle/Paddle/pull/37753), [#37812](https://github.com/PaddlePaddle/Paddle/pull/37812), [#37837](https://github.com/PaddlePaddle/Paddle/pull/37837), [#37910](https://github.com/PaddlePaddle/Paddle/pull/37910), [#37943](https://github.com/PaddlePaddle/Paddle/pull/37943), [#37992](https://github.com/PaddlePaddle/Paddle/pull/37992), [#37959](https://github.com/PaddlePaddle/Paddle/pull/37959), [#38017](https://github.com/PaddlePaddle/Paddle/pull/38017), [#37969](https://github.com/PaddlePaddle/Paddle/pull/37969), [#38160](https://github.com/PaddlePaddle/Paddle/pull/38160), [#38085](https://github.com/PaddlePaddle/Paddle/pull/38085), [#38562](https://github.com/PaddlePaddle/Paddle/pull/38562), [#38573](https://github.com/PaddlePaddle/Paddle/pull/38573), [#39192](https://github.com/PaddlePaddle/Paddle/pull/39192), [#39215](https://github.com/PaddlePaddle/Paddle/pull/39215), [#39355](https://github.com/PaddlePaddle/Paddle/pull/39355), [#39358](https://github.com/PaddlePaddle/Paddle/pull/39358), [#39328](https://github.com/PaddlePaddle/Paddle/pull/39328), [#39233](https://github.com/PaddlePaddle/Paddle/pull/39233), [#39628](https://github.com/PaddlePaddle/Paddle/pull/39628), [#39767](https://github.com/PaddlePaddle/Paddle/pull/39767), [#39743](https://github.com/PaddlePaddle/Paddle/pull/39743), [#39897](https://github.com/PaddlePaddle/Paddle/pull/39897), [#39797](https://github.com/PaddlePaddle/Paddle/pull/39797), [#39997](https://github.com/PaddlePaddle/Paddle/pull/39997), [#40058](https://github.com/PaddlePaddle/Paddle/pull/40058), [#40080](https://github.com/PaddlePaddle/Paddle/pull/40080), [#40107](https://github.com/PaddlePaddle/Paddle/pull/40107), 
[#39962](https://github.com/PaddlePaddle/Paddle/pull/39962), [#40132](https://github.com/PaddlePaddle/Paddle/pull/40132), [#40276](https://github.com/PaddlePaddle/Paddle/pull/40276), [#40266](https://github.com/PaddlePaddle/Paddle/pull/40266), [#40480](https://github.com/PaddlePaddle/Paddle/pull/40480), [#40482](https://github.com/PaddlePaddle/Paddle/pull/40482), [#40368](https://github.com/PaddlePaddle/Paddle/pull/40368), [#40650](https://github.com/PaddlePaddle/Paddle/pull/40650), [#40815](https://github.com/PaddlePaddle/Paddle/pull/40815), [#40907](https://github.com/PaddlePaddle/Paddle/pull/40907), [#40935](https://github.com/PaddlePaddle/Paddle/pull/40935), [#41089](https://github.com/PaddlePaddle/Paddle/pull/41089)) - + - **New dynamic graph execution mechanism accessed into the main framework and Integration test**: we currently use some environment variables to distinguish between static graph mode and dynamic graph mode (including new dynamic graph and old dynamic graph mode). We have adapted most logics of dynamic graphs in these modes. However, there are still a lot of problems being fixed. 
([#37638](https://github.com/PaddlePaddle/Paddle/pull/37638), [#37643](https://github.com/PaddlePaddle/Paddle/pull/37643), [#37653](https://github.com/PaddlePaddle/Paddle/pull/37653), [#38314](https://github.com/PaddlePaddle/Paddle/pull/38314), [#38337](https://github.com/PaddlePaddle/Paddle/pull/38337), [#38338](https://github.com/PaddlePaddle/Paddle/pull/38338), [#39164](https://github.com/PaddlePaddle/Paddle/pull/39164), [#39326](https://github.com/PaddlePaddle/Paddle/pull/39326), [#40391](https://github.com/PaddlePaddle/Paddle/pull/40391), [#40201](https://github.com/PaddlePaddle/Paddle/pull/40201), [#40854](https://github.com/PaddlePaddle/Paddle/pull/40854), [#40887](https://github.com/PaddlePaddle/Paddle/pull/40887)) - + - **Update some judgment logics under dynamic graphs, to support fast execution paths for dynamic graphs in compatible forms**:([#40786](https://github.com/PaddlePaddle/Paddle/pull/40786)) - + - Non-static graph mode (current transition scheme): `_non_static_mode()`. - + - Determined as new dynamic graph in dynamic graph mode (recommended judgment logic): `_in_dygraph_mode()`. - + - Determined as old dynamic graph in dynamic graph mode (Not recommended. It will be deprecated in future versions): `_in_legacy_dygraph()`. - + - Enable old dynamic graph and disable new dynamic graph in dynamic graph mode: `_enable_legacy_dygraph()` or exit `_test_eager_guard()`. - + - Enable new dynamic graph and disable old dynamic graph in dynamic graph mode: `_disable_legacy_dygraph()` or use `with _test_eager_guard()`. - + - Determine whether in new dynamic graph in static or dynamic graph mode: `_in_eager_without_dygraph_check()`. - + - **Support inplace after dynamic graph reconstruction**: input and output are the same Tensor. - + - Adapt the inplace strategy for dynamic graph reconstruction intermediate states.([#40400](https://github.com/PaddlePaddle/Paddle/pull/40400)) - + - Adapt the inplace strategy to the final state of the dynamic graph reconstruction. 
([#40695](https://github.com/PaddlePaddle/Paddle/pull/40695)) - + - Add inplace strategy to PyLayer function after dynamic graph reconstruction. ([#41043](https://github.com/PaddlePaddle/Paddle/pull/41043)) - + - Add inplace strategy for Tensor's setitem function after dynamic graph reconstruction. ([#40915](https://github.com/PaddlePaddle/Paddle/pull/40915)) - + - Add `_reset_grad_inplace_version` interface after dynamic graph reconstruction, to set the inplace version of the Tensor's gradient to 0. ([#41101](https://github.com/PaddlePaddle/Paddle/pull/41101)) - + - If the value of the forward Tensor is not needed during the inverse computation (no need buffer property), the inplace version detection operation is not needed for that Tensor. For Tensor with no_need_buffer, skip the inplace version check. ([#41350](https://github.com/PaddlePaddle/Paddle/pull/41350)) - + - Unify error messages for inplace version checks after and before reconstruction of dynamic graphs. ([#41209](https://github.com/PaddlePaddle/Paddle/pull/41209)) - + - **Support view strategy after dynamic graph reconstruction**: input and output Tensor share underlying data. - + - Adapt the view strategy for dynamic graph reconstruction intermediate states. Include `reshape`, `squeeze`, `unsqueeze`, and `flatten` APIs. ([#40830](https://github.com/PaddlePaddle/Paddle/pull/40830)) - + - Adapt the view strategy for dynamic graph reconstruction final state. Include `reshape` API. ([#40891](https://github.com/PaddlePaddle/Paddle/pull/40891)) - **Add support for weakref on the python side of the new dynamic graph eager Tensor.** ([#41797](https://github.com/PaddlePaddle/Paddle/pull/41797)) @@ -1195,144 +1195,144 @@ To improve scheduling performance and custom development capability of the dynam - **Add `_grad_name` and `_grad_value`** to `core.eager.Tensor` to return the name and value of a gradient. 
([#41990](https://github.com/PaddlePaddle/Paddle/pull/41990)) - **Add the processing of the no_need_buffer attribute for dynamic graph intermediate state.** The Tensor with the no_need_buffer attribute is skipped in the inplace backward check operation. ([#41720](https://github.com/PaddlePaddle/Paddle/pull/41720)) - + #### **New Static Graph Executor** In order to solve the problem that the original static graph executor of the PaddlePaddle is not good enough for scheduling in some scenarios and it is not easy to use multiple streams, we have implemented a new static graph executor with superior performance. It is easy to take advantage of the asynchronous scheduling capabilities of multi-streams and multi-threads. The new executor is a compatible upgrade of the original executor. At present, it is used by default in single-card scenarios. Users do not need to make any changes in the training codes. It can be used automatically. Of course, we also provide an interface to switch back to the original executor. Users can switch back to the original executor by setting the environment variable: `FLAGS_USE_STANDALONE_EXECUTOR=false`. ([#41179](https://github.com/PaddlePaddle/Paddle/pull/41179)) The main contents are as follows. 
- Basic components: High-performance thread pool for multi-threaded scheduling in the executor ([#35470](https://github.com/PaddlePaddle/Paddle/pull/35470), [#35930](https://github.com/PaddlePaddle/Paddle/pull/35930), [#36030](https://github.com/PaddlePaddle/Paddle/pull/36030), [#36480](https://github.com/PaddlePaddle/Paddle/pull/36480), [#36688](https://github.com/PaddlePaddle/Paddle/pull/36688), [#36740](https://github.com/PaddlePaddle/Paddle/pull/36740), [#38335](https://github.com/PaddlePaddle/Paddle/pull/38335), [#40770](https://github.com/PaddlePaddle/Paddle/pull/40770)) and thread co-op component ([#38779](https://github.com/PaddlePaddle/Paddle/pull/38779), [#40876](https://github.com/PaddlePaddle/Paddle/pull/40876), [#40912](https://github.com/PaddlePaddle/Paddle/pull/40912)) . There is the timely memory recovery after operator execution ([#37642](https://github.com/PaddlePaddle/Paddle/pull/37642), [#39617](https://github.com/PaddlePaddle/Paddle/pull/39617), [#40859](https://github.com/PaddlePaddle/Paddle/pull/40859)). There is the new dependency analysis algorithm for parallel executor ([#37231](https://github.com/PaddlePaddle/Paddle/pull/37231)) etc. - + - Scheduling logic: Optimize the scheduling method of operator in the executor. Support multi-stream multi-threaded asynchronous scheduling mechanism. Change transforms such as data type, device, and layout to the operator scheduling to improve performance. Support caching the selection of operator Kernel. 
Support the selection of new PHI operator.([#35024](https://github.com/PaddlePaddle/Paddle/pull/35024), [#34922](https://github.com/PaddlePaddle/Paddle/pull/34922), [#35711](https://github.com/PaddlePaddle/Paddle/pull/35711), [#35928](https://github.com/PaddlePaddle/Paddle/pull/35928), [#39458](https://github.com/PaddlePaddle/Paddle/pull/39458),[#36899](https://github.com/PaddlePaddle/Paddle/pull/36899))。 - + - Interface compatibility: Compatible with the user interface and functionality of the original executor, such as alignment with python interface Executor.run(), support for managing Tensor in Scope, etc. This ensures that users can switch to the new executor without perception. ([#37278](https://github.com/PaddlePaddle/Paddle/pull/37278), [#37379](https://github.com/PaddlePaddle/Paddle/pull/37379), [#37445](https://github.com/PaddlePaddle/Paddle/pull/37445), [#37510](https://github.com/PaddlePaddle/Paddle/pull/37510), [#40955](https://github.com/PaddlePaddle/Paddle/pull/40955), [#41778](https://github.com/PaddlePaddle/Paddle/pull/41178), [#41058](https://github.com/PaddlePaddle/Paddle/pull/41058), [#38584](https://github.com/PaddlePaddle/Paddle/pull/38584), [#37957](https://github.com/PaddlePaddle/Paddle/pull/37957), [#37672](https://github.com/PaddlePaddle/Paddle/pull/37672), [#37474](https://github.com/PaddlePaddle/Paddle/pull/37474), [#37085](https://github.com/PaddlePaddle/Paddle/pull/37085), [#37061](https://github.com/PaddlePaddle/Paddle/pull/37061), [#36945](https://github.com/PaddlePaddle/Paddle/pull/36945)) - + - Enhance debugging and error reporting in multi-threaded scenarios by capturing error reports from sub-threads and throwing them uniformly in the main thread. This can improve user experience. 
([#36692](https://github.com/PaddlePaddle/Paddle/pull/36692),[#36802](https://github.com/PaddlePaddle/Paddle/pull/36802)) - Fix the bug with the new executor communication flow resetting stream cache information in the allocator, to reduce RecordStream overhead in cross-stream scenarios. This improves performance of DeepFM models by about 8% after optimization. ([#42046](https://github.com/PaddlePaddle/Paddle/pull/42046)) - Optimize the dependency analysis method between new executor operators to improve runtime performance. Establish correct dependencies for send/recv communication operators to support pipeline parallel. ([#42009](https://github.com/PaddlePaddle/Paddle/pull/42009)) - + #### **Distributed Training** - Basic functions of multi-machine multi-card parallel training based on collective communication - + - Add support for elastic training, which enables scaling the number of workers up and down and resuming the training process on node failure, to improve the fault tolerance of distributed training. ([#36684](https://github.com/PaddlePaddle/Paddle/pull/36684), [#37177](https://github.com/PaddlePaddle/Paddle/pull/37177), [#37781](https://github.com/PaddlePaddle/Paddle/pull/37781)) - + - Refactor launch startup module, add `master` collaboration and node number `nnodes` definition, to improve the ease of using the distributed startup.([#40086](https://github.com/PaddlePaddle/Paddle/pull/40086), [#40568](https://github.com/PaddlePaddle/Paddle/pull/40568), [#40782](https://github.com/PaddlePaddle/Paddle/pull/40782), [#40844](https://github.com/PaddlePaddle/Paddle/pull/40844), [#40936](https://github.com/PaddlePaddle/Paddle/pull/40936), [#41190](https://github.com/PaddlePaddle/Paddle/pull/41190), [#41314](https://github.com/PaddlePaddle/Paddle/pull/41314)) - + - Add support for GPU/NPU/XPU multi-hardware heterogeneous training. 
([#37613](https://github.com/PaddlePaddle/Paddle/pull/37613), [#37998](https://github.com/PaddlePaddle/Paddle/pull/37998)) - + - Add fleet_executor asynchronous pipeline executor. ([#36966](https://github.com/PaddlePaddle/Paddle/pull/36966), [#37049](https://github.com/PaddlePaddle/Paddle/pull/37049), [#37087](https://github.com/PaddlePaddle/Paddle/pull/37087), [#37126](https://github.com/PaddlePaddle/Paddle/pull/37126), [#37150](https://github.com/PaddlePaddle/Paddle/pull/37150), [#37203](https://github.com/PaddlePaddle/Paddle/pull/37203), [#37167](https://github.com/PaddlePaddle/Paddle/pull/37167), [#37282](https://github.com/PaddlePaddle/Paddle/pull/37282), [#37319](https://github.com/PaddlePaddle/Paddle/pull/37319), [#37462](https://github.com/PaddlePaddle/Paddle/pull/37462), [#37507](https://github.com/PaddlePaddle/Paddle/pull/37507), [#37533](https://github.com/PaddlePaddle/Paddle/pull/37533), [#37576](https://github.com/PaddlePaddle/Paddle/pull/37576), [#37605](https://github.com/PaddlePaddle/Paddle/pull/37605), [#37691](https://github.com/PaddlePaddle/Paddle/pull/37691), [#37742](https://github.com/PaddlePaddle/Paddle/pull/37742), [#37783](https://github.com/PaddlePaddle/Paddle/pull/37783), [#37809](https://github.com/PaddlePaddle/Paddle/pull/37809), [#37862](https://github.com/PaddlePaddle/Paddle/pull/37862), [#37882](https://github.com/PaddlePaddle/Paddle/pull/37882), [#37934](https://github.com/PaddlePaddle/Paddle/pull/37934), [#38024](https://github.com/PaddlePaddle/Paddle/pull/38024), [#38083](https://github.com/PaddlePaddle/Paddle/pull/38083), [#38164](https://github.com/PaddlePaddle/Paddle/pull/38164), [#38261](https://github.com/PaddlePaddle/Paddle/pull/38261), [#38290](https://github.com/PaddlePaddle/Paddle/pull/38290), [#40607](https://github.com/PaddlePaddle/Paddle/pull/40607), [#37093](https://github.com/PaddlePaddle/Paddle/pull/37093), [#37106](https://github.com/PaddlePaddle/Paddle/pull/37106), 
[#37143](https://github.com/PaddlePaddle/Paddle/pull/37143), [#37338](https://github.com/PaddlePaddle/Paddle/pull/37338), [#37376](https://github.com/PaddlePaddle/Paddle/pull/37376), [#37485](https://github.com/PaddlePaddle/Paddle/pull/37485), [#37531](https://github.com/PaddlePaddle/Paddle/pull/37531), [#37623](https://github.com/PaddlePaddle/Paddle/pull/37623), [#37693](https://github.com/PaddlePaddle/Paddle/pull/37693), [#37755](https://github.com/PaddlePaddle/Paddle/pull/37755), [#37807](https://github.com/PaddlePaddle/Paddle/pull/37807), [#37889](https://github.com/PaddlePaddle/Paddle/pull/37889), [#38420](https://github.com/PaddlePaddle/Paddle/pull/38420), [#38539](https://github.com/PaddlePaddle/Paddle/pull/38539), [#36892](https://github.com/PaddlePaddle/Paddle/pull/36892), [#37084](https://github.com/PaddlePaddle/Paddle/pull/37084), [#37158](https://github.com/PaddlePaddle/Paddle/pull/37158), [#37361](https://github.com/PaddlePaddle/Paddle/pull/37361), [#37509](https://github.com/PaddlePaddle/Paddle/pull/37509), [#37603](https://github.com/PaddlePaddle/Paddle/pull/37603), [#37703](https://github.com/PaddlePaddle/Paddle/pull/37703), [#37824](https://github.com/PaddlePaddle/Paddle/pull/37824), [#38114](https://github.com/PaddlePaddle/Paddle/pull/38114), [#38322](https://github.com/PaddlePaddle/Paddle/pull/38322), [#38535](https://github.com/PaddlePaddle/Paddle/pull/38535), [#38650](https://github.com/PaddlePaddle/Paddle/pull/38650), [#38709](https://github.com/PaddlePaddle/Paddle/pull/38709), [#38799](https://github.com/PaddlePaddle/Paddle/pull/38799), [#38839](https://github.com/PaddlePaddle/Paddle/pull/38839), [#38904](https://github.com/PaddlePaddle/Paddle/pull/38904)) - + - Add distributed inference function for large-scale model. 
([#38795](https://github.com/PaddlePaddle/Paddle/pull/38795), [#39012](https://github.com/PaddlePaddle/Paddle/pull/39012), [#39032](https://github.com/PaddlePaddle/Paddle/pull/39032), [#39076](https://github.com/PaddlePaddle/Paddle/pull/39076), [#39194](https://github.com/PaddlePaddle/Paddle/pull/39194), [#39207](https://github.com/PaddlePaddle/Paddle/pull/39207), [#39241](https://github.com/PaddlePaddle/Paddle/pull/39241), [#39603](https://github.com/PaddlePaddle/Paddle/pull/39603), [#39758](https://github.com/PaddlePaddle/Paddle/pull/39758), [#39992](https://github.com/PaddlePaddle/Paddle/pull/39992)). - + - Dynamic graph hybrid parallelism - + - Reconstruct `paddle.distributed.fleet.utils.recompute`, to support new dynamic computational graph. ([#41396](https://github.com/PaddlePaddle/Paddle/pull/41396)) - + - Add pure FP16 training to support data parallelism. ([#36420](https://github.com/PaddlePaddle/Paddle/pull/36420)) - + - Add MoE (Mixture of Experts) parallel strategy, to support large-scale MoE model training. ([#41092](https://github.com/PaddlePaddle/Paddle/pull/41092), [#40895](https://github.com/PaddlePaddle/Paddle/pull/40895), [#40850](https://github.com/PaddlePaddle/Paddle/pull/40580), [#39224](https://github.com/PaddlePaddle/Paddle/pull/39224)) - + - Add GroupSharded parallel strategy. Support stage1, stage2, stage3, and it supports synchronous and asynchronous communication. It can be used together with the basic function combinations such as Recompute, AMP O1\O2, Offload, GroupShardedClipGrad, and GroupShardedScaler. 
([#37489](https://github.com/PaddlePaddle/Paddle/pull/37489), [#37568](https://github.com/PaddlePaddle/Paddle/pull/37568), [#37707](https://github.com/PaddlePaddle/Paddle/pull/37707), [#37836](https://github.com/PaddlePaddle/Paddle/pull/37836), [#37947](https://github.com/PaddlePaddle/Paddle/pull/37947), [#38151](https://github.com/PaddlePaddle/Paddle/pull/38151), [#38407](https://github.com/PaddlePaddle/Paddle/pull/38407), [#38052](https://github.com/PaddlePaddle/Paddle/pull/38052), [#39112](https://github.com/PaddlePaddle/Paddle/pull/39112), [#38989](https://github.com/PaddlePaddle/Paddle/pull/38989), [#39171](https://github.com/PaddlePaddle/Paddle/pull/39171), [#39285](https://github.com/PaddlePaddle/Paddle/pull/39285), [#39334](https://github.com/PaddlePaddle/Paddle/pull/39334), [#39397](https://github.com/PaddlePaddle/Paddle/pull/39397), [#39581](https://github.com/PaddlePaddle/Paddle/pull/39581), [#39668](https://github.com/PaddlePaddle/Paddle/pull/39668), [#40129](https://github.com/PaddlePaddle/Paddle/pull/40129), [#40396](https://github.com/PaddlePaddle/Paddle/pull/40396), [#40488](https://github.com/PaddlePaddle/Paddle/pull/40488), [#40601](https://github.com/PaddlePaddle/Paddle/pull/40601),[#37725](https://github.com/PaddlePaddle/Paddle/pull/37725),[#37904](https://github.com/PaddlePaddle/Paddle/pull/37904), [#38064](https://github.com/PaddlePaddle/Paddle/pull/38064)) - + - Static graph hybrid parallelism - + - Add `scale_gradient` flag bit to `gradient_scale_configs` to control the position where the gradient aggregation operation averages the gradients under pipeline parallelism. ([#36384](https://github.com/PaddlePaddle/Paddle/pull/36384)) - + - Under tensor parallelism, the dropout op supports the settings of deterministic random seed generators, to ensure random consistency for non-distributed variables and randomness of distributed variables. 
([#36228](https://github.com/PaddlePaddle/Paddle/pull/36228)) - + - NPU hybrid parallelism supports Offload, with saving 40% of NPU memory. ([#37224](https://github.com/PaddlePaddle/Paddle/pull/37224)) - + - Add `force_cpu` optional parameter to the seed op, to allow dropout to read seed values directly from CPU. ([#35820](https://github.com/PaddlePaddle/Paddle/pull/35820)) - + - Improve the Automatic Sparsity (ASP) sharding strategy and support the selection of sharding strategy according to the program. ([#40028](https://github.com/PaddlePaddle/Paddle/pull/40028)) - + - Automatic parallel - + - Add the process restart (relaunch) after automatic mapping between logical processes and physical devices. ([#37523](https://github.com/PaddlePaddle/Paddle/pull/37523), [#37326](https://github.com/PaddlePaddle/Paddle/pull/37326)) - + - Improve the underlying mechanism and interface for automatic parallel to facilitate the unification of modules and add the optimized pass. ([#36617](https://github.com/PaddlePaddle/Paddle/pull/36617), [#38132](https://github.com/PaddlePaddle/Paddle/pull/38132)) - + - Add unified resource representation, to support for automatic mapping between logical processes and physical devices. ([#37091](https://github.com/PaddlePaddle/Paddle/pull/37091), [#37482](https://github.com/PaddlePaddle/Paddle/pull/37482), [#37094](https://github.com/PaddlePaddle/Paddle/pull/37094)) - + - Improve the distributed attribute complementation for the backward and update parts of the computation graph. ([#36744](https://github.com/PaddlePaddle/Paddle/pull/36744)) - + - Add data slicing function. ([#36055](https://github.com/PaddlePaddle/Paddle/pull/36055)) - + - Add tensor resharding function to reshard the tensor according to the distributed properties of the tensor and operator. 
([#40865](https://github.com/PaddlePaddle/Paddle/pull/40865), [#41106](https://github.com/PaddlePaddle/Paddle/pull/41106)) - + - Add the automatic conversion pass of distributed parameters when the number of resources or parallel policy changes. ([#40434](https://github.com/PaddlePaddle/Paddle/pull/40434)) - + - Add GradientMerge pass to reduce the number of communications and improve training efficiency. ([#38259](https://github.com/PaddlePaddle/Paddle/pull/38259), [#40737](https://github.com/PaddlePaddle/Paddle/pull/40737)) - + - Add Recompute pass to reduce the activation memory storage. ([#38920](https://github.com/PaddlePaddle/Paddle/pull/38920)) - + - Add Sharding optimization pass, to support p-g-os 3 stage optimization. ([#38502](https://github.com/PaddlePaddle/Paddle/pull/38502)) - + - Add AMP + FP16 optimization pass. ([#38764](https://github.com/PaddlePaddle/Paddle/pull/38764), [#40615](https://github.com/PaddlePaddle/Paddle/pull/40615)) - + - Add fused QKV parallelization for Transformer class model. ([#39080](https://github.com/PaddlePaddle/Paddle/pull/39080)) - + - Improve the sharding propagation for while op to ensure convergence of the fix-point algorithm. ([#39939](https://github.com/PaddlePaddle/Paddle/pull/39939), [#39086](https://github.com/PaddlePaddle/Paddle/pull/39086), [#39014](https://github.com/PaddlePaddle/Paddle/pull/39014)) - + - Support training and inference for sub-block and while op control flow. ([#39612](https://github.com/PaddlePaddle/Paddle/pull/39612), [#39895](https://github.com/PaddlePaddle/Paddle/pull/39895), [#40077](https://github.com/PaddlePaddle/Paddle/pull/40077)) - + - Parameter Server - + - Add NaN/Inf value checking tool under GPUPS. ([#38131](https://github.com/PaddlePaddle/Paddle/pull/38131)) - + - Under GPUPS, add set_date interface to adapt incremental training. ([#36194](https://github.com/PaddlePaddle/Paddle/pull/36194)) - + - Under GPUPS, add asynchronous release dataset function. 
([#37790](https://github.com/PaddlePaddle/Paddle/pull/37790)) - + - Under GPUPS, support the Dump parameters and intermediate layers([#36157](https://github.com/PaddlePaddle/Paddle/pull/36157)); - + - Under GPUPS, support the optimizer parameter configuration. ([#39783](https://github.com/PaddlePaddle/Paddle/pull/39783), [#39849](https://github.com/PaddlePaddle/Paddle/pull/39849)) - + - Under the Unified Parameter Server, refactor the base classes of each module such as communication and storage, to improve the ease of secondary development of each module. ([#41207](https://github.com/PaddlePaddle/Paddle/pull/41207), [#41022](https://github.com/PaddlePaddle/Paddle/pull/41022), [#40702](https://github.com/PaddlePaddle/Paddle/pull/40702), [#39341](https://github.com/PaddlePaddle/Paddle/pull/39341) [#39377](https://github.com/PaddlePaddle/Paddle/pull/39377), [#39191](https://github.com/PaddlePaddle/Paddle/pull/39191), [#39064](https://github.com/PaddlePaddle/Paddle/pull/39064)) - + - Add evaluation metrics module under the Unified Parameter Server, to support AUC/WuAUC/MaskAUC and other evaluation metrics calculation and customizable extensions. ([#38789](https://github.com/PaddlePaddle/Paddle/pull/38789)) - - - Supports XPU parameter server training on KUNLUNXIN 2. ([#41917](https://github.com/PaddlePaddle/Paddle/pull/41917), [#42266](https://github.com/PaddlePaddle/Paddle/pull/42266), [#41916](https://github.com/PaddlePaddle/Paddle/pull/41916)) + + - Supports XPU parameter server training on KUNLUNXIN 2. ([#41917](https://github.com/PaddlePaddle/Paddle/pull/41917), [#42266](https://github.com/PaddlePaddle/Paddle/pull/42266), [#41916](https://github.com/PaddlePaddle/Paddle/pull/41916)) #### Profiler - Add the performance analysis module `paddle.profiler` in the Python layer: Provide the ability to collect, export, and count performance data during the training push. 
([#40065](https://github.com/PaddlePaddle/Paddle/pull/40065), [#40357](https://github.com/PaddlePaddle/Paddle/pull/40357), [#40888](https://github.com/PaddlePaddle/Paddle/pull/40888)) - + - `paddle.profiler.Profiler` : performance analyzer, interface for user interaction. ([#41029](https://github.com/PaddlePaddle/Paddle/pull/41029), [#41524](https://github.com/PaddlePaddle/Paddle/pull/41524), [#41157](https://github.com/PaddlePaddle/Paddle/pull/41157), [#40249](https://github.com/PaddlePaddle/Paddle/pull/40249), [#40111](https://github.com/PaddlePaddle/Paddle/pull/40111), [#39964](https://github.com/PaddlePaddle/Paddle/pull/39964), [#40133](https://github.com/PaddlePaddle/Paddle/pull/40133)) - + - `paddle.profiler.RecordEvent`: provide custom punches to record time. ([#39693](https://github.com/PaddlePaddle/Paddle/pull/39693), [#39694](https://github.com/PaddlePaddle/Paddle/pull/39694), [#39695](https://github.com/PaddlePaddle/Paddle/pull/39695), [#39675](https://github.com/PaddlePaddle/Paddle/pull/39675),[#41445](https://github.com/PaddlePaddle/Paddle/pull/41445), [#41132](https://github.com/PaddlePaddle/Paddle/pull/41132)) - + - `paddle.profiler.ProfilerTarget`: specify the target device for performance analysis. - + - `paddle.profiler.ProfilerState`: indicate the state of the performance analyzer. - + - `paddle.profiler.SortedKeys` : specify the sorting method of the data within the statistics form. - + - `paddle.profiler.make_scheduler`: the scheduler generating the performance analyzer state and implement the periodic control of the collection scope. - + - `paddle.profiler.export_chrome_tracing`: save performance data to a google chrome tracing file viewable by the chrome://tracing plugin. 
([#39316](https://github.com/PaddlePaddle/Paddle/pull/39316), [#39984](https://github.com/PaddlePaddle/Paddle/pull/39984), [#41029](https://github.com/PaddlePaddle/Paddle/pull/41029)) - + - `paddle.profiler.export_protobuf`: save performance data to a protobuf file represented by internal structure. ([#39519](https://github.com/PaddlePaddle/Paddle/pull/39519), [#39109](https://github.com/PaddlePaddle/Paddle/pull/39109), [#39474](https://github.com/PaddlePaddle/Paddle/pull/39474)) - + - `paddle.profiler.load_profiler_result`: load the performance data saved to a protobuf file. - + - `paddle.profiler.Profiler` generate statistics for data reading, step overhead and throughput for the model training by specifying the `timer_only` parameter.([#40386](https://github.com/PaddlePaddle/Paddle/pull/40386)) - + - Refactor Profiler underlying infrastructure in C++ layer - + - Refactor the Profiler's controller architecture.([#38826](https://github.com/PaddlePaddle/Paddle/pull/38826), [#39230](https://github.com/PaddlePaddle/Paddle/pull/39230), [#39779](https://github.com/PaddlePaddle/Paddle/pull/39779) ) - + - Add Host Tracer to collect host-side performance metrics.([#37629](https://github.com/PaddlePaddle/Paddle/pull/39629), [#37766](https://github.com/PaddlePaddle/Paddle/pull/37766), [#37944](https://github.com/PaddlePaddle/Paddle/pull/37944), [#38280](https://github.com/PaddlePaddle/Paddle/pull/38280), [#39975](https://github.com/PaddlePaddle/Paddle/pull/39975), [#40460](https://github.com/PaddlePaddle/Paddle/pull/40460)) - + - Add CUDA Tracer to collect device-side performance metrics.([#39488](https://github.com/PaddlePaddle/Paddle/pull/39488)) - + - Profiler support for grading.([#39926](https://github.com/PaddlePaddle/Paddle/pull/39926)) - + - Modify the name and type of logging for op under new dynamic graph.([#41771](https://github.com/PaddlePaddle/Paddle/pull/41771/) - Add Kernel running statistics into profilers' summarization and optimize the 
summarization.([#41989](https://github.com/PaddlePaddle/Paddle/pull/41989)) @@ -1344,168 +1344,168 @@ In order to solve the problem that the original static graph executor of the Pad With the recent development of PaddlePaddle's compiler, a.k.a., CINN([GitHub - PaddlePaddle/CINN: Compiler Infrastructure for Neural Networks](https://github.com/PaddlePaddle/CINN)), paddle framework has also been changed to adapt the compiler CINN features. These include the subgraph management related functions for the Paddle-CINN runtime, optimization of memory and speed performance, and bug fixing during development. - Functions developed: - + - Subgraph op related functions: - + - Add the function to find and generate CINN subgraphs from computational graphs.([#36345](https://github.com/PaddlePaddle/Paddle/pull/36345)) - + - Add cinn_launch op as a runtime entry point to CINN. It is responsible for scheduling CINN to compile the subgraph, to initialize the data, and to execute the generated kernels.([#36600](https://github.com/PaddlePaddle/Paddle/pull/36600)) - + - Add a helper class `CinnLaunchContext` to the kernel implementation of cinn_launch op to manage the intermediate data for compiling and running subgraphs, to improve scalability and code readability.([#37938](https://github.com/PaddlePaddle/Paddle/pull/37938)) - + - Add additional fetch nodes to CINN subgraphs, thus ensuring that CINN external nodes can fetch the values of variables.([#37172](https://github.com/PaddlePaddle/Paddle/pull/37172), [#37190](https://github.com/PaddlePaddle/Paddle/pull/37190)) - + - Add the function to symbolize a CINN subgraph, which is used to topologically sort the subgraphs and return the CINN execution sequence.([#36417](https://github.com/PaddlePaddle/Paddle/pull/36417)) - + - Add `CinnCompiler` class for invoking subgraphs in the CINN compiled graph that can be replaced by using CINN operators. 
([#36562](https://github.com/PaddlePaddle/Paddle/pull/36562), [#36975](https://github.com/PaddlePaddle/Paddle/pull/36975)) - + - Add the interface to CINN symbolization class to get the names of subgraph fetched variables to prevent fetched variables from being eliminated in compilation optimizations.([#37218](https://github.com/PaddlePaddle/Paddle/pull/37218)) - + - Checking, debugging, and API changes related: - + - Synchronize the update of NetBuilder API name changes in CINN.([#40392](https://github.com/PaddlePaddle/Paddle/pull/40392)) - + - Add necessary log information to Paddle-CINN for better debugging.([#36867](https://github.com/PaddlePaddle/Paddle/pull/36867)) - + - Add the bidirectional conversion function between Paddle desc and CINN desc.([#36100](https://github.com/PaddlePaddle/Paddle/pull/36100)) - + - The operator implemented in CINN may not use some input variables compared to Paddle. Therefore, remove the check that the input variables must be used in the cinn_launch op.([#37119](https://github.com/PaddlePaddle/Paddle/pull/37119)) - + - Added cinn_instruction_run op for invoking CINN to execute a single generation instruction, facilitating the construction of scheduling run subgraphs on the Paddle side.([#39435](https://github.com/PaddlePaddle/Paddle/pull/39435), [#39576](https://github.com/PaddlePaddle/Paddle/pull/39576)) - + - Add control macros to Paddle for CUDA/CUBLAS/MKL/CINN pass application required to compile CINN.([#37066](https://github.com/PaddlePaddle/Paddle/pull/37066), [#36660](https://github.com/PaddlePaddle/Paddle/pull/36660)) - + - Add two control flags FLAGS_allow_cinn_ops and FLAGS_deny_cinn_ops to control the categories of CINN operators used to replace native operators during Paddle training.([#36842](https://github.com/PaddlePaddle/Paddle/pull/36842)) - Performance optimization: - + - Speed optimization - + - Optimize the computational time consumed by 
CinnCacheKey.([#37786](https://github.com/PaddlePaddle/Paddle/pull/37786), [#37317](https://github.com/PaddlePaddle/Paddle/pull/37317)) - + - Cache variable scope for CINN compiled subgraphs to reduce runtime parameter construction overhead.([#37983](https://github.com/PaddlePaddle/Paddle/pull/37983)) - + - Utilize CINN's auto-tuning in case of subgraph compilation, could be enabled by flag, for further tuning of training performance.([#41795](https://github.com/PaddlePaddle/Paddle/pull/41795)) - + - Refactor the correctness check of compilation results in case of subgraph compilation to avoid repeated checks at runtime and reduce the scheduling overhead.([#41777](https://github.com/PaddlePaddle/Paddle/pull/41777)) - + - Enable TransposeFolding and GemmRewriter optimization passes by default in Paddle-CINN training.([#41084](https://github.com/PaddlePaddle/Paddle/pull/41084)) - + - Pass the cuda stream created in Paddle into CINN so that Paddle and CINN can use the same CUDA stream in cuda computing.([#37337](https://github.com/PaddlePaddle/Paddle/pull/37337)) - + - Move CINN optimization pass application logic from Paddle to CINN.([#42047](https://github.com/PaddlePaddle/Paddle/pull/42047), [#42070](https://github.com/PaddlePaddle/Paddle/pull/42070)) - + - Device memory optimization - + - Add NoNeedBufferVars to cinn_launch op to declare a list of input variables that do not require a buffer, so that the memory can be freed in advance.([#38367](https://github.com/PaddlePaddle/Paddle/pull/38367)) - + - Pass in reference count information for external variables to the subgraph, so that subgraphs within cinn_launch can reuse memory optimization passes and reduce the memory overhead in using CINN.([#39209](https://github.com/PaddlePaddle/Paddle/pull/39209), [#39622](https://github.com/PaddlePaddle/Paddle/pull/39622)) - + - Add the function to convert a collection of executable instructions generated by CINN compilation to a Paddle Graph, supporting reuse of the Paddle 
scheduler and memory optimization pass, further reducing the memory overhead in using CINN. ([#39724](https://github.com/PaddlePaddle/Paddle/pull/39724), [#39911](https://github.com/PaddlePaddle/Paddle/pull/39911)) - + - Add Kernel of cinn_instruction_run op, to support dynamic device memory requests based on data types inferred from compilation results.([#40920](https://github.com/PaddlePaddle/Paddle/pull/40920)) - Bug fixing: - + - Fix and optimize the generation logic of CINN subgraphs.([#36503](https://github.com/PaddlePaddle/Paddle/pull/36503)) - + - Fix the bug that Paddle-CINN does not support no-input subgraphs.([#40814](https://github.com/PaddlePaddle/Paddle/pull/40814)) - + - Fix an error reported due to CINN not being able to handle useless outputs in operators such as batch_norm.([#36996](https://github.com/PaddlePaddle/Paddle/pull/36996)) - + - Fix several bugs in CINN subgraph partitioning and symbolization, and solve problems with Paddle training accessing the CINN. ([#36739](https://github.com/PaddlePaddle/Paddle/pull/36739), [#36698](https://github.com/PaddlePaddle/Paddle/pull/36698) ) - + - CINN does not yet support the control flow yet. Add logic to skip control flow when encountered.([#40812](https://github.com/PaddlePaddle/Paddle/pull/40812)) #### **Other** - Model quantization - + - Upgrade quantization storage format to unify quantization formats for dynamic and static graphs. ([#41041](https://github.com/PaddlePaddle/Paddle/pull/41041)) - + - Add new post training quantization (PTQ): EMD and Adaround. ([#40421](https://github.com/PaddlePaddle/Paddle/pull/40421), [#38460](https://github.com/PaddlePaddle/Paddle/pull/38460)) - + - Support to quantize more operations in PTQ and QAT, such as crop, split, ab, unsqueeze etc. ([#40083](https://github.com/PaddlePaddle/Paddle/pull/40083)) - + - Support to quantize operators in control flow. ([#37498](https://github.com/PaddlePaddle/Paddle/pull/37498)) - + - Support quantization of matmul_v2 operator. 
([#36469](https://github.com/PaddlePaddle/Paddle/pull/36469)) - + - Add support for quantized matmul_v2 inference on TensorRT. ([#36594](https://github.com/PaddlePaddle/Paddle/pull/36594)) - + - CUDA memory optimization - + - Implement multi-stream safe Allocator to support safe and efficient use of CUDA memory in asynchronous computing scenarios. ([#37290](https://github.com/PaddlePaddle/Paddle/pull/37290)) - + - Add new APIs (paddle.device.cuda.max_memory_allocated, paddle.device.cuda.max_memory_reserved, paddle.device.cuda.memory_allocated and paddle.device.cuda.memory_reserved) for GPU memory monitoring in runtime. ([#38657](https://github.com/PaddlePaddle/Paddle/pull/38657)) - + - Support allocate CUDA Managed Memory to train super large models in memory-constrained scenarios. ([#39075](https://github.com/PaddlePaddle/Paddle/pull/39075)) - + - Add GetBasePtr interface in C++ to get device address created with *cudaMalloc*. ([#37978](https://github.com/PaddlePaddle/Paddle/pull/37978)) - + - Reduce the number of free blocks in AutoGrowth Allocator to improve memory allocation performance. ([#35732](https://github.com/PaddlePaddle/Paddle/pull/35732)) - + - Remove redundant Float32 temporary tensor and cast operation for tensor with data type FP16 in`initializer.Normal` and `initializer.Constant`to save 2x memory. ([#38818](https://github.com/PaddlePaddle/Paddle/pull/38818)) - + - High-order derivative testing for models in dynamic graphs. - + - Add third-order derivative testing for network in dynamic graphs. ([#36814](https://github.com/PaddlePaddle/Paddle/pull/36814) , [#37377](https://github.com/PaddlePaddle/Paddle/pull/37377)) - Custom op: Support to custom op in ROCm(HIP) platform. ([#36771](https://github.com/PaddlePaddle/Paddle/pull/36771)) - + - Cost Model: Add basic Cost Model based on profiling infomation. 
([#35774](https://github.com/PaddlePaddle/Paddle/pull/35774)) - + - Added a function to allow user to add their own layer and correspond pruning way to ASP support. ([#40253](https://github.com/PaddlePaddle/Paddle/pull/40253)) - + - Add string tensor data structure, allowing the framework to have the ability to represent and process string. ([#39830](https://github.com/PaddlePaddle/Paddle/pull/39830), [#40992](https://github.com/PaddlePaddle/Paddle/pull/40992)) - + - Add or upgrade oneDNN FP32/int8/bfloat16 Kernel, including: - + - ELU ([#37149](https://github.com/PaddlePaddle/Paddle/pull/37149)) - + - exp ([#38624](https://github.com/PaddlePaddle/Paddle/pull/38624)) - + - stack ([#37002](https://github.com/PaddlePaddle/Paddle/pull/37002)) - + - softplus ([#36382](https://github.com/PaddlePaddle/Paddle/pull/36382)) - + - round ([#39653](https://github.com/PaddlePaddle/Paddle/pull/39653)) - + - shape ([#36033](https://github.com/PaddlePaddle/Paddle/pull/36033)) - + - flatten and flatten2 ([#35892](https://github.com/PaddlePaddle/Paddle/pull/35892)) - + - slice ([#37630](https://github.com/PaddlePaddle/Paddle/pull/37630)) - + - elementwise_mul ([#40546](https://github.com/PaddlePaddle/Paddle/pull/40546)) - + - elementwise_add ([#38176](https://github.com/PaddlePaddle/Paddle/pull/38176)) - + - ementwise_div ([#36158](https://github.com/PaddlePaddle/Paddle/pull/36158)) - + - elementwise_sub ([#35662](https://github.com/PaddlePaddle/Paddle/pull/35662)) - + - roi_align ([#37848](https://github.com/PaddlePaddle/Paddle/pull/37848)) - + - nearest_interp and nearest_interp_v2 ([#37985](https://github.com/PaddlePaddle/Paddle/pull/37985),[#38622](https://github.com/PaddlePaddle/Paddle/pull/38622),[#39490](https://github.com/PaddlePaddle/Paddle/pull/39490)) - + - assembly optimized Adam ([#39158](https://github.com/PaddlePaddle/Paddle/pull/39158)) - + - logsoftmax ([#39793](https://github.com/PaddlePaddle/Paddle/pull/39793)) - + - activation 
([#40721](https://github.com/PaddlePaddle/Paddle/pull/40721)) - + - mul ([#38552](https://github.com/PaddlePaddle/Paddle/pull/38552)) - + - mean ([#37104](https://github.com/PaddlePaddle/Paddle/pull/37104)) - + - relu ([#36265](https://github.com/PaddlePaddle/Paddle/pull/36265)) - + - pool2d ([#37081](https://github.com/PaddlePaddle/Paddle/pull/37081)) - + - concat ([#35889](https://github.com/PaddlePaddle/Paddle/pull/35889)) - + - conv2d ([#38507](https://github.com/PaddlePaddle/Paddle/pull/38507),[#38938](https://github.com/PaddlePaddle/Paddle/pull/38938),[#36284](https://github.com/PaddlePaddle/Paddle/pull/36284)) - + - LayerNorm ([#40418](https://github.com/PaddlePaddle/Paddle/pull/40418)) - Add the 3-stage storage graph retrieval engine based on SSD - host memory - GPU device memory, to support large-scale graph neural network training. ([#42472](https://github.com/PaddlePaddle/Paddle/pull/42472), [#42321](https://github.com/PaddlePaddle/Paddle/pull/42321), [#42027](https://github.com/PaddlePaddle/Paddle/pull/42027)) @@ -1519,75 +1519,75 @@ With the recent development of PaddlePaddle's compiler, a.k.a, CINN([GitHub - - Add backward implementation of `paddle.linalg.det `. ([#36013](https://github.com/PaddlePaddle/Paddle/pull/36013)) - Add support for mixed precision training O2 mode for `paddle.Model`, i.e., support for Pure FP16 training mode of the original dynamic/static graphs. ([#36441](https://github.com/PaddlePaddle/Paddle/pull/40962441)) - + - Support for self chain calls for `paddle.nn.Layer`. ([#36609](https://github.com/PaddlePaddle/Paddle/pull/36609)) - + - Add settings of `is_distributed` property for the `to` method of `paddle.nn.Layer` to ensure that the distributed properties remain consistent before and after network parameter transform. 
([#36221](https://github.com/PaddlePaddle/Paddle/pull/36221)) - + - Improve the parameter conversion logic of the `to` method of `paddle.nn.Layer`, to reduce the peak memory consumption of the conversion process and improve the conversion success rate. ([#36862](https://github.com/PaddlePaddle/Paddle/pull/36862)) - + - Support settings of the shape of the output Tensor for `paddle.incubate.graph_send_recv` to reduce the memory usage during the actual computation. ([#40509](https://github.com/PaddlePaddle/Paddle/pull/40509)) - + - Add the support of int32 and int64 data types for `paddle.incubate.segment_sum`, `segment_mean`, `segment_max`, and `segment_min`. ([#40577](https://github.com/PaddlePaddle/Paddle/pull/40577)) - + - Add the support of the bool type for transpose op. ([#35886](https://github.com/PaddlePaddle/Paddle/pull/35886)) - + - Switch the `paddle.mm` underlying operator from matmul to matmul_v2. ([#35770](https://github.com/PaddlePaddle/Paddle/pull/35770)) - + - Support static graph mode and support the unknown shape for `paddle.einsum`. ([#40360](https://github.com/PaddlePaddle/Paddle/pull/40360)) - + - Support data`parallelism for paddle.nn.functional.margin_cross_entropy` and `paddle.nn.functional.class_center_sample`. ([#39852](https://github.com/PaddlePaddle/Paddle/pull/39852)) - + - Support input of shape [1] for `paddle.nn.functional.grid_sample` . ([#36183](https://github.com/PaddlePaddle/Paddle/pull/36183)) - + - Support NHWC data format for `paddle.nn.PRelu` . ([#37019](https://github.com/PaddlePaddle/Paddle/pull/37019)) - + - Support the fixed random state using `paddle.seed` for `paddle.nn.functional.class_center_sample` . ([#38248](https://github.com/PaddlePaddle/Paddle/pull/38248)) - + - Add ROCM backend support for all APIs under `paddle.fft` , and optimize CUFFT backend error messages. 
([#36415](https://github.com/PaddlePaddle/Paddle/pull/36415), [#36114](https://github.com/PaddlePaddle/Paddle/pull/36114/files)) - + - Support the function that the slicing dimension i 0, that is, allow slicing index results to be empty . ([#37313](https://github.com/PaddlePaddle/Paddle/pull/37313)) - + - Support int and bool type Tensor with using bool index for `Tensor.setitem` . ([#37761](https://github.com/PaddlePaddle/Paddle/pull/37761)) - + - Support nearest mode for `paddle.nn.functional.interpolate` when the input shape is 5D. ([#38868](https://github.com/PaddlePaddle/Paddle/pull/38868)) - + - Add the support of int16 for `paddle.nn.Embedding`and`paddle.gather`. ([#40964](https://github.com/PaddlePaddle/Paddle/pull/40964), [#40052](https://github.com/PaddlePaddle/Paddle/pull/40052)) - + - Support data`parallelism on single machine on``CPU platform``in paddle.distributed.spawn` . ([#35745](https://github.com/PaddlePaddle/Paddle/pull/35745), [#36758](https://github.com/PaddlePaddle/Paddle/pull/36758), [#36637](https://github.com/PaddlePaddle/Paddle/pull/36637)) - + - Add `depthwise_conv2d` MKLDNN operator. ([#38484](https://github.com/PaddlePaddle/Paddle/pull/38484)) - + - Add complex types check in the static graph model for API`paddle.abs` , `paddle.transpose` , `paddle.squeeze` , `paddle.unsqueeze` , `paddle.matmul` , and `paddle.full` . ([#40113](https://github.com/PaddlePaddle/Paddle/pull/40113)) - + - Support tuple and list type arguments for `paddle.autograd.PyLayer` . ([#38146](https://github.com/PaddlePaddle/Paddle/pull/38146)) - + - Add check whether tensor is inplace and leaf when calculate gradient . ([#37931](https://github.com/PaddlePaddle/Paddle/pull/37931)) - + - Support HIP library for `paddle.autograd.PyLayer` . ([#38184](https://github.com/PaddlePaddle/Paddle/pull/38184)) - + - Support more size inputs for `paddle.take_along_axis` and `paddle.put_along_axis` , and allow index matrix shape size to be larger than array matrix shape size. 
([#39072](https://github.com/PaddlePaddle/Paddle/pull/39072)) - + - Optimize the error report message of API `paddle.nn.Pad2D` when replicate is 0. ([#36510](https://github.com/PaddlePaddle/Paddle/pull/36510/files)) - + - Support pad input in tuple format for API `paddle.nn.Pad2D` . ([#35985](https://github.com/PaddlePaddle/Paddle/pull/35985/files)) - + - Add tdm_sample API in `paddle.distributed.InMemoryDataset` to support sampling operations in TDM algorithms. ([#37044](https://github.com/PaddlePaddle/Paddle/pull/37044)) - + - Add Pre-saving Hooks mechanism for `paddle.jit.save` . ([#38186](https://github.com/PaddlePaddle/Paddle/pull/38186)) - + - Add new higher-order differentiation-related APIs. - + - `elementwise_add`: add third-order Kernel, to support computation of third-order differentiation. ([#36508](https://github.com/PaddlePaddle/Paddle/pull/36508), [#36618](https://github.com/PaddlePaddle/Paddle/pull/36618)) - + - `matmul_v2`: add third-order Kernel, to support computation of third-order differentiation. ([#36459](https://github.com/PaddlePaddle/Paddle/pull/36459)) - + - `elementwise_mul`: Add third-order Kernel, to support computation of third-order differentiation. ([#37152](https://github.com/PaddlePaddle/Paddle/pull/37547)) - + - Improve the logic of the `paddle.amp.GradScaler` to call check_finite_and_unscale op, to eliminate the cudaMemcpy introduced by the creation of the bool variable. ([#37770](https://github.com/PaddlePaddle/Paddle/pull/37770)) - + - Add check for unstack and unique op in case of input Tensor with 0 elements. ([#36021](https://github.com/PaddlePaddle/Paddle/pull/36021)) - + - Add new multi-layer, bi-directional LSTM function that supports KUNLUNXIN 2, to improve RNN forward/backward ops, and support the use of temporal model training. ([#](https://github.com/PaddlePaddle/Paddle/pull/41781)[42076](https://github.com/PaddlePaddle/Paddle/pull/42076)) - Add bce_loss forward/backward ops for KUNLUNXIN 2. 
([#41610](https://github.com/PaddlePaddle/Paddle/pull/41610)) @@ -1597,65 +1597,65 @@ With the recent development of PaddlePaddle's compiler, a.k.a, CINN([GitHub - #### IR(Intermediate Representation) - Dynamic Graphs to Static Graphs - + - Optimize the behavior of the `ProgramCache.last` interface for dynamic graph to static graph so that it returns the most recently used Program instead of the final generated Program. ([#39541](https://github.com/PaddlePaddle/Paddle/pull/39541)) - + - Optimize the error report message for the `paddle.reshape` API for dynamic graph to static graph, and add a new recommended usage hint. ([#40599](https://github.com/PaddlePaddle/Paddle/pull/40599)) - + - Optimize the type of exception catch in the `is_api_in_module` function when transcribing dynamic code to static code. ([#40243](https://github.com/PaddlePaddle/Paddle/pull/40243)) - + - Optimize the hint of error message for dynamic graph to static graph,hide warning information by default. ([#39730](https://github.com/PaddlePaddle/Paddle/pull/https://github.com/PaddlePaddle/Paddle/pull/39730)) - + - Add the support of type hint syntax for dynamic graph to static graph to improve the accuracy of variable type analysis. ([#39572](https://github.com/PaddlePaddle/Paddle/pull/39572)) - + - Optimize the `paddle.cond` function to allow values are equal for basic types such as bool and int . ([#37888](https://github.com/PaddlePaddle/Paddle/pull/37888)) - + - Optimize the decorate function `@to_static` to allow the switch of the train/eval mode. ([#37383](https://github.com/PaddlePaddle/Paddle/pull/37383)) - + - Optimize the stack of error report for dynamic graph to static graph, to highlight user-related codes and reduce the framework redundant error stack. ([#36741](https://github.com/PaddlePaddle/Paddle/pull/36741)) - + - Remove `no_value` placeholder from the return value of `paddle.cond`. 
([#36513](https://github.com/PaddlePaddle/Paddle/pull/36513)、[#36826](https://github.com/PaddlePaddle/Paddle/pull/36826)) - + - Adapt the run_program op to the new dynamic graph mode. ([#40198](https://github.com/PaddlePaddle/Paddle/pull/40198), [#40355](https://github.com/PaddlePaddle/Paddle/pull/40355)) - + - Add check for zip syntax. ([#37846](https://github.com/PaddlePaddle/Paddle/pull/https://github.com/PaddlePaddle/Paddle/pull/37846)) - + - Fix the dynamic graph to static graph failure due to the error of dimension and type judgment in the `paddle.signal.frame`, `paddle.signal.stft` and `paddle.signal.istft`. ([#40113](https://github.com/PaddlePaddle/Paddle/pull/40113)) - + - Add registration of plural type Kernel for mean, pad3d ops. ([#40113](https://github.com/PaddlePaddle/Paddle/pull/40113)) - + #### **Mixed Precision Training** - Add GPU Compute Capability environment check for amp. Add the usage warning for GPU environments that the fail acceleration for training. ([#38086](https://github.com/PaddlePaddle/Paddle/pull/38086)) - + - Add check of calling order when using `paddle.amp.decorate` and `paddle.DataParallel` at the same time. ([#38785](https://github.com/PaddlePaddle/Paddle/pull/38785)) - + #### **Distributed Training** - Basic functions of the distributed training - + - Optimize Fleet API and DistributedStrategy configuration to use dynamic graph parallel function conveniently. ([#40408](https://github.com/PaddlePaddle/Paddle/pull/40408)) - + - Optimize Dynamic Graph mixed parallel HybridParallelClipGrad strategy, support 4D hybrid parallel and Pure FP16 training. ([#36237](https://github.com/PaddlePaddle/Paddle/pull/36237), [#36555](https://github.com/PaddlePaddle/Paddle/pull/36555)) - + - Restructure dynamic graph data parallel strategy, to support new dynamic graph and communication. 
([#40389](https://github.com/PaddlePaddle/Paddle/pull/40389), [#40593](https://github.com/PaddlePaddle/Paddle/pull/40593), [#40836](https://github.com/PaddlePaddle/Paddle/pull/40836), [#41119](https://github.com/PaddlePaddle/Paddle/pull/41119), [#41413](https://github.com/PaddlePaddle/Paddle/pull/41413), [#39987](https://github.com/PaddlePaddle/Paddle/pull/39987)) - + - Support distributed tensor model parallel for fused_attention op. ([#40101](https://github.com/PaddlePaddle/Paddle/pull/40101)) - + - Support the distributed tensor model parallel for fused_feedforward op. ([#40160](https://github.com/PaddlePaddle/Paddle/pull/40160)) - + - Graph retrieval engine - + - Optimize the data format returned by the graph sampling interface of the graph engine, with a 3x improvement of the sampling speed. ([#37315](https://github.com/PaddlePaddle/Paddle/pull/37315)) - + - Reduce the amount of graph engine threads to improve performance. ([#37098](https://github.com/PaddlePaddle/Paddle/pull/37098)) - + - Optimize graph engine data transfer to improve performance. ([#37341](https://github.com/PaddlePaddle/Paddle/pull/37341)) - + - Optimize the merge logic of embedding op to improve performance by exploiting the topological relationship of embedding op in the model. [(#35942)](https://github.com/PaddlePaddle/Paddle/pull/35942) - + - Communication library: restructure the communication library to improve the scalability and development of the communication library, and support heterogeneous communication. 
([#41398](https://github.com/PaddlePaddle/Paddle/pull/41398), [#39720](https://github.com/PaddlePaddle/Paddle/pull/39720), [#40911](https://github.com/PaddlePaddle/Paddle/pull/40911), [#40579](https://github.com/PaddlePaddle/Paddle/pull/40579), [#40629](https://github.com/PaddlePaddle/Paddle/pull/40629), [#40437](https://github.com/PaddlePaddle/Paddle/pull/40437), [#40430](https://github.com/PaddlePaddle/Paddle/pull/40430), [#40228](https://github.com/PaddlePaddle/Paddle/pull/40228), [#40181](https://github.com/PaddlePaddle/Paddle/pull/40181), [#40100](https://github.com/PaddlePaddle/Paddle/pull/40100), [#40097](https://github.com/PaddlePaddle/Paddle/pull/40097), [#39892](https://github.com/PaddlePaddle/Paddle/pull/39892), [#39384](https://github.com/PaddlePaddle/Paddle/pull/39384), [#39737](https://github.com/PaddlePaddle/Paddle/pull/39737), [#40040](https://github.com/PaddlePaddle/Paddle/pull/40040)) - Support the publication of MoE-related interfaces in `paddle.incubate.distributed.models.moe ` (`moe.GShardGate `, `moe.BaseGate `, `moe.SwitchGate `, `moe.MoELayer `, and `moe. ClipGradForMOEByGlobalNorm `). ([#42300](https://github.com/PaddlePaddle/Paddle/pull/42300)) @@ -1668,41 +1668,41 @@ With the recent development of PaddlePaddle's compiler, a.k.a, CINN([GitHub - #### **Custom operator** -- Enhance the C++ custom operator mechanism for writing second-order gradient operators, to support adding suffixes to the gradient input variables of second-order gradient operators for use as outputs. ([#41781](https://github.com/PaddlePaddle/Paddle/pull/41781)) +- Enhance the C++ custom operator mechanism for writing second-order gradient operators, to support adding suffixes to the gradient input variables of second-order gradient operators for use as outputs. ([#41781](https://github.com/PaddlePaddle/Paddle/pull/41781)) -- Remove the use of the deprecated enumeration type `PlaceType` from the Tensor API member methods, make it compatible, and add a deprecation warning. 
([#41882](https://github.com/PaddlePaddle/Paddle/pull/41882)) +- Remove the use of the deprecated enumeration type `PlaceType` from the Tensor API member methods, make it compatible, and add a deprecation warning. ([#41882](https://github.com/PaddlePaddle/Paddle/pull/41882)) -- Add deprecated warning for a number of deprecated interfaces of the original Tensor API, including the incomplete constructor, reshape, mutable_data, and copy_to methods. ([#41882](https://github.com/PaddlePaddle/Paddle/pull/41882)) +- Add deprecated warning for a number of deprecated interfaces of the original Tensor API, including the incomplete constructor, reshape, mutable_data, and copy_to methods. ([#41882](https://github.com/PaddlePaddle/Paddle/pull/41882)) #### **Other** - Error report and debugging optimization - + - Optimize `the error message of the label` boundary check for the cross_entropy op. ([#40001](https://github.com/PaddlePaddle/Paddle/pull/40001)) - + - Add profile record for `infer_shape` and `compute` methods of op execution of dynamic graphs, show their cost in timeline. ([#39023](https://github.com/PaddlePaddle/Paddle/pull/39023)) - + - Replace `pybind::index_error` error hint on Windows for unknown exceptions. ([#40538](https://github.com/PaddlePaddle/Paddle/pull/40538)) - + - Add the error message in the out-of-bounds checks for user scatter op. ([#37429](https://github.com/PaddlePaddle/Paddle/pull/37429)) - + - Download tool: For the problem of slow decompression of directories with multiple files in `paddle.utils.download.get_path_from_url`, replace the original way (traverse directory in loop) of decompressing files in directories one by one by calling extractall on the directory, which greatly improves the decompression speed. 
([#37311](https://github.com/PaddlePaddle/Paddle/pull/37311)) - + - Speed up the quantization training for`fake_quantize_range_abs_max`、`fake_quantize_abs_max`、`fake_quantize_dequantize_abs_max`、 `fake_quantize_moving_average_abs_max`, etc. ([#40491](https://github.com/PaddlePaddle/Paddle/pull/40491)) - + ### **(3) Performance optimization** #### **Distributed Training** - Hybrid parallel optimizer `sharding_optimizer` supports `optimize_cast` optimization, which move the parameter cast during forward and backwark stage to the optimizer stage. This improves performance by 7%. ([#35878](https://github.com/PaddlePaddle/Paddle/pull/35878)) - + - GPUPS optimization: support for gradient fuse allreduce training. This improves training performance by 20%. ([#35131](https://github.com/PaddlePaddle/Paddle/pull/35131)) - + - GPUPS optimization: dump CPU optimization speed improves by 3.21x. ([#40068](https://github.com/PaddlePaddle/Paddle/pull/40068)) - + - CPU parameter server streaming training optimization: support for automatic statistics of sparse parameter statistics, incremental saving of sparse parameters, etc. The training performance improves by 20%. 
([#36465](https://github.com/PaddlePaddle/Paddle/pull/36465), [#36601](https://github.com/PaddlePaddle/Paddle/pull/36601), [#36734](https://github.com/PaddlePaddle/Paddle/pull/36734), [#36909](https://github.com/PaddlePaddle/Paddle/pull/36909), [#36943](https://github.com/PaddlePaddle/Paddle/pull/36943), [#37181](https://github.com/PaddlePaddle/Paddle/pull/37181), [#37194](https://github.com/PaddlePaddle/Paddle/pull/37194), [#37515](https://github.com/PaddlePaddle/Paddle/pull/37515), [#37626](https://github.com/PaddlePaddle/Paddle/pull/37626), [#37995](https://github.com/PaddlePaddle/Paddle/pull/37995), [#38582](https://github.com/PaddlePaddle/Paddle/pull/38582), [#39250](https://github.com/PaddlePaddle/Paddle/pull/39250), [#40762](https://github.com/PaddlePaddle/Paddle/pull/40762), [#41234](https://github.com/PaddlePaddle/Paddle/pull/41234), [#41320](https://github.com/PaddlePaddle/Paddle/pull/41320), [#41400](https://github.com/PaddlePaddle/Paddle/pull/41400)) - + #### **Auto-tuning** Add hardware-aware automatic performance tuning for the full training process, with performance improvements of about 3% to 50% or more on image classification, segmentation, detection, and image generation tasks compared to the model's default configuration. The auto-tuning status is set via the `paddle.incubate.autotune.set_config ` API. By default, it is currently disabled. Auto-tuning has three specific levels: @@ -1716,59 +1716,59 @@ Add hardware-aware automatic performance tuning for the full training process, w #### **Operator Optimization** - Optimize `FasterTokenizer` performance, with a 10% performance improvement compared to pre-optimization. ([#36701](https://github.com/PaddlePaddle/Paddle/pull/36701)) - + - Optimize `index_select` inverse computation, with 3.7~25.2x performance improvement over pre-optimization. ([#37055](https://github.com/PaddlePaddle/Paddle/pull/37055)) - + - Optimize the performance of `paddle.nn.ClipByGlobalNorm` . 
Take 10*10 `paddle.nn.Linear` as an example. In contrast to pre-optimization, the performance improves by about 30%. ([#38209](https://github.com/PaddlePaddle/Paddle/pull/38209)) - + - Optimize the performance of `pnorm` with very large or very small `axis` dimensions, with 31-96x improvement in forward speed and 1.1-19x improvement in backward speed. ([#37685](https://github.com/PaddlePaddle/Paddle/pull/37685), [#38215](https://github.com/PaddlePaddle/Paddle/pull/38215), [#39011](https://github.com/PaddlePaddle/Paddle/pull/39011)) - + - Optimize `softmax` forward and backward performance, with a speedup ratio of about 2x for the `axis!=-1` configuration. ([#38602](https://github.com/PaddlePaddle/Paddle/pull/38602), [#38609](https://github.com/PaddlePaddle/Paddle/pull/38609), [#32387](https://github.com/PaddlePaddle/Paddle/pull/32387), [#37927](https://github.com/PaddlePaddle/Paddle/pull/37927/files)) - + - Optimize `log_softmax` forward and backward performance, with a speedup ratio of about 6x to 20x for `axis!=-1` configurations. ([#38992](https://github.com/PaddlePaddle/Paddle/pull/38992), [#40612](https://github.com/PaddlePaddle/Paddle/pull/40612)) - + - Optimize `softmax_with_cross_entropy` forward and backward performance, with a speedup ratio of about 1.3x for the `hard_label` configuration. ([#39553](https://github.com/PaddlePaddle/Paddle/pull/39553), [#40424](https://github.com/PaddlePaddle/Paddle/pull/40424), [#40643](https://github.com/PaddlePaddle/Paddle/pull/40643)) - + - Optimize `top_k` performance, with a speedup ratio of more than 22x for one-dimension and larger `k` (k=5000) configuration. ([#40941](https://github.com/PaddlePaddle/Paddle/pull/40941)) - + - Optimize `elementwise_mul` backward computation, with 1.85~12.16x performance improvement over pre-optimization. 
([#37728](https://github.com/PaddlePaddle/Paddle/pull/37728)) - + - Optimize `elementwise_min` and `elementwise_max` backward computation, to equalize or improve performance by 1.05x to 18.75x over pre-optimization. ([#38236](https://github.com/PaddlePaddle/Paddle/pull/38236), [#37906](https://github.com/PaddlePaddle/Paddle/pull/37906)) - + - Optimize `nearest_interp` forward and backward computation, with forward performance improvement by 1.5x to 2.3x over pre-optimization, and backward performance improvement by 60% to 1.8x over pre-optimization. ([#38528](https://github.com/PaddlePaddle/Paddle/pull/38528), [#39067](https://github.com/PaddlePaddle/Paddle/pull/39067)) - + - Optimize `bilinear_interp` forward and backward computation, with forward performance improvement by 0.4x to 2.3x over pre-optimization, and backward performance improvement by 10%-30% over pre-optimization. ([#39243](https://github.com/PaddlePaddle/Paddle/pull/39243), [#39423](https://github.com/PaddlePaddle/Paddle/pull/39423)) - + - Optimize `dropout` forward and backward computation, with performance improvement by about 20%. ([#39795](https://github.com/PaddlePaddle/Paddle/pull/39795), [#38859](https://github.com/PaddlePaddle/Paddle/pull/38859), [#38279](https://github.com/PaddlePaddle/Paddle/pull/38279), [#40053](https://github.com/PaddlePaddle/Paddle/pull/40053)) - + - Optimize `grid_sampler` forward and backward computation, with forward performance improvement by 10% to 30% over pre-optimization, and backward performance improvement by 10% to 60% over pre-optimization. ([#39751](https://github.com/PaddlePaddle/Paddle/pull/39751)) - + - Optimize `group_norm` forward and backward computation, with the forward performance improvement by 1.04x to 2.35x, and backward performance improvement by 1.12x to 1.18x. 
([#39944](https://github.com/PaddlePaddle/Paddle/pull/39944), [#40657](https://github.com/PaddlePaddle/Paddle/pull/40657), [#39596](https://github.com/PaddlePaddle/Paddle/pull/39596)) - + - Optimize `conv1d` forward and backward computation, with the forward performance improvement by 1.00x to 2.01x, and backward performance improvement by 1.01x to 474.56x. ([#38425](https://github.com/PaddlePaddle/Paddle/pull/38425)) - + - Optimize `elementwise_div` backward computation, with the backward performance improvement by 1.02x to 29.25x. ([#38044](https://github.com/PaddlePaddle/Paddle/pull/38044)) - + - Optimize `gelu` forward and backward computation, with the backward performance improvement by 1.13x to 1.43x, and reverse performance improvement by 1.10x to 1.55x. ([#38188](https://github.com/PaddlePaddle/Paddle/pull/38188), [#38263](https://github.com/PaddlePaddle/Paddle/pull/38263)) - + - Optimize `elementwise_sub` backward computation, with the backward performance improvement by 1.04x to 15.64x. ([#37754](https://github.com/PaddlePaddle/Paddle/pull/37754)) - + - Optimize `flip's` forward performance on one-dimensional data input, with the performance improvement by 100%. ([#37825](https://github.com/PaddlePaddle/Paddle/pull/37825)) - + - Optimize `layer_norm` forward and backward computation, with the forward performance improvement by 2x to 5x over pre-optimization, and backward performance improvement by 20% to 50% over pre-optimization. ([#39167](https://github.com/PaddlePaddle/Paddle/pull/39167), [#39247](https://github.com/PaddlePaddle/Paddle/pull/39247)) - + - Optimize `embedding` forward and backward computation, with a maximum improvement of 1.51x in forward performance and 1.03x to 7.79x in backward performance. 
([#39856](https://github.com/PaddlePaddle/Paddle/pull/39856), [#39886](https://github.com/PaddlePaddle/Paddle/pull/398866)) - + - Optimize `gelu` FP16 forward and backward calculations, with forward performance improvement by 9% to 12% over pre-optimization, and backward performance improvement by 2% to 9% over pre-optimization. ([#38980](https://github.com/PaddlePaddle/Paddle/pull/38980)) - + - Remove CPU -> GPU explicit data transfer operation in `gather_nd` forward and backward operators, and remove the explicit synchronous operation in `index_select` forward and backward operators. Change GPU -> GPU data transfer in `scatter_nd` from synchronous operation to asynchronous operation. ([#40933](https://github.com/PaddlePaddle/Paddle/pull/40933)) - + - Optimize `Lars optimzier` computation, with the training performance improvement of Resnet50 PF16 model by 5.1% over pre-optimization. ([#35652](https://github.com/PaddlePaddle/Paddle/pull/35652), [#35476](https://github.com/PaddlePaddle/Paddle/pull/35476)) - + - Optimize `AvgPool2dGrad` computation, with the performance improvement by 2.6x over pre-optimization. ([#35389](https://github.com/PaddlePaddle/Paddle/pull/35389)) - + - Optimize `Elementwise` computation for multivariate output, improving performance by up to 15% over pre-optimization. ([#38329](https://github.com/PaddlePaddle/Paddle/pull/38329), [#38410](https://github.com/PaddlePaddle/Paddle/pull/38410)) - + - Optimize `Categorical`the probs computation, simplify the computation logic, and improve the performance by 4x to 5x. ([#42178](https://github.com/PaddlePaddle/Paddle/pull/42178)) - Optimize the `paddle.sum ` performance, with performance improvement by about 20%. 
([#42309](https://github.com/PaddlePaddle/Paddle/pull/42309)) @@ -1784,119 +1784,119 @@ Add hardware-aware automatic performance tuning for the full training process, w #### API - Fix the output type error with `paddle.sum` when the input parameter type and output parameter type do not match and the number of reduce elements on the `axis` is 1. ([#36123](https://github.com/PaddlePaddle/Paddle/pull/36123)) - + - Fix an `AttributeError` in `paddle.flops` when the layer output type is tuple. ([#38850](https://github.com/PaddlePaddle/Paddle/pull/38850)) - + - Fix the `paddle.diag` failing to propagate gradients because there is no backward kernel. ([#40447](https://github.com/PaddlePaddle/Paddle/pull/40447)) - + - Fix an error in sorting `paddle.sort` input with NaN values. ([#41070](https://github.com/PaddlePaddle/Paddle/pull/41070)) - + - Fix the error when`paddle.full_like`'s input contains INF value. ([#40232](https://github.com/PaddlePaddle/Paddle/pull/40232)) - + - Fix the bug in `paddle.strided_slice`: strided_slice result does not consistent with slice when the data in the input of starts is less than -rank. ([#39066](https://github.com/PaddlePaddle/Paddle/pull/39066)) - + - Fix the bug in the `max_pool` family of operators where infer_shape is calculated incorrectly when index is returned. This affects the APIs: `paddle.nn.functional.max_pool1d/2d/3d`, `paddle.nn.functional.adaptive_max_pool1d/2d/3d`, `paddle.nn.MaxPool1D/2D/3D`, `paddle.nn.AdaptiveMaxPool1D/2D/3D`. ([#40139](https://github.com/PaddlePaddle/Paddle/pull/40139)) - + - Fix an issue where the dtype of pooling_mask returned by the `max_pool` family of operators is incorrect. Now the dtype of pooling_mask is int32. The affected APIs are `paddle.nn.functional.max_pool1d/2d/3d`, `paddle.nn.functional.adaptive_max_pool1d/2d/3d`, `paddle.nn.MaxPool1D/2D/3D`, `paddle.nn.AdaptiveMaxPool1D/2D/3D`. 
([#39314](https://github.com/PaddlePaddle/Paddle/pull/39314) ) - + - Fix the bug with `paddle.shape` where the backward gradient by default causes a computation error. ([#37340](https://github.com/PaddlePaddle/Paddle/pull/37340)) - + - Fix the bug in `paddle.nn.Layer's` `to` method when converting both dtype and place at the same time. ([#37007](https://github.com/PaddlePaddle/Paddle/pull/38007)) - + - Fix the bug that `paddle.amp.decorate` fails to rewrite the parameters of non-leaf network layers to FP16. ([#38402](https://github.com/PaddlePaddle/Paddle/pull/38402)) - + - Fix the bug that the `paddle.amp.decorate` rewrites the non-input parameter in `paddle.nn.BatchNorm1D`, `paddle.nn.BatchNorm2D`, and `paddle.nn.BatchNorm3D` to FP16. ([#38541](https://github.com/PaddlePaddle/Paddle/pull/38541)) - + - Fix the bug that the `paddle.amp.decorate` rewrites the non-input parameter in `paddle.nn.SyncBatchNorm` to FP16. ([#40943](https://github.com/PaddlePaddle/Paddle/pull/40943)) - + - Fix redundant warnings in `paddle.nn.Layer.to`. ([#36700](https://github.com/PaddlePaddle/Paddle/pull/36700)) - + - Fix the bug in `paddle.nn.RNN` when being used inside control flow. ([#41162](https://github.com/PaddlePaddle/Paddle/pull/41162)) - + - Fix the bug that the `paddle.to_tensor` fails to specify the CUDAPlace of the Tensor. ([#39662](https://github.com/PaddlePaddle/Paddle/pull/39662)) - + - Fix the issue that`paddle.nn.Identity` is not exposed. ([#39615](https://github.com/PaddlePaddle/Paddle/pull/39615)) - + - Fix the bug where the output values of the `fill_` and `zero_` inplace APIs are incorrect when the input is on a CUDAPinned Place after dynamic graph reconstruction. ([#41229](https://github.com/PaddlePaddle/Paddle/pull/41229)) - + - After refactoring the dynamic graph, fix the bug of incorrect inplace version value of the output Tensor when calling assign op using the append op. Change it to call assign op using the `_C_ops`. 
([#41118](https://github.com/PaddlePaddle/Paddle/pull/41118)) - + - Remove unreasonable codes in the `elementwise_add` 's third-order kernel, and fix an uninitialized issue in the network creation process. ([#36618](https://github.com/PaddlePaddle/Paddle/pull/36618)) - + - Fix the missing attribute bug in `conv2d` execution of cuDNN Kernel. ([#38827](https://github.com/PaddlePaddle/Paddle/pull/38827)) - + - Fix an issue where `multiclass_nms3` output shape is incorrect. ([#40059](https://github.com/PaddlePaddle/Paddle/pull/40059)) - + - Fix an issue with `yolo_box` outputting incorrect shape. ([#40056](https://github.com/PaddlePaddle/Paddle/pull/40056)) - + - Fix an issue where the higher-order differentiation `gradients` interface does not take effect as expected when target_grad is specified. ([#40940](https://github.com/PaddlePaddle/Paddle/pull/40940/)) - + - Fix an issue that the network parameter type is incorrect when the default_dtype is modified in the op`_BatchNormBase` base class in the dynamic graph mode. The affected APIs are `paddle.nn.BatchNorm1D`,`paddle.nn.BatchNorm2D`,`paddle.nn.BatchNorm3D`, and `paddle.nn.SyncBatchNorm`. Specific reason: when `get_default_dtype() == 'float16'`, the default parameter data type is modified by `set_default_dtype('float32')` . The parameter type in dynamic graph mode is created by default_dtype; therefore, the change of the default parameter type causes the subsequent networking Parameter type error. ([#36376](https://github.com/PaddlePaddle/Paddle/pull/36376)) - + - Fix the bug of the undefined intermediate variable in the backward op in batchnorm op in case that the data type is FP32 and the data dimension is `dims = 2 and data_layout = NHWC`. ([#37020](https://github.com/PaddlePaddle/Paddle/pull/37020)) - + - Fix the bug that shape of weights is incorrect, when using`paddle.static.nn.prelu` in static graph mode, and input format is`NHWC`, `mode==channel`. 
([#38310](https://github.com/PaddlePaddle/Paddle/pull/38310)) - + - Fix the bug of `paddle.nn.functional.class_center_sample`: CUDA seed setting issue in multi-machine case. ([#38815](https://github.com/PaddlePaddle/Paddle/pull/38815)) - + - Fix the bug of failing to report error when the input of`paddle.nn.functional.one_hot`is incorrect. ([#41335](https://github.com/PaddlePaddle/Paddle/pull/41335)) - + - Fix an issue where a callback to reclaim device memory on a DCU device is not triggered in time, resulting in an OOM of the device memory. ([#40445](https://github.com/PaddlePaddle/Paddle/pull/40445)) - + - Fix the bugs of `setitem` backward gradient abnormal and inplace logic handling abnormal in some dynamic graph scenarios. ([#37023](https://github.com/PaddlePaddle/Paddle/pull/37023), [#38298](https://github.com/PaddlePaddle/Paddle/pull/38298)) - + - Fix the bug of index abnormal when Tensor array uses the Slice to index in the dynamic to static scenarios. ([#39251](https://github.com/PaddlePaddle/Paddle/pull/39251)) - + - Fix the bug of memory or device memory leaks caused by some temporary variables not being correctly destructed when `paddle.Tensor.register_hook` interface is used. ([#40716](https://github.com/PaddlePaddle/Paddle/pull/40716)) - + - Fix the bug that `Tensor.getitem` cannot get the value when the index is a bool Tensor with all False. ([#41297](https://github.com/PaddlePaddle/Paddle/pull/41297)) - + - Fix the bug that `Tensor.getitem` cannot get the value when the index is a bool scalar Tensor. ([#40829](https://github.com/PaddlePaddle/Paddle/pull/40829)) - + - Fix the bug in `paddle.index_select` when index is a 0-shape Tensor. ([#41383](https://github.com/PaddlePaddle/Paddle/pull/41383)) - + - Fix the bug when the number of GPU threads requested by `paddle.index_select` and `paddle.index_sample` exceeds the limited machine resources. 
([#41127](https://github.com/PaddlePaddle/Paddle/pull/41127), [#37816](https://github.com/PaddlePaddle/Paddle/pull/37816), [#39736](https://github.com/PaddlePaddle/Paddle/pull/39736), [#41563](https://github.com/PaddlePaddle/Paddle/pull/41563)) - + - Fix the bug when ReduceConfig, elemwise_grad, gather, gather_nd, and scatter ops request more GPU threads than the limited machine resources. ([#40813](https://github.com/PaddlePaddle/Paddle/pull/40813), [#41127](https://github.com/PaddlePaddle/Paddle/pull/41127)) - + - Fix the bug that the memory access is out of boundary when NX ! = 1 in ReadData, ReadDataBc, and ReadDataReduce in Kernel Primitive API. ([#36373](https://github.com/PaddlePaddle/Paddle/pull/36373)) - + - Fix the bug of the computation result abnormal due to data overflow caused by the IndexRandom data type error. ([#39867](https://github.com/PaddlePaddle/Paddle/pull/39867), [#39891](https://github.com/PaddlePaddle/Paddle/pull/39891)) - + - Fix the bug of the returned computing result error of reduce op when reduce_num = 1. ([#38771](https://github.com/PaddlePaddle/Paddle/pull/38771)) - + - Fix the bug of the memory access out-of-bound of reduce op in the middle dimension of reduce in HIP environments. ([#41273](https://github.com/PaddlePaddle/Paddle/pull/41273)) - + - Fix the bug of Kernel failed to properly release in the computation of two FP16 one-dimensional vectors of matmul op. - + - Fix the bug caused by CUDA integer computation overflow for some operators, including: bernoulli, gaussian_random, gumbel_softmax, multinomial, truncated_gaussian_random, uniform_ random_inplace, and uniform_random ops. ([#37670](https://github.com/PaddlePaddle/Paddle/pull/37670)) - + - Fix the bug where `paddle.nn.Sequential` reports a KeyError error when traversing sublayers in a for loop. ([#39372](https://github.com/PaddlePaddle/Paddle/pull/39372)) - + - Fix the bug of the check shape error in `paddle.nn.functional.unfold` when compiling in static graphs. 
([#38907](https://github.com/PaddlePaddle/Paddle/pull/38907), [#38819](https://github.com/PaddlePaddle/Paddle/pull/38819)) - + - Fix the bug of reporting an error if `axis` is specified when using dropout for static graphs. ([#37223](https://github.com/PaddlePaddle/Paddle/pull/37223)) - + - Migrate the matmul operator in the `paddle.nn.MultiHeadAttention` to the matmul_v2 operator. ([#36222](https://github.com/PaddlePaddle/Paddle/pull/36222)) - + - Fix the bug occurred in throwing FPE when the empty Tensor is used in `paddle.nn.functional.label_smooth`. ([#35861](https://github.com/PaddlePaddle/Paddle/pull/35861)) - + - Fix the deformation bug of reshape op when input is an empty Tensor. Support the empty Tensor rehape to [-1]. ([#36087](https://github.com/PaddlePaddle/Paddle/pull/36087)) - + - Fix the bug of the modified values will incorrectly override other rows when the `fill_diagonal` 's input parameter offset is non-zero. ([#36212](https://github.com/PaddlePaddle/Paddle/pull/36212)) - + - Modify stop_gradient returned by the range op bing set to True in dynamic graph mode. ([#37486](https://github.com/PaddlePaddle/Paddle/pull/37486)) - + - Fix the bug where Lamb optimizer is updated incorrectly when Beta1Pow and Beta2Pow are on the GPU. ([#38518](https://github.com/PaddlePaddle/Paddle/pull/38518)) - + - Fix the bug where the conv2d operator doesn't respect to FLAGS_cudnn_deterministic. ([#37173](https://github.com/PaddlePaddle/Paddle/pull/37173)) - + - Fix the bug caused by an earlier version of cufft that does not define CUFFT_VERSION. ([#37312](https://github.com/PaddlePaddle/Paddle/pull/37312)) - + - Fix the computing error of `paddle.ifftshit` and `paddle.fftshift`. ([#36834](https://github.com/PaddlePaddle/Paddle/pull/36834), [#36748](https://github.com/PaddlePaddle/Paddle/pull/36748)) - + - Fix the `axis` computation error in `paddle.fft` series of APIs. 
([#36321](https://github.com/PaddlePaddle/Paddle/pull/36321)) - + - Fix an output data type registration bug of batch_norm_grad op in case of FP16 data type. This bug causes the compilation failure in some scenarios. There is also the impact on FP16 computational precision. ([#42461](https://github.com/PaddlePaddle/Paddle/pull/42461)) - Fix the incorrect Infershape information bug in the `paddle.nn.functional.pad ` API when the padding is Tensor in dynamic to static conversion. ([#42414](https://github.com/PaddlePaddle/Paddle/pull/42414)) @@ -1907,9 +1907,9 @@ Add hardware-aware automatic performance tuning for the full training process, w - Fix a nan/inf bug calculated in fused_attention op with FusedResidualDropoutBias on V100. ([#42398](https://github.com/PaddlePaddle/Paddle/pull/42398)) -- Fix a redundant data transform bug introduced by the full_like op during execution. ([#41973](https://github.com/PaddlePaddle/Paddle/pull/41973)) +- Fix a redundant data transform bug introduced by the full_like op during execution. ([#41973](https://github.com/PaddlePaddle/Paddle/pull/41973)) -- Fix a problem with p_norm op calculating nan on GPU environments. ([#41804](https://github.com/PaddlePaddle/Paddle/pull/41804)) +- Fix a problem with p_norm op calculating nan on GPU environments. ([#41804](https://github.com/PaddlePaddle/Paddle/pull/41804)) - Fix a section error of split op when the sections parameter has a size of 0. ([#41755](https://github.com/PaddlePaddle/Paddle/pull/41755)) @@ -1917,213 +1917,213 @@ Add hardware-aware automatic performance tuning for the full training process, w - Fix the bug that the deprecated interface reports a warning in case of `import paddle` due to a PIL version update. ([#42307](https://github.com/PaddlePaddle/Paddle/pull/42307)) -- Fix the bug that `paddle.linalg.matrix_rank ` does not support tol as FP64 Tensor under static graph. 
([#42085](https://github.com/PaddlePaddle/Paddle/pull/42085)) +- Fix the bug that `paddle.linalg.matrix_rank ` does not support tol as FP64 Tensor under static graph. ([#42085](https://github.com/PaddlePaddle/Paddle/pull/42085)) #### IR(Intermediate Representation) - Dynamic to static graphs - + - Fix a type derivation error in reverse gradient accumulation when the `tensor_array` is used with the control flow. ([#39585](https://github.com/PaddlePaddle/Paddle/pull/39585), [#39689](https://github.com/PaddlePaddle/Paddle/pull/39689)) - + - Fix an issue where the parameter gradient type is not set correctly during dynamic to static AMP training. ([#40938](https://github.com/PaddlePaddle/Paddle/pull/40938)) - + - Fix an issue of reporting an error in the dynamic to static transcription when there are misplaced annotations in the codes. ([#39035](https://github.com/PaddlePaddle/Paddle/pull/39035), [#38003](https://github.com/PaddlePaddle/Paddle/pull/38003)) - + - Fix an issue where Tensor is not properly converted to Variable when calling a non-forward function in dynamic to static codes. ([#37296](https://github.com/PaddlePaddle/Paddle/pull/37296), [#38540](https://github.com/PaddlePaddle/Paddle/pull/38540)) - + - Fix an issue where `paddle` is incorrectly passed as a variable when dynamic to static transcription. ([#37999](https://github.com/PaddlePaddle/Paddle/pull/37999)) - + - Fix an issue where model parameters are incorrectly counted when calling `paddle.flops` after model dynamic to static conversion. ([#36852](https://github.com/PaddlePaddle/Paddle/pull/36852)) - + - Fix an issue where GPU memory will keep growing in train mode and no_grad contexts after loading models using the `paddle.jit.save/load` interface. ([#36434](https://github.com/PaddlePaddle/Paddle/pull/36434)) - + - Add warning in function of convert_call when converting the generator function. 
([#35369](https://github.com/PaddlePaddle/Paddle/pull/35369)) - + - Fix the run_program op dependency analysis bug. ([#38470](https://github.com/PaddlePaddle/Paddle/pull/38470)) - + - Fix the code conversion bug when returning a single value in control flow For. ([#40683](https://github.com/PaddlePaddle/Paddle/pull/40683)) - + - Fix the bug when generating a reverse op when the input to conditional_block op contains LoDTensorArray. ([#39585](https://github.com/PaddlePaddle/Paddle/pull/39585)) - + - Fix the bug that `padddle.jit.save ` loses the forward_pre_hook and forward_post_hook of the top Layer in case of the export of a dynamic-to-static model. ([#42273](https://github.com/PaddlePaddle/Paddle/pull/42273)) - Fix the dynamic to static conversion error report where the shape parameter in `paddle.expand ` contains a Tensor. ([#41973](https://github.com/PaddlePaddle/Paddle/pull/41973)) - + #### **Distributed Training** - Distributed training basic functions - + - Fix the bug of a port reporting error in the distributed multi-machine training. ([#37274](https://github.com/PaddlePaddle/Paddle/pull/37274)) - + - Fix the brpc compilation dependency bug. ([#37064](https://github.com/PaddlePaddle/Paddle/pull/37064)) - + - Fix an occupied port issue due to tcp self-connections when Fleet starts. ([#38174](https://github.com/PaddlePaddle/Paddle/pull/38174)) - + - Fix the precision degradation bug under data parallel due to inconsistent initialization of FP16 parameters under multiple cards. ([#38838](https://github.com/PaddlePaddle/Paddle/pull/38838), [#38563](https://github.com/PaddlePaddle/Paddle/pull/38563), [#38405](https://github.com/PaddlePaddle/Paddle/pull/38405)) - + - Fix the precision degradation under data parallel due to FP16 gradient synchronization without dividing by the number of cards. 
([#38378](https://github.com/PaddlePaddle/Paddle/pull/38378)) - + - Dynamic graph mixing parallel - + - Fix the bug where parameters are not updated in FP16 mode under mixed parallel by using the new update interface. ([#36017](https://github.com/PaddlePaddle/Paddle/pull/36017)) - Static graph mixing parallel - + - Fix an issue where grad merge is not compatible with ClipGradientByGlobalNorm in distributed dp mode. ([#36334](https://github.com/PaddlePaddle/Paddle/pull/36334)) - + - Fix an issue under hybrid parallelism where the non-distributed parameters of tensor model parallelism are not broadcast during the initialization phase, resulting in inconsistent non-distributed parameters across cards. ([#36186](https://github.com/PaddlePaddle/Paddle/pull/36186)) - + - Fix the issue that sharding's save_persistables interface does not save FP16 parameters and offload persistent variables when sharding is enabled with offload. ([#40477](https://github.com/PaddlePaddle/Paddle/pull/40477)) - + - Fix the bug where ema parameters are not saved on non-0 cards when sharding is enabled for training. ([#39860](https://github.com/PaddlePaddle/Paddle/pull/39860)) - + - Fix an issue where FC incorrectly calculates gradients according to column cuts. ([#38724](https://github.com/PaddlePaddle/Paddle/pull/38724)) - + - Fix the bug reported when DistributedStrategy is set to without_graph_optimizer when used with rnn. ([#36176](https://github.com/PaddlePaddle/Paddle/pull/36176)) - + - GPUPS Parameter Server Training - + - Fix the CPU branch compilation bug triggered by the GPUPS macro definition. ([#37248](https://github.com/PaddlePaddle/Paddle/pull/37248)) - + - Fix an occasional error raised when saving delta and pullsparse concurrency during GPUPS streamline training. ([#37233](https://github.com/PaddlePaddle/Paddle/pull/37233)) - + - Fix a download error issue caused by HDFSClient querying a directory without returning the full path. 
([#36590](https://github.com/PaddlePaddle/Paddle/pull/36590)) - + - Fix the bug with pulling old parameters in GPUPS streamline training. ([#36512](https://github.com/PaddlePaddle/Paddle/pull/36512)) - + - Fix a GPUPS multi-stream allocation issue. ([#37476](https://github.com/PaddlePaddle/Paddle/pull/37476)) - + - Fix the bug of the GPUPS pybind out of core. ([#37287](https://github.com/PaddlePaddle/Paddle/pull/37287)) - + #### **Other** - Fix the clip_extra issue when saving models for dynamic graph quantization training. ([#38323](https://github.com/PaddlePaddle/Paddle/pull/38323)) - + - Fix an issue with abs_max scale initialization for dynamic graph quantization training. ([#39307](https://github.com/PaddlePaddle/Paddle/pull/39307)) - + - Fix an issue of exceptions in saving model in dynamic graph quantization training. ([#38102](https://github.com/PaddlePaddle/Paddle/pull/38102), [#38012](https://github.com/PaddlePaddle/Paddle/pull/38012)) - + - Fix the offline quantization flatten op output error. ([#37722](https://github.com/PaddlePaddle/Paddle/pull/37722)) - + - Fix the non-matching dimension bug in case of inverse quantization matmul op. ([#36982](https://github.com/PaddlePaddle/Paddle/pull/36982)) - + - Fix the bug of adding quantization op when quantizing matmul_v2 without weights. ([#36593](https://github.com/PaddlePaddle/Paddle/pull/36593)) - + - Fix the error of saving the quant_axis attribute in the conv op channel-wise quantization when saving the models. ([#39054](https://github.com/PaddlePaddle/Paddle/pull/39054)) - + - Fix the slow training of channel-wise quantization. ([#40772](https://github.com/PaddlePaddle/Paddle/pull/40772)) - + - Fix the bug of quantization training when dividing by tensor(initialized as 0) leads to nan. ([#36762](https://github.com/PaddlePaddle/Paddle/pull/36762)) - + - Fix incorrect settings of amp_level for mixed precision in multi-threaded scenarios. 
([#39198](https://github.com/PaddlePaddle/Paddle/pull/39198)) - + - Fix an issue where PyLayer and Recompute is not set mixed precision correctly when mixed precision training is used with PyLayer and Recompute. ([#39950](https://github.com/PaddlePaddle/Paddle/pull/39950), [#40042](https://github.com/PaddlePaddle/Paddle/pull/40042)) - + - Fix an issue where `D_GLIBCXX_USE_CXX11_ABI` does not take effect when compiling custom operators under Mac. ([#37878](https://github.com/PaddlePaddle/Paddle/pull/37878)) - + - Fix the bug of inconsistent dynamic and static behaviors in case of block=None the initializer-related API. ([#37827](https://github.com/PaddlePaddle/Paddle/pull/37827)) - + - Fix the bug in python 3.6 where there is no fluid module. ([#35862](https://github.com/PaddlePaddle/Paddle/pull/35862)) - + - Fix the bug where optimizer `paddle.optimizer.Adamw` incorrectly calls adam op. ([#36028](https://github.com/PaddlePaddle/Paddle/pull/36028)) - + - Fix a logic error when the `paddle.optimizer.Momentum` optimizer parameter `regularizer` property is None under the multi tensor policy. ([#38344](https://github.com/PaddlePaddle/Paddle/pull/38344)) - + - Fix the bug that the `paddle.optimizer.Momentum` and `paddle.optimizer.Adam` optimizers modify the `multi_precision` property under the multi tensor policy. ([#38991](https://github.com/PaddlePaddle/Paddle/pull/38991)) - + - Fix the code compilation error when using final-state API amp in combination with optional Tensor. ([#40980](https://github.com/PaddlePaddle/Paddle/pull/40980)) - + - Fix the bug where paddle+lite+xpu prediction library would report an error when calling lite CPU prediction, and fix the bug where paddle+lite(without NNAdapter) would report an error when compiling. ([#37449](https://github.com/PaddlePaddle/Paddle/pull/37449)) - + - Fix the bug in Debug compile mode where LoDTensorArray crashes due to inconsistent Pybind11 bindings. 
([#37954](https://github.com/PaddlePaddle/Paddle/pull/37954)) - + - Fix the bug that prevents correct construction of Tensor in the extreme case where the shape parameter is a list of Tensor mix with int. ([#38284](https://github.com/PaddlePaddle/Paddle/pull/38284)) - + - Fix a compatibility issue with the `paddle.optimizer.AdamW` API. ([#37905](https://github.com/PaddlePaddle/Paddle/pull/37905)) - + - Fix the bug in _InstanceNormBase where the returne value of extra_repr is incorrect. ([#38537](https://github.com/PaddlePaddle/Paddle/pull/38537)) - + - Fix the bug that the Paddle Inference lacks of the symbol `paddle::distributed::TensorTable` when the -DWITH_DISTRIBUTED is uesd. ([#41128](https://github.com/PaddlePaddle/Paddle/pull/41128)) - + - matmul_v2 op reports error when there is a 0 value in the shape. ([#35791](https://github.com/PaddlePaddle/Paddle/pull/35791)) - + - Fix the problem of the repeated printing for no gradient input hint message of the recomputed in dynamic graphs. Change it to the printing only once with using warning. ([#38293](https://github.com/PaddlePaddle/Paddle/pull/38293)) - + - Fix the low accuracy bug on the validation set in later epoch training in visual models in the gelu op. ([#38450](https://github.com/PaddlePaddle/Paddle/pull/38450)) - + - Fix adamw op error in numerical computation. ([#37746](https://github.com/PaddlePaddle/Paddle/pull/37746)) - + - Add the parameters in the sparse_momentum `_C_ops` interface. ([#39969](https://github.com/PaddlePaddle/Paddle/pull/39969)) - + - Fix the bug where there is no `distributed` module in python 3.6. ([#35848](https://github.com/PaddlePaddle/Paddle/pull/35848)) - + - Fix the eigh unit test data initialization problem. ([#39568](https://github.com/PaddlePaddle/Paddle/pull/39568)) - + - Fix the eigvalsh unit test data initialization problem. 
([#39841](https://github.com/PaddlePaddle/Paddle/pull/39841)) - + - Fix the bug of not working properly due to excessive register usage on V100 by segment op. ([#38113](https://github.com/PaddlePaddle/Paddle/pull/38113)) - + - Fix the bug with conv-related op sparsification incorrectly set dimension. ([#36054](https://github.com/PaddlePaddle/Paddle/pull/36054)) - + - Provide Automatic SParsity training for static graph-related function Alias to `Paddle.static.sparsity` . ([#36525](https://github.com/PaddlePaddle/Paddle/pull/36525)) - + - Fix the bug where divide op’s integer division is still an integer. ([#40890](https://github.com/PaddlePaddle/Paddle/pull/40890)) - + - Fix the crash bug of`paddle.multiplex` when input Tensor value is 0. ([#34972](https://github.com/PaddlePaddle/Paddle/pull/34972)) - + - Fix a speed exception for set `reduction` parameter in `paddlpaddle.nn.functional.kl_div` . ([#37283](https://github.com/PaddlePaddle/Paddle/pull/37283)) - + - Fix the data source unsorted bug in loading the Cifar dataset. ([#37272](https://github.com/PaddlePaddle/Paddle/pull/37272)) - + - Fix the conversion of loss from uint16 to float in the ProgressBar class. ([#39231](https://github.com/PaddlePaddle/Paddle/pull/39231)) - + - Fix the ShareBufferWith shared data type problem. ([#37464](https://github.com/PaddlePaddle/Paddle/pull/37464), [#37247](https://github.com/PaddlePaddle/Paddle/pull/37247)) - + - Fix the performance issue when `paddle.io.DataLoader` uses IterableDataset and num_workers>0. ([#40541](https://github.com/PaddlePaddle/Paddle/pull/40541)) - + - Fix the bug with `paddle.vision.ops.yolo_loss` returns incomplete values in dynamic graph. ([#40185](https://github.com/PaddlePaddle/Paddle/pull/40185)) - + - Remove the restriction that the input parameter dataset of `paddle.io.BatchSampler` needs to be the `paddle.io.Dataset` type, to expand the support for user-defined datasets. 
([#40184](https://github.com/PaddlePaddle/Paddle/pull/40184)) - + - Fix the bug of `paddle.summary` reporting that op_flops does not exist. ([#36489](https://github.com/PaddlePaddle/Paddle/pull/36489)) - + - Fix the formula error of lars_momentum op when lars_weight_decay=0. ([#40892](https://github.com/PaddlePaddle/Paddle/pull/40892)) - + - Fix the bug that the optimize-offload cannot save presistable var. ([#36433](https://github.com/PaddlePaddle/Paddle/pull/36433)) - + - Fix an issue where optimizer-offload does not support adamw op type. ([#36432](https://github.com/PaddlePaddle/Paddle/pull/36432)) - + - Fix an issue where enable_program_desc_tracing_data in Tracer is not safe in multi-threaded scenarios. ([#39776](https://github.com/PaddlePaddle/Paddle/pull/39776)) - + - Fix an issue where the model file size is not initialized when the model is read. ([#40518](https://github.com/PaddlePaddle/Paddle/pull/40518)) - + - Fix the logic bug of the Expand op. When the dimension of the input Tensor X is smaller than the shape to be expanded, it may result in the incorrect Out.Shape. ([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) - + - Fix the dynamic to static transcription error when the Expand_As op takes only y.shape without Y variable entered. ([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) - + - Fix the logic error when Expand_As op computes the output shape. ([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) - + - Fix the bug that the variables of the `core.VarDesc.VarType.STRINGS` type report error when getting the `lod_level` property and setting its `lod_level` to None. ([#39077](https://github.com/PaddlePaddle/Paddle/pull/39077)) - Fix an issue where the framework function `Pylayer` does not support different dtypes. ([#37974](https://github.com/PaddlePaddle/Paddle/pull/37974)) - + - Fix the bug of division by zero of the learning rate decay API `paddle.optimizer.lr.PolynomialDecay`. 
([#38782](https://github.com/PaddlePaddle/Paddle/pull/38782)) - Fix the issue where some logs remained after calling the DisableGlogInfo() interface. ([#36356](https://github.com/PaddlePaddle/Paddle/pull/36356)) - + - Fix an error in backward of multi-layer RNN (when dropout is set to 0) in the training of SimpleRNN, GRU and LSTM API CPU. ([#37080](https://github.com/PaddlePaddle/Paddle/pull/37080)) - + - Add cache for fft on the backend of cufft and hipfft. ([#36646](https://github.com/PaddlePaddle/Paddle/pull/36646)) - + - Enable the shifts parameter of `paddle.roll` to support transfer in Tensor. ([#36727](https://github.com/PaddlePaddle/Paddle/pull/36727)) - + - Add onemkl to fft as an optional computation backend. ([#36414](https://github.com/PaddlePaddle/Paddle/pull/36414)) -- Fix the precision bug in the bfloat16 type under two mamtul_v2 and elementwise_div ops. ([#42479](https://github.com/PaddlePaddle/Paddle/pull/42479)) +- Fix the precision bug in the bfloat16 type under two mamtul_v2 and elementwise_div ops. ([#42479](https://github.com/PaddlePaddle/Paddle/pull/42479)) - Fix a possible error in the next step caused by LoDTensorArray clearing only the internal Tensor and not clearing the Array during device memory recycling. ([#42398](https://github.com/PaddlePaddle/Paddle/pull/42398)) - + ## **4. Deployment Direction (Paddle Inference)** @@ -2132,305 +2132,305 @@ Add hardware-aware automatic performance tuning for the full training process, w #### **New APIs** - Add the Java API so that Java developers can implement high performance inference on the server and in the cloud through a simple and flexible interface.([#37162](https://github.com/PaddlePaddle/Paddle/pull/37162)) - + - Add `GetTrtCompileVersion` and `GetTrtRuntimeVersion` interfaces for getting TensorRT version information. ([#36429](https://github.com/PaddlePaddle/Paddle/pull/36429)) - + - Add the `ShareExternalData` interface to avoid memory copy of input data during inference. 
([#39809](https://github.com/PaddlePaddle/Paddle/pull/39809)) - + #### **New functions** - Add ONNX Runtime backend support. Currently it supports only CPU in the integrated version. ([#39988](https://github.com/PaddlePaddle/Paddle/pull/39988), [#40561](https://github.com/PaddlePaddle/Paddle/pull/40561)) - + - Add support for Ascend 310 inference based on the Paddle Lite subgraph approach. ([#35226](https://github.com/PaddlePaddle/Paddle/pull/35226)) - + - Add the native GPU FP16 inference. ([#40531](https://github.com/PaddlePaddle/Paddle/pull/40531)) - + - For the switch_ir_debug interface, add the dump model function. ([#36581](https://github.com/PaddlePaddle/Paddle/pull/36581)) - + - Add the configuration interface for TensorRT config: `void UpdateConfigInterleaved(paddle_infer::Config* c, bool with_interleaved)` for special data layout in int8 quantization inference. ([#38884](https://github.com/PaddlePaddle/Paddle/pull/38884)) - + - Add TensorRT inspector output information to the log. It is valid only for TensorRT 8.2 or later. ([#38362](https://github.com/PaddlePaddle/Paddle/pull/38362),[#38200](https://github.com/PaddlePaddle/Paddle/pull/38200))) - + - Add the support of the TensorRT ASP sparse inference. ([#36413](https://github.com/PaddlePaddle/Paddle/pull/36413)) - + ### **(2) Underlying optimization** #### **CPU performance optimization** - Optimize the caching mechanism of MKLDNN. ([#38336](https://github.com/PaddlePaddle/Paddle/pull/38336), [#36980](https://github.com/PaddlePaddle/Paddle/pull/36980), [#36695](https://github.com/PaddlePaddle/Paddle/pull/36695)) - + - Add matmul_scale_fuse pass. ([#37962](https://github.com/PaddlePaddle/Paddle/pull/37962)) - + - Add MKLDNN reshape_transpose_matmul_v2_mkldnn_fuse_pass. ([#37847](https://github.com/PaddlePaddle/Paddle/pull/37847), [#40948](https://github.com/PaddlePaddle/Paddle/pull/40948)) - + - Add MKLDNN conv_hard_sigmoid_mkldnn_fuse_pass. 
([#36869](https://github.com/PaddlePaddle/Paddle/pull/36869)) - + - Add MKLDNN matmul_v2_transpose_reshape_fuse_pass. ([#36481](https://github.com/PaddlePaddle/Paddle/pull/36481)) - + - Add MKLDNN softplus_activation_mkldnn_fuse_pass. ([#36657](https://github.com/PaddlePaddle/Paddle/pull/36657)) - + - Add MKLDNN elt_act_mkldnn_fuse_pass. ([#36541](https://github.com/PaddlePaddle/Paddle/pull/36541)) - + - Add MKLDNN mish operator and conv_mish_mkldnn_fuse_pass. ([#38623](https://github.com/PaddlePaddle/Paddle/pull/38623)) - + #### **GPU performance optimization** - Change the inference default video memory allocation policy from `naive_best_fit` to `auto_growth` , to solve the problem of some models filled up with the GPU video memory. ([#41491](https://github.com/PaddlePaddle/Paddle/pull/41491)) - + - Support gelu and FC+gelu ops using TensorRT inference. ([#38399](https://github.com/PaddlePaddle/Paddle/pull/38399)) - + - Support `deformable_conv` inference using TensorRT under static shape. ([#36612](https://github.com/PaddlePaddle/Paddle/pull/36612) [#36850](https://github.com/PaddlePaddle/Paddle/pull/36850) [#37345](https://github.com/PaddlePaddle/Paddle/pull/37345)) - + - Support nearest_interp_v2 op using TensorRT inference. ([#34126](https://github.com/PaddlePaddle/Paddle/pull/34126)) - + - Add `yolo_box` TensorRT plugin to support input parameters `iou_aware` and `iou_aware_factor` so that the IoU computed by inference is used as a factor for confidence. ([#34128](https://github.com/PaddlePaddle/Paddle/pull/34128)) - + - Support `elementwise_sub` and `elementwise_div` calling for TensorRT inference. ([#40806](https://github.com/PaddlePaddle/Paddle/pull/40806) [#41253](https://github.com/PaddlePaddle/Paddle/pull/41253)) - + - Support `multiclass_nms3` using TensorRT inference. 
([#41181](https://github.com/PaddlePaddle/Paddle/pull/41181) [#41344](https://github.com/PaddlePaddle/Paddle/pull/41344)) - + - Support flatten_contiguous_rang op using TensorRT inference. ([#38922](https://github.com/PaddlePaddle/Paddle/pull/38922)) - + - Support for `pool2d` attribute `padding` using TensorRT inference when dimension is 4, and `global_pooling` and `ceil_mode` are True. ([#39545](https://github.com/PaddlePaddle/Paddle/pull/39545)) - + - Support batch_norm and elementwise_add using TensorRT inference when dimension is 5. ([#36446](https://github.com/PaddlePaddle/Paddle/pull/36446)) - + - Add pool3d to use TensorRT inference. ([#36545](https://github.com/PaddlePaddle/Paddle/pull/36545), [#36783](https://github.com/PaddlePaddle/Paddle/pull/36783)) - + - Add the `reduce` int32 and float types to use TensorRT inference. Add `reduce_mean` GPU operator int32 and int64 registration. ([#39088](https://github.com/PaddlePaddle/Paddle/pull/39088)) - + - Modify MatmulV2ToMul pass. Modify the qualifier (not support of broadcast) and op_teller mapping condition. ([#36652](https://github.com/PaddlePaddle/Paddle/pull/36652)) - + - Add the support for TenorRT plugin interface AddPluginV2IOExt. ([#36493](https://github.com/PaddlePaddle/Paddle/pull/36493)) - + - Add the aligned attribute in roi_align op and support for TensorRT inference. ([#38905](https://github.com/PaddlePaddle/Paddle/pull/38905)) - + - Add the support for TensorRT inference with concat attribute `axis = -1` . ([#39096](https://github.com/PaddlePaddle/Paddle/pull/39096)) - + - Add TensorRT plugin: preln_emb_eltwise_layernorm, preln_skip_la, and rnorm ops, for ERNIE-like model performance optimization. ([#39570](https://github.com/PaddlePaddle/Paddle/pull/39570)) - + - Add TensorRT fuse pass: preln_embedding_eltwise_layernorm_fuse_pass, preln_skip_layernorm_fuse_pass, for ERNIE-like model performance optimization. 
([#39508](https://github.com/PaddlePaddle/Paddle/pull/39508)) - + - Split matmul fusion-related passes based on different backends (GPU, CPU, TensorRT), to support transpose function for FC weights. ([#39369](https://github.com/PaddlePaddle/Paddle/pull/39369)) - Add the support to TensorRT by roll, strided_slice, and slice op in case of dynamic shapes. ([#41913](https://github.com/PaddlePaddle/Paddle/pull/41913), [#41573](https://github.com/PaddlePaddle/Paddle/pull/41573), [#41467](https://github.com/PaddlePaddle/Paddle/pull/41467)) - Add div op support for TensorRT. ([#41243](https://github.com/PaddlePaddle/Paddle/pull/41243)) - + - Quantization support - + - For the `PostTrainingQuantization` API, add the support for `paddle.io.DataLoader` object or `Python Generator` input. ([#38686](https://github.com/PaddlePaddle/Paddle/pull/38686)) - + - ERNIE full quantization model inference supports for interleaved data layout. ([#39424](https://github.com/PaddlePaddle/Paddle/pull/39424)) - + - Support for PaddleSlim new quantile model format inference. ([#41049](https://github.com/PaddlePaddle/Paddle/pull/41049)) - + - Add matmul int8 quantization inference op converter and plugin. ([#37285](https://github.com/PaddlePaddle/Paddle/pull/37285)) - + - Add pass to determine if all ops in the model can support int8 quantization. ([#36042](https://github.com/PaddlePaddle/Paddle/pull/36042)) - + - Support quantization inference for the FC part of the multihead attention of the non-variable-length branch. ([#39660](https://github.com/PaddlePaddle/Paddle/pull/39660)) - + #### **Ascend NPU Related Features** - - Refactor shape operator forward computation logic to support execution on NPU. ([#39613](https://github.com/PaddlePaddle/Paddle/pull/39613)) - + - Refactor reshape operator forward computation logic to support ShapeTensor input. ([#38748](https://github.com/PaddlePaddle/Paddle/pull/38748)) - + - Uniform accuracy type when loading model weights. 
([#39160](https://github.com/PaddlePaddle/Paddle/pull/39160)) - + ### **(3) Bug fixing** #### **Framework and API fixing** - Fix the bug of model clipping when saving static graphs. ([#37579](https://github.com/PaddlePaddle/Paddle/pull/37579)) - + - For the C API, add wrapper PD_Cstr for strings, and provide construction and destructing methods to avoid users to use C runtime library to destruct strings directly. ([#38667](https://github.com/PaddlePaddle/Paddle/pull/38667)) - + - Fix the logic bug with memory reuse at prediction time. ([#37324](https://github.com/PaddlePaddle/Paddle/pull/37324)) - + - Fix memory reuse error reporting in multi-threading. ([#37894](https://github.com/PaddlePaddle/Paddle/pull/37894)) - + - Allow passing empty strings for inference when no weight file is available. ([#38579](https://github.com/PaddlePaddle/Paddle/pull/38579)) - + - Fix an issue of clone not being supported when TensorRT dynamic shape is enabled. ([#38520](https://github.com/PaddlePaddle/Paddle/pull/38520)) - + - Fix multi-threaded clone error after TensorRT dynamic shape is enabled. ([#40067](https://github.com/PaddlePaddle/Paddle/pull/40067)) - + - Fix a TensorRT engine destructing issue. ([#35842](https://github.com/PaddlePaddle/Paddle/pull/35842), [#35938](https://github.com/PaddlePaddle/Paddle/pull/35938)) - + - For the lite xpu interface, fix an issue where the xpu card cannot be selected. ([#36610](https://github.com/PaddlePaddle/Paddle/pull/36610)) - + - The TensorRT dynamic shape parameter automatically generate the interface, to add the file existence check. ([#36628](https://github.com/PaddlePaddle/Paddle/pull/36628)) - -- Fix the bug that the MKLDNN does not support conv3d. ([#42055](https://github.com/PaddlePaddle/Paddle/pull/42055)) + +- Fix the bug that the MKLDNN does not support conv3d. 
([#42055](https://github.com/PaddlePaddle/Paddle/pull/42055)) #### **Backend Capability Fixing** - Fix cuDNN default algorithm selection configuration for prediction, with using non-deterministic policies. ([#41491](https://github.com/PaddlePaddle/Paddle/pull/41491)) - + - Fix the bug with deformable_conv op in TensorRT plugin resource recovery handling error. ([#38374](https://github.com/PaddlePaddle/Paddle/pull/38374)) - + - Fix a serialization error in the TensorRT plugin for deformable_conv op. ([#38057](https://github.com/PaddlePaddle/Paddle/pull/38057)) - + - Adapt the new refactor engine and serialization API of TensorRT 8.0. ([#36769](https://github.com/PaddlePaddle/Paddle/pull/36769)) - + - Fix the bug that the Flatten2MatmulFusePass, Squeeze2MatmulFusePass, and Reshape2MatmulFusePass do not take effect. ([#37644](https://github.com/PaddlePaddle/Paddle/pull/37644)) - + - Fix the bug with TensorRT input data reporting errors. ([#37427](https://github.com/PaddlePaddle/Paddle/pull/37427)) - + - Add error message when input dimension is wrong. ([#38962](https://github.com/PaddlePaddle/Paddle/pull/38962)) - + - Fix the bug with EmbEltwiseLayernorm output type error. ([#40015](https://github.com/PaddlePaddle/Paddle/pull/40015)) - + - Remove conv_affine_channel_fuse_pass and the corresponding unit test. ([#39817](https://github.com/PaddlePaddle/Paddle/pull/39817)) - + - Fix an issue where the adaptive_pool2d pass incorrectly replaces the pool attribute. ([#39600](https://github.com/PaddlePaddle/Paddle/pull/39600)) - + - Fix the bug that shuffle_channel_detect_pass incorrectly generates shuffle_channel op. ([#39242](https://github.com/PaddlePaddle/Paddle/pull/39242)) - + - Fix transpose parameter error. ([#39006](https://github.com/PaddlePaddle/Paddle/pull/39006)) - + - Fix the crash bug when nearest_interp_v2 input scale dimension is less than 1. 
([#38725](https://github.com/PaddlePaddle/Paddle/pull/38725)) - + - Fix the bug that the prelu does not support one-dimensional input in dynamic shape. ([#39389](https://github.com/PaddlePaddle/Paddle/pull/39389)) - + - Fix the bug in the kernel function of slice's special_slice_plugin. ([#39875](https://github.com/PaddlePaddle/Paddle/pull/39875)) - + - Temporarily disable int8 branch under skip_layernorm variable length to prevent accuracy degradation. ([#39991](https://github.com/PaddlePaddle/Paddle/pull/39991)) - + - Fix some bugs regarding support for preln_ernie models. ([#39733](https://github.com/PaddlePaddle/Paddle/pull/39733)) - + - Fix the bug that slice may exceed threads limit in ERNIE. Fix the bug that the spacial_slice is incorrectly triggered. ([#39096](https://github.com/PaddlePaddle/Paddle/pull/39096)) - + - Fix the bug that the elementwise does not support broadcast when the dimension is the same. ([#37908](https://github.com/PaddlePaddle/Paddle/pull/37908)) - + - Fix the problem that the underlying implementation is different in the nearest_interp op when align_corners is True and TensorRT layer results and native op have diff. ([#37525](https://github.com/PaddlePaddle/Paddle/pull/37525)) - + - Fix qkv_plugin: Kernel function computation error. ([#37096](https://github.com/PaddlePaddle/Paddle/pull/37096)) - + - Fix the bug with inference pass for dynamic quantization. ([#35879](https://github.com/PaddlePaddle/Paddle/pull/35879)) - + - Reuse directly when Tensor requests less memory than the allocated size. ([#37880](https://github.com/PaddlePaddle/Paddle/pull/37880)) - + - Fix the hang bug when ERNIE fixed-length model is enabled with TensorRT. ([#37839](https://github.com/PaddlePaddle/Paddle/pull/37839)) - + - Fix the crash bug when TensorRT int8 lacks of dynamic range information. ([#36900](https://github.com/PaddlePaddle/Paddle/pull/36900)) - + - Fix the bug with slice deserialization code. 
([#36588](https://github.com/PaddlePaddle/Paddle/pull/36588)) - + - Fix yolo box calculation formula error. ([#36240](https://github.com/PaddlePaddle/Paddle/pull/36240)) - + - Fix the crash bug when the earlier version model uses a later version of roi_align. ([#38788](https://github.com/PaddlePaddle/Paddle/pull/38788)) External Developers - + - Fix the bug of a large performance difference of softmax between python and C++. ([#37130](https://github.com/PaddlePaddle/Paddle/pull/37130)) - + - Fix matmul inference failure on static shape 2-dimensional input and dynamic shape 3-dimensional input. ([#36849](https://github.com/PaddlePaddle/Paddle/pull/36849)) - + - Fix reshape_transpose_matmul_mkldnn_fuse_pass mishandling of shapes. ([#36731](https://github.com/PaddlePaddle/Paddle/pull/36731)) - + - Fix an issue where TensorRT gets 4 dimensions when the input is 2 dimensions. ([#36614](https://github.com/PaddlePaddle/Paddle/pull/36614)) - + - Fix the bug report when the interpolate_v2 MKLDNN operator is null in the scale attribute. ([#36623](https://github.com/PaddlePaddle/Paddle/pull/36623)) - + - Fix poor performance of the recurrent operator in multi-threaded scenarios. ([#36052](https://github.com/PaddlePaddle/Paddle/pull/36052)) - + - Remove restrictions of relu, sigmoid, tanh, relu6, batch_norm, clip, concat, gelu, hard_sigmoid, prelu, softmax, split, and swish on TensorRT 2-dimensional inputs. ([#37097](https://github.com/PaddlePaddle/Paddle/pull/37097)) - + - Fix reshape op to use TensorRT inference. ([#41090](https://github.com/PaddlePaddle/Paddle/pull/41090)) - + - Fix matmul related pass, which is compatible with matmul_v2. ([#36424](https://github.com/PaddlePaddle/Paddle/pull/36424)) - + - Support VALID and SAME attributes in the padding method of the conv2d operator when TensorRT is enabled. ([#38999](https://github.com/PaddlePaddle/Paddle/pull/38999)) - + - Fix MKLDNN multi-input operator quantization problem. 
([#39593](https://github.com/PaddlePaddle/Paddle/pull/39593), [#39346](https://github.com/PaddlePaddle/Paddle/pull/39346), [#40717](https://github.com/PaddlePaddle/Paddle/pull/40717)) - + - Fix scale error of conv+activation in MKLDNN quantization scenarios. ([#38331](https://github.com/PaddlePaddle/Paddle/pull/38331)) - + - Fix the bug in MKLDNN quantization without parameters where the quantization of subsequent operators is handled differently. ([#39342](https://github.com/PaddlePaddle/Paddle/pull/39342)) - + - Fix a data type related issue in MKLDNN cpu_bfloat16_placement_pass. ([#38702](https://github.com/PaddlePaddle/Paddle/pull/38702)) - + - Fix a split operator execution issue in MKLDNN bfloat16 inference. ([#39548](https://github.com/PaddlePaddle/Paddle/pull/39548)) - + - Fix the bug with MKLDNN matmul_v2 operator not supporting 6 dimensions. ([#36342](https://github.com/PaddlePaddle/Paddle/pull/36342), [#38665](https://github.com/PaddlePaddle/Paddle/pull/38665)) - + - Fix MKLDNN DeviceContext error in MKLDNN matmul_v2_transpose_reshape. ([#38554](https://github.com/PaddlePaddle/Paddle/pull/38554)) - + - Fix incorrectly calculated results for segmentation models in MKLDNN inference scenarios. ([#37310](https://github.com/PaddlePaddle/Paddle/pull/37310)) - + - Fix MKLDNN bfloat16 placement operator list and add the missing operator. ([#36291](https://github.com/PaddlePaddle/Paddle/pull/36291)) - + - Fix the format bug of MKLDNN operators, including: FC, conv_transpose, 6-dimensional Tensor error reporting, and wrong output format of conv to NHWC input. 
([#38890](https://github.com/PaddlePaddle/Paddle/pull/38890), [#37344](https://github.com/PaddlePaddle/Paddle/pull/37344), [#37175](https://github.com/PaddlePaddle/Paddle/pull/37175), [#38553](https://github.com/PaddlePaddle/Paddle/pull/38553), [#40049](https://github.com/PaddlePaddle/Paddle/pull/40049), [#39097](https://github.com/PaddlePaddle/Paddle/pull/39097)) - + - Fix MKLDNN multi-threaded reasoning scenario error due to cache mechanism. ([#36290](https://github.com/PaddlePaddle/Paddle/pull/36290), [#35884](https://github.com/PaddlePaddle/Paddle/pull/35884)) - + - Fix MKLDNN quantization model accuracy anomaly caused by matmul and FC. ([#38023](https://github.com/PaddlePaddle/Paddle/pull/38023), [#37618](https://github.com/PaddlePaddle/Paddle/pull/37618)) - + - Fix the abnormal quantization model accuracy issue in MKLDNN quantization conversion scripts caused by missing passes. ([#37619](https://github.com/PaddlePaddle/Paddle/pull/37619), [#40542](https://github.com/PaddlePaddle/Paddle/pull/40542),[#38912](https://github.com/PaddlePaddle/Paddle/pull/38912)) - + - Fix the crash bug in MKLDNN enabling volume op due to data type mismatch. ([#38133](https://github.com/PaddlePaddle/Paddle/pull/38133)) - + - Fix an issue where some MKLDNN ops need to change back to the original layout after modifying the layout. ([#39422](https://github.com/PaddlePaddle/Paddle/pull/39422)) - + - Fix the bug of Python API error report due to conflict with Ascend software stack, because the GIL lock is not released in the Ascend 910 inference scenario. ([#38605](https://github.com/PaddlePaddle/Paddle/pull/38605)) - + ## **5. Environment Adaptation** ### **Compile and Install** - + - From version 2.3.0, PaddlePaddle has adjusted and upgraded the types of GPU architectures supported by the framework. 
(For more information, please refer to: [GPU architectures supported by PaddlePaddle](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.3rc/install/Tables.html#gpu)) - + Notes: - PIP source installation means downloading the installation package and dependency libraries from PIP official website with using `pip install paddlepaddle` or `pip install paddlepaddle-gpu` . This supports less architecture types, and lighter installation package,and only one CUDA version of the installation package is provided(compared with BOS source). - + - Prior to version 2.3, the PIP source installer (CUDA10.2) supports the following GPU architectures: 3.5, 5.0, 5.2, 6.0, 6.1, 7.0, and 7.5. - + - Later than version 2.3, the PIP source installer (CUDA11.0) supports the following GPU architectures: 6.0, 6.1, 7.0, 7.5, 8.0 - + - The BOS source is a way to download the installation package and dependency libraries from the official website of PaddlePaddle, which supports more GPU architectures. The download source is from China and it is much faster.(compared with PIP source, it supports more kinds of architectures and provides multiple CUDA versions of installation packages). - + - Prior to version 2.3, the GPU architectures supported by the bos source installer on the PaddlePaddle website: - + - CUDA10 : 3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5; - + - CUDA11 : 5.2,6.0,6.1,7.0,7.5,8.0。 - + - Later than version 2.3, the GPU architectures supported by the bos source installer on the PaddlePaddle website: - + - CUDA10 : 3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5; - + - CUDA11 : 3.5, 5.0, 6.0, 6.1, 7.0, 7.5, 8.0。 - + - Support Python 3.10. Fix compilation bugs caused by some PythonC API changes on Windows. ([#41180](https://github.com/PaddlePaddle/Paddle/pull/42180)) - The Windows platform supports the compilation through Visual Studio 2019. ([#38719](https://github.com/PaddlePaddle/Paddle/pull/38719)) - + - Eliminate various warnings when compiling on the Windows platform. 
([#38034](https://github.com/PaddlePaddle/Paddle/pull/38034), [#37890](https://github.com/PaddlePaddle/Paddle/pull/37890), [#37442](https://github.com/PaddlePaddle/Paddle/pull/37442), [#37439](https://github.com/PaddlePaddle/Paddle/pull/37439), [#36857](https://github.com/PaddlePaddle/Paddle/pull/36857)) - + - Fix jetson compilation issues introduced by the underlying data structure upgrade. ([#39669](https://github.com/PaddlePaddle/Paddle/pull/39669), [#39441](https://github.com/PaddlePaddle/Paddle/pull/39441)) - + ### **New Hardware Backend Extention** - Custom device support: provide a plug-in way to extend PaddlePaddle hardware backend. With this function, developers do not need to modify PaddlePaddle codes for specific hardware, but simply implement the standard interface and compile it into a dynamic link library that can be called by PaddlePaddle as a plug-in.This reduces the development effort of adding a new hardware backend to PaddlePaddle. Currently it supports custom Runtime and custom Kernel. - + - Support Huawei NPU chip (Ascend910) training/inference. Support ResNet50, YoloV3, BERT, Transformer and many other models. Support static + dynamic graph and auto-mixed precision training. Support single card, and distribute training across multiple cards, multiple machines. - + - Support Graphcore IPU chip (including IPU Mk2 GC200 and Bow IPU) training/inference. Support ResNet50, BERT and other models. Support static graph training. Support single card, and distribute training across multiple cards, multiple machines. - + - Support cambricon MLU chip (MLU370x4) training/inference. Support models such as ResNet50. Support static graph + dynamic graph training. Support auto-mixed precision training. Support single card, and distribute training across multiple cards, multiple machines. - + - Support KUNLUNXIN 2 chips (KUNLUNXIN AI acceleration cards R200, R300) training/inference. 
Support ResNet50, YoloV3, OCR-DB, SSD, MobilnetV3, UNet, BERT, Transformer, GPT-2, Wide&Deep, and DeepFM. Support static graph + dynamic graph training. Support auto-mixed precision training. Support single card, and distribute training across multiple cards, multiple machines. - + ## Thanks to our Contributors From 6159bdc89cbf5fbe4763b6ca6806ae686651fd69 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 09:51:37 +0000 Subject: [PATCH 04/20] ensure-final-newline and trim-trailing-blank-lines --- docs/advanced_guide/index_en.rst | 1 - .../cpu_train_best_practice.rst | 2 +- .../cpu_train_best_practice_en.rst | 2 +- docs/api/index_en.rst | 1 - docs/api/paddle/NPUPlace_cn.rst | 2 +- docs/api/paddle/ParamAttr_cn.rst | 2 +- docs/api/paddle/abs_cn.rst | 2 +- docs/api/paddle/acos_cn.rst | 2 +- docs/api/paddle/acosh_cn.rst | 2 +- docs/api/paddle/add_cn.rst | 2 +- docs/api/paddle/addmm_cn.rst | 2 +- docs/api/paddle/all_cn.rst | 2 +- docs/api/paddle/allclose_cn.rst | 2 +- docs/api/paddle/amp/Overview_cn.rst | 2 - docs/api/paddle/angle_cn.rst | 2 +- docs/api/paddle/any_cn.rst | 2 +- docs/api/paddle/arange_cn.rst | 2 +- docs/api/paddle/argmax_cn.rst | 2 +- docs/api/paddle/argsort_cn.rst | 2 +- docs/api/paddle/as_complex_cn.rst | 2 +- docs/api/paddle/as_real_cn.rst | 2 +- docs/api/paddle/asin_cn.rst | 2 +- docs/api/paddle/asinh_cn.rst | 2 +- docs/api/paddle/atan2_cn.rst | 2 +- docs/api/paddle/atan_cn.rst | 2 +- docs/api/paddle/atanh_cn.rst | 2 +- docs/api/paddle/autograd/PyLayer_cn.rst | 1 - docs/api/paddle/autograd/backward_cn.rst | 2 +- docs/api/paddle/batch_cn.rst | 2 +- docs/api/paddle/bincount_cn.rst | 2 - docs/api/paddle/bitwise_and_cn.rst | 2 +- docs/api/paddle/bitwise_not_cn.rst | 2 +- docs/api/paddle/bitwise_or_cn.rst | 2 +- docs/api/paddle/bitwise_xor_cn.rst | 2 +- docs/api/paddle/bmm_cn.rst | 1 - docs/api/paddle/broadcast_shape_cn.rst | 2 +- docs/api/paddle/broadcast_tensors_cn.rst | 2 +- docs/api/paddle/broadcast_to_cn.rst | 2 +- 
.../api/paddle/callbacks/EarlyStopping_cn.rst | 2 +- docs/api/paddle/callbacks/LRScheduler_cn.rst | 2 +- .../paddle/callbacks/ModelCheckpoint_cn.rst | 2 +- .../api/paddle/callbacks/ProgBarLogger_cn.rst | 2 +- .../paddle/callbacks/ReduceLROnPlateau_cn.rst | 2 +- docs/api/paddle/callbacks/VisualDL_cn.rst | 2 +- docs/api/paddle/cast_cn.rst | 2 +- docs/api/paddle/ceil_cn.rst | 2 +- docs/api/paddle/chunk_cn.rst | 2 +- docs/api/paddle/clip_cn.rst | 2 +- docs/api/paddle/clone_cn.rst | 2 +- docs/api/paddle/compat/long_type_cn.rst | 1 - docs/api/paddle/compat/to_bytes_cn.rst | 2 +- docs/api/paddle/compat/to_text_cn.rst | 2 +- docs/api/paddle/complex_cn.rst | 2 +- docs/api/paddle/concat_cn.rst | 2 +- docs/api/paddle/conj_cn.rst | 2 +- docs/api/paddle/cos_cn.rst | 2 +- docs/api/paddle/cosh_cn.rst | 2 +- docs/api/paddle/cross_cn.rst | 2 +- docs/api/paddle/cumprod_cn.rst | 2 +- docs/api/paddle/cumsum_cn.rst | 2 +- docs/api/paddle/device/cuda/Stream_cn.rst | 1 - .../paddle/device/cuda/current_stream_cn.rst | 1 - .../paddle/device/cuda/device_count_cn.rst | 2 +- .../api/paddle/device/cuda/empty_cache_cn.rst | 2 +- .../device/cuda/get_device_properties_cn.rst | 2 - .../device/cuda/max_memory_allocated_cn.rst | 2 - .../device/cuda/max_memory_reserved_cn.rst | 2 - .../device/cuda/memory_allocated_cn.rst | 2 - .../paddle/device/cuda/memory_reserved_cn.rst | 2 - .../api/paddle/device/cuda/synchronize_cn.rst | 1 - .../paddle/device/get_cudnn_version_cn.rst | 2 +- docs/api/paddle/device/get_device_cn.rst | 2 +- .../device/is_compiled_with_cinn_cn.rst | 2 +- .../device/is_compiled_with_cuda_cn.rst | 2 +- .../paddle/device/is_compiled_with_npu_cn.rst | 1 - .../device/is_compiled_with_rocm_cn.rst | 2 +- docs/api/paddle/device/set_device_cn.rst | 2 +- docs/api/paddle/diag_cn.rst | 5 -- docs/api/paddle/diagflat_cn.rst | 5 -- docs/api/paddle/diagonal_cn.rst | 2 +- docs/api/paddle/digamma_cn.rst | 2 +- docs/api/paddle/disable_signal_handler_cn.rst | 2 +- docs/api/paddle/disable_static_cn.rst | 
2 +- docs/api/paddle/dist_cn.rst | 2 +- .../paddle/distributed/InMemoryDataset_cn.rst | 3 -- docs/api/paddle/distributed/all_gather_cn.rst | 2 +- docs/api/paddle/distributed/all_reduce_cn.rst | 2 +- docs/api/paddle/distributed/alltoall_cn.rst | 2 +- docs/api/paddle/distributed/barrier_cn.rst | 2 +- docs/api/paddle/distributed/broadcast_cn.rst | 2 +- .../fleet/DistributedStrategy_cn.rst | 1 - .../api/paddle/distributed/fleet/Fleet_cn.rst | 2 - .../fleet/PaddleCloudRoleMaker_cn.rst | 1 - .../fleet/UserDefinedRoleMaker_cn.rst | 5 -- .../distributed/fleet/utils/HDFSClient_cn.rst | 3 -- .../distributed/fleet/utils/LocalFS_cn.rst | 2 +- .../distributed/fleet/utils/recompute_cn.rst | 2 +- docs/api/paddle/distributed/get_group_cn.rst | 1 - .../paddle/distributed/get_world_size_cn.rst | 2 +- .../paddle/distributed/gloo_barrier_cn.rst | 2 +- .../distributed/gloo_init_parallel_env_cn.rst | 2 +- .../paddle/distributed/gloo_release_cn.rst | 2 +- .../distributed/init_parallel_env_cn.rst | 2 +- docs/api/paddle/distributed/irecv_cn.rst | 2 +- .../paddle/distributed/is_initialized_cn.rst | 2 +- docs/api/paddle/distributed/isend_cn.rst | 2 +- docs/api/paddle/distributed/new_group_cn.rst | 1 - docs/api/paddle/distributed/reduce_cn.rst | 2 +- .../paddle/distributed/reduce_scatter_cn.rst | 2 +- docs/api/paddle/distributed/scatter_cn.rst | 2 +- docs/api/paddle/distributed/send_cn.rst | 2 +- docs/api/paddle/distributed/spawn_cn.rst | 2 +- docs/api/paddle/distributed/split_cn.rst | 2 +- .../distributed/utils/global_gather_cn.rst | 2 +- .../distributed/utils/global_scatter_cn.rst | 2 +- .../paddle/distribution/AbsTransform_cn.rst | 1 - .../distribution/AffineTransform_cn.rst | 1 - .../paddle/distribution/Categorical_cn.rst | 1 - .../paddle/distribution/ChainTransform_cn.rst | 1 - .../paddle/distribution/ExpTransform_cn.rst | 1 - .../distribution/ExponentialFamily_cn.rst | 9 ---- .../distribution/IndependentTransform_cn.rst | 1 - .../paddle/distribution/Multinomial_cn.rst | 2 - 
docs/api/paddle/distribution/Overview_cn.rst | 2 +- .../paddle/distribution/PowerTransform_cn.rst | 1 - .../distribution/ReshapeTransform_cn.rst | 1 - .../distribution/SigmoidTransform_cn.rst | 1 - .../distribution/SoftmaxTransform_cn.rst | 1 - .../paddle/distribution/StackTransform_cn.rst | 1 - .../StickBreakingTransform_cn.rst | 1 - .../paddle/distribution/TanhTransform_cn.rst | 1 - docs/api/paddle/distribution/Transform_cn.rst | 1 - .../TransformedDistribution_cn.rst | 2 - docs/api/paddle/divide_cn.rst | 2 +- docs/api/paddle/dot_cn.rst | 2 +- docs/api/paddle/einsum_cn.rst | 2 +- docs/api/paddle/empty_like_cn.rst | 2 +- docs/api/paddle/enable_static_cn.rst | 2 +- docs/api/paddle/equal_all_cn.rst | 2 +- docs/api/paddle/equal_cn.rst | 2 +- docs/api/paddle/erf_cn.rst | 2 +- docs/api/paddle/erfinv__cn.rst | 2 +- docs/api/paddle/erfinv_cn.rst | 2 +- docs/api/paddle/exp_cn.rst | 2 +- docs/api/paddle/expand_as_cn.rst | 1 - docs/api/paddle/expand_cn.rst | 2 +- docs/api/paddle/expm1_cn.rst | 2 +- docs/api/paddle/eye_cn.rst | 2 +- docs/api/paddle/flatten_cn.rst | 2 - docs/api/paddle/flip_cn.rst | 1 - docs/api/paddle/floor_cn.rst | 2 +- docs/api/paddle/floor_divide_cn.rst | 2 +- docs/api/paddle/fmax_cn.rst | 2 +- docs/api/paddle/fmin_cn.rst | 2 +- docs/api/paddle/frac_cn.rst | 2 +- docs/api/paddle/full_like_cn.rst | 1 - docs/api/paddle/gather_cn.rst | 1 - docs/api/paddle/gather_nd_cn.rst | 2 - docs/api/paddle/get_cuda_rng_state_cn.rst | 2 +- docs/api/paddle/get_default_dtype_cn.rst | 2 +- docs/api/paddle/get_flags_cn.rst | 2 +- docs/api/paddle/greater_equal_cn.rst | 2 +- docs/api/paddle/greater_than_cn.rst | 2 +- docs/api/paddle/histogram_cn.rst | 2 +- docs/api/paddle/imag_cn.rst | 2 +- docs/api/paddle/in_dynamic_mode_cn.rst | 2 +- docs/api/paddle/increment_cn.rst | 2 +- docs/api/paddle/incubate/LookAhead_cn.rst | 1 - .../paddle/incubate/autograd/Jacobian_cn.rst | 1 - .../incubate/autograd/disable_prim_cn.rst | 2 +- .../incubate/autograd/enable_prim_cn.rst | 2 +- 
docs/api/paddle/incubate/autograd/jvp_cn.rst | 1 - .../paddle/incubate/autograd/prim2orig_cn.rst | 2 +- .../incubate/autograd/prim_enabled_cn.rst | 2 +- docs/api/paddle/incubate/autograd/vjp_cn.rst | 1 - .../paddle/incubate/graph_khop_sampler_cn.rst | 2 +- .../incubate/graph_sample_neighbors_cn.rst | 2 +- .../incubate/nn/FusedFeedForward_cn.rst | 2 +- .../nn/FusedMultiHeadAttention_cn.rst | 2 +- .../nn/FusedTransformerEncoderLayer_cn.rst | 2 +- .../nn/functional/fused_feedforward_cn.rst | 1 - .../fused_multi_head_attention_cn.rst | 2 - .../optimizer/functional/minimize_bfgs_cn.rst | 2 +- .../functional/minimize_lbfgs_cn.rst | 2 +- docs/api/paddle/incubate/segment_max_cn.rst | 2 +- docs/api/paddle/incubate/segment_mean_cn.rst | 2 +- docs/api/paddle/incubate/segment_min_cn.rst | 2 +- docs/api/paddle/incubate/segment_sum_cn.rst | 2 +- .../paddle/incubate/softmax_mask_fuse_cn.rst | 2 +- .../softmax_mask_fuse_upper_triangle_cn.rst | 2 +- docs/api/paddle/index_sample_cn.rst | 2 +- docs/api/paddle/index_select_cn.rst | 2 +- docs/api/paddle/io/BatchSampler_cn.rst | 2 +- docs/api/paddle/io/DataLoader_cn.rst | 1 - docs/api/paddle/io/IterableDataset_cn.rst | 1 - docs/api/paddle/io/Overview_cn.rst | 1 - docs/api/paddle/io/RandomSampler_cn.rst | 2 +- docs/api/paddle/io/Sampler_cn.rst | 2 +- docs/api/paddle/io/SequenceSampler_cn.rst | 2 +- docs/api/paddle/io/Subset_cn.rst | 2 +- .../paddle/io/WeightedRandomSampler_cn.rst | 2 +- docs/api/paddle/io/random_split_cn.rst | 2 +- docs/api/paddle/is_complex_cn.rst | 2 +- docs/api/paddle/is_empty_cn.rst | 2 +- docs/api/paddle/is_floating_point_cn.rst | 2 +- docs/api/paddle/is_grad_enabled_cn.rst | 2 +- docs/api/paddle/is_integer_cn.rst | 2 +- docs/api/paddle/is_tensor_cn.rst | 2 +- docs/api/paddle/isclose_cn.rst | 2 +- docs/api/paddle/isfinite_cn.rst | 2 +- docs/api/paddle/isinf_cn.rst | 2 +- docs/api/paddle/isnan_cn.rst | 2 +- docs/api/paddle/jit/Overview_cn.rst | 1 - docs/api/paddle/jit/ProgramTranslator_cn.rst | 1 - 
docs/api/paddle/jit/not_to_static_cn.rst | 1 - docs/api/paddle/jit/save_cn.rst | 2 +- docs/api/paddle/jit/set_code_level_cn.rst | 2 +- docs/api/paddle/jit/set_verbosity_cn.rst | 2 +- docs/api/paddle/kron_cn.rst | 2 +- docs/api/paddle/kthvalue_cn.rst | 2 +- docs/api/paddle/lerp_cn.rst | 2 +- docs/api/paddle/less_equal_cn.rst | 2 +- docs/api/paddle/less_than_cn.rst | 2 +- docs/api/paddle/lgamma_cn.rst | 2 +- docs/api/paddle/linalg/cholesky_cn.rst | 2 +- docs/api/paddle/linalg/cholesky_solve_cn.rst | 2 +- docs/api/paddle/linalg/cond_cn.rst | 2 +- docs/api/paddle/linalg/cov_cn.rst | 2 +- docs/api/paddle/linalg/det_cn.rst | 2 +- docs/api/paddle/linalg/eig_cn.rst | 2 +- docs/api/paddle/linalg/eigh_cn.rst | 2 +- docs/api/paddle/linalg/eigvals_cn.rst | 2 +- docs/api/paddle/linalg/eigvalsh_cn.rst | 2 +- docs/api/paddle/linalg/inv_cn.rst | 2 +- docs/api/paddle/linalg/lstsq_cn.rst | 2 +- docs/api/paddle/linalg/matrix_power_cn.rst | 2 +- docs/api/paddle/linalg/matrix_rank_cn.rst | 2 +- docs/api/paddle/linalg/multi_dot_cn.rst | 2 +- docs/api/paddle/linalg/norm_cn.rst | 2 +- docs/api/paddle/linalg/pinv_cn.rst | 2 +- docs/api/paddle/linalg/qr_cn.rst | 2 +- docs/api/paddle/linalg/slogdet_cn.rst | 2 +- docs/api/paddle/linalg/solve_cn.rst | 2 +- docs/api/paddle/linalg/svd_cn.rst | 2 +- .../api/paddle/linalg/triangular_solve_cn.rst | 2 +- docs/api/paddle/linspace_cn.rst | 2 +- docs/api/paddle/log10_cn.rst | 2 +- docs/api/paddle/log1p_cn.rst | 2 +- docs/api/paddle/log2_cn.rst | 2 +- docs/api/paddle/log_cn.rst | 2 +- docs/api/paddle/logical_and_cn.rst | 2 +- docs/api/paddle/logical_not_cn.rst | 2 +- docs/api/paddle/logical_or_cn.rst | 2 +- docs/api/paddle/logical_xor_cn.rst | 2 +- docs/api/paddle/logit_cn.rst | 2 +- docs/api/paddle/logsumexp_cn.rst | 2 +- docs/api/paddle/masked_select_cn.rst | 2 +- docs/api/paddle/matmul_cn.rst | 2 +- docs/api/paddle/maximum_cn.rst | 2 +- docs/api/paddle/mean_cn.rst | 2 +- docs/api/paddle/median_cn.rst | 2 +- docs/api/paddle/meshgrid_cn.rst | 2 +- 
docs/api/paddle/minimum_cn.rst | 2 +- docs/api/paddle/mm_cn.rst | 2 +- docs/api/paddle/mod_cn.rst | 2 +- docs/api/paddle/mode_cn.rst | 2 +- docs/api/paddle/moveaxis_cn.rst | 2 +- docs/api/paddle/multinomial_cn.rst | 2 +- docs/api/paddle/multiplex_cn.rst | 1 - docs/api/paddle/multiply_cn.rst | 2 +- docs/api/paddle/nanmedian_cn.rst | 1 - docs/api/paddle/neg_cn.rst | 2 +- docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst | 2 +- docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst | 2 +- docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst | 2 +- docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst | 2 +- docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst | 2 +- docs/api/paddle/nn/AlphaDropout_cn.rst | 2 +- docs/api/paddle/nn/AvgPool1D_cn.rst | 2 +- docs/api/paddle/nn/AvgPool2D_cn.rst | 2 +- docs/api/paddle/nn/AvgPool3D_cn.rst | 2 +- docs/api/paddle/nn/BCELoss_cn.rst | 2 +- docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst | 2 +- docs/api/paddle/nn/BatchNorm1D_cn.rst | 1 - docs/api/paddle/nn/BatchNorm2D_cn.rst | 1 - docs/api/paddle/nn/BatchNorm3D_cn.rst | 1 - docs/api/paddle/nn/BeamSearchDecoder_cn.rst | 1 - docs/api/paddle/nn/BiRNN_cn.rst | 2 +- docs/api/paddle/nn/Bilinear_cn.rst | 2 +- docs/api/paddle/nn/CELU_cn.rst | 2 +- docs/api/paddle/nn/CTCLoss_cn.rst | 2 +- .../api/paddle/nn/ClipGradByGlobalNorm_cn.rst | 1 - docs/api/paddle/nn/ClipGradByNorm_cn.rst | 1 - docs/api/paddle/nn/Conv1DTranspose_cn.rst | 2 +- docs/api/paddle/nn/Conv1D_cn.rst | 2 +- docs/api/paddle/nn/Conv2DTranspose_cn.rst | 2 +- docs/api/paddle/nn/Conv2D_cn.rst | 2 +- docs/api/paddle/nn/Conv3DTranspose_cn.rst | 2 +- docs/api/paddle/nn/Conv3D_cn.rst | 2 +- docs/api/paddle/nn/CosineSimilarity_cn.rst | 2 +- docs/api/paddle/nn/CrossEntropyLoss_cn.rst | 2 +- docs/api/paddle/nn/Dropout2D_cn.rst | 2 +- docs/api/paddle/nn/Dropout3D_cn.rst | 2 +- docs/api/paddle/nn/Dropout_cn.rst | 2 +- docs/api/paddle/nn/ELU_cn.rst | 2 +- docs/api/paddle/nn/Embedding_cn.rst | 2 - docs/api/paddle/nn/Fold_cn.rst | 2 +- docs/api/paddle/nn/GELU_cn.rst | 2 +- 
docs/api/paddle/nn/GRUCell_cn.rst | 2 +- docs/api/paddle/nn/GRU_cn.rst | 2 +- docs/api/paddle/nn/HSigmoidLoss_cn.rst | 2 +- docs/api/paddle/nn/Hardshrink_cn.rst | 2 +- docs/api/paddle/nn/Hardsigmoid_cn.rst | 2 +- docs/api/paddle/nn/Hardswish_cn.rst | 2 +- docs/api/paddle/nn/Hardtanh_cn.rst | 2 +- docs/api/paddle/nn/HingeEmbeddingLoss_cn.rst | 2 +- docs/api/paddle/nn/Identity_cn.rst | 2 +- docs/api/paddle/nn/InstanceNorm1D_cn.rst | 1 - docs/api/paddle/nn/InstanceNorm2D_cn.rst | 2 - docs/api/paddle/nn/InstanceNorm3D_cn.rst | 1 - docs/api/paddle/nn/KLDivLoss_cn.rst | 2 +- docs/api/paddle/nn/L1Loss_cn.rst | 2 +- docs/api/paddle/nn/LSTMCell_cn.rst | 2 +- docs/api/paddle/nn/LSTM_cn.rst | 2 +- docs/api/paddle/nn/LayerNorm_cn.rst | 1 - docs/api/paddle/nn/Layer_cn.rst | 1 - docs/api/paddle/nn/LeakyReLU_cn.rst | 2 +- docs/api/paddle/nn/Linear_cn.rst | 2 +- docs/api/paddle/nn/LocalResponseNorm_cn.rst | 2 +- docs/api/paddle/nn/LogSigmoid_cn.rst | 2 +- docs/api/paddle/nn/LogSoftmax_cn.rst | 2 +- docs/api/paddle/nn/MSELoss_cn.rst | 2 +- docs/api/paddle/nn/MarginRankingLoss_cn.rst | 2 +- docs/api/paddle/nn/MaxPool1D_cn.rst | 2 +- docs/api/paddle/nn/MaxPool2D_cn.rst | 2 +- docs/api/paddle/nn/MaxPool3D_cn.rst | 2 +- docs/api/paddle/nn/MaxUnPool1D_cn.rst | 2 +- docs/api/paddle/nn/MaxUnPool2D_cn.rst | 2 +- docs/api/paddle/nn/Maxout_cn.rst | 2 +- docs/api/paddle/nn/Mish_cn.rst | 2 +- docs/api/paddle/nn/MultiHeadAttention_cn.rst | 2 +- docs/api/paddle/nn/NLLLoss_cn.rst | 2 +- docs/api/paddle/nn/PReLU_cn.rst | 2 +- docs/api/paddle/nn/Pad1D_cn.rst | 2 +- docs/api/paddle/nn/Pad2D_cn.rst | 2 +- docs/api/paddle/nn/Pad3D_cn.rst | 2 +- docs/api/paddle/nn/PairwiseDistance_cn.rst | 2 +- docs/api/paddle/nn/ParameterList_cn.rst | 2 +- docs/api/paddle/nn/RNN_cn.rst | 2 +- docs/api/paddle/nn/ReLU6_cn.rst | 2 +- docs/api/paddle/nn/ReLU_cn.rst | 2 +- docs/api/paddle/nn/SELU_cn.rst | 2 +- docs/api/paddle/nn/Sequential_cn.rst | 2 +- docs/api/paddle/nn/Sigmoid_cn.rst | 2 +- 
docs/api/paddle/nn/Silu_cn.rst | 2 +- docs/api/paddle/nn/SimpleRNNCell_cn.rst | 2 +- docs/api/paddle/nn/SimpleRNN_cn.rst | 2 +- docs/api/paddle/nn/SmoothL1Loss_cn.rst | 2 +- docs/api/paddle/nn/Softplus_cn.rst | 2 +- docs/api/paddle/nn/Softshrink_cn.rst | 2 +- docs/api/paddle/nn/Softsign_cn.rst | 2 +- docs/api/paddle/nn/Swish_cn.rst | 2 +- docs/api/paddle/nn/SyncBatchNorm_cn.rst | 1 - docs/api/paddle/nn/Tanh_cn.rst | 2 +- docs/api/paddle/nn/Tanhshrink_cn.rst | 2 +- docs/api/paddle/nn/ThresholdedReLU_cn.rst | 2 +- .../paddle/nn/TransformerDecoderLayer_cn.rst | 2 +- docs/api/paddle/nn/TransformerDecoder_cn.rst | 2 +- .../paddle/nn/TransformerEncoderLayer_cn.rst | 2 +- docs/api/paddle/nn/TransformerEncoder_cn.rst | 2 +- docs/api/paddle/nn/Transformer_cn.rst | 1 - docs/api/paddle/nn/Unfold_cn.rst | 2 +- docs/api/paddle/nn/Upsample_cn.rst | 2 +- .../api/paddle/nn/UpsamplingBilinear2D_cn.rst | 2 +- docs/api/paddle/nn/UpsamplingNearest2D_cn.rst | 2 +- docs/api/paddle/nn/ZeroPad2D_cn.rst | 2 +- docs/api/paddle/nn/dynamic_decode_cn.rst | 2 +- .../nn/functional/adaptive_avg_pool2d_cn.rst | 2 +- .../nn/functional/adaptive_avg_pool3d_cn.rst | 2 +- .../nn/functional/adaptive_max_pool1d_cn.rst | 2 +- .../nn/functional/adaptive_max_pool2d_cn.rst | 2 +- .../nn/functional/adaptive_max_pool3d_cn.rst | 2 +- .../paddle/nn/functional/affine_grid_cn.rst | 2 +- .../paddle/nn/functional/alpha_dropout_cn.rst | 2 +- .../paddle/nn/functional/avg_pool1d_cn.rst | 2 +- .../paddle/nn/functional/avg_pool2d_cn.rst | 2 +- .../paddle/nn/functional/avg_pool3d_cn.rst | 2 +- .../paddle/nn/functional/batch_norm_cn.rst | 2 +- docs/api/paddle/nn/functional/bilinear_cn.rst | 2 +- .../nn/functional/binary_cross_entropy_cn.rst | 2 +- .../binary_cross_entropy_with_logits_cn.rst | 2 +- docs/api/paddle/nn/functional/celu_cn.rst | 2 +- docs/api/paddle/nn/functional/conv1d_cn.rst | 2 +- .../nn/functional/conv1d_transpose_cn.rst | 2 +- docs/api/paddle/nn/functional/conv2d_cn.rst | 2 +- 
.../nn/functional/conv2d_transpose_cn.rst | 2 +- docs/api/paddle/nn/functional/conv3d_cn.rst | 2 +- .../nn/functional/conv3d_transpose_cn.rst | 2 +- .../functional/cosine_embedding_loss_cn.rst | 1 - .../nn/functional/cosine_similarity_cn.rst | 2 +- .../paddle/nn/functional/cross_entropy_cn.rst | 2 +- docs/api/paddle/nn/functional/ctc_loss_cn.rst | 2 +- .../api/paddle/nn/functional/dice_loss_cn.rst | 2 +- .../api/paddle/nn/functional/dropout2d_cn.rst | 2 +- .../api/paddle/nn/functional/dropout3d_cn.rst | 2 +- docs/api/paddle/nn/functional/elu_cn.rst | 2 +- .../api/paddle/nn/functional/embedding_cn.rst | 2 - docs/api/paddle/nn/functional/fold_cn.rst | 2 +- .../paddle/nn/functional/gather_tree_cn.rst | 2 +- docs/api/paddle/nn/functional/gelu_cn.rst | 2 +- .../paddle/nn/functional/grid_sample_cn.rst | 1 - .../nn/functional/gumbel_softmax_cn.rst | 2 +- .../paddle/nn/functional/hardshrink_cn.rst | 2 +- .../paddle/nn/functional/hardsigmoid_cn.rst | 2 +- .../api/paddle/nn/functional/hardswish_cn.rst | 2 +- docs/api/paddle/nn/functional/hardtanh_cn.rst | 2 +- .../nn/functional/hinge_embedding_loss_cn.rst | 2 +- .../paddle/nn/functional/hsigmoid_loss_cn.rst | 2 +- .../paddle/nn/functional/instance_norm_cn.rst | 2 +- .../paddle/nn/functional/interpolate_cn.rst | 2 +- docs/api/paddle/nn/functional/kl_div_cn.rst | 2 +- docs/api/paddle/nn/functional/l1_loss_cn.rst | 2 +- .../paddle/nn/functional/label_smooth_cn.rst | 2 +- .../paddle/nn/functional/layer_norm_cn.rst | 2 +- .../paddle/nn/functional/leaky_relu_cn.rst | 2 +- docs/api/paddle/nn/functional/linear_cn.rst | 2 +- .../nn/functional/local_response_norm_cn.rst | 2 +- docs/api/paddle/nn/functional/log_loss_cn.rst | 2 +- .../paddle/nn/functional/log_sigmoid_cn.rst | 2 +- .../paddle/nn/functional/log_softmax_cn.rst | 2 +- .../nn/functional/margin_cross_entropy_cn.rst | 2 +- .../nn/functional/margin_ranking_loss_cn.rst | 2 +- .../paddle/nn/functional/max_pool1d_cn.rst | 2 +- .../paddle/nn/functional/max_pool2d_cn.rst | 2 +- 
.../paddle/nn/functional/max_pool3d_cn.rst | 2 +- .../paddle/nn/functional/max_unpool1d_cn.rst | 2 +- .../paddle/nn/functional/max_unpool2d_cn.rst | 2 +- .../paddle/nn/functional/max_unpool3d_cn.rst | 2 +- docs/api/paddle/nn/functional/maxout_cn.rst | 2 +- docs/api/paddle/nn/functional/mish_cn.rst | 2 +- docs/api/paddle/nn/functional/mse_loss_cn.rst | 2 +- docs/api/paddle/nn/functional/nll_loss_cn.rst | 2 +- .../api/paddle/nn/functional/normalize_cn.rst | 2 +- .../paddle/nn/functional/npair_loss_cn.rst | 2 +- docs/api/paddle/nn/functional/one_hot_cn.rst | 2 +- docs/api/paddle/nn/functional/pad_cn.rst | 3 -- .../paddle/nn/functional/pixel_shuffle_cn.rst | 2 +- docs/api/paddle/nn/functional/prelu_cn.rst | 2 +- docs/api/paddle/nn/functional/relu6_cn.rst | 2 +- docs/api/paddle/nn/functional/relu_cn.rst | 2 +- docs/api/paddle/nn/functional/selu_cn.rst | 2 +- docs/api/paddle/nn/functional/sigmoid_cn.rst | 2 +- .../nn/functional/sigmoid_focal_loss_cn.rst | 2 +- docs/api/paddle/nn/functional/silu_cn.rst | 2 +- .../nn/functional/smooth_l1_loss_cn.rst | 2 +- .../softmax_with_cross_entropy_cn.rst | 2 +- docs/api/paddle/nn/functional/softplus_cn.rst | 2 +- .../paddle/nn/functional/softshrink_cn.rst | 2 +- docs/api/paddle/nn/functional/softsign_cn.rst | 2 +- .../nn/functional/sparse_attention_cn.rst | 2 +- .../nn/functional/square_error_cost_cn.rst | 2 +- docs/api/paddle/nn/functional/swish_cn.rst | 2 +- .../paddle/nn/functional/tanhshrink_cn.rst | 2 +- .../nn/functional/temporal_shift_cn.rst | 2 +- .../nn/functional/thresholded_relu_cn.rst | 2 +- docs/api/paddle/nn/functional/unfold_cn.rst | 2 +- docs/api/paddle/nn/functional/upsample_cn.rst | 2 +- .../api/paddle/nn/functional/zeropad2d_cn.rst | 2 +- docs/api/paddle/nn/initializer/Assign_cn.rst | 2 +- .../api/paddle/nn/initializer/Bilinear_cn.rst | 2 +- docs/api/paddle/nn/initializer/Dirac_cn.rst | 2 +- docs/api/paddle/nn/initializer/Normal_cn.rst | 2 +- .../initializer/set_global_initializer_cn.rst | 2 +- 
.../nn/utils/parameters_to_vector_cn.rst | 2 +- .../paddle/nn/utils/remove_weight_norm_cn.rst | 2 +- docs/api/paddle/nn/utils/spectral_norm_cn.rst | 2 +- .../nn/utils/vector_to_parameters_cn.rst | 2 +- docs/api/paddle/nn/utils/weight_norm_cn.rst | 2 +- docs/api/paddle/no_grad_cn.rst | 2 +- docs/api/paddle/nonzero_cn.rst | 2 +- docs/api/paddle/not_equal_cn.rst | 2 +- docs/api/paddle/numel_cn.rst | 2 +- docs/api/paddle/ones_like_cn.rst | 2 +- docs/api/paddle/onnx/export_cn.rst | 2 +- docs/api/paddle/optimizer/Adadelta_cn.rst | 1 - docs/api/paddle/optimizer/Adagrad_cn.rst | 2 +- docs/api/paddle/optimizer/Momentum_cn.rst | 2 - docs/api/paddle/optimizer/Optimizer_cn.rst | 1 - docs/api/paddle/optimizer/SGD_cn.rst | 4 -- .../optimizer/lr/CosineAnnealingDecay_cn.rst | 1 - docs/api/paddle/optimizer/lr/CyclicLR_cn.rst | 2 +- .../optimizer/lr/ExponentialDecay_cn.rst | 2 - .../optimizer/lr/InverseTimeDecay_cn.rst | 2 +- .../paddle/optimizer/lr/LambdaDecay_cn.rst | 2 +- .../paddle/optimizer/lr/LinearWarmup_cn.rst | 1 - .../optimizer/lr/MultiplicativeDecay_cn.rst | 2 - .../optimizer/lr/NaturalExpDecay_cn.rst | 1 - docs/api/paddle/optimizer/lr/NoamDecay_cn.rst | 1 - .../optimizer/lr/PolynomialDecay_cn.rst | 1 - docs/api/paddle/optimizer/lr/StepDecay_cn.rst | 1 - docs/api/paddle/poisson_cn.rst | 2 +- docs/api/paddle/pow_cn.rst | 2 +- docs/api/paddle/profiler/SortedKeys_cn.rst | 2 - .../profiler/export_chrome_tracing_cn.rst | 2 +- .../paddle/profiler/export_protobuf_cn.rst | 2 +- docs/api/paddle/put_along_axis_cn.rst | 1 - docs/api/paddle/quantile_cn.rst | 2 +- docs/api/paddle/randint_cn.rst | 2 +- docs/api/paddle/randint_like_cn.rst | 2 +- docs/api/paddle/randperm_cn.rst | 2 +- docs/api/paddle/rank_cn.rst | 2 +- docs/api/paddle/real_cn.rst | 2 +- docs/api/paddle/reciprocal_cn.rst | 2 +- docs/api/paddle/regularizer/L1Decay_cn.rst | 1 - docs/api/paddle/regularizer/L2Decay_cn.rst | 1 - docs/api/paddle/reshape_cn.rst | 8 ---- docs/api/paddle/roll_cn.rst | 2 +- 
docs/api/paddle/rot90_cn.rst | 2 +- docs/api/paddle/round_cn.rst | 3 -- docs/api/paddle/rsqrt_cn.rst | 2 +- docs/api/paddle/scale_cn.rst | 2 +- docs/api/paddle/scatter_nd_add_cn.rst | 2 +- docs/api/paddle/scatter_nd_cn.rst | 2 +- docs/api/paddle/searchsorted_cn.rst | 2 +- docs/api/paddle/seed_cn.rst | 2 +- docs/api/paddle/set_cuda_rng_state_cn.rst | 2 +- docs/api/paddle/set_default_dtype_cn.rst | 2 +- docs/api/paddle/set_flags_cn.rst | 2 +- docs/api/paddle/set_grad_enabled_cn.rst | 2 +- docs/api/paddle/set_printoptions_cn.rst | 2 +- docs/api/paddle/shape_cn.rst | 2 +- docs/api/paddle/shard_index_cn.rst | 2 +- docs/api/paddle/sign_cn.rst | 2 +- docs/api/paddle/sin_cn.rst | 2 +- docs/api/paddle/sinh_cn.rst | 2 +- docs/api/paddle/slice_cn.rst | 2 +- docs/api/paddle/sort_cn.rst | 1 - docs/api/paddle/split_cn.rst | 2 +- docs/api/paddle/sqrt_cn.rst | 2 +- docs/api/paddle/square_cn.rst | 2 +- docs/api/paddle/stack_cn.rst | 2 - docs/api/paddle/stanh_cn.rst | 2 +- docs/api/paddle/static/Executor_cn.rst | 1 - .../paddle/static/IpuCompiledProgram_cn.rst | 1 - docs/api/paddle/static/Overview_cn.rst | 2 +- .../api/paddle/static/ParallelExecutor_cn.rst | 1 - docs/api/paddle/static/Print_cn.rst | 2 +- .../paddle/static/WeightNormParamAttr_cn.rst | 2 +- docs/api/paddle/static/accuracy_cn.rst | 2 +- docs/api/paddle/static/append_backward_cn.rst | 2 +- docs/api/paddle/static/auc_cn.rst | 2 +- docs/api/paddle/static/cpu_places_cn.rst | 2 +- .../paddle/static/create_global_var_cn.rst | 2 +- .../api/paddle/static/create_parameter_cn.rst | 2 +- docs/api/paddle/static/cuda_places_cn.rst | 2 +- docs/api/paddle/static/data_cn.rst | 2 +- .../paddle/static/default_main_program_cn.rst | 2 +- .../static/default_startup_program_cn.rst | 2 +- .../static/deserialize_persistables_cn.rst | 2 +- .../paddle/static/deserialize_program_cn.rst | 2 +- docs/api/paddle/static/device_guard_cn.rst | 2 +- docs/api/paddle/static/global_scope_cn.rst | 2 +- docs/api/paddle/static/load_cn.rst | 2 +- 
docs/api/paddle/static/load_from_file_cn.rst | 2 +- .../paddle/static/load_inference_model_cn.rst | 2 +- .../paddle/static/load_program_state_cn.rst | 2 +- docs/api/paddle/static/mlu_places_cn.rst | 1 - docs/api/paddle/static/name_scope_cn.rst | 2 +- docs/api/paddle/static/nn/batch_norm_cn.rst | 2 +- .../static/nn/bilinear_tensor_product_cn.rst | 2 +- docs/api/paddle/static/nn/case_cn.rst | 2 +- docs/api/paddle/static/nn/cond_cn.rst | 1 - docs/api/paddle/static/nn/conv2d_cn.rst | 2 +- .../paddle/static/nn/conv2d_transpose_cn.rst | 2 +- docs/api/paddle/static/nn/conv3d_cn.rst | 2 +- .../paddle/static/nn/conv3d_transpose_cn.rst | 2 +- docs/api/paddle/static/nn/crf_decoding_cn.rst | 2 +- docs/api/paddle/static/nn/data_norm_cn.rst | 2 +- .../api/paddle/static/nn/deform_conv2d_cn.rst | 2 +- docs/api/paddle/static/nn/embedding_cn.rst | 2 +- docs/api/paddle/static/nn/fc_cn.rst | 2 - docs/api/paddle/static/nn/group_norm_cn.rst | 2 +- .../api/paddle/static/nn/instance_norm_cn.rst | 2 +- docs/api/paddle/static/nn/layer_norm_cn.rst | 2 +- docs/api/paddle/static/nn/nce_cn.rst | 2 +- docs/api/paddle/static/nn/prelu_cn.rst | 2 +- docs/api/paddle/static/nn/row_conv_cn.rst | 2 +- .../paddle/static/nn/sequence_concat_cn.rst | 10 ---- .../api/paddle/static/nn/sequence_conv_cn.rst | 6 --- .../static/nn/sequence_enumerate_cn.rst | 9 ---- .../static/nn/sequence_expand_as_cn.rst | 9 ---- .../paddle/static/nn/sequence_expand_cn.rst | 7 --- .../static/nn/sequence_first_step_cn.rst | 9 ---- .../static/nn/sequence_last_step_cn.rst | 9 ---- docs/api/paddle/static/nn/sequence_pad_cn.rst | 7 --- .../paddle/static/nn/sequence_reshape_cn.rst | 9 ---- .../paddle/static/nn/sequence_reverse_cn.rst | 7 --- .../paddle/static/nn/sequence_scatter_cn.rst | 9 ---- .../paddle/static/nn/sequence_slice_cn.rst | 10 ---- .../paddle/static/nn/sequence_softmax_cn.rst | 1 - .../paddle/static/nn/sequence_unpad_cn.rst | 1 - .../paddle/static/nn/sparse_embedding_cn.rst | 2 +- 
.../api/paddle/static/nn/spectral_norm_cn.rst | 2 +- docs/api/paddle/static/nn/switch_case_cn.rst | 2 +- .../paddle/static/normalize_program_cn.rst | 2 +- docs/api/paddle/static/npu_places_cn.rst | 1 - docs/api/paddle/static/program_guard_cn.rst | 1 - docs/api/paddle/static/save_cn.rst | 2 +- .../paddle/static/save_inference_model_cn.rst | 2 +- docs/api/paddle/static/save_to_file_cn.rst | 2 +- docs/api/paddle/static/scope_guard_cn.rst | 2 +- .../static/serialize_persistables_cn.rst | 2 +- .../paddle/static/serialize_program_cn.rst | 2 +- .../paddle/static/set_program_state_cn.rst | 2 +- docs/api/paddle/static/xpu_places_cn.rst | 2 +- docs/api/paddle/std_cn.rst | 2 +- docs/api/paddle/strided_slice_cn.rst | 2 +- docs/api/paddle/subtract_cn.rst | 2 +- docs/api/paddle/summary_cn.rst | 2 +- docs/api/paddle/sysconfig/get_include_cn.rst | 2 +- docs/api/paddle/sysconfig/get_lib_cn.rst | 2 +- docs/api/paddle/take_along_axis_cn.rst | 1 - docs/api/paddle/tan_cn.rst | 2 +- docs/api/paddle/tanh_cn.rst | 2 +- docs/api/paddle/tensordot_cn.rst | 2 +- docs/api/paddle/text/Conll05st_cn.rst | 2 +- docs/api/paddle/text/Imdb_cn.rst | 2 +- docs/api/paddle/text/Imikolov_cn.rst | 2 +- docs/api/paddle/text/Movielens_cn.rst | 2 +- docs/api/paddle/text/UCIHousing_cn.rst | 1 - docs/api/paddle/text/ViterbiDecoder_cn.rst | 2 +- docs/api/paddle/text/WMT14_cn.rst | 2 +- docs/api/paddle/text/WMT16_cn.rst | 2 +- docs/api/paddle/text/viterbi_decode_cn.rst | 2 +- docs/api/paddle/tile_cn.rst | 2 +- docs/api/paddle/to_tensor_cn.rst | 2 +- docs/api/paddle/trace_cn.rst | 2 +- docs/api/paddle/transpose_cn.rst | 1 - docs/api/paddle/tril_cn.rst | 2 +- docs/api/paddle/triu_cn.rst | 2 +- docs/api/paddle/trunc_cn.rst | 2 +- docs/api/paddle/unsqueeze_cn.rst | 2 +- docs/api/paddle/unstack_cn.rst | 2 +- .../utils/cpp_extension/CppExtension_cn.rst | 2 +- .../cpp_extension/get_build_directory_cn.rst | 2 +- docs/api/paddle/utils/deprecated_cn.rst | 1 - .../paddle/utils/dlpack/from_dlpack_cn.rst | 2 +- 
docs/api/paddle/utils/dlpack/to_dlpack_cn.rst | 2 +- .../download/get_weights_path_from_url_cn.rst | 2 +- docs/api/paddle/utils/run_check_cn.rst | 2 +- .../paddle/utils/unique_name/generate_cn.rst | 2 +- .../api/paddle/utils/unique_name/guard_cn.rst | 2 +- .../paddle/utils/unique_name/switch_cn.rst | 2 +- docs/api/paddle/var_cn.rst | 2 +- docs/api/paddle/version/Overview_cn.rst | 1 - docs/api/paddle/version/cuda_cn.rst | 1 - docs/api/paddle/version/cudnn_cn.rst | 1 - docs/api/paddle/version/show_cn.rst | 2 +- .../paddle/vision/get_image_backend_cn.rst | 2 +- docs/api/paddle/vision/image_load_cn.rst | 2 +- .../paddle/vision/models/MobileNetV1_cn.rst | 1 - .../paddle/vision/models/MobileNetV2_cn.rst | 1 - .../paddle/vision/models/squeezenet1_1_cn.rst | 1 - docs/api/paddle/vision/models/vgg13_cn.rst | 1 - .../api/paddle/vision/ops/DeformConv2D_cn.rst | 2 +- docs/api/paddle/vision/ops/RoIPool_cn.rst | 2 +- .../paddle/vision/ops/deform_conv2d_cn.rst | 2 +- docs/api/paddle/vision/ops/roi_pool_cn.rst | 2 +- docs/api/paddle/vision/ops/yolo_box_cn.rst | 2 +- docs/api/paddle/vision/ops/yolo_loss_cn.rst | 2 +- .../paddle/vision/set_image_backend_cn.rst | 2 +- .../vision/transforms/BaseTransform_cn.rst | 1 - .../paddle/vision/transforms/Compose_cn.rst | 1 - .../paddle/vision/transforms/Normalize_cn.rst | 2 +- .../vision/transforms/RandomErasing_cn.rst | 1 - .../vision/transforms/RandomRotation_cn.rst | 1 - .../paddle/vision/transforms/Resize_cn.rst | 1 - .../transforms/adjust_brightness_cn.rst | 2 - .../vision/transforms/adjust_contrast_cn.rst | 2 +- .../vision/transforms/adjust_hue_cn.rst | 2 +- .../vision/transforms/center_crop_cn.rst | 2 +- docs/api/paddle/vision/transforms/crop_cn.rst | 2 +- .../api/paddle/vision/transforms/erase_cn.rst | 2 +- .../api/paddle/vision/transforms/hflip_cn.rst | 2 +- .../paddle/vision/transforms/resize_cn.rst | 1 - .../paddle/vision/transforms/rotate_cn.rst | 1 - .../vision/transforms/to_grayscale_cn.rst | 1 - 
.../paddle/vision/transforms/to_tensor_cn.rst | 2 +- .../api/paddle/vision/transforms/vflip_cn.rst | 2 +- docs/api/paddle/zeros_cn.rst | 2 +- docs/api/paddle/zeros_like_cn.rst | 2 +- docs/api_guides/low_level/backward_en.rst | 1 - .../low_level/distributed/index_en.rst | 2 - docs/api_guides/low_level/executor_en.rst | 3 -- .../low_level/layers/control_flow_en.rst | 2 +- docs/api_guides/low_level/layers/conv_en.rst | 2 +- .../low_level/layers/data_feeder.rst | 2 +- .../low_level/layers/data_feeder_en.rst | 2 +- .../low_level/layers/data_in_out.rst | 2 +- .../low_level/layers/data_in_out_en.rst | 2 +- .../api_guides/low_level/layers/detection.rst | 2 - .../low_level/layers/detection_en.rst | 2 +- docs/api_guides/low_level/layers/index.rst | 1 - docs/api_guides/low_level/layers/index_en.rst | 1 - .../layers/learning_rate_scheduler.rst | 2 +- .../layers/learning_rate_scheduler_en.rst | 2 +- .../low_level/layers/loss_function.rst | 2 +- .../low_level/layers/loss_function_en.rst | 2 +- docs/api_guides/low_level/layers/sequence.rst | 1 - docs/api_guides/low_level/metrics.rst | 1 - docs/api_guides/low_level/metrics_en.rst | 2 +- .../low_level/model_save_reader_en.rst | 2 +- docs/api_guides/low_level/nets.rst | 1 - docs/api_guides/low_level/optimizer.rst | 2 - docs/api_guides/low_level/optimizer_en.rst | 1 - docs/design/mkldnn/gru/index_en.rst | 2 +- .../custom_device_docs/custom_runtime_en.rst | 1 - docs/dev_guides/index_cn.rst | 1 - docs/dev_guides/style_guides_cn.md | 2 +- docs/dev_guides/sugon/complie_and_test_cn.md | 2 +- docs/dev_guides/sugon/paddle_c86_cn.md | 2 +- ...200\220Hackathon No.111\343\200\221 PR.md" | 5 -- .../cluster_quick_start_cn.rst | 2 +- .../cluster_quick_start_collective_cn.rst | 1 - .../data_parallel/gradient_merge_cn.rst | 1 - .../data_parallel/principle_and_demo_cn.rst | 2 +- .../06_distributed_training/deployment_cn.rst | 1 - .../distributed_overview.rst | 1 - .../fleet_api_howto_cn.rst | 2 - .../group_sharded_parallel_cn.rst | 2 - 
.../model_parallel_cn.rst | 2 +- .../pipeline_parallel_cn.rst | 1 - .../community_contribution_cn.md | 2 +- docs/guides/beginner/index_cn.rst | 1 - docs/guides/beginner/index_en.rst | 1 - docs/guides/beginner/model_save_load_cn.rst | 1 - docs/guides/custom_op/index_cn.rst | 1 - docs/guides/flags/cudnn_cn.rst | 2 +- docs/guides/flags/data_cn.rst | 2 +- docs/guides/flags/debug_en.rst | 2 +- docs/guides/flags/device_cn.rst | 2 +- docs/guides/flags/device_en.rst | 2 +- docs/guides/flags/distributed_cn.rst | 2 +- docs/guides/flags/distributed_en.rst | 2 +- docs/guides/flags/executor_cn.rst | 2 +- docs/guides/flags/executor_en.rst | 2 +- docs/guides/flags/flags_en.rst | 46 ------------------- docs/guides/flags/npu_cn.rst | 1 - docs/guides/flags/npu_en.rst | 1 - docs/guides/flags/others_cn.rst | 1 - docs/guides/flags/others_en.rst | 1 - .../hardware_support/hardware_info_cn.md | 2 +- docs/guides/hardware_support/index_cn.rst | 2 +- docs/guides/jit/basic_usage_en.md | 1 - docs/guides/jit/index_en.rst | 1 - .../load_old_format_model_cn.rst | 2 +- .../model_convert/paddle_api_mapping_cn.rst | 2 +- .../analysis_tools/index_en.rst | 2 - .../guides/performance_improving/index_cn.rst | 2 +- .../guides/performance_improving/index_en.rst | 2 +- .../performance_improving/memory_optimize.rst | 1 - .../memory_optimize_en.rst | 1 - docs/index_cn.rst | 1 - docs/index_en.rst | 1 - docs/practices/jit/index_cn.rst | 2 +- docs/practices/nlp/index_cn.rst | 2 +- docs/practices/quick_start/index_cn.rst | 2 +- 767 files changed, 587 insertions(+), 974 deletions(-) diff --git a/docs/advanced_guide/index_en.rst b/docs/advanced_guide/index_en.rst index d6401bceb56..f44212dadfb 100644 --- a/docs/advanced_guide/index_en.rst +++ b/docs/advanced_guide/index_en.rst @@ -17,4 +17,3 @@ So far you have already been familiar with PaddlePaddle. 
And the next expectatio inference_deployment/index_en.rst flags/flags_en.rst - diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst index 50f6f69a96e..86df9f539c2 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst @@ -158,4 +158,4 @@ paddlepaddle支持对训练策略中的细节进行调整: config.min_block_size = 81920 # config = dict() # config['min_block_size'] = 81920 - strategy.set_program_config(config) \ No newline at end of file + strategy.set_program_config(config) diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst index 60a3310844b..3e9b3949631 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst @@ -161,4 +161,4 @@ PaddlePaddle supports adjusting the details of the training strategy: config.min_block_size = 81920 # config = dict() # config['min_block_size'] = 81920 - strategy.set_program_config(config) \ No newline at end of file + strategy.set_program_config(config) diff --git a/docs/api/index_en.rst b/docs/api/index_en.rst index c811df0062d..730ab8d4613 100644 --- a/docs/api/index_en.rst +++ b/docs/api/index_en.rst @@ -86,4 +86,3 @@ In this version, PaddlePaddle has made many optimizations to the APIs. You can r +-------------------------------+-------------------------------------------------------+ | paddle.sparse | The Sparse domain API. 
| +-------------------------------+-------------------------------------------------------+ - diff --git a/docs/api/paddle/NPUPlace_cn.rst b/docs/api/paddle/NPUPlace_cn.rst index 182afc83c2e..723ae38bc3b 100644 --- a/docs/api/paddle/NPUPlace_cn.rst +++ b/docs/api/paddle/NPUPlace_cn.rst @@ -18,4 +18,4 @@ NPUPlace 代码示例 :::::::::::: -COPY-FROM: paddle.NPUPlace \ No newline at end of file +COPY-FROM: paddle.NPUPlace diff --git a/docs/api/paddle/ParamAttr_cn.rst b/docs/api/paddle/ParamAttr_cn.rst index 909d3adbd1d..f15b647c5b4 100644 --- a/docs/api/paddle/ParamAttr_cn.rst +++ b/docs/api/paddle/ParamAttr_cn.rst @@ -35,4 +35,4 @@ ParamAttr 代码示例 :::::::::::: -COPY-FROM: paddle.ParamAttr \ No newline at end of file +COPY-FROM: paddle.ParamAttr diff --git a/docs/api/paddle/abs_cn.rst b/docs/api/paddle/abs_cn.rst index c4582fc2a64..7c7da51437f 100644 --- a/docs/api/paddle/abs_cn.rst +++ b/docs/api/paddle/abs_cn.rst @@ -25,4 +25,4 @@ abs 代码示例 ::::::::: -COPY-FROM: paddle.abs \ No newline at end of file +COPY-FROM: paddle.abs diff --git a/docs/api/paddle/acos_cn.rst b/docs/api/paddle/acos_cn.rst index 51db53cd54a..20db4d110e6 100644 --- a/docs/api/paddle/acos_cn.rst +++ b/docs/api/paddle/acos_cn.rst @@ -25,4 +25,4 @@ arccosine函数。 代码示例 ::::::::: -COPY-FROM: paddle.acos \ No newline at end of file +COPY-FROM: paddle.acos diff --git a/docs/api/paddle/acosh_cn.rst b/docs/api/paddle/acosh_cn.rst index 1a80dd82dee..040fd441a2c 100644 --- a/docs/api/paddle/acosh_cn.rst +++ b/docs/api/paddle/acosh_cn.rst @@ -27,4 +27,4 @@ Arccosh函数。 代码示例 ::::::::: -COPY-FROM: paddle.acosh \ No newline at end of file +COPY-FROM: paddle.acosh diff --git a/docs/api/paddle/add_cn.rst b/docs/api/paddle/add_cn.rst index 9c2c90f1754..6ea4126e7fb 100644 --- a/docs/api/paddle/add_cn.rst +++ b/docs/api/paddle/add_cn.rst @@ -33,4 +33,4 @@ add 代码示例 ::::::::: -COPY-FROM: paddle.add \ No newline at end of file +COPY-FROM: paddle.add diff --git a/docs/api/paddle/addmm_cn.rst b/docs/api/paddle/addmm_cn.rst index 
8fc2f93d6af..02176315f24 100644 --- a/docs/api/paddle/addmm_cn.rst +++ b/docs/api/paddle/addmm_cn.rst @@ -33,4 +33,4 @@ addmm 代码示例 :::::::::::: -COPY-FROM: paddle.addmm \ No newline at end of file +COPY-FROM: paddle.addmm diff --git a/docs/api/paddle/all_cn.rst b/docs/api/paddle/all_cn.rst index 868d3e6cd52..0bc41b3f790 100644 --- a/docs/api/paddle/all_cn.rst +++ b/docs/api/paddle/all_cn.rst @@ -22,4 +22,4 @@ all 代码示例 ::::::::: -COPY-FROM: paddle.all \ No newline at end of file +COPY-FROM: paddle.all diff --git a/docs/api/paddle/allclose_cn.rst b/docs/api/paddle/allclose_cn.rst index 117419f19ef..fcb67085ca5 100644 --- a/docs/api/paddle/allclose_cn.rst +++ b/docs/api/paddle/allclose_cn.rst @@ -29,4 +29,4 @@ allclose 代码示例 :::::::::::: -COPY-FROM: paddle.allclose \ No newline at end of file +COPY-FROM: paddle.allclose diff --git a/docs/api/paddle/amp/Overview_cn.rst b/docs/api/paddle/amp/Overview_cn.rst index 016698e5536..d53c89e9121 100644 --- a/docs/api/paddle/amp/Overview_cn.rst +++ b/docs/api/paddle/amp/Overview_cn.rst @@ -58,5 +58,3 @@ AMP相关API "sigmoid_cross_entropy_with_logits", "按元素的概率误差" "cross_entropy", "交叉熵" "cross_entropy2", "交叉熵" - - diff --git a/docs/api/paddle/angle_cn.rst b/docs/api/paddle/angle_cn.rst index 2db5d410cee..9f2f290f9ae 100644 --- a/docs/api/paddle/angle_cn.rst +++ b/docs/api/paddle/angle_cn.rst @@ -24,4 +24,4 @@ angle 代码示例 ::::::::: -COPY-FROM: paddle.angle \ No newline at end of file +COPY-FROM: paddle.angle diff --git a/docs/api/paddle/any_cn.rst b/docs/api/paddle/any_cn.rst index cc78d832cf1..bb8ef51305f 100644 --- a/docs/api/paddle/any_cn.rst +++ b/docs/api/paddle/any_cn.rst @@ -22,4 +22,4 @@ any 代码示例 ::::::::: -COPY-FROM: paddle.any \ No newline at end of file +COPY-FROM: paddle.any diff --git a/docs/api/paddle/arange_cn.rst b/docs/api/paddle/arange_cn.rst index 4e8237a94a9..6d740ea9a0a 100644 --- a/docs/api/paddle/arange_cn.rst +++ b/docs/api/paddle/arange_cn.rst @@ -25,4 +25,4 @@ arange 代码示例 :::::::::: -COPY-FROM: paddle.arange \ 
No newline at end of file +COPY-FROM: paddle.arange diff --git a/docs/api/paddle/argmax_cn.rst b/docs/api/paddle/argmax_cn.rst index 104a4700719..115c3ea6688 100644 --- a/docs/api/paddle/argmax_cn.rst +++ b/docs/api/paddle/argmax_cn.rst @@ -24,4 +24,4 @@ argmax 示例代码 :::::::: -COPY-FROM: paddle.argmax \ No newline at end of file +COPY-FROM: paddle.argmax diff --git a/docs/api/paddle/argsort_cn.rst b/docs/api/paddle/argsort_cn.rst index b10a6b4fd3c..502e36e9bb2 100644 --- a/docs/api/paddle/argsort_cn.rst +++ b/docs/api/paddle/argsort_cn.rst @@ -25,4 +25,4 @@ Tensor,排序后索引信息(与 ``x`` 维度信息一致),数据类型 代码示例 :::::::::::: -COPY-FROM: paddle.argsort \ No newline at end of file +COPY-FROM: paddle.argsort diff --git a/docs/api/paddle/as_complex_cn.rst b/docs/api/paddle/as_complex_cn.rst index 07a78bc5cb3..afa1e54e634 100644 --- a/docs/api/paddle/as_complex_cn.rst +++ b/docs/api/paddle/as_complex_cn.rst @@ -24,4 +24,4 @@ as_complex 代码示例 ::::::::: -COPY-FROM: paddle.as_complex \ No newline at end of file +COPY-FROM: paddle.as_complex diff --git a/docs/api/paddle/as_real_cn.rst b/docs/api/paddle/as_real_cn.rst index 0d5d354d43b..b76324a1755 100644 --- a/docs/api/paddle/as_real_cn.rst +++ b/docs/api/paddle/as_real_cn.rst @@ -24,4 +24,4 @@ as_real 代码示例 ::::::::: -COPY-FROM: paddle.as_real \ No newline at end of file +COPY-FROM: paddle.as_real diff --git a/docs/api/paddle/asin_cn.rst b/docs/api/paddle/asin_cn.rst index 36054cd6e88..4ecd7380d62 100644 --- a/docs/api/paddle/asin_cn.rst +++ b/docs/api/paddle/asin_cn.rst @@ -26,4 +26,4 @@ arcsine函数。 代码示例 :::::::::::: -COPY-FROM: paddle.asin \ No newline at end of file +COPY-FROM: paddle.asin diff --git a/docs/api/paddle/asinh_cn.rst b/docs/api/paddle/asinh_cn.rst index 173793a6450..796c59fbeca 100644 --- a/docs/api/paddle/asinh_cn.rst +++ b/docs/api/paddle/asinh_cn.rst @@ -24,4 +24,4 @@ Arcsinh函数。 代码示例 ::::::::: -COPY-FROM: paddle.asinh \ No newline at end of file +COPY-FROM: paddle.asinh diff --git a/docs/api/paddle/atan2_cn.rst 
b/docs/api/paddle/atan2_cn.rst index 36eb8b0ab1b..62cd3a59c89 100644 --- a/docs/api/paddle/atan2_cn.rst +++ b/docs/api/paddle/atan2_cn.rst @@ -35,4 +35,4 @@ atan2 代码示例 ::::::::: -COPY-FROM: paddle.atan2 \ No newline at end of file +COPY-FROM: paddle.atan2 diff --git a/docs/api/paddle/atan_cn.rst b/docs/api/paddle/atan_cn.rst index 227d20c6242..9e28b318a26 100644 --- a/docs/api/paddle/atan_cn.rst +++ b/docs/api/paddle/atan_cn.rst @@ -26,4 +26,4 @@ arctangent函数。 代码示例 :::::::::::: -COPY-FROM: paddle.atan \ No newline at end of file +COPY-FROM: paddle.atan diff --git a/docs/api/paddle/atanh_cn.rst b/docs/api/paddle/atanh_cn.rst index 8d7fc722508..efd038ec3dd 100644 --- a/docs/api/paddle/atanh_cn.rst +++ b/docs/api/paddle/atanh_cn.rst @@ -24,4 +24,4 @@ Arctanh函数。 代码示例 ::::::::: -COPY-FROM: paddle.atanh \ No newline at end of file +COPY-FROM: paddle.atanh diff --git a/docs/api/paddle/autograd/PyLayer_cn.rst b/docs/api/paddle/autograd/PyLayer_cn.rst index 24d2f52200a..6715d8452a8 100644 --- a/docs/api/paddle/autograd/PyLayer_cn.rst +++ b/docs/api/paddle/autograd/PyLayer_cn.rst @@ -171,4 +171,3 @@ Tensor或至少包含一个Tensor的list/tuple data.stop_gradient = False # run custom Layer. 
z = cus_tanh.apply(data, func1=paddle.tanh) - diff --git a/docs/api/paddle/autograd/backward_cn.rst b/docs/api/paddle/autograd/backward_cn.rst index 05e9e460b0e..27ac7c5d6a8 100644 --- a/docs/api/paddle/autograd/backward_cn.rst +++ b/docs/api/paddle/autograd/backward_cn.rst @@ -23,4 +23,4 @@ None 代码示例 :::::::::::: -COPY-FROM: paddle.autograd.backward \ No newline at end of file +COPY-FROM: paddle.autograd.backward diff --git a/docs/api/paddle/batch_cn.rst b/docs/api/paddle/batch_cn.rst index 8a9e2f2d340..a9b9c936e6e 100644 --- a/docs/api/paddle/batch_cn.rst +++ b/docs/api/paddle/batch_cn.rst @@ -25,4 +25,4 @@ batched reader 代码示例 :::::::::::: -COPY-FROM: paddle.batch \ No newline at end of file +COPY-FROM: paddle.batch diff --git a/docs/api/paddle/bincount_cn.rst b/docs/api/paddle/bincount_cn.rst index eb92661b902..f866b241909 100644 --- a/docs/api/paddle/bincount_cn.rst +++ b/docs/api/paddle/bincount_cn.rst @@ -33,5 +33,3 @@ Tensor,维度为1。 w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] - - diff --git a/docs/api/paddle/bitwise_and_cn.rst b/docs/api/paddle/bitwise_and_cn.rst index 0031dce1e49..ea09f5c524e 100644 --- a/docs/api/paddle/bitwise_and_cn.rst +++ b/docs/api/paddle/bitwise_and_cn.rst @@ -27,4 +27,4 @@ bitwise_and 代码示例 :::::::::::: -COPY-FROM: paddle.bitwise_and \ No newline at end of file +COPY-FROM: paddle.bitwise_and diff --git a/docs/api/paddle/bitwise_not_cn.rst b/docs/api/paddle/bitwise_not_cn.rst index 652ce613a33..6ba22b14a81 100644 --- a/docs/api/paddle/bitwise_not_cn.rst +++ b/docs/api/paddle/bitwise_not_cn.rst @@ -26,4 +26,4 @@ bitwise_not 代码示例 :::::::::::: -COPY-FROM: paddle.bitwise_not \ No newline at end of file +COPY-FROM: paddle.bitwise_not diff --git a/docs/api/paddle/bitwise_or_cn.rst b/docs/api/paddle/bitwise_or_cn.rst index 9e226b525d6..e36faf4d2b6 100644 --- a/docs/api/paddle/bitwise_or_cn.rst +++ 
b/docs/api/paddle/bitwise_or_cn.rst @@ -27,4 +27,4 @@ bitwise_or 代码示例 :::::::::::: -COPY-FROM: paddle.bitwise_or \ No newline at end of file +COPY-FROM: paddle.bitwise_or diff --git a/docs/api/paddle/bitwise_xor_cn.rst b/docs/api/paddle/bitwise_xor_cn.rst index 6f3e0f45aeb..fc44d812d0a 100644 --- a/docs/api/paddle/bitwise_xor_cn.rst +++ b/docs/api/paddle/bitwise_xor_cn.rst @@ -27,4 +27,4 @@ bitwise_xor 代码示例 :::::::::::: -COPY-FROM: paddle.bitwise_xor \ No newline at end of file +COPY-FROM: paddle.bitwise_xor diff --git a/docs/api/paddle/bmm_cn.rst b/docs/api/paddle/bmm_cn.rst index fea8e17ef15..0981cf5127c 100644 --- a/docs/api/paddle/bmm_cn.rst +++ b/docs/api/paddle/bmm_cn.rst @@ -29,4 +29,3 @@ Tensor,矩阵相乘后的结果。 ::::::::: COPY-FROM: paddle.bmm - diff --git a/docs/api/paddle/broadcast_shape_cn.rst b/docs/api/paddle/broadcast_shape_cn.rst index 5d1632fb9a4..56410d1a49a 100644 --- a/docs/api/paddle/broadcast_shape_cn.rst +++ b/docs/api/paddle/broadcast_shape_cn.rst @@ -21,4 +21,4 @@ broadcast操作后的shape,返回类型为 list[int]。 代码示例 ::::::::: -COPY-FROM: paddle.broadcast_shape \ No newline at end of file +COPY-FROM: paddle.broadcast_shape diff --git a/docs/api/paddle/broadcast_tensors_cn.rst b/docs/api/paddle/broadcast_tensors_cn.rst index a2202508dd8..c2280f91f9a 100644 --- a/docs/api/paddle/broadcast_tensors_cn.rst +++ b/docs/api/paddle/broadcast_tensors_cn.rst @@ -23,4 +23,4 @@ broadcast_tensors 代码示例 ::::::::: -COPY-FROM: paddle.broadcast_tensors \ No newline at end of file +COPY-FROM: paddle.broadcast_tensors diff --git a/docs/api/paddle/broadcast_to_cn.rst b/docs/api/paddle/broadcast_to_cn.rst index 3e6bae7b67d..4b24d1c4db6 100644 --- a/docs/api/paddle/broadcast_to_cn.rst +++ b/docs/api/paddle/broadcast_to_cn.rst @@ -22,4 +22,4 @@ broadcast_to 代码示例 ::::::::: -COPY-FROM: paddle.broadcast_to \ No newline at end of file +COPY-FROM: paddle.broadcast_to diff --git a/docs/api/paddle/callbacks/EarlyStopping_cn.rst b/docs/api/paddle/callbacks/EarlyStopping_cn.rst index 
cdb8c9f2b27..c08cef3953b 100644 --- a/docs/api/paddle/callbacks/EarlyStopping_cn.rst +++ b/docs/api/paddle/callbacks/EarlyStopping_cn.rst @@ -21,4 +21,4 @@ EarlyStopping 代码示例 :::::::::::: -COPY-FROM: paddle.callbacks.EarlyStopping \ No newline at end of file +COPY-FROM: paddle.callbacks.EarlyStopping diff --git a/docs/api/paddle/callbacks/LRScheduler_cn.rst b/docs/api/paddle/callbacks/LRScheduler_cn.rst index 9770e6b160c..5c637c35b52 100644 --- a/docs/api/paddle/callbacks/LRScheduler_cn.rst +++ b/docs/api/paddle/callbacks/LRScheduler_cn.rst @@ -17,4 +17,4 @@ LRScheduler 代码示例 :::::::::::: -COPY-FROM: paddle.callbacks.LRScheduler \ No newline at end of file +COPY-FROM: paddle.callbacks.LRScheduler diff --git a/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst b/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst index 73a8795a2a5..cc720717147 100644 --- a/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst +++ b/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst @@ -19,4 +19,4 @@ ModelCheckpoint 代码示例 :::::::::::: -COPY-FROM: paddle.callbacks.ModelCheckpoint \ No newline at end of file +COPY-FROM: paddle.callbacks.ModelCheckpoint diff --git a/docs/api/paddle/callbacks/ProgBarLogger_cn.rst b/docs/api/paddle/callbacks/ProgBarLogger_cn.rst index 6eb208335c4..22db8f7dd3e 100644 --- a/docs/api/paddle/callbacks/ProgBarLogger_cn.rst +++ b/docs/api/paddle/callbacks/ProgBarLogger_cn.rst @@ -20,4 +20,4 @@ ProgBarLogger 代码示例 :::::::::::: -COPY-FROM: paddle.callbacks.ProgBarLogger \ No newline at end of file +COPY-FROM: paddle.callbacks.ProgBarLogger diff --git a/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst b/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst index ae1b7911e8b..d715e8f7f57 100644 --- a/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst +++ b/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst @@ -23,4 +23,4 @@ ReduceLROnPlateau 代码示例 :::::::::::: -COPY-FROM: paddle.callbacks.ReduceLROnPlateau \ No newline at end of file +COPY-FROM: paddle.callbacks.ReduceLROnPlateau 
diff --git a/docs/api/paddle/callbacks/VisualDL_cn.rst b/docs/api/paddle/callbacks/VisualDL_cn.rst index 36e80899aea..0fb3dd46bf4 100644 --- a/docs/api/paddle/callbacks/VisualDL_cn.rst +++ b/docs/api/paddle/callbacks/VisualDL_cn.rst @@ -16,4 +16,4 @@ VisualDL 代码示例 :::::::::::: -COPY-FROM: paddle.callbacks.VisualDL \ No newline at end of file +COPY-FROM: paddle.callbacks.VisualDL diff --git a/docs/api/paddle/cast_cn.rst b/docs/api/paddle/cast_cn.rst index dd5c68e3a2b..68dd5cdc29e 100644 --- a/docs/api/paddle/cast_cn.rst +++ b/docs/api/paddle/cast_cn.rst @@ -23,4 +23,4 @@ Tensor,维度与 ``x`` 相同,数据类型为 ``dtype``。 代码示例 :::::::::::: -COPY-FROM: paddle.cast \ No newline at end of file +COPY-FROM: paddle.cast diff --git a/docs/api/paddle/ceil_cn.rst b/docs/api/paddle/ceil_cn.rst index 6aef8a65cf4..1caba3897e6 100644 --- a/docs/api/paddle/ceil_cn.rst +++ b/docs/api/paddle/ceil_cn.rst @@ -28,4 +28,4 @@ ceil 代码示例 :::::::::::: -COPY-FROM: paddle.ceil \ No newline at end of file +COPY-FROM: paddle.ceil diff --git a/docs/api/paddle/chunk_cn.rst b/docs/api/paddle/chunk_cn.rst index 7dd73212e4a..e2b1faa1ed9 100644 --- a/docs/api/paddle/chunk_cn.rst +++ b/docs/api/paddle/chunk_cn.rst @@ -23,4 +23,4 @@ chunk 代码示例 :::::::::::: -COPY-FROM: paddle.chunk \ No newline at end of file +COPY-FROM: paddle.chunk diff --git a/docs/api/paddle/clip_cn.rst b/docs/api/paddle/clip_cn.rst index 08489dff00e..3008aa6b18d 100644 --- a/docs/api/paddle/clip_cn.rst +++ b/docs/api/paddle/clip_cn.rst @@ -29,4 +29,4 @@ clip 代码示例 :::::::::::: -COPY-FROM: paddle.clip \ No newline at end of file +COPY-FROM: paddle.clip diff --git a/docs/api/paddle/clone_cn.rst b/docs/api/paddle/clone_cn.rst index 0d4dbf3d6fb..b9b80f27191 100644 --- a/docs/api/paddle/clone_cn.rst +++ b/docs/api/paddle/clone_cn.rst @@ -21,4 +21,4 @@ clone 代码示例 ::::::::: -COPY-FROM: paddle.clone \ No newline at end of file +COPY-FROM: paddle.clone diff --git a/docs/api/paddle/compat/long_type_cn.rst b/docs/api/paddle/compat/long_type_cn.rst index 
4762a29d0c1..fe6001936e6 100644 --- a/docs/api/paddle/compat/long_type_cn.rst +++ b/docs/api/paddle/compat/long_type_cn.rst @@ -6,4 +6,3 @@ long_type .. py:function:: paddle.compat.long_type() builtins.int的别名 - diff --git a/docs/api/paddle/compat/to_bytes_cn.rst b/docs/api/paddle/compat/to_bytes_cn.rst index 445cd13585e..f8383bc7c03 100644 --- a/docs/api/paddle/compat/to_bytes_cn.rst +++ b/docs/api/paddle/compat/to_bytes_cn.rst @@ -31,4 +31,4 @@ to_bytes 代码示例 ::::::::: -COPY-FROM: paddle.compat.to_bytes \ No newline at end of file +COPY-FROM: paddle.compat.to_bytes diff --git a/docs/api/paddle/compat/to_text_cn.rst b/docs/api/paddle/compat/to_text_cn.rst index 6a8346e775c..a6fbe7de613 100644 --- a/docs/api/paddle/compat/to_text_cn.rst +++ b/docs/api/paddle/compat/to_text_cn.rst @@ -31,4 +31,4 @@ to_text 代码示例 ::::::::: -COPY-FROM: paddle.compat.to_text \ No newline at end of file +COPY-FROM: paddle.compat.to_text diff --git a/docs/api/paddle/complex_cn.rst b/docs/api/paddle/complex_cn.rst index fe6292d03cf..65caef9dc1b 100644 --- a/docs/api/paddle/complex_cn.rst +++ b/docs/api/paddle/complex_cn.rst @@ -25,4 +25,4 @@ complex 代码示例 ::::::::: -COPY-FROM: paddle.complex \ No newline at end of file +COPY-FROM: paddle.complex diff --git a/docs/api/paddle/concat_cn.rst b/docs/api/paddle/concat_cn.rst index c60cc0ad2a8..ad210156001 100644 --- a/docs/api/paddle/concat_cn.rst +++ b/docs/api/paddle/concat_cn.rst @@ -23,4 +23,4 @@ concat 代码示例 :::::::::::: -COPY-FROM: paddle.concat \ No newline at end of file +COPY-FROM: paddle.concat diff --git a/docs/api/paddle/conj_cn.rst b/docs/api/paddle/conj_cn.rst index a096c845522..bf00695cc6b 100644 --- a/docs/api/paddle/conj_cn.rst +++ b/docs/api/paddle/conj_cn.rst @@ -23,4 +23,4 @@ conj 代码示例 :::::::::::: -COPY-FROM: paddle.conj \ No newline at end of file +COPY-FROM: paddle.conj diff --git a/docs/api/paddle/cos_cn.rst b/docs/api/paddle/cos_cn.rst index 2a5fb7d6b1c..1a891b81ae7 100644 --- a/docs/api/paddle/cos_cn.rst +++ 
b/docs/api/paddle/cos_cn.rst @@ -29,4 +29,4 @@ cos 代码示例 :::::::::::: -COPY-FROM: paddle.cos \ No newline at end of file +COPY-FROM: paddle.cos diff --git a/docs/api/paddle/cosh_cn.rst b/docs/api/paddle/cosh_cn.rst index f119bd350b4..72e68ad98c3 100644 --- a/docs/api/paddle/cosh_cn.rst +++ b/docs/api/paddle/cosh_cn.rst @@ -29,4 +29,4 @@ cosh 代码示例 :::::::::::: -COPY-FROM: paddle.cosh \ No newline at end of file +COPY-FROM: paddle.cosh diff --git a/docs/api/paddle/cross_cn.rst b/docs/api/paddle/cross_cn.rst index 69cd0fbf961..475f5098f14 100644 --- a/docs/api/paddle/cross_cn.rst +++ b/docs/api/paddle/cross_cn.rst @@ -24,4 +24,4 @@ cross 代码示例 :::::::::: -COPY-FROM: paddle.cross \ No newline at end of file +COPY-FROM: paddle.cross diff --git a/docs/api/paddle/cumprod_cn.rst b/docs/api/paddle/cumprod_cn.rst index e2ba206d195..a897aa6971e 100644 --- a/docs/api/paddle/cumprod_cn.rst +++ b/docs/api/paddle/cumprod_cn.rst @@ -25,4 +25,4 @@ cumprod 代码示例 :::::::::: -COPY-FROM: paddle.cumprod \ No newline at end of file +COPY-FROM: paddle.cumprod diff --git a/docs/api/paddle/cumsum_cn.rst b/docs/api/paddle/cumsum_cn.rst index e8f4bf88768..fe93cdcce47 100644 --- a/docs/api/paddle/cumsum_cn.rst +++ b/docs/api/paddle/cumsum_cn.rst @@ -25,4 +25,4 @@ cumsum 代码示例 :::::::::: -COPY-FROM: paddle.cumsum \ No newline at end of file +COPY-FROM: paddle.cumsum diff --git a/docs/api/paddle/device/cuda/Stream_cn.rst b/docs/api/paddle/device/cuda/Stream_cn.rst index ff647b3c9b7..103be004b32 100644 --- a/docs/api/paddle/device/cuda/Stream_cn.rst +++ b/docs/api/paddle/device/cuda/Stream_cn.rst @@ -121,4 +121,3 @@ record_event(event=None) import paddle s = paddle.device.cuda.Stream(paddle.CUDAPlace(0), 1) event = s.record_event() - diff --git a/docs/api/paddle/device/cuda/current_stream_cn.rst b/docs/api/paddle/device/cuda/current_stream_cn.rst index 4fe6a982aa8..3f3f33cc272 100644 --- a/docs/api/paddle/device/cuda/current_stream_cn.rst +++ b/docs/api/paddle/device/cuda/current_stream_cn.rst @@ 
-20,4 +20,3 @@ current_stream 代码示例 :::::::::::: COPY-FROM: paddle.device.cuda.current_stream - diff --git a/docs/api/paddle/device/cuda/device_count_cn.rst b/docs/api/paddle/device/cuda/device_count_cn.rst index 8acc0ae2171..0bbd767529b 100644 --- a/docs/api/paddle/device/cuda/device_count_cn.rst +++ b/docs/api/paddle/device/cuda/device_count_cn.rst @@ -15,4 +15,4 @@ device_count 代码示例 :::::::::::: -COPY-FROM: paddle.device.cuda.device_count \ No newline at end of file +COPY-FROM: paddle.device.cuda.device_count diff --git a/docs/api/paddle/device/cuda/empty_cache_cn.rst b/docs/api/paddle/device/cuda/empty_cache_cn.rst index e8b18a498be..893d1282e60 100644 --- a/docs/api/paddle/device/cuda/empty_cache_cn.rst +++ b/docs/api/paddle/device/cuda/empty_cache_cn.rst @@ -10,4 +10,4 @@ empty_cache 代码示例 ::::::::: -COPY-FROM: paddle.device.cuda.empty_cache \ No newline at end of file +COPY-FROM: paddle.device.cuda.empty_cache diff --git a/docs/api/paddle/device/cuda/get_device_properties_cn.rst b/docs/api/paddle/device/cuda/get_device_properties_cn.rst index ad036312074..8f2e3bf6e0f 100644 --- a/docs/api/paddle/device/cuda/get_device_properties_cn.rst +++ b/docs/api/paddle/device/cuda/get_device_properties_cn.rst @@ -24,5 +24,3 @@ _gpuDeviceProperties:设备属性,包括标识设备的ASCII字符串、设 :::::::: COPY-FROM: paddle.device.cuda.get_device_properties - - diff --git a/docs/api/paddle/device/cuda/max_memory_allocated_cn.rst b/docs/api/paddle/device/cuda/max_memory_allocated_cn.rst index b89f66016db..948051f9547 100644 --- a/docs/api/paddle/device/cuda/max_memory_allocated_cn.rst +++ b/docs/api/paddle/device/cuda/max_memory_allocated_cn.rst @@ -26,5 +26,3 @@ max_memory_allocated :::::::: COPY-FROM: paddle.device.cuda.max_memory_allocated - - diff --git a/docs/api/paddle/device/cuda/max_memory_reserved_cn.rst b/docs/api/paddle/device/cuda/max_memory_reserved_cn.rst index 1deed435a53..a8fec1f7fc5 100644 --- a/docs/api/paddle/device/cuda/max_memory_reserved_cn.rst +++ 
b/docs/api/paddle/device/cuda/max_memory_reserved_cn.rst @@ -23,5 +23,3 @@ max_memory_reserved :::::::: COPY-FROM: paddle.device.cuda.max_memory_reserved - - diff --git a/docs/api/paddle/device/cuda/memory_allocated_cn.rst b/docs/api/paddle/device/cuda/memory_allocated_cn.rst index f26d77bf2a0..a3c328e82ed 100644 --- a/docs/api/paddle/device/cuda/memory_allocated_cn.rst +++ b/docs/api/paddle/device/cuda/memory_allocated_cn.rst @@ -26,5 +26,3 @@ memory_allocated :::::::: COPY-FROM: paddle.device.cuda.memory_allocated - - diff --git a/docs/api/paddle/device/cuda/memory_reserved_cn.rst b/docs/api/paddle/device/cuda/memory_reserved_cn.rst index 4ce160c48f0..5789a6d268c 100644 --- a/docs/api/paddle/device/cuda/memory_reserved_cn.rst +++ b/docs/api/paddle/device/cuda/memory_reserved_cn.rst @@ -23,5 +23,3 @@ memory_reserved :::::::: COPY-FROM: paddle.device.cuda.memory_reserved - - diff --git a/docs/api/paddle/device/cuda/synchronize_cn.rst b/docs/api/paddle/device/cuda/synchronize_cn.rst index 3c2a64989d7..e820d6cd061 100644 --- a/docs/api/paddle/device/cuda/synchronize_cn.rst +++ b/docs/api/paddle/device/cuda/synchronize_cn.rst @@ -20,4 +20,3 @@ None 代码示例 :::::::::::: COPY-FROM: paddle.device.cuda.synchronize - diff --git a/docs/api/paddle/device/get_cudnn_version_cn.rst b/docs/api/paddle/device/get_cudnn_version_cn.rst index 23130e722b1..6a44d3474f0 100644 --- a/docs/api/paddle/device/get_cudnn_version_cn.rst +++ b/docs/api/paddle/device/get_cudnn_version_cn.rst @@ -15,4 +15,4 @@ get_cudnn_version 代码示例 :::::::::::: -COPY-FROM: paddle.device.get_cudnn_version \ No newline at end of file +COPY-FROM: paddle.device.get_cudnn_version diff --git a/docs/api/paddle/device/get_device_cn.rst b/docs/api/paddle/device/get_device_cn.rst index 906d7ab6c21..07504eec87a 100644 --- a/docs/api/paddle/device/get_device_cn.rst +++ b/docs/api/paddle/device/get_device_cn.rst @@ -15,4 +15,4 @@ get_device 代码示例 :::::::::::: -COPY-FROM: paddle.device.get_device \ No newline at end of file 
+COPY-FROM: paddle.device.get_device diff --git a/docs/api/paddle/device/is_compiled_with_cinn_cn.rst b/docs/api/paddle/device/is_compiled_with_cinn_cn.rst index 2ac3c9f9e71..76539f7ab48 100644 --- a/docs/api/paddle/device/is_compiled_with_cinn_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_cinn_cn.rst @@ -14,4 +14,4 @@ bool,支持CINN则为True,否则为False。 代码示例 :::::::::::: -COPY-FROM: paddle.device.is_compiled_with_cinn \ No newline at end of file +COPY-FROM: paddle.device.is_compiled_with_cinn diff --git a/docs/api/paddle/device/is_compiled_with_cuda_cn.rst b/docs/api/paddle/device/is_compiled_with_cuda_cn.rst index f99cbd9a0e3..d539af1b974 100644 --- a/docs/api/paddle/device/is_compiled_with_cuda_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_cuda_cn.rst @@ -17,4 +17,4 @@ bool,支持GPU则为True,否则为False。 代码示例 :::::::::::: -COPY-FROM: paddle.device.is_compiled_with_cuda \ No newline at end of file +COPY-FROM: paddle.device.is_compiled_with_cuda diff --git a/docs/api/paddle/device/is_compiled_with_npu_cn.rst b/docs/api/paddle/device/is_compiled_with_npu_cn.rst index 7f653236d5e..37fe1ff2b48 100644 --- a/docs/api/paddle/device/is_compiled_with_npu_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_npu_cn.rst @@ -15,4 +15,3 @@ bool,支持NPU则为True,否则为False。 :::::::::::: COPY-FROM: paddle.device.is_compiled_with_npu - diff --git a/docs/api/paddle/device/is_compiled_with_rocm_cn.rst b/docs/api/paddle/device/is_compiled_with_rocm_cn.rst index c75fb1beec0..923058e339e 100644 --- a/docs/api/paddle/device/is_compiled_with_rocm_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_rocm_cn.rst @@ -17,4 +17,4 @@ bool,支持GPU(ROCm)则为True,否则为False。 代码示例 :::::::::::: -COPY-FROM: paddle.device.is_compiled_with_rocm \ No newline at end of file +COPY-FROM: paddle.device.is_compiled_with_rocm diff --git a/docs/api/paddle/device/set_device_cn.rst b/docs/api/paddle/device/set_device_cn.rst index 9121045ad41..bb42704f83e 100644 --- a/docs/api/paddle/device/set_device_cn.rst +++ 
b/docs/api/paddle/device/set_device_cn.rst @@ -20,4 +20,4 @@ Place,设置的Place。 代码示例 :::::::::::: -COPY-FROM: paddle.device.set_device \ No newline at end of file +COPY-FROM: paddle.device.set_device diff --git a/docs/api/paddle/diag_cn.rst b/docs/api/paddle/diag_cn.rst index 17409b6db82..7f293b479ff 100644 --- a/docs/api/paddle/diag_cn.rst +++ b/docs/api/paddle/diag_cn.rst @@ -75,8 +75,3 @@ diag y = paddle.diag(x, offset=-1) print(y) # [4] - - - - - diff --git a/docs/api/paddle/diagflat_cn.rst b/docs/api/paddle/diagflat_cn.rst index 57e5be14b81..de7fc5fa7b8 100644 --- a/docs/api/paddle/diagflat_cn.rst +++ b/docs/api/paddle/diagflat_cn.rst @@ -86,8 +86,3 @@ diagflat # [0 2 0 0 0] # [0 0 3 0 0] # [0 0 0 4 0]] - - - - - diff --git a/docs/api/paddle/diagonal_cn.rst b/docs/api/paddle/diagonal_cn.rst index b521f0c7a78..1e13c4612b0 100644 --- a/docs/api/paddle/diagonal_cn.rst +++ b/docs/api/paddle/diagonal_cn.rst @@ -34,4 +34,4 @@ diagonal 代码示例 ::::::::: -COPY-FROM: paddle.diagonal \ No newline at end of file +COPY-FROM: paddle.diagonal diff --git a/docs/api/paddle/digamma_cn.rst b/docs/api/paddle/digamma_cn.rst index 7da046cfa7a..d0f95e0d213 100644 --- a/docs/api/paddle/digamma_cn.rst +++ b/docs/api/paddle/digamma_cn.rst @@ -24,4 +24,4 @@ digamma 代码示例 ::::::::: -COPY-FROM: paddle.digamma \ No newline at end of file +COPY-FROM: paddle.digamma diff --git a/docs/api/paddle/disable_signal_handler_cn.rst b/docs/api/paddle/disable_signal_handler_cn.rst index 4eca266875a..70d789a9d77 100644 --- a/docs/api/paddle/disable_signal_handler_cn.rst +++ b/docs/api/paddle/disable_signal_handler_cn.rst @@ -24,4 +24,4 @@ Paddle默认在C++层面注册了系统信号处理方法,用于优化报错 代码示例 ::::::::: -COPY-FROM: paddle.disable_signal_handler \ No newline at end of file +COPY-FROM: paddle.disable_signal_handler diff --git a/docs/api/paddle/disable_static_cn.rst b/docs/api/paddle/disable_static_cn.rst index 13de52aabf6..41ca0020c46 100644 --- a/docs/api/paddle/disable_static_cn.rst +++ b/docs/api/paddle/disable_static_cn.rst 
@@ -23,4 +23,4 @@ disable_static 代码示例 :::::::::::: -COPY-FROM: paddle.disable_static \ No newline at end of file +COPY-FROM: paddle.disable_static diff --git a/docs/api/paddle/dist_cn.rst b/docs/api/paddle/dist_cn.rst index c07b9e862dd..f0cf899f7ed 100644 --- a/docs/api/paddle/dist_cn.rst +++ b/docs/api/paddle/dist_cn.rst @@ -60,4 +60,4 @@ z (4-D Tensor): 8 x 7 x 6 x 5 代码示例 :::::::::::: -COPY-FROM: paddle.dist \ No newline at end of file +COPY-FROM: paddle.dist diff --git a/docs/api/paddle/distributed/InMemoryDataset_cn.rst b/docs/api/paddle/distributed/InMemoryDataset_cn.rst index b6d5487073c..5dd22b996bd 100644 --- a/docs/api/paddle/distributed/InMemoryDataset_cn.rst +++ b/docs/api/paddle/distributed/InMemoryDataset_cn.rst @@ -512,6 +512,3 @@ slots_shuffle(slots) dataset.set_filelist(filelist) dataset.load_into_memory() dataset.slots_shuffle(['slot1']) - - - diff --git a/docs/api/paddle/distributed/all_gather_cn.rst b/docs/api/paddle/distributed/all_gather_cn.rst index ad79dfd398e..6665fc10239 100644 --- a/docs/api/paddle/distributed/all_gather_cn.rst +++ b/docs/api/paddle/distributed/all_gather_cn.rst @@ -27,4 +27,4 @@ all_gather 代码示例 ::::::::: -COPY-FROM: paddle.distributed.all_gather \ No newline at end of file +COPY-FROM: paddle.distributed.all_gather diff --git a/docs/api/paddle/distributed/all_reduce_cn.rst b/docs/api/paddle/distributed/all_reduce_cn.rst index b78d5336d7b..d80f169a460 100644 --- a/docs/api/paddle/distributed/all_reduce_cn.rst +++ b/docs/api/paddle/distributed/all_reduce_cn.rst @@ -27,4 +27,4 @@ all_reduce 代码示例 ::::::::: -COPY-FROM: paddle.distributed.all_reduce \ No newline at end of file +COPY-FROM: paddle.distributed.all_reduce diff --git a/docs/api/paddle/distributed/alltoall_cn.rst b/docs/api/paddle/distributed/alltoall_cn.rst index 5599309908c..ba217b0dd10 100644 --- a/docs/api/paddle/distributed/alltoall_cn.rst +++ b/docs/api/paddle/distributed/alltoall_cn.rst @@ -29,4 +29,4 @@ GPU1卡的out_tensor_list包含0_1和1_1。 代码示例 ::::::::: 
-COPY-FROM: paddle.distributed.alltoall \ No newline at end of file +COPY-FROM: paddle.distributed.alltoall diff --git a/docs/api/paddle/distributed/barrier_cn.rst b/docs/api/paddle/distributed/barrier_cn.rst index b1d07e5d69a..8312fe6952e 100644 --- a/docs/api/paddle/distributed/barrier_cn.rst +++ b/docs/api/paddle/distributed/barrier_cn.rst @@ -18,4 +18,4 @@ barrier 代码示例 ::::::::: -COPY-FROM: paddle.distributed.barrier \ No newline at end of file +COPY-FROM: paddle.distributed.barrier diff --git a/docs/api/paddle/distributed/broadcast_cn.rst b/docs/api/paddle/distributed/broadcast_cn.rst index ff047b7f31f..72f8c054975 100644 --- a/docs/api/paddle/distributed/broadcast_cn.rst +++ b/docs/api/paddle/distributed/broadcast_cn.rst @@ -26,4 +26,4 @@ broadcast 代码示例 ::::::::: -COPY-FROM: paddle.distributed.broadcast \ No newline at end of file +COPY-FROM: paddle.distributed.broadcast diff --git a/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst b/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst index 9a0e7f4b205..bc9aca4238a 100755 --- a/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst +++ b/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst @@ -473,4 +473,3 @@ sharding_configs "sharding_degree": 2, "gradient_merge_acc_step": 4, } - diff --git a/docs/api/paddle/distributed/fleet/Fleet_cn.rst b/docs/api/paddle/distributed/fleet/Fleet_cn.rst index fa7226d1041..45f8f87acdd 100644 --- a/docs/api/paddle/distributed/fleet/Fleet_cn.rst +++ b/docs/api/paddle/distributed/fleet/Fleet_cn.rst @@ -788,5 +788,3 @@ minimize(loss, startup_program=None, parameter_list=None, no_grad_set=None) :::::::::::: util ''''''''' - - diff --git a/docs/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst b/docs/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst index e14421aafdf..49f95d0b0c5 100644 --- a/docs/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst +++ b/docs/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst @@ -50,4 
+50,3 @@ string import paddle.distributed.fleet as fleet role = fleet.PaddleCloudRoleMaker(is_collective=False) role.to_string() - diff --git a/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst b/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst index 67cc12c3110..53246b552c3 100644 --- a/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst +++ b/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst @@ -50,8 +50,3 @@ string server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"]) role.to_string() - - - - - diff --git a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst index c963bc39b0c..b7d6036a709 100644 --- a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst @@ -308,6 +308,3 @@ list_dirs(fs_path) client = HDFSClient(hadoop_home, configs) subdirs = client.list_dirs("hdfs:/test_hdfs_client") - - - diff --git a/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst b/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst index 62de09a312b..fef496fd4f2 100644 --- a/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst @@ -223,4 +223,4 @@ list_dirs(fs_path) from paddle.distributed.fleet.utils import LocalFS client = LocalFS() - subdirs = client.list_dirs("./") \ No newline at end of file + subdirs = client.list_dirs("./") diff --git a/docs/api/paddle/distributed/fleet/utils/recompute_cn.rst b/docs/api/paddle/distributed/fleet/utils/recompute_cn.rst index 6269e5810f0..d3d197cd199 100644 --- a/docs/api/paddle/distributed/fleet/utils/recompute_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/recompute_cn.rst @@ -21,4 +21,4 @@ function作用在输入的输出 代码示例 ::::::::: -COPY-FROM: paddle.distributed.fleet.utils.recompute \ No newline at end of file +COPY-FROM: paddle.distributed.fleet.utils.recompute diff --git 
a/docs/api/paddle/distributed/get_group_cn.rst b/docs/api/paddle/distributed/get_group_cn.rst index 4347441e62f..2f527ba9cf0 100644 --- a/docs/api/paddle/distributed/get_group_cn.rst +++ b/docs/api/paddle/distributed/get_group_cn.rst @@ -18,4 +18,3 @@ Group 通信组实例 代码示例 :::::::::::: COPY-FROM: paddle.distributed.get_group - diff --git a/docs/api/paddle/distributed/get_world_size_cn.rst b/docs/api/paddle/distributed/get_world_size_cn.rst index 2edb6a75b72..5fadb7e882b 100644 --- a/docs/api/paddle/distributed/get_world_size_cn.rst +++ b/docs/api/paddle/distributed/get_world_size_cn.rst @@ -15,4 +15,4 @@ get_world_size 代码示例 ::::::::: -COPY-FROM: paddle.distributed.get_world_size \ No newline at end of file +COPY-FROM: paddle.distributed.get_world_size diff --git a/docs/api/paddle/distributed/gloo_barrier_cn.rst b/docs/api/paddle/distributed/gloo_barrier_cn.rst index 256177d0284..2e4f7906659 100644 --- a/docs/api/paddle/distributed/gloo_barrier_cn.rst +++ b/docs/api/paddle/distributed/gloo_barrier_cn.rst @@ -17,4 +17,4 @@ gloo_barrier 代码示例 ::::::::: -COPY-FROM: paddle.distributed.gloo_barrier \ No newline at end of file +COPY-FROM: paddle.distributed.gloo_barrier diff --git a/docs/api/paddle/distributed/gloo_init_parallel_env_cn.rst b/docs/api/paddle/distributed/gloo_init_parallel_env_cn.rst index 62349d646b9..40b996c34ff 100644 --- a/docs/api/paddle/distributed/gloo_init_parallel_env_cn.rst +++ b/docs/api/paddle/distributed/gloo_init_parallel_env_cn.rst @@ -18,4 +18,4 @@ gloo_init_parallel_env 代码示例 ::::::::: -COPY-FROM: paddle.distributed.gloo_init_parallel_env \ No newline at end of file +COPY-FROM: paddle.distributed.gloo_init_parallel_env diff --git a/docs/api/paddle/distributed/gloo_release_cn.rst b/docs/api/paddle/distributed/gloo_release_cn.rst index 66ca4854772..cc01b4c3bf7 100644 --- a/docs/api/paddle/distributed/gloo_release_cn.rst +++ b/docs/api/paddle/distributed/gloo_release_cn.rst @@ -17,4 +17,4 @@ gloo_release 代码示例 ::::::::: -COPY-FROM: 
paddle.distributed.gloo_release \ No newline at end of file +COPY-FROM: paddle.distributed.gloo_release diff --git a/docs/api/paddle/distributed/init_parallel_env_cn.rst b/docs/api/paddle/distributed/init_parallel_env_cn.rst index 0a3cbe1ed02..dfd9ab0d78c 100644 --- a/docs/api/paddle/distributed/init_parallel_env_cn.rst +++ b/docs/api/paddle/distributed/init_parallel_env_cn.rst @@ -16,4 +16,4 @@ init_parallel_env 代码示例 ::::::::: -COPY-FROM: paddle.distributed.init_parallel_env \ No newline at end of file +COPY-FROM: paddle.distributed.init_parallel_env diff --git a/docs/api/paddle/distributed/irecv_cn.rst b/docs/api/paddle/distributed/irecv_cn.rst index af82ae06164..742b13f654c 100644 --- a/docs/api/paddle/distributed/irecv_cn.rst +++ b/docs/api/paddle/distributed/irecv_cn.rst @@ -24,4 +24,4 @@ irecv 代码示例 ::::::::: -COPY-FROM: paddle.distributed.irecv \ No newline at end of file +COPY-FROM: paddle.distributed.irecv diff --git a/docs/api/paddle/distributed/is_initialized_cn.rst b/docs/api/paddle/distributed/is_initialized_cn.rst index 5aceafb0ade..d54cbb8945c 100644 --- a/docs/api/paddle/distributed/is_initialized_cn.rst +++ b/docs/api/paddle/distributed/is_initialized_cn.rst @@ -18,4 +18,4 @@ is_initialized 代码示例 ::::::::: -COPY-FROM: paddle.distributed.is_initialized \ No newline at end of file +COPY-FROM: paddle.distributed.is_initialized diff --git a/docs/api/paddle/distributed/isend_cn.rst b/docs/api/paddle/distributed/isend_cn.rst index 3d63fbbd3b7..cafe02f426c 100644 --- a/docs/api/paddle/distributed/isend_cn.rst +++ b/docs/api/paddle/distributed/isend_cn.rst @@ -25,4 +25,4 @@ isend 代码示例 ::::::::: -COPY-FROM: paddle.distributed.isend \ No newline at end of file +COPY-FROM: paddle.distributed.isend diff --git a/docs/api/paddle/distributed/new_group_cn.rst b/docs/api/paddle/distributed/new_group_cn.rst index 512507a0d3e..b60dfd9e5e7 100644 --- a/docs/api/paddle/distributed/new_group_cn.rst +++ b/docs/api/paddle/distributed/new_group_cn.rst @@ -22,4 +22,3 @@ 
Group:新建的通信组对象 代码示例 :::::::::::: COPY-FROM: paddle.distributed.new_group - diff --git a/docs/api/paddle/distributed/reduce_cn.rst b/docs/api/paddle/distributed/reduce_cn.rst index 989c0428d17..2c520e12ad5 100644 --- a/docs/api/paddle/distributed/reduce_cn.rst +++ b/docs/api/paddle/distributed/reduce_cn.rst @@ -28,4 +28,4 @@ reduce 代码示例 ::::::::: -COPY-FROM: paddle.distributed.reduce \ No newline at end of file +COPY-FROM: paddle.distributed.reduce diff --git a/docs/api/paddle/distributed/reduce_scatter_cn.rst b/docs/api/paddle/distributed/reduce_scatter_cn.rst index 6a6428bba03..cb89b78384c 100644 --- a/docs/api/paddle/distributed/reduce_scatter_cn.rst +++ b/docs/api/paddle/distributed/reduce_scatter_cn.rst @@ -26,4 +26,4 @@ reduce_scatter 代码示例 ::::::::: -COPY-FROM: paddle.distributed.reduce_scatter \ No newline at end of file +COPY-FROM: paddle.distributed.reduce_scatter diff --git a/docs/api/paddle/distributed/scatter_cn.rst b/docs/api/paddle/distributed/scatter_cn.rst index 5a219e0a039..d56bba74371 100644 --- a/docs/api/paddle/distributed/scatter_cn.rst +++ b/docs/api/paddle/distributed/scatter_cn.rst @@ -28,4 +28,4 @@ scatter 代码示例 ::::::::: -COPY-FROM: paddle.distributed.scatter \ No newline at end of file +COPY-FROM: paddle.distributed.scatter diff --git a/docs/api/paddle/distributed/send_cn.rst b/docs/api/paddle/distributed/send_cn.rst index af41f55216d..5cad655da6f 100644 --- a/docs/api/paddle/distributed/send_cn.rst +++ b/docs/api/paddle/distributed/send_cn.rst @@ -21,4 +21,4 @@ send 代码示例 ::::::::: -COPY-FROM: paddle.distributed.send \ No newline at end of file +COPY-FROM: paddle.distributed.send diff --git a/docs/api/paddle/distributed/spawn_cn.rst b/docs/api/paddle/distributed/spawn_cn.rst index 43bffd95aa7..9a2e75c0771 100644 --- a/docs/api/paddle/distributed/spawn_cn.rst +++ b/docs/api/paddle/distributed/spawn_cn.rst @@ -25,4 +25,4 @@ spawn 代码示例 ::::::::: -COPY-FROM: paddle.distributed.spawn \ No newline at end of file +COPY-FROM: 
paddle.distributed.spawn diff --git a/docs/api/paddle/distributed/split_cn.rst b/docs/api/paddle/distributed/split_cn.rst index 2e00b3849de..65c6e4827ad 100644 --- a/docs/api/paddle/distributed/split_cn.rst +++ b/docs/api/paddle/distributed/split_cn.rst @@ -87,4 +87,4 @@ Tensor 代码示例 ::::::::: -COPY-FROM: paddle.distributed.split \ No newline at end of file +COPY-FROM: paddle.distributed.split diff --git a/docs/api/paddle/distributed/utils/global_gather_cn.rst b/docs/api/paddle/distributed/utils/global_gather_cn.rst index dcb09e6a1ff..c8e708d0273 100644 --- a/docs/api/paddle/distributed/utils/global_gather_cn.rst +++ b/docs/api/paddle/distributed/utils/global_gather_cn.rst @@ -47,4 +47,4 @@ Tensor,从所有expert接收的数据。 代码示例 ::::::::: -COPY-FROM: paddle.distributed.utils.global_gather \ No newline at end of file +COPY-FROM: paddle.distributed.utils.global_gather diff --git a/docs/api/paddle/distributed/utils/global_scatter_cn.rst b/docs/api/paddle/distributed/utils/global_scatter_cn.rst index 1a08d20a54a..7f6d4b3486c 100644 --- a/docs/api/paddle/distributed/utils/global_scatter_cn.rst +++ b/docs/api/paddle/distributed/utils/global_scatter_cn.rst @@ -52,4 +52,4 @@ Tensor,从所有expert接收的数据,按照每个expert排列。 代码示例 ::::::::: -COPY-FROM: paddle.distributed.utils.global_scatter \ No newline at end of file +COPY-FROM: paddle.distributed.utils.global_scatter diff --git a/docs/api/paddle/distribution/AbsTransform_cn.rst b/docs/api/paddle/distribution/AbsTransform_cn.rst index 38deeb5514d..1e1b8dbcbfb 100644 --- a/docs/api/paddle/distribution/AbsTransform_cn.rst +++ b/docs/api/paddle/distribution/AbsTransform_cn.rst @@ -110,4 +110,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/AffineTransform_cn.rst b/docs/api/paddle/distribution/AffineTransform_cn.rst index 10ab3739b65..a9aa7d8223b 100644 --- a/docs/api/paddle/distribution/AffineTransform_cn.rst +++ b/docs/api/paddle/distribution/AffineTransform_cn.rst @@ -108,4 +108,3 @@ 
inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/Categorical_cn.rst b/docs/api/paddle/distribution/Categorical_cn.rst index 9e4f638cdaf..b323c735856 100644 --- a/docs/api/paddle/distribution/Categorical_cn.rst +++ b/docs/api/paddle/distribution/Categorical_cn.rst @@ -232,4 +232,3 @@ log_prob(value) value = paddle.to_tensor([2,1,3]) cat.log_prob(value) # [-5.10271 -2.22287 -1.31061] - diff --git a/docs/api/paddle/distribution/ChainTransform_cn.rst b/docs/api/paddle/distribution/ChainTransform_cn.rst index 432a6160fa9..5b1ec62cfc4 100644 --- a/docs/api/paddle/distribution/ChainTransform_cn.rst +++ b/docs/api/paddle/distribution/ChainTransform_cn.rst @@ -109,4 +109,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/ExpTransform_cn.rst b/docs/api/paddle/distribution/ExpTransform_cn.rst index 83ceb50d9fa..9704fd50147 100644 --- a/docs/api/paddle/distribution/ExpTransform_cn.rst +++ b/docs/api/paddle/distribution/ExpTransform_cn.rst @@ -103,4 +103,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/ExponentialFamily_cn.rst b/docs/api/paddle/distribution/ExponentialFamily_cn.rst index ca9d82c2bcd..8ee1df192f1 100644 --- a/docs/api/paddle/distribution/ExponentialFamily_cn.rst +++ b/docs/api/paddle/distribution/ExponentialFamily_cn.rst @@ -16,12 +16,3 @@ ExponentialFamily 归一化函数。 属于指数型分布族的概率分布列表参考 https://en.wikipedia.org/wiki/Exponential_family - - - - - - - - - diff --git a/docs/api/paddle/distribution/IndependentTransform_cn.rst b/docs/api/paddle/distribution/IndependentTransform_cn.rst index 57122d7ebe8..678f7a5a1f6 100644 --- a/docs/api/paddle/distribution/IndependentTransform_cn.rst +++ b/docs/api/paddle/distribution/IndependentTransform_cn.rst @@ -121,4 +121,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/Multinomial_cn.rst 
b/docs/api/paddle/distribution/Multinomial_cn.rst index 2ee96cfeb75..1f0afc5df8c 100644 --- a/docs/api/paddle/distribution/Multinomial_cn.rst +++ b/docs/api/paddle/distribution/Multinomial_cn.rst @@ -93,5 +93,3 @@ sample(shape=()) **返回** - Tensor:样本数据。 - - diff --git a/docs/api/paddle/distribution/Overview_cn.rst b/docs/api/paddle/distribution/Overview_cn.rst index cd968e19997..106eaa4cd39 100644 --- a/docs/api/paddle/distribution/Overview_cn.rst +++ b/docs/api/paddle/distribution/Overview_cn.rst @@ -64,4 +64,4 @@ KL散度相关API :widths: 10, 30 " :ref:`register_kl ` ", "注册KL散度" - " :ref:`kl_divergence ` ", "计算KL散度" \ No newline at end of file + " :ref:`kl_divergence ` ", "计算KL散度" diff --git a/docs/api/paddle/distribution/PowerTransform_cn.rst b/docs/api/paddle/distribution/PowerTransform_cn.rst index 18a39ab9a6f..6b7148ef612 100644 --- a/docs/api/paddle/distribution/PowerTransform_cn.rst +++ b/docs/api/paddle/distribution/PowerTransform_cn.rst @@ -107,4 +107,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/ReshapeTransform_cn.rst b/docs/api/paddle/distribution/ReshapeTransform_cn.rst index fb728bb1ea0..cb2ce1391dd 100644 --- a/docs/api/paddle/distribution/ReshapeTransform_cn.rst +++ b/docs/api/paddle/distribution/ReshapeTransform_cn.rst @@ -110,4 +110,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/SigmoidTransform_cn.rst b/docs/api/paddle/distribution/SigmoidTransform_cn.rst index 8a569f21fd9..54e42ba3496 100644 --- a/docs/api/paddle/distribution/SigmoidTransform_cn.rst +++ b/docs/api/paddle/distribution/SigmoidTransform_cn.rst @@ -102,4 +102,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/SoftmaxTransform_cn.rst b/docs/api/paddle/distribution/SoftmaxTransform_cn.rst index 33536d9bc48..d5acd407e77 100644 --- a/docs/api/paddle/distribution/SoftmaxTransform_cn.rst +++ 
b/docs/api/paddle/distribution/SoftmaxTransform_cn.rst @@ -106,4 +106,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/StackTransform_cn.rst b/docs/api/paddle/distribution/StackTransform_cn.rst index 771c9c1e59b..d57a12ac265 100644 --- a/docs/api/paddle/distribution/StackTransform_cn.rst +++ b/docs/api/paddle/distribution/StackTransform_cn.rst @@ -107,4 +107,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/StickBreakingTransform_cn.rst b/docs/api/paddle/distribution/StickBreakingTransform_cn.rst index db068a3c3cd..5cbb0037257 100644 --- a/docs/api/paddle/distribution/StickBreakingTransform_cn.rst +++ b/docs/api/paddle/distribution/StickBreakingTransform_cn.rst @@ -102,4 +102,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/TanhTransform_cn.rst b/docs/api/paddle/distribution/TanhTransform_cn.rst index ffdcd6db5e8..e76eac26bb5 100644 --- a/docs/api/paddle/distribution/TanhTransform_cn.rst +++ b/docs/api/paddle/distribution/TanhTransform_cn.rst @@ -103,4 +103,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/Transform_cn.rst b/docs/api/paddle/distribution/Transform_cn.rst index dcddb07eb27..dfe2ea09fba 100644 --- a/docs/api/paddle/distribution/Transform_cn.rst +++ b/docs/api/paddle/distribution/Transform_cn.rst @@ -155,4 +155,3 @@ inverse_shape(shape) **返回** - Sequence[int] - 逆变换输出的形状。 - diff --git a/docs/api/paddle/distribution/TransformedDistribution_cn.rst b/docs/api/paddle/distribution/TransformedDistribution_cn.rst index 8f3a2f8c5d8..0c6d1d1e903 100644 --- a/docs/api/paddle/distribution/TransformedDistribution_cn.rst +++ b/docs/api/paddle/distribution/TransformedDistribution_cn.rst @@ -62,5 +62,3 @@ sample(shape=()) **返回** - Tensor:样本数据。 - - diff --git a/docs/api/paddle/divide_cn.rst b/docs/api/paddle/divide_cn.rst index 
cf2b317aaae..d8a30ec0250 100644 --- a/docs/api/paddle/divide_cn.rst +++ b/docs/api/paddle/divide_cn.rst @@ -33,4 +33,4 @@ divide 代码示例 ::::::::: -COPY-FROM: paddle.divide \ No newline at end of file +COPY-FROM: paddle.divide diff --git a/docs/api/paddle/dot_cn.rst b/docs/api/paddle/dot_cn.rst index 7b029a585cd..a50569b59a3 100644 --- a/docs/api/paddle/dot_cn.rst +++ b/docs/api/paddle/dot_cn.rst @@ -29,4 +29,4 @@ dot 代码示例 ::::::::: -COPY-FROM: paddle.dot \ No newline at end of file +COPY-FROM: paddle.dot diff --git a/docs/api/paddle/einsum_cn.rst b/docs/api/paddle/einsum_cn.rst index cc84ac72143..f16a484d092 100644 --- a/docs/api/paddle/einsum_cn.rst +++ b/docs/api/paddle/einsum_cn.rst @@ -81,4 +81,4 @@ Einsum 求和过程理论上等价于如下四步,但实现中实际执行的 代码示例 ::::::::: -COPY-FROM: paddle.einsum \ No newline at end of file +COPY-FROM: paddle.einsum diff --git a/docs/api/paddle/empty_like_cn.rst b/docs/api/paddle/empty_like_cn.rst index 4b924632b40..5a61ad33273 100644 --- a/docs/api/paddle/empty_like_cn.rst +++ b/docs/api/paddle/empty_like_cn.rst @@ -22,4 +22,4 @@ empty_like 代码示例 :::::::::::: -COPY-FROM: paddle.empty_like \ No newline at end of file +COPY-FROM: paddle.empty_like diff --git a/docs/api/paddle/enable_static_cn.rst b/docs/api/paddle/enable_static_cn.rst index 061c72ec486..b88c49137c4 100644 --- a/docs/api/paddle/enable_static_cn.rst +++ b/docs/api/paddle/enable_static_cn.rst @@ -18,4 +18,4 @@ enable_static 代码示例 :::::::::::: -COPY-FROM: paddle.enable_static \ No newline at end of file +COPY-FROM: paddle.enable_static diff --git a/docs/api/paddle/equal_all_cn.rst b/docs/api/paddle/equal_all_cn.rst index 889873fe17e..4050dad0f43 100644 --- a/docs/api/paddle/equal_all_cn.rst +++ b/docs/api/paddle/equal_all_cn.rst @@ -25,4 +25,4 @@ equal_all 代码示例 :::::::::::: -COPY-FROM: paddle.equal_all \ No newline at end of file +COPY-FROM: paddle.equal_all diff --git a/docs/api/paddle/equal_cn.rst b/docs/api/paddle/equal_cn.rst index e1f51e740b1..738d788a128 100644 --- 
a/docs/api/paddle/equal_cn.rst +++ b/docs/api/paddle/equal_cn.rst @@ -25,4 +25,4 @@ equal 代码示例 :::::::::::: -COPY-FROM: paddle.equal \ No newline at end of file +COPY-FROM: paddle.equal diff --git a/docs/api/paddle/erf_cn.rst b/docs/api/paddle/erf_cn.rst index aebd3efe186..235d57e43a6 100644 --- a/docs/api/paddle/erf_cn.rst +++ b/docs/api/paddle/erf_cn.rst @@ -29,4 +29,4 @@ erf 代码示例 :::::::::::: -COPY-FROM: paddle.erf \ No newline at end of file +COPY-FROM: paddle.erf diff --git a/docs/api/paddle/erfinv__cn.rst b/docs/api/paddle/erfinv__cn.rst index 5de9f99f7fe..6f7ec9a9863 100644 --- a/docs/api/paddle/erfinv__cn.rst +++ b/docs/api/paddle/erfinv__cn.rst @@ -4,4 +4,4 @@ erfinv\_ ------------------------------- .. py:function:: paddle.erfinv_(x) -Inplace 版本的 :ref:`cn_api_paddle_tensor_erfinv` API,对输入 `x` 采用 Inplace 策略。 \ No newline at end of file +Inplace 版本的 :ref:`cn_api_paddle_tensor_erfinv` API,对输入 `x` 采用 Inplace 策略。 diff --git a/docs/api/paddle/erfinv_cn.rst b/docs/api/paddle/erfinv_cn.rst index af850d29dbe..28804eff72d 100644 --- a/docs/api/paddle/erfinv_cn.rst +++ b/docs/api/paddle/erfinv_cn.rst @@ -24,4 +24,4 @@ erfinv 代码示例 ::::::::: -COPY-FROM: paddle.erfinv \ No newline at end of file +COPY-FROM: paddle.erfinv diff --git a/docs/api/paddle/exp_cn.rst b/docs/api/paddle/exp_cn.rst index 3b00dfcde19..d710b6bf118 100644 --- a/docs/api/paddle/exp_cn.rst +++ b/docs/api/paddle/exp_cn.rst @@ -26,4 +26,4 @@ exp 代码示例 :::::::::::: -COPY-FROM: paddle.exp \ No newline at end of file +COPY-FROM: paddle.exp diff --git a/docs/api/paddle/expand_as_cn.rst b/docs/api/paddle/expand_as_cn.rst index 1e35413bdc8..7e96e66dc98 100644 --- a/docs/api/paddle/expand_as_cn.rst +++ b/docs/api/paddle/expand_as_cn.rst @@ -22,4 +22,3 @@ Tensor,数据类型与 ``x`` 相同。 ::::::::: COPY-FROM: paddle.expand_as - diff --git a/docs/api/paddle/expand_cn.rst b/docs/api/paddle/expand_cn.rst index b80e2d412f9..776e962c485 100644 --- a/docs/api/paddle/expand_cn.rst +++ b/docs/api/paddle/expand_cn.rst @@ -22,4 
+22,4 @@ expand 代码示例 ::::::::: -COPY-FROM: paddle.expand \ No newline at end of file +COPY-FROM: paddle.expand diff --git a/docs/api/paddle/expm1_cn.rst b/docs/api/paddle/expm1_cn.rst index 44c8970a02d..fd0d068fb73 100644 --- a/docs/api/paddle/expm1_cn.rst +++ b/docs/api/paddle/expm1_cn.rst @@ -27,4 +27,4 @@ expm1 代码示例 ::::::::: -COPY-FROM: paddle.expm1 \ No newline at end of file +COPY-FROM: paddle.expm1 diff --git a/docs/api/paddle/eye_cn.rst b/docs/api/paddle/eye_cn.rst index 68fef6cb980..990da3aa682 100644 --- a/docs/api/paddle/eye_cn.rst +++ b/docs/api/paddle/eye_cn.rst @@ -22,4 +22,4 @@ eye 代码示例 :::::::::::: -COPY-FROM: paddle.eye \ No newline at end of file +COPY-FROM: paddle.eye diff --git a/docs/api/paddle/flatten_cn.rst b/docs/api/paddle/flatten_cn.rst index 213e9072e97..dfa934df5a1 100644 --- a/docs/api/paddle/flatten_cn.rst +++ b/docs/api/paddle/flatten_cn.rst @@ -68,5 +68,3 @@ flatten # 在动态图模式下,输出out与输入img共享数据 img[0, 0, 0, 0] = -1 print(out[0, 0, 0]) # [-1] - - diff --git a/docs/api/paddle/flip_cn.rst b/docs/api/paddle/flip_cn.rst index 0a7f9adb8cb..8d3ee9a6f3a 100644 --- a/docs/api/paddle/flip_cn.rst +++ b/docs/api/paddle/flip_cn.rst @@ -26,4 +26,3 @@ Tensor,在指定 axis 上翻转后的 Tensor,与输入 x 数据类型相同 :::::::::::: COPY-FROM: paddle.flip - diff --git a/docs/api/paddle/floor_cn.rst b/docs/api/paddle/floor_cn.rst index 64a13457312..262e916c44a 100644 --- a/docs/api/paddle/floor_cn.rst +++ b/docs/api/paddle/floor_cn.rst @@ -26,4 +26,4 @@ floor 代码示例 :::::::::::: -COPY-FROM: paddle.floor \ No newline at end of file +COPY-FROM: paddle.floor diff --git a/docs/api/paddle/floor_divide_cn.rst b/docs/api/paddle/floor_divide_cn.rst index 517fd83a440..bf1e028c1b4 100644 --- a/docs/api/paddle/floor_divide_cn.rst +++ b/docs/api/paddle/floor_divide_cn.rst @@ -31,4 +31,4 @@ floor_divide 代码示例 ::::::::: -COPY-FROM: paddle.floor_divide \ No newline at end of file +COPY-FROM: paddle.floor_divide diff --git a/docs/api/paddle/fmax_cn.rst b/docs/api/paddle/fmax_cn.rst index 
e1e38d009a0..e3c665e8b43 100644 --- a/docs/api/paddle/fmax_cn.rst +++ b/docs/api/paddle/fmax_cn.rst @@ -30,4 +30,4 @@ fmax 代码示例 :::::::::: -COPY-FROM: paddle.fmax \ No newline at end of file +COPY-FROM: paddle.fmax diff --git a/docs/api/paddle/fmin_cn.rst b/docs/api/paddle/fmin_cn.rst index 49bccbfcece..4f4ac58f8fe 100644 --- a/docs/api/paddle/fmin_cn.rst +++ b/docs/api/paddle/fmin_cn.rst @@ -30,4 +30,4 @@ fmin 代码示例 :::::::::: -COPY-FROM: paddle.fmin \ No newline at end of file +COPY-FROM: paddle.fmin diff --git a/docs/api/paddle/frac_cn.rst b/docs/api/paddle/frac_cn.rst index 41f505fa404..0ece40fab53 100644 --- a/docs/api/paddle/frac_cn.rst +++ b/docs/api/paddle/frac_cn.rst @@ -22,4 +22,4 @@ frac 代码示例 ::::::::: -COPY-FROM: paddle.frac \ No newline at end of file +COPY-FROM: paddle.frac diff --git a/docs/api/paddle/full_like_cn.rst b/docs/api/paddle/full_like_cn.rst index b70f86ccf9c..2d1ae581dc5 100644 --- a/docs/api/paddle/full_like_cn.rst +++ b/docs/api/paddle/full_like_cn.rst @@ -31,4 +31,3 @@ full_like output = paddle.full_like(input, 2.0) # [[2. 2. 2.] # [2. 2. 
2.]] - diff --git a/docs/api/paddle/gather_cn.rst b/docs/api/paddle/gather_cn.rst index 350120c5313..817e665ed7a 100644 --- a/docs/api/paddle/gather_cn.rst +++ b/docs/api/paddle/gather_cn.rst @@ -50,4 +50,3 @@ gather index = paddle.to_tensor(index_1) output = paddle.gather(input, index, axis=0) # expected output: [[1,2],[3,4]] - diff --git a/docs/api/paddle/gather_nd_cn.rst b/docs/api/paddle/gather_nd_cn.rst index 3a93363b0e6..df91a8ef00c 100644 --- a/docs/api/paddle/gather_nd_cn.rst +++ b/docs/api/paddle/gather_nd_cn.rst @@ -66,5 +66,3 @@ shape 为index.shape[:-1] + x.shape[index.shape[-1]:]的Tensor,数据类型与 :::::::::::: COPY-FROM: paddle.gather_nd - - diff --git a/docs/api/paddle/get_cuda_rng_state_cn.rst b/docs/api/paddle/get_cuda_rng_state_cn.rst index c7f9062a78b..8c668f253fd 100644 --- a/docs/api/paddle/get_cuda_rng_state_cn.rst +++ b/docs/api/paddle/get_cuda_rng_state_cn.rst @@ -20,4 +20,4 @@ get_cuda_rng_state 代码示例 :::::::::::: -COPY-FROM: paddle.get_cuda_rng_state \ No newline at end of file +COPY-FROM: paddle.get_cuda_rng_state diff --git a/docs/api/paddle/get_default_dtype_cn.rst b/docs/api/paddle/get_default_dtype_cn.rst index 35774291885..cb082ae3735 100644 --- a/docs/api/paddle/get_default_dtype_cn.rst +++ b/docs/api/paddle/get_default_dtype_cn.rst @@ -22,4 +22,4 @@ get_default_dtype 代码示例 :::::::::::: -COPY-FROM: paddle.get_default_dtype \ No newline at end of file +COPY-FROM: paddle.get_default_dtype diff --git a/docs/api/paddle/get_flags_cn.rst b/docs/api/paddle/get_flags_cn.rst index 1949457da0f..8e5f94383d0 100644 --- a/docs/api/paddle/get_flags_cn.rst +++ b/docs/api/paddle/get_flags_cn.rst @@ -23,4 +23,4 @@ Flag 的值。 代码示例 :::::::::::: -COPY-FROM: paddle.get_flags \ No newline at end of file +COPY-FROM: paddle.get_flags diff --git a/docs/api/paddle/greater_equal_cn.rst b/docs/api/paddle/greater_equal_cn.rst index 0f293d53d0e..2484e3c03bc 100644 --- a/docs/api/paddle/greater_equal_cn.rst +++ b/docs/api/paddle/greater_equal_cn.rst @@ -26,4 +26,4 @@ 
greater_equal 代码示例 :::::::::::: -COPY-FROM: paddle.greater_equal \ No newline at end of file +COPY-FROM: paddle.greater_equal diff --git a/docs/api/paddle/greater_than_cn.rst b/docs/api/paddle/greater_than_cn.rst index 184309f05e8..d1facd59d2b 100644 --- a/docs/api/paddle/greater_than_cn.rst +++ b/docs/api/paddle/greater_than_cn.rst @@ -25,4 +25,4 @@ Tensor,输出结果,shape 和输入一致,Tensor 数据类型为 bool。 代码示例 ::::::::: -COPY-FROM: paddle.greater_than \ No newline at end of file +COPY-FROM: paddle.greater_than diff --git a/docs/api/paddle/histogram_cn.rst b/docs/api/paddle/histogram_cn.rst index 9a212e575d0..10ab4640f5c 100644 --- a/docs/api/paddle/histogram_cn.rst +++ b/docs/api/paddle/histogram_cn.rst @@ -23,4 +23,4 @@ Tensor,数据为 int64 类型,维度为(nbins,)。 代码示例 :::::::::::: -COPY-FROM: paddle.histogram \ No newline at end of file +COPY-FROM: paddle.histogram diff --git a/docs/api/paddle/imag_cn.rst b/docs/api/paddle/imag_cn.rst index b76c79793ea..0b569405f64 100644 --- a/docs/api/paddle/imag_cn.rst +++ b/docs/api/paddle/imag_cn.rst @@ -20,4 +20,4 @@ Tensor,包含原复数 Tensor 的虚部数值。 代码示例 :::::::::::: -COPY-FROM: paddle.imag \ No newline at end of file +COPY-FROM: paddle.imag diff --git a/docs/api/paddle/in_dynamic_mode_cn.rst b/docs/api/paddle/in_dynamic_mode_cn.rst index 74b3a7d83da..9051484c1bb 100644 --- a/docs/api/paddle/in_dynamic_mode_cn.rst +++ b/docs/api/paddle/in_dynamic_mode_cn.rst @@ -20,4 +20,4 @@ bool,如果paddle当前是在动态图模式运行,则返回 ``True``,否 代码示例 :::::::::::: -COPY-FROM: paddle.in_dynamic_mode \ No newline at end of file +COPY-FROM: paddle.in_dynamic_mode diff --git a/docs/api/paddle/increment_cn.rst b/docs/api/paddle/increment_cn.rst index f2466396971..d2a4376c82c 100644 --- a/docs/api/paddle/increment_cn.rst +++ b/docs/api/paddle/increment_cn.rst @@ -26,4 +26,4 @@ Tensor,形状和数据类型同输入 ``x`` 。 代码示例 :::::::::::: -COPY-FROM: paddle.increment \ No newline at end of file +COPY-FROM: paddle.increment diff --git a/docs/api/paddle/incubate/LookAhead_cn.rst 
b/docs/api/paddle/incubate/LookAhead_cn.rst index 98be8e9ce1b..7fb6d276372 100644 --- a/docs/api/paddle/incubate/LookAhead_cn.rst +++ b/docs/api/paddle/incubate/LookAhead_cn.rst @@ -151,4 +151,3 @@ tuple: tuple (optimize_ops, params_grads),由 ``minimize`` 添加的操作列 loss.backward() lookahead.minimize(loss) lookahead.clear_grad() - diff --git a/docs/api/paddle/incubate/autograd/Jacobian_cn.rst b/docs/api/paddle/incubate/autograd/Jacobian_cn.rst index ea22c2a3113..be8c4362428 100644 --- a/docs/api/paddle/incubate/autograd/Jacobian_cn.rst +++ b/docs/api/paddle/incubate/autograd/Jacobian_cn.rst @@ -53,4 +53,3 @@ Tensor经过展平并拼接后的形状为 ``(B, N)``,则最终输出雅可比 ::::::::: COPY-FROM: paddle.incubate.autograd.Jacobian - diff --git a/docs/api/paddle/incubate/autograd/disable_prim_cn.rst b/docs/api/paddle/incubate/autograd/disable_prim_cn.rst index b03b3ce1c57..f8997a964ee 100644 --- a/docs/api/paddle/incubate/autograd/disable_prim_cn.rst +++ b/docs/api/paddle/incubate/autograd/disable_prim_cn.rst @@ -18,4 +18,4 @@ disable_prim 代码示例 :::::::::::: -COPY-FROM: paddle.incubate.autograd.disable_prim \ No newline at end of file +COPY-FROM: paddle.incubate.autograd.disable_prim diff --git a/docs/api/paddle/incubate/autograd/enable_prim_cn.rst b/docs/api/paddle/incubate/autograd/enable_prim_cn.rst index 211c19c4d1f..462280d43b9 100644 --- a/docs/api/paddle/incubate/autograd/enable_prim_cn.rst +++ b/docs/api/paddle/incubate/autograd/enable_prim_cn.rst @@ -18,4 +18,4 @@ enable_prim 代码示例 :::::::::::: -COPY-FROM: paddle.incubate.autograd.enable_prim \ No newline at end of file +COPY-FROM: paddle.incubate.autograd.enable_prim diff --git a/docs/api/paddle/incubate/autograd/jvp_cn.rst b/docs/api/paddle/incubate/autograd/jvp_cn.rst index e332aea9531..f04d1be2ea2 100644 --- a/docs/api/paddle/incubate/autograd/jvp_cn.rst +++ b/docs/api/paddle/incubate/autograd/jvp_cn.rst @@ -30,4 +30,3 @@ jvp ::::::::: COPY-FROM: paddle.incubate.autograd.jvp - diff --git a/docs/api/paddle/incubate/autograd/prim2orig_cn.rst 
b/docs/api/paddle/incubate/autograd/prim2orig_cn.rst index d96866a9bc6..204dac419b9 100644 --- a/docs/api/paddle/incubate/autograd/prim2orig_cn.rst +++ b/docs/api/paddle/incubate/autograd/prim2orig_cn.rst @@ -24,4 +24,4 @@ prim2orig 代码示例 :::::::::::: -COPY-FROM: paddle.incubate.autograd.prim2orig \ No newline at end of file +COPY-FROM: paddle.incubate.autograd.prim2orig diff --git a/docs/api/paddle/incubate/autograd/prim_enabled_cn.rst b/docs/api/paddle/incubate/autograd/prim_enabled_cn.rst index 48cc9662e01..2fe880a9d25 100644 --- a/docs/api/paddle/incubate/autograd/prim_enabled_cn.rst +++ b/docs/api/paddle/incubate/autograd/prim_enabled_cn.rst @@ -19,4 +19,4 @@ prim_enabled 代码示例 :::::::::::: -COPY-FROM: paddle.incubate.autograd.prim_enabled \ No newline at end of file +COPY-FROM: paddle.incubate.autograd.prim_enabled diff --git a/docs/api/paddle/incubate/autograd/vjp_cn.rst b/docs/api/paddle/incubate/autograd/vjp_cn.rst index 240b7418795..cf68d4ece48 100644 --- a/docs/api/paddle/incubate/autograd/vjp_cn.rst +++ b/docs/api/paddle/incubate/autograd/vjp_cn.rst @@ -30,4 +30,3 @@ vjp ::::::::: COPY-FROM: paddle.incubate.autograd.vjp - diff --git a/docs/api/paddle/incubate/graph_khop_sampler_cn.rst b/docs/api/paddle/incubate/graph_khop_sampler_cn.rst index 77b14472606..47fc6abeaff 100644 --- a/docs/api/paddle/incubate/graph_khop_sampler_cn.rst +++ b/docs/api/paddle/incubate/graph_khop_sampler_cn.rst @@ -29,4 +29,4 @@ graph_khop_sampler 代码示例 :::::::::: -COPY-FROM: paddle.incubate.graph_khop_sampler \ No newline at end of file +COPY-FROM: paddle.incubate.graph_khop_sampler diff --git a/docs/api/paddle/incubate/graph_sample_neighbors_cn.rst b/docs/api/paddle/incubate/graph_sample_neighbors_cn.rst index 6318d93c09a..37cea250055 100644 --- a/docs/api/paddle/incubate/graph_sample_neighbors_cn.rst +++ b/docs/api/paddle/incubate/graph_sample_neighbors_cn.rst @@ -29,4 +29,4 @@ graph_sample_neighbors 代码示例 :::::::::: -COPY-FROM: paddle.incubate.graph_sample_neighbors \ No newline 
at end of file +COPY-FROM: paddle.incubate.graph_sample_neighbors diff --git a/docs/api/paddle/incubate/nn/FusedFeedForward_cn.rst b/docs/api/paddle/incubate/nn/FusedFeedForward_cn.rst index 251c712bb82..77ffd115f6d 100644 --- a/docs/api/paddle/incubate/nn/FusedFeedForward_cn.rst +++ b/docs/api/paddle/incubate/nn/FusedFeedForward_cn.rst @@ -25,4 +25,4 @@ FusedFeedForward 代码示例 :::::::::: -COPY-FROM: paddle.incubate.nn.FusedFeedForward \ No newline at end of file +COPY-FROM: paddle.incubate.nn.FusedFeedForward diff --git a/docs/api/paddle/incubate/nn/FusedMultiHeadAttention_cn.rst b/docs/api/paddle/incubate/nn/FusedMultiHeadAttention_cn.rst index b02acc9b2ca..87fc1ff52d8 100644 --- a/docs/api/paddle/incubate/nn/FusedMultiHeadAttention_cn.rst +++ b/docs/api/paddle/incubate/nn/FusedMultiHeadAttention_cn.rst @@ -47,4 +47,4 @@ FusedMultiHeadAttention 代码示例 ::::::::: -COPY-FROM: paddle.incubate.nn.FusedMultiHeadAttention \ No newline at end of file +COPY-FROM: paddle.incubate.nn.FusedMultiHeadAttention diff --git a/docs/api/paddle/incubate/nn/FusedTransformerEncoderLayer_cn.rst b/docs/api/paddle/incubate/nn/FusedTransformerEncoderLayer_cn.rst index 43d4e30032f..f53bb7a3c12 100644 --- a/docs/api/paddle/incubate/nn/FusedTransformerEncoderLayer_cn.rst +++ b/docs/api/paddle/incubate/nn/FusedTransformerEncoderLayer_cn.rst @@ -29,4 +29,4 @@ FusedTransformer编码器层由两个子层组成:多头自注意力机制和 代码示例 :::::::::: -COPY-FROM: paddle.incubate.nn.FusedTransformerEncoderLayer \ No newline at end of file +COPY-FROM: paddle.incubate.nn.FusedTransformerEncoderLayer diff --git a/docs/api/paddle/incubate/nn/functional/fused_feedforward_cn.rst b/docs/api/paddle/incubate/nn/functional/fused_feedforward_cn.rst index 54acdfdb577..6d099a253bb 100644 --- a/docs/api/paddle/incubate/nn/functional/fused_feedforward_cn.rst +++ b/docs/api/paddle/incubate/nn/functional/fused_feedforward_cn.rst @@ -70,4 +70,3 @@ fused_feedforward out = paddle.incubate.nn.functional.fused_feedforward(x, linear1_weight, linear2_weight) 
print(out.numpy().shape) # (1, 8, 8) - diff --git a/docs/api/paddle/incubate/nn/functional/fused_multi_head_attention_cn.rst b/docs/api/paddle/incubate/nn/functional/fused_multi_head_attention_cn.rst index 3fcf6c28388..e209e7325e4 100644 --- a/docs/api/paddle/incubate/nn/functional/fused_multi_head_attention_cn.rst +++ b/docs/api/paddle/incubate/nn/functional/fused_multi_head_attention_cn.rst @@ -86,5 +86,3 @@ fused_multi_head_attention 算子目前只支持在GPU下运行,其包含的 ::::::::: COPY-FROM: paddle.incubate.nn.functional.fused_multi_head_attention - - diff --git a/docs/api/paddle/incubate/optimizer/functional/minimize_bfgs_cn.rst b/docs/api/paddle/incubate/optimizer/functional/minimize_bfgs_cn.rst index 23d0f888d0e..17aaf8dc99a 100644 --- a/docs/api/paddle/incubate/optimizer/functional/minimize_bfgs_cn.rst +++ b/docs/api/paddle/incubate/optimizer/functional/minimize_bfgs_cn.rst @@ -44,4 +44,4 @@ minimize_bfgs 代码示例 :::::::::: -COPY-FROM: paddle.incubate.optimizer.functional.minimize_bfgs \ No newline at end of file +COPY-FROM: paddle.incubate.optimizer.functional.minimize_bfgs diff --git a/docs/api/paddle/incubate/optimizer/functional/minimize_lbfgs_cn.rst b/docs/api/paddle/incubate/optimizer/functional/minimize_lbfgs_cn.rst index 225150f6b54..2128af1eb04 100644 --- a/docs/api/paddle/incubate/optimizer/functional/minimize_lbfgs_cn.rst +++ b/docs/api/paddle/incubate/optimizer/functional/minimize_lbfgs_cn.rst @@ -42,4 +42,4 @@ minimize_lbfgs 代码示例 :::::::::: -COPY-FROM: paddle.incubate.optimizer.functional.minimize_lbfgs \ No newline at end of file +COPY-FROM: paddle.incubate.optimizer.functional.minimize_lbfgs diff --git a/docs/api/paddle/incubate/segment_max_cn.rst b/docs/api/paddle/incubate/segment_max_cn.rst index 6a63979c810..c585ec1308a 100644 --- a/docs/api/paddle/incubate/segment_max_cn.rst +++ b/docs/api/paddle/incubate/segment_max_cn.rst @@ -31,4 +31,4 @@ segment_max 代码示例 ::::::::: -COPY-FROM: paddle.incubate.segment_max \ No newline at end of file +COPY-FROM: 
paddle.incubate.segment_max diff --git a/docs/api/paddle/incubate/segment_mean_cn.rst b/docs/api/paddle/incubate/segment_mean_cn.rst index c1b2f13e2dd..200e549c491 100644 --- a/docs/api/paddle/incubate/segment_mean_cn.rst +++ b/docs/api/paddle/incubate/segment_mean_cn.rst @@ -31,4 +31,4 @@ segment_mean 代码示例 ::::::::: -COPY-FROM: paddle.incubate.segment_mean \ No newline at end of file +COPY-FROM: paddle.incubate.segment_mean diff --git a/docs/api/paddle/incubate/segment_min_cn.rst b/docs/api/paddle/incubate/segment_min_cn.rst index 060ddf5edbc..8abc34e9c38 100644 --- a/docs/api/paddle/incubate/segment_min_cn.rst +++ b/docs/api/paddle/incubate/segment_min_cn.rst @@ -31,4 +31,4 @@ segment_min 代码示例 ::::::::: -COPY-FROM: paddle.incubate.segment_min \ No newline at end of file +COPY-FROM: paddle.incubate.segment_min diff --git a/docs/api/paddle/incubate/segment_sum_cn.rst b/docs/api/paddle/incubate/segment_sum_cn.rst index 366f2970685..5737674dbbf 100644 --- a/docs/api/paddle/incubate/segment_sum_cn.rst +++ b/docs/api/paddle/incubate/segment_sum_cn.rst @@ -33,4 +33,4 @@ segment_sum 代码示例 ::::::::: -COPY-FROM: paddle.incubate.segment_sum \ No newline at end of file +COPY-FROM: paddle.incubate.segment_sum diff --git a/docs/api/paddle/incubate/softmax_mask_fuse_cn.rst b/docs/api/paddle/incubate/softmax_mask_fuse_cn.rst index 05545d14783..cea5652eae3 100644 --- a/docs/api/paddle/incubate/softmax_mask_fuse_cn.rst +++ b/docs/api/paddle/incubate/softmax_mask_fuse_cn.rst @@ -27,4 +27,4 @@ softmax_mask_fuse 代码示例 :::::::::: -COPY-FROM: paddle.incubate.softmax_mask_fuse \ No newline at end of file +COPY-FROM: paddle.incubate.softmax_mask_fuse diff --git a/docs/api/paddle/incubate/softmax_mask_fuse_upper_triangle_cn.rst b/docs/api/paddle/incubate/softmax_mask_fuse_upper_triangle_cn.rst index 4964d2b82c7..e04de7f4c9c 100644 --- a/docs/api/paddle/incubate/softmax_mask_fuse_upper_triangle_cn.rst +++ b/docs/api/paddle/incubate/softmax_mask_fuse_upper_triangle_cn.rst @@ -25,4 +25,4 @@ 
softmax_mask_fuse_upper_triangle 代码示例 :::::::::: -COPY-FROM: paddle.incubate.softmax_mask_fuse_upper_triangle \ No newline at end of file +COPY-FROM: paddle.incubate.softmax_mask_fuse_upper_triangle diff --git a/docs/api/paddle/index_sample_cn.rst b/docs/api/paddle/index_sample_cn.rst index 13d8bf21558..93d7a4221cc 100644 --- a/docs/api/paddle/index_sample_cn.rst +++ b/docs/api/paddle/index_sample_cn.rst @@ -25,4 +25,4 @@ Tensor,数据类型与输入 ``x`` 相同,维度与 ``index`` 相同。 代码示例 :::::::::::: -COPY-FROM: paddle.index_sample \ No newline at end of file +COPY-FROM: paddle.index_sample diff --git a/docs/api/paddle/index_select_cn.rst b/docs/api/paddle/index_select_cn.rst index 33463b8a0a6..7d05ab5b580 100644 --- a/docs/api/paddle/index_select_cn.rst +++ b/docs/api/paddle/index_select_cn.rst @@ -26,4 +26,4 @@ Tensor,返回一个数据类型同输入的Tensor。 代码示例 :::::::::::: -COPY-FROM: paddle.index_select \ No newline at end of file +COPY-FROM: paddle.index_select diff --git a/docs/api/paddle/io/BatchSampler_cn.rst b/docs/api/paddle/io/BatchSampler_cn.rst index 96d920a561c..814a73df440 100644 --- a/docs/api/paddle/io/BatchSampler_cn.rst +++ b/docs/api/paddle/io/BatchSampler_cn.rst @@ -32,4 +32,4 @@ BatchSampler,返回样本下标数组的迭代器。 代码示例 :::::::::::: -COPY-FROM: paddle.io.BatchSampler \ No newline at end of file +COPY-FROM: paddle.io.BatchSampler diff --git a/docs/api/paddle/io/DataLoader_cn.rst b/docs/api/paddle/io/DataLoader_cn.rst index 1e1ead11acc..05419163b15 100644 --- a/docs/api/paddle/io/DataLoader_cn.rst +++ b/docs/api/paddle/io/DataLoader_cn.rst @@ -438,4 +438,3 @@ from_dataset(dataset, places, drop_last=True) dataset.set_filelist(['a.txt', 'b.txt', 'c.txt']) loader = paddle.io.DataLoader.from_dataset(dataset, static.cpu_places()) - diff --git a/docs/api/paddle/io/IterableDataset_cn.rst b/docs/api/paddle/io/IterableDataset_cn.rst index 1953bf93e19..a90a182856e 100644 --- a/docs/api/paddle/io/IterableDataset_cn.rst +++ b/docs/api/paddle/io/IterableDataset_cn.rst @@ -134,4 +134,3 @@ IterableDataset 
for data in dataloader: print(data) # outputs: [2, 5, 3, 6, 4, 7] - diff --git a/docs/api/paddle/io/Overview_cn.rst b/docs/api/paddle/io/Overview_cn.rst index 0c8daf7d994..f36069a9d97 100644 --- a/docs/api/paddle/io/Overview_cn.rst +++ b/docs/api/paddle/io/Overview_cn.rst @@ -77,4 +77,3 @@ paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的API " :ref:`BatchSampler ` ", "批采样器接口" " :ref:`DistributedBatchSampler ` ", "分布式批采样器接口, 用于分布式多卡场景" - diff --git a/docs/api/paddle/io/RandomSampler_cn.rst b/docs/api/paddle/io/RandomSampler_cn.rst index b41f66ab95b..ae14652f706 100644 --- a/docs/api/paddle/io/RandomSampler_cn.rst +++ b/docs/api/paddle/io/RandomSampler_cn.rst @@ -22,4 +22,4 @@ RandomSampler,返回随机采样下标的采样器 代码示例 ::::::::: -COPY-FROM: paddle.io.RandomSampler \ No newline at end of file +COPY-FROM: paddle.io.RandomSampler diff --git a/docs/api/paddle/io/Sampler_cn.rst b/docs/api/paddle/io/Sampler_cn.rst index bfe6a4c2e40..45b3c8ef239 100644 --- a/docs/api/paddle/io/Sampler_cn.rst +++ b/docs/api/paddle/io/Sampler_cn.rst @@ -28,4 +28,4 @@ Sampler,返回样本下标的迭代器。 代码示例 :::::::::::: -COPY-FROM: paddle.io.Sampler \ No newline at end of file +COPY-FROM: paddle.io.Sampler diff --git a/docs/api/paddle/io/SequenceSampler_cn.rst b/docs/api/paddle/io/SequenceSampler_cn.rst index 0a20a1cc72f..b1ebd77e94b 100644 --- a/docs/api/paddle/io/SequenceSampler_cn.rst +++ b/docs/api/paddle/io/SequenceSampler_cn.rst @@ -20,4 +20,4 @@ SequenceSampler,返回样本下标的迭代器。 代码示例 :::::::::::: -COPY-FROM: paddle.io.SequenceSampler \ No newline at end of file +COPY-FROM: paddle.io.SequenceSampler diff --git a/docs/api/paddle/io/Subset_cn.rst b/docs/api/paddle/io/Subset_cn.rst index f7c63835d3b..b90de809352 100644 --- a/docs/api/paddle/io/Subset_cn.rst +++ b/docs/api/paddle/io/Subset_cn.rst @@ -22,4 +22,4 @@ list[Dataset],原数据集合的子集列表。 代码示例 ::::::::: -COPY-FROM: paddle.io.Subset \ No newline at end of file +COPY-FROM: paddle.io.Subset diff --git a/docs/api/paddle/io/WeightedRandomSampler_cn.rst 
b/docs/api/paddle/io/WeightedRandomSampler_cn.rst index 44587f2718b..4d9ee2b2b2b 100644 --- a/docs/api/paddle/io/WeightedRandomSampler_cn.rst +++ b/docs/api/paddle/io/WeightedRandomSampler_cn.rst @@ -23,4 +23,4 @@ WeightedRandomSampler,返回根据权重随机采样下标的采样器 代码示例 ::::::::: -COPY-FROM: paddle.io.WeightedRandomSampler \ No newline at end of file +COPY-FROM: paddle.io.WeightedRandomSampler diff --git a/docs/api/paddle/io/random_split_cn.rst b/docs/api/paddle/io/random_split_cn.rst index 542c79b4f48..d304f5e536a 100644 --- a/docs/api/paddle/io/random_split_cn.rst +++ b/docs/api/paddle/io/random_split_cn.rst @@ -22,4 +22,4 @@ random_split 代码示例 :::::::::::: -COPY-FROM: paddle.io.random_split \ No newline at end of file +COPY-FROM: paddle.io.random_split diff --git a/docs/api/paddle/is_complex_cn.rst b/docs/api/paddle/is_complex_cn.rst index d2ed3ce766d..91f2263d580 100644 --- a/docs/api/paddle/is_complex_cn.rst +++ b/docs/api/paddle/is_complex_cn.rst @@ -21,4 +21,4 @@ bool,如果输入 tensor 的数据类型为复数类型则为 True,反之为 代码示例 ::::::::: -COPY-FROM: paddle.is_complex \ No newline at end of file +COPY-FROM: paddle.is_complex diff --git a/docs/api/paddle/is_empty_cn.rst b/docs/api/paddle/is_empty_cn.rst index 81b9ba4918d..dfff7ace24d 100644 --- a/docs/api/paddle/is_empty_cn.rst +++ b/docs/api/paddle/is_empty_cn.rst @@ -24,4 +24,4 @@ Tensor,布尔类型的 Tensor,如果输入 Tensor x 为空则值为 True。 代码示例 :::::::::::: -COPY-FROM: paddle.is_empty \ No newline at end of file +COPY-FROM: paddle.is_empty diff --git a/docs/api/paddle/is_floating_point_cn.rst b/docs/api/paddle/is_floating_point_cn.rst index 3b140dbf0ec..7d451ddfeeb 100644 --- a/docs/api/paddle/is_floating_point_cn.rst +++ b/docs/api/paddle/is_floating_point_cn.rst @@ -19,4 +19,4 @@ is_floating_point 代码示例 ::::::::: -COPY-FROM: paddle.is_floating_point \ No newline at end of file +COPY-FROM: paddle.is_floating_point diff --git a/docs/api/paddle/is_grad_enabled_cn.rst b/docs/api/paddle/is_grad_enabled_cn.rst index 6da36d1eab7..4bf580d7e24 100644 --- 
a/docs/api/paddle/is_grad_enabled_cn.rst +++ b/docs/api/paddle/is_grad_enabled_cn.rst @@ -14,4 +14,4 @@ is_grad_enabled 代码示例 ::::::::: -COPY-FROM: paddle.is_grad_enabled \ No newline at end of file +COPY-FROM: paddle.is_grad_enabled diff --git a/docs/api/paddle/is_integer_cn.rst b/docs/api/paddle/is_integer_cn.rst index 162b6637d22..6fff0eaed84 100644 --- a/docs/api/paddle/is_integer_cn.rst +++ b/docs/api/paddle/is_integer_cn.rst @@ -21,4 +21,4 @@ bool,输入 tensor 的数据类型为整数类型则为 True,反之为 False 代码示例 ::::::::: -COPY-FROM: paddle.is_integer \ No newline at end of file +COPY-FROM: paddle.is_integer diff --git a/docs/api/paddle/is_tensor_cn.rst b/docs/api/paddle/is_tensor_cn.rst index 9f0ce7339f9..8e65fb4e7fe 100644 --- a/docs/api/paddle/is_tensor_cn.rst +++ b/docs/api/paddle/is_tensor_cn.rst @@ -20,4 +20,4 @@ bool 值,如果 x 是 `paddle.Tensor` 类型返回 True,反之返回 False 代码示例 :::::::::::: -COPY-FROM: paddle.is_tensor \ No newline at end of file +COPY-FROM: paddle.is_tensor diff --git a/docs/api/paddle/isclose_cn.rst b/docs/api/paddle/isclose_cn.rst index ff5a9936e4e..b17ddd22e2e 100644 --- a/docs/api/paddle/isclose_cn.rst +++ b/docs/api/paddle/isclose_cn.rst @@ -29,4 +29,4 @@ isclose 代码示例 ::::::::: -COPY-FROM: paddle.isclose \ No newline at end of file +COPY-FROM: paddle.isclose diff --git a/docs/api/paddle/isfinite_cn.rst b/docs/api/paddle/isfinite_cn.rst index 4b515e4951c..11b0fbaac6b 100644 --- a/docs/api/paddle/isfinite_cn.rst +++ b/docs/api/paddle/isfinite_cn.rst @@ -19,4 +19,4 @@ isfinite 代码示例 ::::::::: -COPY-FROM: paddle.isfinite \ No newline at end of file +COPY-FROM: paddle.isfinite diff --git a/docs/api/paddle/isinf_cn.rst b/docs/api/paddle/isinf_cn.rst index bff09ce1ad0..80385cbae0c 100644 --- a/docs/api/paddle/isinf_cn.rst +++ b/docs/api/paddle/isinf_cn.rst @@ -19,4 +19,4 @@ isinf 代码示例 ::::::::: -COPY-FROM: paddle.isinf \ No newline at end of file +COPY-FROM: paddle.isinf diff --git a/docs/api/paddle/isnan_cn.rst b/docs/api/paddle/isnan_cn.rst index 
1223c9e90f7..596118dab52 100644 --- a/docs/api/paddle/isnan_cn.rst +++ b/docs/api/paddle/isnan_cn.rst @@ -19,4 +19,4 @@ isnan 代码示例 ::::::::: -COPY-FROM: paddle.isnan \ No newline at end of file +COPY-FROM: paddle.isnan diff --git a/docs/api/paddle/jit/Overview_cn.rst b/docs/api/paddle/jit/Overview_cn.rst index 3121e1cb25d..82df69dc7b0 100644 --- a/docs/api/paddle/jit/Overview_cn.rst +++ b/docs/api/paddle/jit/Overview_cn.rst @@ -38,4 +38,3 @@ Debug动态图转静态图相关 " :ref:`set_code_level ` ", "设置代码级别,打印该级别动转静转化后的代码" " :ref:`set_verbosity ` ", "设置动态图转静态图的日志详细级别" - diff --git a/docs/api/paddle/jit/ProgramTranslator_cn.rst b/docs/api/paddle/jit/ProgramTranslator_cn.rst index 6473f2bb356..c461669962b 100644 --- a/docs/api/paddle/jit/ProgramTranslator_cn.rst +++ b/docs/api/paddle/jit/ProgramTranslator_cn.rst @@ -226,4 +226,3 @@ ProgramTranslator中的ProgramCache。 prog_trans = paddle.jit.ProgramTranslator() prog_cache = prog_trans.get_program_cache() - diff --git a/docs/api/paddle/jit/not_to_static_cn.rst b/docs/api/paddle/jit/not_to_static_cn.rst index aac14c23bb4..7431bd65a30 100644 --- a/docs/api/paddle/jit/not_to_static_cn.rst +++ b/docs/api/paddle/jit/not_to_static_cn.rst @@ -37,4 +37,3 @@ callable,一个在动转静过程不会进行代码转写的函数。 x = paddle.ones([1, 2], dtype='float32') out = func(x) print(out) # [[2. 
2.]] - diff --git a/docs/api/paddle/jit/save_cn.rst b/docs/api/paddle/jit/save_cn.rst index a529f8b249c..7fe0518288c 100644 --- a/docs/api/paddle/jit/save_cn.rst +++ b/docs/api/paddle/jit/save_cn.rst @@ -34,4 +34,4 @@ save 代码示例 ::::::::: -COPY-FROM: paddle.jit.save \ No newline at end of file +COPY-FROM: paddle.jit.save diff --git a/docs/api/paddle/jit/set_code_level_cn.rst b/docs/api/paddle/jit/set_code_level_cn.rst index c4ae67daafa..0f40c7e763f 100644 --- a/docs/api/paddle/jit/set_code_level_cn.rst +++ b/docs/api/paddle/jit/set_code_level_cn.rst @@ -25,4 +25,4 @@ set_code_level 代码示例 :::::::::::: -COPY-FROM: paddle.jit.set_code_level \ No newline at end of file +COPY-FROM: paddle.jit.set_code_level diff --git a/docs/api/paddle/jit/set_verbosity_cn.rst b/docs/api/paddle/jit/set_verbosity_cn.rst index 14d4f21effa..19e0a3f7c3a 100644 --- a/docs/api/paddle/jit/set_verbosity_cn.rst +++ b/docs/api/paddle/jit/set_verbosity_cn.rst @@ -24,4 +24,4 @@ set_verbosity 代码示例 :::::::::::: -COPY-FROM: paddle.jit.set_verbosity \ No newline at end of file +COPY-FROM: paddle.jit.set_verbosity diff --git a/docs/api/paddle/kron_cn.rst b/docs/api/paddle/kron_cn.rst index 8525f482ecf..fa72e073740 100644 --- a/docs/api/paddle/kron_cn.rst +++ b/docs/api/paddle/kron_cn.rst @@ -50,4 +50,4 @@ Kronecker Product 算子。 代码示例 :::::::::::: -COPY-FROM: paddle.kron \ No newline at end of file +COPY-FROM: paddle.kron diff --git a/docs/api/paddle/kthvalue_cn.rst b/docs/api/paddle/kthvalue_cn.rst index fc325431df4..f27d9a8d58d 100644 --- a/docs/api/paddle/kthvalue_cn.rst +++ b/docs/api/paddle/kthvalue_cn.rst @@ -22,4 +22,4 @@ tuple(Tensor),返回第k小的元素和对应的索引信息。结果的 代码示例 ::::::::: -COPY-FROM: paddle.kthvalue(x, \ No newline at end of file +COPY-FROM: paddle.kthvalue(x, diff --git a/docs/api/paddle/lerp_cn.rst b/docs/api/paddle/lerp_cn.rst index 8575b81a5dc..3ebe34b004f 100644 --- a/docs/api/paddle/lerp_cn.rst +++ b/docs/api/paddle/lerp_cn.rst @@ -24,4 +24,4 @@ lerp 代码示例 ::::::::: -COPY-FROM: paddle.lerp \ No 
newline at end of file +COPY-FROM: paddle.lerp diff --git a/docs/api/paddle/less_equal_cn.rst b/docs/api/paddle/less_equal_cn.rst index 4f062ae0058..8711bf25d9c 100644 --- a/docs/api/paddle/less_equal_cn.rst +++ b/docs/api/paddle/less_equal_cn.rst @@ -25,4 +25,4 @@ less_equal 代码示例 :::::::::::: -COPY-FROM: paddle.less_equal \ No newline at end of file +COPY-FROM: paddle.less_equal diff --git a/docs/api/paddle/less_than_cn.rst b/docs/api/paddle/less_than_cn.rst index 27c29476996..f6e42d97490 100644 --- a/docs/api/paddle/less_than_cn.rst +++ b/docs/api/paddle/less_than_cn.rst @@ -26,4 +26,4 @@ less_than 代码示例 :::::::::::: -COPY-FROM: paddle.less_than \ No newline at end of file +COPY-FROM: paddle.less_than diff --git a/docs/api/paddle/lgamma_cn.rst b/docs/api/paddle/lgamma_cn.rst index 9a68d11f414..6a0bf674f64 100644 --- a/docs/api/paddle/lgamma_cn.rst +++ b/docs/api/paddle/lgamma_cn.rst @@ -25,4 +25,4 @@ lgamma 代码示例 ::::::::: -COPY-FROM: paddle.lgamma \ No newline at end of file +COPY-FROM: paddle.lgamma diff --git a/docs/api/paddle/linalg/cholesky_cn.rst b/docs/api/paddle/linalg/cholesky_cn.rst index 037d782ff07..890dcc88883 100644 --- a/docs/api/paddle/linalg/cholesky_cn.rst +++ b/docs/api/paddle/linalg/cholesky_cn.rst @@ -26,4 +26,4 @@ Tensor,与 `x` 具有相同形状和数据类型。它代表了Cholesky分解 代码示例 :::::::::::: -COPY-FROM: paddle.linalg.cholesky \ No newline at end of file +COPY-FROM: paddle.linalg.cholesky diff --git a/docs/api/paddle/linalg/cholesky_solve_cn.rst b/docs/api/paddle/linalg/cholesky_solve_cn.rst index 0e4daf25628..a4ee74caada 100644 --- a/docs/api/paddle/linalg/cholesky_solve_cn.rst +++ b/docs/api/paddle/linalg/cholesky_solve_cn.rst @@ -24,4 +24,4 @@ Tensor,线性方程的解X。 代码示例 :::::::::: -COPY-FROM: paddle.linalg.cholesky_solve \ No newline at end of file +COPY-FROM: paddle.linalg.cholesky_solve diff --git a/docs/api/paddle/linalg/cond_cn.rst b/docs/api/paddle/linalg/cond_cn.rst index e4d9a0995bc..b6a91ba5554 100644 --- a/docs/api/paddle/linalg/cond_cn.rst +++ 
b/docs/api/paddle/linalg/cond_cn.rst @@ -22,4 +22,4 @@ Tensor,条件数的计算结果,数据类型和输入 ``x`` 的一致。 代码示例 :::::::::: -COPY-FROM: paddle.linalg.cond \ No newline at end of file +COPY-FROM: paddle.linalg.cond diff --git a/docs/api/paddle/linalg/cov_cn.rst b/docs/api/paddle/linalg/cov_cn.rst index 14489402f9c..ae70e269a66 100644 --- a/docs/api/paddle/linalg/cov_cn.rst +++ b/docs/api/paddle/linalg/cov_cn.rst @@ -29,4 +29,4 @@ Tensor,输入x的协方差矩阵。假设x是[m,n]的矩阵,rowvar=True, 代码示例 :::::::::: -COPY-FROM: paddle.linalg.cov \ No newline at end of file +COPY-FROM: paddle.linalg.cov diff --git a/docs/api/paddle/linalg/det_cn.rst b/docs/api/paddle/linalg/det_cn.rst index 8fc2edd7c1f..9b5caad8321 100644 --- a/docs/api/paddle/linalg/det_cn.rst +++ b/docs/api/paddle/linalg/det_cn.rst @@ -19,4 +19,4 @@ Tensor,输出矩阵的行列式值 Shape为 ``[*]`` 。 代码示例 :::::::::: -COPY-FROM: paddle.linalg.det \ No newline at end of file +COPY-FROM: paddle.linalg.det diff --git a/docs/api/paddle/linalg/eig_cn.rst b/docs/api/paddle/linalg/eig_cn.rst index 1af67605bd6..3f01779f929 100644 --- a/docs/api/paddle/linalg/eig_cn.rst +++ b/docs/api/paddle/linalg/eig_cn.rst @@ -29,4 +29,4 @@ eig 代码示例 :::::::::: -COPY-FROM: paddle.linalg.eig \ No newline at end of file +COPY-FROM: paddle.linalg.eig diff --git a/docs/api/paddle/linalg/eigh_cn.rst b/docs/api/paddle/linalg/eigh_cn.rst index c11e5acc88f..81511d85227 100644 --- a/docs/api/paddle/linalg/eigh_cn.rst +++ b/docs/api/paddle/linalg/eigh_cn.rst @@ -22,4 +22,4 @@ eigh 代码示例 :::::::::: -COPY-FROM: paddle.linalg.eigh \ No newline at end of file +COPY-FROM: paddle.linalg.eigh diff --git a/docs/api/paddle/linalg/eigvals_cn.rst b/docs/api/paddle/linalg/eigvals_cn.rst index 06418d81db3..0f9b50fb834 100644 --- a/docs/api/paddle/linalg/eigvals_cn.rst +++ b/docs/api/paddle/linalg/eigvals_cn.rst @@ -25,4 +25,4 @@ eigvals 代码示例 ::::::::: -COPY-FROM: paddle.linalg.eigvals \ No newline at end of file +COPY-FROM: paddle.linalg.eigvals diff --git a/docs/api/paddle/linalg/eigvalsh_cn.rst 
b/docs/api/paddle/linalg/eigvalsh_cn.rst index 018ae07517e..1d813c3749b 100644 --- a/docs/api/paddle/linalg/eigvalsh_cn.rst +++ b/docs/api/paddle/linalg/eigvalsh_cn.rst @@ -21,4 +21,4 @@ Tensor,输出矩阵的特征值,输出顺序按照从小到大进行排序 代码示例 :::::::::: -COPY-FROM: paddle.linalg.eigvalsh \ No newline at end of file +COPY-FROM: paddle.linalg.eigvalsh diff --git a/docs/api/paddle/linalg/inv_cn.rst b/docs/api/paddle/linalg/inv_cn.rst index e6a26645fa9..a01ede4ebbc 100644 --- a/docs/api/paddle/linalg/inv_cn.rst +++ b/docs/api/paddle/linalg/inv_cn.rst @@ -21,4 +21,4 @@ Tensor,输入方阵的逆。 代码示例 ::::::::: -COPY-FROM: paddle.linalg.inv \ No newline at end of file +COPY-FROM: paddle.linalg.inv diff --git a/docs/api/paddle/linalg/lstsq_cn.rst b/docs/api/paddle/linalg/lstsq_cn.rst index 09244087d74..77fb9703df9 100644 --- a/docs/api/paddle/linalg/lstsq_cn.rst +++ b/docs/api/paddle/linalg/lstsq_cn.rst @@ -30,4 +30,4 @@ lstsq 代码示例 :::::::::: -COPY-FROM: paddle.linalg.lstsq \ No newline at end of file +COPY-FROM: paddle.linalg.lstsq diff --git a/docs/api/paddle/linalg/matrix_power_cn.rst b/docs/api/paddle/linalg/matrix_power_cn.rst index c4bce565907..2db17c6e478 100644 --- a/docs/api/paddle/linalg/matrix_power_cn.rst +++ b/docs/api/paddle/linalg/matrix_power_cn.rst @@ -35,4 +35,4 @@ Tensor,这个(或这批)矩阵 ``x`` 经过 ``n`` 次幂运算后的结果 代码示例 :::::::::: -COPY-FROM: paddle.linalg.matrix_power \ No newline at end of file +COPY-FROM: paddle.linalg.matrix_power diff --git a/docs/api/paddle/linalg/matrix_rank_cn.rst b/docs/api/paddle/linalg/matrix_rank_cn.rst index cb185533af2..943eadf96cf 100644 --- a/docs/api/paddle/linalg/matrix_rank_cn.rst +++ b/docs/api/paddle/linalg/matrix_rank_cn.rst @@ -24,4 +24,4 @@ Tensor, ``x`` 的秩,数据类型为int64。 代码示例 :::::::::: -COPY-FROM: paddle.linalg.matrix_rank \ No newline at end of file +COPY-FROM: paddle.linalg.matrix_rank diff --git a/docs/api/paddle/linalg/multi_dot_cn.rst b/docs/api/paddle/linalg/multi_dot_cn.rst index 8d059496a75..deb4f484377 100755 --- 
a/docs/api/paddle/linalg/multi_dot_cn.rst +++ b/docs/api/paddle/linalg/multi_dot_cn.rst @@ -32,4 +32,4 @@ Tensor 代码示例 :::::::::: -COPY-FROM: paddle.linalg.multi_dot \ No newline at end of file +COPY-FROM: paddle.linalg.multi_dot diff --git a/docs/api/paddle/linalg/norm_cn.rst b/docs/api/paddle/linalg/norm_cn.rst index acc432e4691..8bbac12fb5c 100644 --- a/docs/api/paddle/linalg/norm_cn.rst +++ b/docs/api/paddle/linalg/norm_cn.rst @@ -31,4 +31,4 @@ norm 代码示例 ::::::::: -COPY-FROM: paddle.linalg.norm \ No newline at end of file +COPY-FROM: paddle.linalg.norm diff --git a/docs/api/paddle/linalg/pinv_cn.rst b/docs/api/paddle/linalg/pinv_cn.rst index daf88a49a6d..7d9a458b52a 100644 --- a/docs/api/paddle/linalg/pinv_cn.rst +++ b/docs/api/paddle/linalg/pinv_cn.rst @@ -25,4 +25,4 @@ Tensor,输入矩阵的伪逆矩阵,数据类型和输入数据类型一致 代码示例 :::::::::: -COPY-FROM: paddle.linalg.pinv \ No newline at end of file +COPY-FROM: paddle.linalg.pinv diff --git a/docs/api/paddle/linalg/qr_cn.rst b/docs/api/paddle/linalg/qr_cn.rst index 01d6797b277..00c5ba28926 100644 --- a/docs/api/paddle/linalg/qr_cn.rst +++ b/docs/api/paddle/linalg/qr_cn.rst @@ -32,4 +32,4 @@ qr 代码示例 :::::::::: -COPY-FROM: paddle.linalg.qr \ No newline at end of file +COPY-FROM: paddle.linalg.qr diff --git a/docs/api/paddle/linalg/slogdet_cn.rst b/docs/api/paddle/linalg/slogdet_cn.rst index 3041fc3064a..3d6998c68cc 100644 --- a/docs/api/paddle/linalg/slogdet_cn.rst +++ b/docs/api/paddle/linalg/slogdet_cn.rst @@ -19,4 +19,4 @@ Tensor,输出矩阵的行列式值 Shape为 ``[2, *]``。 代码示例 :::::::::: -COPY-FROM: paddle.linalg.slogdet \ No newline at end of file +COPY-FROM: paddle.linalg.slogdet diff --git a/docs/api/paddle/linalg/solve_cn.rst b/docs/api/paddle/linalg/solve_cn.rst index 8bf3489984c..627fe4ee788 100644 --- a/docs/api/paddle/linalg/solve_cn.rst +++ b/docs/api/paddle/linalg/solve_cn.rst @@ -32,4 +32,4 @@ Tensor,这个(或这批)矩阵 ``x`` 与 ``y`` 经过运算后的结果, 代码示例 :::::::::: -COPY-FROM: paddle.linalg.solve \ No newline at end of file +COPY-FROM: 
paddle.linalg.solve diff --git a/docs/api/paddle/linalg/svd_cn.rst b/docs/api/paddle/linalg/svd_cn.rst index 71a5d4b5c88..e0526e643f7 100644 --- a/docs/api/paddle/linalg/svd_cn.rst +++ b/docs/api/paddle/linalg/svd_cn.rst @@ -33,4 +33,4 @@ svd 代码示例 :::::::::: -COPY-FROM: paddle.linalg.svd \ No newline at end of file +COPY-FROM: paddle.linalg.svd diff --git a/docs/api/paddle/linalg/triangular_solve_cn.rst b/docs/api/paddle/linalg/triangular_solve_cn.rst index 035d3d76a7f..78c3bf353df 100644 --- a/docs/api/paddle/linalg/triangular_solve_cn.rst +++ b/docs/api/paddle/linalg/triangular_solve_cn.rst @@ -41,4 +41,4 @@ Tensor,线程方程组的解,数据类型和 ``x`` 一致。 代码示例 :::::::::: -COPY-FROM: paddle.linalg.triangular_solve \ No newline at end of file +COPY-FROM: paddle.linalg.triangular_solve diff --git a/docs/api/paddle/linspace_cn.rst b/docs/api/paddle/linspace_cn.rst index d3d2947cb0d..ae6c5a78ad5 100644 --- a/docs/api/paddle/linspace_cn.rst +++ b/docs/api/paddle/linspace_cn.rst @@ -25,4 +25,4 @@ linspace 代码示例 :::::::::::: -COPY-FROM: paddle.linspace \ No newline at end of file +COPY-FROM: paddle.linspace diff --git a/docs/api/paddle/log10_cn.rst b/docs/api/paddle/log10_cn.rst index 5fae50ab16d..6cb128a5f52 100755 --- a/docs/api/paddle/log10_cn.rst +++ b/docs/api/paddle/log10_cn.rst @@ -29,4 +29,4 @@ Log10激活函数(计算底为10的对数) 代码示例 :::::::::::: -COPY-FROM: paddle.log10 \ No newline at end of file +COPY-FROM: paddle.log10 diff --git a/docs/api/paddle/log1p_cn.rst b/docs/api/paddle/log1p_cn.rst index d4a4e0c9594..39d633451f6 100644 --- a/docs/api/paddle/log1p_cn.rst +++ b/docs/api/paddle/log1p_cn.rst @@ -25,4 +25,4 @@ log1p 代码示例 :::::::::::: -COPY-FROM: paddle.log1p \ No newline at end of file +COPY-FROM: paddle.log1p diff --git a/docs/api/paddle/log2_cn.rst b/docs/api/paddle/log2_cn.rst index 0925ca547be..413ec7f646e 100755 --- a/docs/api/paddle/log2_cn.rst +++ b/docs/api/paddle/log2_cn.rst @@ -28,4 +28,4 @@ Tensor,Log2算子底为2对数输出,数据类型与输入一致。 代码示例 ::::::::: -COPY-FROM: paddle.log2 \ No newline at 
end of file +COPY-FROM: paddle.log2 diff --git a/docs/api/paddle/log_cn.rst b/docs/api/paddle/log_cn.rst index 6e235e0f096..fc654706ab9 100644 --- a/docs/api/paddle/log_cn.rst +++ b/docs/api/paddle/log_cn.rst @@ -28,4 +28,4 @@ Tensor, Log算子自然对数输出,数据类型与输入一致。 代码示例 :::::::::::: -COPY-FROM: paddle.log \ No newline at end of file +COPY-FROM: paddle.log diff --git a/docs/api/paddle/logical_and_cn.rst b/docs/api/paddle/logical_and_cn.rst index d7b3b2895b8..65690be6b50 100644 --- a/docs/api/paddle/logical_and_cn.rst +++ b/docs/api/paddle/logical_and_cn.rst @@ -28,4 +28,4 @@ logical_and 代码示例 :::::::::::: -COPY-FROM: paddle.logical_and \ No newline at end of file +COPY-FROM: paddle.logical_and diff --git a/docs/api/paddle/logical_not_cn.rst b/docs/api/paddle/logical_not_cn.rst index 27d80e7f3a1..48c09f9d7a1 100644 --- a/docs/api/paddle/logical_not_cn.rst +++ b/docs/api/paddle/logical_not_cn.rst @@ -28,4 +28,4 @@ Tensor,与 ``x`` 维度相同,数据类型相同。 代码示例 :::::::::::: -COPY-FROM: paddle.logical_not \ No newline at end of file +COPY-FROM: paddle.logical_not diff --git a/docs/api/paddle/logical_or_cn.rst b/docs/api/paddle/logical_or_cn.rst index a80b701a7df..653a5e72e24 100644 --- a/docs/api/paddle/logical_or_cn.rst +++ b/docs/api/paddle/logical_or_cn.rst @@ -28,4 +28,4 @@ logical_or 代码示例 :::::::::::: -COPY-FROM: paddle.logical_or \ No newline at end of file +COPY-FROM: paddle.logical_or diff --git a/docs/api/paddle/logical_xor_cn.rst b/docs/api/paddle/logical_xor_cn.rst index c1154bb031f..289f67c4af3 100644 --- a/docs/api/paddle/logical_xor_cn.rst +++ b/docs/api/paddle/logical_xor_cn.rst @@ -28,4 +28,4 @@ logical_xor 代码示例 :::::::::::: -COPY-FROM: paddle.logical_xor \ No newline at end of file +COPY-FROM: paddle.logical_xor diff --git a/docs/api/paddle/logit_cn.rst b/docs/api/paddle/logit_cn.rst index 74aab79d03b..87b60e9cfbb 100644 --- a/docs/api/paddle/logit_cn.rst +++ b/docs/api/paddle/logit_cn.rst @@ -38,4 +38,4 @@ logit 代码示例 :::::::::: -COPY-FROM: paddle.logit \ No newline at end of 
file +COPY-FROM: paddle.logit diff --git a/docs/api/paddle/logsumexp_cn.rst b/docs/api/paddle/logsumexp_cn.rst index 193cb41c75f..4996c3985ce 100644 --- a/docs/api/paddle/logsumexp_cn.rst +++ b/docs/api/paddle/logsumexp_cn.rst @@ -24,4 +24,4 @@ logsumexp 代码示例 :::::::::: -COPY-FROM: paddle.logsumexp \ No newline at end of file +COPY-FROM: paddle.logsumexp diff --git a/docs/api/paddle/masked_select_cn.rst b/docs/api/paddle/masked_select_cn.rst index 844bf710b77..6eb9e46d32e 100644 --- a/docs/api/paddle/masked_select_cn.rst +++ b/docs/api/paddle/masked_select_cn.rst @@ -24,4 +24,4 @@ masked_select 代码示例 :::::::::::: -COPY-FROM: paddle.masked_select \ No newline at end of file +COPY-FROM: paddle.masked_select diff --git a/docs/api/paddle/matmul_cn.rst b/docs/api/paddle/matmul_cn.rst index c8faaf89f5e..e8be4dbf630 100644 --- a/docs/api/paddle/matmul_cn.rst +++ b/docs/api/paddle/matmul_cn.rst @@ -40,4 +40,4 @@ matmul 代码示例 :::::::::: -COPY-FROM: paddle.matmul \ No newline at end of file +COPY-FROM: paddle.matmul diff --git a/docs/api/paddle/maximum_cn.rst b/docs/api/paddle/maximum_cn.rst index 466217cdf4c..2c534f42114 100644 --- a/docs/api/paddle/maximum_cn.rst +++ b/docs/api/paddle/maximum_cn.rst @@ -30,4 +30,4 @@ maximum 代码示例 :::::::::: -COPY-FROM: paddle.maximum \ No newline at end of file +COPY-FROM: paddle.maximum diff --git a/docs/api/paddle/mean_cn.rst b/docs/api/paddle/mean_cn.rst index a0b5e9d5bca..f20adbd455e 100644 --- a/docs/api/paddle/mean_cn.rst +++ b/docs/api/paddle/mean_cn.rst @@ -23,4 +23,4 @@ mean 代码示例 :::::::::: -COPY-FROM: paddle.mean \ No newline at end of file +COPY-FROM: paddle.mean diff --git a/docs/api/paddle/median_cn.rst b/docs/api/paddle/median_cn.rst index d5a1a64e0f1..19251330461 100644 --- a/docs/api/paddle/median_cn.rst +++ b/docs/api/paddle/median_cn.rst @@ -21,4 +21,4 @@ median 代码示例 :::::::::: -COPY-FROM: paddle.median \ No newline at end of file +COPY-FROM: paddle.median diff --git a/docs/api/paddle/meshgrid_cn.rst 
b/docs/api/paddle/meshgrid_cn.rst index 6e3c0e35a3e..e0d6e8da389 100644 --- a/docs/api/paddle/meshgrid_cn.rst +++ b/docs/api/paddle/meshgrid_cn.rst @@ -27,4 +27,4 @@ k 个 k 维 ``Tensor``,每个形状均为(N1, N2, ..., Nk)。 -COPY-FROM: paddle.meshgrid \ No newline at end of file +COPY-FROM: paddle.meshgrid diff --git a/docs/api/paddle/minimum_cn.rst b/docs/api/paddle/minimum_cn.rst index d7ab7e0e60b..d9cbf26d2e6 100644 --- a/docs/api/paddle/minimum_cn.rst +++ b/docs/api/paddle/minimum_cn.rst @@ -30,4 +30,4 @@ minimum 代码示例 :::::::::: -COPY-FROM: paddle.minimum \ No newline at end of file +COPY-FROM: paddle.minimum diff --git a/docs/api/paddle/mm_cn.rst b/docs/api/paddle/mm_cn.rst index 336a794d2ea..b9d6aee3358 100644 --- a/docs/api/paddle/mm_cn.rst +++ b/docs/api/paddle/mm_cn.rst @@ -64,4 +64,4 @@ mm 代码示例 :::::::::::: -COPY-FROM: paddle.mm \ No newline at end of file +COPY-FROM: paddle.mm diff --git a/docs/api/paddle/mod_cn.rst b/docs/api/paddle/mod_cn.rst index cc23ecb85e1..819fb1c932f 100644 --- a/docs/api/paddle/mod_cn.rst +++ b/docs/api/paddle/mod_cn.rst @@ -26,4 +26,4 @@ mod 代码示例 ::::::::: -COPY-FROM: paddle.mod \ No newline at end of file +COPY-FROM: paddle.mod diff --git a/docs/api/paddle/mode_cn.rst b/docs/api/paddle/mode_cn.rst index a4ba0716b53..e09d15fdf3a 100644 --- a/docs/api/paddle/mode_cn.rst +++ b/docs/api/paddle/mode_cn.rst @@ -22,4 +22,4 @@ tuple(Tensor),返回检索到的众数结果和对应索引信息。结果 ::::::::: -COPY-FROM: paddle.mode(x, \ No newline at end of file +COPY-FROM: paddle.mode(x, diff --git a/docs/api/paddle/moveaxis_cn.rst b/docs/api/paddle/moveaxis_cn.rst index fcd7b3c7e6d..9f80b749407 100644 --- a/docs/api/paddle/moveaxis_cn.rst +++ b/docs/api/paddle/moveaxis_cn.rst @@ -21,4 +21,4 @@ moveaxis 代码示例 ::::::::: -COPY-FROM: paddle.moveaxis \ No newline at end of file +COPY-FROM: paddle.moveaxis diff --git a/docs/api/paddle/multinomial_cn.rst b/docs/api/paddle/multinomial_cn.rst index c0749d24b5b..80c66be883b 100644 --- a/docs/api/paddle/multinomial_cn.rst +++ 
b/docs/api/paddle/multinomial_cn.rst @@ -29,4 +29,4 @@ multinomial 代码示例 :::::::::::: -COPY-FROM: paddle.multinomial \ No newline at end of file +COPY-FROM: paddle.multinomial diff --git a/docs/api/paddle/multiplex_cn.rst b/docs/api/paddle/multiplex_cn.rst index 076ff2ac895..e04bed1c60d 100644 --- a/docs/api/paddle/multiplex_cn.rst +++ b/docs/api/paddle/multiplex_cn.rst @@ -48,4 +48,3 @@ Tensor,进行Multiplex运算后的输出Tensor。 COPY-FROM: paddle.multiplex:code-example1 - diff --git a/docs/api/paddle/multiply_cn.rst b/docs/api/paddle/multiply_cn.rst index 0a3cbe38230..7b71a72b057 100755 --- a/docs/api/paddle/multiply_cn.rst +++ b/docs/api/paddle/multiply_cn.rst @@ -40,4 +40,4 @@ multiply 代码示例 ::::::::: -COPY-FROM: paddle.multiply \ No newline at end of file +COPY-FROM: paddle.multiply diff --git a/docs/api/paddle/nanmedian_cn.rst b/docs/api/paddle/nanmedian_cn.rst index ec5ac448897..4c2dc22f94b 100644 --- a/docs/api/paddle/nanmedian_cn.rst +++ b/docs/api/paddle/nanmedian_cn.rst @@ -21,4 +21,3 @@ nanmedian 代码示例 :::::::::: COPY-FROM: paddle.nanmedian:nanmedian-example - diff --git a/docs/api/paddle/neg_cn.rst b/docs/api/paddle/neg_cn.rst index 110f75ab8c7..0d81802e102 100644 --- a/docs/api/paddle/neg_cn.rst +++ b/docs/api/paddle/neg_cn.rst @@ -25,4 +25,4 @@ neg 代码示例 ::::::::: -COPY-FROM: paddle.neg \ No newline at end of file +COPY-FROM: paddle.neg diff --git a/docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst b/docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst index 2e79a52171a..577451da348 100755 --- a/docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst @@ -43,4 +43,4 @@ AdaptiveAvgPool2D 代码示例 ::::::::: -COPY-FROM: paddle.nn.AdaptiveAvgPool2D \ No newline at end of file +COPY-FROM: paddle.nn.AdaptiveAvgPool2D diff --git a/docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst b/docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst index fb953f851f9..dfe3fe57cc9 100755 --- a/docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst 
@@ -46,4 +46,4 @@ AdaptiveAvgPool3D 代码示例 ::::::::: -COPY-FROM: paddle.nn.AdaptiveAvgPool3D \ No newline at end of file +COPY-FROM: paddle.nn.AdaptiveAvgPool3D diff --git a/docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst b/docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst index 0658d328938..42b03d9b755 100755 --- a/docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst @@ -39,4 +39,4 @@ AdaptiveMaxPool1D 代码示例 ::::::::: -COPY-FROM: paddle.nn.AdaptiveMaxPool1D \ No newline at end of file +COPY-FROM: paddle.nn.AdaptiveMaxPool1D diff --git a/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst b/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst index d765afef7c1..2c396776bf2 100644 --- a/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst @@ -50,4 +50,4 @@ AdaptiveMaxPool2D 代码示例 ::::::::: -COPY-FROM: paddle.nn.AdaptiveMaxPool2D \ No newline at end of file +COPY-FROM: paddle.nn.AdaptiveMaxPool2D diff --git a/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst b/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst index b0dc38dcc26..8e3fff9d4e8 100644 --- a/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst @@ -45,4 +45,4 @@ AdaptiveMaxPool3D 代码示例 ::::::::: -COPY-FROM: paddle.nn.AdaptiveMaxPool3D \ No newline at end of file +COPY-FROM: paddle.nn.AdaptiveMaxPool3D diff --git a/docs/api/paddle/nn/AlphaDropout_cn.rst b/docs/api/paddle/nn/AlphaDropout_cn.rst index c6d2d56785f..fbd54bf231a 100644 --- a/docs/api/paddle/nn/AlphaDropout_cn.rst +++ b/docs/api/paddle/nn/AlphaDropout_cn.rst @@ -24,4 +24,4 @@ AlphaDropout是一种具有自归一化性质的dropout。均值为0,方差为 代码示例 ::::::::: -COPY-FROM: paddle.nn.AlphaDropout \ No newline at end of file +COPY-FROM: paddle.nn.AlphaDropout diff --git a/docs/api/paddle/nn/AvgPool1D_cn.rst b/docs/api/paddle/nn/AvgPool1D_cn.rst index 9eb5bede8ed..b818c7f418d 100755 --- a/docs/api/paddle/nn/AvgPool1D_cn.rst +++ b/docs/api/paddle/nn/AvgPool1D_cn.rst @@ -38,4 +38,4 @@ AvgPool1D 代码示例 
::::::::: -COPY-FROM: paddle.nn.AvgPool1D \ No newline at end of file +COPY-FROM: paddle.nn.AvgPool1D diff --git a/docs/api/paddle/nn/AvgPool2D_cn.rst b/docs/api/paddle/nn/AvgPool2D_cn.rst index 4f66437c435..787432c2623 100644 --- a/docs/api/paddle/nn/AvgPool2D_cn.rst +++ b/docs/api/paddle/nn/AvgPool2D_cn.rst @@ -54,4 +54,4 @@ AvgPool2D 代码示例 ::::::::: -COPY-FROM: paddle.nn.AvgPool2D \ No newline at end of file +COPY-FROM: paddle.nn.AvgPool2D diff --git a/docs/api/paddle/nn/AvgPool3D_cn.rst b/docs/api/paddle/nn/AvgPool3D_cn.rst index 10e3c020671..39c9f9f1b81 100644 --- a/docs/api/paddle/nn/AvgPool3D_cn.rst +++ b/docs/api/paddle/nn/AvgPool3D_cn.rst @@ -55,4 +55,4 @@ AvgPool3D 代码示例 ::::::::: -COPY-FROM: paddle.nn.AvgPool3D \ No newline at end of file +COPY-FROM: paddle.nn.AvgPool3D diff --git a/docs/api/paddle/nn/BCELoss_cn.rst b/docs/api/paddle/nn/BCELoss_cn.rst index 47bdb193d94..df14bf312b8 100644 --- a/docs/api/paddle/nn/BCELoss_cn.rst +++ b/docs/api/paddle/nn/BCELoss_cn.rst @@ -52,4 +52,4 @@ BCELoss 代码示例 :::::::::: -COPY-FROM: paddle.nn.BCELoss \ No newline at end of file +COPY-FROM: paddle.nn.BCELoss diff --git a/docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst b/docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst index e73ed002062..66139401844 100644 --- a/docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst +++ b/docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst @@ -52,4 +52,4 @@ BCEWithLogitsLoss 代码示例 ::::::::: -COPY-FROM: paddle.nn.BCEWithLogitsLoss \ No newline at end of file +COPY-FROM: paddle.nn.BCEWithLogitsLoss diff --git a/docs/api/paddle/nn/BatchNorm1D_cn.rst b/docs/api/paddle/nn/BatchNorm1D_cn.rst index 1905accc7af..4f9ce8432a5 100644 --- a/docs/api/paddle/nn/BatchNorm1D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm1D_cn.rst @@ -76,4 +76,3 @@ BatchNorm1D batch_norm_out = batch_norm(x) print(batch_norm_out) - diff --git a/docs/api/paddle/nn/BatchNorm2D_cn.rst b/docs/api/paddle/nn/BatchNorm2D_cn.rst index d1d52bbb982..b53d09a0ce2 100644 --- a/docs/api/paddle/nn/BatchNorm2D_cn.rst +++ 
b/docs/api/paddle/nn/BatchNorm2D_cn.rst @@ -76,4 +76,3 @@ BatchNorm2D batch_norm_out = batch_norm(x) print(batch_norm_out) - diff --git a/docs/api/paddle/nn/BatchNorm3D_cn.rst b/docs/api/paddle/nn/BatchNorm3D_cn.rst index a733c8469ad..c3f2eec74b1 100644 --- a/docs/api/paddle/nn/BatchNorm3D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm3D_cn.rst @@ -76,4 +76,3 @@ BatchNorm3D batch_norm_out = batch_norm(x) print(batch_norm_out) - diff --git a/docs/api/paddle/nn/BeamSearchDecoder_cn.rst b/docs/api/paddle/nn/BeamSearchDecoder_cn.rst index d71387297b0..6622693d09a 100644 --- a/docs/api/paddle/nn/BeamSearchDecoder_cn.rst +++ b/docs/api/paddle/nn/BeamSearchDecoder_cn.rst @@ -184,4 +184,3 @@ finalize(outputs, final_states, sequence_lengths) **返回** tuple,一个元组 :code:`(predicted_ids, final_states)`。:code:`predicted_ids` 是一个Tensor,形状为 :math:`[time\_step,batch\_size,beam\_size]`,数据类型为int64。:code:`final_states` 与输入参数 :code:`final_states` 相同。 - diff --git a/docs/api/paddle/nn/BiRNN_cn.rst b/docs/api/paddle/nn/BiRNN_cn.rst index 5196227958a..7b8ce218b85 100644 --- a/docs/api/paddle/nn/BiRNN_cn.rst +++ b/docs/api/paddle/nn/BiRNN_cn.rst @@ -38,4 +38,4 @@ BiRNN 代码示例 :::::::::::: -COPY-FROM: paddle.nn.BiRNN \ No newline at end of file +COPY-FROM: paddle.nn.BiRNN diff --git a/docs/api/paddle/nn/Bilinear_cn.rst b/docs/api/paddle/nn/Bilinear_cn.rst index 3c362803a5b..5eb6937a854 100644 --- a/docs/api/paddle/nn/Bilinear_cn.rst +++ b/docs/api/paddle/nn/Bilinear_cn.rst @@ -45,4 +45,4 @@ Bilinear 代码示例 ::::::::: -COPY-FROM: paddle.nn.Bilinear \ No newline at end of file +COPY-FROM: paddle.nn.Bilinear diff --git a/docs/api/paddle/nn/CELU_cn.rst b/docs/api/paddle/nn/CELU_cn.rst index 96933a220d2..ea442a7590c 100644 --- a/docs/api/paddle/nn/CELU_cn.rst +++ b/docs/api/paddle/nn/CELU_cn.rst @@ -27,4 +27,4 @@ CELU激活层(CELU Activation Operator) 代码示例 ::::::::: -COPY-FROM: paddle.nn.CELU \ No newline at end of file +COPY-FROM: paddle.nn.CELU diff --git a/docs/api/paddle/nn/CTCLoss_cn.rst 
b/docs/api/paddle/nn/CTCLoss_cn.rst index 7ab50de848f..b1675348163 100644 --- a/docs/api/paddle/nn/CTCLoss_cn.rst +++ b/docs/api/paddle/nn/CTCLoss_cn.rst @@ -27,4 +27,4 @@ CTCLoss 代码示例 ::::::::: -COPY-FROM: paddle.nn.CTCLoss \ No newline at end of file +COPY-FROM: paddle.nn.CTCLoss diff --git a/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst b/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst index 3910f7cf8aa..c27cb76a797 100644 --- a/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst +++ b/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst @@ -53,4 +53,3 @@ ClipGradByGlobalNorm clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) sdg.step() - diff --git a/docs/api/paddle/nn/ClipGradByNorm_cn.rst b/docs/api/paddle/nn/ClipGradByNorm_cn.rst index 94a5bcaf8ec..65efda938dd 100644 --- a/docs/api/paddle/nn/ClipGradByNorm_cn.rst +++ b/docs/api/paddle/nn/ClipGradByNorm_cn.rst @@ -59,4 +59,3 @@ ClipGradByNorm clip = paddle.nn.ClipGradByNorm(clip_norm=1.0) sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) sdg.step() - diff --git a/docs/api/paddle/nn/Conv1DTranspose_cn.rst b/docs/api/paddle/nn/Conv1DTranspose_cn.rst index e9ea45e8672..e26532b52c7 100644 --- a/docs/api/paddle/nn/Conv1DTranspose_cn.rst +++ b/docs/api/paddle/nn/Conv1DTranspose_cn.rst @@ -75,4 +75,4 @@ Conv1DTranspose 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Conv1DTranspose \ No newline at end of file +COPY-FROM: paddle.nn.Conv1DTranspose diff --git a/docs/api/paddle/nn/Conv1D_cn.rst b/docs/api/paddle/nn/Conv1D_cn.rst index b618bd3c137..1332ef3d30c 100644 --- a/docs/api/paddle/nn/Conv1D_cn.rst +++ b/docs/api/paddle/nn/Conv1D_cn.rst @@ -81,4 +81,4 @@ bias 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Conv1D \ No newline at end of file +COPY-FROM: paddle.nn.Conv1D diff --git a/docs/api/paddle/nn/Conv2DTranspose_cn.rst b/docs/api/paddle/nn/Conv2DTranspose_cn.rst index 6df3b675539..b57dc40d7aa 
100644 --- a/docs/api/paddle/nn/Conv2DTranspose_cn.rst +++ b/docs/api/paddle/nn/Conv2DTranspose_cn.rst @@ -88,4 +88,4 @@ Conv2DTranspose 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Conv2DTranspose \ No newline at end of file +COPY-FROM: paddle.nn.Conv2DTranspose diff --git a/docs/api/paddle/nn/Conv2D_cn.rst b/docs/api/paddle/nn/Conv2D_cn.rst index 26735897aab..40550c54f56 100644 --- a/docs/api/paddle/nn/Conv2D_cn.rst +++ b/docs/api/paddle/nn/Conv2D_cn.rst @@ -90,4 +90,4 @@ bias 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Conv2D \ No newline at end of file +COPY-FROM: paddle.nn.Conv2D diff --git a/docs/api/paddle/nn/Conv3DTranspose_cn.rst b/docs/api/paddle/nn/Conv3DTranspose_cn.rst index 7880076d977..9bcecf3db6a 100755 --- a/docs/api/paddle/nn/Conv3DTranspose_cn.rst +++ b/docs/api/paddle/nn/Conv3DTranspose_cn.rst @@ -89,4 +89,4 @@ Conv3DTranspose 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Conv3DTranspose \ No newline at end of file +COPY-FROM: paddle.nn.Conv3DTranspose diff --git a/docs/api/paddle/nn/Conv3D_cn.rst b/docs/api/paddle/nn/Conv3D_cn.rst index 9af5722e99e..617ddedbde2 100644 --- a/docs/api/paddle/nn/Conv3D_cn.rst +++ b/docs/api/paddle/nn/Conv3D_cn.rst @@ -93,4 +93,4 @@ bias 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Conv3D \ No newline at end of file +COPY-FROM: paddle.nn.Conv3D diff --git a/docs/api/paddle/nn/CosineSimilarity_cn.rst b/docs/api/paddle/nn/CosineSimilarity_cn.rst index b35000918cb..093e1521d06 100644 --- a/docs/api/paddle/nn/CosineSimilarity_cn.rst +++ b/docs/api/paddle/nn/CosineSimilarity_cn.rst @@ -21,4 +21,4 @@ CosineSimilarity 代码示例 :::::::::::: -COPY-FROM: paddle.nn.CosineSimilarity \ No newline at end of file +COPY-FROM: paddle.nn.CosineSimilarity diff --git a/docs/api/paddle/nn/CrossEntropyLoss_cn.rst b/docs/api/paddle/nn/CrossEntropyLoss_cn.rst index 4cbd866d92f..c9ee5aa4f03 100644 --- a/docs/api/paddle/nn/CrossEntropyLoss_cn.rst +++ b/docs/api/paddle/nn/CrossEntropyLoss_cn.rst @@ -40,4 +40,4 @@ CrossEntropyLoss 代码示例 ::::::::: -COPY-FROM: 
paddle.nn.CrossEntropyLoss \ No newline at end of file +COPY-FROM: paddle.nn.CrossEntropyLoss diff --git a/docs/api/paddle/nn/Dropout2D_cn.rst b/docs/api/paddle/nn/Dropout2D_cn.rst index 59d90231572..a01c4778dbb 100644 --- a/docs/api/paddle/nn/Dropout2D_cn.rst +++ b/docs/api/paddle/nn/Dropout2D_cn.rst @@ -26,4 +26,4 @@ Dropout2D 代码示例 ::::::::: -COPY-FROM: paddle.nn.Dropout2D \ No newline at end of file +COPY-FROM: paddle.nn.Dropout2D diff --git a/docs/api/paddle/nn/Dropout3D_cn.rst b/docs/api/paddle/nn/Dropout3D_cn.rst index f76dc502541..65ba119e227 100644 --- a/docs/api/paddle/nn/Dropout3D_cn.rst +++ b/docs/api/paddle/nn/Dropout3D_cn.rst @@ -26,4 +26,4 @@ Dropout3D 代码示例 ::::::::: -COPY-FROM: paddle.nn.Dropout3D \ No newline at end of file +COPY-FROM: paddle.nn.Dropout3D diff --git a/docs/api/paddle/nn/Dropout_cn.rst b/docs/api/paddle/nn/Dropout_cn.rst index 5dd87d2e676..7d12137f578 100644 --- a/docs/api/paddle/nn/Dropout_cn.rst +++ b/docs/api/paddle/nn/Dropout_cn.rst @@ -38,4 +38,4 @@ Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p`,在 代码示例 ::::::::: -COPY-FROM: paddle.nn.Dropout \ No newline at end of file +COPY-FROM: paddle.nn.Dropout diff --git a/docs/api/paddle/nn/ELU_cn.rst b/docs/api/paddle/nn/ELU_cn.rst index 5f0924eab5b..6da1b8512e6 100644 --- a/docs/api/paddle/nn/ELU_cn.rst +++ b/docs/api/paddle/nn/ELU_cn.rst @@ -33,4 +33,4 @@ ELU激活层(ELU Activation Operator) 代码示例 ::::::::: -COPY-FROM: paddle.nn.ELU \ No newline at end of file +COPY-FROM: paddle.nn.ELU diff --git a/docs/api/paddle/nn/Embedding_cn.rst b/docs/api/paddle/nn/Embedding_cn.rst index 382adc944ab..5f8863805dc 100644 --- a/docs/api/paddle/nn/Embedding_cn.rst +++ b/docs/api/paddle/nn/Embedding_cn.rst @@ -84,5 +84,3 @@ Tensor, input映射后得到的Embedding Tensor,数据类型和词嵌入的定 out=embedding(x) out.backward() adam.step() - - diff --git a/docs/api/paddle/nn/Fold_cn.rst b/docs/api/paddle/nn/Fold_cn.rst index cf0ed5df7f0..78a1eabca69 100644 --- a/docs/api/paddle/nn/Fold_cn.rst +++ b/docs/api/paddle/nn/Fold_cn.rst @@ -38,4 +38,4 @@ 
Fold 代码示例 ::::::::: -COPY-FROM: paddle.nn.Fold \ No newline at end of file +COPY-FROM: paddle.nn.Fold diff --git a/docs/api/paddle/nn/GELU_cn.rst b/docs/api/paddle/nn/GELU_cn.rst index 7aeb618155a..df76f0919fd 100644 --- a/docs/api/paddle/nn/GELU_cn.rst +++ b/docs/api/paddle/nn/GELU_cn.rst @@ -34,4 +34,4 @@ GELU激活层(GELU Activation Operator) 代码示例 ::::::::: -COPY-FROM: paddle.nn.GELU \ No newline at end of file +COPY-FROM: paddle.nn.GELU diff --git a/docs/api/paddle/nn/GRUCell_cn.rst b/docs/api/paddle/nn/GRUCell_cn.rst index 200a64db0c4..cb58e6bb314 100644 --- a/docs/api/paddle/nn/GRUCell_cn.rst +++ b/docs/api/paddle/nn/GRUCell_cn.rst @@ -68,4 +68,4 @@ GRUCell 代码示例 :::::::::::: -COPY-FROM: paddle.nn.GRUCell \ No newline at end of file +COPY-FROM: paddle.nn.GRUCell diff --git a/docs/api/paddle/nn/GRU_cn.rst b/docs/api/paddle/nn/GRU_cn.rst index 4cd6de23970..8cff30a7716 100644 --- a/docs/api/paddle/nn/GRU_cn.rst +++ b/docs/api/paddle/nn/GRU_cn.rst @@ -59,4 +59,4 @@ GRU 代码示例 :::::::::::: -COPY-FROM: paddle.nn.GRU \ No newline at end of file +COPY-FROM: paddle.nn.GRU diff --git a/docs/api/paddle/nn/HSigmoidLoss_cn.rst b/docs/api/paddle/nn/HSigmoidLoss_cn.rst index f9d811d9102..422e43c8a35 100644 --- a/docs/api/paddle/nn/HSigmoidLoss_cn.rst +++ b/docs/api/paddle/nn/HSigmoidLoss_cn.rst @@ -42,4 +42,4 @@ HSigmoidLoss 代码示例 :::::::::: -COPY-FROM: paddle.nn.HSigmoidLoss \ No newline at end of file +COPY-FROM: paddle.nn.HSigmoidLoss diff --git a/docs/api/paddle/nn/Hardshrink_cn.rst b/docs/api/paddle/nn/Hardshrink_cn.rst index 4d1f94e42ee..dadc7afd175 100644 --- a/docs/api/paddle/nn/Hardshrink_cn.rst +++ b/docs/api/paddle/nn/Hardshrink_cn.rst @@ -32,4 +32,4 @@ Hardshrink激活层 代码示例 :::::::::: -COPY-FROM: paddle.nn.Hardshrink \ No newline at end of file +COPY-FROM: paddle.nn.Hardshrink diff --git a/docs/api/paddle/nn/Hardsigmoid_cn.rst b/docs/api/paddle/nn/Hardsigmoid_cn.rst index b6163b84941..7b4ab4e710b 100644 --- a/docs/api/paddle/nn/Hardsigmoid_cn.rst +++ 
b/docs/api/paddle/nn/Hardsigmoid_cn.rst @@ -33,4 +33,4 @@ Hardsigmoid激活层。sigmoid的分段线性逼近激活函数,速度比sigmo 代码示例 :::::::::: -COPY-FROM: paddle.nn.Hardsigmoid \ No newline at end of file +COPY-FROM: paddle.nn.Hardsigmoid diff --git a/docs/api/paddle/nn/Hardswish_cn.rst b/docs/api/paddle/nn/Hardswish_cn.rst index a18e5e62050..1e769a58f46 100644 --- a/docs/api/paddle/nn/Hardswish_cn.rst +++ b/docs/api/paddle/nn/Hardswish_cn.rst @@ -33,4 +33,4 @@ Hardswish激活函数。在MobileNetV3架构中被提出,相较于swish函数 代码示例 :::::::::: -COPY-FROM: paddle.nn.Hardswish \ No newline at end of file +COPY-FROM: paddle.nn.Hardswish diff --git a/docs/api/paddle/nn/Hardtanh_cn.rst b/docs/api/paddle/nn/Hardtanh_cn.rst index e24f74bfcd4..c7a1d8a80b4 100644 --- a/docs/api/paddle/nn/Hardtanh_cn.rst +++ b/docs/api/paddle/nn/Hardtanh_cn.rst @@ -33,4 +33,4 @@ Hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: 代码示例 ::::::::: -COPY-FROM: paddle.nn.Hardtanh \ No newline at end of file +COPY-FROM: paddle.nn.Hardtanh diff --git a/docs/api/paddle/nn/HingeEmbeddingLoss_cn.rst b/docs/api/paddle/nn/HingeEmbeddingLoss_cn.rst index 67b89de80a9..2bd93f15bfa 100644 --- a/docs/api/paddle/nn/HingeEmbeddingLoss_cn.rst +++ b/docs/api/paddle/nn/HingeEmbeddingLoss_cn.rst @@ -51,4 +51,4 @@ HingeEmbeddingLoss 代码示例 ::::::::: -COPY-FROM: paddle.nn.HingeEmbeddingLoss \ No newline at end of file +COPY-FROM: paddle.nn.HingeEmbeddingLoss diff --git a/docs/api/paddle/nn/Identity_cn.rst b/docs/api/paddle/nn/Identity_cn.rst index 8dc7caedd40..c72d5b3edd0 100644 --- a/docs/api/paddle/nn/Identity_cn.rst +++ b/docs/api/paddle/nn/Identity_cn.rst @@ -28,4 +28,4 @@ Identity 代码示例 ::::::::: -COPY-FROM: paddle.nn.Identity \ No newline at end of file +COPY-FROM: paddle.nn.Identity diff --git a/docs/api/paddle/nn/InstanceNorm1D_cn.rst b/docs/api/paddle/nn/InstanceNorm1D_cn.rst index 869165d3938..3178bff5434 100644 --- a/docs/api/paddle/nn/InstanceNorm1D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm1D_cn.rst @@ -59,4 +59,3 @@ Note: instance_norm_out = 
instance_norm(x) print(instance_norm_out) - diff --git a/docs/api/paddle/nn/InstanceNorm2D_cn.rst b/docs/api/paddle/nn/InstanceNorm2D_cn.rst index c6f3ab1dff4..b26c305e4c2 100644 --- a/docs/api/paddle/nn/InstanceNorm2D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm2D_cn.rst @@ -58,5 +58,3 @@ Note: instance_norm_out = instance_norm(x) print(instance_norm_out) - - diff --git a/docs/api/paddle/nn/InstanceNorm3D_cn.rst b/docs/api/paddle/nn/InstanceNorm3D_cn.rst index 2e044850d08..a372ffdbe6d 100644 --- a/docs/api/paddle/nn/InstanceNorm3D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm3D_cn.rst @@ -57,4 +57,3 @@ Note: instance_norm_out = instance_norm(x) print(instance_norm_out) - diff --git a/docs/api/paddle/nn/KLDivLoss_cn.rst b/docs/api/paddle/nn/KLDivLoss_cn.rst index a6def96b80d..4d59f6e5ffa 100644 --- a/docs/api/paddle/nn/KLDivLoss_cn.rst +++ b/docs/api/paddle/nn/KLDivLoss_cn.rst @@ -37,4 +37,4 @@ kL发散损失计算如下: 代码示例 :::::::::::: -COPY-FROM: paddle.nn.KLDivLoss \ No newline at end of file +COPY-FROM: paddle.nn.KLDivLoss diff --git a/docs/api/paddle/nn/L1Loss_cn.rst b/docs/api/paddle/nn/L1Loss_cn.rst index a006f83f269..3640f7af9ce 100644 --- a/docs/api/paddle/nn/L1Loss_cn.rst +++ b/docs/api/paddle/nn/L1Loss_cn.rst @@ -39,4 +39,4 @@ L1Loss 代码示例 ::::::::: -COPY-FROM: paddle.nn.L1Loss \ No newline at end of file +COPY-FROM: paddle.nn.L1Loss diff --git a/docs/api/paddle/nn/LSTMCell_cn.rst b/docs/api/paddle/nn/LSTMCell_cn.rst index ee8de0c3264..70a0bdad001 100644 --- a/docs/api/paddle/nn/LSTMCell_cn.rst +++ b/docs/api/paddle/nn/LSTMCell_cn.rst @@ -70,4 +70,4 @@ LSTMCell 代码示例 :::::::::::: -COPY-FROM: paddle.nn.LSTMCell \ No newline at end of file +COPY-FROM: paddle.nn.LSTMCell diff --git a/docs/api/paddle/nn/LSTM_cn.rst b/docs/api/paddle/nn/LSTM_cn.rst index 40e7f93c25c..15e70fb6818 100644 --- a/docs/api/paddle/nn/LSTM_cn.rst +++ b/docs/api/paddle/nn/LSTM_cn.rst @@ -63,4 +63,4 @@ LSTM 代码示例 :::::::::::: -COPY-FROM: paddle.nn.LSTM \ No newline at end of file +COPY-FROM: 
paddle.nn.LSTM diff --git a/docs/api/paddle/nn/LayerNorm_cn.rst b/docs/api/paddle/nn/LayerNorm_cn.rst index df109a87489..3e9d5394994 100644 --- a/docs/api/paddle/nn/LayerNorm_cn.rst +++ b/docs/api/paddle/nn/LayerNorm_cn.rst @@ -53,4 +53,3 @@ LayerNorm layer_norm_out = layer_norm(x) print(layer_norm_out) - diff --git a/docs/api/paddle/nn/Layer_cn.rst b/docs/api/paddle/nn/Layer_cn.rst index 2f7b6b77c2f..1aff35022f7 100644 --- a/docs/api/paddle/nn/Layer_cn.rst +++ b/docs/api/paddle/nn/Layer_cn.rst @@ -749,4 +749,3 @@ to(device=None, dtype=None, blocking=None) #Tensor(shape=[2, 2], dtype=float64, place=CUDAPinnedPlace, stop_gradient=False, # [[-0.04989364, -0.56889004], # [ 0.33960250, 0.96878713]]) - diff --git a/docs/api/paddle/nn/LeakyReLU_cn.rst b/docs/api/paddle/nn/LeakyReLU_cn.rst index 918a21f49ef..f4692abf49d 100644 --- a/docs/api/paddle/nn/LeakyReLU_cn.rst +++ b/docs/api/paddle/nn/LeakyReLU_cn.rst @@ -32,4 +32,4 @@ LeakyReLU 激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.LeakyReLU \ No newline at end of file +COPY-FROM: paddle.nn.LeakyReLU diff --git a/docs/api/paddle/nn/Linear_cn.rst b/docs/api/paddle/nn/Linear_cn.rst index 9cc387b27a5..851b5465423 100644 --- a/docs/api/paddle/nn/Linear_cn.rst +++ b/docs/api/paddle/nn/Linear_cn.rst @@ -49,4 +49,4 @@ bias 代码示例 ::::::::: -COPY-FROM: paddle.nn.Linear \ No newline at end of file +COPY-FROM: paddle.nn.Linear diff --git a/docs/api/paddle/nn/LocalResponseNorm_cn.rst b/docs/api/paddle/nn/LocalResponseNorm_cn.rst index 85d4eae84ed..6fead24fab7 100644 --- a/docs/api/paddle/nn/LocalResponseNorm_cn.rst +++ b/docs/api/paddle/nn/LocalResponseNorm_cn.rst @@ -27,4 +27,4 @@ LocalResponseNorm 代码示例 ::::::::: -COPY-FROM: paddle.nn.LocalResponseNorm \ No newline at end of file +COPY-FROM: paddle.nn.LocalResponseNorm diff --git a/docs/api/paddle/nn/LogSigmoid_cn.rst b/docs/api/paddle/nn/LogSigmoid_cn.rst index 545d6cdd5b3..d4e39cb2866 100644 --- a/docs/api/paddle/nn/LogSigmoid_cn.rst +++ b/docs/api/paddle/nn/LogSigmoid_cn.rst @@ -24,4 
+24,4 @@ LogSigmoid激活层。计算公式如下: 代码示例 ::::::::: -COPY-FROM: paddle.nn.LogSigmoid \ No newline at end of file +COPY-FROM: paddle.nn.LogSigmoid diff --git a/docs/api/paddle/nn/LogSoftmax_cn.rst b/docs/api/paddle/nn/LogSoftmax_cn.rst index ef6e3835d09..1da69b265f3 100644 --- a/docs/api/paddle/nn/LogSoftmax_cn.rst +++ b/docs/api/paddle/nn/LogSoftmax_cn.rst @@ -27,4 +27,4 @@ LogSoftmax激活层,计算公式如下: 代码示例 ::::::::: -COPY-FROM: paddle.nn.LogSoftmax \ No newline at end of file +COPY-FROM: paddle.nn.LogSoftmax diff --git a/docs/api/paddle/nn/MSELoss_cn.rst b/docs/api/paddle/nn/MSELoss_cn.rst index ba5df238aa3..10e463c4861 100644 --- a/docs/api/paddle/nn/MSELoss_cn.rst +++ b/docs/api/paddle/nn/MSELoss_cn.rst @@ -44,4 +44,4 @@ MSELoss 代码示例 :::::::::::: -COPY-FROM: paddle.nn.MSELoss \ No newline at end of file +COPY-FROM: paddle.nn.MSELoss diff --git a/docs/api/paddle/nn/MarginRankingLoss_cn.rst b/docs/api/paddle/nn/MarginRankingLoss_cn.rst index c0f57b3d20a..905bc24f0c0 100644 --- a/docs/api/paddle/nn/MarginRankingLoss_cn.rst +++ b/docs/api/paddle/nn/MarginRankingLoss_cn.rst @@ -44,4 +44,4 @@ MarginRankingLoss 代码示例 :::::::: -COPY-FROM: paddle.nn.MarginRankingLoss \ No newline at end of file +COPY-FROM: paddle.nn.MarginRankingLoss diff --git a/docs/api/paddle/nn/MaxPool1D_cn.rst b/docs/api/paddle/nn/MaxPool1D_cn.rst index 1f9198b0bcd..15f25ed6d92 100755 --- a/docs/api/paddle/nn/MaxPool1D_cn.rst +++ b/docs/api/paddle/nn/MaxPool1D_cn.rst @@ -39,4 +39,4 @@ MaxPool1D 代码示例 ::::::::: -COPY-FROM: paddle.nn.MaxPool1D \ No newline at end of file +COPY-FROM: paddle.nn.MaxPool1D diff --git a/docs/api/paddle/nn/MaxPool2D_cn.rst b/docs/api/paddle/nn/MaxPool2D_cn.rst index 1d3532223e8..4450c0e240d 100644 --- a/docs/api/paddle/nn/MaxPool2D_cn.rst +++ b/docs/api/paddle/nn/MaxPool2D_cn.rst @@ -48,4 +48,4 @@ MaxPool2D 代码示例 ::::::::: -COPY-FROM: paddle.nn.MaxPool2D \ No newline at end of file +COPY-FROM: paddle.nn.MaxPool2D diff --git a/docs/api/paddle/nn/MaxPool3D_cn.rst 
b/docs/api/paddle/nn/MaxPool3D_cn.rst index 22955b70f48..e1912834c9c 100644 --- a/docs/api/paddle/nn/MaxPool3D_cn.rst +++ b/docs/api/paddle/nn/MaxPool3D_cn.rst @@ -46,4 +46,4 @@ MaxPool3D 代码示例 ::::::::: -COPY-FROM: paddle.nn.MaxPool3D \ No newline at end of file +COPY-FROM: paddle.nn.MaxPool3D diff --git a/docs/api/paddle/nn/MaxUnPool1D_cn.rst b/docs/api/paddle/nn/MaxUnPool1D_cn.rst index df18cfa48cc..bdd556386f9 100644 --- a/docs/api/paddle/nn/MaxUnPool1D_cn.rst +++ b/docs/api/paddle/nn/MaxUnPool1D_cn.rst @@ -44,4 +44,4 @@ MaxUnPool1D 代码示例 ::::::::: -COPY-FROM: paddle.nn.MaxUnPool1D \ No newline at end of file +COPY-FROM: paddle.nn.MaxUnPool1D diff --git a/docs/api/paddle/nn/MaxUnPool2D_cn.rst b/docs/api/paddle/nn/MaxUnPool2D_cn.rst index 2eb11288123..64b55c9d4b3 100644 --- a/docs/api/paddle/nn/MaxUnPool2D_cn.rst +++ b/docs/api/paddle/nn/MaxUnPool2D_cn.rst @@ -48,4 +48,4 @@ MaxUnPool2D 代码示例 ::::::::: -COPY-FROM: paddle.nn.MaxUnPool2D \ No newline at end of file +COPY-FROM: paddle.nn.MaxUnPool2D diff --git a/docs/api/paddle/nn/Maxout_cn.rst b/docs/api/paddle/nn/Maxout_cn.rst index 319f6273f9b..aca9fed6a5a 100644 --- a/docs/api/paddle/nn/Maxout_cn.rst +++ b/docs/api/paddle/nn/Maxout_cn.rst @@ -34,4 +34,4 @@ Maxout激活层。 代码示例 :::::::::: -COPY-FROM: paddle.nn.Maxout \ No newline at end of file +COPY-FROM: paddle.nn.Maxout diff --git a/docs/api/paddle/nn/Mish_cn.rst b/docs/api/paddle/nn/Mish_cn.rst index fbaa68bbadb..79516f21786 100644 --- a/docs/api/paddle/nn/Mish_cn.rst +++ b/docs/api/paddle/nn/Mish_cn.rst @@ -28,4 +28,4 @@ Mish激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.Mish \ No newline at end of file +COPY-FROM: paddle.nn.Mish diff --git a/docs/api/paddle/nn/MultiHeadAttention_cn.rst b/docs/api/paddle/nn/MultiHeadAttention_cn.rst index 3f62870f6a5..e2e7b7c7da5 100644 --- a/docs/api/paddle/nn/MultiHeadAttention_cn.rst +++ b/docs/api/paddle/nn/MultiHeadAttention_cn.rst @@ -30,4 +30,4 @@ MultiHeadAttention 代码示例 :::::::::::: -COPY-FROM: paddle.nn.MultiHeadAttention \ No 
newline at end of file +COPY-FROM: paddle.nn.MultiHeadAttention diff --git a/docs/api/paddle/nn/NLLLoss_cn.rst b/docs/api/paddle/nn/NLLLoss_cn.rst index 41a2f9ce3f7..9eb1ea7d53c 100644 --- a/docs/api/paddle/nn/NLLLoss_cn.rst +++ b/docs/api/paddle/nn/NLLLoss_cn.rst @@ -46,4 +46,4 @@ NLLLoss 代码示例 ::::::::: -COPY-FROM: paddle.nn.NLLLoss \ No newline at end of file +COPY-FROM: paddle.nn.NLLLoss diff --git a/docs/api/paddle/nn/PReLU_cn.rst b/docs/api/paddle/nn/PReLU_cn.rst index 8a60ea41ff4..abb7b68e771 100644 --- a/docs/api/paddle/nn/PReLU_cn.rst +++ b/docs/api/paddle/nn/PReLU_cn.rst @@ -30,4 +30,4 @@ PReLU激活层(PReLU Activation Operator)。计算公式如下: 代码示例 ::::::::: -COPY-FROM: paddle.nn.PReLU \ No newline at end of file +COPY-FROM: paddle.nn.PReLU diff --git a/docs/api/paddle/nn/Pad1D_cn.rst b/docs/api/paddle/nn/Pad1D_cn.rst index 9b8c9c34cbc..da2b2f35bc3 100644 --- a/docs/api/paddle/nn/Pad1D_cn.rst +++ b/docs/api/paddle/nn/Pad1D_cn.rst @@ -27,4 +27,4 @@ Pad1D 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Pad1D \ No newline at end of file +COPY-FROM: paddle.nn.Pad1D diff --git a/docs/api/paddle/nn/Pad2D_cn.rst b/docs/api/paddle/nn/Pad2D_cn.rst index 45eff9fd983..9be03c54fc7 100644 --- a/docs/api/paddle/nn/Pad2D_cn.rst +++ b/docs/api/paddle/nn/Pad2D_cn.rst @@ -27,4 +27,4 @@ Pad2D 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Pad2D \ No newline at end of file +COPY-FROM: paddle.nn.Pad2D diff --git a/docs/api/paddle/nn/Pad3D_cn.rst b/docs/api/paddle/nn/Pad3D_cn.rst index b65cbf4d158..da121a4a607 100644 --- a/docs/api/paddle/nn/Pad3D_cn.rst +++ b/docs/api/paddle/nn/Pad3D_cn.rst @@ -27,4 +27,4 @@ Pad3D 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Pad3D \ No newline at end of file +COPY-FROM: paddle.nn.Pad3D diff --git a/docs/api/paddle/nn/PairwiseDistance_cn.rst b/docs/api/paddle/nn/PairwiseDistance_cn.rst index afc56e3dd19..0ce19034542 100644 --- a/docs/api/paddle/nn/PairwiseDistance_cn.rst +++ b/docs/api/paddle/nn/PairwiseDistance_cn.rst @@ -27,4 +27,4 @@ PairwiseDistance 代码示例 :::::::: 
-COPY-FROM: paddle.nn.PairwiseDistance \ No newline at end of file +COPY-FROM: paddle.nn.PairwiseDistance diff --git a/docs/api/paddle/nn/ParameterList_cn.rst b/docs/api/paddle/nn/ParameterList_cn.rst index 991fa08dffd..df77abd9924 100644 --- a/docs/api/paddle/nn/ParameterList_cn.rst +++ b/docs/api/paddle/nn/ParameterList_cn.rst @@ -22,4 +22,4 @@ ParameterList 代码示例 :::::::::::: -COPY-FROM: paddle.nn.ParameterList \ No newline at end of file +COPY-FROM: paddle.nn.ParameterList diff --git a/docs/api/paddle/nn/RNN_cn.rst b/docs/api/paddle/nn/RNN_cn.rst index fd6d0ac8eda..a594f816fd4 100644 --- a/docs/api/paddle/nn/RNN_cn.rst +++ b/docs/api/paddle/nn/RNN_cn.rst @@ -38,4 +38,4 @@ RNN 代码示例 :::::::::::: -COPY-FROM: paddle.nn.RNN \ No newline at end of file +COPY-FROM: paddle.nn.RNN diff --git a/docs/api/paddle/nn/ReLU6_cn.rst b/docs/api/paddle/nn/ReLU6_cn.rst index 20c83943913..7e5f836caf8 100644 --- a/docs/api/paddle/nn/ReLU6_cn.rst +++ b/docs/api/paddle/nn/ReLU6_cn.rst @@ -24,4 +24,4 @@ ReLU6激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.ReLU6 \ No newline at end of file +COPY-FROM: paddle.nn.ReLU6 diff --git a/docs/api/paddle/nn/ReLU_cn.rst b/docs/api/paddle/nn/ReLU_cn.rst index 07c16200c0c..aa93abe4dec 100644 --- a/docs/api/paddle/nn/ReLU_cn.rst +++ b/docs/api/paddle/nn/ReLU_cn.rst @@ -24,4 +24,4 @@ ReLU激活层(Rectified Linear Unit)。计算公式如下: 代码示例 ::::::::: -COPY-FROM: paddle.nn.ReLU \ No newline at end of file +COPY-FROM: paddle.nn.ReLU diff --git a/docs/api/paddle/nn/SELU_cn.rst b/docs/api/paddle/nn/SELU_cn.rst index 9e604862bb8..57857534761 100644 --- a/docs/api/paddle/nn/SELU_cn.rst +++ b/docs/api/paddle/nn/SELU_cn.rst @@ -30,4 +30,4 @@ SELU激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.SELU \ No newline at end of file +COPY-FROM: paddle.nn.SELU diff --git a/docs/api/paddle/nn/Sequential_cn.rst b/docs/api/paddle/nn/Sequential_cn.rst index 853a3f36f40..2c46cb75c2a 100644 --- a/docs/api/paddle/nn/Sequential_cn.rst +++ b/docs/api/paddle/nn/Sequential_cn.rst @@ -22,4 +22,4 @@ 
Sequential 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Sequential \ No newline at end of file +COPY-FROM: paddle.nn.Sequential diff --git a/docs/api/paddle/nn/Sigmoid_cn.rst b/docs/api/paddle/nn/Sigmoid_cn.rst index 18c6526ab41..3d502a7b35d 100644 --- a/docs/api/paddle/nn/Sigmoid_cn.rst +++ b/docs/api/paddle/nn/Sigmoid_cn.rst @@ -27,4 +27,4 @@ Sigmoid 代码示例 :::::::: -COPY-FROM: paddle.nn.Sigmoid \ No newline at end of file +COPY-FROM: paddle.nn.Sigmoid diff --git a/docs/api/paddle/nn/Silu_cn.rst b/docs/api/paddle/nn/Silu_cn.rst index 078ba65c4ef..f23ba4c9ff5 100644 --- a/docs/api/paddle/nn/Silu_cn.rst +++ b/docs/api/paddle/nn/Silu_cn.rst @@ -24,4 +24,4 @@ Silu激活层。计算公式如下: 代码示例 ::::::::: -COPY-FROM: paddle.nn.Silu \ No newline at end of file +COPY-FROM: paddle.nn.Silu diff --git a/docs/api/paddle/nn/SimpleRNNCell_cn.rst b/docs/api/paddle/nn/SimpleRNNCell_cn.rst index aa978de8db6..49de0da7b38 100644 --- a/docs/api/paddle/nn/SimpleRNNCell_cn.rst +++ b/docs/api/paddle/nn/SimpleRNNCell_cn.rst @@ -62,4 +62,4 @@ SimpleRNNCell 代码示例 :::::::::::: -COPY-FROM: paddle.nn.SimpleRNNCell \ No newline at end of file +COPY-FROM: paddle.nn.SimpleRNNCell diff --git a/docs/api/paddle/nn/SimpleRNN_cn.rst b/docs/api/paddle/nn/SimpleRNN_cn.rst index d3832737e6e..504b8e38060 100644 --- a/docs/api/paddle/nn/SimpleRNN_cn.rst +++ b/docs/api/paddle/nn/SimpleRNN_cn.rst @@ -50,4 +50,4 @@ SimpleRNN 代码示例 :::::::::::: -COPY-FROM: paddle.nn.SimpleRNN \ No newline at end of file +COPY-FROM: paddle.nn.SimpleRNN diff --git a/docs/api/paddle/nn/SmoothL1Loss_cn.rst b/docs/api/paddle/nn/SmoothL1Loss_cn.rst index d961c6698d5..532faa6b87e 100644 --- a/docs/api/paddle/nn/SmoothL1Loss_cn.rst +++ b/docs/api/paddle/nn/SmoothL1Loss_cn.rst @@ -41,4 +41,4 @@ Tensor,计算 `SmoothL1Loss` 后的损失值。 代码示例 ::::::::: -COPY-FROM: paddle.nn.SmoothL1Loss \ No newline at end of file +COPY-FROM: paddle.nn.SmoothL1Loss diff --git a/docs/api/paddle/nn/Softplus_cn.rst b/docs/api/paddle/nn/Softplus_cn.rst index c801c7346e5..ec206917d46 
100644 --- a/docs/api/paddle/nn/Softplus_cn.rst +++ b/docs/api/paddle/nn/Softplus_cn.rst @@ -27,4 +27,4 @@ Softplus激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.Softplus \ No newline at end of file +COPY-FROM: paddle.nn.Softplus diff --git a/docs/api/paddle/nn/Softshrink_cn.rst b/docs/api/paddle/nn/Softshrink_cn.rst index 135acdeaf6d..bf055bb352b 100644 --- a/docs/api/paddle/nn/Softshrink_cn.rst +++ b/docs/api/paddle/nn/Softshrink_cn.rst @@ -29,4 +29,4 @@ Softshrink激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.Softshrink \ No newline at end of file +COPY-FROM: paddle.nn.Softshrink diff --git a/docs/api/paddle/nn/Softsign_cn.rst b/docs/api/paddle/nn/Softsign_cn.rst index 040dd14b9c4..1fbdbfe545a 100644 --- a/docs/api/paddle/nn/Softsign_cn.rst +++ b/docs/api/paddle/nn/Softsign_cn.rst @@ -24,4 +24,4 @@ Softsign激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.Softsign \ No newline at end of file +COPY-FROM: paddle.nn.Softsign diff --git a/docs/api/paddle/nn/Swish_cn.rst b/docs/api/paddle/nn/Swish_cn.rst index 530bdb5de46..7c3b74cbbae 100644 --- a/docs/api/paddle/nn/Swish_cn.rst +++ b/docs/api/paddle/nn/Swish_cn.rst @@ -24,4 +24,4 @@ Swish激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.Swish \ No newline at end of file +COPY-FROM: paddle.nn.Swish diff --git a/docs/api/paddle/nn/SyncBatchNorm_cn.rst b/docs/api/paddle/nn/SyncBatchNorm_cn.rst index 85f17539602..fcad169fed3 100644 --- a/docs/api/paddle/nn/SyncBatchNorm_cn.rst +++ b/docs/api/paddle/nn/SyncBatchNorm_cn.rst @@ -95,4 +95,3 @@ convert_sync_batchnorm(layer) import paddle.nn as nn model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5)) sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model) - diff --git a/docs/api/paddle/nn/Tanh_cn.rst b/docs/api/paddle/nn/Tanh_cn.rst index 5d4beddf8ed..27f82d5a902 100644 --- a/docs/api/paddle/nn/Tanh_cn.rst +++ b/docs/api/paddle/nn/Tanh_cn.rst @@ -23,4 +23,4 @@ Tanh激活层 代码示例 :::::::::: -COPY-FROM: paddle.nn.Tanh \ No newline at end of file +COPY-FROM: paddle.nn.Tanh diff --git 
a/docs/api/paddle/nn/Tanhshrink_cn.rst b/docs/api/paddle/nn/Tanhshrink_cn.rst index c84603e8f3b..2db0da03418 100644 --- a/docs/api/paddle/nn/Tanhshrink_cn.rst +++ b/docs/api/paddle/nn/Tanhshrink_cn.rst @@ -24,4 +24,4 @@ Tanhshrink激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.Tanhshrink \ No newline at end of file +COPY-FROM: paddle.nn.Tanhshrink diff --git a/docs/api/paddle/nn/ThresholdedReLU_cn.rst b/docs/api/paddle/nn/ThresholdedReLU_cn.rst index bce06f41611..565a284c104 100644 --- a/docs/api/paddle/nn/ThresholdedReLU_cn.rst +++ b/docs/api/paddle/nn/ThresholdedReLU_cn.rst @@ -28,4 +28,4 @@ Thresholded ReLU激活层 代码示例 ::::::::: -COPY-FROM: paddle.nn.ThresholdedReLU \ No newline at end of file +COPY-FROM: paddle.nn.ThresholdedReLU diff --git a/docs/api/paddle/nn/TransformerDecoderLayer_cn.rst b/docs/api/paddle/nn/TransformerDecoderLayer_cn.rst index 4ddacf5cb4c..8e94c99e96f 100644 --- a/docs/api/paddle/nn/TransformerDecoderLayer_cn.rst +++ b/docs/api/paddle/nn/TransformerDecoderLayer_cn.rst @@ -30,4 +30,4 @@ Transformer解码器层由三个子层组成:多头自注意力机制、编码 代码示例 :::::::::::: -COPY-FROM: paddle.nn.TransformerDecoderLayer \ No newline at end of file +COPY-FROM: paddle.nn.TransformerDecoderLayer diff --git a/docs/api/paddle/nn/TransformerDecoder_cn.rst b/docs/api/paddle/nn/TransformerDecoder_cn.rst index ca0496734c3..9e482ea5eb0 100644 --- a/docs/api/paddle/nn/TransformerDecoder_cn.rst +++ b/docs/api/paddle/nn/TransformerDecoder_cn.rst @@ -23,4 +23,4 @@ Transformer解码器由多个Transformer解码器层(``TransformerDecoderLayer 代码示例 :::::::::::: -COPY-FROM: paddle.nn.TransformerDecoder \ No newline at end of file +COPY-FROM: paddle.nn.TransformerDecoder diff --git a/docs/api/paddle/nn/TransformerEncoderLayer_cn.rst b/docs/api/paddle/nn/TransformerEncoderLayer_cn.rst index 49148cd0c4c..eb4abbcc80b 100644 --- a/docs/api/paddle/nn/TransformerEncoderLayer_cn.rst +++ b/docs/api/paddle/nn/TransformerEncoderLayer_cn.rst @@ -30,4 +30,4 @@ Transformer编码器层由两个子层组成:多头自注意力机制和前馈 代码示例 :::::::::::: -COPY-FROM: 
paddle.nn.TransformerEncoderLayer \ No newline at end of file +COPY-FROM: paddle.nn.TransformerEncoderLayer diff --git a/docs/api/paddle/nn/TransformerEncoder_cn.rst b/docs/api/paddle/nn/TransformerEncoder_cn.rst index 2cc4829fb04..490cefa8874 100644 --- a/docs/api/paddle/nn/TransformerEncoder_cn.rst +++ b/docs/api/paddle/nn/TransformerEncoder_cn.rst @@ -23,4 +23,4 @@ Transformer编码器由多个Transformer编码器层(``TransformerEncoderLayer 代码示例 :::::::::::: -COPY-FROM: paddle.nn.TransformerEncoder \ No newline at end of file +COPY-FROM: paddle.nn.TransformerEncoder diff --git a/docs/api/paddle/nn/Transformer_cn.rst b/docs/api/paddle/nn/Transformer_cn.rst index 2d4be6d1384..1da2ab00a77 100644 --- a/docs/api/paddle/nn/Transformer_cn.rst +++ b/docs/api/paddle/nn/Transformer_cn.rst @@ -117,4 +117,3 @@ Tensor,根据输入的 ``length`` 具体的大小生成的形状为 ``[length, # [ 0. 0. 0. -inf -inf] # [ 0. 0. 0. 0. -inf] # [ 0. 0. 0. 0. 0.]] - diff --git a/docs/api/paddle/nn/Unfold_cn.rst b/docs/api/paddle/nn/Unfold_cn.rst index bfcf7cc232e..29f948248b8 100644 --- a/docs/api/paddle/nn/Unfold_cn.rst +++ b/docs/api/paddle/nn/Unfold_cn.rst @@ -46,4 +46,4 @@ unfold 代码示例 :::::::::::: -COPY-FROM: paddle.nn.Unfold \ No newline at end of file +COPY-FROM: paddle.nn.Unfold diff --git a/docs/api/paddle/nn/Upsample_cn.rst b/docs/api/paddle/nn/Upsample_cn.rst index 9ff0961fcdb..57792737257 100644 --- a/docs/api/paddle/nn/Upsample_cn.rst +++ b/docs/api/paddle/nn/Upsample_cn.rst @@ -166,4 +166,4 @@ https://en.wikipedia.org/wiki/Bicubic_interpolation 代码示例 ::::::::: -COPY-FROM: paddle.nn.Upsample \ No newline at end of file +COPY-FROM: paddle.nn.Upsample diff --git a/docs/api/paddle/nn/UpsamplingBilinear2D_cn.rst b/docs/api/paddle/nn/UpsamplingBilinear2D_cn.rst index 1281e0cf8de..343557bde28 100644 --- a/docs/api/paddle/nn/UpsamplingBilinear2D_cn.rst +++ b/docs/api/paddle/nn/UpsamplingBilinear2D_cn.rst @@ -39,4 +39,4 @@ https://en.wikipedia.org/wiki/Bilinear_interpolation 代码示例 :::::::::::: -COPY-FROM: 
paddle.nn.UpsamplingBilinear2D \ No newline at end of file +COPY-FROM: paddle.nn.UpsamplingBilinear2D diff --git a/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst b/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst index bf7e673d2d4..17804a8be8b 100644 --- a/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst +++ b/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst @@ -67,4 +67,4 @@ https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation 代码示例 :::::::::::: -COPY-FROM: paddle.nn.UpsamplingNearest2D \ No newline at end of file +COPY-FROM: paddle.nn.UpsamplingNearest2D diff --git a/docs/api/paddle/nn/ZeroPad2D_cn.rst b/docs/api/paddle/nn/ZeroPad2D_cn.rst index 76ef89f624c..76739674521 100644 --- a/docs/api/paddle/nn/ZeroPad2D_cn.rst +++ b/docs/api/paddle/nn/ZeroPad2D_cn.rst @@ -29,4 +29,4 @@ ZeroPad2D 代码示例 ::::::::: -COPY-FROM: paddle.nn.ZeroPad2D \ No newline at end of file +COPY-FROM: paddle.nn.ZeroPad2D diff --git a/docs/api/paddle/nn/dynamic_decode_cn.rst b/docs/api/paddle/nn/dynamic_decode_cn.rst index 9fd42230ce7..decd6eaf272 100644 --- a/docs/api/paddle/nn/dynamic_decode_cn.rst +++ b/docs/api/paddle/nn/dynamic_decode_cn.rst @@ -33,4 +33,4 @@ tuple,若 :code:`return_length` 为True,则返回三元组 :code:`(final_out 代码示例 ::::::::: -COPY-FROM: paddle.nn.dynamic_decode \ No newline at end of file +COPY-FROM: paddle.nn.dynamic_decode diff --git a/docs/api/paddle/nn/functional/adaptive_avg_pool2d_cn.rst b/docs/api/paddle/nn/functional/adaptive_avg_pool2d_cn.rst index c30d5832a87..c6a16622876 100755 --- a/docs/api/paddle/nn/functional/adaptive_avg_pool2d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_avg_pool2d_cn.rst @@ -38,4 +38,4 @@ adaptive_avg_pool2d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.adaptive_avg_pool2d \ No newline at end of file +COPY-FROM: paddle.nn.functional.adaptive_avg_pool2d diff --git a/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst b/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst index 6e7fe2d01df..f1e773b32fa 100755 --- 
a/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst @@ -43,4 +43,4 @@ adaptive_avg_pool3d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.adaptive_avg_pool3d \ No newline at end of file +COPY-FROM: paddle.nn.functional.adaptive_avg_pool3d diff --git a/docs/api/paddle/nn/functional/adaptive_max_pool1d_cn.rst b/docs/api/paddle/nn/functional/adaptive_max_pool1d_cn.rst index a5d7a8dc4ff..425445315ca 100755 --- a/docs/api/paddle/nn/functional/adaptive_max_pool1d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_max_pool1d_cn.rst @@ -28,4 +28,4 @@ adaptive_max_pool1d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.adaptive_max_pool1d \ No newline at end of file +COPY-FROM: paddle.nn.functional.adaptive_max_pool1d diff --git a/docs/api/paddle/nn/functional/adaptive_max_pool2d_cn.rst b/docs/api/paddle/nn/functional/adaptive_max_pool2d_cn.rst index 790d9b69ac0..59a5163c33e 100644 --- a/docs/api/paddle/nn/functional/adaptive_max_pool2d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_max_pool2d_cn.rst @@ -25,4 +25,4 @@ adaptive_max_pool2d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.adaptive_max_pool2d \ No newline at end of file +COPY-FROM: paddle.nn.functional.adaptive_max_pool2d diff --git a/docs/api/paddle/nn/functional/adaptive_max_pool3d_cn.rst b/docs/api/paddle/nn/functional/adaptive_max_pool3d_cn.rst index b83d78d35c6..b972e8723de 100644 --- a/docs/api/paddle/nn/functional/adaptive_max_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_max_pool3d_cn.rst @@ -26,4 +26,4 @@ adaptive_max_pool3d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.adaptive_max_pool3d \ No newline at end of file +COPY-FROM: paddle.nn.functional.adaptive_max_pool3d diff --git a/docs/api/paddle/nn/functional/affine_grid_cn.rst b/docs/api/paddle/nn/functional/affine_grid_cn.rst index 96da78dfcce..b2600cf6b38 100644 --- a/docs/api/paddle/nn/functional/affine_grid_cn.rst +++ 
b/docs/api/paddle/nn/functional/affine_grid_cn.rst @@ -24,4 +24,4 @@ affine_grid 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.affine_grid \ No newline at end of file +COPY-FROM: paddle.nn.functional.affine_grid diff --git a/docs/api/paddle/nn/functional/alpha_dropout_cn.rst b/docs/api/paddle/nn/functional/alpha_dropout_cn.rst index f081cecf973..de09f0b0da7 100644 --- a/docs/api/paddle/nn/functional/alpha_dropout_cn.rst +++ b/docs/api/paddle/nn/functional/alpha_dropout_cn.rst @@ -21,4 +21,4 @@ alpha_dropout是一种具有自归一化性质的dropout。均值为0,方差 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.alpha_dropout \ No newline at end of file +COPY-FROM: paddle.nn.functional.alpha_dropout diff --git a/docs/api/paddle/nn/functional/avg_pool1d_cn.rst b/docs/api/paddle/nn/functional/avg_pool1d_cn.rst index 8297cded17b..4b84be57351 100755 --- a/docs/api/paddle/nn/functional/avg_pool1d_cn.rst +++ b/docs/api/paddle/nn/functional/avg_pool1d_cn.rst @@ -34,4 +34,4 @@ avg_pool1d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.avg_pool1d \ No newline at end of file +COPY-FROM: paddle.nn.functional.avg_pool1d diff --git a/docs/api/paddle/nn/functional/avg_pool2d_cn.rst b/docs/api/paddle/nn/functional/avg_pool2d_cn.rst index 0c3f65687b7..5574d0b97f1 100644 --- a/docs/api/paddle/nn/functional/avg_pool2d_cn.rst +++ b/docs/api/paddle/nn/functional/avg_pool2d_cn.rst @@ -45,4 +45,4 @@ avg_pool2d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.avg_pool2d \ No newline at end of file +COPY-FROM: paddle.nn.functional.avg_pool2d diff --git a/docs/api/paddle/nn/functional/avg_pool3d_cn.rst b/docs/api/paddle/nn/functional/avg_pool3d_cn.rst index f3420c5e57d..6e5c224dbea 100644 --- a/docs/api/paddle/nn/functional/avg_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/avg_pool3d_cn.rst @@ -49,4 +49,4 @@ avg_pool3d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.avg_pool3d \ No newline at end of file +COPY-FROM: paddle.nn.functional.avg_pool3d diff --git a/docs/api/paddle/nn/functional/batch_norm_cn.rst 
b/docs/api/paddle/nn/functional/batch_norm_cn.rst index 5d20908a5c3..697a3cf109f 100644 --- a/docs/api/paddle/nn/functional/batch_norm_cn.rst +++ b/docs/api/paddle/nn/functional/batch_norm_cn.rst @@ -31,4 +31,4 @@ batch_norm 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.batch_norm \ No newline at end of file +COPY-FROM: paddle.nn.functional.batch_norm diff --git a/docs/api/paddle/nn/functional/bilinear_cn.rst b/docs/api/paddle/nn/functional/bilinear_cn.rst index bb757b634d5..53956b42e99 100644 --- a/docs/api/paddle/nn/functional/bilinear_cn.rst +++ b/docs/api/paddle/nn/functional/bilinear_cn.rst @@ -24,4 +24,4 @@ bilinear 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.bilinear \ No newline at end of file +COPY-FROM: paddle.nn.functional.bilinear diff --git a/docs/api/paddle/nn/functional/binary_cross_entropy_cn.rst b/docs/api/paddle/nn/functional/binary_cross_entropy_cn.rst index fc2dca0b159..1aa613c080d 100644 --- a/docs/api/paddle/nn/functional/binary_cross_entropy_cn.rst +++ b/docs/api/paddle/nn/functional/binary_cross_entropy_cn.rst @@ -48,4 +48,4 @@ binary_cross_entropy 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.binary_cross_entropy \ No newline at end of file +COPY-FROM: paddle.nn.functional.binary_cross_entropy diff --git a/docs/api/paddle/nn/functional/binary_cross_entropy_with_logits_cn.rst b/docs/api/paddle/nn/functional/binary_cross_entropy_with_logits_cn.rst index d75107ef29b..864fd2d4846 100644 --- a/docs/api/paddle/nn/functional/binary_cross_entropy_with_logits_cn.rst +++ b/docs/api/paddle/nn/functional/binary_cross_entropy_with_logits_cn.rst @@ -48,4 +48,4 @@ binary_cross_entropy_with_logits 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.binary_cross_entropy_with_logits \ No newline at end of file +COPY-FROM: paddle.nn.functional.binary_cross_entropy_with_logits diff --git a/docs/api/paddle/nn/functional/celu_cn.rst b/docs/api/paddle/nn/functional/celu_cn.rst index ecca6f6a1e9..79418dda6c2 100644 --- 
a/docs/api/paddle/nn/functional/celu_cn.rst +++ b/docs/api/paddle/nn/functional/celu_cn.rst @@ -30,4 +30,4 @@ celu激活层(CELU Activation Operator) 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.celu \ No newline at end of file +COPY-FROM: paddle.nn.functional.celu diff --git a/docs/api/paddle/nn/functional/conv1d_cn.rst b/docs/api/paddle/nn/functional/conv1d_cn.rst index ad2b982728d..b563861dc8e 100755 --- a/docs/api/paddle/nn/functional/conv1d_cn.rst +++ b/docs/api/paddle/nn/functional/conv1d_cn.rst @@ -71,4 +71,4 @@ conv1d 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.conv1d \ No newline at end of file +COPY-FROM: paddle.nn.functional.conv1d diff --git a/docs/api/paddle/nn/functional/conv1d_transpose_cn.rst b/docs/api/paddle/nn/functional/conv1d_transpose_cn.rst index 762b736e511..f28fa3ea8c4 100644 --- a/docs/api/paddle/nn/functional/conv1d_transpose_cn.rst +++ b/docs/api/paddle/nn/functional/conv1d_transpose_cn.rst @@ -90,4 +90,4 @@ conv1d_transpose 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.conv1d_transpose \ No newline at end of file +COPY-FROM: paddle.nn.functional.conv1d_transpose diff --git a/docs/api/paddle/nn/functional/conv2d_cn.rst b/docs/api/paddle/nn/functional/conv2d_cn.rst index 9eb8719d0a3..f91cccf68fd 100755 --- a/docs/api/paddle/nn/functional/conv2d_cn.rst +++ b/docs/api/paddle/nn/functional/conv2d_cn.rst @@ -79,4 +79,4 @@ conv2d 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.conv2d \ No newline at end of file +COPY-FROM: paddle.nn.functional.conv2d diff --git a/docs/api/paddle/nn/functional/conv2d_transpose_cn.rst b/docs/api/paddle/nn/functional/conv2d_transpose_cn.rst index e087e9867c3..f8b1906c526 100644 --- a/docs/api/paddle/nn/functional/conv2d_transpose_cn.rst +++ b/docs/api/paddle/nn/functional/conv2d_transpose_cn.rst @@ -91,4 +91,4 @@ conv2d_transpose 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.conv2d_transpose \ No newline at end of file +COPY-FROM: paddle.nn.functional.conv2d_transpose diff --git 
a/docs/api/paddle/nn/functional/conv3d_cn.rst b/docs/api/paddle/nn/functional/conv3d_cn.rst index 8c3ae057cd6..c6f1951ac6d 100755 --- a/docs/api/paddle/nn/functional/conv3d_cn.rst +++ b/docs/api/paddle/nn/functional/conv3d_cn.rst @@ -74,4 +74,4 @@ Tensor。 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.conv3d \ No newline at end of file +COPY-FROM: paddle.nn.functional.conv3d diff --git a/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst b/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst index 104ffcdc830..663bf3c516c 100755 --- a/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst +++ b/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst @@ -112,4 +112,4 @@ Tensor 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.conv3d_transpose \ No newline at end of file +COPY-FROM: paddle.nn.functional.conv3d_transpose diff --git a/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst b/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst index 1e9733ac8a3..3bc66432ab8 100644 --- a/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst +++ b/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst @@ -39,4 +39,3 @@ cosine_embedding_loss 代码示例 ::::::::: COPY-FROM: paddle.nn.functional.cosine_embedding_loss:code-example1 - diff --git a/docs/api/paddle/nn/functional/cosine_similarity_cn.rst b/docs/api/paddle/nn/functional/cosine_similarity_cn.rst index 236e074aabf..475fbcbf715 100644 --- a/docs/api/paddle/nn/functional/cosine_similarity_cn.rst +++ b/docs/api/paddle/nn/functional/cosine_similarity_cn.rst @@ -25,4 +25,4 @@ Tensor,余弦相似度的计算结果,数据类型与x1, x2相同。 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.cosine_similarity \ No newline at end of file +COPY-FROM: paddle.nn.functional.cosine_similarity diff --git a/docs/api/paddle/nn/functional/cross_entropy_cn.rst b/docs/api/paddle/nn/functional/cross_entropy_cn.rst index 0828592b61d..d506c5b6ce1 100644 --- a/docs/api/paddle/nn/functional/cross_entropy_cn.rst +++ 
b/docs/api/paddle/nn/functional/cross_entropy_cn.rst @@ -99,4 +99,4 @@ cross_entropy 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.cross_entropy \ No newline at end of file +COPY-FROM: paddle.nn.functional.cross_entropy diff --git a/docs/api/paddle/nn/functional/ctc_loss_cn.rst b/docs/api/paddle/nn/functional/ctc_loss_cn.rst index c847f3845de..fbd67f034ef 100644 --- a/docs/api/paddle/nn/functional/ctc_loss_cn.rst +++ b/docs/api/paddle/nn/functional/ctc_loss_cn.rst @@ -25,4 +25,4 @@ ctc_loss 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.ctc_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.ctc_loss diff --git a/docs/api/paddle/nn/functional/dice_loss_cn.rst b/docs/api/paddle/nn/functional/dice_loss_cn.rst index 980b33e2010..f3a524d7f3d 100644 --- a/docs/api/paddle/nn/functional/dice_loss_cn.rst +++ b/docs/api/paddle/nn/functional/dice_loss_cn.rst @@ -32,4 +32,4 @@ dice_loss定义为: 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.dice_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.dice_loss diff --git a/docs/api/paddle/nn/functional/dropout2d_cn.rst b/docs/api/paddle/nn/functional/dropout2d_cn.rst index fa5a6ffd05b..f57827431ad 100644 --- a/docs/api/paddle/nn/functional/dropout2d_cn.rst +++ b/docs/api/paddle/nn/functional/dropout2d_cn.rst @@ -24,4 +24,4 @@ dropout2d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.dropout2d \ No newline at end of file +COPY-FROM: paddle.nn.functional.dropout2d diff --git a/docs/api/paddle/nn/functional/dropout3d_cn.rst b/docs/api/paddle/nn/functional/dropout3d_cn.rst index 8dadd45b894..087345d3af1 100644 --- a/docs/api/paddle/nn/functional/dropout3d_cn.rst +++ b/docs/api/paddle/nn/functional/dropout3d_cn.rst @@ -24,4 +24,4 @@ dropout3d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.dropout3d \ No newline at end of file +COPY-FROM: paddle.nn.functional.dropout3d diff --git a/docs/api/paddle/nn/functional/elu_cn.rst b/docs/api/paddle/nn/functional/elu_cn.rst index e2fb5f0a724..4b2b7a67112 100644 
--- a/docs/api/paddle/nn/functional/elu_cn.rst +++ b/docs/api/paddle/nn/functional/elu_cn.rst @@ -36,4 +36,4 @@ elu激活层(ELU Activation Operator) 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.elu \ No newline at end of file +COPY-FROM: paddle.nn.functional.elu diff --git a/docs/api/paddle/nn/functional/embedding_cn.rst b/docs/api/paddle/nn/functional/embedding_cn.rst index e1a26ad422b..5a1f3d4a64d 100644 --- a/docs/api/paddle/nn/functional/embedding_cn.rst +++ b/docs/api/paddle/nn/functional/embedding_cn.rst @@ -77,5 +77,3 @@ Tensor, input映射后得到的Embedding Tensor,数据类型和权重定义的 emb = nn.functional.embedding( x=x, weight=w, sparse=True, name="embedding") - - diff --git a/docs/api/paddle/nn/functional/fold_cn.rst b/docs/api/paddle/nn/functional/fold_cn.rst index 7b24b5332a8..12779759866 100644 --- a/docs/api/paddle/nn/functional/fold_cn.rst +++ b/docs/api/paddle/nn/functional/fold_cn.rst @@ -37,4 +37,4 @@ fold 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.fold \ No newline at end of file +COPY-FROM: paddle.nn.functional.fold diff --git a/docs/api/paddle/nn/functional/gather_tree_cn.rst b/docs/api/paddle/nn/functional/gather_tree_cn.rst index b17c79a6c4d..16ea356a3fa 100644 --- a/docs/api/paddle/nn/functional/gather_tree_cn.rst +++ b/docs/api/paddle/nn/functional/gather_tree_cn.rst @@ -53,4 +53,4 @@ gather_tree 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.gather_tree \ No newline at end of file +COPY-FROM: paddle.nn.functional.gather_tree diff --git a/docs/api/paddle/nn/functional/gelu_cn.rst b/docs/api/paddle/nn/functional/gelu_cn.rst index 40b7e83c995..55d1053afde 100644 --- a/docs/api/paddle/nn/functional/gelu_cn.rst +++ b/docs/api/paddle/nn/functional/gelu_cn.rst @@ -36,4 +36,4 @@ gelu激活层(GELU Activation Operator) 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.gelu \ No newline at end of file +COPY-FROM: paddle.nn.functional.gelu diff --git a/docs/api/paddle/nn/functional/grid_sample_cn.rst b/docs/api/paddle/nn/functional/grid_sample_cn.rst index 
91aeceea906..d1958431c88 100644 --- a/docs/api/paddle/nn/functional/grid_sample_cn.rst +++ b/docs/api/paddle/nn/functional/grid_sample_cn.rst @@ -111,4 +111,3 @@ Tensor,输入X基于输入网格的双线性插值计算结果,维度为 :ma # [[[[ 0.34 0.016 0.086 -0.448] # [ 0.55 -0.076 0.35 0.59 ] # [ 0.596 0.38 0.52 0.24 ]]]] - diff --git a/docs/api/paddle/nn/functional/gumbel_softmax_cn.rst b/docs/api/paddle/nn/functional/gumbel_softmax_cn.rst index 0ddf724ad1a..10c4ae63cd8 100644 --- a/docs/api/paddle/nn/functional/gumbel_softmax_cn.rst +++ b/docs/api/paddle/nn/functional/gumbel_softmax_cn.rst @@ -42,4 +42,4 @@ gumbel_softmax 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.gumbel_softmax \ No newline at end of file +COPY-FROM: paddle.nn.functional.gumbel_softmax diff --git a/docs/api/paddle/nn/functional/hardshrink_cn.rst b/docs/api/paddle/nn/functional/hardshrink_cn.rst index 5158d1a3673..8ffb23cb886 100644 --- a/docs/api/paddle/nn/functional/hardshrink_cn.rst +++ b/docs/api/paddle/nn/functional/hardshrink_cn.rst @@ -32,4 +32,4 @@ hardshrink激活层。计算公式如下: 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.hardshrink \ No newline at end of file +COPY-FROM: paddle.nn.functional.hardshrink diff --git a/docs/api/paddle/nn/functional/hardsigmoid_cn.rst b/docs/api/paddle/nn/functional/hardsigmoid_cn.rst index c11ac229909..c1afe461bd9 100644 --- a/docs/api/paddle/nn/functional/hardsigmoid_cn.rst +++ b/docs/api/paddle/nn/functional/hardsigmoid_cn.rst @@ -36,4 +36,4 @@ hardsigmoid激活层。sigmoid的分段线性逼近激活函数,速度比sigmo 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.hardsigmoid \ No newline at end of file +COPY-FROM: paddle.nn.functional.hardsigmoid diff --git a/docs/api/paddle/nn/functional/hardswish_cn.rst b/docs/api/paddle/nn/functional/hardswish_cn.rst index dd7135843fd..8381cdc40d0 100644 --- a/docs/api/paddle/nn/functional/hardswish_cn.rst +++ b/docs/api/paddle/nn/functional/hardswish_cn.rst @@ -32,4 +32,4 @@ hardswish激活函数。在MobileNetV3架构中被提出,相较于swish函数 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.hardswish \ No 
newline at end of file +COPY-FROM: paddle.nn.functional.hardswish diff --git a/docs/api/paddle/nn/functional/hardtanh_cn.rst b/docs/api/paddle/nn/functional/hardtanh_cn.rst index 69adfeb2a91..e7cf10de9c8 100644 --- a/docs/api/paddle/nn/functional/hardtanh_cn.rst +++ b/docs/api/paddle/nn/functional/hardtanh_cn.rst @@ -33,4 +33,4 @@ hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.hardtanh \ No newline at end of file +COPY-FROM: paddle.nn.functional.hardtanh diff --git a/docs/api/paddle/nn/functional/hinge_embedding_loss_cn.rst b/docs/api/paddle/nn/functional/hinge_embedding_loss_cn.rst index 5937b39d2b6..3bfbd63fd6a 100644 --- a/docs/api/paddle/nn/functional/hinge_embedding_loss_cn.rst +++ b/docs/api/paddle/nn/functional/hinge_embedding_loss_cn.rst @@ -48,4 +48,4 @@ hinge_embedding_loss 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.hinge_embedding_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.hinge_embedding_loss diff --git a/docs/api/paddle/nn/functional/hsigmoid_loss_cn.rst b/docs/api/paddle/nn/functional/hsigmoid_loss_cn.rst index 48a7a7ec113..1dc6d4099ff 100644 --- a/docs/api/paddle/nn/functional/hsigmoid_loss_cn.rst +++ b/docs/api/paddle/nn/functional/hsigmoid_loss_cn.rst @@ -42,4 +42,4 @@ hsigmoid_loss 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.hsigmoid_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.hsigmoid_loss diff --git a/docs/api/paddle/nn/functional/instance_norm_cn.rst b/docs/api/paddle/nn/functional/instance_norm_cn.rst index 6e7bb8e3e65..6b72fc38728 100644 --- a/docs/api/paddle/nn/functional/instance_norm_cn.rst +++ b/docs/api/paddle/nn/functional/instance_norm_cn.rst @@ -31,4 +31,4 @@ instance_norm 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.instance_norm \ No newline at end of file +COPY-FROM: paddle.nn.functional.instance_norm diff --git a/docs/api/paddle/nn/functional/interpolate_cn.rst b/docs/api/paddle/nn/functional/interpolate_cn.rst index 
4e6c3cee797..1d9638268e1 100644 --- a/docs/api/paddle/nn/functional/interpolate_cn.rst +++ b/docs/api/paddle/nn/functional/interpolate_cn.rst @@ -158,4 +158,4 @@ https://en.wikipedia.org/wiki/Bicubic_interpolation 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.interpolate \ No newline at end of file +COPY-FROM: paddle.nn.functional.interpolate diff --git a/docs/api/paddle/nn/functional/kl_div_cn.rst b/docs/api/paddle/nn/functional/kl_div_cn.rst index e25d79715df..ce23ca168b9 100644 --- a/docs/api/paddle/nn/functional/kl_div_cn.rst +++ b/docs/api/paddle/nn/functional/kl_div_cn.rst @@ -37,4 +37,4 @@ Tensor KL散度损失。 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.kl_div \ No newline at end of file +COPY-FROM: paddle.nn.functional.kl_div diff --git a/docs/api/paddle/nn/functional/l1_loss_cn.rst b/docs/api/paddle/nn/functional/l1_loss_cn.rst index b2d736bd0a5..933c27d883c 100644 --- a/docs/api/paddle/nn/functional/l1_loss_cn.rst +++ b/docs/api/paddle/nn/functional/l1_loss_cn.rst @@ -40,4 +40,4 @@ l1_loss 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.l1_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.l1_loss diff --git a/docs/api/paddle/nn/functional/label_smooth_cn.rst b/docs/api/paddle/nn/functional/label_smooth_cn.rst index 126104b6f6b..fffc08d3774 100644 --- a/docs/api/paddle/nn/functional/label_smooth_cn.rst +++ b/docs/api/paddle/nn/functional/label_smooth_cn.rst @@ -37,4 +37,4 @@ label_smooth 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.label_smooth \ No newline at end of file +COPY-FROM: paddle.nn.functional.label_smooth diff --git a/docs/api/paddle/nn/functional/layer_norm_cn.rst b/docs/api/paddle/nn/functional/layer_norm_cn.rst index 19b125272f5..e2a41d4f6f0 100644 --- a/docs/api/paddle/nn/functional/layer_norm_cn.rst +++ b/docs/api/paddle/nn/functional/layer_norm_cn.rst @@ -27,4 +27,4 @@ layer_norm 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.layer_norm \ No newline at end of file +COPY-FROM: paddle.nn.functional.layer_norm 
diff --git a/docs/api/paddle/nn/functional/leaky_relu_cn.rst b/docs/api/paddle/nn/functional/leaky_relu_cn.rst index 9b3b116ae11..28c47d54c4f 100644 --- a/docs/api/paddle/nn/functional/leaky_relu_cn.rst +++ b/docs/api/paddle/nn/functional/leaky_relu_cn.rst @@ -31,4 +31,4 @@ leaky_relu激活层。计算公式如下: 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.leaky_relu \ No newline at end of file +COPY-FROM: paddle.nn.functional.leaky_relu diff --git a/docs/api/paddle/nn/functional/linear_cn.rst b/docs/api/paddle/nn/functional/linear_cn.rst index ea418e8c036..7c0e0484845 100644 --- a/docs/api/paddle/nn/functional/linear_cn.rst +++ b/docs/api/paddle/nn/functional/linear_cn.rst @@ -38,4 +38,4 @@ Tensor,形状为 :math:`[batch\_size, *, out\_features]`,数据类型与输 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.linear \ No newline at end of file +COPY-FROM: paddle.nn.functional.linear diff --git a/docs/api/paddle/nn/functional/local_response_norm_cn.rst b/docs/api/paddle/nn/functional/local_response_norm_cn.rst index 3c334d0a89e..b1418a8d00d 100644 --- a/docs/api/paddle/nn/functional/local_response_norm_cn.rst +++ b/docs/api/paddle/nn/functional/local_response_norm_cn.rst @@ -37,4 +37,4 @@ local_response_norm 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.local_response_norm \ No newline at end of file +COPY-FROM: paddle.nn.functional.local_response_norm diff --git a/docs/api/paddle/nn/functional/log_loss_cn.rst b/docs/api/paddle/nn/functional/log_loss_cn.rst index a1505f3f4b5..07b348a4d1e 100644 --- a/docs/api/paddle/nn/functional/log_loss_cn.rst +++ b/docs/api/paddle/nn/functional/log_loss_cn.rst @@ -34,4 +34,4 @@ log_loss 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.log_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.log_loss diff --git a/docs/api/paddle/nn/functional/log_sigmoid_cn.rst b/docs/api/paddle/nn/functional/log_sigmoid_cn.rst index 9caf025604b..24ac76664fc 100644 --- a/docs/api/paddle/nn/functional/log_sigmoid_cn.rst +++ 
b/docs/api/paddle/nn/functional/log_sigmoid_cn.rst @@ -25,4 +25,4 @@ log_sigmoid激活层。计算公式如下: 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.log_sigmoid \ No newline at end of file +COPY-FROM: paddle.nn.functional.log_sigmoid diff --git a/docs/api/paddle/nn/functional/log_softmax_cn.rst b/docs/api/paddle/nn/functional/log_softmax_cn.rst index 31decd56ab0..24428dcb7de 100644 --- a/docs/api/paddle/nn/functional/log_softmax_cn.rst +++ b/docs/api/paddle/nn/functional/log_softmax_cn.rst @@ -27,4 +27,4 @@ log_softmax 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.log_softmax \ No newline at end of file +COPY-FROM: paddle.nn.functional.log_softmax diff --git a/docs/api/paddle/nn/functional/margin_cross_entropy_cn.rst b/docs/api/paddle/nn/functional/margin_cross_entropy_cn.rst index b7c1da0f4d1..00b446160d1 100644 --- a/docs/api/paddle/nn/functional/margin_cross_entropy_cn.rst +++ b/docs/api/paddle/nn/functional/margin_cross_entropy_cn.rst @@ -35,4 +35,4 @@ margin_cross_entropy 代码示例 :::::::::::: COPY-FROM: paddle.nn.functional.margin_cross_entropy:code-example1 -COPY-FROM: paddle.nn.functional.margin_cross_entropy:code-example2 \ No newline at end of file +COPY-FROM: paddle.nn.functional.margin_cross_entropy:code-example2 diff --git a/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst b/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst index 1977b725d7c..e5670203b2f 100644 --- a/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst +++ b/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst @@ -38,4 +38,4 @@ Tensor,如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'``,则形状 代码示例 :::::::: -COPY-FROM: paddle.nn.functional.margin_ranking_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.margin_ranking_loss diff --git a/docs/api/paddle/nn/functional/max_pool1d_cn.rst b/docs/api/paddle/nn/functional/max_pool1d_cn.rst index 2e8c8e4874d..fb231fbf084 100755 --- a/docs/api/paddle/nn/functional/max_pool1d_cn.rst +++ 
b/docs/api/paddle/nn/functional/max_pool1d_cn.rst @@ -34,4 +34,4 @@ max_pool1d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.max_pool1d \ No newline at end of file +COPY-FROM: paddle.nn.functional.max_pool1d diff --git a/docs/api/paddle/nn/functional/max_pool2d_cn.rst b/docs/api/paddle/nn/functional/max_pool2d_cn.rst index eb341b4ff51..bad909ede7b 100644 --- a/docs/api/paddle/nn/functional/max_pool2d_cn.rst +++ b/docs/api/paddle/nn/functional/max_pool2d_cn.rst @@ -44,4 +44,4 @@ max_pool2d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.max_pool2d \ No newline at end of file +COPY-FROM: paddle.nn.functional.max_pool2d diff --git a/docs/api/paddle/nn/functional/max_pool3d_cn.rst b/docs/api/paddle/nn/functional/max_pool3d_cn.rst index 143fdbd6972..284a36d82cf 100644 --- a/docs/api/paddle/nn/functional/max_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/max_pool3d_cn.rst @@ -45,4 +45,4 @@ max_pool3d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.max_pool3d \ No newline at end of file +COPY-FROM: paddle.nn.functional.max_pool3d diff --git a/docs/api/paddle/nn/functional/max_unpool1d_cn.rst b/docs/api/paddle/nn/functional/max_unpool1d_cn.rst index 1be0dc0c4d9..490c5c198f9 100644 --- a/docs/api/paddle/nn/functional/max_unpool1d_cn.rst +++ b/docs/api/paddle/nn/functional/max_unpool1d_cn.rst @@ -44,4 +44,4 @@ max_unpool1d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.max_unpool1d \ No newline at end of file +COPY-FROM: paddle.nn.functional.max_unpool1d diff --git a/docs/api/paddle/nn/functional/max_unpool2d_cn.rst b/docs/api/paddle/nn/functional/max_unpool2d_cn.rst index eeeed7782a3..a4aef5f774c 100644 --- a/docs/api/paddle/nn/functional/max_unpool2d_cn.rst +++ b/docs/api/paddle/nn/functional/max_unpool2d_cn.rst @@ -48,4 +48,4 @@ max_unpool2d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.max_unpool2d \ No newline at end of file +COPY-FROM: paddle.nn.functional.max_unpool2d diff --git a/docs/api/paddle/nn/functional/max_unpool3d_cn.rst 
b/docs/api/paddle/nn/functional/max_unpool3d_cn.rst index 0c5c240ec3f..1f2c9c32f3c 100644 --- a/docs/api/paddle/nn/functional/max_unpool3d_cn.rst +++ b/docs/api/paddle/nn/functional/max_unpool3d_cn.rst @@ -50,4 +50,4 @@ max_unpool3d 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.max_unpool3d \ No newline at end of file +COPY-FROM: paddle.nn.functional.max_unpool3d diff --git a/docs/api/paddle/nn/functional/maxout_cn.rst b/docs/api/paddle/nn/functional/maxout_cn.rst index 104c860a17b..2e29a9cbded 100644 --- a/docs/api/paddle/nn/functional/maxout_cn.rst +++ b/docs/api/paddle/nn/functional/maxout_cn.rst @@ -34,4 +34,4 @@ maxout激活层。 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.maxout \ No newline at end of file +COPY-FROM: paddle.nn.functional.maxout diff --git a/docs/api/paddle/nn/functional/mish_cn.rst b/docs/api/paddle/nn/functional/mish_cn.rst index 9b53a55546d..36fc6ba2b02 100644 --- a/docs/api/paddle/nn/functional/mish_cn.rst +++ b/docs/api/paddle/nn/functional/mish_cn.rst @@ -29,4 +29,4 @@ mish激活层。计算公式如下: 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.mish \ No newline at end of file +COPY-FROM: paddle.nn.functional.mish diff --git a/docs/api/paddle/nn/functional/mse_loss_cn.rst b/docs/api/paddle/nn/functional/mse_loss_cn.rst index 4b1efbb644a..c098fed27c8 100644 --- a/docs/api/paddle/nn/functional/mse_loss_cn.rst +++ b/docs/api/paddle/nn/functional/mse_loss_cn.rst @@ -37,4 +37,4 @@ mse_loss 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.mse_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.mse_loss diff --git a/docs/api/paddle/nn/functional/nll_loss_cn.rst b/docs/api/paddle/nn/functional/nll_loss_cn.rst index ec7eee6f9e4..904032822b3 100644 --- a/docs/api/paddle/nn/functional/nll_loss_cn.rst +++ b/docs/api/paddle/nn/functional/nll_loss_cn.rst @@ -22,4 +22,4 @@ nll_loss 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.nll_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.nll_loss diff --git 
a/docs/api/paddle/nn/functional/normalize_cn.rst b/docs/api/paddle/nn/functional/normalize_cn.rst index c5cb123e8b9..c31655dfd60 100644 --- a/docs/api/paddle/nn/functional/normalize_cn.rst +++ b/docs/api/paddle/nn/functional/normalize_cn.rst @@ -33,4 +33,4 @@ normalize 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.normalize \ No newline at end of file +COPY-FROM: paddle.nn.functional.normalize diff --git a/docs/api/paddle/nn/functional/npair_loss_cn.rst b/docs/api/paddle/nn/functional/npair_loss_cn.rst index bba84a2f541..7c8d43a4d51 100644 --- a/docs/api/paddle/nn/functional/npair_loss_cn.rst +++ b/docs/api/paddle/nn/functional/npair_loss_cn.rst @@ -28,4 +28,4 @@ NPair损失需要成对的数据。NPair损失分为两部分:第一部分是 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.npair_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.npair_loss diff --git a/docs/api/paddle/nn/functional/one_hot_cn.rst b/docs/api/paddle/nn/functional/one_hot_cn.rst index 6ff2d3f609b..e6e845502a1 100644 --- a/docs/api/paddle/nn/functional/one_hot_cn.rst +++ b/docs/api/paddle/nn/functional/one_hot_cn.rst @@ -61,4 +61,4 @@ Tensor,转换后的one_hot Tensor,数据类型为float32。 # one_hot_label = [[0., 1., 0., 0.], # [0., 1., 0., 0.], # [0., 0., 0., 1.], - # [1., 0., 0., 0.]] \ No newline at end of file + # [1., 0., 0., 0.]] diff --git a/docs/api/paddle/nn/functional/pad_cn.rst b/docs/api/paddle/nn/functional/pad_cn.rst index 3b34c1fd95f..707e53774a6 100644 --- a/docs/api/paddle/nn/functional/pad_cn.rst +++ b/docs/api/paddle/nn/functional/pad_cn.rst @@ -114,6 +114,3 @@ Tensor,对 ``x`` 进行 ``'pad'`` 的结果,数据类型和 ``x`` 相同。 # [3. 1. 2. 3. 1. 2.] # [6. 4. 5. 6. 4. 5.] # [3. 1. 2. 3. 1. 
2.]]]] - - - diff --git a/docs/api/paddle/nn/functional/pixel_shuffle_cn.rst b/docs/api/paddle/nn/functional/pixel_shuffle_cn.rst index 44e166eb917..a8983cc6e65 100644 --- a/docs/api/paddle/nn/functional/pixel_shuffle_cn.rst +++ b/docs/api/paddle/nn/functional/pixel_shuffle_cn.rst @@ -24,4 +24,4 @@ pixel_shuffle 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.pixel_shuffle \ No newline at end of file +COPY-FROM: paddle.nn.functional.pixel_shuffle diff --git a/docs/api/paddle/nn/functional/prelu_cn.rst b/docs/api/paddle/nn/functional/prelu_cn.rst index c11bd9f372b..5fa250b30f6 100644 --- a/docs/api/paddle/nn/functional/prelu_cn.rst +++ b/docs/api/paddle/nn/functional/prelu_cn.rst @@ -27,4 +27,4 @@ prelu激活层(PRelu Activation Operator)。计算公式如下: 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.prelu \ No newline at end of file +COPY-FROM: paddle.nn.functional.prelu diff --git a/docs/api/paddle/nn/functional/relu6_cn.rst b/docs/api/paddle/nn/functional/relu6_cn.rst index 7a09475fd0b..d8e0c43fa28 100644 --- a/docs/api/paddle/nn/functional/relu6_cn.rst +++ b/docs/api/paddle/nn/functional/relu6_cn.rst @@ -25,4 +25,4 @@ relu6激活层 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.relu6 \ No newline at end of file +COPY-FROM: paddle.nn.functional.relu6 diff --git a/docs/api/paddle/nn/functional/relu_cn.rst b/docs/api/paddle/nn/functional/relu_cn.rst index 0a486150b4a..6eaae69fe6b 100644 --- a/docs/api/paddle/nn/functional/relu_cn.rst +++ b/docs/api/paddle/nn/functional/relu_cn.rst @@ -26,4 +26,4 @@ relu激活层(Rectified Linear Unit)。计算公式如下: 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.relu \ No newline at end of file +COPY-FROM: paddle.nn.functional.relu diff --git a/docs/api/paddle/nn/functional/selu_cn.rst b/docs/api/paddle/nn/functional/selu_cn.rst index b3d985a1d7c..c772212b3c3 100644 --- a/docs/api/paddle/nn/functional/selu_cn.rst +++ b/docs/api/paddle/nn/functional/selu_cn.rst @@ -33,4 +33,4 @@ selu激活层 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.selu \ No newline at end 
of file +COPY-FROM: paddle.nn.functional.selu diff --git a/docs/api/paddle/nn/functional/sigmoid_cn.rst b/docs/api/paddle/nn/functional/sigmoid_cn.rst index 92f4f346560..278a8e157ed 100755 --- a/docs/api/paddle/nn/functional/sigmoid_cn.rst +++ b/docs/api/paddle/nn/functional/sigmoid_cn.rst @@ -27,4 +27,4 @@ Tensor,激活函数的输出值,数据类型为float32。 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.sigmoid \ No newline at end of file +COPY-FROM: paddle.nn.functional.sigmoid diff --git a/docs/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst b/docs/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst index 215dc274534..c519de86bb5 100644 --- a/docs/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst +++ b/docs/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst @@ -40,4 +40,4 @@ sigmoid_focal_loss 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.sigmoid_focal_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.sigmoid_focal_loss diff --git a/docs/api/paddle/nn/functional/silu_cn.rst b/docs/api/paddle/nn/functional/silu_cn.rst index 7f1ac37ce1d..ece21e7b147 100644 --- a/docs/api/paddle/nn/functional/silu_cn.rst +++ b/docs/api/paddle/nn/functional/silu_cn.rst @@ -25,4 +25,4 @@ silu激活层。计算公式如下: 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.silu \ No newline at end of file +COPY-FROM: paddle.nn.functional.silu diff --git a/docs/api/paddle/nn/functional/smooth_l1_loss_cn.rst b/docs/api/paddle/nn/functional/smooth_l1_loss_cn.rst index 87306e28afe..1440fafce44 100644 --- a/docs/api/paddle/nn/functional/smooth_l1_loss_cn.rst +++ b/docs/api/paddle/nn/functional/smooth_l1_loss_cn.rst @@ -38,4 +38,4 @@ Tensor,返回计算 `smooth_l1_loss` 后的损失值。 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.smooth_l1_loss \ No newline at end of file +COPY-FROM: paddle.nn.functional.smooth_l1_loss diff --git a/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst b/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst index b7062cb57e0..0d29957930e 100644 --- 
a/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst +++ b/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst @@ -52,4 +52,4 @@ softmax_with_cross_entropy 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.softmax_with_cross_entropy \ No newline at end of file +COPY-FROM: paddle.nn.functional.softmax_with_cross_entropy diff --git a/docs/api/paddle/nn/functional/softplus_cn.rst b/docs/api/paddle/nn/functional/softplus_cn.rst index 0c46c6dd196..3fef83d3903 100644 --- a/docs/api/paddle/nn/functional/softplus_cn.rst +++ b/docs/api/paddle/nn/functional/softplus_cn.rst @@ -30,4 +30,4 @@ softplus激活层 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.softplus \ No newline at end of file +COPY-FROM: paddle.nn.functional.softplus diff --git a/docs/api/paddle/nn/functional/softshrink_cn.rst b/docs/api/paddle/nn/functional/softshrink_cn.rst index 0570a00e4a7..67afbd8a1a5 100644 --- a/docs/api/paddle/nn/functional/softshrink_cn.rst +++ b/docs/api/paddle/nn/functional/softshrink_cn.rst @@ -32,4 +32,4 @@ softshrink激活层 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.softshrink \ No newline at end of file +COPY-FROM: paddle.nn.functional.softshrink diff --git a/docs/api/paddle/nn/functional/softsign_cn.rst b/docs/api/paddle/nn/functional/softsign_cn.rst index c02d683eaa3..f379ecdcfe7 100644 --- a/docs/api/paddle/nn/functional/softsign_cn.rst +++ b/docs/api/paddle/nn/functional/softsign_cn.rst @@ -25,4 +25,4 @@ softsign激活层 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.softsign \ No newline at end of file +COPY-FROM: paddle.nn.functional.softsign diff --git a/docs/api/paddle/nn/functional/sparse_attention_cn.rst b/docs/api/paddle/nn/functional/sparse_attention_cn.rst index 50833d86799..a052aaeff8a 100755 --- a/docs/api/paddle/nn/functional/sparse_attention_cn.rst +++ b/docs/api/paddle/nn/functional/sparse_attention_cn.rst @@ -32,4 +32,4 @@ sparse_attention 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.sparse_attention \ No newline at end of file 
+COPY-FROM: paddle.nn.functional.sparse_attention diff --git a/docs/api/paddle/nn/functional/square_error_cost_cn.rst b/docs/api/paddle/nn/functional/square_error_cost_cn.rst index 5e3f70ba886..0cc886f2986 100644 --- a/docs/api/paddle/nn/functional/square_error_cost_cn.rst +++ b/docs/api/paddle/nn/functional/square_error_cost_cn.rst @@ -28,4 +28,4 @@ Tensor,预测值和目标值的方差 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.square_error_cost \ No newline at end of file +COPY-FROM: paddle.nn.functional.square_error_cost diff --git a/docs/api/paddle/nn/functional/swish_cn.rst b/docs/api/paddle/nn/functional/swish_cn.rst index 94e992af439..c8ae0158d1f 100644 --- a/docs/api/paddle/nn/functional/swish_cn.rst +++ b/docs/api/paddle/nn/functional/swish_cn.rst @@ -26,4 +26,4 @@ swish激活层。计算公式如下: 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.swish \ No newline at end of file +COPY-FROM: paddle.nn.functional.swish diff --git a/docs/api/paddle/nn/functional/tanhshrink_cn.rst b/docs/api/paddle/nn/functional/tanhshrink_cn.rst index 4fcb77d7228..c50ca10af56 100644 --- a/docs/api/paddle/nn/functional/tanhshrink_cn.rst +++ b/docs/api/paddle/nn/functional/tanhshrink_cn.rst @@ -25,4 +25,4 @@ tanhshrink激活层 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.tanhshrink \ No newline at end of file +COPY-FROM: paddle.nn.functional.tanhshrink diff --git a/docs/api/paddle/nn/functional/temporal_shift_cn.rst b/docs/api/paddle/nn/functional/temporal_shift_cn.rst index 57930d0c26f..14df752e6c9 100644 --- a/docs/api/paddle/nn/functional/temporal_shift_cn.rst +++ b/docs/api/paddle/nn/functional/temporal_shift_cn.rst @@ -44,4 +44,4 @@ Tensor,时序位移后的输出张量,维度和数据类型与输入 ``x`` 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.temporal_shift \ No newline at end of file +COPY-FROM: paddle.nn.functional.temporal_shift diff --git a/docs/api/paddle/nn/functional/thresholded_relu_cn.rst b/docs/api/paddle/nn/functional/thresholded_relu_cn.rst index a1a6f562b28..ef7448d6ed4 100644 --- 
a/docs/api/paddle/nn/functional/thresholded_relu_cn.rst +++ b/docs/api/paddle/nn/functional/thresholded_relu_cn.rst @@ -30,4 +30,4 @@ thresholded relu激活层。计算公式如下: 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.thresholded_relu \ No newline at end of file +COPY-FROM: paddle.nn.functional.thresholded_relu diff --git a/docs/api/paddle/nn/functional/unfold_cn.rst b/docs/api/paddle/nn/functional/unfold_cn.rst index 8d910dcd827..fd3e4de1cd2 100644 --- a/docs/api/paddle/nn/functional/unfold_cn.rst +++ b/docs/api/paddle/nn/functional/unfold_cn.rst @@ -58,4 +58,4 @@ Tensor, unfold操作之后的结果,形状如上面所描述的[N, Cout, Lout 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.unfold \ No newline at end of file +COPY-FROM: paddle.nn.functional.unfold diff --git a/docs/api/paddle/nn/functional/upsample_cn.rst b/docs/api/paddle/nn/functional/upsample_cn.rst index 16275a35cbd..f297d58f600 100644 --- a/docs/api/paddle/nn/functional/upsample_cn.rst +++ b/docs/api/paddle/nn/functional/upsample_cn.rst @@ -157,4 +157,4 @@ https://en.wikipedia.org/wiki/Bicubic_interpolation 代码示例 :::::::::::: -COPY-FROM: paddle.nn.functional.upsample \ No newline at end of file +COPY-FROM: paddle.nn.functional.upsample diff --git a/docs/api/paddle/nn/functional/zeropad2d_cn.rst b/docs/api/paddle/nn/functional/zeropad2d_cn.rst index 8bc036883eb..7abaf5d91b2 100644 --- a/docs/api/paddle/nn/functional/zeropad2d_cn.rst +++ b/docs/api/paddle/nn/functional/zeropad2d_cn.rst @@ -20,4 +20,4 @@ zeropad2d 代码示例 :::::::::: -COPY-FROM: paddle.nn.functional.zeropad2d \ No newline at end of file +COPY-FROM: paddle.nn.functional.zeropad2d diff --git a/docs/api/paddle/nn/initializer/Assign_cn.rst b/docs/api/paddle/nn/initializer/Assign_cn.rst index ddd783b9fe4..40513cc7697 100644 --- a/docs/api/paddle/nn/initializer/Assign_cn.rst +++ b/docs/api/paddle/nn/initializer/Assign_cn.rst @@ -22,4 +22,4 @@ Assign 代码示例 :::::::::::: -COPY-FROM: paddle.nn.initializer.Assign \ No newline at end of file +COPY-FROM: paddle.nn.initializer.Assign 
diff --git a/docs/api/paddle/nn/initializer/Bilinear_cn.rst b/docs/api/paddle/nn/initializer/Bilinear_cn.rst index 10350898e46..d2f842668e6 100644 --- a/docs/api/paddle/nn/initializer/Bilinear_cn.rst +++ b/docs/api/paddle/nn/initializer/Bilinear_cn.rst @@ -19,4 +19,4 @@ Bilinear 代码示例 :::::::::::: -COPY-FROM: paddle.nn.initializer.Bilinear \ No newline at end of file +COPY-FROM: paddle.nn.initializer.Bilinear diff --git a/docs/api/paddle/nn/initializer/Dirac_cn.rst b/docs/api/paddle/nn/initializer/Dirac_cn.rst index 8254531726a..d92a85c4b2a 100644 --- a/docs/api/paddle/nn/initializer/Dirac_cn.rst +++ b/docs/api/paddle/nn/initializer/Dirac_cn.rst @@ -31,4 +31,4 @@ Dirac 代码示例 ::::::::: -COPY-FROM: paddle.nn.initializer.Dirac \ No newline at end of file +COPY-FROM: paddle.nn.initializer.Dirac diff --git a/docs/api/paddle/nn/initializer/Normal_cn.rst b/docs/api/paddle/nn/initializer/Normal_cn.rst index c9f8bb63059..fb545c8c550 100644 --- a/docs/api/paddle/nn/initializer/Normal_cn.rst +++ b/docs/api/paddle/nn/initializer/Normal_cn.rst @@ -23,4 +23,4 @@ Normal 代码示例 :::::::::::: -COPY-FROM: paddle.nn.initializer.Normal \ No newline at end of file +COPY-FROM: paddle.nn.initializer.Normal diff --git a/docs/api/paddle/nn/initializer/set_global_initializer_cn.rst b/docs/api/paddle/nn/initializer/set_global_initializer_cn.rst index 201584b145f..513e58d2278 100644 --- a/docs/api/paddle/nn/initializer/set_global_initializer_cn.rst +++ b/docs/api/paddle/nn/initializer/set_global_initializer_cn.rst @@ -25,4 +25,4 @@ set_global_initializer 代码示例 :::::::::::: -COPY-FROM: paddle.nn.initializer.set_global_initializer \ No newline at end of file +COPY-FROM: paddle.nn.initializer.set_global_initializer diff --git a/docs/api/paddle/nn/utils/parameters_to_vector_cn.rst b/docs/api/paddle/nn/utils/parameters_to_vector_cn.rst index 1ba5155bd0a..42abc4d4e8d 100644 --- a/docs/api/paddle/nn/utils/parameters_to_vector_cn.rst +++ b/docs/api/paddle/nn/utils/parameters_to_vector_cn.rst @@ -19,4 +19,4 
@@ parameters_to_vector 代码示例 ::::::::: -COPY-FROM: paddle.nn.utils.parameters_to_vector \ No newline at end of file +COPY-FROM: paddle.nn.utils.parameters_to_vector diff --git a/docs/api/paddle/nn/utils/remove_weight_norm_cn.rst b/docs/api/paddle/nn/utils/remove_weight_norm_cn.rst index ee0e8fa4011..043ac52d3ba 100644 --- a/docs/api/paddle/nn/utils/remove_weight_norm_cn.rst +++ b/docs/api/paddle/nn/utils/remove_weight_norm_cn.rst @@ -21,4 +21,4 @@ remove_weight_norm 代码示例 :::::::::::: -COPY-FROM: paddle.nn.utils.remove_weight_norm \ No newline at end of file +COPY-FROM: paddle.nn.utils.remove_weight_norm diff --git a/docs/api/paddle/nn/utils/spectral_norm_cn.rst b/docs/api/paddle/nn/utils/spectral_norm_cn.rst index 8d558aac436..60543a5879d 100644 --- a/docs/api/paddle/nn/utils/spectral_norm_cn.rst +++ b/docs/api/paddle/nn/utils/spectral_norm_cn.rst @@ -42,4 +42,4 @@ spectral_norm 代码示例 :::::::::::: -COPY-FROM: paddle.nn.utils.spectral_norm \ No newline at end of file +COPY-FROM: paddle.nn.utils.spectral_norm diff --git a/docs/api/paddle/nn/utils/vector_to_parameters_cn.rst b/docs/api/paddle/nn/utils/vector_to_parameters_cn.rst index a6c1b767447..fd0275406df 100644 --- a/docs/api/paddle/nn/utils/vector_to_parameters_cn.rst +++ b/docs/api/paddle/nn/utils/vector_to_parameters_cn.rst @@ -20,4 +20,4 @@ vector_to_parameters 代码示例 ::::::::: -COPY-FROM: paddle.nn.utils.vector_to_parameters \ No newline at end of file +COPY-FROM: paddle.nn.utils.vector_to_parameters diff --git a/docs/api/paddle/nn/utils/weight_norm_cn.rst b/docs/api/paddle/nn/utils/weight_norm_cn.rst index c6934c5f0eb..9d966a5414b 100644 --- a/docs/api/paddle/nn/utils/weight_norm_cn.rst +++ b/docs/api/paddle/nn/utils/weight_norm_cn.rst @@ -27,4 +27,4 @@ weight_norm 代码示例 :::::::::::: -COPY-FROM: paddle.nn.utils.weight_norm \ No newline at end of file +COPY-FROM: paddle.nn.utils.weight_norm diff --git a/docs/api/paddle/no_grad_cn.rst b/docs/api/paddle/no_grad_cn.rst index a28ee1fa84c..1b255268082 100644 --- 
a/docs/api/paddle/no_grad_cn.rst +++ b/docs/api/paddle/no_grad_cn.rst @@ -14,4 +14,4 @@ no_grad 代码示例 :::::::::::: -COPY-FROM: paddle.no_grad \ No newline at end of file +COPY-FROM: paddle.no_grad diff --git a/docs/api/paddle/nonzero_cn.rst b/docs/api/paddle/nonzero_cn.rst index ec36dcf5cec..479207eedf6 100644 --- a/docs/api/paddle/nonzero_cn.rst +++ b/docs/api/paddle/nonzero_cn.rst @@ -25,4 +25,4 @@ nonzero 代码示例 ::::::::: -COPY-FROM: paddle.nonzero \ No newline at end of file +COPY-FROM: paddle.nonzero diff --git a/docs/api/paddle/not_equal_cn.rst b/docs/api/paddle/not_equal_cn.rst index e96c32c675e..2935d5cbf0d 100644 --- a/docs/api/paddle/not_equal_cn.rst +++ b/docs/api/paddle/not_equal_cn.rst @@ -26,4 +26,4 @@ Tensor,shape 和输入一致,数据类型为 bool。 代码示例 :::::::::::: -COPY-FROM: paddle.not_equal \ No newline at end of file +COPY-FROM: paddle.not_equal diff --git a/docs/api/paddle/numel_cn.rst b/docs/api/paddle/numel_cn.rst index 92698d1ba5d..acddcbe21cc 100644 --- a/docs/api/paddle/numel_cn.rst +++ b/docs/api/paddle/numel_cn.rst @@ -21,4 +21,4 @@ numel 代码示例 :::::::::::: -COPY-FROM: paddle.numel \ No newline at end of file +COPY-FROM: paddle.numel diff --git a/docs/api/paddle/ones_like_cn.rst b/docs/api/paddle/ones_like_cn.rst index 72f7f89b8b6..31ce37fe18d 100644 --- a/docs/api/paddle/ones_like_cn.rst +++ b/docs/api/paddle/ones_like_cn.rst @@ -23,4 +23,4 @@ Tensor:和 ``x`` 具有相同形状的数值都为1的 Tensor,数据类型 代码示例 :::::::::: -COPY-FROM: paddle.ones_like \ No newline at end of file +COPY-FROM: paddle.ones_like diff --git a/docs/api/paddle/onnx/export_cn.rst b/docs/api/paddle/onnx/export_cn.rst index 477d258f82e..700e6ece309 100644 --- a/docs/api/paddle/onnx/export_cn.rst +++ b/docs/api/paddle/onnx/export_cn.rst @@ -26,4 +26,4 @@ export 代码示例 ::::::::: -COPY-FROM: paddle.onnx.export \ No newline at end of file +COPY-FROM: paddle.onnx.export diff --git a/docs/api/paddle/optimizer/Adadelta_cn.rst b/docs/api/paddle/optimizer/Adadelta_cn.rst index a4a69ec1a33..843440520bf 100644 --- 
a/docs/api/paddle/optimizer/Adadelta_cn.rst +++ b/docs/api/paddle/optimizer/Adadelta_cn.rst @@ -241,4 +241,3 @@ float,当前步骤的学习率。 lr = adadelta.get_lr() scheduler.step() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True - diff --git a/docs/api/paddle/optimizer/Adagrad_cn.rst b/docs/api/paddle/optimizer/Adagrad_cn.rst index 767d1c7537a..c4bfae53c9b 100644 --- a/docs/api/paddle/optimizer/Adagrad_cn.rst +++ b/docs/api/paddle/optimizer/Adagrad_cn.rst @@ -39,4 +39,4 @@ Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以 代码示例 :::::::::::: -COPY-FROM: paddle.optimizer.Adagrad \ No newline at end of file +COPY-FROM: paddle.optimizer.Adagrad diff --git a/docs/api/paddle/optimizer/Momentum_cn.rst b/docs/api/paddle/optimizer/Momentum_cn.rst index 5f22a222534..e87ced5126c 100644 --- a/docs/api/paddle/optimizer/Momentum_cn.rst +++ b/docs/api/paddle/optimizer/Momentum_cn.rst @@ -148,5 +148,3 @@ set_lr(value) .. note:: 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 - - diff --git a/docs/api/paddle/optimizer/Optimizer_cn.rst b/docs/api/paddle/optimizer/Optimizer_cn.rst index 06c3808c1d7..93e8a27dc03 100755 --- a/docs/api/paddle/optimizer/Optimizer_cn.rst +++ b/docs/api/paddle/optimizer/Optimizer_cn.rst @@ -232,4 +232,3 @@ float,当前步骤的学习率。 lr = adam.get_lr() scheduler.step() np.allclose(lr, ret[i], rtol=1e-06, atol=0.0) # True - diff --git a/docs/api/paddle/optimizer/SGD_cn.rst b/docs/api/paddle/optimizer/SGD_cn.rst index cf8e7d232f8..451706e2275 100644 --- a/docs/api/paddle/optimizer/SGD_cn.rst +++ b/docs/api/paddle/optimizer/SGD_cn.rst @@ -140,7 +140,3 @@ set_lr(value) .. 
note:: 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 - - - - diff --git a/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst b/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst index 7d95b4c833f..2a48352deef 100644 --- a/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst @@ -106,4 +106,3 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 :::::::::::: 参照上述示例代码。 - diff --git a/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst b/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst index 9c128de2b5c..9567dbdbbc6 100644 --- a/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst +++ b/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst @@ -55,4 +55,4 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** -参照上述示例代码。 \ No newline at end of file +参照上述示例代码。 diff --git a/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst b/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst index 349f4a41682..a0818576404 100644 --- a/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst @@ -94,5 +94,3 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** 参照上述示例代码。 - - diff --git a/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst b/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst index 8bb23c9a9ec..d97d2dc643d 100644 --- a/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst @@ -94,4 +94,4 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** -参照上述示例代码。 \ No newline at end of file +参照上述示例代码。 diff --git a/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst b/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst index 8575c119856..bdf0986a928 100644 --- a/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst @@ -99,4 +99,4 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** -参照上述示例代码。 \ No newline at end of file +参照上述示例代码。 diff --git 
a/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst b/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst index 7caf1c8ac5f..f862a1d8385 100644 --- a/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst +++ b/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst @@ -106,4 +106,3 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** 参照上述示例代码。 - diff --git a/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst b/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst index 690975f0bc3..f4a04972418 100644 --- a/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst @@ -73,5 +73,3 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** 参照上述示例代码。 - - diff --git a/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst b/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst index 4197178b131..47aedfe5825 100644 --- a/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst @@ -93,4 +93,3 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** 参照上述示例代码。 - diff --git a/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst b/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst index fcf2b98bca9..e26bfa16c18 100644 --- a/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst @@ -99,4 +99,3 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** 参照上述示例代码。 - diff --git a/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst b/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst index f5502ff7276..9e7ba71fdd4 100644 --- a/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst @@ -109,4 +109,3 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** 参照上述示例代码。 - diff --git a/docs/api/paddle/optimizer/lr/StepDecay_cn.rst b/docs/api/paddle/optimizer/lr/StepDecay_cn.rst index 9b4fcd28d88..aa8e66eef28 100644 --- a/docs/api/paddle/optimizer/lr/StepDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/StepDecay_cn.rst 
@@ -102,4 +102,3 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** 参照上述示例代码。 - diff --git a/docs/api/paddle/poisson_cn.rst b/docs/api/paddle/poisson_cn.rst index c5bb6304226..20d33d3717f 100644 --- a/docs/api/paddle/poisson_cn.rst +++ b/docs/api/paddle/poisson_cn.rst @@ -23,4 +23,4 @@ poisson 代码示例 ::::::::: -COPY-FROM: paddle.poisson \ No newline at end of file +COPY-FROM: paddle.poisson diff --git a/docs/api/paddle/pow_cn.rst b/docs/api/paddle/pow_cn.rst index 3325a209f15..284198da1bb 100644 --- a/docs/api/paddle/pow_cn.rst +++ b/docs/api/paddle/pow_cn.rst @@ -27,4 +27,4 @@ Tensor,维度和数据类型都和 ``x`` 相同。 代码示例 ::::::::: -COPY-FROM: paddle.pow \ No newline at end of file +COPY-FROM: paddle.pow diff --git a/docs/api/paddle/profiler/SortedKeys_cn.rst b/docs/api/paddle/profiler/SortedKeys_cn.rst index 62b17b96d29..10f0775c5bd 100644 --- a/docs/api/paddle/profiler/SortedKeys_cn.rst +++ b/docs/api/paddle/profiler/SortedKeys_cn.rst @@ -19,5 +19,3 @@ SortedKeys枚举类用来指定打印的统计 :ref:`表单 ` 的on_trace_ready参数。 -COPY-FROM: paddle.profiler.export_chrome_tracing:code-example1 \ No newline at end of file +COPY-FROM: paddle.profiler.export_chrome_tracing:code-example1 diff --git a/docs/api/paddle/profiler/export_protobuf_cn.rst b/docs/api/paddle/profiler/export_protobuf_cn.rst index 1c74401c39e..332f188396b 100644 --- a/docs/api/paddle/profiler/export_protobuf_cn.rst +++ b/docs/api/paddle/profiler/export_protobuf_cn.rst @@ -25,4 +25,4 @@ export_protobuf 用于 :ref:`性能分析器 ` 的on_trace_ready参数。 -COPY-FROM: paddle.profiler.export_protobuf:code-example1 \ No newline at end of file +COPY-FROM: paddle.profiler.export_protobuf:code-example1 diff --git a/docs/api/paddle/put_along_axis_cn.rst b/docs/api/paddle/put_along_axis_cn.rst index 040a0833e56..15688b2f331 100644 --- a/docs/api/paddle/put_along_axis_cn.rst +++ b/docs/api/paddle/put_along_axis_cn.rst @@ -25,4 +25,3 @@ put_along_axis COPY-FROM: paddle.put_along_axis:code-example1 - diff --git a/docs/api/paddle/quantile_cn.rst 
b/docs/api/paddle/quantile_cn.rst index 7228019793e..20e3b2efb98 100644 --- a/docs/api/paddle/quantile_cn.rst +++ b/docs/api/paddle/quantile_cn.rst @@ -22,4 +22,4 @@ quantile 代码示例 :::::::::: -COPY-FROM: paddle.quantile \ No newline at end of file +COPY-FROM: paddle.quantile diff --git a/docs/api/paddle/randint_cn.rst b/docs/api/paddle/randint_cn.rst index 9e7d7398180..8f9a2c470c1 100644 --- a/docs/api/paddle/randint_cn.rst +++ b/docs/api/paddle/randint_cn.rst @@ -22,4 +22,4 @@ randint 代码示例 ::::::::::: -COPY-FROM: paddle.randint \ No newline at end of file +COPY-FROM: paddle.randint diff --git a/docs/api/paddle/randint_like_cn.rst b/docs/api/paddle/randint_like_cn.rst index 565a4ed05c6..a2b8db3044e 100644 --- a/docs/api/paddle/randint_like_cn.rst +++ b/docs/api/paddle/randint_like_cn.rst @@ -22,4 +22,4 @@ randint_like 代码示例 ::::::::::: -COPY-FROM: paddle.randint_like \ No newline at end of file +COPY-FROM: paddle.randint_like diff --git a/docs/api/paddle/randperm_cn.rst b/docs/api/paddle/randperm_cn.rst index 962dc2902e6..60a21841795 100644 --- a/docs/api/paddle/randperm_cn.rst +++ b/docs/api/paddle/randperm_cn.rst @@ -20,4 +20,4 @@ randperm 代码示例 :::::::::: -COPY-FROM: paddle.randperm \ No newline at end of file +COPY-FROM: paddle.randperm diff --git a/docs/api/paddle/rank_cn.rst b/docs/api/paddle/rank_cn.rst index 3c015bc69cc..1e618a32943 100644 --- a/docs/api/paddle/rank_cn.rst +++ b/docs/api/paddle/rank_cn.rst @@ -23,4 +23,4 @@ rank 代码示例 :::::::::::: -COPY-FROM: paddle.rank \ No newline at end of file +COPY-FROM: paddle.rank diff --git a/docs/api/paddle/real_cn.rst b/docs/api/paddle/real_cn.rst index 5fb6d2a1b14..c204cc5247a 100644 --- a/docs/api/paddle/real_cn.rst +++ b/docs/api/paddle/real_cn.rst @@ -20,4 +20,4 @@ Tensor,包含原复数 Tensor 的实部数值。 代码示例 :::::::::::: -COPY-FROM: paddle.real \ No newline at end of file +COPY-FROM: paddle.real diff --git a/docs/api/paddle/reciprocal_cn.rst b/docs/api/paddle/reciprocal_cn.rst index 7f4bd98b784..63e98483c38 100644 --- 
a/docs/api/paddle/reciprocal_cn.rst +++ b/docs/api/paddle/reciprocal_cn.rst @@ -29,4 +29,4 @@ reciprocal 对输入Tensor取倒数 代码示例 :::::::::::: -COPY-FROM: paddle.reciprocal \ No newline at end of file +COPY-FROM: paddle.reciprocal diff --git a/docs/api/paddle/regularizer/L1Decay_cn.rst b/docs/api/paddle/regularizer/L1Decay_cn.rst index 86aefab13b3..d81a9c68969 100644 --- a/docs/api/paddle/regularizer/L1Decay_cn.rst +++ b/docs/api/paddle/regularizer/L1Decay_cn.rst @@ -69,4 +69,3 @@ L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵 padding=0, weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)), bias_attr=False) - diff --git a/docs/api/paddle/regularizer/L2Decay_cn.rst b/docs/api/paddle/regularizer/L2Decay_cn.rst index 38b0e01532c..8aed5423ff3 100644 --- a/docs/api/paddle/regularizer/L2Decay_cn.rst +++ b/docs/api/paddle/regularizer/L2Decay_cn.rst @@ -69,4 +69,3 @@ L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模 padding=0, weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)), bias_attr=False) - diff --git a/docs/api/paddle/reshape_cn.rst b/docs/api/paddle/reshape_cn.rst index 91322ab7410..403f51dca70 100644 --- a/docs/api/paddle/reshape_cn.rst +++ b/docs/api/paddle/reshape_cn.rst @@ -41,11 +41,3 @@ reshape COPY-FROM: paddle.reshape:code-example1 - - - - - - - - diff --git a/docs/api/paddle/roll_cn.rst b/docs/api/paddle/roll_cn.rst index 9b56835b721..84057c557e8 100644 --- a/docs/api/paddle/roll_cn.rst +++ b/docs/api/paddle/roll_cn.rst @@ -26,4 +26,4 @@ roll 代码示例 ::::::::: -COPY-FROM: paddle.roll \ No newline at end of file +COPY-FROM: paddle.roll diff --git a/docs/api/paddle/rot90_cn.rst b/docs/api/paddle/rot90_cn.rst index c529de6b6b6..e05dd88bb10 100644 --- a/docs/api/paddle/rot90_cn.rst +++ b/docs/api/paddle/rot90_cn.rst @@ -26,4 +26,4 @@ rot90 代码示例 :::::::::: -COPY-FROM: paddle.rot90 \ No newline at end of file +COPY-FROM: paddle.rot90 diff --git a/docs/api/paddle/round_cn.rst b/docs/api/paddle/round_cn.rst index 9e696805b55..e19cd6a62d9 100644 --- a/docs/api/paddle/round_cn.rst +++ 
b/docs/api/paddle/round_cn.rst @@ -41,6 +41,3 @@ round x = paddle.to_tensor([1.2, -0.9, 3.4, 0.9], dtype='float32') result = paddle.round(x) print(result) # result=[1., -1., 3., 1.] - - - diff --git a/docs/api/paddle/rsqrt_cn.rst b/docs/api/paddle/rsqrt_cn.rst index f91b1771126..79ea4b2c371 100644 --- a/docs/api/paddle/rsqrt_cn.rst +++ b/docs/api/paddle/rsqrt_cn.rst @@ -31,4 +31,4 @@ Tensor,对输入x进行rsqrt激活函数计算结果,数据shape、类型和 代码示例 :::::::::::: -COPY-FROM: paddle.rsqrt \ No newline at end of file +COPY-FROM: paddle.rsqrt diff --git a/docs/api/paddle/scale_cn.rst b/docs/api/paddle/scale_cn.rst index b5e20672751..d799dc1dedd 100644 --- a/docs/api/paddle/scale_cn.rst +++ b/docs/api/paddle/scale_cn.rst @@ -42,4 +42,4 @@ scale data = paddle.randn(shape=[2,3], dtype='float32') res = paddle.scale(data, scale=2.0, bias=1.0) -COPY-FROM: paddle.scale \ No newline at end of file +COPY-FROM: paddle.scale diff --git a/docs/api/paddle/scatter_nd_add_cn.rst b/docs/api/paddle/scatter_nd_add_cn.rst index d76f7b4a40c..96530eaac49 100644 --- a/docs/api/paddle/scatter_nd_add_cn.rst +++ b/docs/api/paddle/scatter_nd_add_cn.rst @@ -57,4 +57,4 @@ Tensor,数据类型和形状都与 :code:`x` 相同。 代码示例 :::::::::::: -COPY-FROM: paddle.scatter_nd_add \ No newline at end of file +COPY-FROM: paddle.scatter_nd_add diff --git a/docs/api/paddle/scatter_nd_cn.rst b/docs/api/paddle/scatter_nd_cn.rst index a9c0de1b808..071e5f7f3c9 100644 --- a/docs/api/paddle/scatter_nd_cn.rst +++ b/docs/api/paddle/scatter_nd_cn.rst @@ -26,4 +26,4 @@ Tensor,数据类型与 :code:`updates` 相同,形状是 :code:`shape` 。 代码示例 :::::::::::: -COPY-FROM: paddle.scatter_nd \ No newline at end of file +COPY-FROM: paddle.scatter_nd diff --git a/docs/api/paddle/searchsorted_cn.rst b/docs/api/paddle/searchsorted_cn.rst index 066b127b4a1..62cb0a26f25 100644 --- a/docs/api/paddle/searchsorted_cn.rst +++ b/docs/api/paddle/searchsorted_cn.rst @@ -25,4 +25,4 @@ Tensor(与 ``values`` 维度相同),如果参数 ``out_int32`` 为False,则 代码示例 :::::::: -COPY-FROM: paddle.searchsorted \ No newline 
at end of file +COPY-FROM: paddle.searchsorted diff --git a/docs/api/paddle/seed_cn.rst b/docs/api/paddle/seed_cn.rst index 06ff1a50156..3cf9ee3b069 100644 --- a/docs/api/paddle/seed_cn.rst +++ b/docs/api/paddle/seed_cn.rst @@ -23,4 +23,4 @@ seed 代码示例 :::::::::::: -COPY-FROM: paddle.seed \ No newline at end of file +COPY-FROM: paddle.seed diff --git a/docs/api/paddle/set_cuda_rng_state_cn.rst b/docs/api/paddle/set_cuda_rng_state_cn.rst index b8740aed605..eabd4371cf9 100644 --- a/docs/api/paddle/set_cuda_rng_state_cn.rst +++ b/docs/api/paddle/set_cuda_rng_state_cn.rst @@ -23,4 +23,4 @@ set_cuda_rng_state 代码示例 :::::::::::: -COPY-FROM: paddle.set_cuda_rng_state \ No newline at end of file +COPY-FROM: paddle.set_cuda_rng_state diff --git a/docs/api/paddle/set_default_dtype_cn.rst b/docs/api/paddle/set_default_dtype_cn.rst index 24d89ac58af..ca3c65aa4c9 100644 --- a/docs/api/paddle/set_default_dtype_cn.rst +++ b/docs/api/paddle/set_default_dtype_cn.rst @@ -22,4 +22,4 @@ set_default_dtype 代码示例 :::::::::::: -COPY-FROM: paddle.set_default_dtype \ No newline at end of file +COPY-FROM: paddle.set_default_dtype diff --git a/docs/api/paddle/set_flags_cn.rst b/docs/api/paddle/set_flags_cn.rst index 6cd6b08ce52..6fc6f64d94f 100644 --- a/docs/api/paddle/set_flags_cn.rst +++ b/docs/api/paddle/set_flags_cn.rst @@ -23,4 +23,4 @@ set_flags 代码示例 :::::::::::: -COPY-FROM: paddle.set_flags \ No newline at end of file +COPY-FROM: paddle.set_flags diff --git a/docs/api/paddle/set_grad_enabled_cn.rst b/docs/api/paddle/set_grad_enabled_cn.rst index c1148c5a808..04706d1fd65 100644 --- a/docs/api/paddle/set_grad_enabled_cn.rst +++ b/docs/api/paddle/set_grad_enabled_cn.rst @@ -22,4 +22,4 @@ None 代码示例 ::::::::: -COPY-FROM: paddle.set_grad_enabled \ No newline at end of file +COPY-FROM: paddle.set_grad_enabled diff --git a/docs/api/paddle/set_printoptions_cn.rst b/docs/api/paddle/set_printoptions_cn.rst index ccaf2541ec4..43f2f992713 100644 --- a/docs/api/paddle/set_printoptions_cn.rst +++ 
b/docs/api/paddle/set_printoptions_cn.rst @@ -26,4 +26,4 @@ set_printoptions 代码示例 ::::::::: -COPY-FROM: paddle.set_printoptions \ No newline at end of file +COPY-FROM: paddle.set_printoptions diff --git a/docs/api/paddle/shape_cn.rst b/docs/api/paddle/shape_cn.rst index 2aaf7b52636..28b38846934 100755 --- a/docs/api/paddle/shape_cn.rst +++ b/docs/api/paddle/shape_cn.rst @@ -43,4 +43,4 @@ shape层。 代码示例 :::::::::::: -COPY-FROM: paddle.shape \ No newline at end of file +COPY-FROM: paddle.shape diff --git a/docs/api/paddle/shard_index_cn.rst b/docs/api/paddle/shard_index_cn.rst index 7ca3f5d4104..4b7485c71bc 100644 --- a/docs/api/paddle/shard_index_cn.rst +++ b/docs/api/paddle/shard_index_cn.rst @@ -33,4 +33,4 @@ Tensor 代码示例 :::::::::::: -COPY-FROM: paddle.shard_index \ No newline at end of file +COPY-FROM: paddle.shard_index diff --git a/docs/api/paddle/sign_cn.rst b/docs/api/paddle/sign_cn.rst index 3cb43339b40..4755aed9bf1 100644 --- a/docs/api/paddle/sign_cn.rst +++ b/docs/api/paddle/sign_cn.rst @@ -20,4 +20,4 @@ Tensor,输出正负号,数据的 shape 大小及数据类型和输入 ``x`` 代码示例 :::::::::::: -COPY-FROM: paddle.sign \ No newline at end of file +COPY-FROM: paddle.sign diff --git a/docs/api/paddle/sin_cn.rst b/docs/api/paddle/sin_cn.rst index 87982a1937f..a69a2abf9f2 100644 --- a/docs/api/paddle/sin_cn.rst +++ b/docs/api/paddle/sin_cn.rst @@ -23,4 +23,4 @@ sin 代码示例 :::::::::::: -COPY-FROM: paddle.sin \ No newline at end of file +COPY-FROM: paddle.sin diff --git a/docs/api/paddle/sinh_cn.rst b/docs/api/paddle/sinh_cn.rst index 135264b3a63..6f7c88a4535 100644 --- a/docs/api/paddle/sinh_cn.rst +++ b/docs/api/paddle/sinh_cn.rst @@ -29,4 +29,4 @@ sinh 代码示例 :::::::::::: -COPY-FROM: paddle.sinh \ No newline at end of file +COPY-FROM: paddle.sinh diff --git a/docs/api/paddle/slice_cn.rst b/docs/api/paddle/slice_cn.rst index 6a8c5a5831f..54c0ef0ddce 100755 --- a/docs/api/paddle/slice_cn.rst +++ b/docs/api/paddle/slice_cn.rst @@ -44,4 +44,4 @@ slice 代码示例 :::::::::::: -COPY-FROM: paddle.slice \ No 
newline at end of file +COPY-FROM: paddle.slice diff --git a/docs/api/paddle/sort_cn.rst b/docs/api/paddle/sort_cn.rst index ab1edde289f..0b0d00ce8a2 100644 --- a/docs/api/paddle/sort_cn.rst +++ b/docs/api/paddle/sort_cn.rst @@ -27,4 +27,3 @@ Tensor,排序后的输出(与 ``x`` 维度相同、数据类型相同)。 :::::::::::: COPY-FROM: paddle.sort:code-example1 - diff --git a/docs/api/paddle/split_cn.rst b/docs/api/paddle/split_cn.rst index 72c63add851..9482686a01d 100644 --- a/docs/api/paddle/split_cn.rst +++ b/docs/api/paddle/split_cn.rst @@ -24,4 +24,4 @@ split 代码示例 ::::::::: -COPY-FROM: paddle.split \ No newline at end of file +COPY-FROM: paddle.split diff --git a/docs/api/paddle/sqrt_cn.rst b/docs/api/paddle/sqrt_cn.rst index 616f7f8c276..f0b4b09ef54 100644 --- a/docs/api/paddle/sqrt_cn.rst +++ b/docs/api/paddle/sqrt_cn.rst @@ -29,4 +29,4 @@ sqrt 代码示例 :::::::::::: -COPY-FROM: paddle.sqrt \ No newline at end of file +COPY-FROM: paddle.sqrt diff --git a/docs/api/paddle/square_cn.rst b/docs/api/paddle/square_cn.rst index 001ed8101d5..c364540201b 100644 --- a/docs/api/paddle/square_cn.rst +++ b/docs/api/paddle/square_cn.rst @@ -26,4 +26,4 @@ square 代码示例 :::::::::::: -COPY-FROM: paddle.square \ No newline at end of file +COPY-FROM: paddle.square diff --git a/docs/api/paddle/stack_cn.rst b/docs/api/paddle/stack_cn.rst index 910b2ec41c8..d31769b92ea 100644 --- a/docs/api/paddle/stack_cn.rst +++ b/docs/api/paddle/stack_cn.rst @@ -90,5 +90,3 @@ stack # [[[1., 2.], # [3., 4.], # [5., 6.]]] - - diff --git a/docs/api/paddle/stanh_cn.rst b/docs/api/paddle/stanh_cn.rst index 32666dfe89d..1886ac16ae5 100644 --- a/docs/api/paddle/stanh_cn.rst +++ b/docs/api/paddle/stanh_cn.rst @@ -27,4 +27,4 @@ stanh 激活函数 代码示例 :::::::::: -COPY-FROM: paddle.stanh \ No newline at end of file +COPY-FROM: paddle.stanh diff --git a/docs/api/paddle/static/Executor_cn.rst b/docs/api/paddle/static/Executor_cn.rst index f4156d5adc6..55192305453 100644 --- a/docs/api/paddle/static/Executor_cn.rst +++ 
b/docs/api/paddle/static/Executor_cn.rst @@ -313,4 +313,3 @@ train_from_dataset将销毁每次运行在executor中创建的所有资源。 exe.run(paddle.static.default_startup_program()) exe.train_from_dataset(program=paddle.static.default_main_program(), dataset=dataset) - diff --git a/docs/api/paddle/static/IpuCompiledProgram_cn.rst b/docs/api/paddle/static/IpuCompiledProgram_cn.rst index 053fb1596c7..916b35c124f 100644 --- a/docs/api/paddle/static/IpuCompiledProgram_cn.rst +++ b/docs/api/paddle/static/IpuCompiledProgram_cn.rst @@ -44,4 +44,3 @@ Program,编译之后的 ``Program`` 对象。 **代码示例** COPY-FROM: paddle.static.IpuCompiledProgram.compile - diff --git a/docs/api/paddle/static/Overview_cn.rst b/docs/api/paddle/static/Overview_cn.rst index bd67b6db6c7..05520c040f7 100644 --- a/docs/api/paddle/static/Overview_cn.rst +++ b/docs/api/paddle/static/Overview_cn.rst @@ -169,4 +169,4 @@ io相关API " :ref:`name_scope ` ", "为OP生成命名空间" " :ref:`py_func ` ", "自定义算子" " :ref:`scope_guard ` ", "切换作用域" - " :ref:`while_loop ` ", "while循环控制" \ No newline at end of file + " :ref:`while_loop ` ", "while循环控制" diff --git a/docs/api/paddle/static/ParallelExecutor_cn.rst b/docs/api/paddle/static/ParallelExecutor_cn.rst index 6211df0f8d6..152b3eece49 100644 --- a/docs/api/paddle/static/ParallelExecutor_cn.rst +++ b/docs/api/paddle/static/ParallelExecutor_cn.rst @@ -202,4 +202,3 @@ drop_local_exe_scopes() fetch_list=[loss.name]) parallel_exe.drop_local_exe_scopes() - diff --git a/docs/api/paddle/static/Print_cn.rst b/docs/api/paddle/static/Print_cn.rst index b5096d2d167..e41fd3e0e3f 100644 --- a/docs/api/paddle/static/Print_cn.rst +++ b/docs/api/paddle/static/Print_cn.rst @@ -36,4 +36,4 @@ Print 代码示例 :::::::::::: -COPY-FROM: paddle.static.Print \ No newline at end of file +COPY-FROM: paddle.static.Print diff --git a/docs/api/paddle/static/WeightNormParamAttr_cn.rst b/docs/api/paddle/static/WeightNormParamAttr_cn.rst index 9887f189c22..93ad53d2cd7 100644 --- a/docs/api/paddle/static/WeightNormParamAttr_cn.rst +++ 
b/docs/api/paddle/static/WeightNormParamAttr_cn.rst @@ -34,4 +34,4 @@ WeightNormParamAttr 代码示例 :::::::::::: -COPY-FROM: paddle.static.WeightNormParamAttr \ No newline at end of file +COPY-FROM: paddle.static.WeightNormParamAttr diff --git a/docs/api/paddle/static/accuracy_cn.rst b/docs/api/paddle/static/accuracy_cn.rst index d650b2549ee..88cb61c622b 100755 --- a/docs/api/paddle/static/accuracy_cn.rst +++ b/docs/api/paddle/static/accuracy_cn.rst @@ -29,4 +29,4 @@ accuracy layer。参考 https://en.wikipedia.org/wiki/Precision_and_recall 代码示例 :::::::::::: -COPY-FROM: paddle.static.accuracy \ No newline at end of file +COPY-FROM: paddle.static.accuracy diff --git a/docs/api/paddle/static/append_backward_cn.rst b/docs/api/paddle/static/append_backward_cn.rst index ae55d0210ae..43ef3b3e2d1 100644 --- a/docs/api/paddle/static/append_backward_cn.rst +++ b/docs/api/paddle/static/append_backward_cn.rst @@ -31,4 +31,4 @@ append_backward 代码示例 :::::::::::: -COPY-FROM: paddle.static.append_backward \ No newline at end of file +COPY-FROM: paddle.static.append_backward diff --git a/docs/api/paddle/static/auc_cn.rst b/docs/api/paddle/static/auc_cn.rst index 18bfd924e4a..a1859e6c566 100755 --- a/docs/api/paddle/static/auc_cn.rst +++ b/docs/api/paddle/static/auc_cn.rst @@ -48,4 +48,4 @@ tuple,当前计算出的AUC。数据类型是tensor,支持float32和float64 代码示例 :::::::::::: -COPY-FROM: paddle.static.auc \ No newline at end of file +COPY-FROM: paddle.static.auc diff --git a/docs/api/paddle/static/cpu_places_cn.rst b/docs/api/paddle/static/cpu_places_cn.rst index beedbaf5634..af3bbe8a34d 100644 --- a/docs/api/paddle/static/cpu_places_cn.rst +++ b/docs/api/paddle/static/cpu_places_cn.rst @@ -22,4 +22,4 @@ list[paddle.CPUPlace], ``CPUPlace`` 的列表。 代码示例 ::::::::: -COPY-FROM: paddle.static.cpu_places \ No newline at end of file +COPY-FROM: paddle.static.cpu_places diff --git a/docs/api/paddle/static/create_global_var_cn.rst b/docs/api/paddle/static/create_global_var_cn.rst index 4c5256f52be..5730e44b6c9 100644 --- 
a/docs/api/paddle/static/create_global_var_cn.rst +++ b/docs/api/paddle/static/create_global_var_cn.rst @@ -28,4 +28,4 @@ Variable,创建的Tensor变量。 代码示例 :::::::::::: -COPY-FROM: paddle.static.create_global_var \ No newline at end of file +COPY-FROM: paddle.static.create_global_var diff --git a/docs/api/paddle/static/create_parameter_cn.rst b/docs/api/paddle/static/create_parameter_cn.rst index 9be4c4d90b1..ce3d3f7c278 100644 --- a/docs/api/paddle/static/create_parameter_cn.rst +++ b/docs/api/paddle/static/create_parameter_cn.rst @@ -30,4 +30,4 @@ create_parameter 代码示例 :::::::::::: -COPY-FROM: paddle.static.create_parameter \ No newline at end of file +COPY-FROM: paddle.static.create_parameter diff --git a/docs/api/paddle/static/cuda_places_cn.rst b/docs/api/paddle/static/cuda_places_cn.rst index 6e1306aa8bb..5030d49c75c 100644 --- a/docs/api/paddle/static/cuda_places_cn.rst +++ b/docs/api/paddle/static/cuda_places_cn.rst @@ -29,4 +29,4 @@ list[paddle.CUDAPlace],创建的 ``paddle.CUDAPlace`` 列表。 代码示例 ::::::::: -COPY-FROM: paddle.static.cuda_places \ No newline at end of file +COPY-FROM: paddle.static.cuda_places diff --git a/docs/api/paddle/static/data_cn.rst b/docs/api/paddle/static/data_cn.rst index fb6699fba20..304a6cf576a 100644 --- a/docs/api/paddle/static/data_cn.rst +++ b/docs/api/paddle/static/data_cn.rst @@ -28,4 +28,4 @@ Tensor,全局变量,可进行数据访问。 代码示例 :::::::::::: -COPY-FROM: paddle.static.data \ No newline at end of file +COPY-FROM: paddle.static.data diff --git a/docs/api/paddle/static/default_main_program_cn.rst b/docs/api/paddle/static/default_main_program_cn.rst index 1af2442e66e..464948ca96f 100644 --- a/docs/api/paddle/static/default_main_program_cn.rst +++ b/docs/api/paddle/static/default_main_program_cn.rst @@ -22,4 +22,4 @@ default_main_program 代码示例 ::::::::: -COPY-FROM: paddle.static.default_main_program \ No newline at end of file +COPY-FROM: paddle.static.default_main_program diff --git a/docs/api/paddle/static/default_startup_program_cn.rst 
b/docs/api/paddle/static/default_startup_program_cn.rst index e64c8ad7b20..38bd20e2c12 100644 --- a/docs/api/paddle/static/default_startup_program_cn.rst +++ b/docs/api/paddle/static/default_startup_program_cn.rst @@ -27,4 +27,4 @@ default_startup_program 代码示例 ::::::::: -COPY-FROM: paddle.static.default_startup_program \ No newline at end of file +COPY-FROM: paddle.static.default_startup_program diff --git a/docs/api/paddle/static/deserialize_persistables_cn.rst b/docs/api/paddle/static/deserialize_persistables_cn.rst index 103a98c00f5..9f10f53ced1 100644 --- a/docs/api/paddle/static/deserialize_persistables_cn.rst +++ b/docs/api/paddle/static/deserialize_persistables_cn.rst @@ -26,4 +26,4 @@ deserialize_persistables 代码示例 :::::::::::: -COPY-FROM: paddle.static.deserialize_persistables \ No newline at end of file +COPY-FROM: paddle.static.deserialize_persistables diff --git a/docs/api/paddle/static/deserialize_program_cn.rst b/docs/api/paddle/static/deserialize_program_cn.rst index 20f622da593..c7b84f79dfe 100644 --- a/docs/api/paddle/static/deserialize_program_cn.rst +++ b/docs/api/paddle/static/deserialize_program_cn.rst @@ -24,4 +24,4 @@ deserialize_program 代码示例 :::::::::::: -COPY-FROM: paddle.static.deserialize_program \ No newline at end of file +COPY-FROM: paddle.static.deserialize_program diff --git a/docs/api/paddle/static/device_guard_cn.rst b/docs/api/paddle/static/device_guard_cn.rst index 4917f6c8285..a9d6145fd83 100644 --- a/docs/api/paddle/static/device_guard_cn.rst +++ b/docs/api/paddle/static/device_guard_cn.rst @@ -18,4 +18,4 @@ device_guard 代码示例 :::::::::::: -COPY-FROM: paddle.static.device_guard \ No newline at end of file +COPY-FROM: paddle.static.device_guard diff --git a/docs/api/paddle/static/global_scope_cn.rst b/docs/api/paddle/static/global_scope_cn.rst index 913afe51d93..2c6a97469d8 100644 --- a/docs/api/paddle/static/global_scope_cn.rst +++ b/docs/api/paddle/static/global_scope_cn.rst @@ -18,4 +18,4 @@ Scope,全局/默认作用域实例。 代码示例 :::::::::::: 
-COPY-FROM: paddle.static.global_scope \ No newline at end of file +COPY-FROM: paddle.static.global_scope diff --git a/docs/api/paddle/static/load_cn.rst b/docs/api/paddle/static/load_cn.rst index 7473d67f8bc..536959b0e96 100644 --- a/docs/api/paddle/static/load_cn.rst +++ b/docs/api/paddle/static/load_cn.rst @@ -28,4 +28,4 @@ load 代码示例 :::::::::::: -COPY-FROM: paddle.static.load \ No newline at end of file +COPY-FROM: paddle.static.load diff --git a/docs/api/paddle/static/load_from_file_cn.rst b/docs/api/paddle/static/load_from_file_cn.rst index 3e822d155e8..ce1f073afd0 100644 --- a/docs/api/paddle/static/load_from_file_cn.rst +++ b/docs/api/paddle/static/load_from_file_cn.rst @@ -23,4 +23,4 @@ load_from_file 代码示例 :::::::::::: -COPY-FROM: paddle.static.load_from_file \ No newline at end of file +COPY-FROM: paddle.static.load_from_file diff --git a/docs/api/paddle/static/load_inference_model_cn.rst b/docs/api/paddle/static/load_inference_model_cn.rst index 4367071eb09..39028d41776 100644 --- a/docs/api/paddle/static/load_inference_model_cn.rst +++ b/docs/api/paddle/static/load_inference_model_cn.rst @@ -33,4 +33,4 @@ load_inference_model 代码示例 :::::::::::: -COPY-FROM: paddle.static.load_inference_model \ No newline at end of file +COPY-FROM: paddle.static.load_inference_model diff --git a/docs/api/paddle/static/load_program_state_cn.rst b/docs/api/paddle/static/load_program_state_cn.rst index e74d27cdabf..c5fca4f08f3 100644 --- a/docs/api/paddle/static/load_program_state_cn.rst +++ b/docs/api/paddle/static/load_program_state_cn.rst @@ -20,4 +20,4 @@ load_program_state 代码示例 :::::::::::: -COPY-FROM: paddle.static.load_program_state \ No newline at end of file +COPY-FROM: paddle.static.load_program_state diff --git a/docs/api/paddle/static/mlu_places_cn.rst b/docs/api/paddle/static/mlu_places_cn.rst index e2adaab1408..027e58568d4 100644 --- a/docs/api/paddle/static/mlu_places_cn.rst +++ b/docs/api/paddle/static/mlu_places_cn.rst @@ -29,4 +29,3 @@ 
list[paddle.device.MLUPlace],创建的 ``paddle.device.MLUPlace`` 列表。 代码示例 ::::::::: COPY-FROM: paddle.static.mlu_places - diff --git a/docs/api/paddle/static/name_scope_cn.rst b/docs/api/paddle/static/name_scope_cn.rst index e6e19df89b6..a9ab41dae11 100644 --- a/docs/api/paddle/static/name_scope_cn.rst +++ b/docs/api/paddle/static/name_scope_cn.rst @@ -20,4 +20,4 @@ name_scope 代码示例 :::::::::::: -COPY-FROM: paddle.static.name_scope \ No newline at end of file +COPY-FROM: paddle.static.name_scope diff --git a/docs/api/paddle/static/nn/batch_norm_cn.rst b/docs/api/paddle/static/nn/batch_norm_cn.rst index d79a279b166..ebc9d06f524 100644 --- a/docs/api/paddle/static/nn/batch_norm_cn.rst +++ b/docs/api/paddle/static/nn/batch_norm_cn.rst @@ -70,4 +70,4 @@ moving_mean和moving_var是训练过程中统计得到的全局均值和方差 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.batch_norm \ No newline at end of file +COPY-FROM: paddle.static.nn.batch_norm diff --git a/docs/api/paddle/static/nn/bilinear_tensor_product_cn.rst b/docs/api/paddle/static/nn/bilinear_tensor_product_cn.rst index 08b76e8dbf6..0f55c5ebb2b 100644 --- a/docs/api/paddle/static/nn/bilinear_tensor_product_cn.rst +++ b/docs/api/paddle/static/nn/bilinear_tensor_product_cn.rst @@ -42,4 +42,4 @@ bilinear_tensor_product 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.bilinear_tensor_product \ No newline at end of file +COPY-FROM: paddle.static.nn.bilinear_tensor_product diff --git a/docs/api/paddle/static/nn/case_cn.rst b/docs/api/paddle/static/nn/case_cn.rst index 4ac72f751c6..846a227af28 100644 --- a/docs/api/paddle/static/nn/case_cn.rst +++ b/docs/api/paddle/static/nn/case_cn.rst @@ -27,4 +27,4 @@ Tensor|list(Tensor) 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.case \ No newline at end of file +COPY-FROM: paddle.static.nn.case diff --git a/docs/api/paddle/static/nn/cond_cn.rst b/docs/api/paddle/static/nn/cond_cn.rst index ccde363936e..c4293e3b0e0 100644 --- a/docs/api/paddle/static/nn/cond_cn.rst +++ b/docs/api/paddle/static/nn/cond_cn.rst @@ 
-75,4 +75,3 @@ Tensor|list(Tensor)|tuple(Tensor),如果 ``pred`` 是 ``True``,该API返回 # ret[0] = [[1 1]] # ret[1] = [[ True True True] # [ True True True]] - diff --git a/docs/api/paddle/static/nn/conv2d_cn.rst b/docs/api/paddle/static/nn/conv2d_cn.rst index 77207ea050b..32d4762a63b 100644 --- a/docs/api/paddle/static/nn/conv2d_cn.rst +++ b/docs/api/paddle/static/nn/conv2d_cn.rst @@ -98,4 +98,4 @@ conv2d 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.conv2d \ No newline at end of file +COPY-FROM: paddle.static.nn.conv2d diff --git a/docs/api/paddle/static/nn/conv2d_transpose_cn.rst b/docs/api/paddle/static/nn/conv2d_transpose_cn.rst index a8e680cbb2d..fa8a2fb5a80 100644 --- a/docs/api/paddle/static/nn/conv2d_transpose_cn.rst +++ b/docs/api/paddle/static/nn/conv2d_transpose_cn.rst @@ -108,4 +108,4 @@ conv2d_transpose 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.conv2d_transpose \ No newline at end of file +COPY-FROM: paddle.static.nn.conv2d_transpose diff --git a/docs/api/paddle/static/nn/conv3d_cn.rst b/docs/api/paddle/static/nn/conv3d_cn.rst index f857db7bcf2..fa5d0e9f8b2 100644 --- a/docs/api/paddle/static/nn/conv3d_cn.rst +++ b/docs/api/paddle/static/nn/conv3d_cn.rst @@ -102,4 +102,4 @@ conv3d 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.conv3d \ No newline at end of file +COPY-FROM: paddle.static.nn.conv3d diff --git a/docs/api/paddle/static/nn/conv3d_transpose_cn.rst b/docs/api/paddle/static/nn/conv3d_transpose_cn.rst index ce593612817..3d115cb8757 100755 --- a/docs/api/paddle/static/nn/conv3d_transpose_cn.rst +++ b/docs/api/paddle/static/nn/conv3d_transpose_cn.rst @@ -114,4 +114,4 @@ conv3d_transpose 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.conv3d_transpose \ No newline at end of file +COPY-FROM: paddle.static.nn.conv3d_transpose diff --git a/docs/api/paddle/static/nn/crf_decoding_cn.rst b/docs/api/paddle/static/nn/crf_decoding_cn.rst index bdc85ed9f90..7d1307a8247 100644 --- a/docs/api/paddle/static/nn/crf_decoding_cn.rst +++ 
b/docs/api/paddle/static/nn/crf_decoding_cn.rst @@ -35,4 +35,4 @@ Tensor,解码结果具体内容根据 ``Label`` 参数是否提供而定,请 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.crf_decoding \ No newline at end of file +COPY-FROM: paddle.static.nn.crf_decoding diff --git a/docs/api/paddle/static/nn/data_norm_cn.rst b/docs/api/paddle/static/nn/data_norm_cn.rst index 25a8908667c..b38723d4767 100644 --- a/docs/api/paddle/static/nn/data_norm_cn.rst +++ b/docs/api/paddle/static/nn/data_norm_cn.rst @@ -53,4 +53,4 @@ Tensor,是对输入数据进行正则化后的结果。 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.data_norm \ No newline at end of file +COPY-FROM: paddle.static.nn.data_norm diff --git a/docs/api/paddle/static/nn/deform_conv2d_cn.rst b/docs/api/paddle/static/nn/deform_conv2d_cn.rst index 80296a9a1f9..297730cdcaa 100644 --- a/docs/api/paddle/static/nn/deform_conv2d_cn.rst +++ b/docs/api/paddle/static/nn/deform_conv2d_cn.rst @@ -73,4 +73,4 @@ Tensor,可变形卷积输出的4-D Tensor,数据类型为float32或float64 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.deform_conv2d \ No newline at end of file +COPY-FROM: paddle.static.nn.deform_conv2d diff --git a/docs/api/paddle/static/nn/embedding_cn.rst b/docs/api/paddle/static/nn/embedding_cn.rst index faebd8cc9e7..8762d12f1ca 100644 --- a/docs/api/paddle/static/nn/embedding_cn.rst +++ b/docs/api/paddle/static/nn/embedding_cn.rst @@ -78,4 +78,4 @@ Variable,input映射后得到的Embedding Tensor或LoDTensor,数据类型和 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.embedding \ No newline at end of file +COPY-FROM: paddle.static.nn.embedding diff --git a/docs/api/paddle/static/nn/fc_cn.rst b/docs/api/paddle/static/nn/fc_cn.rst index f600426dd1c..e447b906fe7 100755 --- a/docs/api/paddle/static/nn/fc_cn.rst +++ b/docs/api/paddle/static/nn/fc_cn.rst @@ -118,5 +118,3 @@ Tensor,形状为 :math:`[batch\_size, *, size]`,数据类型与输入Tensor weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)), bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0))) # out: [[1.8 1.8]] - - 
diff --git a/docs/api/paddle/static/nn/group_norm_cn.rst b/docs/api/paddle/static/nn/group_norm_cn.rst index 6a03c18336e..55d9440e918 100755 --- a/docs/api/paddle/static/nn/group_norm_cn.rst +++ b/docs/api/paddle/static/nn/group_norm_cn.rst @@ -28,4 +28,4 @@ Tensor,数据类型和格式与 `input` 一致。 代码示例 ::::::::: -COPY-FROM: paddle.static.nn.group_norm \ No newline at end of file +COPY-FROM: paddle.static.nn.group_norm diff --git a/docs/api/paddle/static/nn/instance_norm_cn.rst b/docs/api/paddle/static/nn/instance_norm_cn.rst index b4dfd2108d6..a8d14498169 100644 --- a/docs/api/paddle/static/nn/instance_norm_cn.rst +++ b/docs/api/paddle/static/nn/instance_norm_cn.rst @@ -42,4 +42,4 @@ Tensor,在输入中运用instance normalization后的结果。 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.instance_norm \ No newline at end of file +COPY-FROM: paddle.static.nn.instance_norm diff --git a/docs/api/paddle/static/nn/layer_norm_cn.rst b/docs/api/paddle/static/nn/layer_norm_cn.rst index 3e38c986bd3..586798f3dd9 100644 --- a/docs/api/paddle/static/nn/layer_norm_cn.rst +++ b/docs/api/paddle/static/nn/layer_norm_cn.rst @@ -49,4 +49,4 @@ layer_norm 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.layer_norm \ No newline at end of file +COPY-FROM: paddle.static.nn.layer_norm diff --git a/docs/api/paddle/static/nn/nce_cn.rst b/docs/api/paddle/static/nn/nce_cn.rst index ceb387e32e9..f491e30acfd 100644 --- a/docs/api/paddle/static/nn/nce_cn.rst +++ b/docs/api/paddle/static/nn/nce_cn.rst @@ -39,4 +39,4 @@ Tensor,nce loss,数据类型与 **input** 相同。 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.nce \ No newline at end of file +COPY-FROM: paddle.static.nn.nce diff --git a/docs/api/paddle/static/nn/prelu_cn.rst b/docs/api/paddle/static/nn/prelu_cn.rst index 258d6a18561..7df9d36827b 100644 --- a/docs/api/paddle/static/nn/prelu_cn.rst +++ b/docs/api/paddle/static/nn/prelu_cn.rst @@ -34,4 +34,4 @@ prelu激活函数 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.prelu \ No newline at end of file +COPY-FROM: paddle.static.nn.prelu diff 
--git a/docs/api/paddle/static/nn/row_conv_cn.rst b/docs/api/paddle/static/nn/row_conv_cn.rst index c5080ddfa9d..74ee1e81816 100644 --- a/docs/api/paddle/static/nn/row_conv_cn.rst +++ b/docs/api/paddle/static/nn/row_conv_cn.rst @@ -43,4 +43,4 @@ row_conv 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.row_conv \ No newline at end of file +COPY-FROM: paddle.static.nn.row_conv diff --git a/docs/api/paddle/static/nn/sequence_concat_cn.rst b/docs/api/paddle/static/nn/sequence_concat_cn.rst index 333aae58ba1..63a4e717e64 100644 --- a/docs/api/paddle/static/nn/sequence_concat_cn.rst +++ b/docs/api/paddle/static/nn/sequence_concat_cn.rst @@ -45,13 +45,3 @@ Tensor,输出连接后的LoDTensor,数据类型和输入一致。 ::::::::: COPY-FROM: paddle.static.nn.sequence_concat - - - - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_conv_cn.rst b/docs/api/paddle/static/nn/sequence_conv_cn.rst index 7f2febfff6b..87479435c33 100644 --- a/docs/api/paddle/static/nn/sequence_conv_cn.rst +++ b/docs/api/paddle/static/nn/sequence_conv_cn.rst @@ -72,9 +72,3 @@ sequence_conv ::::::::: COPY-FROM: paddle.static.nn.sequence_conv - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_enumerate_cn.rst b/docs/api/paddle/static/nn/sequence_enumerate_cn.rst index a79806b90c9..90bccc4a1ee 100644 --- a/docs/api/paddle/static/nn/sequence_enumerate_cn.rst +++ b/docs/api/paddle/static/nn/sequence_enumerate_cn.rst @@ -43,12 +43,3 @@ sequence_enumerate 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_enumerate - - - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_expand_as_cn.rst b/docs/api/paddle/static/nn/sequence_expand_as_cn.rst index 2c93c5a40ed..59c38932e3f 100644 --- a/docs/api/paddle/static/nn/sequence_expand_as_cn.rst +++ b/docs/api/paddle/static/nn/sequence_expand_as_cn.rst @@ -62,12 +62,3 @@ Sequence Expand As Layer,该OP根据输入 ``y`` 的第0级lod对输入 ``x`` 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_expand_as - - - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_expand_cn.rst 
b/docs/api/paddle/static/nn/sequence_expand_cn.rst index af3d0d39e59..5d35be2d5dc 100644 --- a/docs/api/paddle/static/nn/sequence_expand_cn.rst +++ b/docs/api/paddle/static/nn/sequence_expand_cn.rst @@ -65,10 +65,3 @@ sequence_expand 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_expand - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_first_step_cn.rst b/docs/api/paddle/static/nn/sequence_first_step_cn.rst index 89c52cf6a9f..a8d7e30bf59 100644 --- a/docs/api/paddle/static/nn/sequence_first_step_cn.rst +++ b/docs/api/paddle/static/nn/sequence_first_step_cn.rst @@ -54,12 +54,3 @@ sequence_first_step 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_first_step - - - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_last_step_cn.rst b/docs/api/paddle/static/nn/sequence_last_step_cn.rst index 7b1d3523506..cc4ae53762b 100644 --- a/docs/api/paddle/static/nn/sequence_last_step_cn.rst +++ b/docs/api/paddle/static/nn/sequence_last_step_cn.rst @@ -55,12 +55,3 @@ sequence_last_step 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_last_step - - - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_pad_cn.rst b/docs/api/paddle/static/nn/sequence_pad_cn.rst index 54a7d73128e..121ade1a9ee 100644 --- a/docs/api/paddle/static/nn/sequence_pad_cn.rst +++ b/docs/api/paddle/static/nn/sequence_pad_cn.rst @@ -72,10 +72,3 @@ sequence_pad 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_pad - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_reshape_cn.rst b/docs/api/paddle/static/nn/sequence_reshape_cn.rst index a479cd06f92..963e12974fb 100644 --- a/docs/api/paddle/static/nn/sequence_reshape_cn.rst +++ b/docs/api/paddle/static/nn/sequence_reshape_cn.rst @@ -46,12 +46,3 @@ sequence_reshape ::::::::: COPY-FROM: paddle.static.nn.sequence_reshape - - - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_reverse_cn.rst b/docs/api/paddle/static/nn/sequence_reverse_cn.rst index 09b414c0418..18917c67d7a 100644 --- 
a/docs/api/paddle/static/nn/sequence_reverse_cn.rst +++ b/docs/api/paddle/static/nn/sequence_reverse_cn.rst @@ -46,10 +46,3 @@ sequence_reverse :::::::::::: COPY-FROM: paddle.static.nn.sequence_reverse - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_scatter_cn.rst b/docs/api/paddle/static/nn/sequence_scatter_cn.rst index 77eef5a6000..730ff637e71 100644 --- a/docs/api/paddle/static/nn/sequence_scatter_cn.rst +++ b/docs/api/paddle/static/nn/sequence_scatter_cn.rst @@ -58,12 +58,3 @@ output[i][j]的值取决于能否在index中第i+1个区间中找到对应的数 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_scatter - - - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_slice_cn.rst b/docs/api/paddle/static/nn/sequence_slice_cn.rst index e2f140c868d..e54f5398cee 100644 --- a/docs/api/paddle/static/nn/sequence_slice_cn.rst +++ b/docs/api/paddle/static/nn/sequence_slice_cn.rst @@ -53,13 +53,3 @@ Tensor,序列切片运算结果。 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_slice - - - - - - - - - - diff --git a/docs/api/paddle/static/nn/sequence_softmax_cn.rst b/docs/api/paddle/static/nn/sequence_softmax_cn.rst index cd2b8f56024..8339f8799f1 100644 --- a/docs/api/paddle/static/nn/sequence_softmax_cn.rst +++ b/docs/api/paddle/static/nn/sequence_softmax_cn.rst @@ -59,4 +59,3 @@ sequence_softmax :::::::::::: COPY-FROM: paddle.static.nn.sequence_softmax - diff --git a/docs/api/paddle/static/nn/sequence_unpad_cn.rst b/docs/api/paddle/static/nn/sequence_unpad_cn.rst index 612120cda7d..dd4500a6e66 100644 --- a/docs/api/paddle/static/nn/sequence_unpad_cn.rst +++ b/docs/api/paddle/static/nn/sequence_unpad_cn.rst @@ -48,4 +48,3 @@ sequence_unpad 代码示例 ::::::::: COPY-FROM: paddle.static.nn.sequence_unpad - diff --git a/docs/api/paddle/static/nn/sparse_embedding_cn.rst b/docs/api/paddle/static/nn/sparse_embedding_cn.rst index 2333ac43840..e570586ab9b 100644 --- a/docs/api/paddle/static/nn/sparse_embedding_cn.rst +++ b/docs/api/paddle/static/nn/sparse_embedding_cn.rst @@ -75,4 +75,4 @@ 
Variable,input映射后得到的Embedding Tensor或LoDTensor,数据类型和 代码示例 :::::::: -COPY-FROM: paddle.static.nn.sparse_embedding \ No newline at end of file +COPY-FROM: paddle.static.nn.sparse_embedding diff --git a/docs/api/paddle/static/nn/spectral_norm_cn.rst b/docs/api/paddle/static/nn/spectral_norm_cn.rst index 493ba6dbb0b..7fcb038e19a 100644 --- a/docs/api/paddle/static/nn/spectral_norm_cn.rst +++ b/docs/api/paddle/static/nn/spectral_norm_cn.rst @@ -44,4 +44,4 @@ spectral_norm 代码示例 ::::::::: -COPY-FROM: paddle.static.nn.spectral_norm \ No newline at end of file +COPY-FROM: paddle.static.nn.spectral_norm diff --git a/docs/api/paddle/static/nn/switch_case_cn.rst b/docs/api/paddle/static/nn/switch_case_cn.rst index 709c2ec1889..8be87d970a9 100644 --- a/docs/api/paddle/static/nn/switch_case_cn.rst +++ b/docs/api/paddle/static/nn/switch_case_cn.rst @@ -28,4 +28,4 @@ Tensor|list(Tensor) 代码示例 :::::::::::: -COPY-FROM: paddle.static.nn.switch_case \ No newline at end of file +COPY-FROM: paddle.static.nn.switch_case diff --git a/docs/api/paddle/static/normalize_program_cn.rst b/docs/api/paddle/static/normalize_program_cn.rst index cb0e86f26b5..c39ce94555b 100644 --- a/docs/api/paddle/static/normalize_program_cn.rst +++ b/docs/api/paddle/static/normalize_program_cn.rst @@ -25,4 +25,4 @@ normalize_program 代码示例 :::::::::::: -COPY-FROM: paddle.static.normalize_program \ No newline at end of file +COPY-FROM: paddle.static.normalize_program diff --git a/docs/api/paddle/static/npu_places_cn.rst b/docs/api/paddle/static/npu_places_cn.rst index dad8e324b99..ae760f79262 100644 --- a/docs/api/paddle/static/npu_places_cn.rst +++ b/docs/api/paddle/static/npu_places_cn.rst @@ -29,4 +29,3 @@ list[paddle.NPUPlace],创建的 ``paddle.NPUPlace`` 列表。 代码示例 ::::::::: COPY-FROM: paddle.static.npu_places - diff --git a/docs/api/paddle/static/program_guard_cn.rst b/docs/api/paddle/static/program_guard_cn.rst index fd6994bd045..6b7ef649821 100644 --- a/docs/api/paddle/static/program_guard_cn.rst +++ 
b/docs/api/paddle/static/program_guard_cn.rst @@ -47,4 +47,3 @@ program_guard # does not care about startup program. Just pass a temporary value. with paddle.static.program_guard(main_program, paddle.static.Program()): data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') - diff --git a/docs/api/paddle/static/save_cn.rst b/docs/api/paddle/static/save_cn.rst index 0a3edfe5803..d33aea854c7 100644 --- a/docs/api/paddle/static/save_cn.rst +++ b/docs/api/paddle/static/save_cn.rst @@ -31,4 +31,4 @@ save 代码示例 :::::::::::: -COPY-FROM: paddle.static.save \ No newline at end of file +COPY-FROM: paddle.static.save diff --git a/docs/api/paddle/static/save_inference_model_cn.rst b/docs/api/paddle/static/save_inference_model_cn.rst index 9d40bd5ef94..d5cb66bcf12 100644 --- a/docs/api/paddle/static/save_inference_model_cn.rst +++ b/docs/api/paddle/static/save_inference_model_cn.rst @@ -33,4 +33,4 @@ save_inference_model 代码示例 :::::::::::: -COPY-FROM: paddle.static.save_inference_model \ No newline at end of file +COPY-FROM: paddle.static.save_inference_model diff --git a/docs/api/paddle/static/save_to_file_cn.rst b/docs/api/paddle/static/save_to_file_cn.rst index 784ced4da9b..c324e896891 100644 --- a/docs/api/paddle/static/save_to_file_cn.rst +++ b/docs/api/paddle/static/save_to_file_cn.rst @@ -24,4 +24,4 @@ save_to_file 代码示例 :::::::::::: -COPY-FROM: paddle.static.save_to_file \ No newline at end of file +COPY-FROM: paddle.static.save_to_file diff --git a/docs/api/paddle/static/scope_guard_cn.rst b/docs/api/paddle/static/scope_guard_cn.rst index d9944c4a159..f522bdddf7a 100644 --- a/docs/api/paddle/static/scope_guard_cn.rst +++ b/docs/api/paddle/static/scope_guard_cn.rst @@ -29,4 +29,4 @@ scope_guard 代码示例 :::::::::::: -COPY-FROM: paddle.static.scope_guard \ No newline at end of file +COPY-FROM: paddle.static.scope_guard diff --git a/docs/api/paddle/static/serialize_persistables_cn.rst b/docs/api/paddle/static/serialize_persistables_cn.rst index 
dd5d7168604..16fa2931232 100644 --- a/docs/api/paddle/static/serialize_persistables_cn.rst +++ b/docs/api/paddle/static/serialize_persistables_cn.rst @@ -29,4 +29,4 @@ serialize_persistables 代码示例 :::::::::::: -COPY-FROM: paddle.static.serialize_persistables \ No newline at end of file +COPY-FROM: paddle.static.serialize_persistables diff --git a/docs/api/paddle/static/serialize_program_cn.rst b/docs/api/paddle/static/serialize_program_cn.rst index 67df7e1de4e..23fcb0813a2 100644 --- a/docs/api/paddle/static/serialize_program_cn.rst +++ b/docs/api/paddle/static/serialize_program_cn.rst @@ -28,4 +28,4 @@ serialize_program 代码示例 :::::::::::: -COPY-FROM: paddle.static.serialize_program \ No newline at end of file +COPY-FROM: paddle.static.serialize_program diff --git a/docs/api/paddle/static/set_program_state_cn.rst b/docs/api/paddle/static/set_program_state_cn.rst index 390e9a2efcb..decd38070b1 100644 --- a/docs/api/paddle/static/set_program_state_cn.rst +++ b/docs/api/paddle/static/set_program_state_cn.rst @@ -26,4 +26,4 @@ set_program_state 代码示例 :::::::::::: -COPY-FROM: paddle.static.set_program_state \ No newline at end of file +COPY-FROM: paddle.static.set_program_state diff --git a/docs/api/paddle/static/xpu_places_cn.rst b/docs/api/paddle/static/xpu_places_cn.rst index 0415c52371d..4e4cd21481e 100644 --- a/docs/api/paddle/static/xpu_places_cn.rst +++ b/docs/api/paddle/static/xpu_places_cn.rst @@ -29,4 +29,4 @@ list[paddle.XPUPlace],创建的 ``paddle.XPUPlace`` 列表。 代码示例 ::::::::: -COPY-FROM: paddle.static.xpu_places \ No newline at end of file +COPY-FROM: paddle.static.xpu_places diff --git a/docs/api/paddle/std_cn.rst b/docs/api/paddle/std_cn.rst index 99aa5d42626..6e584afc3e5 100644 --- a/docs/api/paddle/std_cn.rst +++ b/docs/api/paddle/std_cn.rst @@ -22,4 +22,4 @@ std 代码示例 :::::::::: -COPY-FROM: paddle.std \ No newline at end of file +COPY-FROM: paddle.std diff --git a/docs/api/paddle/strided_slice_cn.rst b/docs/api/paddle/strided_slice_cn.rst index 
3b8318148fc..4e0c09fb9ea 100644 --- a/docs/api/paddle/strided_slice_cn.rst +++ b/docs/api/paddle/strided_slice_cn.rst @@ -63,4 +63,4 @@ strided_slice算子。 代码示例 :::::::::::: -COPY-FROM: paddle.strided_slice \ No newline at end of file +COPY-FROM: paddle.strided_slice diff --git a/docs/api/paddle/subtract_cn.rst b/docs/api/paddle/subtract_cn.rst index dcc39722101..19c4008ae86 100644 --- a/docs/api/paddle/subtract_cn.rst +++ b/docs/api/paddle/subtract_cn.rst @@ -30,4 +30,4 @@ subtract 代码示例 :::::::::: -COPY-FROM: paddle.subtract \ No newline at end of file +COPY-FROM: paddle.subtract diff --git a/docs/api/paddle/summary_cn.rst b/docs/api/paddle/summary_cn.rst index 70b8b033a27..157827532a4 100644 --- a/docs/api/paddle/summary_cn.rst +++ b/docs/api/paddle/summary_cn.rst @@ -22,4 +22,4 @@ summary 代码示例 ::::::::: -COPY-FROM: paddle.summary \ No newline at end of file +COPY-FROM: paddle.summary diff --git a/docs/api/paddle/sysconfig/get_include_cn.rst b/docs/api/paddle/sysconfig/get_include_cn.rst index f42ee160b24..bc4fea6e966 100644 --- a/docs/api/paddle/sysconfig/get_include_cn.rst +++ b/docs/api/paddle/sysconfig/get_include_cn.rst @@ -15,4 +15,4 @@ get_include 代码示例 :::::::::: -COPY-FROM: paddle.sysconfig.get_include \ No newline at end of file +COPY-FROM: paddle.sysconfig.get_include diff --git a/docs/api/paddle/sysconfig/get_lib_cn.rst b/docs/api/paddle/sysconfig/get_lib_cn.rst index 535b273500f..7f925cf7f4d 100644 --- a/docs/api/paddle/sysconfig/get_lib_cn.rst +++ b/docs/api/paddle/sysconfig/get_lib_cn.rst @@ -15,4 +15,4 @@ get_lib 代码示例 :::::::::: -COPY-FROM: paddle.sysconfig.get_lib \ No newline at end of file +COPY-FROM: paddle.sysconfig.get_lib diff --git a/docs/api/paddle/take_along_axis_cn.rst b/docs/api/paddle/take_along_axis_cn.rst index a57a9357146..e06ddf03a5e 100644 --- a/docs/api/paddle/take_along_axis_cn.rst +++ b/docs/api/paddle/take_along_axis_cn.rst @@ -23,4 +23,3 @@ take_along_axis COPY-FROM: paddle.take_along_axis:code-example1 - diff --git 
a/docs/api/paddle/tan_cn.rst b/docs/api/paddle/tan_cn.rst index 80cf22e5c56..65d579a2963 100644 --- a/docs/api/paddle/tan_cn.rst +++ b/docs/api/paddle/tan_cn.rst @@ -27,4 +27,4 @@ Tensor - 该OP的输出为Tensor,数据类型为输入一致。 代码示例 ::::::::: -COPY-FROM: paddle.tan \ No newline at end of file +COPY-FROM: paddle.tan diff --git a/docs/api/paddle/tanh_cn.rst b/docs/api/paddle/tanh_cn.rst index b77d9d44744..ee238332157 100644 --- a/docs/api/paddle/tanh_cn.rst +++ b/docs/api/paddle/tanh_cn.rst @@ -26,4 +26,4 @@ tanh的输出Tensor,和输入有着相同类型和shape。 代码示例 ::::::::: -COPY-FROM: paddle.tanh \ No newline at end of file +COPY-FROM: paddle.tanh diff --git a/docs/api/paddle/tensordot_cn.rst b/docs/api/paddle/tensordot_cn.rst index 08a236d06ad..0009995de28 100644 --- a/docs/api/paddle/tensordot_cn.rst +++ b/docs/api/paddle/tensordot_cn.rst @@ -35,4 +35,4 @@ tensordot 代码示例 :::::::::::: -COPY-FROM: paddle.tensordot \ No newline at end of file +COPY-FROM: paddle.tensordot diff --git a/docs/api/paddle/text/Conll05st_cn.rst b/docs/api/paddle/text/Conll05st_cn.rst index f9cb4d19e8c..9a7dea23f79 100644 --- a/docs/api/paddle/text/Conll05st_cn.rst +++ b/docs/api/paddle/text/Conll05st_cn.rst @@ -28,4 +28,4 @@ Conll05st 代码示例 ::::::::: -COPY-FROM: paddle.text.datasets.Conll05st \ No newline at end of file +COPY-FROM: paddle.text.datasets.Conll05st diff --git a/docs/api/paddle/text/Imdb_cn.rst b/docs/api/paddle/text/Imdb_cn.rst index b067c394de7..e2d7e874d8a 100644 --- a/docs/api/paddle/text/Imdb_cn.rst +++ b/docs/api/paddle/text/Imdb_cn.rst @@ -23,4 +23,4 @@ Imdb 代码示例 ::::::::: -COPY-FROM: paddle.text.datasets.Imdb \ No newline at end of file +COPY-FROM: paddle.text.datasets.Imdb diff --git a/docs/api/paddle/text/Imikolov_cn.rst b/docs/api/paddle/text/Imikolov_cn.rst index 9ed7e6d2a4f..b97ba08c5d3 100644 --- a/docs/api/paddle/text/Imikolov_cn.rst +++ b/docs/api/paddle/text/Imikolov_cn.rst @@ -25,4 +25,4 @@ Imikolov 代码示例 ::::::::: -COPY-FROM: paddle.text.datasets.Imikolov \ No newline at end of file +COPY-FROM: 
paddle.text.datasets.Imikolov diff --git a/docs/api/paddle/text/Movielens_cn.rst b/docs/api/paddle/text/Movielens_cn.rst index 085703af345..dd3b36b11cc 100644 --- a/docs/api/paddle/text/Movielens_cn.rst +++ b/docs/api/paddle/text/Movielens_cn.rst @@ -25,4 +25,4 @@ Movielens 代码示例 ::::::::: -COPY-FROM: paddle.text.datasets.Movielens \ No newline at end of file +COPY-FROM: paddle.text.datasets.Movielens diff --git a/docs/api/paddle/text/UCIHousing_cn.rst b/docs/api/paddle/text/UCIHousing_cn.rst index 8f6076a4e75..e8c430345ad 100644 --- a/docs/api/paddle/text/UCIHousing_cn.rst +++ b/docs/api/paddle/text/UCIHousing_cn.rst @@ -46,4 +46,3 @@ UCIHousing model = SimpleNet() feature, target = model(feature, target) print(feature.numpy().shape, target.numpy()) - diff --git a/docs/api/paddle/text/ViterbiDecoder_cn.rst b/docs/api/paddle/text/ViterbiDecoder_cn.rst index ecdf6a748c5..b10b772bfbc 100644 --- a/docs/api/paddle/text/ViterbiDecoder_cn.rst +++ b/docs/api/paddle/text/ViterbiDecoder_cn.rst @@ -25,4 +25,4 @@ ViterbiDecoder 代码示例 ::::::::: -COPY-FROM: paddle.text.ViterbiDecoder \ No newline at end of file +COPY-FROM: paddle.text.ViterbiDecoder diff --git a/docs/api/paddle/text/WMT14_cn.rst b/docs/api/paddle/text/WMT14_cn.rst index 638eaba9c59..d56497fdd83 100644 --- a/docs/api/paddle/text/WMT14_cn.rst +++ b/docs/api/paddle/text/WMT14_cn.rst @@ -32,4 +32,4 @@ http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz 代码示例 ::::::::: -COPY-FROM: paddle.text.datasets.WMT14 \ No newline at end of file +COPY-FROM: paddle.text.datasets.WMT14 diff --git a/docs/api/paddle/text/WMT16_cn.rst b/docs/api/paddle/text/WMT16_cn.rst index 6bb9d67e0a2..41632240ac1 100644 --- a/docs/api/paddle/text/WMT16_cn.rst +++ b/docs/api/paddle/text/WMT16_cn.rst @@ -32,4 +32,4 @@ http://www.statmt.org/wmt16/multimodal-task.html#task1 代码示例 ::::::::: -COPY-FROM: paddle.text.datasets.WMT16 \ No newline at end of file +COPY-FROM: paddle.text.datasets.WMT16 diff --git a/docs/api/paddle/text/viterbi_decode_cn.rst 
b/docs/api/paddle/text/viterbi_decode_cn.rst index 84f4431cc50..1897401b6ad 100644 --- a/docs/api/paddle/text/viterbi_decode_cn.rst +++ b/docs/api/paddle/text/viterbi_decode_cn.rst @@ -24,4 +24,4 @@ viterbi_decode 代码示例 ::::::::: -COPY-FROM: paddle.text.viterbi_decode \ No newline at end of file +COPY-FROM: paddle.text.viterbi_decode diff --git a/docs/api/paddle/tile_cn.rst b/docs/api/paddle/tile_cn.rst index 3b676c08c39..81cc5e953f1 100644 --- a/docs/api/paddle/tile_cn.rst +++ b/docs/api/paddle/tile_cn.rst @@ -22,4 +22,4 @@ tile 代码示例 ::::::::: -COPY-FROM: paddle.tile \ No newline at end of file +COPY-FROM: paddle.tile diff --git a/docs/api/paddle/to_tensor_cn.rst b/docs/api/paddle/to_tensor_cn.rst index 84d8c2fc663..fc6680f3a35 100644 --- a/docs/api/paddle/to_tensor_cn.rst +++ b/docs/api/paddle/to_tensor_cn.rst @@ -29,4 +29,4 @@ to_tensor 代码示例 ::::::::: -COPY-FROM: paddle.to_tensor \ No newline at end of file +COPY-FROM: paddle.to_tensor diff --git a/docs/api/paddle/trace_cn.rst b/docs/api/paddle/trace_cn.rst index 6d2c470e379..5dcad586275 100644 --- a/docs/api/paddle/trace_cn.rst +++ b/docs/api/paddle/trace_cn.rst @@ -34,4 +34,4 @@ Tensor,指定二维平面的对角线元素之和。数据类型和输入数 代码示例 ::::::::: -COPY-FROM: paddle.trace \ No newline at end of file +COPY-FROM: paddle.trace diff --git a/docs/api/paddle/transpose_cn.rst b/docs/api/paddle/transpose_cn.rst index 98ed78e6368..f40f874322c 100644 --- a/docs/api/paddle/transpose_cn.rst +++ b/docs/api/paddle/transpose_cn.rst @@ -58,4 +58,3 @@ transpose x_transposed = paddle.transpose(x, perm=[1, 0, 2]) print(x_transposed.shape) # [3L, 2L, 4L] - diff --git a/docs/api/paddle/tril_cn.rst b/docs/api/paddle/tril_cn.rst index ddd39bab8f6..b0863121b3f 100644 --- a/docs/api/paddle/tril_cn.rst +++ b/docs/api/paddle/tril_cn.rst @@ -25,4 +25,4 @@ Tensor,数据类型与输入 `input` 数据类型一致。 代码示例 ::::::::: -COPY-FROM: paddle.tril \ No newline at end of file +COPY-FROM: paddle.tril diff --git a/docs/api/paddle/triu_cn.rst b/docs/api/paddle/triu_cn.rst index 
21790259e64..b0b709d0301 100644 --- a/docs/api/paddle/triu_cn.rst +++ b/docs/api/paddle/triu_cn.rst @@ -22,4 +22,4 @@ Tensor,数据类型与输入 `input` 数据类型一致。 代码示例 ::::::::: -COPY-FROM: paddle.triu \ No newline at end of file +COPY-FROM: paddle.triu diff --git a/docs/api/paddle/trunc_cn.rst b/docs/api/paddle/trunc_cn.rst index 6939e00669c..7c5eac665b7 100644 --- a/docs/api/paddle/trunc_cn.rst +++ b/docs/api/paddle/trunc_cn.rst @@ -22,4 +22,4 @@ trunc 代码示例 ::::::::: -COPY-FROM: paddle.trunc \ No newline at end of file +COPY-FROM: paddle.trunc diff --git a/docs/api/paddle/unsqueeze_cn.rst b/docs/api/paddle/unsqueeze_cn.rst index c75a5062f97..de5d2e07e2f 100644 --- a/docs/api/paddle/unsqueeze_cn.rst +++ b/docs/api/paddle/unsqueeze_cn.rst @@ -23,4 +23,4 @@ Tensor,扩展维度后的多维Tensor,数据类型与输入Tensor一致。 代码示例 ::::::::: -COPY-FROM: paddle.unsqueeze \ No newline at end of file +COPY-FROM: paddle.unsqueeze diff --git a/docs/api/paddle/unstack_cn.rst b/docs/api/paddle/unstack_cn.rst index 222b3759f57..e444983bc00 100644 --- a/docs/api/paddle/unstack_cn.rst +++ b/docs/api/paddle/unstack_cn.rst @@ -29,4 +29,4 @@ unstack 代码示例 :::::::::::: -COPY-FROM: paddle.unstack \ No newline at end of file +COPY-FROM: paddle.unstack diff --git a/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst b/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst index d59da32c454..31de33ef795 100644 --- a/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst @@ -39,4 +39,4 @@ CppExtension 返回 :::::::::::: - ``setuptools.Extension`` 对象。 \ No newline at end of file + ``setuptools.Extension`` 对象。 diff --git a/docs/api/paddle/utils/cpp_extension/get_build_directory_cn.rst b/docs/api/paddle/utils/cpp_extension/get_build_directory_cn.rst index 8bf53217279..693ddb63354 100644 --- a/docs/api/paddle/utils/cpp_extension/get_build_directory_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/get_build_directory_cn.rst @@ -15,4 +15,4 @@ get_build_directory 代码示例 
:::::::::::: -COPY-FROM: paddle.utils.cpp_extension.get_build_directory \ No newline at end of file +COPY-FROM: paddle.utils.cpp_extension.get_build_directory diff --git a/docs/api/paddle/utils/deprecated_cn.rst b/docs/api/paddle/utils/deprecated_cn.rst index 0ca3c05f004..133397c0b9f 100644 --- a/docs/api/paddle/utils/deprecated_cn.rst +++ b/docs/api/paddle/utils/deprecated_cn.rst @@ -21,4 +21,3 @@ paddle_utils_deprecated 返回 :::::::::::: 装饰器(装饰器函数或者装饰器类)。 - diff --git a/docs/api/paddle/utils/dlpack/from_dlpack_cn.rst b/docs/api/paddle/utils/dlpack/from_dlpack_cn.rst index 431bdf68ce9..82cfe37e79e 100644 --- a/docs/api/paddle/utils/dlpack/from_dlpack_cn.rst +++ b/docs/api/paddle/utils/dlpack/from_dlpack_cn.rst @@ -17,4 +17,4 @@ from_dlpack 代码示例 ::::::::: -COPY-FROM: paddle.utils.dlpack.from_dlpack \ No newline at end of file +COPY-FROM: paddle.utils.dlpack.from_dlpack diff --git a/docs/api/paddle/utils/dlpack/to_dlpack_cn.rst b/docs/api/paddle/utils/dlpack/to_dlpack_cn.rst index 17bd019943c..68ac04f4fad 100644 --- a/docs/api/paddle/utils/dlpack/to_dlpack_cn.rst +++ b/docs/api/paddle/utils/dlpack/to_dlpack_cn.rst @@ -17,4 +17,4 @@ to_dlpack 代码示例 ::::::::: -COPY-FROM: paddle.utils.dlpack.to_dlpack \ No newline at end of file +COPY-FROM: paddle.utils.dlpack.to_dlpack diff --git a/docs/api/paddle/utils/download/get_weights_path_from_url_cn.rst b/docs/api/paddle/utils/download/get_weights_path_from_url_cn.rst index 37ae60d0ff5..accf8c8aa98 100644 --- a/docs/api/paddle/utils/download/get_weights_path_from_url_cn.rst +++ b/docs/api/paddle/utils/download/get_weights_path_from_url_cn.rst @@ -21,4 +21,4 @@ get_weights_path_from_url 代码示例 :::::::::::: -COPY-FROM: paddle.utils.download.get_weights_path_from_url \ No newline at end of file +COPY-FROM: paddle.utils.download.get_weights_path_from_url diff --git a/docs/api/paddle/utils/run_check_cn.rst b/docs/api/paddle/utils/run_check_cn.rst index a090403f198..679efae1b31 100644 --- a/docs/api/paddle/utils/run_check_cn.rst +++ 
b/docs/api/paddle/utils/run_check_cn.rst @@ -11,4 +11,4 @@ run_check 代码示例 :::::::::: -COPY-FROM: paddle.utils.run_check \ No newline at end of file +COPY-FROM: paddle.utils.run_check diff --git a/docs/api/paddle/utils/unique_name/generate_cn.rst b/docs/api/paddle/utils/unique_name/generate_cn.rst index 630eb13c08d..3bcf02b382b 100644 --- a/docs/api/paddle/utils/unique_name/generate_cn.rst +++ b/docs/api/paddle/utils/unique_name/generate_cn.rst @@ -22,4 +22,4 @@ str,含前缀key的唯一名称。 代码示例 :::::::::::: -COPY-FROM: paddle.utils.unique_name.generate \ No newline at end of file +COPY-FROM: paddle.utils.unique_name.generate diff --git a/docs/api/paddle/utils/unique_name/guard_cn.rst b/docs/api/paddle/utils/unique_name/guard_cn.rst index a7b10e9e063..9c3b77a941b 100644 --- a/docs/api/paddle/utils/unique_name/guard_cn.rst +++ b/docs/api/paddle/utils/unique_name/guard_cn.rst @@ -22,4 +22,4 @@ guard 代码示例 :::::::::::: -COPY-FROM: paddle.utils.unique_name.guard \ No newline at end of file +COPY-FROM: paddle.utils.unique_name.guard diff --git a/docs/api/paddle/utils/unique_name/switch_cn.rst b/docs/api/paddle/utils/unique_name/switch_cn.rst index e1a1550b467..c07365d0cb1 100644 --- a/docs/api/paddle/utils/unique_name/switch_cn.rst +++ b/docs/api/paddle/utils/unique_name/switch_cn.rst @@ -22,4 +22,4 @@ UniqueNameGenerator,先前的命名空间,一般无需操作该返回值。 代码示例 :::::::::::: -COPY-FROM: paddle.utils.unique_name.switch \ No newline at end of file +COPY-FROM: paddle.utils.unique_name.switch diff --git a/docs/api/paddle/var_cn.rst b/docs/api/paddle/var_cn.rst index 06569078d59..bd53df417d1 100644 --- a/docs/api/paddle/var_cn.rst +++ b/docs/api/paddle/var_cn.rst @@ -27,4 +27,4 @@ var 代码示例 :::::::::: -COPY-FROM: paddle.var \ No newline at end of file +COPY-FROM: paddle.var diff --git a/docs/api/paddle/version/Overview_cn.rst b/docs/api/paddle/version/Overview_cn.rst index 015bc60f2de..59edcec0e15 100644 --- a/docs/api/paddle/version/Overview_cn.rst +++ b/docs/api/paddle/version/Overview_cn.rst @@ -16,4 
+16,3 @@ paddle.version 目录下包含的API返回 paddle 安装包相关配置的版本 " :ref:`cuda ` ", "获取paddle wheel包编译时使用的CUDA版本" " :ref:`cudnn ` ", "获取paddle wheel包编译时使用的cuDNN版本" " :ref:`show ` ", "打印paddle版本、CUDA版本、cuDNN版本等信息" - diff --git a/docs/api/paddle/version/cuda_cn.rst b/docs/api/paddle/version/cuda_cn.rst index 1752cd8f83f..15862a34187 100644 --- a/docs/api/paddle/version/cuda_cn.rst +++ b/docs/api/paddle/version/cuda_cn.rst @@ -22,4 +22,3 @@ cuda paddle.version.cuda() # '10.2' - diff --git a/docs/api/paddle/version/cudnn_cn.rst b/docs/api/paddle/version/cudnn_cn.rst index b754990cac1..e73d8211055 100644 --- a/docs/api/paddle/version/cudnn_cn.rst +++ b/docs/api/paddle/version/cudnn_cn.rst @@ -22,4 +22,3 @@ cudnn paddle.version.cudnn() # '7.6.5' - diff --git a/docs/api/paddle/version/show_cn.rst b/docs/api/paddle/version/show_cn.rst index b84e5092295..f74eff8d927 100644 --- a/docs/api/paddle/version/show_cn.rst +++ b/docs/api/paddle/version/show_cn.rst @@ -25,4 +25,4 @@ show 代码示例 :::::::::: -COPY-FROM: paddle.version.show \ No newline at end of file +COPY-FROM: paddle.version.show diff --git a/docs/api/paddle/vision/get_image_backend_cn.rst b/docs/api/paddle/vision/get_image_backend_cn.rst index ce9e0d8e2d1..b9ae82d7464 100644 --- a/docs/api/paddle/vision/get_image_backend_cn.rst +++ b/docs/api/paddle/vision/get_image_backend_cn.rst @@ -16,4 +16,4 @@ get_image_backend 代码示例 ::::::::: -COPY-FROM: paddle.vision.image.get_image_backend \ No newline at end of file +COPY-FROM: paddle.vision.image.get_image_backend diff --git a/docs/api/paddle/vision/image_load_cn.rst b/docs/api/paddle/vision/image_load_cn.rst index 39ca278d519..afd1decc80f 100644 --- a/docs/api/paddle/vision/image_load_cn.rst +++ b/docs/api/paddle/vision/image_load_cn.rst @@ -21,4 +21,4 @@ image_load 代码示例 ::::::::: -COPY-FROM: paddle.vision.image.image_load \ No newline at end of file +COPY-FROM: paddle.vision.image.image_load diff --git a/docs/api/paddle/vision/models/MobileNetV1_cn.rst 
b/docs/api/paddle/vision/models/MobileNetV1_cn.rst index 4657c5c43f5..b1cf6375bbb 100644 --- a/docs/api/paddle/vision/models/MobileNetV1_cn.rst +++ b/docs/api/paddle/vision/models/MobileNetV1_cn.rst @@ -24,4 +24,3 @@ MobileNetV1 模型,来自论文 `"MobileNets: Efficient Convolutional Neural N ::::::::: COPY-FROM: paddle.vision.models.MobileNetV1 - diff --git a/docs/api/paddle/vision/models/MobileNetV2_cn.rst b/docs/api/paddle/vision/models/MobileNetV2_cn.rst index 809f3be9097..e9ce4d635b0 100644 --- a/docs/api/paddle/vision/models/MobileNetV2_cn.rst +++ b/docs/api/paddle/vision/models/MobileNetV2_cn.rst @@ -24,4 +24,3 @@ MobileNetV2 模型,来自论文 `"MobileNetV2: Inverted Residuals and Linear B ::::::::: COPY-FROM: paddle.vision.models.MobileNetV2 - diff --git a/docs/api/paddle/vision/models/squeezenet1_1_cn.rst b/docs/api/paddle/vision/models/squeezenet1_1_cn.rst index 3f61e249385..2aead4f8636 100644 --- a/docs/api/paddle/vision/models/squeezenet1_1_cn.rst +++ b/docs/api/paddle/vision/models/squeezenet1_1_cn.rst @@ -23,4 +23,3 @@ SqueezeNet v1.1 模型,来自论文 `"SqueezeNet: AlexNet-level accuracy with ::::::::: COPY-FROM: paddle.vision.models.squeezenet1_1 - diff --git a/docs/api/paddle/vision/models/vgg13_cn.rst b/docs/api/paddle/vision/models/vgg13_cn.rst index 22d44d209d2..b19038241e9 100644 --- a/docs/api/paddle/vision/models/vgg13_cn.rst +++ b/docs/api/paddle/vision/models/vgg13_cn.rst @@ -24,4 +24,3 @@ vgg13 ::::::::: COPY-FROM: paddle.vision.models.vgg13 - diff --git a/docs/api/paddle/vision/ops/DeformConv2D_cn.rst b/docs/api/paddle/vision/ops/DeformConv2D_cn.rst index eb64ea39d4a..94d9a3b1258 100644 --- a/docs/api/paddle/vision/ops/DeformConv2D_cn.rst +++ b/docs/api/paddle/vision/ops/DeformConv2D_cn.rst @@ -75,4 +75,4 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x 代码示例 :::::::::::: -COPY-FROM: paddle.vision.ops.DeformConv2D \ No newline at end of file +COPY-FROM: paddle.vision.ops.DeformConv2D diff --git a/docs/api/paddle/vision/ops/RoIPool_cn.rst 
b/docs/api/paddle/vision/ops/RoIPool_cn.rst index c105bdcd15d..f784bae909b 100644 --- a/docs/api/paddle/vision/ops/RoIPool_cn.rst +++ b/docs/api/paddle/vision/ops/RoIPool_cn.rst @@ -38,4 +38,4 @@ RoIPool boxes_num = paddle.to_tensor([3]).astype('int32') roi_pool = RoIPool(output_size=(4, 3)) pool_out = roi_pool(data, boxes, boxes_num) - assert pool_out.shape == [3, 256, 4, 3], '' \ No newline at end of file + assert pool_out.shape == [3, 256, 4, 3], '' diff --git a/docs/api/paddle/vision/ops/deform_conv2d_cn.rst b/docs/api/paddle/vision/ops/deform_conv2d_cn.rst index b34aa905b7b..0f9b49ac599 100755 --- a/docs/api/paddle/vision/ops/deform_conv2d_cn.rst +++ b/docs/api/paddle/vision/ops/deform_conv2d_cn.rst @@ -67,4 +67,4 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x 代码示例 :::::::::::: -COPY-FROM: paddle.vision.ops.deform_conv2d \ No newline at end of file +COPY-FROM: paddle.vision.ops.deform_conv2d diff --git a/docs/api/paddle/vision/ops/roi_pool_cn.rst b/docs/api/paddle/vision/ops/roi_pool_cn.rst index 8489f56c786..da599738ced 100644 --- a/docs/api/paddle/vision/ops/roi_pool_cn.rst +++ b/docs/api/paddle/vision/ops/roi_pool_cn.rst @@ -37,4 +37,4 @@ roi_pool boxes[:, 3] += boxes[:, 1] + 4 boxes_num = paddle.to_tensor([3]).astype('int32') pool_out = roi_pool(data, boxes, boxes_num=boxes_num, output_size=3) - assert pool_out.shape == [3, 256, 3, 3], '' \ No newline at end of file + assert pool_out.shape == [3, 256, 3, 3], '' diff --git a/docs/api/paddle/vision/ops/yolo_box_cn.rst b/docs/api/paddle/vision/ops/yolo_box_cn.rst index 1e9dae02586..dce1f90eae4 100644 --- a/docs/api/paddle/vision/ops/yolo_box_cn.rst +++ b/docs/api/paddle/vision/ops/yolo_box_cn.rst @@ -49,4 +49,4 @@ yolo_box 代码示例 ::::::::: -COPY-FROM: paddle.vision.ops.yolo_box \ No newline at end of file +COPY-FROM: paddle.vision.ops.yolo_box diff --git a/docs/api/paddle/vision/ops/yolo_loss_cn.rst b/docs/api/paddle/vision/ops/yolo_loss_cn.rst index eb786e72b60..60c68857f8d 100644 --- 
a/docs/api/paddle/vision/ops/yolo_loss_cn.rst +++ b/docs/api/paddle/vision/ops/yolo_loss_cn.rst @@ -77,4 +77,4 @@ Tensor,yolov3损失的值,具有形状[N]的1-D Tensor。 代码示例 ::::::::: -COPY-FROM: paddle.vision.ops.yolo_loss \ No newline at end of file +COPY-FROM: paddle.vision.ops.yolo_loss diff --git a/docs/api/paddle/vision/set_image_backend_cn.rst b/docs/api/paddle/vision/set_image_backend_cn.rst index 5de1ce12a5c..910ac48828a 100644 --- a/docs/api/paddle/vision/set_image_backend_cn.rst +++ b/docs/api/paddle/vision/set_image_backend_cn.rst @@ -16,4 +16,4 @@ set_image_backend 代码示例 ::::::::: -COPY-FROM: paddle.vision.image.set_image_backend \ No newline at end of file +COPY-FROM: paddle.vision.image.set_image_backend diff --git a/docs/api/paddle/vision/transforms/BaseTransform_cn.rst b/docs/api/paddle/vision/transforms/BaseTransform_cn.rst index 6dbea6693e8..d2d90c02530 100644 --- a/docs/api/paddle/vision/transforms/BaseTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/BaseTransform_cn.rst @@ -106,4 +106,3 @@ BaseTransform flip_transform = CustomRandomFlip(1.0, keys=('image', 'boxes', 'mask')) (converted_img, converted_boxes, converted_mask) = flip_transform((fake_img, fake_boxes, fake_mask)) print('converted boxes', converted_boxes) - diff --git a/docs/api/paddle/vision/transforms/Compose_cn.rst b/docs/api/paddle/vision/transforms/Compose_cn.rst index 20843ea2068..7a1faea0253 100644 --- a/docs/api/paddle/vision/transforms/Compose_cn.rst +++ b/docs/api/paddle/vision/transforms/Compose_cn.rst @@ -31,4 +31,3 @@ Compose for i in range(10): sample = flowers[i] print(sample[0].size, sample[1]) - diff --git a/docs/api/paddle/vision/transforms/Normalize_cn.rst b/docs/api/paddle/vision/transforms/Normalize_cn.rst index 719f7e1bf16..45e149ac2c8 100644 --- a/docs/api/paddle/vision/transforms/Normalize_cn.rst +++ b/docs/api/paddle/vision/transforms/Normalize_cn.rst @@ -24,4 +24,4 @@ normalize 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.normalize \ No newline at end of file 
+COPY-FROM: paddle.vision.transforms.normalize diff --git a/docs/api/paddle/vision/transforms/RandomErasing_cn.rst b/docs/api/paddle/vision/transforms/RandomErasing_cn.rst index 533b80285c0..7f0f2b28cf1 100644 --- a/docs/api/paddle/vision/transforms/RandomErasing_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomErasing_cn.rst @@ -40,4 +40,3 @@ RandomErasing result = transform(fake_img) print(result) - diff --git a/docs/api/paddle/vision/transforms/RandomRotation_cn.rst b/docs/api/paddle/vision/transforms/RandomRotation_cn.rst index c6049299301..8ccf7487108 100644 --- a/docs/api/paddle/vision/transforms/RandomRotation_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomRotation_cn.rst @@ -54,4 +54,3 @@ RandomRotate fake_img = transform(fake_img) print(fake_img.size) - diff --git a/docs/api/paddle/vision/transforms/Resize_cn.rst b/docs/api/paddle/vision/transforms/Resize_cn.rst index efb090d235e..62b461b435e 100644 --- a/docs/api/paddle/vision/transforms/Resize_cn.rst +++ b/docs/api/paddle/vision/transforms/Resize_cn.rst @@ -52,4 +52,3 @@ resize converted_img = F.resize(fake_img, (200, 150)) print(converted_img.size) # (150, 200) - diff --git a/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst b/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst index 41bd4160b4c..94c3d610257 100644 --- a/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst +++ b/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst @@ -22,5 +22,3 @@ adjust_brightness ::::::::: COPY-FROM: paddle.vision.transforms.adjust_brightness - - diff --git a/docs/api/paddle/vision/transforms/adjust_contrast_cn.rst b/docs/api/paddle/vision/transforms/adjust_contrast_cn.rst index f1267e057d0..dfa7dc85419 100644 --- a/docs/api/paddle/vision/transforms/adjust_contrast_cn.rst +++ b/docs/api/paddle/vision/transforms/adjust_contrast_cn.rst @@ -21,4 +21,4 @@ adjust_contrast 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.adjust_contrast \ No newline at end of file +COPY-FROM: 
paddle.vision.transforms.adjust_contrast diff --git a/docs/api/paddle/vision/transforms/adjust_hue_cn.rst b/docs/api/paddle/vision/transforms/adjust_hue_cn.rst index e9b5edec5e2..35b4a033c1f 100644 --- a/docs/api/paddle/vision/transforms/adjust_hue_cn.rst +++ b/docs/api/paddle/vision/transforms/adjust_hue_cn.rst @@ -21,4 +21,4 @@ adjust_hue 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.adjust_hue \ No newline at end of file +COPY-FROM: paddle.vision.transforms.adjust_hue diff --git a/docs/api/paddle/vision/transforms/center_crop_cn.rst b/docs/api/paddle/vision/transforms/center_crop_cn.rst index d8bf5e9aa9e..302ca60629c 100644 --- a/docs/api/paddle/vision/transforms/center_crop_cn.rst +++ b/docs/api/paddle/vision/transforms/center_crop_cn.rst @@ -21,4 +21,4 @@ center_crop 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.center_crop \ No newline at end of file +COPY-FROM: paddle.vision.transforms.center_crop diff --git a/docs/api/paddle/vision/transforms/crop_cn.rst b/docs/api/paddle/vision/transforms/crop_cn.rst index 33fe114a4b5..9aa45e124f5 100644 --- a/docs/api/paddle/vision/transforms/crop_cn.rst +++ b/docs/api/paddle/vision/transforms/crop_cn.rst @@ -24,4 +24,4 @@ crop 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.crop \ No newline at end of file +COPY-FROM: paddle.vision.transforms.crop diff --git a/docs/api/paddle/vision/transforms/erase_cn.rst b/docs/api/paddle/vision/transforms/erase_cn.rst index 72111c16078..650439556d4 100644 --- a/docs/api/paddle/vision/transforms/erase_cn.rst +++ b/docs/api/paddle/vision/transforms/erase_cn.rst @@ -26,4 +26,4 @@ erase 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.erase \ No newline at end of file +COPY-FROM: paddle.vision.transforms.erase diff --git a/docs/api/paddle/vision/transforms/hflip_cn.rst b/docs/api/paddle/vision/transforms/hflip_cn.rst index d821b242ac2..e78c433bba7 100644 --- a/docs/api/paddle/vision/transforms/hflip_cn.rst +++ b/docs/api/paddle/vision/transforms/hflip_cn.rst @@ -20,4 
+20,4 @@ hflip 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.hflip \ No newline at end of file +COPY-FROM: paddle.vision.transforms.hflip diff --git a/docs/api/paddle/vision/transforms/resize_cn.rst b/docs/api/paddle/vision/transforms/resize_cn.rst index efb090d235e..62b461b435e 100644 --- a/docs/api/paddle/vision/transforms/resize_cn.rst +++ b/docs/api/paddle/vision/transforms/resize_cn.rst @@ -52,4 +52,3 @@ resize converted_img = F.resize(fake_img, (200, 150)) print(converted_img.size) # (150, 200) - diff --git a/docs/api/paddle/vision/transforms/rotate_cn.rst b/docs/api/paddle/vision/transforms/rotate_cn.rst index d705f32a380..b94f5a978b8 100644 --- a/docs/api/paddle/vision/transforms/rotate_cn.rst +++ b/docs/api/paddle/vision/transforms/rotate_cn.rst @@ -37,4 +37,3 @@ rotate rotated_img = F.rotate(fake_img, 90) print(rotated_img.size) - diff --git a/docs/api/paddle/vision/transforms/to_grayscale_cn.rst b/docs/api/paddle/vision/transforms/to_grayscale_cn.rst index be19de26592..928b22cb286 100644 --- a/docs/api/paddle/vision/transforms/to_grayscale_cn.rst +++ b/docs/api/paddle/vision/transforms/to_grayscale_cn.rst @@ -36,4 +36,3 @@ to_grayscale gray_img = F.to_grayscale(fake_img) print(gray_img.size) - diff --git a/docs/api/paddle/vision/transforms/to_tensor_cn.rst b/docs/api/paddle/vision/transforms/to_tensor_cn.rst index 4e72626ead0..9a1d59bc2ca 100644 --- a/docs/api/paddle/vision/transforms/to_tensor_cn.rst +++ b/docs/api/paddle/vision/transforms/to_tensor_cn.rst @@ -28,4 +28,4 @@ to_tensor 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.to_tensor \ No newline at end of file +COPY-FROM: paddle.vision.transforms.to_tensor diff --git a/docs/api/paddle/vision/transforms/vflip_cn.rst b/docs/api/paddle/vision/transforms/vflip_cn.rst index ea9394e6fa5..66b166563c8 100644 --- a/docs/api/paddle/vision/transforms/vflip_cn.rst +++ b/docs/api/paddle/vision/transforms/vflip_cn.rst @@ -20,4 +20,4 @@ vflip 代码示例 ::::::::: -COPY-FROM: paddle.vision.transforms.vflip 
\ No newline at end of file +COPY-FROM: paddle.vision.transforms.vflip diff --git a/docs/api/paddle/zeros_cn.rst b/docs/api/paddle/zeros_cn.rst index ab88f4bcd8c..84d5fc734e8 100644 --- a/docs/api/paddle/zeros_cn.rst +++ b/docs/api/paddle/zeros_cn.rst @@ -24,4 +24,4 @@ zeros 代码示例 :::::::::::: -COPY-FROM: paddle.zeros \ No newline at end of file +COPY-FROM: paddle.zeros diff --git a/docs/api/paddle/zeros_like_cn.rst b/docs/api/paddle/zeros_like_cn.rst index e6143a745f5..f6362b1e21c 100644 --- a/docs/api/paddle/zeros_like_cn.rst +++ b/docs/api/paddle/zeros_like_cn.rst @@ -22,4 +22,4 @@ zeros_like 代码示例 :::::::::: -COPY-FROM: paddle.zeros_like \ No newline at end of file +COPY-FROM: paddle.zeros_like diff --git a/docs/api_guides/low_level/backward_en.rst b/docs/api_guides/low_level/backward_en.rst index 599e4111dc4..210dd89370b 100644 --- a/docs/api_guides/low_level/backward_en.rst +++ b/docs/api_guides/low_level/backward_en.rst @@ -20,4 +20,3 @@ We do not recommend directly calling backpropagation-related APIs in :code:`flu If you want to implement it by yourself, you can also use: :code:`callback` in :ref:`api_fluid_backward_append_backward` to define the customized gradient form of Operator. 
For more information, please refer to: :ref:`api_fluid_backward_append_backward` - diff --git a/docs/api_guides/low_level/distributed/index_en.rst b/docs/api_guides/low_level/distributed/index_en.rst index a77fb47375a..8a30dfa3ff1 100644 --- a/docs/api_guides/low_level/distributed/index_en.rst +++ b/docs/api_guides/low_level/distributed/index_en.rst @@ -9,5 +9,3 @@ Distributed Training async_training_en.rst large_scale_sparse_feature_training_en.rst cluster_train_data_en.rst - - diff --git a/docs/api_guides/low_level/executor_en.rst b/docs/api_guides/low_level/executor_en.rst index f6f2c32544b..232016b35a3 100755 --- a/docs/api_guides/low_level/executor_en.rst +++ b/docs/api_guides/low_level/executor_en.rst @@ -29,6 +29,3 @@ For simple example please refer to `basics_fit_a_line <../../beginners_guide/bas - Related API : - :ref:`api_fluid_Executor` - - - diff --git a/docs/api_guides/low_level/layers/control_flow_en.rst b/docs/api_guides/low_level/layers/control_flow_en.rst index 28f9b18edd8..17fdef6ae0f 100755 --- a/docs/api_guides/low_level/layers/control_flow_en.rst +++ b/docs/api_guides/low_level/layers/control_flow_en.rst @@ -56,4 +56,4 @@ StaticRNN Static RNN can only process fixed-length sequence data, and accept Variable with :code:`lod_level=0` as input. Similar to :code:`DynamicRNN`, at each single time step of the RNN, the user needs to customize the calculation logic and export the status and output. 
-Please refer to :ref:`api_fluid_layers_StaticRNN` \ No newline at end of file +Please refer to :ref:`api_fluid_layers_StaticRNN` diff --git a/docs/api_guides/low_level/layers/conv_en.rst b/docs/api_guides/low_level/layers/conv_en.rst index 6c0850f1f0a..7313b4ae117 100755 --- a/docs/api_guides/low_level/layers/conv_en.rst +++ b/docs/api_guides/low_level/layers/conv_en.rst @@ -55,4 +55,4 @@ Suppose the input sequence shape is (T, N), while T is the number of time steps - related API: - :ref:`api_fluid_layers_sequence_conv` - - :ref:`api_fluid_layers_row_conv` \ No newline at end of file + - :ref:`api_fluid_layers_row_conv` diff --git a/docs/api_guides/low_level/layers/data_feeder.rst b/docs/api_guides/low_level/layers/data_feeder.rst index 495869bdafd..a2502141ca1 100644 --- a/docs/api_guides/low_level/layers/data_feeder.rst +++ b/docs/api_guides/low_level/layers/data_feeder.rst @@ -41,4 +41,4 @@ Python List或Tuple类型对象,其中N为创建 :code:`DataFeeder` 对象时 会完成数据类型和维度的转换。若 :code:`feed_list` 中的变量的 :code:`lod_level` 不为零,则Fluid会将经过维度转换后的 :code:`iterable` 中每行数据的第0维作为返回结果的 :code:`LoD`。 -具体使用方法请参见 :ref:`cn_api_fluid_DataFeeder` 。 \ No newline at end of file +具体使用方法请参见 :ref:`cn_api_fluid_DataFeeder` 。 diff --git a/docs/api_guides/low_level/layers/data_feeder_en.rst b/docs/api_guides/low_level/layers/data_feeder_en.rst index c1fec09bb37..03e28ca0647 100755 --- a/docs/api_guides/low_level/layers/data_feeder_en.rst +++ b/docs/api_guides/low_level/layers/data_feeder_en.rst @@ -38,4 +38,4 @@ you can feed Python int, float, and other types of data. The data types and dime the same as :code:`dtype` and :code:`shape` specified at :code:`fluid.layers.data()`. :code:`DataFeeder` internally performs the conversion of data types and dimensions. If the :code:`lod_level` of the variable in :code:`feed_list` is not zero, in Fluid, the 0th dimension of each row in the dimensionally converted :code:`iterable` will be returned as :code:`LoD` . -Read :ref:`api_fluid_DataFeeder` for specific usage. 
\ No newline at end of file +Read :ref:`api_fluid_DataFeeder` for specific usage. diff --git a/docs/api_guides/low_level/layers/data_in_out.rst b/docs/api_guides/low_level/layers/data_in_out.rst index 4517c17fbe4..6267a373a34 100644 --- a/docs/api_guides/low_level/layers/data_in_out.rst +++ b/docs/api_guides/low_level/layers/data_in_out.rst @@ -30,4 +30,4 @@ fetch期望的输出变量,通过设置 :code:`return_numpy` 参数设置是 若 :code:`return_numpy` 为 :code:`False` ,则返回 :code:`LoDTensor` 类型数据。 具体使用方式请参考相关API文档 :ref:`cn_api_fluid_executor_Executor` 和 -:ref:`cn_api_fluid_ParallelExecutor`。 \ No newline at end of file +:ref:`cn_api_fluid_ParallelExecutor`。 diff --git a/docs/api_guides/low_level/layers/data_in_out_en.rst b/docs/api_guides/low_level/layers/data_in_out_en.rst index db74e069740..2be989f5121 100755 --- a/docs/api_guides/low_level/layers/data_in_out_en.rst +++ b/docs/api_guides/low_level/layers/data_in_out_en.rst @@ -27,4 +27,4 @@ The user can fetch expected variables from :code:`executor.run(fetch_list=[...], If :code:`return_numpy` is :code:`False` , data of type :code:`LoDTensor` will be returned. For specific usage, please refer to the relevant API documentation :ref:`api_fluid_executor_Executor` and -:ref:`api_fluid_ParallelExecutor`. \ No newline at end of file +:ref:`api_fluid_ParallelExecutor`. 
diff --git a/docs/api_guides/low_level/layers/detection.rst b/docs/api_guides/low_level/layers/detection.rst index d580fe6feb4..532954de708 100644 --- a/docs/api_guides/low_level/layers/detection.rst +++ b/docs/api_guides/low_level/layers/detection.rst @@ -99,5 +99,3 @@ OCR * roi_perspective_transform:对输入roi做透视变换。API Reference 请参考 :ref:`cn_api_fluid_layers_roi_perspective_transform` * polygon_box_transform:对不规则检测框进行坐标变换。API Reference 请参考 :ref:`cn_api_fluid_layers_polygon_box_transform` - - diff --git a/docs/api_guides/low_level/layers/detection_en.rst b/docs/api_guides/low_level/layers/detection_en.rst index 5321de0dc88..d2ead09fcd7 100755 --- a/docs/api_guides/low_level/layers/detection_en.rst +++ b/docs/api_guides/low_level/layers/detection_en.rst @@ -59,4 +59,4 @@ Scene text recognition is a process of converting image information into a seque * roi_perspective_transform: Make a perspective transformation on the input RoI. For API Reference, please refer to :ref:`api_fluid_layers_roi_perspective_transform` -* polygon_box_transform: Coordinate transformation of the irregular bounding box. For API Reference, please refer to :ref:`api_fluid_layers_polygon_box_transform` \ No newline at end of file +* polygon_box_transform: Coordinate transformation of the irregular bounding box. 
For API Reference, please refer to :ref:`api_fluid_layers_polygon_box_transform` diff --git a/docs/api_guides/low_level/layers/index.rst b/docs/api_guides/low_level/layers/index.rst index 78bf1fa1f2f..d0182ed8ae7 100644 --- a/docs/api_guides/low_level/layers/index.rst +++ b/docs/api_guides/low_level/layers/index.rst @@ -18,4 +18,3 @@ data_feeder.rst learning_rate_scheduler.rst tensor.rst - diff --git a/docs/api_guides/low_level/layers/index_en.rst b/docs/api_guides/low_level/layers/index_en.rst index c4d1193fe2a..06ce0de3809 100644 --- a/docs/api_guides/low_level/layers/index_en.rst +++ b/docs/api_guides/low_level/layers/index_en.rst @@ -18,4 +18,3 @@ Neural Network Layer data_feeder_en.rst learning_rate_scheduler_en.rst tensor_en.rst - diff --git a/docs/api_guides/low_level/layers/learning_rate_scheduler.rst b/docs/api_guides/low_level/layers/learning_rate_scheduler.rst index a222609581b..b510df09862 100644 --- a/docs/api_guides/low_level/layers/learning_rate_scheduler.rst +++ b/docs/api_guides/low_level/layers/learning_rate_scheduler.rst @@ -60,4 +60,4 @@ 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_OneCycleLR` * :code:`CyclicLR`: 学习率根据指定的缩放策略以固定频率在最小和最大学习率之间进行循环。 - 相关API Reference请参考 :ref:`_cn_api_paddle_optimizer_lr_CyclicLR` \ No newline at end of file + 相关API Reference请参考 :ref:`_cn_api_paddle_optimizer_lr_CyclicLR` diff --git a/docs/api_guides/low_level/layers/learning_rate_scheduler_en.rst b/docs/api_guides/low_level/layers/learning_rate_scheduler_en.rst index 28f47cac1d7..967efdd33d5 100755 --- a/docs/api_guides/low_level/layers/learning_rate_scheduler_en.rst +++ b/docs/api_guides/low_level/layers/learning_rate_scheduler_en.rst @@ -43,4 +43,4 @@ The following content describes the APIs related to the learning rate scheduler: * :code:`OneCycleLR`: One cycle decay. That is, the initial learning rate first increases to maximum learning rate, and then it decreases to minimum learning rate which is much less than initial learning rate. 
For related API Reference please refer to :ref:`cn_api_paddle_optimizer_lr_OneCycleLR` -* :code:`CyclicLR`: Cyclic decay. That is, the learning rate cycles between minimum and maximum learning rate with a constant frequency in specified a sacle method. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_CyclicLR` \ No newline at end of file +* :code:`CyclicLR`: Cyclic decay. That is, the learning rate cycles between minimum and maximum learning rate with a constant frequency in specified a sacle method. For related API Reference please refer to :ref:`api_paddle_optimizer_lr_CyclicLR` diff --git a/docs/api_guides/low_level/layers/loss_function.rst b/docs/api_guides/low_level/layers/loss_function.rst index 6b4d22e1bf4..b8e7e44282d 100644 --- a/docs/api_guides/low_level/layers/loss_function.rst +++ b/docs/api_guides/low_level/layers/loss_function.rst @@ -57,4 +57,4 @@ API Reference 请参考 :ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_log 对于一些较为复杂的损失函数,可以尝试使用其他损失函数组合实现;Paddle Fluid 中提供的用于图像分割任务的 :ref:`cn_api_fluid_layers_dice_loss` 即是使用其他 OP 组合(计算各像素位置似然概率的均值)而成;多目标损失函数也可看作这样的情况,如 Faster RCNN 就使用 cross entropy 和 smooth_l1 loss 的加权和作为损失函数。 -**注意**,在定义损失函数之后为能够使用 :ref:`api_guide_optimizer` 进行优化,通常需要使用 :ref:`cn_api_fluid_layers_mean` 或其他操作将损失函数返回的高维 Tensor 转换为 Scalar 值。 \ No newline at end of file +**注意**,在定义损失函数之后为能够使用 :ref:`api_guide_optimizer` 进行优化,通常需要使用 :ref:`cn_api_fluid_layers_mean` 或其他操作将损失函数返回的高维 Tensor 转换为 Scalar 值。 diff --git a/docs/api_guides/low_level/layers/loss_function_en.rst b/docs/api_guides/low_level/layers/loss_function_en.rst index ba5f5da8f77..487a8515cb1 100755 --- a/docs/api_guides/low_level/layers/loss_function_en.rst +++ b/docs/api_guides/low_level/layers/loss_function_en.rst @@ -58,4 +58,4 @@ More For more complex loss functions, try to use combinations of other loss functions; the :ref:`api_fluid_layers_dice_loss` provided in Paddle Fluid for image segmentation tasks is an example of using combinations of other operators 
(calculate the average likelihood probability of each pixel position). The multi-objective loss function can also be considered similarly, such as Faster RCNN that uses the weighted sum of cross entropy and smooth_l1 loss as a loss function. -**Note**, after defining the loss function, in order to optimize with :ref:`api_guide_optimizer_en`, you usually need to use :ref:`api_fluid_layers_mean` or other operations to convert the high-dimensional Tensor returned by the loss function to a Scalar value. \ No newline at end of file +**Note**, after defining the loss function, in order to optimize with :ref:`api_guide_optimizer_en`, you usually need to use :ref:`api_fluid_layers_mean` or other operations to convert the high-dimensional Tensor returned by the loss function to a Scalar value. diff --git a/docs/api_guides/low_level/layers/sequence.rst b/docs/api_guides/low_level/layers/sequence.rst index 3c2dba96cb2..13801e41a74 100644 --- a/docs/api_guides/low_level/layers/sequence.rst +++ b/docs/api_guides/low_level/layers/sequence.rst @@ -109,4 +109,3 @@ API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_pad` :code:`input` 的长度tensor通常可以直接用 :code:`sequence_pad` 返回的 :code:`Length`。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_mask` - diff --git a/docs/api_guides/low_level/metrics.rst b/docs/api_guides/low_level/metrics.rst index e771b738b6b..e10312fed05 100644 --- a/docs/api_guides/low_level/metrics.rst +++ b/docs/api_guides/low_level/metrics.rst @@ -48,4 +48,3 @@ - 编辑距离: :code:`EditDistance` ,用来衡量两个字符串的相似度。可以参考文档 `Edit_distance `_。 API Reference 请参考 :ref:`cn_api_fluid_metrics_EditDistance` - diff --git a/docs/api_guides/low_level/metrics_en.rst b/docs/api_guides/low_level/metrics_en.rst index 98cafe5420c..58afd2eccd9 100755 --- a/docs/api_guides/low_level/metrics_en.rst +++ b/docs/api_guides/low_level/metrics_en.rst @@ -47,4 +47,4 @@ The generation task produces output directly from the input. 
In NLP tasks (such - Edit distance: :code:`EditDistance` to measure the similarity of two strings. You can refer to the documentation `Edit_distance `_. -  For API Reference, please refer to :ref:`api_fluid_metrics_EditDistance` \ No newline at end of file +  For API Reference, please refer to :ref:`api_fluid_metrics_EditDistance` diff --git a/docs/api_guides/low_level/model_save_reader_en.rst b/docs/api_guides/low_level/model_save_reader_en.rst index 6978c10f69d..1c2f37e9bfe 100755 --- a/docs/api_guides/low_level/model_save_reader_en.rst +++ b/docs/api_guides/low_level/model_save_reader_en.rst @@ -56,4 +56,4 @@ Introduction to APIs for loading a model For API Reference, please refer to :ref:`api_fluid_io_load_persistables` . -- :code:`fluid.io.load_inference_model`: please refer to :ref:`api_guide_inference_en` . \ No newline at end of file +- :code:`fluid.io.load_inference_model`: please refer to :ref:`api_guide_inference_en` . diff --git a/docs/api_guides/low_level/nets.rst b/docs/api_guides/low_level/nets.rst index 5e7f1fef723..ddb118e5041 100644 --- a/docs/api_guides/low_level/nets.rst +++ b/docs/api_guides/low_level/nets.rst @@ -59,4 +59,3 @@ API Reference 请参考 :ref:`cn_api_fluid_nets_glu` 该模块广泛使用在 `机器翻译 `_ 的模型中,比如 `Transformer `_ 。 API Reference 请参考 :ref:`cn_api_fluid_nets_scaled_dot_product_attention` - diff --git a/docs/api_guides/low_level/optimizer.rst b/docs/api_guides/low_level/optimizer.rst index 016d36b652c..38cc8855be6 100644 --- a/docs/api_guides/low_level/optimizer.rst +++ b/docs/api_guides/low_level/optimizer.rst @@ -88,5 +88,3 @@ API Reference 请参考 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` :code:`ModelAverage` 优化器,在训练中通过窗口来累计历史 parameter,在预测时使用取平均值后的paramet,整体提高预测的精度。 API Reference 请参考 :ref:`cn_api_fluid_optimizer_ModelAverage` - - diff --git a/docs/api_guides/low_level/optimizer_en.rst b/docs/api_guides/low_level/optimizer_en.rst index d166163796e..a0e8d54093d 100755 --- a/docs/api_guides/low_level/optimizer_en.rst +++ 
b/docs/api_guides/low_level/optimizer_en.rst @@ -87,4 +87,3 @@ API Reference: :ref:`api_fluid_optimizer_FtrlOptimizer` :code:`ModelAverage` Optimizer accumulates history parameters through sliding window during the model training. We use averaged parameters at inference time to upgrade general accuracy of inference. API Reference: :ref:`api_fluid_optimizer_ModelAverage` - diff --git a/docs/design/mkldnn/gru/index_en.rst b/docs/design/mkldnn/gru/index_en.rst index e206e5f20e3..d80286e1de7 100644 --- a/docs/design/mkldnn/gru/index_en.rst +++ b/docs/design/mkldnn/gru/index_en.rst @@ -4,4 +4,4 @@ oneDNN GRU operator .. toctree:: :maxdepth: 1 - gru.md \ No newline at end of file + gru.md diff --git a/docs/dev_guides/custom_device_docs/custom_runtime_en.rst b/docs/dev_guides/custom_device_docs/custom_runtime_en.rst index 565dd063657..e15ddea1005 100644 --- a/docs/dev_guides/custom_device_docs/custom_runtime_en.rst +++ b/docs/dev_guides/custom_device_docs/custom_runtime_en.rst @@ -141,4 +141,3 @@ Event APIs memory_api_en.md stream_api_en.md event_api_en.md - diff --git a/docs/dev_guides/index_cn.rst b/docs/dev_guides/index_cn.rst index 40213432afb..a0d83c519ef 100644 --- a/docs/dev_guides/index_cn.rst +++ b/docs/dev_guides/index_cn.rst @@ -30,4 +30,3 @@ sugon/index_cn.rst custom_device_docs/index_cn.rst docs_contributing_guides_cn.md - diff --git a/docs/dev_guides/style_guides_cn.md b/docs/dev_guides/style_guides_cn.md index 2658af9eec3..ff77912f9d6 100644 --- a/docs/dev_guides/style_guides_cn.md +++ b/docs/dev_guides/style_guides_cn.md @@ -3,4 +3,4 @@ 请参考以下规范,进行代码开发: - C++:[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) -- Python:[Google Python Style Guide](https://google.github.io/styleguide/pyguide.html) \ No newline at end of file +- Python:[Google Python Style Guide](https://google.github.io/styleguide/pyguide.html) diff --git a/docs/dev_guides/sugon/complie_and_test_cn.md b/docs/dev_guides/sugon/complie_and_test_cn.md index 
d0949593312..005daae35c0 100644 --- a/docs/dev_guides/sugon/complie_and_test_cn.md +++ b/docs/dev_guides/sugon/complie_and_test_cn.md @@ -100,4 +100,4 @@ ctest -R test_atan2_op -VV 预期得到如下结果,即为编译和单测环境配置成功 -![图片](../images/sugon_result.png) \ No newline at end of file +![图片](../images/sugon_result.png) diff --git a/docs/dev_guides/sugon/paddle_c86_cn.md b/docs/dev_guides/sugon/paddle_c86_cn.md index f605f3e6bbb..bd02b23b7d2 100644 --- a/docs/dev_guides/sugon/paddle_c86_cn.md +++ b/docs/dev_guides/sugon/paddle_c86_cn.md @@ -80,4 +80,4 @@ Tensor(shape=[2, 3, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True, [[0.03205860, 0.08714432, 0.23688284, 0.64391428], [0.03205860, 0.08714432, 0.23688284, 0.64391428], [0.03205860, 0.08714432, 0.23688284, 0.64391428]]]) -``` \ No newline at end of file +``` diff --git "a/docs/eval/\343\200\220Hackathon No.111\343\200\221 PR.md" "b/docs/eval/\343\200\220Hackathon No.111\343\200\221 PR.md" index dea3806b4e4..2b690185337 100644 --- "a/docs/eval/\343\200\220Hackathon No.111\343\200\221 PR.md" +++ "b/docs/eval/\343\200\220Hackathon No.111\343\200\221 PR.md" @@ -112,8 +112,3 @@ if __name__ == '__main__': 2、语法层面,对numpy的支持性存在一些问题。 3、报错调试,在使用排错日志的时候,没有报错信息直接定位到错误代码的位置,且报错内容提示太多,对新手不友好。建议直接在报错的时候,报错的最后位置,重复一遍,最重要的报错信息,并提示报错代码所在位置。 4、文档层面,在使用指南->动态图转静态图->案例解析 中全部都是动静转化机制的各种API的分章节介绍,建议在案例解析最后增加一个完整的实例代码。 - - - - - diff --git a/docs/guides/06_distributed_training/cluster_quick_start_cn.rst b/docs/guides/06_distributed_training/cluster_quick_start_cn.rst index 9dfded28af4..77d505cd330 100644 --- a/docs/guides/06_distributed_training/cluster_quick_start_cn.rst +++ b/docs/guides/06_distributed_training/cluster_quick_start_cn.rst @@ -9,4 +9,4 @@ 二、ParameterServer训练快速开始 -------------------------------- \ No newline at end of file +------------------------------- diff --git a/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst b/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst index 
f4fad856a2f..8d1e47a2290 100644 --- a/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst +++ b/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst @@ -250,4 +250,3 @@ train_with_fleet.py 相关启动问题,可参考 `paddle.distributed.launch `_。 - diff --git a/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst b/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst index fe7cb347af8..12163ac9c24 100755 --- a/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst +++ b/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst @@ -44,4 +44,3 @@ size 训练效果的目的。具体来说,就是使用若干原有大小的bat if batch_id % k == 0: optimizer.minimize(avg_loss) model.clear_gradients() - diff --git a/docs/guides/06_distributed_training/data_parallel/principle_and_demo_cn.rst b/docs/guides/06_distributed_training/data_parallel/principle_and_demo_cn.rst index 5d3e1df43f9..d8f20f65f8d 100644 --- a/docs/guides/06_distributed_training/data_parallel/principle_and_demo_cn.rst +++ b/docs/guides/06_distributed_training/data_parallel/principle_and_demo_cn.rst @@ -217,4 +217,4 @@ 五、参考文献 ----------------------- -[1] `Highly Scalable Deep Learning Training System with Mixed-Precision: Training ImageNet in Four Minutes `_ \ No newline at end of file +[1] `Highly Scalable Deep Learning Training System with Mixed-Precision: Training ImageNet in Four Minutes `_ diff --git a/docs/guides/06_distributed_training/deployment_cn.rst b/docs/guides/06_distributed_training/deployment_cn.rst index 44fe8f2bed8..ed911ae3691 100644 --- a/docs/guides/06_distributed_training/deployment_cn.rst +++ b/docs/guides/06_distributed_training/deployment_cn.rst @@ -421,4 +421,3 @@ GPU 分布式不生效? 节点数不对? 
* 设置 nnodes 需要设置范围,例如 2:4 * 检查超时设置是否过长 - diff --git a/docs/guides/06_distributed_training/distributed_overview.rst b/docs/guides/06_distributed_training/distributed_overview.rst index dfbd2ba165b..fd11211e1bd 100644 --- a/docs/guides/06_distributed_training/distributed_overview.rst +++ b/docs/guides/06_distributed_training/distributed_overview.rst @@ -69,4 +69,3 @@ Paddle提供了传统纯 CPU 参数服务器、纯 GPU 参数服务器以及异 * Yulong Ao, Zhihua Wu, Dianhai Yu, Weibao Gong, Zhiqing Kui, Minxu Zhang, Zilingfeng Ye, Liang Shen, Yanjun Ma, Tian Wu, Haifeng Wang, Wei Zeng, Chao Yang. `End-to-end Adaptive Distributed Training on PaddlePaddle `__ . * Yang Xiang, Zhihua Wu, Weibao Gong, Siyu Ding, Xianjie Mo, Yuang Liu, Shuohuan Wang, Peng Liu, Yongshuai Hou, Long Li, Bin Wang, Shaohuai Shi, Yaqian Han, Yue Yu, Ge Li, Yu Sun, Yanjun Ma, Dianhai Yu. `Nebula-I: A General Framework for Collaboratively Training Deep Learning Models on Low-Bandwidth Cloud Clusters `__ . - diff --git a/docs/guides/06_distributed_training/fleet_api_howto_cn.rst b/docs/guides/06_distributed_training/fleet_api_howto_cn.rst index 34a73a1830d..1b222b30070 100644 --- a/docs/guides/06_distributed_training/fleet_api_howto_cn.rst +++ b/docs/guides/06_distributed_training/fleet_api_howto_cn.rst @@ -273,5 +273,3 @@ RoleMaker server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"]) fleet.init(role) - - diff --git a/docs/guides/06_distributed_training/group_sharded_parallel_cn.rst b/docs/guides/06_distributed_training/group_sharded_parallel_cn.rst index 59887218cab..c3fa12e8156 100644 --- a/docs/guides/06_distributed_training/group_sharded_parallel_cn.rst +++ b/docs/guides/06_distributed_training/group_sharded_parallel_cn.rst @@ -172,5 +172,3 @@ GroupSharded 结合 amp (O2) + recompute,可以在 8 张 40GB A100 并行的 [2022-05-18 09:35:15,566] [ INFO] - global step 4, epoch: 0, batch: 3, loss: 11.052285194, avg_reader_cost: 0.00022 sec, avg_batch_cost: 0.13303 sec, speed: 7.52 step/s, ips: 61579 tokens/s, learning rate: 2.34375e-07 [2022-05-18 
09:35:15,722] [ INFO] - global step 5, epoch: 0, batch: 4, loss: 11.028432846, avg_reader_cost: 0.00036 sec, avg_batch_cost: 0.15526 sec, speed: 6.44 step/s, ips: 52764 tokens/s, learning rate: 2.81250e-07 [2022-05-18 09:35:15,880] [ INFO] - global step 6, epoch: 0, batch: 5, loss: 11.032807350, avg_reader_cost: 0.00021 sec, avg_batch_cost: 0.15763 sec, speed: 6.34 step/s, ips: 51971 tokens/s, learning rate: 3.28125e-07 - - diff --git a/docs/guides/06_distributed_training/model_parallel_cn.rst b/docs/guides/06_distributed_training/model_parallel_cn.rst index fae7aea5b05..83083ea85ae 100644 --- a/docs/guides/06_distributed_training/model_parallel_cn.rst +++ b/docs/guides/06_distributed_training/model_parallel_cn.rst @@ -364,4 +364,4 @@ 四、参考文献 ----------------------- -[1] `NVIDIA/Megatron-LM: Ongoing research training transformer `__ \ No newline at end of file +[1] `NVIDIA/Megatron-LM: Ongoing research training transformer `__ diff --git a/docs/guides/06_distributed_training/pipeline_parallel_cn.rst b/docs/guides/06_distributed_training/pipeline_parallel_cn.rst index c0c76e574ac..040499be557 100644 --- a/docs/guides/06_distributed_training/pipeline_parallel_cn.rst +++ b/docs/guides/06_distributed_training/pipeline_parallel_cn.rst @@ -304,4 +304,3 @@ model.train_batch(...):这一步主要就是执行1F1B的流水线并行方式 pp_loss: [2.2849925] pp_loss: [2.2974687] pp_loss: [2.3173313] - diff --git a/docs/guides/10_contribution/community_contribution_cn.md b/docs/guides/10_contribution/community_contribution_cn.md index a42190894f6..b7fcf595663 100644 --- a/docs/guides/10_contribution/community_contribution_cn.md +++ b/docs/guides/10_contribution/community_contribution_cn.md @@ -80,4 +80,4 @@ GitHub: [PaddlePaddle/Paddle](https://github.com/PaddlePaddle/Paddle)、Gitee: [查看我所在的本地开源社区](https://www.paddlepaddle.org.cn/ppdenavigategroup) -加入飞桨领航团QQ群:484908840 \ No newline at end of file +加入飞桨领航团QQ群:484908840 diff --git a/docs/guides/beginner/index_cn.rst b/docs/guides/beginner/index_cn.rst index 
6de9626265a..3142d7db4c2 100644 --- a/docs/guides/beginner/index_cn.rst +++ b/docs/guides/beginner/index_cn.rst @@ -25,4 +25,3 @@ model_cn.ipynb train_eval_predict_cn.rst model_save_load_cn.rst - diff --git a/docs/guides/beginner/index_en.rst b/docs/guides/beginner/index_en.rst index 403bb7c6dff..8e0cf99f2ee 100644 --- a/docs/guides/beginner/index_en.rst +++ b/docs/guides/beginner/index_en.rst @@ -9,4 +9,3 @@ Model Development :hidden: tensor_en.md - diff --git a/docs/guides/beginner/model_save_load_cn.rst b/docs/guides/beginner/model_save_load_cn.rst index aaadeffb654..205bf4f7127 100644 --- a/docs/guides/beginner/model_save_load_cn.rst +++ b/docs/guides/beginner/model_save_load_cn.rst @@ -741,4 +741,3 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 ################### 如果你是从飞桨框架1.x切换到2.1,曾经使用飞桨框架1.x的fluid相关接口保存模型或者参数,飞桨框架2.1也对这种情况进行了兼容性支持,请参考 :ref:`兼容载入旧格式模型 ` - diff --git a/docs/guides/custom_op/index_cn.rst b/docs/guides/custom_op/index_cn.rst index 87950c1343c..92dc6f027e5 100644 --- a/docs/guides/custom_op/index_cn.rst +++ b/docs/guides/custom_op/index_cn.rst @@ -17,4 +17,3 @@ new_cpp_op_cn.md new_python_op_cn.md - diff --git a/docs/guides/flags/cudnn_cn.rst b/docs/guides/flags/cudnn_cn.rst index 26d259d4ab0..8151c80d429 100644 --- a/docs/guides/flags/cudnn_cn.rst +++ b/docs/guides/flags/cudnn_cn.rst @@ -68,4 +68,4 @@ Bool型,缺省值为False。 示例 ------- -FLAGS_cudnn_exhaustive_search=True - 使用穷举搜索方法来选择卷积算法。 \ No newline at end of file +FLAGS_cudnn_exhaustive_search=True - 使用穷举搜索方法来选择卷积算法。 diff --git a/docs/guides/flags/data_cn.rst b/docs/guides/flags/data_cn.rst index db4bd5e3cd3..efc400902dc 100644 --- a/docs/guides/flags/data_cn.rst +++ b/docs/guides/flags/data_cn.rst @@ -43,4 +43,4 @@ FLAGS_use_mkldnn仅用于python训练和预测脚本。要在CAPI中启用MKL-DN 英特尔(R)Xeon(R)处理器E3,E5和E7系列(原Sandy Bridge,Ivy Bridge,Haswell和Broadwell); 英特尔(R)Xeon(R)可扩展处理器(原Skylake和Cascade Lake); 英特尔(R)Xeon Phi(TM)处理器(原Knights Landing and Knights Mill); -兼容处理器。 \ No newline at end of file +兼容处理器。 diff --git 
a/docs/guides/flags/debug_en.rst b/docs/guides/flags/debug_en.rst index 62f20df686c..83c52ea152f 100644 --- a/docs/guides/flags/debug_en.rst +++ b/docs/guides/flags/debug_en.rst @@ -83,4 +83,4 @@ This flag will work only when you are using py_reader. .. toctree:: :hidden: - check_nan_inf_en.md \ No newline at end of file + check_nan_inf_en.md diff --git a/docs/guides/flags/device_cn.rst b/docs/guides/flags/device_cn.rst index 0bed575e98c..143518d5fac 100644 --- a/docs/guides/flags/device_cn.rst +++ b/docs/guides/flags/device_cn.rst @@ -34,4 +34,4 @@ FLAGS_selected_gpus=0,1,2,3,4,5,6,7 - 令0-7号GPU设备用于训练和预测。 注意 ------- -使用该flag的原因是我们希望在GPU设备之间使用聚合通信,但通过CUDA_VISIBLE_DEVICES只能使用共享内存。 \ No newline at end of file +使用该flag的原因是我们希望在GPU设备之间使用聚合通信,但通过CUDA_VISIBLE_DEVICES只能使用共享内存。 diff --git a/docs/guides/flags/device_en.rst b/docs/guides/flags/device_en.rst index 5397ee9fc9c..960280f763b 100644 --- a/docs/guides/flags/device_en.rst +++ b/docs/guides/flags/device_en.rst @@ -34,4 +34,4 @@ FLAGS_selected_gpus=0,1,2,3,4,5,6,7 makes GPU devices 0-7 to be used for trainin Note ------- -The reason for using this flag is that we want to use collective communication between GPU devices, but with CUDA_VISIBLE_DEVICES can only use share-memory. \ No newline at end of file +The reason for using this flag is that we want to use collective communication between GPU devices, but with CUDA_VISIBLE_DEVICES can only use share-memory. 
diff --git a/docs/guides/flags/distributed_cn.rst b/docs/guides/flags/distributed_cn.rst index e8e1697f59a..f786f925e16 100644 --- a/docs/guides/flags/distributed_cn.rst +++ b/docs/guides/flags/distributed_cn.rst @@ -268,4 +268,4 @@ Bool型,缺省值为false。 示例 ------- -FLAGS_allreduce_record_one_event=true - 使allreduce操作只等待一个事件而不是多个事件。 \ No newline at end of file +FLAGS_allreduce_record_one_event=true - 使allreduce操作只等待一个事件而不是多个事件。 diff --git a/docs/guides/flags/distributed_en.rst b/docs/guides/flags/distributed_en.rst index c412857811d..1c86c8fc867 100644 --- a/docs/guides/flags/distributed_en.rst +++ b/docs/guides/flags/distributed_en.rst @@ -268,4 +268,4 @@ Bool. The default value is false. Example ------- -FLAGS_allreduce_record_one_event=true would make the allreduce operations would only wait one event instead of multiple events. \ No newline at end of file +FLAGS_allreduce_record_one_event=true would make the allreduce operations would only wait one event instead of multiple events. diff --git a/docs/guides/flags/executor_cn.rst b/docs/guides/flags/executor_cn.rst index 56c3c7b04dc..3882b185552 100644 --- a/docs/guides/flags/executor_cn.rst +++ b/docs/guides/flags/executor_cn.rst @@ -64,4 +64,4 @@ FLAGS_use_ngraph=True - 开启使用nGraph运行。 注意 ------- -英特尔nGraph目前仅在少数模型中支持。我们只验证了[ResNet-50](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_ngraph.md)的训练和预测。 \ No newline at end of file +英特尔nGraph目前仅在少数模型中支持。我们只验证了[ResNet-50](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_ngraph.md)的训练和预测。 diff --git a/docs/guides/flags/executor_en.rst b/docs/guides/flags/executor_en.rst index 7a262c00163..689b5fa4d54 100644 --- a/docs/guides/flags/executor_en.rst +++ b/docs/guides/flags/executor_en.rst @@ -64,4 +64,4 @@ FLAGS_use_ngraph=True will enable running with nGraph support. Note ------- -Intel nGraph is only supported in few models yet. 
We have only verified [ResNet-50](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_ngraph.md) training and inference. \ No newline at end of file +Intel nGraph is only supported in few models yet. We have only verified [ResNet-50](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_ngraph.md) training and inference. diff --git a/docs/guides/flags/flags_en.rst b/docs/guides/flags/flags_en.rst index 6a77675cca4..1b3dbabeea7 100644 --- a/docs/guides/flags/flags_en.rst +++ b/docs/guides/flags/flags_en.rst @@ -29,49 +29,3 @@ FLAGS Quick Search memory_en.rst npu_en.rst others_en.rst - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/guides/flags/npu_cn.rst b/docs/guides/flags/npu_cn.rst index 285bd671fc1..33702337c56 100644 --- a/docs/guides/flags/npu_cn.rst +++ b/docs/guides/flags/npu_cn.rst @@ -19,4 +19,3 @@ String型,取值范围:['force_fp32', 'force_fp16', 'allow_fp32_to_fp16', 'mus 示例 ------- FLAGS_npu_precision_mode="allow_mix_precision" - 表示使用混合精度模式。 - diff --git a/docs/guides/flags/npu_en.rst b/docs/guides/flags/npu_en.rst index 94659cb8902..5b9ae79b04a 100644 --- a/docs/guides/flags/npu_en.rst +++ b/docs/guides/flags/npu_en.rst @@ -19,4 +19,3 @@ Please refer to `here `_ ", "`paddle.nn.LSTMCell `_ ", "False" "360", "`paddle.fluid.layers.LSTMCell `_ ", "`paddle.nn.LSTMCell `_ ", "False" "361", "`paddle.fluid.optimizer.RMSProp `_ ", "`paddle.optimizer.RMSProp `_ ", "True" - "362", "`paddle.fluid.layers.gru_unit `_ ", "`paddle.nn.GRUCell `_ ", "False" \ No newline at end of file + "362", "`paddle.fluid.layers.gru_unit `_ ", "`paddle.nn.GRUCell `_ ", "False" diff --git a/docs/guides/performance_improving/analysis_tools/index_en.rst b/docs/guides/performance_improving/analysis_tools/index_en.rst index c303e49f349..abacd2fb5fe 100644 --- a/docs/guides/performance_improving/analysis_tools/index_en.rst +++ 
b/docs/guides/performance_improving/analysis_tools/index_en.rst @@ -16,5 +16,3 @@ This section illustrates how to optimize performance of Fluid: - `CPU profiling `_:How to use cProfile, yep, and Google perftools to profile and optimize model performance - `Heap Memory Profiling and Optimization `_:Use gperftool to perform Heap Memory Profiling and Optimization to solve memory leaks. - `How to use timeline tool to do profiling `_ :How to use timeline tool to do profile and optimization - - diff --git a/docs/guides/performance_improving/index_cn.rst b/docs/guides/performance_improving/index_cn.rst index 13208283134..601deacf335 100644 --- a/docs/guides/performance_improving/index_cn.rst +++ b/docs/guides/performance_improving/index_cn.rst @@ -13,4 +13,4 @@ amp_cn.md quantization.md - profiling_model.md \ No newline at end of file + profiling_model.md diff --git a/docs/guides/performance_improving/index_en.rst b/docs/guides/performance_improving/index_en.rst index a8a3e52ba1f..bac97a83052 100644 --- a/docs/guides/performance_improving/index_en.rst +++ b/docs/guides/performance_improving/index_en.rst @@ -8,4 +8,4 @@ Performance Improving .. 
toctree:: :hidden: - amp_en.md \ No newline at end of file + amp_en.md diff --git a/docs/guides/performance_improving/memory_optimize.rst b/docs/guides/performance_improving/memory_optimize.rst index ae9238813a6..23d19d912fc 100644 --- a/docs/guides/performance_improving/memory_optimize.rst +++ b/docs/guides/performance_improving/memory_optimize.rst @@ -155,4 +155,3 @@ Inplace策略适用于使用ParallelExecutor或CompiledProgram+with_data_paralle - 开启Inplace策略:设置 :code:`build_strategy.enable_inplace = True` ,并在<1.6版本中设置fetch_list中的 :code:`var.persistable = True` 。 **在1.6+的版本中,上述最佳策略均已默认打开,无需手动配置,亦无需设置fetch_list变量为persistable。** - diff --git a/docs/guides/performance_improving/memory_optimize_en.rst b/docs/guides/performance_improving/memory_optimize_en.rst index 220703db90f..5654c982924 100644 --- a/docs/guides/performance_improving/memory_optimize_en.rst +++ b/docs/guides/performance_improving/memory_optimize_en.rst @@ -175,4 +175,3 @@ We recommend the best memory optimization strategy as: - Enable Inplace strategy:set :code:`build_strategy.enable_inplace = True`, and set variables in fetch_list to persistable using :code:`var.persistable = True` when the version of PaddlePaddle < 1.6. 
**Since version 1.6+, the above optimal strategy have been enabled by default and setting variables in fetch_list to persistable is not needed.** - diff --git a/docs/index_cn.rst b/docs/index_cn.rst index d7478e13446..792d50a5a95 100644 --- a/docs/index_cn.rst +++ b/docs/index_cn.rst @@ -18,4 +18,3 @@ dev_guides/index_cn.rst faq/index_cn.rst release_note_cn.md - diff --git a/docs/index_en.rst b/docs/index_en.rst index f9f0fdc16d5..bf1f8001ed0 100644 --- a/docs/index_en.rst +++ b/docs/index_en.rst @@ -10,4 +10,3 @@ api/index_en.rst dev_guides/index_en.rst release_note_en.md - diff --git a/docs/practices/jit/index_cn.rst b/docs/practices/jit/index_cn.rst index 30bd40ab0c0..314eb9a650a 100644 --- a/docs/practices/jit/index_cn.rst +++ b/docs/practices/jit/index_cn.rst @@ -10,4 +10,4 @@ :hidden: :titlesonly: - image_search_with_jit.ipynb \ No newline at end of file + image_search_with_jit.ipynb diff --git a/docs/practices/nlp/index_cn.rst b/docs/practices/nlp/index_cn.rst index a8d01d80289..821630b415d 100644 --- a/docs/practices/nlp/index_cn.rst +++ b/docs/practices/nlp/index_cn.rst @@ -19,4 +19,4 @@ imdb_bow_classification.ipynb pretrained_word_embeddings.ipynb seq2seq_with_attention.ipynb - addition_rnn.ipynb \ No newline at end of file + addition_rnn.ipynb diff --git a/docs/practices/quick_start/index_cn.rst b/docs/practices/quick_start/index_cn.rst index 28b6acd8696..26123949dc0 100644 --- a/docs/practices/quick_start/index_cn.rst +++ b/docs/practices/quick_start/index_cn.rst @@ -21,4 +21,4 @@ dynamic_graph.ipynb high_level_api.ipynb save_model.ipynb - linear_regression.ipynb \ No newline at end of file + linear_regression.ipynb From 231f8dbc2e7d34bc57ab28460ca09c20fcf3e2d5 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 09:53:10 +0000 Subject: [PATCH 05/20] insert-whitespace-between-cn-and-en-char --- CONTRIBUTING_cn.md | 20 +- README_cn.md | 4 +- .../performance_improving/amp/amp.md | 88 +-- .../paddle_xpu_infer_cn.md | 22 +- 
.../cpu_train_best_practice.rst | 52 +- .../cpu_train_best_practice_en.rst | 2 +- .../dist_training_gpu.rst | 74 +- .../gpu_training_with_low_bandwidth_dgc.md | 70 +- .../gpu_training_with_recompute.rst | 92 +-- docs/api/index_cn.rst | 60 +- docs/api/paddle/CUDAPlace_cn.rst | 4 +- docs/api/paddle/DataParallel_cn.rst | 24 +- docs/api/paddle/Model_cn.rst | 6 +- docs/api/paddle/NPUPlace_cn.rst | 4 +- docs/api/paddle/Overview_cn.rst | 342 ++++----- docs/api/paddle/ParamAttr_cn.rst | 14 +- docs/api/paddle/Tensor_cn.rst | 506 +++++++------- docs/api/paddle/abs_cn.rst | 4 +- docs/api/paddle/acos_cn.rst | 6 +- docs/api/paddle/acosh_cn.rst | 6 +- docs/api/paddle/add_cn.rst | 10 +- docs/api/paddle/add_n_cn.rst | 2 +- docs/api/paddle/addmm_cn.rst | 4 +- docs/api/paddle/all_cn.rst | 8 +- docs/api/paddle/allclose_cn.rst | 12 +- docs/api/paddle/amax_cn.rst | 10 +- docs/api/paddle/amin_cn.rst | 10 +- docs/api/paddle/amp/GradScaler_cn.rst | 96 +-- docs/api/paddle/amp/Overview_cn.rst | 28 +- docs/api/paddle/amp/auto_cast_cn.rst | 4 +- docs/api/paddle/amp/decorate_cn.rst | 12 +- docs/api/paddle/angle_cn.rst | 4 +- docs/api/paddle/arange_cn.rst | 10 +- docs/api/paddle/argmax_cn.rst | 6 +- docs/api/paddle/argmin_cn.rst | 2 +- docs/api/paddle/asin_cn.rst | 6 +- docs/api/paddle/asinh_cn.rst | 6 +- docs/api/paddle/atan2_cn.rst | 10 +- docs/api/paddle/atan_cn.rst | 6 +- docs/api/paddle/atanh_cn.rst | 6 +- .../api/paddle/autograd/PyLayerContext_cn.rst | 2 +- docs/api/paddle/autograd/PyLayer_cn.rst | 6 +- docs/api/paddle/autograd/backward_cn.rst | 6 +- docs/api/paddle/batch_cn.rst | 8 +- docs/api/paddle/bincount_cn.rst | 10 +- docs/api/paddle/bitwise_and_cn.rst | 6 +- docs/api/paddle/bitwise_not_cn.rst | 6 +- docs/api/paddle/bitwise_or_cn.rst | 6 +- docs/api/paddle/bitwise_xor_cn.rst | 6 +- docs/api/paddle/bmm_cn.rst | 4 +- docs/api/paddle/broadcast_shape_cn.rst | 8 +- docs/api/paddle/broadcast_tensors_cn.rst | 10 +- docs/api/paddle/broadcast_to_cn.rst | 6 +- 
docs/api/paddle/callbacks/Callback_cn.rst | 62 +- .../api/paddle/callbacks/EarlyStopping_cn.rst | 12 +- docs/api/paddle/callbacks/LRScheduler_cn.rst | 4 +- .../paddle/callbacks/ModelCheckpoint_cn.rst | 4 +- docs/api/paddle/callbacks/Overview_cn.rst | 8 +- .../api/paddle/callbacks/ProgBarLogger_cn.rst | 6 +- .../paddle/callbacks/ReduceLROnPlateau_cn.rst | 10 +- docs/api/paddle/callbacks/VisualDL_cn.rst | 2 +- docs/api/paddle/cast_cn.rst | 4 +- docs/api/paddle/ceil_cn.rst | 4 +- docs/api/paddle/chunk_cn.rst | 10 +- docs/api/paddle/clip_cn.rst | 6 +- docs/api/paddle/clone_cn.rst | 8 +- docs/api/paddle/compat/floor_division_cn.rst | 6 +- .../compat/get_exception_message_cn.rst | 2 +- docs/api/paddle/compat/long_type_cn.rst | 2 +- docs/api/paddle/compat/round_cn.rst | 2 +- docs/api/paddle/compat/to_bytes_cn.rst | 10 +- docs/api/paddle/compat/to_text_cn.rst | 10 +- docs/api/paddle/complex_cn.rst | 2 +- docs/api/paddle/concat_cn.rst | 2 +- docs/api/paddle/cos_cn.rst | 4 +- docs/api/paddle/cosh_cn.rst | 4 +- docs/api/paddle/crop_cn.rst | 12 +- docs/api/paddle/cross_cn.rst | 4 +- docs/api/paddle/cumprod_cn.rst | 8 +- docs/api/paddle/cumsum_cn.rst | 6 +- docs/api/paddle/deg2rad_cn.rst | 6 +- docs/api/paddle/device/XPUPlace_cn.rst | 4 +- docs/api/paddle/device/cuda/Event_cn.rst | 18 +- docs/api/paddle/device/cuda/Stream_cn.rst | 26 +- .../paddle/device/cuda/current_stream_cn.rst | 6 +- .../paddle/device/cuda/device_count_cn.rst | 4 +- .../device/cuda/get_device_capability_cn.rst | 4 +- .../paddle/device/cuda/get_device_name_cn.rst | 4 +- .../device/cuda/get_device_properties_cn.rst | 4 +- .../device/cuda/max_memory_allocated_cn.rst | 8 +- .../device/cuda/max_memory_reserved_cn.rst | 6 +- .../device/cuda/memory_allocated_cn.rst | 8 +- .../paddle/device/cuda/memory_reserved_cn.rst | 6 +- .../paddle/device/cuda/stream_guard_cn.rst | 4 +- .../api/paddle/device/cuda/synchronize_cn.rst | 4 +- .../paddle/device/get_cudnn_version_cn.rst | 4 +- docs/api/paddle/device/get_device_cn.rst 
| 2 +- .../device/is_compiled_with_cinn_cn.rst | 2 +- .../device/is_compiled_with_cuda_cn.rst | 4 +- .../paddle/device/is_compiled_with_ipu_cn.rst | 4 +- .../paddle/device/is_compiled_with_mlu_cn.rst | 4 +- .../paddle/device/is_compiled_with_npu_cn.rst | 2 +- .../device/is_compiled_with_rocm_cn.rst | 4 +- .../paddle/device/is_compiled_with_xpu_cn.rst | 4 +- docs/api/paddle/device/set_device_cn.rst | 6 +- docs/api/paddle/diag_cn.rst | 10 +- docs/api/paddle/diagflat_cn.rst | 4 +- docs/api/paddle/diagonal_cn.rst | 2 +- docs/api/paddle/diff_cn.rst | 10 +- docs/api/paddle/digamma_cn.rst | 6 +- docs/api/paddle/disable_signal_handler_cn.rst | 16 +- docs/api/paddle/disable_static_cn.rst | 4 +- docs/api/paddle/dist_cn.rst | 18 +- .../paddle/distributed/InMemoryDataset_cn.rst | 102 +-- docs/api/paddle/distributed/Overview_cn.rst | 46 +- .../api/paddle/distributed/ParallelEnv_cn.rst | 16 +- .../paddle/distributed/QueueDataset_cn.rst | 24 +- docs/api/paddle/distributed/all_gather_cn.rst | 12 +- docs/api/paddle/distributed/all_reduce_cn.rst | 10 +- docs/api/paddle/distributed/alltoall_cn.rst | 14 +- docs/api/paddle/distributed/barrier_cn.rst | 2 +- docs/api/paddle/distributed/broadcast_cn.rst | 8 +- .../fleet/DistributedStrategy_cn.rst | 114 +-- .../api/paddle/distributed/fleet/Fleet_cn.rst | 106 +-- .../fleet/PaddleCloudRoleMaker_cn.rst | 4 +- .../fleet/UserDefinedRoleMaker_cn.rst | 4 +- .../paddle/distributed/fleet/UtilBase_cn.rst | 14 +- .../distributed/fleet/utils/HDFSClient_cn.rst | 40 +- .../distributed/fleet/utils/LocalFS_cn.rst | 2 +- .../distributed/fleet/utils/recompute_cn.rst | 6 +- docs/api/paddle/distributed/get_rank_cn.rst | 6 +- .../paddle/distributed/get_world_size_cn.rst | 2 +- docs/api/paddle/distributed/irecv_cn.rst | 8 +- .../paddle/distributed/is_initialized_cn.rst | 2 +- docs/api/paddle/distributed/isend_cn.rst | 8 +- docs/api/paddle/distributed/launch_cn.rst | 28 +- docs/api/paddle/distributed/recv_cn.rst | 6 +- docs/api/paddle/distributed/reduce_cn.rst | 
10 +- .../paddle/distributed/reduce_scatter_cn.rst | 8 +- docs/api/paddle/distributed/scatter_cn.rst | 14 +- docs/api/paddle/distributed/send_cn.rst | 6 +- .../sharding/group_sharded_parallel_cn.rst | 24 +- .../sharding/save_group_sharded_model_cn.rst | 8 +- docs/api/paddle/distributed/spawn_cn.rst | 8 +- docs/api/paddle/distributed/split_cn.rst | 36 +- .../distributed/utils/global_gather_cn.rst | 36 +- .../distributed/utils/global_scatter_cn.rst | 42 +- docs/api/paddle/distribution/Beta_cn.rst | 30 +- .../paddle/distribution/Categorical_cn.rst | 24 +- docs/api/paddle/distribution/Dirichlet_cn.rst | 22 +- .../paddle/distribution/Distribution_cn.rst | 8 +- .../paddle/distribution/Independent_cn.rst | 10 +- .../paddle/distribution/Multinomial_cn.rst | 10 +- docs/api/paddle/distribution/Normal_cn.rst | 24 +- docs/api/paddle/distribution/Overview_cn.rst | 50 +- .../distribution/ReshapeTransform_cn.rst | 6 +- .../distribution/SigmoidTransform_cn.rst | 2 +- .../distribution/SoftmaxTransform_cn.rst | 6 +- .../paddle/distribution/StackTransform_cn.rst | 4 +- .../StickBreakingTransform_cn.rst | 2 +- .../paddle/distribution/TanhTransform_cn.rst | 2 +- .../TransformedDistribution_cn.rst | 8 +- docs/api/paddle/distribution/Uniform_cn.rst | 20 +- .../paddle/distribution/kl_divergence_cn.rst | 8 +- .../paddle/distribution/register_kl_cn.rst | 8 +- docs/api/paddle/divide_cn.rst | 14 +- docs/api/paddle/dot_cn.rst | 6 +- docs/api/paddle/einsum_cn.rst | 14 +- docs/api/paddle/empty_like_cn.rst | 8 +- docs/api/paddle/enable_static_cn.rst | 2 +- docs/api/paddle/equal_all_cn.rst | 6 +- docs/api/paddle/equal_cn.rst | 4 +- docs/api/paddle/erfinv_cn.rst | 10 +- docs/api/paddle/exp_cn.rst | 6 +- docs/api/paddle/expand_as_cn.rst | 2 +- docs/api/paddle/expand_cn.rst | 4 +- docs/api/paddle/expm1_cn.rst | 6 +- docs/api/paddle/eye_cn.rst | 10 +- docs/api/paddle/fft/Overview_cn.rst | 10 +- docs/api/paddle/fft/fft_cn.rst | 2 +- docs/api/paddle/fft/ifft_cn.rst | 2 +- 
docs/api/paddle/fft/ihfft_cn.rst | 2 +- docs/api/paddle/fft/rfft_cn.rst | 2 +- docs/api/paddle/flatten_cn.rst | 4 +- docs/api/paddle/flip_cn.rst | 2 +- docs/api/paddle/floor_cn.rst | 4 +- docs/api/paddle/floor_divide_cn.rst | 10 +- docs/api/paddle/flops_cn.rst | 2 +- docs/api/paddle/fmax_cn.rst | 10 +- docs/api/paddle/fmin_cn.rst | 10 +- docs/api/paddle/frac_cn.rst | 2 +- docs/api/paddle/full_cn.rst | 10 +- docs/api/paddle/full_like_cn.rst | 4 +- docs/api/paddle/gather_nd_cn.rst | 6 +- docs/api/paddle/gcd_cn.rst | 10 +- docs/api/paddle/get_cuda_rng_state_cn.rst | 2 +- docs/api/paddle/get_default_dtype_cn.rst | 4 +- docs/api/paddle/get_flags_cn.rst | 4 +- docs/api/paddle/grad_cn.rst | 20 +- docs/api/paddle/greater_equal_cn.rst | 4 +- docs/api/paddle/histogram_cn.rst | 10 +- docs/api/paddle/hub/Overview_cn.rst | 10 +- docs/api/paddle/hub/help_cn.rst | 8 +- docs/api/paddle/hub/list_cn.rst | 8 +- docs/api/paddle/hub/load_cn.rst | 8 +- docs/api/paddle/in_dynamic_mode_cn.rst | 6 +- docs/api/paddle/increment_cn.rst | 2 +- docs/api/paddle/incubate/LookAhead_cn.rst | 20 +- docs/api/paddle/incubate/ModelAverage_cn.rst | 4 +- .../paddle/incubate/autograd/Hessian_cn.rst | 28 +- .../paddle/incubate/autograd/Jacobian_cn.rst | 30 +- .../paddle/incubate/autograd/Overview_cn.rst | 28 +- docs/api/paddle/incubate/autograd/jvp_cn.rst | 12 +- .../paddle/incubate/autograd/prim2orig_cn.rst | 2 +- docs/api/paddle/incubate/autograd/vjp_cn.rst | 10 +- .../incubate/autotune/set_config_cn.rst | 14 +- .../paddle/incubate/graph_khop_sampler_cn.rst | 14 +- docs/api/paddle/incubate/graph_reindex_cn.rst | 8 +- .../incubate/graph_sample_neighbors_cn.rst | 16 +- .../paddle/incubate/graph_send_recv_cn.rst | 6 +- docs/api/paddle/incubate/identity_loss_cn.rst | 6 +- .../incubate/nn/FusedFeedForward_cn.rst | 12 +- .../nn/FusedMultiHeadAttention_cn.rst | 20 +- .../nn/FusedTransformerEncoderLayer_cn.rst | 16 +- .../nn/functional/fused_feedforward_cn.rst | 34 +- .../fused_multi_head_attention_cn.rst | 32 
+- .../optimizer/functional/minimize_bfgs_cn.rst | 14 +- .../functional/minimize_lbfgs_cn.rst | 14 +- docs/api/paddle/incubate/segment_max_cn.rst | 4 +- docs/api/paddle/incubate/segment_mean_cn.rst | 4 +- docs/api/paddle/incubate/segment_min_cn.rst | 4 +- docs/api/paddle/incubate/segment_sum_cn.rst | 4 +- .../paddle/incubate/softmax_mask_fuse_cn.rst | 8 +- .../softmax_mask_fuse_upper_triangle_cn.rst | 6 +- docs/api/paddle/index_sample_cn.rst | 2 +- docs/api/paddle/index_select_cn.rst | 10 +- docs/api/paddle/inner_cn.rst | 10 +- docs/api/paddle/io/BatchSampler_cn.rst | 14 +- docs/api/paddle/io/DataLoader_cn.rst | 86 +-- docs/api/paddle/io/Dataset_cn.rst | 2 +- .../paddle/io/DistributedBatchSampler_cn.rst | 20 +- docs/api/paddle/io/IterableDataset_cn.rst | 2 +- docs/api/paddle/io/Overview_cn.rst | 34 +- docs/api/paddle/io/RandomSampler_cn.rst | 6 +- docs/api/paddle/io/Sampler_cn.rst | 2 +- docs/api/paddle/io/SequenceSampler_cn.rst | 2 +- docs/api/paddle/io/TensorDataset_cn.rst | 4 +- .../paddle/io/WeightedRandomSampler_cn.rst | 4 +- docs/api/paddle/io/get_worker_info_cn.rst | 2 +- docs/api/paddle/io/random_split_cn.rst | 6 +- docs/api/paddle/is_floating_point_cn.rst | 6 +- docs/api/paddle/isclose_cn.rst | 12 +- docs/api/paddle/isinf_cn.rst | 4 +- docs/api/paddle/isnan_cn.rst | 4 +- docs/api/paddle/jit/Overview_cn.rst | 20 +- docs/api/paddle/jit/ProgramTranslator_cn.rst | 30 +- docs/api/paddle/jit/TracedLayer_cn.rst | 28 +- docs/api/paddle/jit/TranslatedLayer_cn.rst | 6 +- docs/api/paddle/jit/load_cn.rst | 16 +- docs/api/paddle/jit/save_cn.rst | 14 +- docs/api/paddle/jit/set_code_level_cn.rst | 2 +- docs/api/paddle/jit/set_verbosity_cn.rst | 2 +- docs/api/paddle/jit/to_static_cn.rst | 2 +- docs/api/paddle/kron_cn.rst | 2 +- docs/api/paddle/kthvalue_cn.rst | 10 +- docs/api/paddle/lcm_cn.rst | 10 +- docs/api/paddle/lerp_cn.rst | 10 +- docs/api/paddle/less_than_cn.rst | 4 +- docs/api/paddle/lgamma_cn.rst | 4 +- docs/api/paddle/linalg/Overview_cn.rst | 40 +- 
docs/api/paddle/linalg/cholesky_cn.rst | 8 +- docs/api/paddle/linalg/cholesky_solve_cn.rst | 12 +- docs/api/paddle/linalg/cond_cn.rst | 2 +- docs/api/paddle/linalg/corrcoef_cn.rst | 10 +- docs/api/paddle/linalg/cov_cn.rst | 16 +- docs/api/paddle/linalg/det_cn.rst | 4 +- docs/api/paddle/linalg/eig_cn.rst | 10 +- docs/api/paddle/linalg/eigh_cn.rst | 6 +- docs/api/paddle/linalg/eigvals_cn.rst | 6 +- docs/api/paddle/linalg/eigvalsh_cn.rst | 4 +- docs/api/paddle/linalg/inv_cn.rst | 4 +- docs/api/paddle/linalg/lstsq_cn.rst | 4 +- docs/api/paddle/linalg/lu_cn.rst | 22 +- docs/api/paddle/linalg/lu_unpack_cn.rst | 20 +- docs/api/paddle/linalg/matrix_power_cn.rst | 2 +- docs/api/paddle/linalg/matrix_rank_cn.rst | 10 +- docs/api/paddle/linalg/multi_dot_cn.rst | 14 +- docs/api/paddle/linalg/norm_cn.rst | 14 +- docs/api/paddle/linalg/pinv_cn.rst | 12 +- docs/api/paddle/linalg/qr_cn.rst | 10 +- docs/api/paddle/linalg/slogdet_cn.rst | 6 +- docs/api/paddle/linalg/solve_cn.rst | 2 +- docs/api/paddle/linalg/svd_cn.rst | 14 +- .../api/paddle/linalg/triangular_solve_cn.rst | 10 +- docs/api/paddle/linspace_cn.rst | 12 +- docs/api/paddle/load_cn.rst | 6 +- docs/api/paddle/log10_cn.rst | 6 +- docs/api/paddle/log1p_cn.rst | 2 +- docs/api/paddle/log2_cn.rst | 6 +- docs/api/paddle/log_cn.rst | 6 +- docs/api/paddle/logcumsumexp_cn.rst | 6 +- docs/api/paddle/logical_and_cn.rst | 8 +- docs/api/paddle/logical_not_cn.rst | 6 +- docs/api/paddle/logical_or_cn.rst | 8 +- docs/api/paddle/logical_xor_cn.rst | 8 +- docs/api/paddle/logit_cn.rst | 4 +- docs/api/paddle/logsumexp_cn.rst | 6 +- docs/api/paddle/masked_select_cn.rst | 4 +- docs/api/paddle/matmul_cn.rst | 18 +- docs/api/paddle/maximum_cn.rst | 10 +- docs/api/paddle/mean_cn.rst | 6 +- docs/api/paddle/median_cn.rst | 4 +- docs/api/paddle/meshgrid_cn.rst | 4 +- docs/api/paddle/metric/Accuracy_cn.rst | 14 +- docs/api/paddle/metric/Auc_cn.rst | 26 +- docs/api/paddle/metric/Metric_cn.rst | 18 +- docs/api/paddle/metric/Overview_cn.rst | 16 +- 
docs/api/paddle/metric/Precision_cn.rst | 22 +- docs/api/paddle/metric/Recall_cn.rst | 22 +- docs/api/paddle/metric/accuracy_cn.rst | 14 +- docs/api/paddle/mm_cn.rst | 4 +- docs/api/paddle/mod_cn.rst | 8 +- docs/api/paddle/mode_cn.rst | 6 +- docs/api/paddle/moveaxis_cn.rst | 6 +- docs/api/paddle/multinomial_cn.rst | 4 +- docs/api/paddle/multiplex_cn.rst | 18 +- docs/api/paddle/multiply_cn.rst | 8 +- docs/api/paddle/mv_cn.rst | 4 +- docs/api/paddle/nanmean_cn.rst | 6 +- docs/api/paddle/nanmedian_cn.rst | 10 +- docs/api/paddle/neg_cn.rst | 4 +- docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst | 12 +- docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst | 12 +- docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst | 12 +- docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst | 12 +- docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst | 12 +- docs/api/paddle/nn/AlphaDropout_cn.rst | 8 +- docs/api/paddle/nn/AvgPool1D_cn.rst | 20 +- docs/api/paddle/nn/AvgPool2D_cn.rst | 16 +- docs/api/paddle/nn/AvgPool3D_cn.rst | 16 +- docs/api/paddle/nn/BCELoss_cn.rst | 14 +- docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst | 28 +- docs/api/paddle/nn/BatchNorm1D_cn.rst | 14 +- docs/api/paddle/nn/BatchNorm2D_cn.rst | 14 +- docs/api/paddle/nn/BatchNorm3D_cn.rst | 14 +- docs/api/paddle/nn/BatchNorm_cn.rst | 22 +- docs/api/paddle/nn/BeamSearchDecoder_cn.rst | 96 +-- docs/api/paddle/nn/BiRNN_cn.rst | 20 +- docs/api/paddle/nn/Bilinear_cn.rst | 10 +- docs/api/paddle/nn/CELU_cn.rst | 10 +- docs/api/paddle/nn/CTCLoss_cn.rst | 12 +- .../api/paddle/nn/ClipGradByGlobalNorm_cn.rst | 2 +- docs/api/paddle/nn/ClipGradByNorm_cn.rst | 8 +- docs/api/paddle/nn/ClipGradByValue_cn.rst | 4 +- docs/api/paddle/nn/Conv1DTranspose_cn.rst | 18 +- docs/api/paddle/nn/Conv1D_cn.rst | 20 +- docs/api/paddle/nn/Conv2DTranspose_cn.rst | 20 +- docs/api/paddle/nn/Conv2D_cn.rst | 20 +- docs/api/paddle/nn/Conv3DTranspose_cn.rst | 22 +- docs/api/paddle/nn/Conv3D_cn.rst | 20 +- docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst | 20 +- docs/api/paddle/nn/CosineSimilarity_cn.rst 
| 6 +- docs/api/paddle/nn/CrossEntropyLoss_cn.rst | 20 +- docs/api/paddle/nn/Dropout2D_cn.rst | 6 +- docs/api/paddle/nn/Dropout3D_cn.rst | 6 +- docs/api/paddle/nn/Dropout_cn.rst | 8 +- docs/api/paddle/nn/ELU_cn.rst | 10 +- docs/api/paddle/nn/Embedding_cn.rst | 20 +- docs/api/paddle/nn/Flatten_cn.rst | 4 +- docs/api/paddle/nn/Fold_cn.rst | 18 +- docs/api/paddle/nn/GELU_cn.rst | 8 +- docs/api/paddle/nn/GRUCell_cn.rst | 24 +- docs/api/paddle/nn/GRU_cn.rst | 28 +- docs/api/paddle/nn/GroupNorm_cn.rst | 8 +- docs/api/paddle/nn/HSigmoidLoss_cn.rst | 28 +- docs/api/paddle/nn/Hardshrink_cn.rst | 8 +- docs/api/paddle/nn/Hardsigmoid_cn.rst | 6 +- docs/api/paddle/nn/Hardswish_cn.rst | 6 +- docs/api/paddle/nn/Hardtanh_cn.rst | 10 +- docs/api/paddle/nn/Identity_cn.rst | 6 +- docs/api/paddle/nn/InstanceNorm1D_cn.rst | 12 +- docs/api/paddle/nn/InstanceNorm2D_cn.rst | 12 +- docs/api/paddle/nn/InstanceNorm3D_cn.rst | 12 +- docs/api/paddle/nn/KLDivLoss_cn.rst | 14 +- docs/api/paddle/nn/L1Loss_cn.rst | 6 +- docs/api/paddle/nn/LSTMCell_cn.rst | 26 +- docs/api/paddle/nn/LSTM_cn.rst | 30 +- docs/api/paddle/nn/LayerDict_cn.rst | 16 +- docs/api/paddle/nn/LayerList_cn.rst | 8 +- docs/api/paddle/nn/LayerNorm_cn.rst | 8 +- docs/api/paddle/nn/Layer_cn.rst | 112 +-- docs/api/paddle/nn/LeakyReLU_cn.rst | 6 +- docs/api/paddle/nn/Linear_cn.rst | 16 +- docs/api/paddle/nn/LocalResponseNorm_cn.rst | 10 +- docs/api/paddle/nn/LogSigmoid_cn.rst | 6 +- docs/api/paddle/nn/LogSoftmax_cn.rst | 8 +- docs/api/paddle/nn/MSELoss_cn.rst | 12 +- docs/api/paddle/nn/MarginRankingLoss_cn.rst | 10 +- docs/api/paddle/nn/MaxPool1D_cn.rst | 20 +- docs/api/paddle/nn/MaxPool2D_cn.rst | 18 +- docs/api/paddle/nn/MaxPool3D_cn.rst | 18 +- docs/api/paddle/nn/MaxUnPool1D_cn.rst | 12 +- docs/api/paddle/nn/MaxUnPool2D_cn.rst | 12 +- docs/api/paddle/nn/MaxUnPool3D_cn.rst | 12 +- docs/api/paddle/nn/Maxout_cn.rst | 10 +- docs/api/paddle/nn/Mish_cn.rst | 6 +- docs/api/paddle/nn/MultiHeadAttention_cn.rst | 8 +- 
.../paddle/nn/MultiLabelSoftMarginLoss_cn.rst | 12 +- docs/api/paddle/nn/NLLLoss_cn.rst | 16 +- docs/api/paddle/nn/Overview_cn.rst | 402 +++++------ docs/api/paddle/nn/PReLU_cn.rst | 12 +- docs/api/paddle/nn/Pad1D_cn.rst | 8 +- docs/api/paddle/nn/Pad2D_cn.rst | 8 +- docs/api/paddle/nn/Pad3D_cn.rst | 8 +- docs/api/paddle/nn/PairwiseDistance_cn.rst | 12 +- docs/api/paddle/nn/ParameterList_cn.rst | 4 +- docs/api/paddle/nn/PixelShuffle_cn.rst | 10 +- docs/api/paddle/nn/RNNCellBase_cn.rst | 14 +- docs/api/paddle/nn/RNN_cn.rst | 20 +- docs/api/paddle/nn/RReLU_cn.rst | 10 +- docs/api/paddle/nn/ReLU6_cn.rst | 6 +- docs/api/paddle/nn/ReLU_cn.rst | 6 +- docs/api/paddle/nn/SELU_cn.rst | 10 +- docs/api/paddle/nn/Sequential_cn.rst | 4 +- docs/api/paddle/nn/Sigmoid_cn.rst | 2 +- docs/api/paddle/nn/Silu_cn.rst | 6 +- docs/api/paddle/nn/SimpleRNNCell_cn.rst | 24 +- docs/api/paddle/nn/SimpleRNN_cn.rst | 28 +- docs/api/paddle/nn/SmoothL1Loss_cn.rst | 12 +- docs/api/paddle/nn/SoftMarginLoss_cn.rst | 12 +- docs/api/paddle/nn/Softmax_cn.rst | 26 +- docs/api/paddle/nn/Softplus_cn.rst | 12 +- docs/api/paddle/nn/Softshrink_cn.rst | 8 +- docs/api/paddle/nn/Softsign_cn.rst | 6 +- docs/api/paddle/nn/SpectralNorm_cn.rst | 14 +- docs/api/paddle/nn/Swish_cn.rst | 6 +- docs/api/paddle/nn/SyncBatchNorm_cn.rst | 16 +- docs/api/paddle/nn/Tanh_cn.rst | 6 +- docs/api/paddle/nn/Tanhshrink_cn.rst | 6 +- docs/api/paddle/nn/ThresholdedReLU_cn.rst | 8 +- .../paddle/nn/TransformerDecoderLayer_cn.rst | 12 +- docs/api/paddle/nn/TransformerDecoder_cn.rst | 6 +- .../paddle/nn/TransformerEncoderLayer_cn.rst | 14 +- docs/api/paddle/nn/TransformerEncoder_cn.rst | 6 +- docs/api/paddle/nn/Transformer_cn.rst | 18 +- docs/api/paddle/nn/TripletMarginLoss_cn.rst | 20 +- .../nn/TripletMarginWithDistanceLoss_cn.rst | 18 +- docs/api/paddle/nn/Unfold_cn.rst | 16 +- docs/api/paddle/nn/Upsample_cn.rst | 22 +- .../api/paddle/nn/UpsamplingBilinear2D_cn.rst | 12 +- docs/api/paddle/nn/UpsamplingNearest2D_cn.rst | 10 +- 
docs/api/paddle/nn/ZeroPad2D_cn.rst | 8 +- docs/api/paddle/nn/dynamic_decode_cn.rst | 16 +- .../nn/functional/adaptive_avg_pool2d_cn.rst | 10 +- .../nn/functional/adaptive_avg_pool3d_cn.rst | 10 +- .../nn/functional/adaptive_max_pool1d_cn.rst | 10 +- .../nn/functional/adaptive_max_pool2d_cn.rst | 10 +- .../nn/functional/adaptive_max_pool3d_cn.rst | 10 +- .../paddle/nn/functional/affine_grid_cn.rst | 10 +- .../paddle/nn/functional/alpha_dropout_cn.rst | 6 +- .../paddle/nn/functional/avg_pool1d_cn.rst | 16 +- .../paddle/nn/functional/avg_pool2d_cn.rst | 12 +- .../paddle/nn/functional/avg_pool3d_cn.rst | 12 +- .../paddle/nn/functional/batch_norm_cn.rst | 12 +- docs/api/paddle/nn/functional/bilinear_cn.rst | 2 +- .../nn/functional/binary_cross_entropy_cn.rst | 10 +- .../binary_cross_entropy_with_logits_cn.rst | 26 +- docs/api/paddle/nn/functional/celu_cn.rst | 6 +- docs/api/paddle/nn/functional/conv1d_cn.rst | 20 +- .../nn/functional/conv1d_transpose_cn.rst | 26 +- docs/api/paddle/nn/functional/conv2d_cn.rst | 20 +- .../nn/functional/conv2d_transpose_cn.rst | 26 +- docs/api/paddle/nn/functional/conv3d_cn.rst | 32 +- .../nn/functional/conv3d_transpose_cn.rst | 38 +- .../functional/cosine_embedding_loss_cn.rst | 20 +- .../nn/functional/cosine_similarity_cn.rst | 12 +- .../paddle/nn/functional/cross_entropy_cn.rst | 44 +- docs/api/paddle/nn/functional/ctc_loss_cn.rst | 12 +- .../api/paddle/nn/functional/dice_loss_cn.rst | 12 +- .../api/paddle/nn/functional/dropout2d_cn.rst | 10 +- .../api/paddle/nn/functional/dropout3d_cn.rst | 10 +- docs/api/paddle/nn/functional/dropout_cn.rst | 56 +- docs/api/paddle/nn/functional/elu_cn.rst | 6 +- .../api/paddle/nn/functional/embedding_cn.rst | 20 +- docs/api/paddle/nn/functional/fold_cn.rst | 18 +- .../paddle/nn/functional/gather_tree_cn.rst | 2 +- docs/api/paddle/nn/functional/gelu_cn.rst | 4 +- .../paddle/nn/functional/grid_sample_cn.rst | 14 +- .../nn/functional/gumbel_softmax_cn.rst | 20 +- 
.../paddle/nn/functional/hardshrink_cn.rst | 4 +- .../paddle/nn/functional/hardsigmoid_cn.rst | 6 +- .../api/paddle/nn/functional/hardswish_cn.rst | 2 +- docs/api/paddle/nn/functional/hardtanh_cn.rst | 6 +- .../paddle/nn/functional/hsigmoid_loss_cn.rst | 30 +- .../paddle/nn/functional/instance_norm_cn.rst | 14 +- .../paddle/nn/functional/interpolate_cn.rst | 24 +- docs/api/paddle/nn/functional/kl_div_cn.rst | 16 +- docs/api/paddle/nn/functional/l1_loss_cn.rst | 4 +- .../paddle/nn/functional/label_smooth_cn.rst | 8 +- .../paddle/nn/functional/layer_norm_cn.rst | 8 +- .../paddle/nn/functional/leaky_relu_cn.rst | 6 +- docs/api/paddle/nn/functional/linear_cn.rst | 16 +- .../nn/functional/local_response_norm_cn.rst | 12 +- docs/api/paddle/nn/functional/log_loss_cn.rst | 2 +- .../paddle/nn/functional/log_sigmoid_cn.rst | 2 +- .../paddle/nn/functional/log_softmax_cn.rst | 6 +- .../nn/functional/margin_ranking_loss_cn.rst | 6 +- .../paddle/nn/functional/max_pool1d_cn.rst | 16 +- .../paddle/nn/functional/max_pool2d_cn.rst | 14 +- .../paddle/nn/functional/max_pool3d_cn.rst | 14 +- .../paddle/nn/functional/max_unpool1d_cn.rst | 8 +- .../paddle/nn/functional/max_unpool2d_cn.rst | 8 +- .../paddle/nn/functional/max_unpool3d_cn.rst | 8 +- docs/api/paddle/nn/functional/maxout_cn.rst | 8 +- docs/api/paddle/nn/functional/mish_cn.rst | 2 +- docs/api/paddle/nn/functional/mse_loss_cn.rst | 8 +- .../multi_label_soft_margin_loss_cn.rst | 14 +- docs/api/paddle/nn/functional/nll_loss_cn.rst | 10 +- .../api/paddle/nn/functional/normalize_cn.rst | 6 +- .../paddle/nn/functional/npair_loss_cn.rst | 12 +- docs/api/paddle/nn/functional/one_hot_cn.rst | 16 +- docs/api/paddle/nn/functional/pad_cn.rst | 20 +- .../paddle/nn/functional/pixel_shuffle_cn.rst | 6 +- docs/api/paddle/nn/functional/prelu_cn.rst | 4 +- docs/api/paddle/nn/functional/relu6_cn.rst | 2 +- docs/api/paddle/nn/functional/relu_cn.rst | 2 +- docs/api/paddle/nn/functional/rrelu_cn.rst | 6 +- docs/api/paddle/nn/functional/selu_cn.rst 
| 6 +- .../paddle/nn/functional/sequence_mask_cn.rst | 4 +- docs/api/paddle/nn/functional/sigmoid_cn.rst | 6 +- .../nn/functional/sigmoid_focal_loss_cn.rst | 20 +- docs/api/paddle/nn/functional/silu_cn.rst | 2 +- .../nn/functional/smooth_l1_loss_cn.rst | 12 +- .../nn/functional/soft_margin_loss_cn.rst | 8 +- docs/api/paddle/nn/functional/softmax_cn.rst | 24 +- .../softmax_with_cross_entropy_cn.rst | 26 +- docs/api/paddle/nn/functional/softplus_cn.rst | 8 +- .../paddle/nn/functional/softshrink_cn.rst | 4 +- docs/api/paddle/nn/functional/softsign_cn.rst | 2 +- .../nn/functional/sparse_attention_cn.rst | 18 +- .../nn/functional/square_error_cost_cn.rst | 8 +- docs/api/paddle/nn/functional/swish_cn.rst | 2 +- .../paddle/nn/functional/tanhshrink_cn.rst | 2 +- .../nn/functional/temporal_shift_cn.rst | 18 +- .../nn/functional/thresholded_relu_cn.rst | 4 +- .../nn/functional/triplet_margin_loss_cn.rst | 16 +- .../triplet_margin_with_distance_loss_cn.rst | 16 +- docs/api/paddle/nn/functional/unfold_cn.rst | 14 +- docs/api/paddle/nn/functional/upsample_cn.rst | 24 +- .../api/paddle/nn/functional/zeropad2d_cn.rst | 8 +- docs/api/paddle/nn/initializer/Assign_cn.rst | 6 +- .../api/paddle/nn/initializer/Bilinear_cn.rst | 2 +- .../api/paddle/nn/initializer/Constant_cn.rst | 2 +- docs/api/paddle/nn/initializer/Dirac_cn.rst | 6 +- .../nn/initializer/KaimingNormal_cn.rst | 8 +- .../nn/initializer/KaimingUniform_cn.rst | 6 +- .../paddle/nn/initializer/Orthogonal_cn.rst | 2 +- .../nn/initializer/calculate_gain_cn.rst | 8 +- .../initializer/set_global_initializer_cn.rst | 10 +- .../nn/utils/parameters_to_vector_cn.rst | 6 +- .../paddle/nn/utils/remove_weight_norm_cn.rst | 2 +- docs/api/paddle/nn/utils/spectral_norm_cn.rst | 8 +- .../nn/utils/vector_to_parameters_cn.rst | 6 +- docs/api/paddle/nn/utils/weight_norm_cn.rst | 4 +- docs/api/paddle/no_grad_cn.rst | 2 +- docs/api/paddle/normal_cn.rst | 4 +- docs/api/paddle/not_equal_cn.rst | 4 +- docs/api/paddle/numel_cn.rst | 6 +- 
docs/api/paddle/ones_like_cn.rst | 6 +- docs/api/paddle/onnx/export_cn.rst | 8 +- docs/api/paddle/optimizer/Adadelta_cn.rst | 38 +- docs/api/paddle/optimizer/Adagrad_cn.rst | 20 +- docs/api/paddle/optimizer/AdamW_cn.rst | 48 +- docs/api/paddle/optimizer/Adam_cn.rst | 42 +- docs/api/paddle/optimizer/Adamax_cn.rst | 42 +- docs/api/paddle/optimizer/Lamb_cn.rst | 38 +- docs/api/paddle/optimizer/Momentum_cn.rst | 28 +- docs/api/paddle/optimizer/Optimizer_cn.rst | 34 +- docs/api/paddle/optimizer/Overview_cn.rst | 42 +- docs/api/paddle/optimizer/RMSProp_cn.rst | 42 +- docs/api/paddle/optimizer/SGD_cn.rst | 26 +- .../optimizer/lr/CosineAnnealingDecay_cn.rst | 12 +- docs/api/paddle/optimizer/lr/CyclicLR_cn.rst | 18 +- .../optimizer/lr/ExponentialDecay_cn.rst | 10 +- .../optimizer/lr/InverseTimeDecay_cn.rst | 8 +- .../paddle/optimizer/lr/LRScheduler_cn.rst | 36 +- .../paddle/optimizer/lr/LambdaDecay_cn.rst | 10 +- .../paddle/optimizer/lr/LinearWarmup_cn.rst | 20 +- .../paddle/optimizer/lr/MultiStepDecay_cn.rst | 10 +- .../optimizer/lr/MultiplicativeDecay_cn.rst | 10 +- .../optimizer/lr/NaturalExpDecay_cn.rst | 10 +- docs/api/paddle/optimizer/lr/NoamDecay_cn.rst | 16 +- .../api/paddle/optimizer/lr/OneCycleLR_cn.rst | 14 +- .../paddle/optimizer/lr/PiecewiseDecay_cn.rst | 16 +- .../optimizer/lr/PolynomialDecay_cn.rst | 18 +- .../optimizer/lr/ReduceOnPlateau_cn.rst | 22 +- docs/api/paddle/optimizer/lr/StepDecay_cn.rst | 10 +- docs/api/paddle/outer_cn.rst | 10 +- docs/api/paddle/poisson_cn.rst | 6 +- docs/api/paddle/profiler/Overview_cn.rst | 32 +- docs/api/paddle/profiler/ProfilerState_cn.rst | 2 +- .../api/paddle/profiler/ProfilerTarget_cn.rst | 8 +- docs/api/paddle/profiler/Profiler_cn.rst | 34 +- docs/api/paddle/profiler/RecordEvent_cn.rst | 4 +- docs/api/paddle/profiler/SortedKeys_cn.rst | 18 +- .../profiler/export_chrome_tracing_cn.rst | 6 +- .../paddle/profiler/export_protobuf_cn.rst | 6 +- .../profiler/load_profiler_result_cn.rst | 6 +- 
.../api/paddle/profiler/make_scheduler_cn.rst | 18 +- docs/api/paddle/put_along_axis_cn.rst | 12 +- docs/api/paddle/quantile_cn.rst | 10 +- docs/api/paddle/rad2deg_cn.rst | 6 +- docs/api/paddle/rand_cn.rst | 8 +- docs/api/paddle/randint_cn.rst | 6 +- docs/api/paddle/randint_like_cn.rst | 12 +- docs/api/paddle/randn_cn.rst | 4 +- docs/api/paddle/randperm_cn.rst | 6 +- docs/api/paddle/rank_cn.rst | 6 +- docs/api/paddle/reciprocal_cn.rst | 6 +- docs/api/paddle/regularizer/L1Decay_cn.rst | 6 +- docs/api/paddle/regularizer/L2Decay_cn.rst | 6 +- docs/api/paddle/repeat_interleave_cn.rst | 6 +- docs/api/paddle/reshape_cn.rst | 12 +- docs/api/paddle/rot90_cn.rst | 8 +- docs/api/paddle/round_cn.rst | 4 +- docs/api/paddle/rsqrt_cn.rst | 8 +- docs/api/paddle/save_cn.rst | 14 +- docs/api/paddle/scale_cn.rst | 14 +- docs/api/paddle/scatter_cn.rst | 10 +- docs/api/paddle/scatter_nd_add_cn.rst | 6 +- docs/api/paddle/scatter_nd_cn.rst | 8 +- docs/api/paddle/searchsorted_cn.rst | 10 +- docs/api/paddle/seed_cn.rst | 4 +- docs/api/paddle/set_default_dtype_cn.rst | 2 +- docs/api/paddle/set_flags_cn.rst | 4 +- docs/api/paddle/set_printoptions_cn.rst | 8 +- docs/api/paddle/shape_cn.rst | 20 +- docs/api/paddle/shard_index_cn.rst | 6 +- docs/api/paddle/sign_cn.rst | 2 +- docs/api/paddle/signal/Overview_cn.rst | 4 +- docs/api/paddle/signal/istft_cn.rst | 14 +- docs/api/paddle/signal/stft_cn.rst | 12 +- docs/api/paddle/sin_cn.rst | 4 +- docs/api/paddle/sinh_cn.rst | 4 +- docs/api/paddle/slice_cn.rst | 12 +- docs/api/paddle/sort_cn.rst | 4 +- .../paddle/sparse/sparse_coo_tensor_cn.rst | 32 +- .../paddle/sparse/sparse_csr_tensor_cn.rst | 28 +- docs/api/paddle/split_cn.rst | 4 +- docs/api/paddle/sqrt_cn.rst | 4 +- docs/api/paddle/square_cn.rst | 4 +- docs/api/paddle/squeeze_cn.rst | 4 +- docs/api/paddle/stack_cn.rst | 4 +- docs/api/paddle/standard_normal_cn.rst | 8 +- docs/api/paddle/stanh_cn.rst | 4 +- docs/api/paddle/static/BuildStrategy_cn.rst | 20 +- 
docs/api/paddle/static/CompiledProgram_cn.rst | 24 +- .../paddle/static/ExecutionStrategy_cn.rst | 12 +- docs/api/paddle/static/Executor_cn.rst | 116 ++-- .../static/ExponentialMovingAverage_cn.rst | 2 +- docs/api/paddle/static/InputSpec_cn.rst | 30 +- .../paddle/static/IpuCompiledProgram_cn.rst | 10 +- docs/api/paddle/static/IpuStrategy_cn.rst | 68 +- docs/api/paddle/static/Overview_cn.rst | 128 ++-- .../api/paddle/static/ParallelExecutor_cn.rst | 70 +- docs/api/paddle/static/Print_cn.rst | 26 +- docs/api/paddle/static/Program_cn.rst | 80 +-- docs/api/paddle/static/Variable_cn.rst | 16 +- .../paddle/static/WeightNormParamAttr_cn.rst | 16 +- docs/api/paddle/static/accuracy_cn.rst | 10 +- docs/api/paddle/static/auc_cn.rst | 30 +- docs/api/paddle/static/cpu_places_cn.rst | 2 +- .../paddle/static/create_global_var_cn.rst | 8 +- .../api/paddle/static/create_parameter_cn.rst | 12 +- docs/api/paddle/static/cuda_places_cn.rst | 6 +- docs/api/paddle/static/data_cn.rst | 8 +- .../paddle/static/default_main_program_cn.rst | 6 +- .../static/default_startup_program_cn.rst | 2 +- .../static/deserialize_persistables_cn.rst | 2 +- docs/api/paddle/static/device_guard_cn.rst | 6 +- docs/api/paddle/static/global_scope_cn.rst | 2 +- docs/api/paddle/static/gradients_cn.rst | 2 +- docs/api/paddle/static/ipu_shard_guard_cn.rst | 12 +- docs/api/paddle/static/load_cn.rst | 14 +- .../paddle/static/load_inference_model_cn.rst | 4 +- .../paddle/static/load_program_state_cn.rst | 8 +- docs/api/paddle/static/mlu_places_cn.rst | 6 +- docs/api/paddle/static/name_scope_cn.rst | 4 +- docs/api/paddle/static/nn/batch_norm_cn.rst | 30 +- docs/api/paddle/static/nn/case_cn.rst | 8 +- docs/api/paddle/static/nn/cond_cn.rst | 22 +- docs/api/paddle/static/nn/conv2d_cn.rst | 38 +- .../paddle/static/nn/conv2d_transpose_cn.rst | 56 +- docs/api/paddle/static/nn/conv3d_cn.rst | 38 +- .../paddle/static/nn/conv3d_transpose_cn.rst | 58 +- docs/api/paddle/static/nn/crf_decoding_cn.rst | 6 +- 
docs/api/paddle/static/nn/data_norm_cn.rst | 24 +- .../api/paddle/static/nn/deform_conv2d_cn.rst | 36 +- docs/api/paddle/static/nn/embedding_cn.rst | 36 +- docs/api/paddle/static/nn/fc_cn.rst | 32 +- docs/api/paddle/static/nn/group_norm_cn.rst | 10 +- .../api/paddle/static/nn/instance_norm_cn.rst | 12 +- docs/api/paddle/static/nn/layer_norm_cn.rst | 10 +- .../paddle/static/nn/multi_box_head_cn.rst | 30 +- docs/api/paddle/static/nn/nce_cn.rst | 12 +- docs/api/paddle/static/nn/prelu_cn.rst | 14 +- docs/api/paddle/static/nn/row_conv_cn.rst | 14 +- .../paddle/static/nn/sequence_concat_cn.rst | 12 +- .../api/paddle/static/nn/sequence_conv_cn.rst | 30 +- .../static/nn/sequence_enumerate_cn.rst | 8 +- .../static/nn/sequence_expand_as_cn.rst | 38 +- .../paddle/static/nn/sequence_expand_cn.rst | 42 +- .../static/nn/sequence_first_step_cn.rst | 22 +- .../static/nn/sequence_last_step_cn.rst | 22 +- docs/api/paddle/static/nn/sequence_pad_cn.rst | 38 +- .../api/paddle/static/nn/sequence_pool_cn.rst | 36 +- .../paddle/static/nn/sequence_reshape_cn.rst | 14 +- .../paddle/static/nn/sequence_reverse_cn.rst | 12 +- .../paddle/static/nn/sequence_scatter_cn.rst | 20 +- .../paddle/static/nn/sequence_slice_cn.rst | 16 +- .../paddle/static/nn/sequence_softmax_cn.rst | 18 +- .../paddle/static/nn/sequence_unpad_cn.rst | 14 +- .../paddle/static/nn/sparse_embedding_cn.rst | 42 +- .../api/paddle/static/nn/spectral_norm_cn.rst | 16 +- docs/api/paddle/static/nn/switch_case_cn.rst | 10 +- docs/api/paddle/static/nn/while_loop_cn.rst | 10 +- docs/api/paddle/static/npu_places_cn.rst | 6 +- docs/api/paddle/static/program_guard_cn.rst | 4 +- docs/api/paddle/static/py_func_cn.rst | 34 +- docs/api/paddle/static/save_cn.rst | 10 +- docs/api/paddle/static/set_ipu_shard_cn.rst | 10 +- .../paddle/static/set_program_state_cn.rst | 2 +- docs/api/paddle/static/xpu_places_cn.rst | 6 +- docs/api/paddle/std_cn.rst | 8 +- docs/api/paddle/strided_slice_cn.rst | 18 +- docs/api/paddle/subtract_cn.rst | 2 +- 
docs/api/paddle/sum_cn.rst | 2 +- docs/api/paddle/sysconfig/get_include_cn.rst | 2 +- docs/api/paddle/sysconfig/get_lib_cn.rst | 2 +- docs/api/paddle/t_cn.rst | 6 +- docs/api/paddle/take_along_axis_cn.rst | 8 +- docs/api/paddle/tan_cn.rst | 6 +- docs/api/paddle/tanh_cn.rst | 4 +- docs/api/paddle/tensordot_cn.rst | 14 +- docs/api/paddle/text/Conll05st_cn.rst | 16 +- docs/api/paddle/text/Imdb_cn.rst | 10 +- docs/api/paddle/text/Imikolov_cn.rst | 10 +- docs/api/paddle/text/Movielens_cn.rst | 10 +- docs/api/paddle/text/Overview_cn.rst | 42 +- docs/api/paddle/text/UCIHousing_cn.rst | 6 +- docs/api/paddle/text/ViterbiDecoder_cn.rst | 12 +- docs/api/paddle/text/WMT14_cn.rst | 14 +- docs/api/paddle/text/WMT16_cn.rst | 14 +- docs/api/paddle/text/viterbi_decode_cn.rst | 14 +- docs/api/paddle/tile_cn.rst | 8 +- docs/api/paddle/to_tensor_cn.rst | 6 +- docs/api/paddle/tolist_cn.rst | 4 +- docs/api/paddle/topk_cn.rst | 2 +- docs/api/paddle/transpose_cn.rst | 12 +- docs/api/paddle/tril_cn.rst | 6 +- docs/api/paddle/tril_indices_cn.rst | 4 +- docs/api/paddle/triu_cn.rst | 6 +- docs/api/paddle/trunc_cn.rst | 4 +- docs/api/paddle/unbind_cn.rst | 4 +- docs/api/paddle/uniform_cn.rst | 16 +- docs/api/paddle/unique_cn.rst | 4 +- docs/api/paddle/unique_consecutive_cn.rst | 16 +- docs/api/paddle/unsqueeze_cn.rst | 8 +- docs/api/paddle/unstack_cn.rst | 12 +- docs/api/paddle/utils/Overview_cn.rst | 32 +- .../utils/cpp_extension/CUDAExtension_cn.rst | 2 +- .../utils/cpp_extension/CppExtension_cn.rst | 2 +- .../paddle/utils/cpp_extension/load_cn.rst | 8 +- .../paddle/utils/cpp_extension/setup_cn.rst | 6 +- docs/api/paddle/utils/deprecated_cn.rst | 10 +- .../paddle/utils/dlpack/from_dlpack_cn.rst | 6 +- docs/api/paddle/utils/dlpack/to_dlpack_cn.rst | 6 +- .../download/get_weights_path_from_url_cn.rst | 4 +- docs/api/paddle/utils/run_check_cn.rst | 2 +- .../paddle/utils/unique_name/generate_cn.rst | 4 +- .../api/paddle/utils/unique_name/guard_cn.rst | 4 +- 
.../paddle/utils/unique_name/switch_cn.rst | 4 +- docs/api/paddle/var_cn.rst | 4 +- docs/api/paddle/version/Overview_cn.rst | 12 +- docs/api/paddle/version/cuda_cn.rst | 2 +- docs/api/paddle/version/cudnn_cn.rst | 2 +- docs/api/paddle/version/show_cn.rst | 20 +- docs/api/paddle/vision/Overview_cn.rst | 140 ++-- .../api/paddle/vision/ops/DeformConv2D_cn.rst | 14 +- docs/api/paddle/vision/ops/PSRoIPool_cn.rst | 8 +- docs/api/paddle/vision/ops/RoIAlign_cn.rst | 10 +- docs/api/paddle/vision/ops/RoIPool_cn.rst | 6 +- .../paddle/vision/ops/deform_conv2d_cn.rst | 20 +- docs/api/paddle/vision/ops/nms_cn.rst | 20 +- docs/api/paddle/vision/ops/psroi_pool_cn.rst | 14 +- docs/api/paddle/vision/ops/roi_align_cn.rst | 18 +- docs/api/paddle/vision/ops/roi_pool_cn.rst | 10 +- docs/api/paddle/vision/ops/yolo_box_cn.rst | 20 +- docs/api/paddle/vision/ops/yolo_loss_cn.rst | 42 +- .../transforms/BrightnessTransform_cn.rst | 2 +- .../vision/transforms/ColorJitter_cn.rst | 2 +- .../paddle/vision/transforms/Compose_cn.rst | 2 +- .../transforms/ContrastTransform_cn.rst | 2 +- .../paddle/vision/transforms/Grayscale_cn.rst | 4 +- .../vision/transforms/HueTransform_cn.rst | 2 +- docs/api/paddle/vision/transforms/Pad_cn.rst | 6 +- .../vision/transforms/RandomCrop_cn.rst | 8 +- .../vision/transforms/RandomErasing_cn.rst | 4 +- .../transforms/RandomHorizontalFlip_cn.rst | 2 +- .../transforms/RandomResizedCrop_cn.rst | 4 +- .../vision/transforms/RandomRotation_cn.rst | 6 +- .../transforms/RandomVerticalFlip_cn.rst | 2 +- .../paddle/vision/transforms/Resize_cn.rst | 2 +- .../transforms/SaturationTransform_cn.rst | 2 +- .../paddle/vision/transforms/Transpose_cn.rst | 4 +- .../transforms/adjust_brightness_cn.rst | 2 +- .../vision/transforms/adjust_contrast_cn.rst | 2 +- .../vision/transforms/adjust_hue_cn.rst | 2 +- .../api/paddle/vision/transforms/erase_cn.rst | 4 +- docs/api/paddle/vision/transforms/pad_cn.rst | 6 +- .../paddle/vision/transforms/resize_cn.rst | 2 +- 
.../paddle/vision/transforms/rotate_cn.rst | 4 +- .../vision/transforms/to_grayscale_cn.rst | 4 +- docs/api/paddle/zeros_cn.rst | 8 +- docs/api/paddle/zeros_like_cn.rst | 8 +- docs/api_guides/X2Paddle/Caffe-Fluid.rst | 10 +- docs/api_guides/X2Paddle/TensorFlow-Fluid.rst | 18 +- docs/api_guides/index_cn.rst | 4 +- docs/api_guides/low_level/backward.rst | 6 +- .../api_guides/low_level/compiled_program.rst | 10 +- .../low_level/distributed/async_training.rst | 14 +- .../distributed/cluster_train_data_cn.rst | 4 +- .../large_scale_sparse_feature_training.rst | 20 +- .../low_level/distributed/sync_training.rst | 62 +- docs/api_guides/low_level/executor.rst | 6 +- .../low_level/layers/activations.rst | 6 +- .../low_level/layers/control_flow.rst | 26 +- docs/api_guides/low_level/layers/conv.rst | 42 +- docs/api_guides/low_level/layers/conv_en.rst | 2 +- .../low_level/layers/data_feeder.rst | 18 +- .../low_level/layers/data_in_out.rst | 16 +- .../api_guides/low_level/layers/detection.rst | 44 +- .../layers/learning_rate_scheduler.rst | 52 +- docs/api_guides/low_level/layers/math.rst | 4 +- docs/api_guides/low_level/layers/pooling.rst | 24 +- docs/api_guides/low_level/layers/sequence.rst | 70 +- .../low_level/layers/sparse_update.rst | 18 +- docs/api_guides/low_level/layers/tensor.rst | 14 +- docs/api_guides/low_level/metrics.rst | 12 +- .../low_level/model_save_reader.rst | 6 +- docs/api_guides/low_level/nets.rst | 2 +- docs/api_guides/low_level/optimizer.rst | 14 +- .../low_level/parallel_executor.rst | 22 +- docs/api_guides/low_level/parameter.rst | 56 +- docs/api_guides/low_level/program.rst | 20 +- docs/design/dynamic_rnn/index_cn.rst | 2 +- docs/design/dynamic_rnn/rnn_design.md | 66 +- docs/design/others/releasing_process.md | 56 +- docs/design/phi/design.md | 654 +++++++++--------- .../training_quantization_model_format.md | 76 +- docs/dev_guides/Overview_cn.md | 8 +- .../api_accpetance_criteria_cn.md | 58 +- .../api_contributing_guides_cn.rst | 56 +- 
.../api_design_guidelines_standard_cn.md | 262 +++---- .../api_docs_guidelines_cn.md | 168 ++--- .../api_contributing_guides/new_cpp_op_cn.md | 354 +++++----- .../new_python_api_cn.md | 24 +- .../custom_device_example_cn.md | 44 +- .../custom_device_docs/custom_kernel_cn.rst | 10 +- .../custom_kernel_docs/context_api_cn.md | 76 +- .../custom_kernel_docs/cpp_api_cn.rst | 12 +- .../custom_kernel_docs/exception_api_cn.md | 26 +- .../custom_kernel_docs/kernel_declare_cn.md | 20 +- .../custom_kernel_docs/register_api_cn.md | 24 +- .../custom_kernel_docs/tensor_api_cn.md | 126 ++-- .../custom_device_docs/custom_runtime_cn.rst | 38 +- .../custom_device_docs/device_api_cn.md | 2 +- .../custom_device_docs/event_api_cn.md | 24 +- .../custom_device_docs/index_cn.rst | 4 +- .../custom_device_docs/memory_api_cn.md | 12 +- .../runtime_data_type_cn.md | 8 +- .../custom_device_docs/stream_api_cn.md | 28 +- .../dev_guides/docs_contributing_guides_cn.md | 34 +- docs/dev_guides/git_guides/code_review_cn.md | 4 +- .../git_guides/codestyle_check_guide_cn.md | 28 +- docs/dev_guides/git_guides/index_cn.rst | 4 +- .../git_guides/local_dev_guide_cn.md | 6 +- .../git_guides/paddle_ci_manual_cn.md | 110 +-- .../git_guides/submit_pr_guide_cn.md | 36 +- docs/dev_guides/index_cn.rst | 6 +- ..._dtype_extension_acceptance_criteria_cn.md | 32 +- ...dtype_extension_contributing_guides_cn.rst | 38 +- .../kernel_primitive_api/example_cn.rst | 2 +- .../kernel_primitive_api/index_cn.rst | 2 +- .../kernel_primitive_api/io_api_cn.md | 6 +- .../kernel_primitive_api/model_example_cn.md | 4 +- .../kernel_primitive_api/reduce_example_cn.md | 2 +- .../op_optimization_accpetance_criteria_cn.md | 22 +- ...op_optimization_contributing_guides_cn.rst | 30 +- .../op_optimization_method_introduction_cn.md | 28 +- docs/dev_guides/sugon/complie_and_test_cn.md | 38 +- docs/dev_guides/sugon/index_cn.rst | 6 +- docs/dev_guides/sugon/paddle_c86_cn.md | 22 +- .../sugon/paddle_c86_fix_guides_cn.md | 70 +- 
docs/eval/evaluation_of_docs_system.md | 24 +- ...200\220Hackathon No.111\343\200\221 PR.md" | 22 +- ...200\220Hackathon No.113\343\200\221 PR.md" | 100 +-- docs/faq/2.0.md | 56 +- docs/faq/data_cn.md | 28 +- docs/faq/distributed_cn.md | 122 ++-- docs/faq/index_cn.rst | 4 +- docs/faq/install_cn.md | 76 +- docs/faq/others_cn.md | 8 +- docs/faq/params_cn.md | 50 +- docs/faq/save_cn.md | 50 +- docs/faq/train_cn.md | 90 +-- .../cluster_overview_ps_cn.rst | 82 +-- .../cluster_quick_start_cn.rst | 2 +- .../cluster_quick_start_collective_cn.rst | 44 +- .../cluster_quick_start_ps_cn.rst | 72 +- .../data_parallel/amp_cn.rst | 50 +- .../data_parallel/gradient_merge_cn.rst | 24 +- .../data_parallel/principle_and_demo_cn.rst | 26 +- .../data_parallel/recompute_cn.rst | 26 +- .../06_distributed_training/deployment_cn.rst | 12 +- .../distributed_overview.rst | 16 +- .../fleet_api_howto_cn.rst | 56 +- .../group_sharded_parallel_cn.rst | 50 +- .../06_distributed_training/index_cn.rst | 2 +- .../model_parallel_cn.rst | 46 +- .../guides/06_distributed_training/moe_cn.rst | 16 +- .../pipeline_parallel_cn.rst | 32 +- .../community_contribution_cn.md | 22 +- .../10_contribution/docs_contribution.md | 56 +- docs/guides/10_contribution/faq_cn.rst | 32 +- docs/guides/10_contribution/hackathon_cn.md | 20 +- docs/guides/10_contribution/rfcs.md | 22 +- docs/guides/advanced/autograd_cn.rst | 36 +- docs/guides/advanced/gradient_clip_cn.rst | 42 +- docs/guides/advanced/index_cn.rst | 4 +- docs/guides/advanced/layer_and_model_cn.md | 4 +- docs/guides/advanced/model_to_onnx_cn.rst | 28 +- docs/guides/advanced/visualdl_cn.md | 86 +-- docs/guides/advanced/visualdl_usage_cn.md | 106 +-- docs/guides/beginner/index_cn.rst | 10 +- docs/guides/beginner/model_save_load_cn.rst | 62 +- docs/guides/beginner/tensor_cn.md | 158 ++--- docs/guides/custom_op/index_cn.rst | 8 +- docs/guides/custom_op/new_cpp_op_cn.md | 174 ++--- docs/guides/custom_op/new_python_op_cn.md | 114 +-- 
docs/guides/flags/check_nan_inf_cn.md | 52 +- docs/guides/flags/cudnn_cn.rst | 34 +- docs/guides/flags/data_cn.rst | 32 +- docs/guides/flags/debug_cn.rst | 42 +- docs/guides/flags/device_cn.rst | 18 +- docs/guides/flags/distributed_cn.rst | 116 ++-- docs/guides/flags/executor_cn.rst | 32 +- docs/guides/flags/flags_cn.rst | 10 +- docs/guides/flags/memory_cn.rst | 134 ++-- docs/guides/flags/npu_cn.rst | 6 +- docs/guides/flags/others_cn.rst | 38 +- .../hardware_support/hardware_info_cn.md | 56 +- docs/guides/hardware_support/index_cn.rst | 8 +- .../hardware_support/ipu_docs/index_cn.rst | 12 +- .../ipu_docs/infer_example_cn.md | 10 +- .../ipu_docs/paddle_install_cn.md | 10 +- .../ipu_docs/train_example_cn.md | 8 +- .../hardware_support/npu_docs/index_cn.rst | 8 +- .../npu_docs/paddle_install_cn.md | 34 +- .../npu_docs/train_example_cn.md | 22 +- .../hardware_support/rocm_docs/index_cn.rst | 12 +- .../rocm_docs/infer_example_cn.md | 26 +- .../rocm_docs/paddle_install_cn.md | 38 +- .../rocm_docs/paddle_rocm_cn.md | 6 +- .../rocm_docs/train_example_cn.md | 28 +- .../hardware_support/xpu_docs/index_cn.rst | 22 +- .../xpu_docs/inference_install_example_cn.md | 12 +- .../xpu_docs/paddle_2.0_xpu2_cn.md | 10 +- .../xpu_docs/paddle_2.0_xpu_cn.md | 16 +- .../xpu_docs/paddle_install_cn.md | 80 +-- .../xpu_docs/paddle_install_xpu2_cn.md | 64 +- .../xpu_docs/train_example_cn.md | 16 +- .../xpu_docs/train_example_xpu2_cn.md | 18 +- docs/guides/infer/inference/inference_cn.md | 44 +- docs/guides/infer/mobile/mobile_index_cn.md | 36 +- .../guides/infer/paddleslim/paddle_slim_cn.md | 30 +- docs/guides/jit/basic_usage_cn.md | 56 +- docs/guides/jit/basic_usage_en.md | 8 +- docs/guides/jit/case_analysis_cn.md | 12 +- docs/guides/jit/debugging_cn.md | 48 +- docs/guides/jit/grammar_list_cn.md | 126 ++-- docs/guides/jit/principle_cn.md | 14 +- docs/guides/model_convert/index_cn.rst | 10 +- .../load_old_format_model_cn.rst | 22 +- docs/guides/model_convert/migration_cn.rst | 56 +- 
.../model_convert/paddle_api_mapping_cn.rst | 10 +- .../model_convert/pytorch_api_mapping_cn.md | 142 ++-- docs/guides/model_convert/update_cn.md | 210 +++--- docs/guides/model_convert/update_en.md | 2 +- docs/guides/performance_improving/amp_cn.md | 226 +++--- .../analysis_tools/benchmark_cn.md | 32 +- .../analysis_tools/cpu_profiling_cn.md | 44 +- .../analysis_tools/index_cn.rst | 4 +- .../analysis_tools/timeline_cn.md | 20 +- .../performance_improving/device_switching.md | 36 +- .../performance_improving/memory_optimize.rst | 104 +-- .../paddle_tensorrt_infer.md | 90 +-- .../paddle_tensorrt_infer_en.md | 2 +- .../performance_improving/profiling_model.md | 116 ++-- .../performance_improving/quantization.md | 70 +- .../training_best_practice.rst | 136 ++-- docs/install/FAQ.md | 40 +- docs/install/Tables.md | 100 +-- docs/install/compile/arm-compile.md | 32 +- docs/install/compile/linux-compile.md | 198 +++--- docs/install/compile/linux-compile_en.md | 2 +- docs/install/compile/macos-compile.md | 130 ++-- docs/install/compile/mips-compile.md | 36 +- docs/install/compile/sw-compile.md | 36 +- docs/install/compile/windows-compile.md | 48 +- docs/install/compile/zhaoxin-compile.md | 28 +- docs/install/conda/fromconda.rst | 2 +- docs/install/conda/linux-conda.md | 44 +- docs/install/conda/macos-conda.md | 32 +- docs/install/conda/windows-conda.md | 44 +- docs/install/docker/fromdocker.rst | 2 +- docs/install/docker/linux-docker.md | 80 +-- docs/install/docker/macos-docker.md | 62 +- docs/install/index_cn.rst | 46 +- docs/install/install_Kunlun_en.md | 2 +- docs/install/install_Kunlun_zh.md | 14 +- docs/install/install_ROCM_zh.md | 12 +- docs/install/install_script.md | 42 +- docs/install/pip/frompip.rst | 2 +- docs/install/pip/linux-pip.md | 58 +- docs/install/pip/macos-pip.md | 30 +- docs/install/pip/windows-pip.md | 62 +- docs/practices/cv/index_cn.rst | 6 +- docs/practices/index_cn.rst | 12 +- docs/practices/nlp/index_cn.rst | 4 +- 
docs/practices/quick_start/index_cn.rst | 2 +- docs/release_note_cn.md | 266 +++---- 1042 files changed, 10905 insertions(+), 10905 deletions(-) diff --git a/CONTRIBUTING_cn.md b/CONTRIBUTING_cn.md index 03d9789c769..b43b77ea3d7 100644 --- a/CONTRIBUTING_cn.md +++ b/CONTRIBUTING_cn.md @@ -8,7 +8,7 @@ - 使用教程 - 应用实践 -- API文档 +- API 文档 ### 使用教程 @@ -16,26 +16,26 @@ ### 应用实践 -应用实践主要是使用飞桨框架进行具体的案例实现。目前已经有许多开发者贡献了非常优秀的案例,如OCR识别、人脸关键点检测等,我们非常欢迎你提交你的项目到我们的repo中来,并最终呈现在飞桨的官网上。 +应用实践主要是使用飞桨框架进行具体的案例实现。目前已经有许多开发者贡献了非常优秀的案例,如 OCR 识别、人脸关键点检测等,我们非常欢迎你提交你的项目到我们的 repo 中来,并最终呈现在飞桨的官网上。 -### API文档 +### API 文档 -API文档是飞桨框架的API文档,包含了飞桨框架API的说明介绍。我们非常欢迎你对我们的API文档提出修改,不管是typo或者是修改说明与示例,我们都非常感谢你对于API文档所作出的任何贡献。 +API 文档是飞桨框架的 API 文档,包含了飞桨框架 API 的说明介绍。我们非常欢迎你对我们的 API 文档提出修改,不管是 typo 或者是修改说明与示例,我们都非常感谢你对于 API 文档所作出的任何贡献。 ## 参与方式 ### 使用教程 -这部分内容存放在 [docs/docs/guides](https://github.com/PaddlePaddle/docs/tree/develop/docs/guides) 目录下,你可以通过提交PR的方式,来作出你的修改。具体修改方式请参考:[文档贡献指南](https://github.com/PaddlePaddle/docs/wiki/%E6%96%87%E6%A1%A3%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97)。 +这部分内容存放在 [docs/docs/guides](https://github.com/PaddlePaddle/docs/tree/develop/docs/guides) 目录下,你可以通过提交 PR 的方式,来作出你的修改。具体修改方式请参考:[文档贡献指南](https://github.com/PaddlePaddle/docs/wiki/%E6%96%87%E6%A1%A3%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97)。 ### 应用实践 -这部分内容分为源代码与官网文档两部分,源代码的部分以notebook的形式,存放在 [book/paddle2.0_docs](https://github.com/PaddlePaddle/book/tree/develop/paddle2.0_docs) 目录下,你可以提交你的notebook格式的源码于该目录中;在你的notebook文件被合入后,我们会将其转为md文件,存储在[docs/docs/tutorial](https://github.com/PaddlePaddle/docs/tree/develop/docs/tutorial)中,然后呈现到官网。具体信息请参考:[[Call for Contribution] Tutorials for PaddlePaddle 2.0](https://github.com/PaddlePaddle/book/issues/905). 
+这部分内容分为源代码与官网文档两部分,源代码的部分以 notebook 的形式,存放在 [book/paddle2.0_docs](https://github.com/PaddlePaddle/book/tree/develop/paddle2.0_docs) 目录下,你可以提交你的 notebook 格式的源码于该目录中;在你的 notebook 文件被合入后,我们会将其转为 md 文件,存储在[docs/docs/tutorial](https://github.com/PaddlePaddle/docs/tree/develop/docs/tutorial)中,然后呈现到官网。具体信息请参考:[[Call for Contribution] Tutorials for PaddlePaddle 2.0](https://github.com/PaddlePaddle/book/issues/905). -### API文档 +### API 文档 -飞桨框架同时提供中英文API文档。其中,英文API文档存于[Paddle](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle)源代码中,绝大部分通过官网文档的源代码即可链接到,你可以在此位置对英文文档进行修改;而中文API文档存放在[docs/docs/api](https://github.com/PaddlePaddle/docs/tree/develop/docs/api)目录下。你可以针对文档中的任何错误与内容进行修复与完善,或者是新增你认为该文档中所需要的内容,我们非常感谢你对于API文档所付出的一切。具体修改方式请参考:[英文API文档贡献指南](https://github.com/PaddlePaddle/docs/wiki/%E8%8B%B1%E6%96%87API%E6%96%87%E6%A1%A3%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97)、[中文API文档贡献指南](https://github.com/PaddlePaddle/docs/wiki/%E4%B8%AD%E6%96%87API%E6%96%87%E6%A1%A3%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97)。 +飞桨框架同时提供中英文 API 文档。其中,英文 API 文档存于[Paddle](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle)源代码中,绝大部分通过官网文档的源代码即可链接到,你可以在此位置对英文文档进行修改;而中文 API 文档存放在[docs/docs/api](https://github.com/PaddlePaddle/docs/tree/develop/docs/api)目录下。你可以针对文档中的任何错误与内容进行修复与完善,或者是新增你认为该文档中所需要的内容,我们非常感谢你对于 API 文档所付出的一切。具体修改方式请参考:[英文 API 文档贡献指南](https://github.com/PaddlePaddle/docs/wiki/%E8%8B%B1%E6%96%87API%E6%96%87%E6%A1%A3%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97)、[中文 API 文档贡献指南](https://github.com/PaddlePaddle/docs/wiki/%E4%B8%AD%E6%96%87API%E6%96%87%E6%A1%A3%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97)。 -## 提交PR +## 提交 PR -你对于飞桨文档的任何修改,都应该通过提交PR的方式来完成,具体的方法可以参考[提交PR](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/08_contribution/local_dev_guide.html) +你对于飞桨文档的任何修改,都应该通过提交 PR 的方式来完成,具体的方法可以参考[提交 PR](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/08_contribution/local_dev_guide.html) diff --git a/README_cn.md b/README_cn.md index 
345c490eb45..f80db11e27a 100644 --- a/README_cn.md +++ b/README_cn.md @@ -4,12 +4,12 @@ docs 是 [PaddlePaddle 官网文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/index_cn.html) 的源文件。 -注意:英文版API文档直接从[PaddlePaddle/Paddle](https://github.com/PaddlePaddle/Paddle) 的 docstring 中生成,[飞桨其他项目](https://www.paddlepaddle.org.cn/overview)的文档分别在其对应的位置中管理。 +注意:英文版 API 文档直接从[PaddlePaddle/Paddle](https://github.com/PaddlePaddle/Paddle) 的 docstring 中生成,[飞桨其他项目](https://www.paddlepaddle.org.cn/overview)的文档分别在其对应的位置中管理。 ## 仓库结构 - [docs](docs): 飞桨框架 2.0 以及之后版本文档的源文件。 -- [docs/api](docs/api): 飞桨中文 API文档的源文件。 +- [docs/api](docs/api): 飞桨中文 API 文档的源文件。 - [docs/guides](docs/guides): 飞桨官方教程的源文件。 - [docs/tutorial](docs/tutorial): 飞桨相关案例的源文件。 - [ci_scripts](ci_scripts): docs CI 相关的文件。 diff --git a/docs/advanced_guide/performance_improving/amp/amp.md b/docs/advanced_guide/performance_improving/amp/amp.md index 3a41a447f78..d0b8866bfb3 100644 --- a/docs/advanced_guide/performance_improving/amp/amp.md +++ b/docs/advanced_guide/performance_improving/amp/amp.md @@ -1,30 +1,30 @@ # 混合精度训练最佳实践 -Automatic Mixed Precision (AMP) 是一种自动混合使用半精度(FP16)和单精度(FP32)来加速模型训练的技术。AMP技术可方便用户快速将使用 FP32 训练的模型修改为使用混合精度训练,并通过黑白名单和动态`loss scaling`来保证训练时的数值稳定性进而避免梯度Infinite或者NaN(Not a Number)。借力于新一代NVIDIA GPU中Tensor Cores的计算性能,PaddlePaddle AMP技术在ResNet50、Transformer等模型上训练速度相对于FP32训练加速比可达1.5~2.9。 +Automatic Mixed Precision (AMP) 是一种自动混合使用半精度(FP16)和单精度(FP32)来加速模型训练的技术。AMP 技术可方便用户快速将使用 FP32 训练的模型修改为使用混合精度训练,并通过黑白名单和动态`loss scaling`来保证训练时的数值稳定性进而避免梯度 Infinite 或者 NaN(Not a Number)。借力于新一代 NVIDIA GPU 中 Tensor Cores 的计算性能,PaddlePaddle AMP 技术在 ResNet50、Transformer 等模型上训练速度相对于 FP32 训练加速比可达 1.5~2.9。 -### 半精度浮点类型FP16 +### 半精度浮点类型 FP16 -如图 1 所示,半精度(Float Precision16,FP16)是一种相对较新的浮点类型,在计算机中使用2字节(16位)存储。在IEEE 754-2008标准中,它亦被称作binary16。与计算中常用的单精度(FP32)和双精度(FP64)类型相比,FP16更适于在精度要求不高的场景中使用。 +如图 1 所示,半精度(Float Precision16,FP16)是一种相对较新的浮点类型,在计算机中使用 2 字节(16 位)存储。在 IEEE 754-2008 标准中,它亦被称作 binary16。与计算中常用的单精度(FP32)和双精度(FP64)类型相比,FP16 
更适于在精度要求不高的场景中使用。
missing
图 1. 半精度和单精度数据示意图
-### 英伟达GPU的FP16算力 +### 英伟达 GPU 的 FP16 算力 -在使用相同的超参数下,混合精度训练使用半精度浮点(FP16)和单精度(FP32)浮点即可达到与使用纯单精度训练相同的准确率,并可加速模型的训练速度。这主要得益于英伟达推出的Volta及Turing架构GPU在使用FP16计算时具有如下特点: +在使用相同的超参数下,混合精度训练使用半精度浮点(FP16)和单精度(FP32)浮点即可达到与使用纯单精度训练相同的准确率,并可加速模型的训练速度。这主要得益于英伟达推出的 Volta 及 Turing 架构 GPU 在使用 FP16 计算时具有如下特点: -* FP16可降低一半的内存带宽和存储需求,这使得在相同的硬件条件下研究人员可使用更大更复杂的模型以及更大的batch size大小。 -* FP16可以充分利用英伟达Volta及Turing架构GPU提供的Tensor Cores技术。在相同的GPU硬件上,Tensor Cores的FP16计算吞吐量是FP32的8倍。 +* FP16 可降低一半的内存带宽和存储需求,这使得在相同的硬件条件下研究人员可使用更大更复杂的模型以及更大的 batch size 大小。 +* FP16 可以充分利用英伟达 Volta 及 Turing 架构 GPU 提供的 Tensor Cores 技术。在相同的 GPU 硬件上,Tensor Cores 的 FP16 计算吞吐量是 FP32 的 8 倍。 -### PaddlePaddle AMP功能——牛刀小试 +### PaddlePaddle AMP 功能——牛刀小试 -如前文所述,使用FP16数据类型可能会造成计算精度上的损失,但对深度学习领域而言,并不是所有计算都要求很高的精度,一些局部的精度损失对最终训练效果影响很微弱,却能使吞吐和训练速度带来大幅提升。因此,混合精度计算的需求应运而生。具体而言,训练过程中将一些对精度损失不敏感且能利用Tensor Cores进行加速的运算使用半精度处理,而对精度损失敏感部分依然保持FP32计算精度,用以最大限度提升访存和计算效率。 +如前文所述,使用 FP16 数据类型可能会造成计算精度上的损失,但对深度学习领域而言,并不是所有计算都要求很高的精度,一些局部的精度损失对最终训练效果影响很微弱,却能使吞吐和训练速度带来大幅提升。因此,混合精度计算的需求应运而生。具体而言,训练过程中将一些对精度损失不敏感且能利用 Tensor Cores 进行加速的运算使用半精度处理,而对精度损失敏感部分依然保持 FP32 计算精度,用以最大限度提升访存和计算效率。 -为了避免对每个具体模型人工地去设计和尝试精度混合的方法,PaddlePaadle框架提供自动混合精度训练(AMP)功能,解放"炼丹师"的双手。在PaddlePaddle中使用AMP训练是一件十分容易的事情,用户只需要增加一行代码即可将原有的FP32训练转变为AMP训练。下面以`MNIST`为例介绍PaddlePaddle AMP功能的使用示例。 +为了避免对每个具体模型人工地去设计和尝试精度混合的方法,PaddlePaadle 框架提供自动混合精度训练(AMP)功能,解放"炼丹师"的双手。在 PaddlePaddle 中使用 AMP 训练是一件十分容易的事情,用户只需要增加一行代码即可将原有的 FP32 训练转变为 AMP 训练。下面以`MNIST`为例介绍 PaddlePaddle AMP 功能的使用示例。 -**MNIST网络定义** +**MNIST 网络定义** ```python import paddle.fluid as fluid @@ -41,16 +41,16 @@ def MNIST(data, class_dim): return fc2 ``` -针对CV(Computer Vision)类模型组网,为获得更高的训练性能需要注意如下三点: +针对 CV(Computer Vision)类模型组网,为获得更高的训练性能需要注意如下三点: -* `conv2d`、`batch_norm`以及`pool2d`等需要将数据布局设置为`NHWC`,这样有助于使用TensorCore技术加速计算过程1。 -* Tensor Cores要求在使用FP16加速卷积运算时conv2d的输入/输出通道数为8的倍数2,因此设计网络时推荐将conv2d层的输入/输出通道数设置为8的倍数。 -* Tensor Cores要求在使用FP16加速矩阵乘运算时矩阵行数和列数均为8的倍数3,因此设计网络时推荐将fc层的size参数设置为8的倍数。 +* 
`conv2d`、`batch_norm`以及`pool2d`等需要将数据布局设置为`NHWC`,这样有助于使用 TensorCore 技术加速计算过程1。 +* Tensor Cores 要求在使用 FP16 加速卷积运算时 conv2d 的输入/输出通道数为 8 的倍数2,因此设计网络时推荐将 conv2d 层的输入/输出通道数设置为 8 的倍数。 +* Tensor Cores 要求在使用 FP16 加速矩阵乘运算时矩阵行数和列数均为 8 的倍数3,因此设计网络时推荐将 fc 层的 size 参数设置为 8 的倍数。 **FP32 训练** -为了训练 MNIST 网络,还需要定义损失函数来更新权重参数,此处使用的优化器是SGDOptimizer。为了简化说明,这里省略了迭代训练的相关代码,仅体现损失函数及优化器定义相关的内容。 +为了训练 MNIST 网络,还需要定义损失函数来更新权重参数,此处使用的优化器是 SGDOptimizer。为了简化说明,这里省略了迭代训练的相关代码,仅体现损失函数及优化器定义相关的内容。 ```python import paddle @@ -68,48 +68,48 @@ sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3) sgd.minimize(avg_loss) ``` -**AMP训练** +**AMP 训练** -与FP32训练相比,用户仅需使用PaddlePaddle提供的`fluid.contrib.mixed_precision.decorate` 函数将原来的优化器SGDOptimizer进行封装,然后使用封装后的优化器(mp_sgd)更新参数梯度即可完成向AMP训练的转换,代码如下所示: +与 FP32 训练相比,用户仅需使用 PaddlePaddle 提供的`fluid.contrib.mixed_precision.decorate` 函数将原来的优化器 SGDOptimizer 进行封装,然后使用封装后的优化器(mp_sgd)更新参数梯度即可完成向 AMP 训练的转换,代码如下所示: ```python sgd = SGDOptimizer(learning_rate=1e-3) -# 此处只需要使用fluid.contrib.mixed_precision.decorate将sgd封装成AMP训练所需的 -# 优化器mp_sgd,并使用mp_sgd.minimize(avg_loss)代替原来的sgd.minimize(avg_loss)语句即可。 +# 此处只需要使用 fluid.contrib.mixed_precision.decorate 将 sgd 封装成 AMP 训练所需的 +# 优化器 mp_sgd,并使用 mp_sgd.minimize(avg_loss)代替原来的 sgd.minimize(avg_loss)语句即可。 mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd) mp_sgd.minimize(avg_loss) ``` -运行上述混合精度训练python脚本时为得到更好的执行性能可配置如下环境参数,并保证cudnn版本在7.4.1及以上。 +运行上述混合精度训练 python 脚本时为得到更好的执行性能可配置如下环境参数,并保证 cudnn 版本在 7.4.1 及以上。 ```shell -export FLAGS_conv_workspace_size_limit=1024 # MB,根据所使用的GPU显存容量及模型特点设置数值,值越大越有可能选择到更快的卷积算法 +export FLAGS_conv_workspace_size_limit=1024 # MB,根据所使用的 GPU 显存容量及模型特点设置数值,值越大越有可能选择到更快的卷积算法 export FLAGS_cudnn_exhaustive_search=1 # 使用穷举搜索方法来选择快速卷积算法 -export FLAGS_cudnn_batchnorm_spatial_persistent=1 # 用于触发batch_norm和relu的融合 +export FLAGS_cudnn_batchnorm_spatial_persistent=1 # 用于触发 batch_norm 和 relu 的融合 ``` -上述即为最简单的PaddlePaddle 
AMP功能使用方法。ResNet50模型的AMP训练示例可[点击此处](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README.md#%E6%B7%B7%E5%90%88%E7%B2%BE%E5%BA%A6%E8%AE%AD%E7%BB%83)查看,其他模型使用PaddlePaddle AMP的方法也与此类似。若AMP训练过程中出现连续的loss nan等不收敛现象,可尝试使用[check nan inf工具](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/flags/check_nan_inf_cn.html#span-id-speed-span)进行调试。 +上述即为最简单的 PaddlePaddle AMP 功能使用方法。ResNet50 模型的 AMP 训练示例可[点击此处](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README.md#%E6%B7%B7%E5%90%88%E7%B2%BE%E5%BA%A6%E8%AE%AD%E7%BB%83)查看,其他模型使用 PaddlePaddle AMP 的方法也与此类似。若 AMP 训练过程中出现连续的 loss nan 等不收敛现象,可尝试使用[check nan inf 工具](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/flags/check_nan_inf_cn.html#span-id-speed-span)进行调试。 -### PaddlePaddle AMP功能——进阶使用 +### PaddlePaddle AMP 功能——进阶使用 -上一小节所述均为默认AMP训练行为,用户当然也可以改变一些默认的参数设置来满足特定的模型训练场景需求。接下来的章节将介绍PaddlePaddle AMP功能使用中用户可配置的参数行为,即进阶使用技巧。 +上一小节所述均为默认 AMP 训练行为,用户当然也可以改变一些默认的参数设置来满足特定的模型训练场景需求。接下来的章节将介绍 PaddlePaddle AMP 功能使用中用户可配置的参数行为,即进阶使用技巧。 #### 自定义黑白名单 -PaddlePaddle AMP功能实现中根据FP16数据类型计算稳定性和加速效果在框架内部定义了算子(Op)的黑白名单。具体来说,将对FP16计算友好且能利用Tensor Cores的Op归类于白名单,将使用FP16计算会导致数值不稳定的Op归类于黑名单,将对FP16计算没有多少影响的Op归类于灰名单。然而,框架开发人员不可能考虑到所有的网络模型情况,尤其是那些特殊场景中使用到的模型。用户可以在使用`fluid.contrib.mixed_precision.decorate` 函数时通过指定自定义的黑白名单列表来改变默认的FP16计算行为。 +PaddlePaddle AMP 功能实现中根据 FP16 数据类型计算稳定性和加速效果在框架内部定义了算子(Op)的黑白名单。具体来说,将对 FP16 计算友好且能利用 Tensor Cores 的 Op 归类于白名单,将使用 FP16 计算会导致数值不稳定的 Op 归类于黑名单,将对 FP16 计算没有多少影响的 Op 归类于灰名单。然而,框架开发人员不可能考虑到所有的网络模型情况,尤其是那些特殊场景中使用到的模型。用户可以在使用`fluid.contrib.mixed_precision.decorate` 函数时通过指定自定义的黑白名单列表来改变默认的 FP16 计算行为。 ```python sgd = SGDOptimizer(learning_rate=1e-3) -# list1是白名单op列表,list2是黑名单op列表,list3是黑名单var_name列表(凡是以这些黑名单var_name为输入或输出的op均会被视为黑名单op) +# list1 是白名单 op 列表,list2 是黑名单 op 列表,list3 是黑名单 var_name 列表(凡是以这些黑名单 var_name 为输入或输出的 op 均会被视为黑名单 op) amp_list = AutoMixedPrecisionLists(custom_white_list=list1, custom_black_list=list2, 
custom_black_varnames=list3) mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd, amp_list) mp_sgd.minimize(avg_loss) ``` -#### 自动loss scaling +#### 自动 loss scaling -为了避免梯度Infinite或者NAN,PaddlePaddle AMP功能支持根据训练过程中梯度的数值自动调整loss scale值。用户在使用`fluid.contrib.mixed_precision.decorate` 函数时也可以改变与loss scaling相关的参数设置,示例如下: +为了避免梯度 Infinite 或者 NAN,PaddlePaddle AMP 功能支持根据训练过程中梯度的数值自动调整 loss scale 值。用户在使用`fluid.contrib.mixed_precision.decorate` 函数时也可以改变与 loss scaling 相关的参数设置,示例如下: ```python sgd = SGDOptimizer(learning_rate=1e-3) @@ -124,43 +124,43 @@ mp_sgd = fluid.contrib.mixed_precision.decorator.decorate(sgd, mp_sgd.minimize(avg_loss) ``` -`init_loss_scaling `、`incr_every_n_steps` 以及`decr_every_n_nan_or_inf`等参数控制着自动loss scaling的行为。它们仅当 `use_dynamic_loss_scaling`设置为True时有效。下面详述这些参数的意义: +`init_loss_scaling `、`incr_every_n_steps` 以及`decr_every_n_nan_or_inf`等参数控制着自动 loss scaling 的行为。它们仅当 `use_dynamic_loss_scaling`设置为 True 时有效。下面详述这些参数的意义: -* init_loss_scaling(float):初始loss scaling值。 -* incr_every_n_steps(int):每经过incr_every_n_steps个连续的正常梯度值才会增大loss scaling值。 -* decr_every_n_nan_or_inf(int):每经过decr_every_n_nan_or_inf个连续的无效梯度值(nan或者inf)才会减小loss scaling值。 -* incr_ratio(float):每次增大loss scaling值的扩增倍数,其为大于1的浮点数。 -* decr_ratio(float):每次减小loss scaling值的比例系数,其为小于1的浮点数。 +* init_loss_scaling(float):初始 loss scaling 值。 +* incr_every_n_steps(int):每经过 incr_every_n_steps 个连续的正常梯度值才会增大 loss scaling 值。 +* decr_every_n_nan_or_inf(int):每经过 decr_every_n_nan_or_inf 个连续的无效梯度值(nan 或者 inf)才会减小 loss scaling 值。 +* incr_ratio(float):每次增大 loss scaling 值的扩增倍数,其为大于 1 的浮点数。 +* decr_ratio(float):每次减小 loss scaling 值的比例系数,其为小于 1 的浮点数。 -### 多卡GPU训练的优化 +### 多卡 GPU 训练的优化 -PaddlePaddle AMP功能对多卡GPU训练进行了深度优化。如图 2 所示,优化之前的参数梯度更新特点:梯度计算时虽然使用的是FP16数据类型,但是不同GPU卡之间的梯度传输数据类型仍为FP32。 +PaddlePaddle AMP 功能对多卡 GPU 训练进行了深度优化。如图 2 所示,优化之前的参数梯度更新特点:梯度计算时虽然使用的是 FP16 数据类型,但是不同 GPU 卡之间的梯度传输数据类型仍为 FP32。
missing -
图 2. 不同GPU卡之间传输梯度使用FP32数据类型(优化前)
+
图 2. 不同 GPU 卡之间传输梯度使用 FP32 数据类型(优化前)
-为了降低GPU多卡之间的梯度传输带宽,我们将梯度传输提前至`Cast`操作之前,而每个GPU卡在得到对应的FP16梯度后再执行`Cast`操作将其转变为FP32类型,具体操作详见图2。这一优化在训练大模型时对减少带宽占用尤其有效,如多卡训练BERT-Large模型。 +为了降低 GPU 多卡之间的梯度传输带宽,我们将梯度传输提前至`Cast`操作之前,而每个 GPU 卡在得到对应的 FP16 梯度后再执行`Cast`操作将其转变为 FP32 类型,具体操作详见图 2。这一优化在训练大模型时对减少带宽占用尤其有效,如多卡训练 BERT-Large 模型。
missing -
图 3. 不同GPU卡之间传输梯度使用FP16数据类型(优化后)
+
图 3. 不同 GPU 卡之间传输梯度使用 FP16 数据类型(优化后)
### 训练性能对比(AMP VS FP32) -PaddlePaddle AMP技术在ResNet50、Transformer等模型上训练速度相对于FP32训练上均有可观的加速比,下面是ResNet50和ERNIE Large模型的AMP训练相对于FP32训练的加速效果。 +PaddlePaddle AMP 技术在 ResNet50、Transformer 等模型上训练速度相对于 FP32 训练上均有可观的加速比,下面是 ResNet50 和 ERNIE Large 模型的 AMP 训练相对于 FP32 训练的加速效果。
- +
图 4. Paddle AMP训练加速效果(横坐标为卡数,如8*8代表8机8卡)
图 4. Paddle AMP 训练加速效果(横坐标为卡数,如 8*8 代表 8 机 8 卡)
missing missing
-从图4所示的图表可以看出,ResNet50的AMP训练相对与FP32训练加速比可达$2.8 \times$以上,而ERNIE Large的AMP训练相对与FP32训练加速比亦可达 $1.7 \times -- 2.1 \times$ 。 +从图 4 所示的图表可以看出,ResNet50 的 AMP 训练相对与 FP32 训练加速比可达$2.8 \times$以上,而 ERNIE Large 的 AMP 训练相对与 FP32 训练加速比亦可达 $1.7 \times -- 2.1 \times$ 。 ### 参考文献 diff --git a/docs/advanced_guide/performance_improving/inference_improving/paddle_xpu_infer_cn.md b/docs/advanced_guide/performance_improving/inference_improving/paddle_xpu_infer_cn.md index 8faf2504553..e08fdb781e9 100644 --- a/docs/advanced_guide/performance_improving/inference_improving/paddle_xpu_infer_cn.md +++ b/docs/advanced_guide/performance_improving/inference_improving/paddle_xpu_infer_cn.md @@ -1,16 +1,16 @@ # 使用昆仑预测 -百度的昆仑芯⽚是⼀款⾼性能的AI SoC芯⽚,⽀持推理和训练。昆仑芯⽚采⽤百度的先进AI架构,⾮常适合常⽤的深度学习和机器学习算法的云端计算需求,并能适配诸如⾃然语⾔处理、⼤规模语⾳识别、⾃动驾驶、⼤规模推荐等多种终端场景的计算需求。 +百度的昆仑芯⽚是⼀款⾼性能的 AI SoC 芯⽚,⽀持推理和训练。昆仑芯⽚采⽤百度的先进 AI 架构,⾮常适合常⽤的深度学习和机器学习算法的云端计算需求,并能适配诸如⾃然语⾔处理、⼤规模语⾳识别、⾃动驾驶、⼤规模推荐等多种终端场景的计算需求。 -Paddle Inference集成了[Paddle-Lite预测引擎](https://paddle-lite.readthedocs.io/zh/latest/demo_guides/baidu_xpu.html)在昆仑xpu上进行预测部署。 +Paddle Inference 集成了[Paddle-Lite 预测引擎](https://paddle-lite.readthedocs.io/zh/latest/demo_guides/baidu_xpu.html)在昆仑 xpu 上进行预测部署。 ## 编译注意事项 -请确保编译的时候设置了WITH_LITE=ON,且XPU_SDK_ROOT设置了正确的路径。 +请确保编译的时候设置了 WITH_LITE=ON,且 XPU_SDK_ROOT 设置了正确的路径。 ## 使用介绍 -在使用Predictor时,我们通过配置Config中的接口,在XPU上运行。 +在使用 Predictor 时,我们通过配置 Config 中的接口,在 XPU 上运行。 ```c++ config->EnableLiteEngine( @@ -21,12 +21,12 @@ config->EnableLiteEngine( ) ``` -- **`precision_mode`**,类型:`enum class PrecisionType {kFloat32 = 0, kHalf, kInt8,};`, 默认值为`PrecisionType::kFloat32`。指定lite子图的运行精度。 -- **`zero_copy`**,类型:bool,lite子图与Paddle之间的数据传递是否是零拷贝模式。 +- **`precision_mode`**,类型:`enum class PrecisionType {kFloat32 = 0, kHalf, kInt8,};`, 默认值为`PrecisionType::kFloat32`。指定 lite 子图的运行精度。 +- **`zero_copy`**,类型:bool,lite 子图与 Paddle 之间的数据传递是否是零拷贝模式。 - **`passes_filter`**,类型:`std::vector`,默认为空,扩展借口,暂不使用。 -- **`ops_filer`**,类型:`std::vector`,默认为空,显示指定哪些op不使用lite子图运行。 +- 
**`ops_filer`**,类型:`std::vector`,默认为空,显示指定哪些 op 不使用 lite 子图运行。 -Python接口如下: +Python 接口如下: ```python config.enable_lite_engine( @@ -39,9 +39,9 @@ config.enable_lite_engine( ### Python demo -因目前Paddle-Inference目前未将xpu sdk打包到whl包内,所以需要用户下载xpu sdk,并加入到环境变量中,之后会考虑解决该问题。 +因目前 Paddle-Inference 目前未将 xpu sdk 打包到 whl 包内,所以需要用户下载 xpu sdk,并加入到环境变量中,之后会考虑解决该问题。 -下载[xpu_tool_chain](https://paddle-inference-dist.bj.bcebos.com/inference_demo/xpu_tool_chain.tgz),解压后将shlib加入到LD_LIBRARY_PATH +下载[xpu_tool_chain](https://paddle-inference-dist.bj.bcebos.com/inference_demo/xpu_tool_chain.tgz),解压后将 shlib 加入到 LD_LIBRARY_PATH ``` tar xzf xpu_tool_chain.tgz @@ -56,7 +56,7 @@ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/output/XTDK/shlib/:$PWD/output/XTDK python resnet50_subgraph.py --model_file ./ResNet50/model --params_file ./ResNet50/params ``` -resnet50_subgraph.py的内容是: +resnet50_subgraph.py 的内容是: ``` import argparse diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst index 86df9f539c2..c50b4506d21 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice.rst @@ -1,18 +1,18 @@ .. 
_api_guide_cpu_training_best_practice: #################### -分布式CPU训练优秀实践 +分布式 CPU 训练优秀实践 #################### -提高CPU分布式训练的训练速度,主要要从四个方面来考虑: -1)提高训练速度,主要是提高CPU的使用率;2)提高通信速度,主要是减少通信传输的数据量;3)提高数据IO速度;4)更换分布式训练策略,提高分布式训练速度。 +提高 CPU 分布式训练的训练速度,主要要从四个方面来考虑: +1)提高训练速度,主要是提高 CPU 的使用率;2)提高通信速度,主要是减少通信传输的数据量;3)提高数据 IO 速度;4)更换分布式训练策略,提高分布式训练速度。 -提高CPU的使用率 +提高 CPU 的使用率 ============= -提高CPU使用率主要依赖 :code:`ParallelExecutor`,可以充分利用多个CPU的计算能力来加速计算。 +提高 CPU 使用率主要依赖 :code:`ParallelExecutor`,可以充分利用多个 CPU 的计算能力来加速计算。 -API详细使用方法参考 :ref:`cn_api_fluid_ParallelExecutor` ,简单实例用法: +API 详细使用方法参考 :ref:`cn_api_fluid_ParallelExecutor` ,简单实例用法: .. code-block:: python @@ -20,7 +20,7 @@ API详细使用方法参考 :ref:`cn_api_fluid_ParallelExecutor` ,简单实例 exec_strategy = fluid.ExecutionStrategy() exec_strategy.num_threads = 8 - # 配置构图策略,对于CPU训练而言,应该使用Reduce模式进行训练 + # 配置构图策略,对于 CPU 训练而言,应该使用 Reduce 模式进行训练 build_strategy = fluid.BuildStrategy() if int(os.getenv("CPU_NUM")) > 1: build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce @@ -34,13 +34,13 @@ API详细使用方法参考 :ref:`cn_api_fluid_ParallelExecutor` ,简单实例 以上参数中: -- :code:`num_threads` : 模型训练使用的线程数,最好和训练所在机器的物理CPU核数接近 -- :code:`reduce_strategy` : 对于CPU训练而言,应该选择 fluid.BuildStrategy.ReduceStrategy.Reduce +- :code:`num_threads` : 模型训练使用的线程数,最好和训练所在机器的物理 CPU 核数接近 +- :code:`reduce_strategy` : 对于 CPU 训练而言,应该选择 fluid.BuildStrategy.ReduceStrategy.Reduce 通用环境变量配置: -- :code:`CPU_NUM` :模型副本replica的个数,最好和num_threads一致 +- :code:`CPU_NUM` :模型副本 replica 的个数,最好和 num_threads 一致 提高通信速度 @@ -55,17 +55,17 @@ API详细使用方法参考 :ref:`cn_api_fluid_ParallelExecutor` ,简单实例 以上参数中: -- :code:`is_sparse` : 配置embedding使用稀疏更新,如果embedding的dict_size很大,而每次数据data很少,建议使用sparse更新方式。 +- :code:`is_sparse` : 配置 embedding 使用稀疏更新,如果 embedding 的 dict_size 很大,而每次数据 data 很少,建议使用 sparse 更新方式。 -提高数据IO速度 +提高数据 IO 速度 ========== -要提高CPU分布式的数据IO速度,可以首先考虑使用dataset API进行数据读取。 dataset是一种多生产者多消费者模式的数据读取方法,默认情况下耦合数据读取线程与训练线程,在多线程的训练中,dataset表现出极高的性能优势。 +要提高 CPU 分布式的数据 IO 速度,可以首先考虑使用 dataset API 进行数据读取。 
dataset 是一种多生产者多消费者模式的数据读取方法,默认情况下耦合数据读取线程与训练线程,在多线程的训练中,dataset 表现出极高的性能优势。 -API接口介绍可以参考:https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dataset_cn/QueueDataset_cn.html +API 接口介绍可以参考:https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dataset_cn/QueueDataset_cn.html -结合实际的网络,比如CTR-DNN模型,引入的方法可以参考:https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleRec/ctr/dnn +结合实际的网络,比如 CTR-DNN 模型,引入的方法可以参考:https://github.com/PaddlePaddle/models/tree/release/1.7/PaddleRec/ctr/dnn 最后使用 :code:`train_from_dataset` 接口来进行网络的训练: @@ -80,7 +80,7 @@ API接口介绍可以参考:https://www.paddlepaddle.org.cn/documentation/docs 更换分布式训练策略 ========== -CPU分布式训练速度进一步提高的核心在于选择合适的分布式训练策略,比如定义通信策略、编译策略、执行策略等等。paddlepaddle于v1.7版本发布了 :code:`DistributedStrategy` 功能,可以十分灵活且方便的指定分布式运行策略。 +CPU 分布式训练速度进一步提高的核心在于选择合适的分布式训练策略,比如定义通信策略、编译策略、执行策略等等。paddlepaddle 于 v1.7 版本发布了 :code:`DistributedStrategy` 功能,可以十分灵活且方便的指定分布式运行策略。 首先需要在代码中引入相关库: @@ -90,29 +90,29 @@ CPU分布式训练速度进一步提高的核心在于选择合适的分布式 import paddle.fluid.incubate.fleet.base.role_maker as role_maker from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy_factory import DistributedStrategyFactory -然后指定CPU分布式运行的训练策略,目前可选配置有四种:同步训练(Sync)、异步训练(Async)、半异步训练(Half-Async)以及GEO训练。 +然后指定 CPU 分布式运行的训练策略,目前可选配置有四种:同步训练(Sync)、异步训练(Async)、半异步训练(Half-Async)以及 GEO 训练。 -通过如下代码引入上述策略的默认配置,并进行CPU分布式训练: +通过如下代码引入上述策略的默认配置,并进行 CPU 分布式训练: .. 
code-block:: python - # step1: 引入CPU分布式训练策略 + # step1: 引入 CPU 分布式训练策略 # 同步训练策略 strategy = DistributedStrategyFactory.create_sync_strategy() # 半异步训练策略 strategy = DistributedStrategyFactory.create_half_async_strategy() # 异步训练策略 strategy = DistributedStrategyFactory.create_async_strategy() - # GEO训练策略 + # GEO 训练策略 strategy = DistributedStrategyFactory.create_geo_strategy(update_frequency=400) # step2: 定义节点角色 role = role_maker.PaddleCloudRoleMaker() fleet.init(role) - # step3: 分布式训练program构建 - optimizer = fluid.optimizer.SGD(learning_rate) # 以SGD优化器为例 + # step3: 分布式训练 program 构建 + optimizer = fluid.optimizer.SGD(learning_rate) # 以 SGD 优化器为例 optimizer = fleet.distributed_optimizer(optimizer, strategy) optimizer.minimize(loss) @@ -130,9 +130,9 @@ CPU分布式训练速度进一步提高的核心在于选择合适的分布式 fleet.stop_worker() -paddlepaddle支持对训练策略中的细节进行调整: +paddlepaddle 支持对训练策略中的细节进行调整: -- 创建compiled_program所需的build_strategy及exec_strategy可以直接基于strategy获得 +- 创建 compiled_program 所需的 build_strategy 及 exec_strategy 可以直接基于 strategy 获得 .. code-block:: python @@ -142,7 +142,7 @@ paddlepaddle支持对训练策略中的细节进行调整: exec_strategy=strategy.get_execute_strategy()) -- 自定义训练策略细节,支持对DistributeTranspilerConfig、TrainerRuntimeConfig、ServerRuntimeConfig、fluid.ExecutionStrategy、fluid.BuildStrategy进行自定义配置。以DistributeTranspilerConfig为例,修改方式如下所示: +- 自定义训练策略细节,支持对 DistributeTranspilerConfig、TrainerRuntimeConfig、ServerRuntimeConfig、fluid.ExecutionStrategy、fluid.BuildStrategy 进行自定义配置。以 DistributeTranspilerConfig 为例,修改方式如下所示: .. 
code-block:: python @@ -153,7 +153,7 @@ paddlepaddle支持对训练策略中的细节进行调整: config.min_block_size = 81920 - # 方式二:调用set_program_config修改组网相关配置,支持DistributeTranspilerConfig和dict两种数据类型 + # 方式二:调用 set_program_config 修改组网相关配置,支持 DistributeTranspilerConfig 和 dict 两种数据类型 config = DistributeTranspilerConfig() config.min_block_size = 81920 # config = dict() diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst index 3e9b3949631..350606f34ea 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/cpu_train_best_practice_en.rst @@ -116,7 +116,7 @@ The default configuration of the above policy is introduced by the following cod fleet.init(role) # step3: get distributed training program - optimizer = fluid.optimizer.SGD(learning_rate) # 以SGD优化器为例 + optimizer = fluid.optimizer.SGD(learning_rate) # 以 SGD 优化器为例 optimizer = fleet.distributed_optimizer(optimizer, strategy) optimizer.minimize(loss) diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst index f9dbb4bb6fa..ff808bc1d17 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/dist_training_gpu.rst @@ -1,24 +1,24 @@ .. 
_best_practice_dist_training_gpu: ##################### -分布式GPU训练优秀实践 +分布式 GPU 训练优秀实践 ##################### -开始优化您的GPU分布式训练任务 +开始优化您的 GPU 分布式训练任务 --------------------------- -PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分布式训练。通常可以通过以下方法优化在多机多卡环境训练性能,建议在进行性能优化时,检查每项优化点并验证对应提升,从而提升最终的性能。 +PaddlePaddle Fluid 支持在现代 GPU [#]_ 服务器集群上完成高性能分布式训练。通常可以通过以下方法优化在多机多卡环境训练性能,建议在进行性能优化时,检查每项优化点并验证对应提升,从而提升最终的性能。 -一个简单的验证当前的训练程序是否需要进一步优化性能的方法,是查看GPU的计算利用率 [#]_ ,通常用 :code:`nvidia-smi` 命令查看。如果GPU利用率较低,则可能存在较大的优化空间。下面主要从数据准备、训练策略设置和训练方式三个方面介绍GPU分布式训练中常用的优化方法。 +一个简单的验证当前的训练程序是否需要进一步优化性能的方法,是查看 GPU 的计算利用率 [#]_ ,通常用 :code:`nvidia-smi` 命令查看。如果 GPU 利用率较低,则可能存在较大的优化空间。下面主要从数据准备、训练策略设置和训练方式三个方面介绍 GPU 分布式训练中常用的优化方法。 1、数据准备 =========== -数据读取的优化在GPU训练中至关重要,尤其在不断增加batch_size提升吞吐时,计算对reader性能会有更高对要求,优化reader性能需要考虑的点包括: +数据读取的优化在 GPU 训练中至关重要,尤其在不断增加 batch_size 提升吞吐时,计算对 reader 性能会有更高对要求,优化 reader 性能需要考虑的点包括: - - 使用 :code:`DataLoader` 。参考 `这里 `_ 使用DataLoader,并建议开启 :code:`use_double_buffer` 。 - - reader返回uint8类型数据。图片在解码后一般会以uint8类型存储,如果在reader中转换成float类型数据,会将数据体积扩大4倍。直接返回uint8数据,然后在GPU上转化成float类型进行训练可以提升数据读取效率。 - - 减少reader初始化时间 (infinite read)。在训练任务开始执行第一轮训练时,reader开始不断异步地从磁盘或其他存储中读取数据并执行预处理,然后将处理好的数据填充到队列中供计算使用。从0开始填充这个队列直到数据可以源源不断供给计算,需要一定时间的预热。所以,如果每轮训练都重新填充队列,会产生一些时间的开销。所以,在使用DataLoader时,可以让reader函数不断地产生数据,直到训练循环结束: + - 使用 :code:`DataLoader` 。参考 `这里 `_ 使用 DataLoader,并建议开启 :code:`use_double_buffer` 。 + - reader 返回 uint8 类型数据。图片在解码后一般会以 uint8 类型存储,如果在 reader 中转换成 float 类型数据,会将数据体积扩大 4 倍。直接返回 uint8 数据,然后在 GPU 上转化成 float 类型进行训练可以提升数据读取效率。 + - 减少 reader 初始化时间 (infinite read)。在训练任务开始执行第一轮训练时,reader 开始不断异步地从磁盘或其他存储中读取数据并执行预处理,然后将处理好的数据填充到队列中供计算使用。从 0 开始填充这个队列直到数据可以源源不断供给计算,需要一定时间的预热。所以,如果每轮训练都重新填充队列,会产生一些时间的开销。所以,在使用 DataLoader 时,可以让 reader 函数不断地产生数据,直到训练循环结束: .. 
code-block:: python :linenos: @@ -39,7 +39,7 @@ PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分 data_loader.reset() -另外,可以使用DALI库提升数据处理性能。DALI是NVIDIA开发的数据加载库,更多内容请参考 `官网文档 `_ 。飞桨中如何结合使用DALI库请参考 `使用示例 `_ 。 +另外,可以使用 DALI 库提升数据处理性能。DALI 是 NVIDIA 开发的数据加载库,更多内容请参考 `官网文档 `_ 。飞桨中如何结合使用 DALI 库请参考 `使用示例 `_ 。 2、训练策略设置 =========== @@ -50,21 +50,21 @@ PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分 :header: "选项", "类型", "默认值", "说明" :widths: 3, 3, 3, 5 - ":code:`num_threads`", "int", "1", "CPU线程数" - ":code:`nccl_comm_num`", "int", "1", "nccl通信器数量" - ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将AllReduce操纵进行融合" - ":code:`use_hierarchical_allreduce` ", "bool", "False", "分级式reduce" - ":code:`num_iteration_per_drop_scope`", "int", "1", "scope drop频率,设置每隔几个batch的迭代之后执行一次清理scope" - ":code:`fetch_frequency`", "int", "1", "fetch的刷新频率" - ":code:`fuse_bn_act_ops`", "bool", "False", "是否开启batch normalization和激活函数的融合" - ":code:`fuse_elewise_add_act_ops`", "bool", "False", "是否开启elementwise add函数和激活函数的融合" + ":code:`num_threads`", "int", "1", "CPU 线程数" + ":code:`nccl_comm_num`", "int", "1", "nccl 通信器数量" + ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将 AllReduce 操纵进行融合" + ":code:`use_hierarchical_allreduce` ", "bool", "False", "分级式 reduce" + ":code:`num_iteration_per_drop_scope`", "int", "1", "scope drop 频率,设置每隔几个 batch 的迭代之后执行一次清理 scope" + ":code:`fetch_frequency`", "int", "1", "fetch 的刷新频率" + ":code:`fuse_bn_act_ops`", "bool", "False", "是否开启 batch normalization 和激活函数的融合" + ":code:`fuse_elewise_add_act_ops`", "bool", "False", "是否开启 elementwise add 函数和激活函数的融合" 说明: -- 关于设置合适的CPU线程数 :code:`num_threads` 和nccl通信器数量 :code:`nccl_comm_num` 。PaddlePaddle Fluid使用“线程池” [#]_ 模型调度并执行Op,Op在启动GPU计算之前,通常需要CPU的协助,然而如果Op本身占用时间很小,“线程池”模型下又会带来额外的调度开销。使用多进程模式时,如果神经网络的计算图 [#]_ 节点间有较高的并发度,即使每个进程只在一个GPU上运行,使用多个线程可以更大限度的提升GPU利用率。nccl通信器数量 :code:`nccl_comm_num` 可以加快GPU之间的通信效率,建议单机设置为1,多机设置为2。针对CPU线程数 :code:`num_threads` ,建议单机设置为1,多机设置为 :code:`nccl_comm_num` +1。 -- 关于AllReduce融合 :code:`fuse_all_reduce_ops` 
,默认情况下会将同一layer中参数的梯度的AllReduce操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次AllReduce操作,现在只用一次AllReduce 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 和 :code:`FLAGS_fuse_parameter_groups_size` 两个环境变量选项。用户可以指定融合AllReduce操作之后,每个AllReduce操作的梯度字节数,比如希望每次AllReduce调用传输16MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=16` ,经验值为总通信量的十分之一。可以指定每次AllReduce操作的最大层数,即到达该层数就进行AllReduce,如指定50层 :code:`export FLAGS_fuse_parameter_groups_size=50` 。注意:目前不支持sparse参数梯度。 -- 关于使用分级式reduce :code:`use_hierarchical_allreduce` 。对于多机模式,针对小数据量的通信,Ring AllReduce通信效率低,采用Hierarchical AllReduce可以解决该问题。 -- 关于降低scope drop频率 :code:`num_iteration_per_drop_scope` 和fetch频率 :code:`fetch_frequency` 。减少scope drop和fetch频率,可以减少频繁的变量内存申请、释放和拷贝,从而提升性能。 +- 关于设置合适的 CPU 线程数 :code:`num_threads` 和 nccl 通信器数量 :code:`nccl_comm_num` 。PaddlePaddle Fluid 使用“线程池” [#]_ 模型调度并执行 Op,Op 在启动 GPU 计算之前,通常需要 CPU 的协助,然而如果 Op 本身占用时间很小,“线程池”模型下又会带来额外的调度开销。使用多进程模式时,如果神经网络的计算图 [#]_ 节点间有较高的并发度,即使每个进程只在一个 GPU 上运行,使用多个线程可以更大限度的提升 GPU 利用率。nccl 通信器数量 :code:`nccl_comm_num` 可以加快 GPU 之间的通信效率,建议单机设置为 1,多机设置为 2。针对 CPU 线程数 :code:`num_threads` ,建议单机设置为 1,多机设置为 :code:`nccl_comm_num` +1。 +- 关于 AllReduce 融合 :code:`fuse_all_reduce_ops` ,默认情况下会将同一 layer 中参数的梯度的 AllReduce 操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有 Weight 和 Bias 两个参数,打开该选项之后,原本需要两次 AllReduce 操作,现在只用一次 AllReduce 操作。此外,为支持更大粒度的参数梯度融合,Paddle 提供了 :code:`FLAGS_fuse_parameter_memory_size` 和 :code:`FLAGS_fuse_parameter_groups_size` 两个环境变量选项。用户可以指定融合 AllReduce 操作之后,每个 AllReduce 操作的梯度字节数,比如希望每次 AllReduce 调用传输 16MB 的梯度,:code:`export FLAGS_fuse_parameter_memory_size=16` ,经验值为总通信量的十分之一。可以指定每次 AllReduce 操作的最大层数,即到达该层数就进行 AllReduce,如指定 50 层 :code:`export FLAGS_fuse_parameter_groups_size=50` 。注意:目前不支持 sparse 参数梯度。 +- 关于使用分级式 reduce :code:`use_hierarchical_allreduce` 。对于多机模式,针对小数据量的通信,Ring AllReduce 通信效率低,采用 Hierarchical AllReduce 可以解决该问题。 +- 关于降低 scope drop 频率 :code:`num_iteration_per_drop_scope` 和 fetch 频率 :code:`fetch_frequency` 。减少 scope drop 和 
fetch 频率,可以减少频繁的变量内存申请、释放和拷贝,从而提升性能。 - 关于操作融合:通过参数融合可以提升训练性能。 设置这些参数可以参考: @@ -73,12 +73,12 @@ PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分 :linenos: dist_strategy = DistributedStrategy() - dist_strategy.nccl_comm_num = 2 #建议多机设置为2,单机设置为1 + dist_strategy.nccl_comm_num = 2 #建议多机设置为 2,单机设置为 1 exec_strategy = fluid.ExecutionStrategy() - exe_st.num_threads = 3 #建议多机设置为nccl_comm_num+1,单机设置为1 - exec_strategy.num_iteration_per_drop_scope = 30 #scope drop频率 + exe_st.num_threads = 3 #建议多机设置为 nccl_comm_num+1,单机设置为 1 + exec_strategy.num_iteration_per_drop_scope = 30 #scope drop 频率 dist_strategy.exec_strategy = exec_strategy - dist_strategy.fuse_all_reduce_ops = True #AllReduce是否融合 + dist_strategy.fuse_all_reduce_ops = True #AllReduce 是否融合 ... with fluid.program_guard(main_prog, startup_prog): #组网 params = model.params @@ -89,7 +89,7 @@ PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分 for pass_id in range(PASS_NUM): batch_id = 0 while True: - if batch_id % fetch_frequency == 0: #fetch频率 + if batch_id % fetch_frequency == 0: #fetch 频率 fetched = exe.run(main_prog, fetch_list) else: exe.run([]) @@ -100,34 +100,34 @@ PaddlePaddle Fluid支持在现代GPU [#]_ 服务器集群上完成高性能分 1、Local SGD -GPU多机多卡同步训练过程中存在慢trainer现象,即每步中训练快的trainer的同步通信需要等待训练慢的trainer。由于每步中慢trainer的rank具有随机性,因此我们使用局部异步训练的方式——LocalSGD,通过多步异步训练(无通信阻塞)实现慢trainer时间均摊,从而提升同步训练性能。Local SGD训练方式主要有三个参数,分别是: +GPU 多机多卡同步训练过程中存在慢 trainer 现象,即每步中训练快的 trainer 的同步通信需要等待训练慢的 trainer。由于每步中慢 trainer 的 rank 具有随机性,因此我们使用局部异步训练的方式——LocalSGD,通过多步异步训练(无通信阻塞)实现慢 trainer 时间均摊,从而提升同步训练性能。Local SGD 训练方式主要有三个参数,分别是: .. 
csv-table:: :header: "选项", "类型", "可选值", "说明" :widths: 3, 3, 3, 5 - ":code:`use_local_sgd`", "bool", "False/True", "是否开启Local SGD,默认不开启" - ":code:`local_sgd_is_warm_steps`", "int", "大于0", "训练多少轮之后才使用Local SGD方式训练" - ":code:`local_sgd_steps`", "int", "大于0", "Local SGD的步长" + ":code:`use_local_sgd`", "bool", "False/True", "是否开启 Local SGD,默认不开启" + ":code:`local_sgd_is_warm_steps`", "int", "大于 0", "训练多少轮之后才使用 Local SGD 方式训练" + ":code:`local_sgd_steps`", "int", "大于 0", "Local SGD 的步长" 说明: -- Local SGD的warmup步长 :code:`local_sgd_is_warm_steps` 影响最终模型的泛化能力,一般需要等到模型参数稳定之后在进行Local SGD训练,经验值可以将学习率第一次下降时的epoch作为warmup步长,之后再进行Local SGD训练。 -- Local SGD步长 :code:`local_sgd_steps` ,一般该值越大,通信次数越少,训练速度越快,但随之而来的时模型精度下降。经验值设置为2或者4。 +- Local SGD 的 warmup 步长 :code:`local_sgd_is_warm_steps` 影响最终模型的泛化能力,一般需要等到模型参数稳定之后在进行 Local SGD 训练,经验值可以将学习率第一次下降时的 epoch 作为 warmup 步长,之后再进行 Local SGD 训练。 +- Local SGD 步长 :code:`local_sgd_steps` ,一般该值越大,通信次数越少,训练速度越快,但随之而来的时模型精度下降。经验值设置为 2 或者 4。 -具体的Local SGD的训练代码可以参考:https://github.com/PaddlePaddle/FleetX/tree/old_develop/deprecated/examples/local_sgd/resnet +具体的 Local SGD 的训练代码可以参考:https://github.com/PaddlePaddle/FleetX/tree/old_develop/deprecated/examples/local_sgd/resnet 2、使用混合精度训练 -V100 GPU提供了 `Tensor Core `_ 可以在混合精度计算场景极大的提升性能。使用混合精度计算的例子可以参考:https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#using-mixed-precision-training +V100 GPU 提供了 `Tensor Core `_ 可以在混合精度计算场景极大的提升性能。使用混合精度计算的例子可以参考:https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification#using-mixed-precision-training -目前Paddle只提供在两个模型(ResNet, BERT)的混合精度计算实现并支持static loss scaling,其他模型使用混合精度也可以参考以上的实现完成验证。 +目前 Paddle 只提供在两个模型(ResNet, BERT)的混合精度计算实现并支持 static loss scaling,其他模型使用混合精度也可以参考以上的实现完成验证。 附录 ---- -.. [#] 现代GPU:指至少支持运行 `CUDA `_ 版本7.5以上的GPU -.. [#] GPU利用率:这里指GPU计算能力被使用部分所占的百分比 +.. [#] 现代 GPU:指至少支持运行 `CUDA `_ 版本 7.5 以上的 GPU +.. [#] GPU 利用率:这里指 GPU 计算能力被使用部分所占的百分比 .. [#] https://en.wikipedia.org/wiki/Thread_pool .. 
[#] https://en.wikipedia.org/wiki/Data-flow_diagram diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_low_bandwidth_dgc.md b/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_low_bandwidth_dgc.md index 7da97430c9e..16c36ab14a8 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_low_bandwidth_dgc.md +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_low_bandwidth_dgc.md @@ -1,120 +1,120 @@ -# 低配网络的分布式GPU训练 +# 低配网络的分布式 GPU 训练 ## 1. 背景 - 大规模分布式训练需要较高的网络带宽以便进行梯度的聚合更新,这限制了多节点训练时的可扩展性同时也需要昂贵的高带宽设备。在低带宽云网络等环境下进行分布式训练会变得更加糟糕。现有[Deep Gradient Compression](https://arxiv.org/abs/1712.01887)研究表明,分布式SGD中有99.9%的梯度交换都是冗余的,可以使用深度梯度压缩选择重要梯度进行通信来减少通信量,降低对通信带宽的依赖。Paddle目前实现了DGC的稀疏通信方式,可有效在低配网络下进行GPU分布式训练。下面将介绍DGC稀疏通信方式的使用方法、适用场景及基本原理。 + 大规模分布式训练需要较高的网络带宽以便进行梯度的聚合更新,这限制了多节点训练时的可扩展性同时也需要昂贵的高带宽设备。在低带宽云网络等环境下进行分布式训练会变得更加糟糕。现有[Deep Gradient Compression](https://arxiv.org/abs/1712.01887)研究表明,分布式 SGD 中有 99.9%的梯度交换都是冗余的,可以使用深度梯度压缩选择重要梯度进行通信来减少通信量,降低对通信带宽的依赖。Paddle 目前实现了 DGC 的稀疏通信方式,可有效在低配网络下进行 GPU 分布式训练。下面将介绍 DGC 稀疏通信方式的使用方法、适用场景及基本原理。 ## 2. 
使用方法 -`注意:使用DGC请使用1.6.2及其之后版本,之前版本存在有若干bug。` -DGC稀疏通信算法以DGCMomentumOptimizer接口的形式提供,目前只支持GPU多卡及GPU多机分布式,由于现有fuse策略会造成DGC失效,所以使用DGC时需设置`strategy.fuse_all_reduce_ops=False`关闭fuse。DGC只支持Momentum优化器,使用时把当前代码中的Momentum优化器替换为DGCMomentumOptimizer,并添加DGC所需参数即可。如下代码所示,其中rampup_begin_step表示从第几步开始使用DGC,更详细参数可见[api文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/DGCMomentumOptimizer_cn.html#dgcmomentumoptimizer)。 +`注意:使用 DGC 请使用 1.6.2 及其之后版本,之前版本存在有若干 bug。` +DGC 稀疏通信算法以 DGCMomentumOptimizer 接口的形式提供,目前只支持 GPU 多卡及 GPU 多机分布式,由于现有 fuse 策略会造成 DGC 失效,所以使用 DGC 时需设置`strategy.fuse_all_reduce_ops=False`关闭 fuse。DGC 只支持 Momentum 优化器,使用时把当前代码中的 Momentum 优化器替换为 DGCMomentumOptimizer,并添加 DGC 所需参数即可。如下代码所示,其中 rampup_begin_step 表示从第几步开始使用 DGC,更详细参数可见[api 文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn/DGCMomentumOptimizer_cn.html#dgcmomentumoptimizer)。 ``` python import paddle.fluid as fluid # optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) -# 替换Momentum优化器,添加DGC所需参数 +# 替换 Momentum 优化器,添加 DGC 所需参数 optimizer = fluid.optimizer.DGCMomentumOptimizer( learning_rate=0.001, momentum=0.9, rampup_begin_step=0) optimizer.minimize(cost) ``` -在fleet中我们提供了[DGC的示例](https://github.com/PaddlePaddle/FleetX/tree/old_develop/deprecated/examples/dgc_example)。示例中以数字手写体识别为例,将程序移植为分布式版本(注:DGC亦支持单机多卡),再加上DGC优化器。可参照此示例将单机单卡程序迁移到DGC。在单机单卡迁移到DGC过程中,一般需要先对齐多机Momentum的精度,再对齐DGC的精度。 +在 fleet 中我们提供了[DGC 的示例](https://github.com/PaddlePaddle/FleetX/tree/old_develop/deprecated/examples/dgc_example)。示例中以数字手写体识别为例,将程序移植为分布式版本(注:DGC 亦支持单机多卡),再加上 DGC 优化器。可参照此示例将单机单卡程序迁移到 DGC。在单机单卡迁移到 DGC 过程中,一般需要先对齐多机 Momentum 的精度,再对齐 DGC 的精度。 ## 3. 调参&适用场景 ### 3.1 预热调参 -对于正常的训练,使用DGC一般需进行预热训练,否则可能会有精度损失。如下图是ResNet50模型Imagenet数据集的训练结果,未进行预热训练的DGC最终损失了约0.3%的精度。 +对于正常的训练,使用 DGC 一般需进行预热训练,否则可能会有精度损失。如下图是 ResNet50 模型 Imagenet 数据集的训练结果,未进行预热训练的 DGC 最终损失了约 0.3%的精度。

![DGC Resnet50 acc1](images/dgc_resnet50_acc1.png)
-预热训练调参可参照论文的设置。对图像分类,论文在Cifar10和ImageNet数据集上共164和90个epochs的训练中都采用了4个epochs的预热训练。在语言模型PTB数据集上,在共40个epochs的训练中选择了1个epoch进行预热训练。在语音识别AN4数据集上,80个epochs中选择1个epoch进行预热训练。 -论文中使用了75%, 93.75%, 98.4375%, 99.6%, 99.9%稀疏度逐渐提升的策略。由于paddle稀疏梯度聚合通信使用了AllGather,通信量会随卡数增加而增长,所以在卡数较多时不推荐较低稀疏度的预热训练。如75%稀疏度时每张卡会选择25%的梯度进行通信,卡数为32时通信量是正常dense通信的32\*(1-0.75)=8倍,所以前几个epoch使用正常的dense通信为佳。可参照如下写法 +预热训练调参可参照论文的设置。对图像分类,论文在 Cifar10 和 ImageNet 数据集上共 164 和 90 个 epochs 的训练中都采用了 4 个 epochs 的预热训练。在语言模型 PTB 数据集上,在共 40 个 epochs 的训练中选择了 1 个 epoch 进行预热训练。在语音识别 AN4 数据集上,80 个 epochs 中选择 1 个 epoch 进行预热训练。 +论文中使用了 75%, 93.75%, 98.4375%, 99.6%, 99.9%稀疏度逐渐提升的策略。由于 paddle 稀疏梯度聚合通信使用了 AllGather,通信量会随卡数增加而增长,所以在卡数较多时不推荐较低稀疏度的预热训练。如 75%稀疏度时每张卡会选择 25%的梯度进行通信,卡数为 32 时通信量是正常 dense 通信的 32\*(1-0.75)=8 倍,所以前几个 epoch 使用正常的 dense 通信为佳。可参照如下写法 ``` python -# 1. 以1252个step为一个epoch,前2个epochs使用正常dense通信,后3个epochs逐步提升稀疏度为99.9% +# 1. 以 1252 个 step 为一个 epoch,前 2 个 epochs 使用正常 dense 通信,后 3 个 epochs 逐步提升稀疏度为 99.9% optimizer = fluid.optimizer.DGCMomentumOptimizer( learning_rate=0.001, momentum=0.9, rampup_begin_step=1252*2, rampup_step=1252*3, sparsity=[0.984375, 0.996, 0.999]) -# 2. 前面4个epochs都使用dense通信,之后默认0.999稀疏度运行 +# 2. 前面 4 个 epochs 都使用 dense 通信,之后默认 0.999 稀疏度运行 optimizer = fluid.optimizer.DGCMomentumOptimizer( learning_rate=0.001, momentum=0.9, rampup_begin_step=1252*4) ``` -对于Fine-tuning训练,现测试可无需预热训练,从第0个epoch直接使用DGC即可。 +对于 Fine-tuning 训练,现测试可无需预热训练,从第 0 个 epoch 直接使用 DGC 即可。 ``` python -# 从第0步开始DGC稀疏通信 +# 从第 0 步开始 DGC 稀疏通信 optimizer = fluid.optimizer.DGCMomentumOptimizer( learning_rate=0.001, momentum=0.9, rampup_begin_step=0) ``` ### 3.2 适用场景 -DGC稀疏通信在低带宽通信瓶颈时会有较大的性能提升,但在单机多卡及RDMA网络通信并非瓶颈情况下,并不会带来性能上的提升。同时由于AllGather的通信量会随卡数的增多而增大,所以DGC的多机训练规模也不宜过大。故DGC适用于低配网络,同时节点规模不宜过大,如>128张卡。在云网络或高带宽网络设备昂贵时,DGC可有效降低训练成本。 +DGC 稀疏通信在低带宽通信瓶颈时会有较大的性能提升,但在单机多卡及 RDMA 网络通信并非瓶颈情况下,并不会带来性能上的提升。同时由于 AllGather 的通信量会随卡数的增多而增大,所以 DGC 的多机训练规模也不宜过大。故 DGC 适用于低配网络,同时节点规模不宜过大,如>128 张卡。在云网络或高带宽网络设备昂贵时,DGC 可有效降低训练成本。 ## 4. 
原理 本节原理部分基本来自[Deep Gradient Compression](https://arxiv.org/abs/1712.01887)论文,本文进行了部分理解翻译,英文较好者建议直接阅读论文。 ### 4.1 梯度稀疏 -DGC的基本思路是通过只传送重要梯度,即只发送大于给定阈值的梯度来减少通信带宽的使用。为避免信息的丢失,DGC会将剩余梯度在局部累加起来,最终这些梯度会累加大到足以传输。 -换个角度,从理论依据上来看,局部梯度累加等同于随时间推移增加batch size,(DGC相当于每一个梯度有自己的batch size)。设定 $F(w)$ 为需要优化的loss函数,则有着N个训练节点的同步分布式SGD更新公式如下 +DGC 的基本思路是通过只传送重要梯度,即只发送大于给定阈值的梯度来减少通信带宽的使用。为避免信息的丢失,DGC 会将剩余梯度在局部累加起来,最终这些梯度会累加大到足以传输。 +换个角度,从理论依据上来看,局部梯度累加等同于随时间推移增加 batch size,(DGC 相当于每一个梯度有自己的 batch size)。设定 $F(w)$ 为需要优化的 loss 函数,则有着 N 个训练节点的同步分布式 SGD 更新公式如下 $$ F(w)=\\frac{1}{\|\\chi\|}\\sum\_{x\\in\\chi}f(x, w), \\qquad w\_{t+1}=w\_{t}-\\eta\\frac{1}{N b}\\sum\_{k=0}^{N}\\sum\_{x\\in\\mathcal{B}\_{k,t}}\\nabla f\\left(x, w\_{t}\\right) \\tag{1} $$ -其中$\chi$是训练集,$w$是网络权值,$f(x, w)$是每个样本$x \in \chi$的loss,$\eta$是学习率,N是训练节点个数,$\mathcal{B}_{k, t}$代表第$k$个节点在第$t$个迭代时的minibatch,大小为b。 -考虑权重的第i个值,在T次迭代后,可获得 +其中$\chi$是训练集,$w$是网络权值,$f(x, w)$是每个样本$x \in \chi$的 loss,$\eta$是学习率,N 是训练节点个数,$\mathcal{B}_{k, t}$代表第$k$个节点在第$t$个迭代时的 minibatch,大小为 b。 +考虑权重的第 i 个值,在 T 次迭代后,可获得 $$ w\_{t+T}^{(i)}=w\_{t}^{(i)}-\\eta T \\cdot \\frac{1}{N b T} \\sum\_{k=1}^{N}\\left(\\sum\_{\\tau=0}^{T-1} \\sum\_{x \\in \\mathcal{B}\_{k, t+\\tau}} \\nabla^{(i)} f\\left(x, w\_{t+\\tau}\\right)\\right) \\tag{2} $$ -等式2表明局部梯度累加可以被认为batch size从$Nb$增大为$NbT$,其中T是$w^{(i)}$两次更新的稀疏通信间隔。 +等式 2 表明局部梯度累加可以被认为 batch size 从$Nb$增大为$NbT$,其中 T 是$w^{(i)}$两次更新的稀疏通信间隔。 ### 4.2 局部梯度累加改进 -正常情况,稀疏更新会严重影响收敛性。DGC中采用动量修正(Momentum Correction)和局部梯度裁减(local gradient clipping)来解决这个问题。 +正常情况,稀疏更新会严重影响收敛性。DGC 中采用动量修正(Momentum Correction)和局部梯度裁减(local gradient clipping)来解决这个问题。 #### 4.2.1 动量修正 -有着N个节点分布式训练中vanilla momentum SGD公式, +有着 N 个节点分布式训练中 vanilla momentum SGD 公式, $$ u\_{t}=m u\_{t-1}+\\sum\_{k=1}^{N}\\left(\\nabla\_{k, t}\\right), \\quad w\_{t+1}=w\_{t}-\\eta u\_{t} \\tag{3} $$ 其中$m$是动量因子,$N$是节点数,$\nabla_{k, t}=\frac{1}{N b} \sum_{x \in \mathcal{B}_{k, t}} \nabla f\left(x, w_{t}\right)$。 -考虑第i个权重$w^{(i)}$,在T次迭代后,权重更新公式如下, +考虑第 i 个权重$w^{(i)}$,在 T 次迭代后,权重更新公式如下, 
$$ w\_{t+T}^{(i)}=w\_{t}^{(i)}-\\eta\\left[\\cdots+\\left(\\sum\_{\\tau=0}^{T-2} m^{\\tau}\\right) \\nabla\_{k, t+1}^{(i)}+\\left(\\sum\_{\\tau=0}^{T-1} m^{\\tau}\\right) \\nabla\_{k, t}^{(i)}\\right] \\tag{4} $$ -如果直接应用动量SGD到稀疏梯度更新中,则有公式, +如果直接应用动量 SGD 到稀疏梯度更新中,则有公式, $$ v_{k, t}=v_{k, t-1}+\\nabla_{k, t}, \\quad u_{t}=m u_{t-1}+\\sum_{k=1}^{N} \\operatorname{sparse}\\left(v_{k, t}\\right), \\quad w_{t+1}=w_{t}-\\eta u_{t} \\tag{5} $$ -其中$v_k$是训练节点k上的局部梯度累加项,一旦$v_k$大于某一阈值,则会在第二项中压缩梯度进行动量更新,并使用sparse()函数获得mask清空大于阈值的梯度。 -$w^{(i)}$在T次稀疏更新后的权重为, +其中$v_k$是训练节点 k 上的局部梯度累加项,一旦$v_k$大于某一阈值,则会在第二项中压缩梯度进行动量更新,并使用 sparse()函数获得 mask 清空大于阈值的梯度。 +$w^{(i)}$在 T 次稀疏更新后的权重为, $$ w_{t+T}^{(i)}=w_{t}^{(i)}-\\eta\\left(\\cdots+\\nabla_{k, t+1}^{(i)}+\\nabla_{k, t}^{(i)}\\right) \\tag{6} $$ -相比传统动量SGD,方程6缺失了累积衰减因子$\sum_{\tau=0}^{T-1} m^{\tau}$,会导致收敛精度的损失。如下图A,正常梯度更新从A点到B点,但是方程6则从A点到C点。当稀疏度很高时,会显著降低模型性能,所以需要在方程5基础上对梯度进行修正。 +相比传统动量 SGD,方程 6 缺失了累积衰减因子$\sum_{\tau=0}^{T-1} m^{\tau}$,会导致收敛精度的损失。如下图 A,正常梯度更新从 A 点到 B 点,但是方程 6 则从 A 点到 C 点。当稀疏度很高时,会显著降低模型性能,所以需要在方程 5 基础上对梯度进行修正。
-若将方程3中速度项$u_t$当作“梯度”,则方程3第二项可认为是在”梯度“$u_t$上应用传统SGD,前面已经证明了局部梯度累加在传统SGD上是有效的。因此,可以使用方程3局部累加速度项$u_t$而非累加真实的梯度$\nabla_{k, t}$来修正方程5, +若将方程 3 中速度项$u_t$当作“梯度”,则方程 3 第二项可认为是在”梯度“$u_t$上应用传统 SGD,前面已经证明了局部梯度累加在传统 SGD 上是有效的。因此,可以使用方程 3 局部累加速度项$u_t$而非累加真实的梯度$\nabla_{k, t}$来修正方程 5, $$ u_{k, t}=m u_{k, t-1}+\\nabla_{k, t}, \\quad v_{k, t}=v_{k, t-1}+u_{k, t}, \\quad w_{t+1}=w_{t}-\\eta \\sum_{k=1}^{N} \\operatorname{sparse}\\left(v_{k, t}\\right) \\tag{7} $$ -修正后,如上图(b),方程可正常从A点到B点。除了传统动量方程修正,论文还给出了Nesterov动量SGD的修正方程。 +修正后,如上图(b),方程可正常从 A 点到 B 点。除了传统动量方程修正,论文还给出了 Nesterov 动量 SGD 的修正方程。 #### 4.2.2 局部梯度修剪 -梯度修剪是防止梯度爆炸的常用方法。这方法由Pascanu等人在2013年提出,当梯度的l2-norms和大于给定阈值时,就对梯度rescale。正常梯度修剪在梯度聚合后使用,而DGC因为每个节点独立的进行局部梯度累加,所以DGC在使用$G_t$累加前对其进行局部梯度修剪。阈值缩放为原来的$N^{-1/2}$ +梯度修剪是防止梯度爆炸的常用方法。这方法由 Pascanu 等人在 2013 年提出,当梯度的 l2-norms 和大于给定阈值时,就对梯度 rescale。正常梯度修剪在梯度聚合后使用,而 DGC 因为每个节点独立的进行局部梯度累加,所以 DGC 在使用$G_t$累加前对其进行局部梯度修剪。阈值缩放为原来的$N^{-1/2}$ $$ thr_{G^{k}}=N^{-1 / 2} \\cdot thr_{G} \\tag{8} $$ ### 4.3 克服迟滞效应 -因为推迟了较小梯度更新权重的时间,所以会有权重陈旧性问题。稀疏度为99.9%时大部分参数需600到1000步更新一次。迟滞效应会减缓收敛并降低模型精度。DGC中采用动量因子掩藏和预热训练来解决这问题。 +因为推迟了较小梯度更新权重的时间,所以会有权重陈旧性问题。稀疏度为 99.9%时大部分参数需 600 到 1000 步更新一次。迟滞效应会减缓收敛并降低模型精度。DGC 中采用动量因子掩藏和预热训练来解决这问题。 #### 4.3.1 动量因子掩藏 -DGC中使用下面方程来掩藏动量因子减缓陈旧性问题。 +DGC 中使用下面方程来掩藏动量因子减缓陈旧性问题。 $$ Mask \\leftarrow\\left|v_{k, t}\\right|>t h r, \\quad v_{k, t} \\leftarrow v_{k, t} \\odot \\neg Mask, \\quad u_{k, t} \\leftarrow u_{k, t} \\odot \\neg Mask \\tag{9} $$ 此掩码可以停止延迟梯度产生的动量,防止陈旧梯度把权重引入错误的方向。 #### 4.3.2 预热训练 -在训练初期,梯度变动剧烈,需要及时更新权重,此时迟滞效应影响会很大。为此DGC采用预热训练的方法,在预热期间使用更小的学习率来减缓网络的变化速度,并使用较小的稀疏度来减少需推迟更新的梯度数量。预热期间会线性增大学习率,指数型增加稀疏度到最终值。 +在训练初期,梯度变动剧烈,需要及时更新权重,此时迟滞效应影响会很大。为此 DGC 采用预热训练的方法,在预热期间使用更小的学习率来减缓网络的变化速度,并使用较小的稀疏度来减少需推迟更新的梯度数量。预热期间会线性增大学习率,指数型增加稀疏度到最终值。 ### 4.4 正则化(Weight Decay)项修正 -Paddle框架以Weight Decay的形式实现正则化。以L2Decay为例,公式(3)中传统momentum添加weight decay后公式为 +Paddle 框架以 Weight Decay 的形式实现正则化。以 L2Decay 为例,公式(3)中传统 momentum 添加 weight decay 后公式为 $$ G_{t}=\\sum_{k=1}^{N}\\left(\\nabla_{k, 
t}\\right)+\\lambda w_{t}, \\quad u_{t}=m u_{t-1}+G_{t}, \\quad w_{t+1}=w_{t}-\\eta u_{t} \\tag{10} $$ -其中$\lambda$为Weight Decay系数,$G_{t}$为添加L2Decay项之后的聚合梯度。由于在公式7中进行了局部动量修正,所以按照相同思路在局部梯度上运用修正的Weight Decay项。如下公式在局部梯度上添加局部Weight Decay项即可。 +其中$\lambda$为 Weight Decay 系数,$G_{t}$为添加 L2Decay 项之后的聚合梯度。由于在公式 7 中进行了局部动量修正,所以按照相同思路在局部梯度上运用修正的 Weight Decay 项。如下公式在局部梯度上添加局部 Weight Decay 项即可。 $$ \\nabla_{k, t}=\\nabla_{k, t}+\\frac{\\lambda}{N} w_{t} \\tag{11} $$ -在模型实际训练中,通常会设置weight decay的系数$\lambda=10^{-4}$,在卡数较多如4机32卡的情况下局部weight decay系数为$\frac{\lambda}{N}=\frac{10^{-4}}{32}=3.125*10^{-6}$,在数值精度上偏低,测试训练时会损失一定精度。为此还需对局部weight decay项进行数值修正。如下公式, +在模型实际训练中,通常会设置 weight decay 的系数$\lambda=10^{-4}$,在卡数较多如 4 机 32 卡的情况下局部 weight decay 系数为$\frac{\lambda}{N}=\frac{10^{-4}}{32}=3.125*10^{-6}$,在数值精度上偏低,测试训练时会损失一定精度。为此还需对局部 weight decay 项进行数值修正。如下公式, $$ \\nabla_{k, t}^{'}=N \\nabla_{k, t}+\\lambda w_{t}, \\quad G_{t}^{'}=\\sum_{k=1}^{N}\\left(\\nabla_{k, t}^{'}\\right)=N\\sum_{k=1}^{N}\\left(\\nabla_{k, t}\\right)+N\\lambda w_{t}, \\quad diff --git a/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst b/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst index a43fc3ebcbf..8841973c398 100644 --- a/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst +++ b/docs/advanced_guide/performance_improving/multinode_training_improving/gpu_training_with_recompute.rst @@ -1,11 +1,11 @@ -重计算:大Batch训练特性 +重计算:大 Batch 训练特性 ============= 背景 --------- -随着训练数据规模的逐渐增加,训练更大、更深的深度学习模型成为一个主流趋势。目前的深度学习模型训练,通常要求保留前向计算的隐层结果,并且需要保存结果的数量会随着模型层数的增加线性增加,这对于目前能够使用的AI芯片的内存大小是个挑战。Forward Recomputation Backpropagation(FRB)可以在额外增加少量计算的情况下,显著增加模型的层数和宽度,同时也可以显著提升模型训练的batch大小。 +随着训练数据规模的逐渐增加,训练更大、更深的深度学习模型成为一个主流趋势。目前的深度学习模型训练,通常要求保留前向计算的隐层结果,并且需要保存结果的数量会随着模型层数的增加线性增加,这对于目前能够使用的 AI 芯片的内存大小是个挑战。Forward Recomputation 
Backpropagation(FRB)可以在额外增加少量计算的情况下,显著增加模型的层数和宽度,同时也可以显著提升模型训练的 batch 大小。 原理 --------- @@ -16,13 +16,13 @@ - **反向计算**:运行反向算子来计算参数(Parameter)的梯度 - **优化**:应用优化算法以更新参数值 -在前向计算过程中,前向算子会输出大量的中间计算结果,在Paddle中,使用 -Variable来存储这些隐层的中间结果。当模型层数加深时,其数量可达成千上万个, -占据大量的内存。Paddle的 `显存回收机制 `_ +在前向计算过程中,前向算子会输出大量的中间计算结果,在 Paddle 中,使用 +Variable 来存储这些隐层的中间结果。当模型层数加深时,其数量可达成千上万个, +占据大量的内存。Paddle 的 `显存回收机制 `_ 会及时清除无用的中间结果,以节省存储。 -然而,有些中间结果是反向算子的输入,这些Variable必须存储在内存中,直到相应的反向算子计算完毕。 +然而,有些中间结果是反向算子的输入,这些 Variable 必须存储在内存中,直到相应的反向算子计算完毕。 -举个简单的例子, 我们定义一个由mul算子构成的网络,其前向计算为: +举个简单的例子, 我们定义一个由 mul 算子构成的网络,其前向计算为: .. math:: @@ -37,43 +37,43 @@ Variable来存储这些隐层的中间结果。当模型层数加深时,其数 可以看到反向计算中用到了前向计算生成的变量 :math:`y` ,因此变量 :math:`y` 必须存储在内存中,直到这个反向算子计算完毕。当模型加深时,我们会有大量的“ :math:`y` ”,占据了大量的内存。 -Forward Recomputation Backpropagation(FRB)的思想是将深度学习网络切分为k个部分(segments)。对每个segment而言:前向计算时,除了小部分必须存储在内存中的Variable外(我们后续会讨论这些特殊Variable),其他中间结果都将被删除;在反向计算中,首先重新计算一遍前向算子,以获得中间结果,再运行反向算子。简而言之,FRB和普通的网络迭代相比,多计算了一遍前向算子。 +Forward Recomputation Backpropagation(FRB)的思想是将深度学习网络切分为 k 个部分(segments)。对每个 segment 而言:前向计算时,除了小部分必须存储在内存中的 Variable 外(我们后续会讨论这些特殊 Variable),其他中间结果都将被删除;在反向计算中,首先重新计算一遍前向算子,以获得中间结果,再运行反向算子。简而言之,FRB 和普通的网络迭代相比,多计算了一遍前向算子。 -我们把切分网络的变量叫做checkpoints。 -那么问题来了,如何选择checkpoints呢?自从FRB方法提出以来 \ :sup:`[1], [2]`,大量学者在研究这一关键问题。 -我们知道深度学习网络通常是由一个个模块串联得到的,比如ResNet-50由16个block串联而成, -Bert-Large由24个transformer串联而成,以两个子模块中间的变量作为切分点就是一个很好的选择。 -对于非串联的网络(比如含有大量shortcut结构的网络),FRB也支持对其做切分, -只是可能多耗费一点内存(用于存储shortcut的Variable)。 +我们把切分网络的变量叫做 checkpoints。 +那么问题来了,如何选择 checkpoints 呢?自从 FRB 方法提出以来 \ :sup:`[1], [2]`,大量学者在研究这一关键问题。 +我们知道深度学习网络通常是由一个个模块串联得到的,比如 ResNet-50 由 16 个 block 串联而成, +Bert-Large 由 24 个 transformer 串联而成,以两个子模块中间的变量作为切分点就是一个很好的选择。 +对于非串联的网络(比如含有大量 shortcut 结构的网络),FRB 也支持对其做切分, +只是可能多耗费一点内存(用于存储 shortcut 的 Variable)。 Mitsuru Kusumoto \ :sup:`[3]` 等提出了一种基于动态规划的算法, -可以根据指定的内存自动搜索合适的checkpoints,支持各种各样的网络结构。 +可以根据指定的内存自动搜索合适的 checkpoints,支持各种各样的网络结构。 -下图是由4个fc Layer、3个relu Layer、1个sigmoid Layer和1个log-loss 
Layer串联而成的一个网络:最左侧为其前向计算流程、中间是普通的前向计算和反向计算流程、最右侧为添加FRB后的前向计算和反向计算流程。其中方框代表算子(Operator),红点代表前向计算的中间结果、蓝点代表checkpoints。 +下图是由 4 个 fc Layer、3 个 relu Layer、1 个 sigmoid Layer 和 1 个 log-loss Layer 串联而成的一个网络:最左侧为其前向计算流程、中间是普通的前向计算和反向计算流程、最右侧为添加 FRB 后的前向计算和反向计算流程。其中方框代表算子(Operator),红点代表前向计算的中间结果、蓝点代表 checkpoints。 .. image:: images/recompute.png 注:该例子完整代码位于 `source `_ -添加FRB后,前向计算中需要存储的中间Variable从4个(红点)变为2个(蓝点), +添加 FRB 后,前向计算中需要存储的中间 Variable 从 4 个(红点)变为 2 个(蓝点), 从而节省了这部分内存。当然了,重计算的部分也产生了新的中间变量, 这就需要根据实际情况来做权衡了。这个例子里的网络比较浅,通常来讲, -对层数较深的网络,FRB节省的内存要远多于新增加的内存。 +对层数较深的网络,FRB 节省的内存要远多于新增加的内存。 使用方法 --------- -我们实现了基于Paddle的FRB算法,叫做RecomputeOptimizer, +我们实现了基于 Paddle 的 FRB 算法,叫做 RecomputeOptimizer, 您可以根据其 `源码 `_ 与 `文档 `_ -更深入地了解这一算法。我们为用户提供了两个使用RecomputeOptimizer的方法: -直接调用与Fleet API中使用。在单机单卡或者CPU训练中建议您直接调用RecomputeOptimizer, -在多卡训练或者多机训练任务上建议您在Fleet API中使用Recompute。 +更深入地了解这一算法。我们为用户提供了两个使用 RecomputeOptimizer 的方法: +直接调用与 Fleet API 中使用。在单机单卡或者 CPU 训练中建议您直接调用 RecomputeOptimizer, +在多卡训练或者多机训练任务上建议您在 Fleet API 中使用 Recompute。 **1. 直接调用** -直接调用RecomputeOptimizer非常简单,首先要定义一个经典的Optimizer,比如Adam; -然后在外面包一层RecomputeOptimizer;最后设置checkpoints即可。 +直接调用 RecomputeOptimizer 非常简单,首先要定义一个经典的 Optimizer,比如 Adam; +然后在外面包一层 RecomputeOptimizer;最后设置 checkpoints 即可。 .. code-block:: python @@ -89,25 +89,25 @@ Mitsuru Kusumoto \ :sup:`[3]` 等提出了一种基于动态规划的算法, input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') cost, fc_1, pred = mlp(input_x, input_y) - # 定义RecomputeOptimizer + # 定义 RecomputeOptimizer sgd = fluid.optimizer.Adam(learning_rate=0.01) sgd = fluid.optimizer.RecomputeOptimizer(sgd) - # 设置checkpoints + # 设置 checkpoints sgd._set_checkpoints([fc_1, pred]) # 运行优化算法 sgd.minimize(cost) -Recompute原则上适用于所有Optimizer。 +Recompute 原则上适用于所有 Optimizer。 -**2. 在Fleet API中使用Recompute** +**2. 
在 Fleet API 中使用 Recompute** `Fleet API `_ -是基于Fluid的分布式计算高层API。在Fleet API中添加RecomputeOptimizer -仅需要2步: +是基于 Fluid 的分布式计算高层 API。在 Fleet API 中添加 RecomputeOptimizer +仅需要 2 步: -- 设置dist_strategy.forward_recompute为True; +- 设置 dist_strategy.forward_recompute 为 True; -- 设置dist_strategy.recompute_checkpoints。 +- 设置 dist_strategy.recompute_checkpoints。 .. code-block:: python @@ -118,36 +118,36 @@ Recompute原则上适用于所有Optimizer。 optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy) optimizer.minimize(loss) -为了帮助您快速地用Fleet API使用Recompute任务,我们提供了一些例子, +为了帮助您快速地用 Fleet API 使用 Recompute 任务,我们提供了一些例子, 并且给出了这些例子的计算速度、效果和显存节省情况: -- 用Recompute做Bert Fine-tuning: `source `_ +- 用 Recompute 做 Bert Fine-tuning: `source `_ -- 用Recompute做目标检测:开发中. +- 用 Recompute 做目标检测:开发中. Q&A ------- -- **是否支持带有随机性的Op?** +- **是否支持带有随机性的 Op?** - 目前Paddle中带随机性的Op有:dropout,Recompute支持 + 目前 Paddle 中带随机性的 Op 有:dropout,Recompute 支持 dropout Operator,可以保证重计算与初次计算结果保持一致。 -- **有没有更多Recompute的官方例子?** +- **有没有更多 Recompute 的官方例子?** - 更多Recompute的例子将更新在 `examples `_ + 更多 Recompute 的例子将更新在 `examples `_ 和 `Fleet `_ 库下,欢迎关注。 -- **有没有添加checkpoints的建议?** +- **有没有添加 checkpoints 的建议?** - 我们建议将子网络连接部分的变量添加为checkpoints,即: - 如果一个变量能将网络完全分为前后两部分,那么建议将其加入checkpoints。 - checkpoints的数目会影响内存的消耗:如果checkpoints很少, - 那么Recompute起的作用有限;如果checkpoints数量过多, - 那么checkpoints本身占用的内存量就较大,内存消耗可能不降反升。 + 我们建议将子网络连接部分的变量添加为 checkpoints,即: + 如果一个变量能将网络完全分为前后两部分,那么建议将其加入 checkpoints。 + checkpoints 的数目会影响内存的消耗:如果 checkpoints 很少, + 那么 Recompute 起的作用有限;如果 checkpoints 数量过多, + 那么 checkpoints 本身占用的内存量就较大,内存消耗可能不降反升。 我们后续会添加一个估算内存用量的工具, - 可以对每个Operator运算前后的显存用量做可视化, + 可以对每个 Operator 运算前后的显存用量做可视化, 帮助用户定位问题。 [1] Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin . Training deep nets with sublinear memory cost. 
diff --git a/docs/api/index_cn.rst b/docs/api/index_cn.rst index d72492835c6..2d2a7a796b4 100644 --- a/docs/api/index_cn.rst +++ b/docs/api/index_cn.rst @@ -4,76 +4,76 @@ API 文档 欢迎使用飞桨框架(PaddlePaddle), PaddlePaddle 是一个易用、高效、灵活、可扩展的深度学习框架,致力于让深度学习技术的创新与应用更简单。 -在本版本中,飞桨框架对API做了许多优化,您可以参考下表来了解飞桨框架最新版的API目录结构与说明。更详细的说明,请参见 `版本说明 <../release_note_cn.html>`_ 。此外,您可参考PaddlePaddle的 `GitHub `_ 了解详情。 +在本版本中,飞桨框架对 API 做了许多优化,您可以参考下表来了解飞桨框架最新版的 API 目录结构与说明。更详细的说明,请参见 `版本说明 <../release_note_cn.html>`_ 。此外,您可参考 PaddlePaddle 的 `GitHub `_ 了解详情。 -**注: paddle.fluid.\*, paddle.dataset.\* 会在未来的版本中废弃,请您尽量不要使用这两个目录下的API。** +**注: paddle.fluid.\*, paddle.dataset.\* 会在未来的版本中废弃,请您尽量不要使用这两个目录下的 API。** +-------------------------------+-------------------------------------------------------+ -| 目录 | 功能和包含的API | +| 目录 | 功能和包含的 API | +===============================+=======================================================+ | paddle.\* | paddle | -| | 根目录下保留了常用API的别名,包括:paddle.tensor, | -| | paddle.framework, paddle.device 目录下的所有API | +| | 根目录下保留了常用 API 的别名,包括:paddle.tensor, | +| | paddle.framework, paddle.device 目录下的所有 API | +-------------------------------+-------------------------------------------------------+ -| paddle.tensor | Tensor操作相关的API,包括 创建zeros, | -| | 矩阵运算matmul, 变换concat, 计算add, 查找argmax等 | +| paddle.tensor | Tensor 操作相关的 API,包括 创建 zeros, | +| | 矩阵运算 matmul, 变换 concat, 计算 add, 查找 argmax 等 | +-------------------------------+-------------------------------------------------------+ -| paddle.framework | 框架通用API和动态图模式的API,包括 no_grad 、 | +| paddle.framework | 框架通用 API 和动态图模式的 API,包括 no_grad 、 | | | save 、 load 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.device | 设备管理相关API,包括 set_device, get_device 等。 | +| paddle.device | 设备管理相关 API,包括 set_device, get_device 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.linalg | 线性代数相关API,包括 det, svd 等。 | +| paddle.linalg | 线性代数相关 
API,包括 det, svd 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.fft | 快速傅里叶变换的相关API,包括 fft, fft2 等。 | +| paddle.fft | 快速傅里叶变换的相关 API,包括 fft, fft2 等。 | +-------------------------------+-------------------------------------------------------+ | paddle.amp | 自动混合精度策略,包括 auto_cast 、 | | | GradScaler 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.autograd | 自动求导相关API,包括 backward、PyLayer 等。 | +| paddle.autograd | 自动求导相关 API,包括 backward、PyLayer 等。 | +-------------------------------+-------------------------------------------------------+ | paddle.callbacks | 日志回调类,包括 ModelCheckpoint 、 | | | ProgBarLogger 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.distributed | 分布式相关基础API。 | +| paddle.distributed | 分布式相关基础 API。 | +-------------------------------+-------------------------------------------------------+ -| paddle.distributed.fleet | 分布式相关高层API。 | +| paddle.distributed.fleet | 分布式相关高层 API。 | +-------------------------------+-------------------------------------------------------+ -| paddle.hub | 模型拓展相关的API,包括 list、load、help 等。 | +| paddle.hub | 模型拓展相关的 API,包括 list、load、help 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.io | 数据输入输出相关API,包括 Dataset, DataLoader 等。 | +| paddle.io | 数据输入输出相关 API,包括 Dataset, DataLoader 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.jit | 动态图转静态图相关API,包括 to_static、 | +| paddle.jit | 动态图转静态图相关 API,包括 to_static、 | | | ProgramTranslator、TracedLayer 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.metric | 评估指标计算相关的API,包括 Accuracy, Auc等。 | +| paddle.metric | 评估指标计算相关的 API,包括 Accuracy, Auc 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.nn | 组网相关的API,包括 
Linear 、卷积 Conv2D 、 | +| paddle.nn | 组网相关的 API,包括 Linear 、卷积 Conv2D 、 | | | 循环神经网络 RNN 、损失函数 CrossEntropyLoss 、 | | | 激活函数 ReLU 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.onnx | paddle转换为onnx协议相关API,包括 export 等。 | +| paddle.onnx | paddle 转换为 onnx 协议相关 API,包括 export 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.optimizer | 优化算法相关API,包括 SGD,Adagrad, Adam 等。 | +| paddle.optimizer | 优化算法相关 API,包括 SGD,Adagrad, Adam 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.optimizer.lr | 学习率衰减相关API,包括 NoamDecay 、 StepDecay 、 | +| paddle.optimizer.lr | 学习率衰减相关 API,包括 NoamDecay 、 StepDecay 、 | | | PiecewiseDecay 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.regularizer | 正则化相关API,包括 L1Decay、L2Decay 等。 | +| paddle.regularizer | 正则化相关 API,包括 L1Decay、L2Decay 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.static | 静态图下基础框架相关API,包括 Variable, Program, | -| | Executor等 | +| paddle.static | 静态图下基础框架相关 API,包括 Variable, Program, | +| | Executor 等 | +-------------------------------+-------------------------------------------------------+ -| paddle.static.nn | 静态图下组网专用API,包括 全连接层 fc 、控制流 | +| paddle.static.nn | 静态图下组网专用 API,包括 全连接层 fc 、控制流 | | | while_loop/cond 。 | +-------------------------------+-------------------------------------------------------+ -| paddle.text | NLP领域API,包括NLP领域相关的数据集, | +| paddle.text | NLP 领域 API,包括 NLP 领域相关的数据集, | | | 如 Imdb 、 Movielens 。 | +-------------------------------+-------------------------------------------------------+ -| paddle.utils | 工具类相关API,包括 CppExtension、CUDAExtension 等。 | +| paddle.utils | 工具类相关 API,包括 CppExtension、CUDAExtension 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.vision | 视觉领域API,包括 数据集 
Cifar10 、数据处理 | +| paddle.vision | 视觉领域 API,包括 数据集 Cifar10 、数据处理 | | | ColorJitter、常用基础网络结构 ResNet 等。 | +-------------------------------+-------------------------------------------------------+ -| paddle.sparse | 稀疏领域的API。 | +| paddle.sparse | 稀疏领域的 API。 | +-------------------------------+-------------------------------------------------------+ diff --git a/docs/api/paddle/CUDAPlace_cn.rst b/docs/api/paddle/CUDAPlace_cn.rst index db3a51d3929..077a4f1a24d 100644 --- a/docs/api/paddle/CUDAPlace_cn.rst +++ b/docs/api/paddle/CUDAPlace_cn.rst @@ -10,7 +10,7 @@ CUDAPlace ``CUDAPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 GPU 设备。 -每个 ``CUDAPlace`` 有一个 ``dev_id`` (设备id)来表明当前的 ``CUDAPlace`` 所代表的显卡编号,编号从 0 开始。 +每个 ``CUDAPlace`` 有一个 ``dev_id`` (设备 id)来表明当前的 ``CUDAPlace`` 所代表的显卡编号,编号从 0 开始。 ``dev_id`` 不同的 ``CUDAPlace`` 所对应的内存不可相互访问。 这里编号指的是可见显卡的逻辑编号,而不是显卡实际的编号。 可以通过 ``CUDA_VISIBLE_DEVICES`` 环境变量限制程序能够使用的 GPU 设备,程序启动时会遍历当前的可见设备,并从 0 开始为这些设备编号。 @@ -19,7 +19,7 @@ CUDAPlace 参数 :::::::::::: - - **id** (int,可选) - GPU的设备ID。如果为 ``None``,则默认会使用 id 为 0 的设备。默认值为 ``None``。 + - **id** (int,可选) - GPU 的设备 ID。如果为 ``None``,则默认会使用 id 为 0 的设备。默认值为 ``None``。 代码示例 :::::::::::: diff --git a/docs/api/paddle/DataParallel_cn.rst b/docs/api/paddle/DataParallel_cn.rst index 1e804092fca..d34cda1777d 100644 --- a/docs/api/paddle/DataParallel_cn.rst +++ b/docs/api/paddle/DataParallel_cn.rst @@ -26,10 +26,10 @@ DataParallel :::::::::::: - **Layer** (Layer) - 需要通过数据并行方式执行的模型。 - - **strategy** (ParallelStrategy,可选) - (deprecated) 数据并行的策略,包括并行执行的环境配置。默认为None。 - - **comm_buffer_size** (int,可选) - 它是通信调用(如NCCLAllReduce)时,参数梯度聚合为一组的内存大小(MB)。默认值:25。 + - **strategy** (ParallelStrategy,可选) - (deprecated) 数据并行的策略,包括并行执行的环境配置。默认为 None。 + - **comm_buffer_size** (int,可选) - 它是通信调用(如 NCCLAllReduce)时,参数梯度聚合为一组的内存大小(MB)。默认值:25。 - **last_comm_buffer_size** (float,可选)它限制通信调用中最后一个缓冲区的内存大小(MB)。减小最后一个通信缓冲区的大小有助于提高性能。默认值:1。默认值:1 - - **find_unused_parameters** (bool,可选) 
是否在模型forward函数的返回值的所有张量中,遍历整个向后图。对于不包括在loss计算中的参数,其梯度将被预先标记为ready状态用于后续多卡间的规约操作。请注意,模型参数的所有正向输出必须参与loss的计算以及后续的梯度计算。否则,将发生严重错误。请注意,将find_unused_parameters设置为True会影响计算性能,因此,如果确定所有参数都参与了loss计算和自动反向图的构建,请将其设置为False。默认值:False。 + - **find_unused_parameters** (bool,可选) 是否在模型 forward 函数的返回值的所有张量中,遍历整个向后图。对于不包括在 loss 计算中的参数,其梯度将被预先标记为 ready 状态用于后续多卡间的规约操作。请注意,模型参数的所有正向输出必须参与 loss 的计算以及后续的梯度计算。否则,将发生严重错误。请注意,将 find_unused_parameters 设置为 True 会影响计算性能,因此,如果确定所有参数都参与了 loss 计算和自动反向图的构建,请将其设置为 False。默认值:False。 返回 :::::::::::: @@ -40,7 +40,7 @@ DataParallel COPY-FROM: paddle.DataParallel:dp-example .. Note:: - 目前数据并行不支持PyLayer自定义算子。如有此类需求,推荐先使用no_sync接口暂停多卡通信,然后在优化器前手动实现梯度同步;具体实现过程可参考下述示例。 + 目前数据并行不支持 PyLayer 自定义算子。如有此类需求,推荐先使用 no_sync 接口暂停多卡通信,然后在优化器前手动实现梯度同步;具体实现过程可参考下述示例。 代码示例 :::::::::::: @@ -48,7 +48,7 @@ COPY-FROM: paddle.DataParallel:dp-pylayer-example .. py:function:: no_sync() -用于暂停梯度同步的上下文管理器。在no_sync()中参数梯度只会在模型上累加;直到with之外的第一个forward-backward,梯度才会被同步。 +用于暂停梯度同步的上下文管理器。在 no_sync()中参数梯度只会在模型上累加;直到 with 之外的第一个 forward-backward,梯度才会被同步。 代码示例 :::::::::::: @@ -60,15 +60,15 @@ COPY-FROM: paddle.DataParallel.no_sync state_dict(destination=None, include_sublayers=True) ''''''''' -获取当前层及其子层的所有parameters和持久的buffers。并将所有parameters和buffers存放在dict结构中。 +获取当前层及其子层的所有 parameters 和持久的 buffers。并将所有 parameters 和 buffers 存放在 dict 结构中。 **参数** - - **destination** (dict,可选) - 如果提供 ``destination``,则所有参数和持久的buffers都将存放在 ``destination`` 中。默认值:None。 - - **include_sublayers** (bool,可选) - 如果设置为True,则包括子层的参数和buffers。默认值:True。 + - **destination** (dict,可选) - 如果提供 ``destination``,则所有参数和持久的 buffers 都将存放在 ``destination`` 中。默认值:None。 + - **include_sublayers** (bool,可选) - 如果设置为 True,则包括子层的参数和 buffers。默认值:True。 **返回** -dict,包含所有parameters和持久的buffers的dict。 +dict,包含所有 parameters 和持久的 buffers 的 dict。 **代码示例** @@ -78,12 +78,12 @@ COPY-FROM: paddle.DataParallel.state_dict set_state_dict(state_dict, use_structured_name=True) ''''''''' -根据传入的 ``state_dict`` 设置parameters和持久的buffers。所有parameters和buffers将由 
``state_dict`` 中的 ``Tensor`` 设置。 +根据传入的 ``state_dict`` 设置 parameters 和持久的 buffers。所有 parameters 和 buffers 将由 ``state_dict`` 中的 ``Tensor`` 设置。 **参数** - - **state_dict** (dict) - 包含所有parameters和可持久性buffers的dict。 - - **use_structured_name** (bool,可选) - 如果设置为True,将使用Layer的结构性变量名作为dict的key,否则将使用Parameter或者Buffer的变量名作为key。默认值:True。 + - **state_dict** (dict) - 包含所有 parameters 和可持久性 buffers 的 dict。 + - **use_structured_name** (bool,可选) - 如果设置为 True,将使用 Layer 的结构性变量名作为 dict 的 key,否则将使用 Parameter 或者 Buffer 的变量名作为 key。默认值:True。 **返回** diff --git a/docs/api/paddle/Model_cn.rst b/docs/api/paddle/Model_cn.rst index 40911e04da8..ce3ae79c04e 100644 --- a/docs/api/paddle/Model_cn.rst +++ b/docs/api/paddle/Model_cn.rst @@ -7,7 +7,7 @@ Model ``Model`` 对象是一个具备训练、测试、推理的神经网络。该对象同时支持静态图和动态图模式,飞桨框架默认为动态图模式,通过 ``paddle.enable_static()`` 来切换到静态图模式。需要注意的是,需要在实例化 ``Model`` 对象之前完成切换。 -在 GPU 上训练时,高层 API 支持自动混合精度(AMP)训练,并且在静态图下使用 Adam、AdamW、Momentum 优化器时还支持纯 float16 的训练。在使用纯 float16 训练之前,优化器初始化时 ``multi_precision`` 参数可以设置为 True,这样可以避免性能变差或是收敛变慢的问题。并且,在组网中可以使用 ``paddle.static.amp.fp16_guard`` 来限定使用纯float16训练的范围,否则需要把 ``use_fp16_guard`` 手动设置为False以开启全局纯 float16 训练。使用纯 float16 训练前,可能需要手动将 dtype 为 float32 的输入转成 float16 的输入。然而,使用自动混合精度训练(AMP)时,不支持限定混合精度训练的范围。 +在 GPU 上训练时,高层 API 支持自动混合精度(AMP)训练,并且在静态图下使用 Adam、AdamW、Momentum 优化器时还支持纯 float16 的训练。在使用纯 float16 训练之前,优化器初始化时 ``multi_precision`` 参数可以设置为 True,这样可以避免性能变差或是收敛变慢的问题。并且,在组网中可以使用 ``paddle.static.amp.fp16_guard`` 来限定使用纯 float16 训练的范围,否则需要把 ``use_fp16_guard`` 手动设置为 False 以开启全局纯 float16 训练。使用纯 float16 训练前,可能需要手动将 dtype 为 float32 的输入转成 float16 的输入。然而,使用自动混合精度训练(AMP)时,不支持限定混合精度训练的范围。 参数 ::::::::: @@ -165,7 +165,7 @@ prepare(optimizer=None, loss=None, metrics=None, amp_configs=None) - **optimizer** (OOptimizer|None,可选) - 当训练模型的,该参数必须被设定。当评估或测试的时候,该参数可以不设定。默认值:None。 - **loss** (Loss|Callable|None,可选) - 当训练模型的,该参数必须被设定。默认值:None。 - **metrics** (Metric|list[Metric]|None,可选) - 当该参数被设定时,所有给定的评估方法会在训练和测试时被运行,并返回对应的指标。默认值:None。 - - **amp_configs** 
(str|dict|None,可选) - 混合精度训练的配置,通常是个 dict,也可以是 str。当使用自动混合精度训练或者纯 float16 训练时,``amp_configs`` 的 key ``level`` 需要被设置为 O1 或者 O2,float32 训练时则默认为 O0。除了 ``level`` ,还可以传入更多的和混合精度API一致的参数,例如:``init_loss_scaling``、 ``incr_ratio`` 、 ``decr_ratio``、 ``incr_every_n_steps``、 ``decr_every_n_nan_or_inf``、 ``use_dynamic_loss_scaling``、 ``custom_white_list``、 ``custom_black_list`` ,在静态图下还支持传入 ``custom_black_varnames`` 和 ``use_fp16_guard`` 。详细使用方法可以参考参考混合精度 API 的文档 :ref:`auto_cast ` 和 :ref:`GradScaler ` 。为了方便起见,当不设置其他的配置参数时,也可以直接传入 ``'O1'`` 、``'O2'`` 。在使用 float32 训练时,该参数可以为 None。默认值:None。 + - **amp_configs** (str|dict|None,可选) - 混合精度训练的配置,通常是个 dict,也可以是 str。当使用自动混合精度训练或者纯 float16 训练时,``amp_configs`` 的 key ``level`` 需要被设置为 O1 或者 O2,float32 训练时则默认为 O0。除了 ``level`` ,还可以传入更多的和混合精度 API 一致的参数,例如:``init_loss_scaling``、 ``incr_ratio`` 、 ``decr_ratio``、 ``incr_every_n_steps``、 ``decr_every_n_nan_or_inf``、 ``use_dynamic_loss_scaling``、 ``custom_white_list``、 ``custom_black_list`` ,在静态图下还支持传入 ``custom_black_varnames`` 和 ``use_fp16_guard`` 。详细使用方法可以参考混合精度 API 的文档 :ref:`auto_cast ` 和 :ref:`GradScaler ` 。为了方便起见,当不设置其他的配置参数时,也可以直接传入 ``'O1'`` 、``'O2'`` 。在使用 float32 训练时,该参数可以为 None。默认值:None。 fit(train_data=None, eval_data=None, batch_size=1, epochs=1, eval_freq=1, log_freq=10, save_dir=None, save_freq=1, verbose=2, drop_last=False, shuffle=True, num_workers=0, callbacks=None, accumulate_grad_batches=1, num_iters=None) @@ -225,7 +225,7 @@ evaluate(eval_data, batch_size=1, log_freq=10, verbose=2, num_workers=0, callbac **返回** -dict, key是 ``prepare`` 时 Metric 的的名称,value 是该 Metric 的值。 +dict, key 是 ``prepare`` 时 Metric 的名称,value 是该 Metric 的值。 **代码示例** diff --git a/docs/api/paddle/NPUPlace_cn.rst b/docs/api/paddle/NPUPlace_cn.rst index 723ae38bc3b..cae5f890c8a 100644 --- a/docs/api/paddle/NPUPlace_cn.rst +++ b/docs/api/paddle/NPUPlace_cn.rst @@ -6,14 +6,14 @@ NPUPlace ..
py:class:: paddle.NPUPlace ``NPUPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 NPU 设备。 -每个 ``NPUPlace`` 有一个 ``dev_id`` (设备id)来表明当前的 ``NPUPlace`` 所代表的显卡编号,编号从 0 开始。 +每个 ``NPUPlace`` 有一个 ``dev_id`` (设备 id)来表明当前的 ``NPUPlace`` 所代表的显卡编号,编号从 0 开始。 ``dev_id`` 不同的 ``NPUPlace`` 所对应的内存不可相互访问。 这里编号指的是显卡实际的编号,而不是显卡的逻辑编号。 参数 :::::::::::: - - **id** (int,可选) - NPU的设备ID。 + - **id** (int,可选) - NPU 的设备 ID。 代码示例 :::::::::::: diff --git a/docs/api/paddle/Overview_cn.rst b/docs/api/paddle/Overview_cn.rst index 20f2fdaecd3..68fdc69f233 100755 --- a/docs/api/paddle/Overview_cn.rst +++ b/docs/api/paddle/Overview_cn.rst @@ -3,62 +3,62 @@ paddle --------------------- -paddle 目录下包含tensor、device、framework相关API以及某些高层API。具体如下: - -- :ref:`tensor数学操作 ` -- :ref:`tensor逻辑操作 ` -- :ref:`tensor属性相关 ` -- :ref:`tensor创建相关 ` -- :ref:`tensor元素查找相关 ` -- :ref:`tensor初始化相关 ` -- :ref:`tensor random相关 ` -- :ref:`tensor线性代数相关 ` -- :ref:`tensor元素操作相关(如:转置,reshape等) ` +paddle 目录下包含 tensor、device、framework 相关 API 以及某些高层 API。具体如下: + +- :ref:`tensor 数学操作 ` +- :ref:`tensor 逻辑操作 ` +- :ref:`tensor 属性相关 ` +- :ref:`tensor 创建相关 ` +- :ref:`tensor 元素查找相关 ` +- :ref:`tensor 初始化相关 ` +- :ref:`tensor random 相关 ` +- :ref:`tensor 线性代数相关 ` +- :ref:`tensor 元素操作相关(如:转置,reshape 等) ` - :ref:`爱因斯坦求和 ` -- :ref:`framework相关 ` -- :ref:`device相关 ` -- :ref:`高层API相关 ` -- :ref:`稀疏API相关 ` +- :ref:`framework 相关 ` +- :ref:`device 相关 ` +- :ref:`高层 API 相关 ` +- :ref:`稀疏 API 相关 ` .. _tensor_math: -tensor数学操作 +tensor 数学操作 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.abs ` ", "绝对值函数" " :ref:`paddle.angle ` ", "相位角函数" - " :ref:`paddle.acos ` ", "arccosine函数" - " :ref:`paddle.add ` ", "Tensor逐元素相加" - " :ref:`paddle.add_n ` ", "对输入的一至多个Tensor或LoDTensor求和" - " :ref:`paddle.addmm ` ", "计算输入Tensor x和y的乘积,将结果乘以标量alpha,再加上input与beta的乘积,得到输出" - " :ref:`paddle.all ` ", "对指定维度上的Tensor元素进行逻辑与运算" - " :ref:`paddle.allclose ` ", "逐个检查输入Tensor x和y的所有元素是否均满足 ∣x−y∣≤atol+rtol×∣y∣" - " :ref:`paddle.isclose ` ", "逐个检查输入Tensor x和y的所有元素是否满足 ∣x−y∣≤atol+rtol×∣y∣" - " :ref:`paddle.any ` ", "对指定维度上的Tensor元素进行逻辑或运算" - " :ref:`paddle.asin ` ", "arcsine函数" - " :ref:`paddle.atan ` ", "arctangent函数" - " :ref:`paddle.atan2 ` ", "arctangent2函数" + " :ref:`paddle.acos ` ", "arccosine 函数" + " :ref:`paddle.add ` ", "Tensor 逐元素相加" + " :ref:`paddle.add_n ` ", "对输入的一至多个 Tensor 或 LoDTensor 求和" + " :ref:`paddle.addmm ` ", "计算输入 Tensor x 和 y 的乘积,将结果乘以标量 alpha,再加上 input 与 beta 的乘积,得到输出" + " :ref:`paddle.all ` ", "对指定维度上的 Tensor 元素进行逻辑与运算" + " :ref:`paddle.allclose ` ", "逐个检查输入 Tensor x 和 y 的所有元素是否均满足 ∣x−y∣≤atol+rtol×∣y∣" + " :ref:`paddle.isclose ` ", "逐个检查输入 Tensor x 和 y 的所有元素是否满足 ∣x−y∣≤atol+rtol×∣y∣" + " :ref:`paddle.any ` ", "对指定维度上的 Tensor 元素进行逻辑或运算" + " :ref:`paddle.asin ` ", "arcsine 函数" + " :ref:`paddle.atan ` ", "arctangent 函数" + " :ref:`paddle.atan2 ` ", "arctangent2 函数" " :ref:`paddle.ceil ` ", "向上取整运算函数" " :ref:`paddle.clip ` ", "将输入的所有元素进行剪裁,使得输出元素限制在[min, max]内" - " :ref:`paddle.conj ` ", "逐元素计算Tensor的共轭运算" + " :ref:`paddle.conj ` ", "逐元素计算 Tensor 的共轭运算" " :ref:`paddle.cos ` ", "余弦函数" " :ref:`paddle.cosh ` ", "双曲余弦函数" " :ref:`paddle.cumsum ` ", "沿给定 axis 计算张量 x 的累加和" " :ref:`paddle.cumprod ` ", "沿给定 dim 计算张量 x 的累乘" - " :ref:`paddle.digamma ` ", "逐元素计算输入x的digamma函数值" + " :ref:`paddle.digamma ` ", "逐元素计算输入 x 的 digamma 函数值" " :ref:`paddle.divide ` ", "逐元素相除算子" - " :ref:`paddle.equal ` ", "该OP返回 x==y 逐元素比较x和y是否相等,相同位置的元素相同则返回True,否则返回False" - " 
:ref:`paddle.equal_all ` ", "如果所有相同位置的元素相同返回True,否则返回False" + " :ref:`paddle.equal ` ", "该 OP 返回 x==y 逐元素比较 x 和 y 是否相等,相同位置的元素相同则返回 True,否则返回 False" + " :ref:`paddle.equal_all ` ", "如果所有相同位置的元素相同返回 True,否则返回 False" " :ref:`paddle.erf ` ", "逐元素计算 Erf 激活函数" - " :ref:`paddle.exp ` ", "逐元素进行以自然数e为底指数运算" - " :ref:`paddle.expm1 ` ", "逐元素进行exp(x)-1运算" + " :ref:`paddle.exp ` ", "逐元素进行以自然数 e 为底指数运算" + " :ref:`paddle.expm1 ` ", "逐元素进行 exp(x)-1 运算" " :ref:`paddle.floor ` ", "向下取整函数" " :ref:`paddle.floor_divide ` ", "逐元素整除算子,输入 x 与输入 y 逐元素整除,并将各个位置的输出元素保存到返回结果中" " :ref:`paddle.greater_equal ` ", "逐元素地返回 x>=y 的逻辑值" @@ -69,69 +69,69 @@ tensor数学操作 " :ref:`paddle.less_equal ` ", "逐元素地返回 x<=y 的逻辑值" " :ref:`paddle.less_than ` ", "逐元素地返回 x` ", "计算输入 x 的 gamma 函数的自然对数并返回" - " :ref:`paddle.log ` ", "Log激活函数(计算自然对数)" - " :ref:`paddle.log10 ` ", "Log10激活函数(计算底为10的对数)" - " :ref:`paddle.log2 ` ", "计算Log1p(加一的自然对数)结果" + " :ref:`paddle.log ` ", "Log 激活函数(计算自然对数)" + " :ref:`paddle.log10 ` ", "Log10 激活函数(计算底为 10 的对数)" + " :ref:`paddle.log2 ` ", "计算 Log1p(加一的自然对数)结果" " :ref:`paddle.logcumsumexp ` ", "计算 x 的指数的前缀和的对数" " :ref:`paddle.logical_and ` ", "逐元素的对 x 和 y 进行逻辑与运算" - " :ref:`paddle.logical_not ` ", "逐元素的对 X Tensor进行逻辑非运算" + " :ref:`paddle.logical_not ` ", "逐元素的对 X Tensor 进行逻辑非运算" " :ref:`paddle.logical_or ` ", "逐元素的对 X 和 Y 进行逻辑或运算" " :ref:`paddle.logical_xor ` ", "逐元素的对 X 和 Y 进行逻辑异或运算" - " :ref:`paddle.logit ` ", "计算logit结果" + " :ref:`paddle.logit ` ", "计算 logit 结果" " :ref:`paddle.bitwise_and ` ", "逐元素的对 x 和 y 进行按位与运算" - " :ref:`paddle.bitwise_not ` ", "逐元素的对 X Tensor进行按位取反运算" + " :ref:`paddle.bitwise_not ` ", "逐元素的对 X Tensor 进行按位取反运算" " :ref:`paddle.bitwise_or ` ", "逐元素的对 X 和 Y 进行按位或运算" " :ref:`paddle.bitwise_xor ` ", "逐元素的对 X 和 Y 进行按位异或运算" - " :ref:`paddle.logsumexp ` ", "沿着 axis 计算 x 的以e为底的指数的和的自然对数" - " :ref:`paddle.max ` ", "对指定维度上的Tensor元素求最大值运算" - " :ref:`paddle.amax ` ", "对指定维度上的Tensor元素求最大值运算" - " :ref:`paddle.maximum ` ", "逐元素对比输入的两个Tensor,并且把各个位置更大的元素保存到返回结果中" + " 
:ref:`paddle.logsumexp ` ", "沿着 axis 计算 x 的以 e 为底的指数的和的自然对数" + " :ref:`paddle.max ` ", "对指定维度上的 Tensor 元素求最大值运算" + " :ref:`paddle.amax ` ", "对指定维度上的 Tensor 元素求最大值运算" + " :ref:`paddle.maximum ` ", "逐元素对比输入的两个 Tensor,并且把各个位置更大的元素保存到返回结果中" " :ref:`paddle.mean ` ", "沿 axis 计算 x 的平均值" " :ref:`paddle.median ` ", "沿给定的轴 axis 计算 x 中元素的中位数" - " :ref:`paddle.nanmedian ` ", "沿给定的轴 axis 忽略NAN元素计算 x 中元素的中位数" - " :ref:`paddle.min ` ", "对指定维度上的Tensor元素求最小值运算" - " :ref:`paddle.amin ` ", "对指定维度上的Tensor元素求最小值运算" - " :ref:`paddle.minimum ` ", "逐元素对比输入的两个Tensor,并且把各个位置更小的元素保存到返回结果中" + " :ref:`paddle.nanmedian ` ", "沿给定的轴 axis 忽略 NAN 元素计算 x 中元素的中位数" + " :ref:`paddle.min ` ", "对指定维度上的 Tensor 元素求最小值运算" + " :ref:`paddle.amin ` ", "对指定维度上的 Tensor 元素求最小值运算" + " :ref:`paddle.minimum ` ", "逐元素对比输入的两个 Tensor,并且把各个位置更小的元素保存到返回结果中" " :ref:`paddle.mm ` ", "用于两个输入矩阵的相乘" " :ref:`paddle.inner ` ", "计算两个输入矩阵的内积" " :ref:`paddle.outer ` ", "计算两个输入矩阵的外积" - " :ref:`paddle.multiplex ` ", "从每个输入Tensor中选择特定行构造输出Tensor" + " :ref:`paddle.multiplex ` ", "从每个输入 Tensor 中选择特定行构造输出 Tensor" " :ref:`paddle.multiply ` ", "逐元素相乘算子" " :ref:`paddle.neg ` ", "计算输入 x 的相反数并返回" - " :ref:`paddle.not_equal ` ", "逐元素地返回x!=y 的逻辑值" + " :ref:`paddle.not_equal ` ", "逐元素地返回 x!=y 的逻辑值" " :ref:`paddle.pow ` ", "指数算子,逐元素计算 x 的 y 次幂" - " :ref:`paddle.prod ` ", "对指定维度上的Tensor元素进行求乘积运算" - " :ref:`paddle.reciprocal ` ", "对输入Tensor取倒数" + " :ref:`paddle.prod ` ", "对指定维度上的 Tensor 元素进行求乘积运算" + " :ref:`paddle.reciprocal ` ", "对输入 Tensor 取倒数" " :ref:`paddle.round ` ", "将输入中的数值四舍五入到最接近的整数数值" - " :ref:`paddle.rsqrt ` ", "rsqrt激活函数" + " :ref:`paddle.rsqrt ` ", "rsqrt 激活函数" " :ref:`paddle.scale ` ", "缩放算子" - " :ref:`paddle.sign ` ", "对输入x中每个元素进行正负判断" + " :ref:`paddle.sign ` ", "对输入 x 中每个元素进行正负判断" " :ref:`paddle.sin ` ", "计算输入的正弦值" " :ref:`paddle.sinh ` ", "双曲正弦函数" " :ref:`paddle.sqrt ` ", "计算输入的算数平方根" - " :ref:`paddle.square ` ", "该OP执行逐元素取平方运算" + " :ref:`paddle.square ` ", "该 OP 执行逐元素取平方运算" " :ref:`paddle.stanh ` ", "stanh 激活函数" " :ref:`paddle.std 
` ", "沿给定的轴 axis 计算 x 中元素的标准差" " :ref:`paddle.subtract ` ", "逐元素相减算子" - " :ref:`paddle.sum ` ", "对指定维度上的Tensor元素进行求和运算" - " :ref:`paddle.tan ` ", "三角函数tangent" - " :ref:`paddle.tanh ` ", "tanh激活函数" + " :ref:`paddle.sum ` ", "对指定维度上的 Tensor 元素进行求和运算" + " :ref:`paddle.tan ` ", "三角函数 tangent" + " :ref:`paddle.tanh ` ", "tanh 激活函数" " :ref:`paddle.tanh_ ` ", "Inplace 版本的 tanh API,对输入 x 采用 Inplace 策略" " :ref:`paddle.trace ` ", "计算输入 Tensor 在指定平面上的对角线元素之和" " :ref:`paddle.var ` ", "沿给定的轴 axis 计算 x 中元素的方差" " :ref:`paddle.diagonal ` ", "根据给定的轴 axis 返回输入 Tensor 的局部视图" " :ref:`paddle.trunc ` ", "对输入 Tensor 每个元素的小数部分进行截断" " :ref:`paddle.frac ` ", "得到输入 Tensor 每个元素的小数部分" - " :ref:`paddle.log1p ` ", "该OP计算Log1p(加一的自然对数)结果" - " :ref:`paddle.take_along_axis ` ", "根据axis和index获取输入 Tensor 的对应元素" - " :ref:`paddle.put_along_axis ` ", "根据axis和index放置value值至输入 Tensor" - " :ref:`paddle.lerp ` ", "该OP基于给定的 weight 计算 x 与 y 的线性插值" - " :ref:`paddle.diff ` ", "沿着指定维度对输入Tensor计算n阶的前向差值" + " :ref:`paddle.log1p ` ", "该 OP 计算 Log1p(加一的自然对数)结果" + " :ref:`paddle.take_along_axis ` ", "根据 axis 和 index 获取输入 Tensor 的对应元素" + " :ref:`paddle.put_along_axis ` ", "根据 axis 和 index 放置 value 值至输入 Tensor" + " :ref:`paddle.lerp ` ", "该 OP 基于给定的 weight 计算 x 与 y 的线性插值" + " :ref:`paddle.diff ` ", "沿着指定维度对输入 Tensor 计算 n 阶的前向差值" " :ref:`paddle.rad2deg ` ", "将元素从弧度的角度转换为度" " :ref:`paddle.deg2rad ` ", "将元素从度的角度转换为弧度" " :ref:`paddle.gcd ` ", "计算两个输入的按元素绝对值的最大公约数" " :ref:`paddle.lcm ` ", "计算两个输入的按元素绝对值的最小公倍数" - " :ref:`paddle.erfinv ` ", "计算输入Tensor的逆误差函数" + " :ref:`paddle.erfinv ` ", "计算输入 Tensor 的逆误差函数" " :ref:`paddle.acosh ` ", "反双曲余弦函数" " :ref:`paddle.asinh ` ", "反双曲正弦函数" " :ref:`paddle.atanh ` ", "反双曲正切函数" @@ -139,75 +139,75 @@ tensor数学操作 .. _tensor_logic: -tensor逻辑操作 +tensor 逻辑操作 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.is_empty ` ", "测试变量是否为空" - " :ref:`paddle.is_tensor ` ", "用来测试输入对象是否是paddle.Tensor" - " :ref:`paddle.isfinite ` ", "返回输入tensor的每一个值是否为Finite(既非 +/-INF 也非 +/-NaN )" - " :ref:`paddle.isinf ` ", "返回输入tensor的每一个值是否为 +/-INF" - " :ref:`paddle.isnan ` ", "返回输入tensor的每一个值是否为 +/-NaN" + " :ref:`paddle.is_tensor ` ", "用来测试输入对象是否是 paddle.Tensor" + " :ref:`paddle.isfinite ` ", "返回输入 tensor 的每一个值是否为 Finite(既非 +/-INF 也非 +/-NaN )" + " :ref:`paddle.isinf ` ", "返回输入 tensor 的每一个值是否为 +/-INF" + " :ref:`paddle.isnan ` ", "返回输入 tensor 的每一个值是否为 +/-NaN" .. _tensor_attribute: -tensor属性相关 +tensor 属性相关 :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`paddle.imag ` ", "返回一个包含输入复数Tensor的虚部数值的新Tensor" - " :ref:`paddle.real ` ", "返回一个包含输入复数Tensor的实部数值的新Tensor" - " :ref:`paddle.shape ` ", "获得输入Tensor或SelectedRows的shape" + " :ref:`paddle.imag ` ", "返回一个包含输入复数 Tensor 的虚部数值的新 Tensor" + " :ref:`paddle.real ` ", "返回一个包含输入复数 Tensor 的实部数值的新 Tensor" + " :ref:`paddle.shape ` ", "获得输入 Tensor 或 SelectedRows 的 shape" " :ref:`paddle.is_complex ` ", "判断输入 tensor 的数据类型是否为复数类型" " :ref:`paddle.is_integer ` ", "判断输入 tensor 的数据类型是否为整数类型" - " :ref:`paddle.broadcast_shape ` ", "返回对x_shape大小的张量和y_shape大小的张量做broadcast操作后得到的shape" - " :ref:`paddle.is_floating_point ` ", "判断输入Tensor的数据类型是否为浮点类型" + " :ref:`paddle.broadcast_shape ` ", "返回对 x_shape 大小的张量和 y_shape 大小的张量做 broadcast 操作后得到的 shape" + " :ref:`paddle.is_floating_point ` ", "判断输入 Tensor 的数据类型是否为浮点类型" .. _tensor_creation: -tensor创建相关 +tensor 创建相关 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`paddle.arange ` ", "返回以步长 step 均匀分隔给定数值区间[start, end)的1-D Tensor,数据类型为 dtype" - " :ref:`paddle.diag ` ", "如果 x 是向量(1-D张量),则返回带有 x 元素作为对角线的2-D方阵;如果 x 是矩阵(2-D张量),则提取 x 的对角线元素,以1-D张量返回。" + " :ref:`paddle.arange ` ", "返回以步长 step 均匀分隔给定数值区间[start, end)的 1-D Tensor,数据类型为 dtype" + " :ref:`paddle.diag ` ", "如果 x 是向量(1-D 张量),则返回带有 x 元素作为对角线的 2-D 方阵;如果 x 是矩阵(2-D 张量),则提取 x 的对角线元素,以 1-D 张量返回。" " :ref:`paddle.diagflat ` ", "如果 x 是一维张量,则返回带有 x 元素作为对角线的二维方阵;如果 x 是大于等于二维的张量,则返回一个二维张量,其对角线元素为 x 在连续维度展开得到的一维张量的元素。" - " :ref:`paddle.empty ` ", "创建形状大小为shape并且数据类型为dtype的Tensor" - " :ref:`paddle.empty_like ` ", "根据 x 的shape和数据类型 dtype 创建未初始化的Tensor" - " :ref:`paddle.eye ` ", "构建二维Tensor(主对角线元素为1,其他元素为0)" - " :ref:`paddle.full ` ", "创建形状大小为 shape 并且数据类型为 dtype 的Tensor" - " :ref:`paddle.full_like ` ", "创建一个和 x 具有相同的形状并且数据类型为 dtype 的Tensor" - " :ref:`paddle.linspace ` ", "返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num" + " :ref:`paddle.empty ` ", "创建形状大小为 shape 并且数据类型为 dtype 的 Tensor" + " :ref:`paddle.empty_like ` ", "根据 x 的 shape 和数据类型 dtype 创建未初始化的 Tensor" + " :ref:`paddle.eye ` ", "构建二维 Tensor(主对角线元素为 1,其他元素为 0)" + " :ref:`paddle.full ` ", "创建形状大小为 shape 并且数据类型为 dtype 的 Tensor" + " :ref:`paddle.full_like ` ", "创建一个和 x 具有相同的形状并且数据类型为 dtype 的 Tensor" + " :ref:`paddle.linspace ` ", "返回一个 Tensor,Tensor 的值为在区间 start 和 stop 上均匀间隔的 num 个值,输出 Tensor 的长度为 num" " :ref:`paddle.meshgrid ` ", "对每个张量做扩充操作" - " :ref:`paddle.numel ` ", "返回一个长度为1并且元素值为输入 x 元素个数的Tensor" - " :ref:`paddle.ones ` ", "创建形状为 shape 、数据类型为 dtype 且值全为1的Tensor" - " :ref:`paddle.ones_like ` ", "返回一个和 x 具有相同形状的数值都为1的Tensor" - " :ref:`paddle.Tensor ` ", "Paddle中最为基础的数据结构" - " :ref:`paddle.to_tensor ` ", "通过已知的data来创建一个tensor" - " :ref:`paddle.tolist ` ", "将paddle Tensor转化为python list" - " :ref:`paddle.zeros ` ", "该OP创建形状为 shape 、数据类型为 dtype 且值全为0的Tensor" - " :ref:`paddle.zeros_like ` ", "该OP返回一个和 x 具有相同的形状的全零Tensor,数据类型为 
dtype 或者和 x 相同" + " :ref:`paddle.numel ` ", "返回一个长度为 1 并且元素值为输入 x 元素个数的 Tensor" + " :ref:`paddle.ones ` ", "创建形状为 shape 、数据类型为 dtype 且值全为 1 的 Tensor" + " :ref:`paddle.ones_like ` ", "返回一个和 x 具有相同形状的数值都为 1 的 Tensor" + " :ref:`paddle.Tensor ` ", "Paddle 中最为基础的数据结构" + " :ref:`paddle.to_tensor ` ", "通过已知的 data 来创建一个 tensor" + " :ref:`paddle.tolist ` ", "将 paddle Tensor 转化为 python list" + " :ref:`paddle.zeros ` ", "该 OP 创建形状为 shape 、数据类型为 dtype 且值全为 0 的 Tensor" + " :ref:`paddle.zeros_like ` ", "该 OP 返回一个和 x 具有相同的形状的全零 Tensor,数据类型为 dtype 或者和 x 相同" " :ref:`paddle.complex ` ", "给定实部和虚部,返回一个复数 Tensor" - " :ref:`paddle.create_parameter ` ", "该OP创建一个参数,该参数是一个可学习的变量, 拥有梯度并且可优化" - " :ref:`paddle.clone ` ", "对输入Tensor ``x`` 进行拷贝,并返回一个新的Tensor,并且该操作提供梯度回传" - " :ref:`paddle.batch ` ", "一个reader的装饰器。返回的reader将输入reader的数据打包成指定的batch_size大小的批处理数据(不推荐使用)" + " :ref:`paddle.create_parameter ` ", "该 OP 创建一个参数,该参数是一个可学习的变量, 拥有梯度并且可优化" + " :ref:`paddle.clone ` ", "对输入 Tensor ``x`` 进行拷贝,并返回一个新的 Tensor,并且该操作提供梯度回传" + " :ref:`paddle.batch ` ", "一个 reader 的装饰器。返回的 reader 将输入 reader 的数据打包成指定的 batch_size 大小的批处理数据(不推荐使用)" .. _tensor_search: -tensor元素查找相关 +tensor 元素查找相关 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.argmax ` ", "沿 axis 计算输入 x 的最大元素的索引" @@ -215,114 +215,114 @@ tensor元素查找相关 " :ref:`paddle.argsort ` ", "对输入变量沿给定轴进行排序,输出排序好的数据的相应索引,其维度和输入相同" " :ref:`paddle.index_sample ` ", "对输入 x 中的元素进行批量抽样" " :ref:`paddle.index_select ` ", "沿着指定轴 axis 对输入 x 进行索引" - " :ref:`paddle.masked_select ` ", "返回一个1-D 的Tensor, Tensor的值是根据 mask 对输入 x 进行选择的" + " :ref:`paddle.masked_select ` ", "返回一个 1-D 的 Tensor, Tensor 的值是根据 mask 对输入 x 进行选择的" " :ref:`paddle.nonzero ` ", "返回输入 x 中非零元素的坐标" " :ref:`paddle.sort ` ", "对输入变量沿给定轴进行排序,输出排序好的数据,其维度和输入相同" " :ref:`paddle.searchsorted ` ", "将根据给定的 values 在 sorted_sequence 的最后一个维度查找合适的索引" - " :ref:`paddle.topk ` ", "沿着可选的 axis 查找topk最大或者最小的结果和结果所在的索引信息" - " :ref:`paddle.where ` ", "该OP返回一个根据输入 condition, 选择 x 或 y 的元素组成的多维 Tensor" + " :ref:`paddle.topk ` ", "沿着可选的 axis 查找 topk 最大或者最小的结果和结果所在的索引信息" + " :ref:`paddle.where ` ", "该 OP 返回一个根据输入 condition, 选择 x 或 y 的元素组成的多维 Tensor" .. _tensor_initializer: -tensor初始化相关 +tensor 初始化相关 :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`paddle.assign ` ", "将输入Tensor或numpy数组拷贝至输出Tensor" + " :ref:`paddle.assign ` ", "将输入 Tensor 或 numpy 数组拷贝至输出 Tensor" .. _tensor_random: -tensor random相关 +tensor random 相关 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`paddle.bernoulli ` ", "以输入 x 为概率,生成一个伯努利分布(0-1分布)的Tensor,输出Tensor的形状和数据类型与输入 x 相同" - " :ref:`paddle.multinomial ` ", "以输入 x 为概率,生成一个多项分布的Tensor" - " :ref:`paddle.normal ` ", "返回符合正态分布(均值为 mean ,标准差为 std 的正态随机分布)的随机Tensor" - " :ref:`paddle.rand ` ", "返回符合均匀分布的,范围在[0, 1)的Tensor" - " :ref:`paddle.randint ` ", "返回服从均匀分布的、范围在[low, high)的随机Tensor" - " :ref:`paddle.randint_like ` ", "返回一个和 x 具有相同形状的服从均匀分布的、范围在[low, high)的随机Tensor,数据类型为 dtype 或者和 x 相同。" - " :ref:`paddle.randn ` ", "返回符合标准正态分布(均值为0,标准差为1的正态随机分布)的随机Tensor" - " :ref:`paddle.randperm ` ", "返回一个数值在0到n-1、随机排列的1-D Tensor" - " :ref:`paddle.seed ` ", "设置全局默认generator的随机种子" - " :ref:`paddle.uniform ` ", "返回数值服从范围[min, max)内均匀分布的随机Tensor" - " :ref:`paddle.standard_normal ` ", "返回符合标准正态分布(均值为0,标准差为1的正态随机分布)的随机Tensor,形状为 shape,数据类型为 dtype" - " :ref:`paddle.poisson ` ", "返回服从泊松分布的随机Tensor,输出Tensor的形状和数据类型与输入 x 相同" + " :ref:`paddle.bernoulli ` ", "以输入 x 为概率,生成一个伯努利分布(0-1 分布)的 Tensor,输出 Tensor 的形状和数据类型与输入 x 相同" + " :ref:`paddle.multinomial ` ", "以输入 x 为概率,生成一个多项分布的 Tensor" + " :ref:`paddle.normal ` ", "返回符合正态分布(均值为 mean ,标准差为 std 的正态随机分布)的随机 Tensor" + " :ref:`paddle.rand ` ", "返回符合均匀分布的,范围在[0, 1)的 Tensor" + " :ref:`paddle.randint ` ", "返回服从均匀分布的、范围在[low, high)的随机 Tensor" + " :ref:`paddle.randint_like ` ", "返回一个和 x 具有相同形状的服从均匀分布的、范围在[low, high)的随机 Tensor,数据类型为 dtype 或者和 x 相同。" + " :ref:`paddle.randn ` ", "返回符合标准正态分布(均值为 0,标准差为 1 的正态随机分布)的随机 Tensor" + " :ref:`paddle.randperm ` ", "返回一个数值在 0 到 n-1、随机排列的 1-D Tensor" + " :ref:`paddle.seed ` ", "设置全局默认 generator 的随机种子" + " :ref:`paddle.uniform ` ", "返回数值服从范围[min, max)内均匀分布的随机 Tensor" + " :ref:`paddle.standard_normal ` ", "返回符合标准正态分布(均值为 0,标准差为 1 的正态随机分布)的随机 Tensor,形状为 shape,数据类型为 dtype" + " :ref:`paddle.poisson ` ", "返回服从泊松分布的随机 Tensor,输出 Tensor 的形状和数据类型与输入 x 相同" .. _tensor_linalg: -tensor线性代数相关 +tensor 线性代数相关 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.bincount ` ", "统计输入张量中元素的出现次数" - " :ref:`paddle.bmm ` ", "对输入x及输入y进行矩阵相乘" + " :ref:`paddle.bmm ` ", "对输入 x 及输入 y 进行矩阵相乘" " :ref:`paddle.cross ` ", "计算张量 x 和 y 在 axis 维度上的向量积(叉积)" " :ref:`paddle.dist ` ", "计算 (x-y) 的 p 范数(p-norm)" " :ref:`paddle.dot ` ", "计算向量的内积" " :ref:`paddle.histogram ` ", "计算输入张量的直方图" - " :ref:`paddle.matmul ` ", "计算两个Tensor的乘积,遵循完整的广播规则" + " :ref:`paddle.matmul ` ", "计算两个 Tensor 的乘积,遵循完整的广播规则" " :ref:`paddle.mv ` ", "计算矩阵 x 和向量 vec 的乘积" - " :ref:`paddle.rank ` ", "计算输入Tensor的维度(秩)" - " :ref:`paddle.t ` ", "对小于等于2维的Tensor进行数据转置" - " :ref:`paddle.tril ` ", "返回输入矩阵 input 的下三角部分,其余部分被设为0" - " :ref:`paddle.triu ` ", "返回输入矩阵 input 的上三角部分,其余部分被设为0" + " :ref:`paddle.rank ` ", "计算输入 Tensor 的维度(秩)" + " :ref:`paddle.t ` ", "对小于等于 2 维的 Tensor 进行数据转置" + " :ref:`paddle.tril ` ", "返回输入矩阵 input 的下三角部分,其余部分被设为 0" + " :ref:`paddle.triu ` ", "返回输入矩阵 input 的上三角部分,其余部分被设为 0" .. _tensor_manipulation: -tensor元素操作相关(如:转置,reshape等) +tensor 元素操作相关(如:转置,reshape 等) :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.broadcast_to ` ", "根据 shape 指定的形状广播 x ,广播后, x 的形状和 shape 指定的形状一致" - " :ref:`paddle.broadcast_tensors ` ", "对一组输入Tensor进行广播操作, 输入应符合广播规范" - " :ref:`paddle.cast ` ", "将输入的x的数据类型转换为 dtype 并输出" - " :ref:`paddle.chunk ` ", "将输入Tensor分割成多个子Tensor" - " :ref:`paddle.concat ` ", "对输入沿 axis 轴进行联结,返回一个新的Tensor" + " :ref:`paddle.broadcast_tensors ` ", "对一组输入 Tensor 进行广播操作, 输入应符合广播规范" + " :ref:`paddle.cast ` ", "将输入的 x 的数据类型转换为 dtype 并输出" + " :ref:`paddle.chunk ` ", "将输入 Tensor 分割成多个子 Tensor" + " :ref:`paddle.concat ` ", "对输入沿 axis 轴进行联结,返回一个新的 Tensor" " :ref:`paddle.crop ` ", "根据偏移量(offsets)和形状(shape),裁剪输入(x)Tensor" " :ref:`paddle.expand ` ", "根据 shape 指定的形状扩展 x ,扩展后, x 的形状和 shape 指定的形状一致" " :ref:`paddle.expand_as ` ", "根据 y 的形状扩展 x ,扩展后, x 的形状和 y 的形状相同" - " :ref:`paddle.flatten ` ", "根据给定的start_axis 和 stop_axis 将连续的维度展平" - " :ref:`paddle.flip ` ", "沿指定轴反转n维tensor" - " :ref:`paddle.rot90 ` ", "沿axes指定的平面将n维tensor旋转90度k次" + " :ref:`paddle.flatten ` ", "根据给定的 start_axis 和 stop_axis 将连续的维度展平" + " :ref:`paddle.flip ` ", "沿指定轴反转 n 维 tensor" + " :ref:`paddle.rot90 ` ", "沿 axes 指定的平面将 n 维 tensor 旋转 90 度 k 次" " :ref:`paddle.gather ` ", "根据索引 index 获取输入 x 的指定 aixs 维度的条目,并将它们拼接在一起" - " :ref:`paddle.gather_nd ` ", "paddle.gather的高维推广" + " :ref:`paddle.gather_nd ` ", "paddle.gather 的高维推广" " :ref:`paddle.reshape ` ", "在保持输入 x 数据不变的情况下,改变 x 的形状" " :ref:`paddle.reshape_ ` ", "Inplace 版本的 reshape API,对输入 x 采用 Inplace 策略" " :ref:`paddle.roll ` ", "沿着指定维度 axis 对输入 x 进行循环滚动,当元素移动到最后位置时,会从第一个位置重新插入" " :ref:`paddle.scatter ` ", "通过基于 updates 来更新选定索引 index 上的输入来获得输出" " :ref:`paddle.scatter_ ` ", "Inplace 版本的 scatter API,对输入 x 采用 Inplace 策略 " - " :ref:`paddle.scatter_nd ` ", "根据 index ,将 updates 添加到一个新的张量中,从而得到输出的Tensor" - " :ref:`paddle.scatter_nd_add ` ", "通过对Tensor中的单个值或切片应用稀疏加法,从而得到输出的Tensor" + " :ref:`paddle.scatter_nd ` ", "根据 index ,将 updates 添加到一个新的张量中,从而得到输出的 Tensor" + " 
:ref:`paddle.scatter_nd_add ` ", "通过对 Tensor 中的单个值或切片应用稀疏加法,从而得到输出的 Tensor" " :ref:`paddle.shard_index ` ", "根据分片(shard)的偏移量重新计算分片的索引" " :ref:`paddle.slice ` ", "沿多个轴生成 input 的切片" - " :ref:`paddle.split ` ", "将输入Tensor分割成多个子Tensor" - " :ref:`paddle.squeeze ` ", "删除输入Tensor的Shape中尺寸为1的维度" + " :ref:`paddle.split ` ", "将输入 Tensor 分割成多个子 Tensor" + " :ref:`paddle.squeeze ` ", "删除输入 Tensor 的 Shape 中尺寸为 1 的维度" " :ref:`paddle.squeeze_ ` ", "Inplace 版本的 squeeze API,对输入 x 采用 Inplace 策略" " :ref:`paddle.stack ` ", "沿 axis 轴对输入 x 进行堆叠操作" " :ref:`paddle.strided_slice ` ", "沿多个轴生成 x 的切片" " :ref:`paddle.tile ` ", "根据参数 repeat_times 对输入 x 的各维度进行复制" - " :ref:`paddle.transpose ` ", "根据perm对输入的多维Tensor进行数据重排" - " :ref:`paddle.moveaxis ` ", "移动Tensor的轴,根据移动之后的轴对输入的多维Tensor进行数据重排" - " :ref:`paddle.tensordot ` ", "沿多个轴对输入的x和y进行张量缩并操作" - " :ref:`paddle.unbind ` ", "将输入Tensor按照指定的维度分割成多个子Tensor" - " :ref:`paddle.unique ` ", "返回Tensor按升序排序后的独有元素" - " :ref:`paddle.unique_consecutive ` ", "返回无连续重复元素的Tensor" - " :ref:`paddle.unsqueeze ` ", "该OP向输入Tensor的Shape中一个或多个位置(axis)插入尺寸为1的维度" + " :ref:`paddle.transpose ` ", "根据 perm 对输入的多维 Tensor 进行数据重排" + " :ref:`paddle.moveaxis ` ", "移动 Tensor 的轴,根据移动之后的轴对输入的多维 Tensor 进行数据重排" + " :ref:`paddle.tensordot ` ", "沿多个轴对输入的 x 和 y 进行张量缩并操作" + " :ref:`paddle.unbind ` ", "将输入 Tensor 按照指定的维度分割成多个子 Tensor" + " :ref:`paddle.unique ` ", "返回 Tensor 按升序排序后的独有元素" + " :ref:`paddle.unique_consecutive ` ", "返回无连续重复元素的 Tensor" + " :ref:`paddle.unsqueeze ` ", "该 OP 向输入 Tensor 的 Shape 中一个或多个位置(axis)插入尺寸为 1 的维度" " :ref:`paddle.unsqueeze_ ` ", "Inplace 版本的 unsqueeze API,对输入 x 采用 Inplace 策略" - " :ref:`paddle.unstack ` ", "该OP将单个dim为 D 的Tensor沿 axis 轴unpack为 num 个dim为 (D-1) 的Tensor" + " :ref:`paddle.unstack ` ", "该 OP 将单个 dim 为 D 的 Tensor 沿 axis 轴 unpack 为 num 个 dim 为 (D-1) 的 Tensor" " :ref:`paddle.as_complex ` ", "将实数 Tensor 转为复数 Tensor" " :ref:`paddle.as_real ` ", "将复数 Tensor 转为实数 Tensor" " :ref:`paddle.repeat_interleave ` ", "沿 axis 轴对输入 x 的元素进行复制" @@ -333,58 +333,58 @@ 
tensor元素操作相关(如:转置,reshape等) :::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.einsum ` ", "根据爱因斯坦标记对多个张量进行爱因斯坦求和" .. _about_framework: -framework相关 +framework 相关 :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`paddle.CPUPlace ` ", "一个设备描述符,指定CPUPlace则Tensor将被自动分配在该设备上,并且模型将会运行在该设备上" + " :ref:`paddle.CPUPlace ` ", "一个设备描述符,指定 CPUPlace 则 Tensor 将被自动分配在该设备上,并且模型将会运行在该设备上" " :ref:`paddle.CUDAPinnedPlace ` ", "一个设备描述符,它所指代的页锁定内存由 CUDA 函数 cudaHostAlloc() 在主机内存上分配,主机的操作系统将不会对这块内存进行分页和交换操作,可以通过直接内存访问技术访问,加速主机和 GPU 之间的数据拷贝" " :ref:`paddle.CUDAPlace ` ", "一个设备描述符,表示一个分配或将要分配 Tensor 或 LoDTensor 的 GPU 设备" " :ref:`paddle.DataParallel ` ", "通过数据并行模式执行动态图模型" - " :ref:`paddle.NPUPlace ` ", "一个设备描述符,指NCPUPlace则Tensor将被自动分配在该设备上,并且模型将会运行在该设备上" - " :ref:`paddle.disable_signal_handler ` ", "关闭Paddle系统信号处理方法" + " :ref:`paddle.NPUPlace ` ", "一个设备描述符,指 NCPUPlace 则 Tensor 将被自动分配在该设备上,并且模型将会运行在该设备上" + " :ref:`paddle.disable_signal_handler ` ", "关闭 Paddle 系统信号处理方法" " :ref:`paddle.disable_static ` ", "关闭静态图模式" " :ref:`paddle.enable_static ` ", "开启静态图模式" - " :ref:`paddle.get_default_dtype ` ", "得到当前全局的dtype" + " :ref:`paddle.get_default_dtype ` ", "得到当前全局的 dtype" " :ref:`paddle.grad ` ", "对于每个 inputs ,计算所有 outputs 相对于其的梯度和" - " :ref:`paddle.in_dynamic_mode ` ", "查看paddle当前是否在动态图模式中运行" - " :ref:`paddle.load ` ", "从指定路径载入可以在paddle中使用的对象实例" + " :ref:`paddle.in_dynamic_mode ` ", "查看 paddle 当前是否在动态图模式中运行" + " :ref:`paddle.load ` ", "从指定路径载入可以在 paddle 中使用的对象实例" " :ref:`paddle.no_grad ` ", "创建一个上下文来禁用动态图梯度计算" " :ref:`paddle.ParamAttr ` ", "创建一个参数属性对象" - " :ref:`paddle.save ` ", "将对象实例obj保存到指定的路径中" - " :ref:`paddle.set_default_dtype ` ", "设置默认的全局dtype。" + " :ref:`paddle.save ` ", "将对象实例 obj 保存到指定的路径中" + " :ref:`paddle.set_default_dtype ` ", "设置默认的全局 dtype。" " :ref:`paddle.set_grad_enabled ` ", "创建启用或禁用动态图梯度计算的上下文" " :ref:`paddle.is_grad_enabled ` ", 
"判断当前动态图下是否启用了计算梯度模式。" " :ref:`paddle.set_printoptions ` ", "设置 paddle 中 Tensor 的打印配置选项" .. _about_device: -device相关 +device 相关 :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`paddle.get_cuda_rng_state ` ", "获取cuda随机数生成器的状态信息" - " :ref:`paddle.set_cuda_rng_state ` ", "设置cuda随机数生成器的状态信息" + " :ref:`paddle.get_cuda_rng_state ` ", "获取 cuda 随机数生成器的状态信息" + " :ref:`paddle.set_cuda_rng_state ` ", "设置 cuda 随机数生成器的状态信息" .. _about_hapi: -高层API相关 +高层 API 相关 :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.Model ` ", "一个具备训练、测试、推理的神经网络" @@ -393,12 +393,12 @@ device相关 .. _about_sparse_api: -稀疏API相关 +稀疏 API 相关 :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`paddle.sparse.sparse_coo_tensor` ", "创建一个COO(Coordinate)格式的稀疏Tensor" - " :ref:`paddle.sparse.sparse_csr_tensor` ", "创建一个CSR(Compressed Sparse Row)格式的稀疏Tensor" + " :ref:`paddle.sparse.sparse_coo_tensor` ", "创建一个 COO(Coordinate)格式的稀疏 Tensor" + " :ref:`paddle.sparse.sparse_csr_tensor` ", "创建一个 CSR(Compressed Sparse Row)格式的稀疏 Tensor" diff --git a/docs/api/paddle/ParamAttr_cn.rst b/docs/api/paddle/ParamAttr_cn.rst index f15b647c5b4..de16cda638b 100644 --- a/docs/api/paddle/ParamAttr_cn.rst +++ b/docs/api/paddle/ParamAttr_cn.rst @@ -9,7 +9,7 @@ ParamAttr .. 
note:: - 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐使用``need_clip``来设置梯度裁剪范围,并在初始化 ``optimizer`` 时设置梯度裁剪。 + 该类中的 ``gradient_clip`` 属性在 2.0 版本会废弃,推荐使用``need_clip``来设置梯度裁剪范围,并在初始化 ``optimizer`` 时设置梯度裁剪。 共有三种裁剪策略::ref:`api_paddle_nn_ClipGradByGlobalNorm` 、:ref:`api_paddle_nn_ClipGradByNorm` 、 :ref:`api_paddle_nn_ClipGradByValue` 。 创建一个参数属性对象,用户可设置参数的名称、初始化方式、学习率、正则化规则、是否需要训练、梯度裁剪方式、是否做模型平均等属性。 @@ -18,14 +18,14 @@ ParamAttr :::::::::::: - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **initializer** (Initializer,可选) - 参数的初始化方式。默认值为None,表示权重参数采用Xavier初始化方式,偏置参数采用全0初始化方式。 - - **learning_rate** (float,可选) - 参数的学习率。实际参数的学习率等于全局学习率乘以参数的学习率,再乘以learning rate schedule的系数。 + - **initializer** (Initializer,可选) - 参数的初始化方式。默认值为 None,表示权重参数采用 Xavier 初始化方式,偏置参数采用全 0 初始化方式。 + - **learning_rate** (float,可选) - 参数的学习率。实际参数的学习率等于全局学习率乘以参数的学习率,再乘以 learning rate schedule 的系数。 - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略::ref:`api_paddle_regularizer_L1Decay` 、 :ref:`api_paddle_regularizer_L2Decay`,如果在 ``optimizer`` (例如 :ref:`api_paddle_optimizer_SGD` ) 中也 - 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 - - **trainable** (bool,可选) - 参数是否需要训练。默认值为True,表示需要训练。 - - **do_model_average** (bool,可选) - 是否做模型平均。默认值为True。仅在 :ref:`ExponentialMovingAverage` 下使用。 - - **need_clip** (bool,可选) - 参数是否需要进行梯度裁剪。默认值为True,表示该参数的梯度会根据优化器中设置的裁剪规则进行裁剪。 + 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为 None,表示没有正则化。 + - **trainable** (bool,可选) - 参数是否需要训练。默认值为 True,表示需要训练。 + - **do_model_average** (bool,可选) - 是否做模型平均。默认值为 True。仅在 :ref:`ExponentialMovingAverage` 下使用。 + - **need_clip** (bool,可选) - 参数是否需要进行梯度裁剪。默认值为 True,表示该参数的梯度会根据优化器中设置的裁剪规则进行裁剪。 返回 :::::::::::: diff --git a/docs/api/paddle/Tensor_cn.rst b/docs/api/paddle/Tensor_cn.rst index 51e3ad5d6b5..3d650113c71 100755 --- a/docs/api/paddle/Tensor_cn.rst +++ b/docs/api/paddle/Tensor_cn.rst @@ -6,16 +6,16 @@ Tensor .. 
py:class:: paddle.Tensor -``Tensor`` 是Paddle中最为基础的数据结构,有几种创建Tensor的不同方式: +``Tensor`` 是 Paddle 中最为基础的数据结构,有几种创建 Tensor 的不同方式: -- 用预先存在的 ``data`` 数据创建1个Tensor,请参考 :ref:`cn_api_paddle_to_tensor` -- 创建一个指定 ``shape`` 的Tensor,请参考 :ref:`cn_api_tensor_ones` 、 :ref:`cn_api_tensor_zeros`、 :ref:`cn_api_tensor_full` -- 创建一个与其他Tensor具有相同 ``shape`` 与 ``dtype`` 的Tensor,请参考 :ref:`cn_api_tensor_ones_like` 、 :ref:`cn_api_tensor_zeros_like` 、 :ref:`cn_api_tensor_full_like` +- 用预先存在的 ``data`` 数据创建 1 个 Tensor,请参考 :ref:`cn_api_paddle_to_tensor` +- 创建一个指定 ``shape`` 的 Tensor,请参考 :ref:`cn_api_tensor_ones` 、 :ref:`cn_api_tensor_zeros`、 :ref:`cn_api_tensor_full` +- 创建一个与其他 Tensor 具有相同 ``shape`` 与 ``dtype`` 的 Tensor,请参考 :ref:`cn_api_tensor_ones_like` 、 :ref:`cn_api_tensor_zeros_like` 、 :ref:`cn_api_tensor_full_like` clear_grad ::::::::: -将当前Tensor的梯度设为0。仅适用于具有梯度的Tensor,通常我们将其用于参数,因为其他临时Tensor没有梯度。 +将当前 Tensor 的梯度设为 0。仅适用于具有梯度的 Tensor,通常我们将其用于参数,因为其他临时 Tensor 没有梯度。 **代码示例** @@ -33,12 +33,12 @@ clear_grad clear_gradient ::::::::: -与clear_grad功能相同,请参考:clear_grad +与 clear_grad 功能相同,请参考:clear_grad dtype ::::::::: -查看一个Tensor的数据类型,支持:'bool','float16','float32','float64','uint8','int8','int16','int32','int64' 类型。 +查看一个 Tensor 的数据类型,支持:'bool','float16','float32','float64','uint8','int8','int16','int32','int64' 类型。 **代码示例** @@ -51,7 +51,7 @@ dtype grad ::::::::: -查看一个Tensor的梯度,数据类型为numpy\.ndarray。 +查看一个 Tensor 的梯度,数据类型为 numpy\.ndarray。 **代码示例** @@ -67,8 +67,8 @@ grad is_leaf ::::::::: -判断Tensor是否为叶子Tensor。对于stop_gradient为True的Tensor,它将是叶子Tensor。对于stop_gradient为False的Tensor, -如果它是由用户创建的,它也会是叶子Tensor。 +判断 Tensor 是否为叶子 Tensor。对于 stop_gradient 为 True 的 Tensor,它将是叶子 Tensor。对于 stop_gradient 为 False 的 Tensor, +如果它是由用户创建的,它也会是叶子 Tensor。 **代码示例** @@ -92,7 +92,7 @@ is_leaf item(*args) ::::::::: -将Tensor中特定位置的元素转化为Python标量,如果未指定位置,则该Tensor必须为单元素Tensor。 +将 Tensor 中特定位置的元素转化为 Python 标量,如果未指定位置,则该 Tensor 必须为单元素 Tensor。 **代码示例** @@ -123,7 +123,7 @@ item(*args) name ::::::::: 
-查看一个Tensor的name,Tensor的name是其唯一标识符,为python的字符串类型。 +查看一个 Tensor 的 name,Tensor 的 name 是其唯一标识符,为 python 的字符串类型。 **代码示例** @@ -136,7 +136,7 @@ name ndim ::::::::: -查看一个Tensor的维度,也称作rank。 +查看一个 Tensor 的维度,也称作 rank。 **代码示例** @@ -149,7 +149,7 @@ ndim persistable ::::::::: -查看一个Tensor的persistable属性,该属性为True时表示持久性变量,持久性变量在每次迭代之后都不会删除。模型参数、学习率等Tensor,都是 +查看一个 Tensor 的 persistable 属性,该属性为 True 时表示持久性变量,持久性变量在每次迭代之后都不会删除。模型参数、学习率等 Tensor,都是 持久性变量。 **代码示例** @@ -164,8 +164,8 @@ persistable place ::::::::: -查看一个Tensor的设备位置,Tensor可能的设备位置有三种:CPU/GPU/固定内存,其中固定内存也称为不可分页内存或锁页内存, -其与GPU之间具有更高的读写效率,并且支持异步传输,这对网络整体性能会有进一步提升,但其缺点是分配空间过多时可能会降低主机系统的性能, +查看一个 Tensor 的设备位置,Tensor 可能的设备位置有三种:CPU/GPU/固定内存,其中固定内存也称为不可分页内存或锁页内存, +其与 GPU 之间具有更高的读写效率,并且支持异步传输,这对网络整体性能会有进一步提升,但其缺点是分配空间过多时可能会降低主机系统的性能, 因为其减少了用于存储虚拟内存数据的可分页内存。 **代码示例** @@ -179,7 +179,7 @@ place shape ::::::::: -查看一个Tensor的shape,shape是Tensor的一个重要的概念,其描述了tensor在每个维度上的元素数量。 +查看一个 Tensor 的 shape,shape 是 Tensor 的一个重要的概念,其描述了 tensor 在每个维度上的元素数量。 **代码示例** @@ -192,8 +192,8 @@ shape stop_gradient ::::::::: -查看一个Tensor是否计算并传播梯度,如果stop_gradient为True,则该Tensor不会计算梯度,并会阻绝Autograd的梯度传播。 -反之,则会计算梯度并传播梯度。用户自行创建的的Tensor,默认是True,模型参数的stop_gradient都为False。 +查看一个 Tensor 是否计算并传播梯度,如果 stop_gradient 为 True,则该 Tensor 不会计算梯度,并会阻绝 Autograd 的梯度传播。 +反之,则会计算梯度并传播梯度。用户自行创建的的 Tensor,默认是 True,模型参数的 stop_gradient 都为 False。 **代码示例** @@ -206,7 +206,7 @@ stop_gradient abs(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -215,7 +215,7 @@ abs(name=None) angle(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -224,7 +224,7 @@ angle(name=None) acos(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -233,7 +233,7 @@ acos(name=None) add(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -247,7 +247,7 @@ Inplace 版本的 :ref:`cn_api_tensor_add` API,对输入 `x` 采用 Inplace add_n(inputs, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -256,7 +256,7 @@ add_n(inputs, name=None) addmm(x, y, beta=1.0, 
alpha=1.0, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -265,7 +265,7 @@ addmm(x, y, beta=1.0, alpha=1.0, name=None) all(axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -274,7 +274,7 @@ all(axis=None, keepdim=False, name=None) allclose(y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -283,7 +283,7 @@ allclose(y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -292,7 +292,7 @@ isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) any(axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -301,7 +301,7 @@ any(axis=None, keepdim=False, name=None) argmax(axis=None, keepdim=False, dtype=int64, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -310,7 +310,7 @@ argmax(axis=None, keepdim=False, dtype=int64, name=None) argmin(axis=None, keepdim=False, dtype=int64, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -319,7 +319,7 @@ argmin(axis=None, keepdim=False, dtype=int64, name=None) argsort(axis=-1, descending=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -328,7 +328,7 @@ argsort(axis=-1, descending=False, name=None) asin(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -337,13 +337,13 @@ asin(name=None) astype(dtype) ::::::::: -将Tensor的类型转换为 ``dtype``,并返回一个新的Tensor。 +将 Tensor 的类型转换为 ``dtype``,并返回一个新的 Tensor。 参数: - - **dtype** (str) - 转换后的dtype,支持'bool','float16','float32','float64','int8','int16', + - **dtype** (str) - 转换后的 dtype,支持'bool','float16','float32','float64','int8','int16', 'int32','int64','uint8'。 -返回:类型转换后的新的Tensor +返回:类型转换后的新的 Tensor 返回类型:Tensor @@ -358,7 +358,7 @@ astype(dtype) atan(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -367,13 +367,13 @@ 
atan(name=None) backward(grad_tensor=None, retain_graph=False) ::::::::: -从当前Tensor开始计算反向的神经网络,传导并计算计算图中Tensor的梯度。 +从当前 Tensor 开始计算反向的神经网络,传导并计算计算图中 Tensor 的梯度。 参数: - - **grad_tensor** (Tensor, optional) - 当前Tensor的初始梯度值。如果 ``grad_tensor`` 是None,当前Tensor 的初始梯度值将会是值全为1.0的Tensor;如果 ``grad_tensor`` 不是None,必须和当前Tensor有相同的长度。默认值:None。 + - **grad_tensor** (Tensor, optional) - 当前 Tensor 的初始梯度值。如果 ``grad_tensor`` 是 None,当前 Tensor 的初始梯度值将会是值全为 1.0 的 Tensor;如果 ``grad_tensor`` 不是 None,必须和当前 Tensor 有相同的长度。默认值:None。 - - **retain_graph** (bool, optional) - 如果为False,反向计算图将被释放。如果在backward()之后继续添加OP, - 需要设置为True,此时之前的反向计算图会保留。将其设置为False会更加节省内存。默认值:False。 + - **retain_graph** (bool, optional) - 如果为 False,反向计算图将被释放。如果在 backward()之后继续添加 OP, + 需要设置为 True,此时之前的反向计算图会保留。将其设置为 False 会更加节省内存。默认值:False。 返回:无 @@ -408,7 +408,7 @@ backward(grad_tensor=None, retain_graph=False) bincount(weights=None, minlength=0) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -453,7 +453,7 @@ bitwise_xor(y, out=None, name=None) bmm(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -462,16 +462,16 @@ bmm(y, name=None) broadcast_to(shape, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor -请参考 :ref:`cn_api_tensor_expand` ,API功能相同。 +请参考 :ref:`cn_api_tensor_expand` ,API 功能相同。 cast(dtype) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -480,7 +480,7 @@ cast(dtype) ceil(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -494,7 +494,7 @@ Inplace 版本的 :ref:`cn_api_fluid_layers_ceil` API,对输入 `x` 采用 Inp cholesky(upper=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -503,7 +503,7 @@ cholesky(upper=False, name=None) chunk(chunks, axis=0, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -512,7 +512,7 @@ chunk(chunks, axis=0, name=None) clear_gradient() ::::::::: -清除当前Tensor的梯度。 +清除当前 Tensor 的梯度。 返回:无 @@ -538,7 +538,7 @@ clear_gradient() clip(min=None, max=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 
返回类型:Tensor @@ -552,9 +552,9 @@ Inplace 版本的 :ref:`cn_api_tensor_clip` API,对输入 `x` 采用 Inplace clone() ::::::::: -复制当前Tensor,并且保留在原计算图中进行梯度传导。 +复制当前 Tensor,并且保留在原计算图中进行梯度传导。 -返回:clone后的Tensor +返回:clone 后的 Tensor **代码示例** .. code-block:: python @@ -583,7 +583,7 @@ clone() concat(axis=0, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -592,7 +592,7 @@ concat(axis=0, name=None) conj(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -601,7 +601,7 @@ conj(name=None) cos(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -610,7 +610,7 @@ cos(name=None) cosh(name=None) ::::::::: -对该Tensor中的每个元素求双曲余弦。 +对该 Tensor 中的每个元素求双曲余弦。 返回类型:Tensor @@ -629,11 +629,11 @@ cosh(name=None) cpu() ::::::::: -将当前Tensor的拷贝到CPU上,且返回的Tensor不保留在原计算图中。 +将当前 Tensor 的拷贝到 CPU 上,且返回的 Tensor 不保留在原计算图中。 -如果当前Tensor已经在CPU上,则不会发生任何拷贝。 +如果当前 Tensor 已经在 CPU 上,则不会发生任何拷贝。 -返回:拷贝到CPU上的Tensor +返回:拷贝到 CPU 上的 Tensor **代码示例** .. code-block:: python @@ -651,7 +651,7 @@ cpu() cross(y, axis=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -660,15 +660,15 @@ cross(y, axis=None, name=None) cuda(device_id=None, blocking=False) ::::::::: -将当前Tensor的拷贝到GPU上,且返回的Tensor不保留在原计算图中。 +将当前 Tensor 的拷贝到 GPU 上,且返回的 Tensor 不保留在原计算图中。 -如果当前Tensor已经在GPU上,且device_id为None,则不会发生任何拷贝。 +如果当前 Tensor 已经在 GPU 上,且 device_id 为 None,则不会发生任何拷贝。 参数: - - **device_id** (int, optional) - 目标GPU的设备Id,默认为None,此时为当前Tensor的设备Id,如果当前Tensor不在GPU上,则为0。 - - **blocking** (bool, optional) - 如果为False并且当前Tensor处于固定内存上,将会发生主机到设备端的异步拷贝。否则,会发生同步拷贝。默认为False。 + - **device_id** (int, optional) - 目标 GPU 的设备 Id,默认为 None,此时为当前 Tensor 的设备 Id,如果当前 Tensor 不在 GPU 上,则为 0。 + - **blocking** (bool, optional) - 如果为 False 并且当前 Tensor 处于固定内存上,将会发生主机到设备端的异步拷贝。否则,会发生同步拷贝。默认为 False。 -返回:拷贝到GPU上的Tensor +返回:拷贝到 GPU 上的 Tensor **代码示例** .. 
code-block:: python @@ -687,7 +687,7 @@ cuda(device_id=None, blocking=False) cumsum(axis=None, dtype=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -698,7 +698,7 @@ deg2rad(x, name=None) 将元素从度的角度转换为弧度 -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -707,9 +707,9 @@ deg2rad(x, name=None) detach() ::::::::: -返回一个新的Tensor,从当前计算图分离。 +返回一个新的 Tensor,从当前计算图分离。 -返回:与当前计算图分离的Tensor。 +返回:与当前计算图分离的 Tensor。 **代码示例** .. code-block:: python @@ -726,7 +726,7 @@ detach() diagonal(offset=0, axis1=0, axis2=1, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -735,7 +735,7 @@ diagonal(offset=0, axis1=0, axis2=1, name=None) digamma(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -744,7 +744,7 @@ digamma(name=None) dim() ::::::::: -查看一个Tensor的维度,也称作rank。 +查看一个 Tensor 的维度,也称作 rank。 **代码示例** @@ -757,7 +757,7 @@ dim() dist(y, p=2) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -766,7 +766,7 @@ dist(y, p=2) divide(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -775,7 +775,7 @@ divide(y, name=None) dot(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -784,7 +784,7 @@ dot(y, name=None) diff(x, n=1, axis=-1, prepend=None, append=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -793,9 +793,9 @@ diff(x, n=1, axis=-1, prepend=None, append=None, name=None) element_size() ::::::::: -返回Tensor单个元素在计算机中所分配的 ``bytes`` 数量。 +返回 Tensor 单个元素在计算机中所分配的 ``bytes`` 数量。 -返回:整数int +返回:整数 int **代码示例** .. 
code-block:: python @@ -820,7 +820,7 @@ element_size() equal(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -829,7 +829,7 @@ equal(y, name=None) equal_all(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -838,7 +838,7 @@ equal_all(y, name=None) erf(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -847,7 +847,7 @@ erf(name=None) exp(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -861,7 +861,7 @@ Inplace 版本的 :ref:`cn_api_fluid_layers_exp` API,对输入 `x` 采用 Inpl expand(shape, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -870,7 +870,7 @@ expand(shape, name=None) expand_as(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -879,7 +879,7 @@ expand_as(y, name=None) exponential_(lam=1.0, name=None) ::::::::: -该OP为inplace形式,通过 ``指数分布`` 随机数来填充该Tensor。 +该 OP 为 inplace 形式,通过 ``指数分布`` 随机数来填充该 Tensor。 ``lam`` 是 ``指数分布`` 的 :math:`\lambda` 参数。随机数符合以下概率密度函数: @@ -888,12 +888,12 @@ exponential_(lam=1.0, name=None) f(x) = \lambda e^{-\lambda x} 参数: - - **x** (Tensor) - 输入Tensor,数据类型为 float32/float64。 + - **x** (Tensor) - 输入 Tensor,数据类型为 float32/float64。 - **lam** (float) - 指数分布的 :math:`\lambda` 参数。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 -返回:原Tensor +返回:原 Tensor **代码示例** .. code-block:: python @@ -918,14 +918,14 @@ eigvals(y, name=None) fill_(x, value, name=None) ::::::::: -以value值填充Tensor x中所有数据。对x的原地Inplace修改。 +以 value 值填充 Tensor x 中所有数据。对 x 的原地 Inplace 修改。 参数: - - **x** (Tensor) - 需要修改的原始Tensor。 - - **value** (float) - 以输入value值修改原始Tensor元素。 + - **x** (Tensor) - 需要修改的原始 Tensor。 + - **value** (float) - 以输入 value 值修改原始 Tensor 元素。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 -返回:修改原始Tensor x的所有元素为value以后的新的Tensor。 +返回:修改原始 Tensor x 的所有元素为 value 以后的新的 Tensor。 **代码示例** .. 
code-block:: python @@ -938,13 +938,13 @@ fill_(x, value, name=None) zero_(x, name=None) ::::::::: -以 0 值填充Tensor x中所有数据。对x的原地Inplace修改。 +以 0 值填充 Tensor x 中所有数据。对 x 的原地 Inplace 修改。 参数: - - **x** (Tensor) - 需要修改的原始Tensor。 + - **x** (Tensor) - 需要修改的原始 Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 -返回:修改原始Tensor x的所有元素为 0 以后的新的Tensor。 +返回:修改原始 Tensor x 的所有元素为 0 以后的新的 Tensor。 **代码示例** .. code-block:: python @@ -957,18 +957,18 @@ zero_(x, name=None) fill_diagonal_(x, value, offset=0, wrap=False, name=None) ::::::::: -以value值填充输入Tensor x的对角线元素值。对x的原地Inplace修改。 -输入Tensor x维度至少是2维,当维度大于2维时要求所有维度值相等。 -当维度等于2维时,两个维度可以不等,且此时wrap选项生效,详见wrap参数说明。 +以 value 值填充输入 Tensor x 的对角线元素值。对 x 的原地 Inplace 修改。 +输入 Tensor x 维度至少是 2 维,当维度大于 2 维时要求所有维度值相等。 +当维度等于 2 维时,两个维度可以不等,且此时 wrap 选项生效,详见 wrap 参数说明。 参数: - - **x** (Tensor) - 需要修改对角线元素值的原始Tensor。 - - **value** (float) - 以输入value值修改原始Tensor对角线元素。 - - **offset** (int, optional) - 所选取对角线相对原始主对角线位置的偏移量,正向右上方偏移,负向左下方偏移,默认为0。 - - **wrap** (bool, optional) - 对于2维Tensor,height>width时是否循环填充,默认为False。 + - **x** (Tensor) - 需要修改对角线元素值的原始 Tensor。 + - **value** (float) - 以输入 value 值修改原始 Tensor 对角线元素。 + - **offset** (int, optional) - 所选取对角线相对原始主对角线位置的偏移量,正向右上方偏移,负向左下方偏移,默认为 0。 + - **wrap** (bool, optional) - 对于 2 维 Tensor,height>width 时是否循环填充,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 -返回:修改原始Tensor x的对角线元素为value以后的新的Tensor。 +返回:修改原始 Tensor x 的对角线元素为 value 以后的新的 Tensor。 **代码示例** .. 
code-block:: python @@ -984,19 +984,19 @@ fill_diagonal_(x, value, offset=0, wrap=False, name=None) fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None) ::::::::: -将输入Tensor y填充到Tensor x的以dim1、dim2所指定对角线维度作为最后一个维度的局部子Tensor中,输入Tensor x其余维度作为该局部子Tensor的shape中的前几个维度。 -其中输入Tensor y的维度要求是:最后一个维度与dim1、dim2指定的对角线维度相同,其余维度与输入Tensor x其余维度相同,且先后顺序一致。 -例如,有输入Tensor x,x.shape = (2,3,4,5)时,若dim1=2,dim2=3,则y.shape=(2,3,4);若dim1=1,dim2=2,则y.shape=(2,5,3); +将输入 Tensor y 填充到 Tensor x 的以 dim1、dim2 所指定对角线维度作为最后一个维度的局部子 Tensor 中,输入 Tensor x 其余维度作为该局部子 Tensor 的 shape 中的前几个维度。 +其中输入 Tensor y 的维度要求是:最后一个维度与 dim1、dim2 指定的对角线维度相同,其余维度与输入 Tensor x 其余维度相同,且先后顺序一致。 +例如,有输入 Tensor x,x.shape = (2,3,4,5)时,若 dim1=2,dim2=3,则 y.shape=(2,3,4);若 dim1=1,dim2=2,则 y.shape=(2,5,3); 参数: - - **x** (Tensor) - 需要填充局部对角线区域的原始Tensor。 - - **y** (Tensor) - 需要被填充到原始Tensor x对角线区域的输入Tensor。 - - **offset** (int, optional) - 选取局部区域对角线位置相对原始主对角线位置的偏移量,正向右上方偏移,负向左下方偏移,默认为0。 - - **dim1** (int, optional) - 指定对角线所参考第一个维度,默认为0。 - - **dim2** (int, optional) - 指定对角线所参考第二个维度,默认为1。 + - **x** (Tensor) - 需要填充局部对角线区域的原始 Tensor。 + - **y** (Tensor) - 需要被填充到原始 Tensor x 对角线区域的输入 Tensor。 + - **offset** (int, optional) - 选取局部区域对角线位置相对原始主对角线位置的偏移量,正向右上方偏移,负向左下方偏移,默认为 0。 + - **dim1** (int, optional) - 指定对角线所参考第一个维度,默认为 0。 + - **dim2** (int, optional) - 指定对角线所参考第二个维度,默认为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 -返回:将y的值填充到输入Tensor x对角线区域以后所组合成的新Tensor。 +返回:将 y 的值填充到输入 Tensor x 对角线区域以后所组合成的新 Tensor。 **代码示例** .. 
code-block:: python @@ -1024,7 +1024,7 @@ Inplace 版本的 :ref:`cn_api_fill_diagonal_tensor` API,对输入 `x` 采用 flatten(start_axis=0, stop_axis=-1, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1038,7 +1038,7 @@ Inplace 版本的 :ref:`cn_api_paddle_flatten` API,对输入 `x` 采用 Inplac flip(axis, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1047,7 +1047,7 @@ flip(axis, name=None) rot90(k=1, axis=[0, 1], name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1056,7 +1056,7 @@ rot90(k=1, axis=[0, 1], name=None) floor(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1070,7 +1070,7 @@ Inplace 版本的 :ref:`cn_api_fluid_layers_floor` API,对输入 `x` 采用 In floor_divide(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1079,16 +1079,16 @@ floor_divide(y, name=None) floor_mod(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor -mod函数的别名,请参考 :ref:`cn_api_tensor_mod` +mod 函数的别名,请参考 :ref:`cn_api_tensor_mod` gather(index, axis=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1097,7 +1097,7 @@ gather(index, axis=None, name=None) gather_nd(index, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1108,16 +1108,16 @@ gcd(x, y, name=None) 计算两个输入的按元素绝对值的最大公约数 -返回:计算后的Tensor +返回:计算后的 Tensor 请参考 :ref:`cn_api_paddle_tensor_gcd` gradient() ::::::::: -与 ``Tensor.grad`` 相同,查看一个Tensor的梯度,数据类型为numpy\.ndarray。 +与 ``Tensor.grad`` 相同,查看一个 Tensor 的梯度,数据类型为 numpy\.ndarray。 -返回:该Tensor的梯度 +返回:该 Tensor 的梯度 返回类型:numpy\.ndarray **代码示例** @@ -1133,7 +1133,7 @@ gradient() greater_equal(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1142,7 +1142,7 @@ greater_equal(y, name=None) greater_than(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1151,7 +1151,7 @@ greater_than(y, name=None) heaviside(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1160,7 +1160,7 @@ heaviside(y, name=None) histogram(bins=100, min=0, 
max=0) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1169,7 +1169,7 @@ histogram(bins=100, min=0, max=0) imag(name=None) ::::::::: -返回:包含原复数Tensor的虚部数值 +返回:包含原复数 Tensor 的虚部数值 返回类型:Tensor @@ -1178,7 +1178,7 @@ imag(name=None) is_floating_point(x) ::::::::: -返回:判断输入Tensor的数据类型是否为浮点类型 +返回:判断输入 Tensor 的数据类型是否为浮点类型 返回类型:bool @@ -1187,7 +1187,7 @@ is_floating_point(x) increment(value=1.0, in_place=True) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1196,7 +1196,7 @@ increment(value=1.0, in_place=True) index_sample(index) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1205,7 +1205,7 @@ index_sample(index) index_select(index, axis=0, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1214,7 +1214,7 @@ index_select(index, axis=0, name=None) repeat_interleave(repeats, axis=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1223,7 +1223,7 @@ repeat_interleave(repeats, axis=None, name=None) inv(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1232,7 +1232,7 @@ inv(name=None) is_empty(cond=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1241,7 +1241,7 @@ is_empty(cond=None) isfinite(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1250,7 +1250,7 @@ isfinite(name=None) isinf(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1259,7 +1259,7 @@ isinf(name=None) isnan(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1268,7 +1268,7 @@ isnan(name=None) kthvalue(k, axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1277,7 +1277,7 @@ kthvalue(k, axis=None, keepdim=False, name=None) kron(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1288,14 +1288,14 @@ lcm(x, y, name=None) 计算两个输入的按元素绝对值的最小公倍数 -返回:计算后的Tensor +返回:计算后的 Tensor 请参考 :ref:`cn_api_paddle_tensor_lcm` less_equal(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1304,7 +1304,7 @@ 
less_equal(y, name=None) less_than(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1313,7 +1313,7 @@ less_than(y, name=None) lgamma(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1322,7 +1322,7 @@ lgamma(name=None) log(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1331,7 +1331,7 @@ log(name=None) log10(name=None) ::::::::: -返回:以10为底数,对当前Tensor逐元素计算对数。 +返回:以 10 为底数,对当前 Tensor 逐元素计算对数。 返回类型:Tensor @@ -1340,7 +1340,7 @@ log10(name=None) log2(name=None) ::::::::: -返回:以2为底数,对当前Tensor逐元素计算对数。 +返回:以 2 为底数,对当前 Tensor 逐元素计算对数。 返回类型:Tensor @@ -1349,7 +1349,7 @@ log2(name=None) log1p(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1358,7 +1358,7 @@ log1p(name=None) logcumsumexp(x, axis=None, dtype=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1367,7 +1367,7 @@ logcumsumexp(x, axis=None, dtype=None, name=None) logical_and(y, out=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1376,7 +1376,7 @@ logical_and(y, out=None, name=None) logical_not(out=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1385,7 +1385,7 @@ logical_not(out=None, name=None) logical_or(y, out=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1394,7 +1394,7 @@ logical_or(y, out=None, name=None) logical_xor(y, out=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1403,7 +1403,7 @@ logical_xor(y, out=None, name=None) logsumexp(axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1412,7 +1412,7 @@ logsumexp(axis=None, keepdim=False, name=None) masked_select(mask, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1421,7 +1421,7 @@ masked_select(mask, name=None) matmul(y, transpose_x=False, transpose_y=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1430,7 +1430,7 @@ matmul(y, transpose_x=False, transpose_y=False, 
name=None) matrix_power(x, n, name=None) ::::::::: -返回:经过矩阵幂运算后的Tensor +返回:经过矩阵幂运算后的 Tensor 返回类型:Tensor @@ -1439,7 +1439,7 @@ matrix_power(x, n, name=None) max(axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1448,7 +1448,7 @@ max(axis=None, keepdim=False, name=None) amax(axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1457,7 +1457,7 @@ amax(axis=None, keepdim=False, name=None) maximum(y, axis=-1, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1466,7 +1466,7 @@ maximum(y, axis=-1, name=None) mean(axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1484,7 +1484,7 @@ median(axis=None, keepdim=False, name=None) nanmedian(axis=None, keepdim=True, name=None) ::::::::: -返回:沿着 ``axis`` 忽略NAN元素进行中位数计算的结果 +返回:沿着 ``axis`` 忽略 NAN 元素进行中位数计算的结果 返回类型:Tensor @@ -1493,7 +1493,7 @@ nanmedian(axis=None, keepdim=True, name=None) min(axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1502,7 +1502,7 @@ min(axis=None, keepdim=False, name=None) amin(axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1511,7 +1511,7 @@ amin(axis=None, keepdim=False, name=None) minimum(y, axis=-1, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1520,7 +1520,7 @@ minimum(y, axis=-1, name=None) mm(mat2, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1529,7 +1529,7 @@ mm(mat2, name=None) mod(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1538,7 +1538,7 @@ mod(y, name=None) mode(axis=-1, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1547,7 +1547,7 @@ mode(axis=-1, keepdim=False, name=None) multiplex(index) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1556,7 +1556,7 @@ multiplex(index) multiply(y, axis=-1, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 
返回类型:Tensor @@ -1565,7 +1565,7 @@ multiply(y, axis=-1, name=None) mv(vec, name=None) ::::::::: -返回:当前Tensor向量 ``vec`` 的乘积 +返回:当前 Tensor 向量 ``vec`` 的乘积 返回类型:Tensor @@ -1574,7 +1574,7 @@ mv(vec, name=None) ndimension() ::::::::: -查看一个Tensor的维度,也称作rank。 +查看一个 Tensor 的维度,也称作 rank。 **代码示例** @@ -1587,7 +1587,7 @@ ndimension() neg(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1596,7 +1596,7 @@ neg(name=None) nonzero(as_tuple=False) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1605,7 +1605,7 @@ nonzero(as_tuple=False) norm(p=fro, axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1614,7 +1614,7 @@ norm(p=fro, axis=None, keepdim=False, name=None) not_equal(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1623,7 +1623,7 @@ not_equal(y, name=None) numel(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1632,9 +1632,9 @@ numel(name=None) numpy() ::::::::: -将当前Tensor转化为numpy\.ndarray。 +将当前 Tensor 转化为 numpy\.ndarray。 -返回:Tensor转化成的numpy\.ndarray。 +返回:Tensor 转化成的 numpy\.ndarray。 返回类型:numpy\.ndarray **代码示例** @@ -1652,11 +1652,11 @@ numpy() pin_memory(y, name=None) ::::::::: -将当前Tensor的拷贝到固定内存上,且返回的Tensor不保留在原计算图中。 +将当前 Tensor 的拷贝到固定内存上,且返回的 Tensor 不保留在原计算图中。 -如果当前Tensor已经在固定内存上,则不会发生任何拷贝。 +如果当前 Tensor 已经在固定内存上,则不会发生任何拷贝。 -返回:拷贝到固定内存上的Tensor +返回:拷贝到固定内存上的 Tensor **代码示例** .. 
code-block:: python @@ -1673,7 +1673,7 @@ pin_memory(y, name=None) pow(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1682,7 +1682,7 @@ pow(y, name=None) prod(axis=None, keepdim=False, dtype=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1691,7 +1691,7 @@ prod(axis=None, keepdim=False, dtype=None, name=None) quantile(q, axis=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1702,7 +1702,7 @@ rad2deg(x, name=None) 将元素从弧度的角度转换为度 -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1711,7 +1711,7 @@ rad2deg(x, name=None) rank() ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1720,7 +1720,7 @@ rank() real(name=None) ::::::::: -返回:Tensor,包含原复数Tensor的实部数值 +返回:Tensor,包含原复数 Tensor 的实部数值 返回类型:Tensor @@ -1729,7 +1729,7 @@ real(name=None) reciprocal(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1802,16 +1802,16 @@ register_hook(hook) remainder(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor -mod函数的别名,请参考 :ref:`cn_api_tensor_mod` +mod 函数的别名,请参考 :ref:`cn_api_tensor_mod` reshape(shape, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1825,7 +1825,7 @@ Inplace 版本的 :ref:`cn_api_fluid_layers_reshape` API,对输入 `x` 采用 reverse(axis, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1834,7 +1834,7 @@ reverse(axis, name=None) roll(shifts, axis=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1843,7 +1843,7 @@ roll(shifts, axis=None, name=None) round(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1857,7 +1857,7 @@ Inplace 版本的 :ref:`cn_api_fluid_layers_round` API,对输入 `x` 采用 In rsqrt(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1871,7 +1871,7 @@ Inplace 版本的 :ref:`cn_api_fluid_layers_rsqrt` API,对输入 `x` 采用 In scale(scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1885,7 +1885,7 @@ 
Inplace 版本的 :ref:`cn_api_paddle_tensor_unsqueeze` API,对输入 `x` 采 scatter(index, updates, overwrite=True, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1899,7 +1899,7 @@ Inplace 版本的 :ref:`cn_api_paddle_cn_scatter` API,对输入 `x` 采用 Inp scatter_nd(updates, shape, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1908,7 +1908,7 @@ scatter_nd(updates, shape, name=None) scatter_nd_add(index, updates, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1917,10 +1917,10 @@ scatter_nd_add(index, updates, name=None) set_value(value) ::::::::: -设置当前Tensor的值。 +设置当前 Tensor 的值。 参数: - - **value** (Tensor|np.ndarray) - 需要被设置的值,类型为Tensor或者numpy\.array。 + - **value** (Tensor|np.ndarray) - 需要被设置的值,类型为 Tensor 或者 numpy\.array。 **代码示例** .. code-block:: python @@ -1936,7 +1936,7 @@ set_value(value) linear.weight.set_value(custom_weight) # change existing weight out = linear(input) # call with different weight -返回:计算后的Tensor +返回:计算后的 Tensor shard_index(index_num, nshards, shard_id, ignore_value=-1) ::::::::: @@ -1949,7 +1949,7 @@ shard_index(index_num, nshards, shard_id, ignore_value=-1) sign(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1958,7 +1958,7 @@ sign(name=None) sin(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1967,7 +1967,7 @@ sin(name=None) sinh(name=None) ::::::::: -对该Tensor中逐个元素求双曲正弦。 +对该 Tensor 中逐个元素求双曲正弦。 **代码示例** .. 
code-block:: python @@ -1982,7 +1982,7 @@ sinh(name=None) size() ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -1991,7 +1991,7 @@ size() slice(axes, starts, ends) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2000,7 +2000,7 @@ slice(axes, starts, ends) sort(axis=-1, descending=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2009,7 +2009,7 @@ sort(axis=-1, descending=False, name=None) split(num_or_sections, axis=0, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2018,7 +2018,7 @@ split(num_or_sections, axis=0, name=None) sqrt(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2032,7 +2032,7 @@ Inplace 版本的 :ref:`cn_api_fluid_layers_sqrt` API,对输入 `x` 采用 Inp square(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2041,7 +2041,7 @@ square(name=None) squeeze(axis=None, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2055,7 +2055,7 @@ Inplace 版本的 :ref:`cn_api_paddle_tensor_squeeze` API,对输入 `x` 采用 stack(axis=0, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2064,7 +2064,7 @@ stack(axis=0, name=None) stanh(scale_a=0.67, scale_b=1.7159, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2073,7 +2073,7 @@ stanh(scale_a=0.67, scale_b=1.7159, name=None) std(axis=None, unbiased=True, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2082,7 +2082,7 @@ std(axis=None, unbiased=True, keepdim=False, name=None) strided_slice(axes, starts, ends, strides) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2091,7 +2091,7 @@ strided_slice(axes, starts, ends, strides) subtract(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2105,7 +2105,7 @@ Inplace 版本的 :ref:`cn_api_paddle_tensor_subtract` API,对输入 `x` 采 sum(axis=None, dtype=None, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2114,7 +2114,7 @@ sum(axis=None, dtype=None, keepdim=False, 
name=None) t(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2123,7 +2123,7 @@ t(name=None) tanh(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2137,7 +2137,7 @@ Inplace 版本的 :ref:`cn_api_fluid_layers_tan` API,对输入 `x` 采用 Inpl tile(repeat_times, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2146,7 +2146,7 @@ tile(repeat_times, name=None) tolist() ::::::::: -返回:Tensor对应结构的list +返回:Tensor 对应结构的 list 返回类型:python list @@ -2155,7 +2155,7 @@ tolist() topk(k, axis=None, largest=True, sorted=True, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2164,7 +2164,7 @@ topk(k, axis=None, largest=True, sorted=True, name=None) trace(offset=0, axis1=0, axis2=1, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2173,7 +2173,7 @@ trace(offset=0, axis1=0, axis2=1, name=None) transpose(perm, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2182,7 +2182,7 @@ transpose(perm, name=None) triangular_solve(b, upper=True, transpose=False, unitriangular=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2191,7 +2191,7 @@ triangular_solve(b, upper=True, transpose=False, unitriangular=False, name=None) trunc(name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2200,7 +2200,7 @@ trunc(name=None) frac(name=None) ::::::::: -返回:计算后的tensor +返回:计算后的 tensor 返回类型:Tensor @@ -2209,7 +2209,7 @@ frac(name=None) tensordot(y, axes=2, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2218,7 +2218,7 @@ tensordot(y, axes=2, name=None) unbind(axis=0) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2227,17 +2227,17 @@ unbind(axis=0) uniform_(min=-1.0, max=1.0, seed=0, name=None) ::::::::: -Inplace版本的 :ref:`cn_api_tensor_uniform`,返回一个从均匀分布采样的随机数填充的Tensor。输出Tensor将被置于输入x的位置。 +Inplace 版本的 :ref:`cn_api_tensor_uniform`,返回一个从均匀分布采样的随机数填充的 Tensor。输出 Tensor 将被置于输入 x 的位置。 参数: - - **x** (Tensor) - 待被随机数填充的输入Tensor。 - - **min** (float|int, 
optional) - 生成随机数的下界,min包含在该范围内。默认为-1.0。 - - **max** (float|int, optional) - 生成随机数的上界,max不包含在该范围内。默认为1.0。 - - **seed** (int, optional) - 用于生成随机数的随机种子。如果seed为0,将使用全局默认生成器的种子(可通过paddle.seed设置)。 - 注意如果seed不为0,该操作每次将生成同一个随机值。默认为0。 + - **x** (Tensor) - 待被随机数填充的输入 Tensor。 + - **min** (float|int, optional) - 生成随机数的下界,min 包含在该范围内。默认为-1.0。 + - **max** (float|int, optional) - 生成随机数的上界,max 不包含在该范围内。默认为 1.0。 + - **seed** (int, optional) - 用于生成随机数的随机种子。如果 seed 为 0,将使用全局默认生成器的种子(可通过 paddle.seed 设置)。 + 注意如果 seed 不为 0,该操作每次将生成同一个随机值。默认为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 -返回:由服从范围在[min, max)的均匀分布的随机数所填充的输入Tensor x。 +返回:由服从范围在[min, max)的均匀分布的随机数所填充的输入 Tensor x。 返回类型:Tensor @@ -2257,7 +2257,7 @@ Inplace版本的 :ref:`cn_api_tensor_uniform`,返回一个从均匀分布采 unique(return_index=False, return_inverse=False, return_counts=False, axis=None, dtype=int64, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2266,7 +2266,7 @@ unique(return_index=False, return_inverse=False, return_counts=False, axis=None, unsqueeze(axis, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2280,7 +2280,7 @@ Inplace 版本的 :ref:`cn_api_paddle_tensor_unsqueeze` API,对输入 `x` 采 unstack(axis=0, num=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2289,7 +2289,7 @@ unstack(axis=0, num=None) var(axis=None, unbiased=True, keepdim=False, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2298,7 +2298,7 @@ var(axis=None, unbiased=True, keepdim=False, name=None) where(y, name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2307,7 +2307,7 @@ where(y, name=None) multi_dot(x, name=None) ::::::::: -返回:多个矩阵相乘后的Tensor +返回:多个矩阵相乘后的 Tensor 返回类型:Tensor @@ -2316,7 +2316,7 @@ multi_dot(x, name=None) solve(x, y name=None) ::::::::: -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2325,7 +2325,7 @@ solve(x, y name=None) logit(eps=None, name=None) ::::::::: -返回:计算logit后的Tensor +返回:计算 logit 后的 Tensor 返回类型:Tensor @@ -2336,7 +2336,7 @@ 
lerp(x, y, weight, name=None) 基于给定的 weight 计算 x 与 y 的线性插值 -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2370,9 +2370,9 @@ is_integer() take_along_axis(arr, index, axis) ::::::::: -基于输入索引矩阵,沿着指定axis从arr矩阵里选取1d切片。索引矩阵必须和arr矩阵有相同的维度,需要能够broadcast与arr矩阵对齐。 +基于输入索引矩阵,沿着指定 axis 从 arr 矩阵里选取 1d 切片。索引矩阵必须和 arr 矩阵有相同的维度,需要能够 broadcast 与 arr 矩阵对齐。 -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2381,9 +2381,9 @@ take_along_axis(arr, index, axis) put_along_axis(arr, index, value, axis, reduce="assign") ::::::::: -基于输入index矩阵,将输入value沿着指定axis放置入arr矩阵。索引矩阵和value必须和arr矩阵有相同的维度,需要能够broadcast与arr矩阵对齐。 +基于输入 index 矩阵,将输入 value 沿着指定 axis 放置入 arr 矩阵。索引矩阵和 value 必须和 arr 矩阵有相同的维度,需要能够 broadcast 与 arr 矩阵对齐。 -返回:计算后的Tensor +返回:计算后的 Tensor 返回类型:Tensor @@ -2392,6 +2392,6 @@ put_along_axis(arr, index, value, axis, reduce="assign") erfinv(x, name=None) ::::::::: -对输入x进行逆误差函数计算 +对输入 x 进行逆误差函数计算 请参考 :ref:`cn_api_paddle_tensor_erfinv` diff --git a/docs/api/paddle/abs_cn.rst b/docs/api/paddle/abs_cn.rst index 7c7da51437f..8bc1ee7a6ff 100644 --- a/docs/api/paddle/abs_cn.rst +++ b/docs/api/paddle/abs_cn.rst @@ -15,12 +15,12 @@ abs 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/acos_cn.rst b/docs/api/paddle/acos_cn.rst index 20db4d110e6..9c633486ee2 100644 --- a/docs/api/paddle/acos_cn.rst +++ b/docs/api/paddle/acos_cn.rst @@ -8,19 +8,19 @@ acos -arccosine函数。 +arccosine 函数。 .. 
math:: out = cos^{-1}(x) 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 diff --git a/docs/api/paddle/acosh_cn.rst b/docs/api/paddle/acosh_cn.rst index 040fd441a2c..e0176ce63ee 100644 --- a/docs/api/paddle/acosh_cn.rst +++ b/docs/api/paddle/acosh_cn.rst @@ -8,19 +8,19 @@ acosh -Arccosh函数。 +Arccosh 函数。 .. math:: out = acosh(x) 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 diff --git a/docs/api/paddle/add_cn.rst b/docs/api/paddle/add_cn.rst index 6ea4126e7fb..896d3ba04fb 100644 --- a/docs/api/paddle/add_cn.rst +++ b/docs/api/paddle/add_cn.rst @@ -16,18 +16,18 @@ add .. 
math:: Out = X + Y -- :math:`X`:多维Tensor。 -- :math:`Y`:多维Tensor。 +- :math:`X`:多维 Tensor。 +- :math:`Y`:多维 Tensor。 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 - - y (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64、int32、int64。 + - y (Tensor) - 输入的 Tensor,数据类型为:float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -多维Tensor,数据类型与 ``x`` 相同,维度为广播后的形状。 +多维 Tensor,数据类型与 ``x`` 相同,维度为广播后的形状。 代码示例 diff --git a/docs/api/paddle/add_n_cn.rst b/docs/api/paddle/add_n_cn.rst index c32a3b6028d..ab39e2bc76d 100644 --- a/docs/api/paddle/add_n_cn.rst +++ b/docs/api/paddle/add_n_cn.rst @@ -48,7 +48,7 @@ add_n 返回 :::::::::::: -Tensor,输入 ``inputs`` 求和后的结果,shape和数据类型与 ``inputs`` 一致。 +Tensor,输入 ``inputs`` 求和后的结果,shape 和数据类型与 ``inputs`` 一致。 代码示例 diff --git a/docs/api/paddle/addmm_cn.rst b/docs/api/paddle/addmm_cn.rst index 02176315f24..3b530efbfff 100644 --- a/docs/api/paddle/addmm_cn.rst +++ b/docs/api/paddle/addmm_cn.rst @@ -22,8 +22,8 @@ addmm - **input** (Tensor) - 输入 Tensor input,数据类型支持 float32、float64。 - **x** (Tensor) - 输入 Tensor x,数据类型支持 float32、float64。 - **y** (Tensor) - 输入 Tensor y,数据类型支持 float32、float64。 - - **alpha** (float,可选) - 乘以x*y的标量,数据类型支持float32、float64,默认值为1.0。 - - **beta** (float,可选) - 乘以input的标量,数据类型支持float32、float64,默认值为1.0。 + - **alpha** (float,可选) - 乘以 x*y 的标量,数据类型支持 float32、float64,默认值为 1.0。 + - **beta** (float,可选) - 乘以 input 的标量,数据类型支持 float32、float64,默认值为 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/all_cn.rst b/docs/api/paddle/all_cn.rst index 0bc41b3f790..0561626e564 100644 --- a/docs/api/paddle/all_cn.rst +++ b/docs/api/paddle/all_cn.rst @@ -5,18 +5,18 @@ all .. 
py:function:: paddle.all(x, axis=None, keepdim=False, name=None) -对指定维度上的Tensor元素进行逻辑与运算,并输出相应的计算结果。 +对指定维度上的 Tensor 元素进行逻辑与运算,并输出相应的计算结果。 参数 ::::::::: - - **x** (Tensor)- 输入变量为多维Tensor,数据类型为bool。 - - **axis** (int | list | tuple,可选)- 计算逻辑与运算的维度。如果为None,则计算所有元素的逻辑与并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(x),rank(x)]` 范围内。如果 :math:`axis [i] <0`,则维度将变为 :math:`rank+axis[i]`,默认值为None。 + - **x** (Tensor)- 输入变量为多维 Tensor,数据类型为 bool。 + - **axis** (int | list | tuple,可选)- 计算逻辑与运算的维度。如果为 None,则计算所有元素的逻辑与并返回包含单个元素的 Tensor 变量,否则必须在 :math:`[−rank(x),rank(x)]` 范围内。如果 :math:`axis [i] <0`,则维度将变为 :math:`rank+axis[i]`,默认值为 None。 - **keepdim** (bool,可选) - 是否在输出 Tensor 中保留减小的维度。除非 keepdim 为 True,否则输出 Tensor 的维度将比输入 Tensor 小一维,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - Tensor,在指定维度上进行逻辑与运算的Tensor,数据类型和输入数据类型一致。 + Tensor,在指定维度上进行逻辑与运算的 Tensor,数据类型和输入数据类型一致。 代码示例 diff --git a/docs/api/paddle/allclose_cn.rst b/docs/api/paddle/allclose_cn.rst index fcb67085ca5..8e734fc15d1 100644 --- a/docs/api/paddle/allclose_cn.rst +++ b/docs/api/paddle/allclose_cn.rst @@ -5,26 +5,26 @@ allclose .. py:function:: paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) -逐个检查x和y的所有元素是否均满足如下条件: +逐个检查 x 和 y 的所有元素是否均满足如下条件: .. 
math:: \left| x - y \right| \leq atol + rtol \times \left| y \right| -该API的行为类似于 :math:`numpy.allclose`,即当两个待比较Tensor的所有元素均在一定容忍误差范围内视为相等则该API返回True值。 +该 API 的行为类似于 :math:`numpy.allclose`,即当两个待比较 Tensor 的所有元素均在一定容忍误差范围内视为相等则该 API 返回 True 值。 参数 :::::::::::: - **x** (Tensor) - 输入的 `Tensor`,数据类型为:float32、float64。 - **y** (Tensor) - 输入的 `Tensor`,数据类型为:float32、float64。 - - **rtol** (float,可选) - 相对容忍误差,默认值为1e-5。 - - **atol** (float,可选) - 绝对容忍误差,默认值为1e-8。 - - **equal_nan** (bool,可选) - 如果设置为True,则两个NaN数值将被视为相等,默认值为False。 + - **rtol** (float,可选) - 相对容忍误差,默认值为 1e-5。 + - **atol** (float,可选) - 绝对容忍误差,默认值为 1e-8。 + - **equal_nan** (bool,可选) - 如果设置为 True,则两个 NaN 数值将被视为相等,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -计算得到的布尔类型单值Tensor。 +计算得到的布尔类型单值 Tensor。 代码示例 :::::::::::: diff --git a/docs/api/paddle/amax_cn.rst b/docs/api/paddle/amax_cn.rst index 7916bbd4f73..476356f9dda 100644 --- a/docs/api/paddle/amax_cn.rst +++ b/docs/api/paddle/amax_cn.rst @@ -6,7 +6,7 @@ amax .. py:function:: paddle.amax(x, axis=None, keepdim=False, name=None) -对指定维度上的Tensor元素求最大值运算,并输出相应的计算结果。 +对指定维度上的 Tensor 元素求最大值运算,并输出相应的计算结果。 .. 
note:: @@ -14,14 +14,14 @@ amax 参数 ::::::::: - - **x** (Tensor)- Tensor,支持数据类型为float32,float64,int32,int64,维度不超过4维。 - - **axis** (list | int,可选)- 求最大值运算的维度。如果为None,则计算所有元素的最大值并返回包含单个元素的Tensor变量,否则必须在 :math:`[-x.ndim, x.ndim]` 范围内。如果 :math:`axis[i] <0`,则维度将变为 :math:`x.ndim+axis[i]`,默认值为None。 - - **keepdim** (bool)- 是否在输出Tensor中保留减小的维度。如果keepdim 为 False,结果张量的维度将比输入张量的小,默认值为False。 + - **x** (Tensor)- Tensor,支持数据类型为 float32,float64,int32,int64,维度不超过 4 维。 + - **axis** (list | int,可选)- 求最大值运算的维度。如果为 None,则计算所有元素的最大值并返回包含单个元素的 Tensor 变量,否则必须在 :math:`[-x.ndim, x.ndim]` 范围内。如果 :math:`axis[i] <0`,则维度将变为 :math:`x.ndim+axis[i]`,默认值为 None。 + - **keepdim** (bool)- 是否在输出 Tensor 中保留减小的维度。如果 keepdim 为 False,结果张量的维度将比输入张量的小,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - Tensor,在指定axis上进行求最大值运算的Tensor,数据类型和输入数据类型一致。 + Tensor,在指定 axis 上进行求最大值运算的 Tensor,数据类型和输入数据类型一致。 代码示例 diff --git a/docs/api/paddle/amin_cn.rst b/docs/api/paddle/amin_cn.rst index 6b2ef3632fc..b42f13885e5 100644 --- a/docs/api/paddle/amin_cn.rst +++ b/docs/api/paddle/amin_cn.rst @@ -6,7 +6,7 @@ amin .. py:function:: paddle.amin(x, axis=None, keepdim=False, name=None) -对指定维度上的Tensor元素求最小值运算,并输出相应的计算结果。 +对指定维度上的 Tensor 元素求最小值运算,并输出相应的计算结果。 .. 
note:: @@ -14,14 +14,14 @@ amin 参数 ::::::::: - - **x** (Tensor)- Tensor,支持数据类型为float32,float64,int32,int64,维度不超过4维。 - - **axis** (list | int,可选)- 求最小值运算的维度。如果为None,则计算所有元素的最小值并返回包含单个元素的Tensor变量,否则必须在 :math:`[−x.ndim, x.ndim]` 范围内。如果 :math:`axis[i] < 0`,则维度将变为 :math:`x.ndim+axis[i]`,默认值为None。 - - **keepdim** (bool)- 是否在输出Tensor中保留减小的维度。如果keepdim 为False,结果张量的维度将比输入张量的小,默认值为False。 + - **x** (Tensor)- Tensor,支持数据类型为 float32,float64,int32,int64,维度不超过 4 维。 + - **axis** (list | int,可选)- 求最小值运算的维度。如果为 None,则计算所有元素的最小值并返回包含单个元素的 Tensor 变量,否则必须在 :math:`[−x.ndim, x.ndim]` 范围内。如果 :math:`axis[i] < 0`,则维度将变为 :math:`x.ndim+axis[i]`,默认值为 None。 + - **keepdim** (bool)- 是否在输出 Tensor 中保留减小的维度。如果 keepdim 为 False,结果张量的维度将比输入张量的小,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - Tensor,在指定axis上进行求最小值运算的Tensor,数据类型和输入数据类型一致。 + Tensor,在指定 axis 上进行求最小值运算的 Tensor,数据类型和输入数据类型一致。 代码示例 diff --git a/docs/api/paddle/amp/GradScaler_cn.rst b/docs/api/paddle/amp/GradScaler_cn.rst index e5bafac6e34..b2379a931b6 100644 --- a/docs/api/paddle/amp/GradScaler_cn.rst +++ b/docs/api/paddle/amp/GradScaler_cn.rst @@ -7,30 +7,30 @@ GradScaler -GradScaler用于动态图模式下的"自动混合精度"的训练。它控制loss的缩放比例,有助于避免浮点数溢出的问题。这个类具有 ``scale()``、 ``unscale_()``、 ``step()``、 ``update()``、 ``minimize()``和参数的``get()/set()``等方法。 +GradScaler 用于动态图模式下的"自动混合精度"的训练。它控制 loss 的缩放比例,有助于避免浮点数溢出的问题。这个类具有 ``scale()``、 ``unscale_()``、 ``step()``、 ``update()``、 ``minimize()``和参数的``get()/set()``等方法。 -``scale()`` 用于让loss乘上一个缩放的比例。 -``unscale_()`` 用于让loss除去一个缩放的比例。 -``step()`` 与 ``optimizer.step()`` 类似,执行参数的更新,不更新缩放比例loss_scaling。 +``scale()`` 用于让 loss 乘上一个缩放的比例。 +``unscale_()`` 用于让 loss 除去一个缩放的比例。 +``step()`` 与 ``optimizer.step()`` 类似,执行参数的更新,不更新缩放比例 loss_scaling。 ``update()`` 更新缩放比例。 -``minimize()`` 与 ``optimizer.minimize()`` 类似,执行参数的更新,同时更新缩放比例loss_scaling,等效与``step()``+``update()``。 +``minimize()`` 与 ``optimizer.minimize()`` 类似,执行参数的更新,同时更新缩放比例 loss_scaling,等效与``step()``+``update()``。 -通常,GradScaler和 
``paddle.amp.auto_cast`` 一起使用,来实现动态图模式下的"自动混合精度"。 +通常,GradScaler 和 ``paddle.amp.auto_cast`` 一起使用,来实现动态图模式下的"自动混合精度"。 参数 ::::::::: - - **enable** (bool,可选) - 是否使用loss scaling。默认值为True。 - - **init_loss_scaling** (float,可选) - 初始loss scaling因子。默认值为32768.0。 - - **incr_ratio** (float,可选) - 增大loss scaling时使用的乘数。默认值为2.0。 - - **decr_ratio** (float,可选) - 减小loss scaling时使用的小于1的乘数。默认值为0.5。 - - **incr_every_n_steps** (int,可选) - 连续n个steps的梯度都是有限值时,增加loss scaling。默认值为1000。 - - **decr_every_n_nan_or_inf** (int,可选) - 累计出现n个steps的梯度为nan或者inf时,减小loss scaling。默认值为2。 - - **use_dynamic_loss_scaling** (bool,可选) - 是否使用动态的loss scaling。如果不使用,则使用固定的loss scaling;如果使用,则会动态更新loss scaling。默认值为True。 + - **enable** (bool,可选) - 是否使用 loss scaling。默认值为 True。 + - **init_loss_scaling** (float,可选) - 初始 loss scaling 因子。默认值为 32768.0。 + - **incr_ratio** (float,可选) - 增大 loss scaling 时使用的乘数。默认值为 2.0。 + - **decr_ratio** (float,可选) - 减小 loss scaling 时使用的小于 1 的乘数。默认值为 0.5。 + - **incr_every_n_steps** (int,可选) - 连续 n 个 steps 的梯度都是有限值时,增加 loss scaling。默认值为 1000。 + - **decr_every_n_nan_or_inf** (int,可选) - 累计出现 n 个 steps 的梯度为 nan 或者 inf 时,减小 loss scaling。默认值为 2。 + - **use_dynamic_loss_scaling** (bool,可选) - 是否使用动态的 loss scaling。如果不使用,则使用固定的 loss scaling;如果使用,则会动态更新 loss scaling。默认值为 True。 返回 ::::::::: - 一个GradScaler对象。 + 一个 GradScaler 对象。 代码示例 @@ -58,16 +58,16 @@ GradScaler用于动态图模式下的"自动混合精度"的训练。它控制lo scale(var) ''''''''' -将Tensor乘上缩放因子,返回缩放后的输出。 -如果这个 :class:`GradScaler` 的实例不使用loss scaling,则返回的输出将保持不变。 +将 Tensor 乘上缩放因子,返回缩放后的输出。 +如果这个 :class:`GradScaler` 的实例不使用 loss scaling,则返回的输出将保持不变。 **参数** -- **var** (Tensor) - 需要进行缩放的Tensor。 +- **var** (Tensor) - 需要进行缩放的 Tensor。 **返回** -缩放后的Tensor或者原Tensor。 +缩放后的 Tensor 或者原 Tensor。 **代码示例** @@ -93,8 +93,8 @@ minimize(optimizer, *args, **kwargs) ''''''''' 这个函数与 ``optimizer.minimize()`` 类似,用于执行参数更新。 -如果参数缩放后的梯度包含NAN或者INF,则跳过参数更新。否则,首先让缩放过梯度的参数取消缩放,然后更新参数。 -最终,更新loss scaling的比例。 +如果参数缩放后的梯度包含 NAN 或者 INF,则跳过参数更新。否则,首先让缩放过梯度的参数取消缩放,然后更新参数。 +最终,更新 loss scaling 的比例。 **参数** @@ -126,7 
+126,7 @@ step(optimizer) ''''''''' 这个函数与 ``optimizer.step()`` 类似,用于执行参数更新。 -如果参数缩放后的梯度包含NAN或者INF,则跳过参数更新。否则,首先让缩放过梯度的参数取消缩放,然后更新参数。 +如果参数缩放后的梯度包含 NAN 或者 INF,则跳过参数更新。否则,首先让缩放过梯度的参数取消缩放,然后更新参数。 该函数与 ``update()`` 函数一起使用,效果等同于 ``minimize()``。 **参数** @@ -209,11 +209,11 @@ unscale_(optimizer) is_enable() ''''''''' -判断是否开启loss scaling策略。 +判断是否开启 loss scaling 策略。 **返回** -bool,采用loss scaling策略返回True,否则返回False。 +bool,采用 loss scaling 策略返回 True,否则返回 False。 **代码示例** @@ -233,11 +233,11 @@ bool,采用loss scaling策略返回True,否则返回False。 is_use_dynamic_loss_scaling() ''''''''' -判断是否动态调节loss scaling的缩放比例。 +判断是否动态调节 loss scaling 的缩放比例。 **返回** -bool,动态调节loss scaling缩放比例返回True,否则返回False。 +bool,动态调节 loss scaling 缩放比例返回 True,否则返回 False。 **代码示例** @@ -257,11 +257,11 @@ bool,动态调节loss scaling缩放比例返回True,否则返回False。 get_init_loss_scaling() ''''''''' -返回初始化的loss scaling缩放比例。 +返回初始化的 loss scaling 缩放比例。 **返回** -float,初始化的loss scaling缩放比例。 +float,初始化的 loss scaling 缩放比例。 **代码示例** @@ -281,7 +281,7 @@ float,初始化的loss scaling缩放比例。 set_init_loss_scaling(new_init_loss_scaling) ''''''''' -利用输入的new_init_loss_scaling对初始缩放比例参数init_loss_scaling重新赋值。 +利用输入的 new_init_loss_scaling 对初始缩放比例参数 init_loss_scaling 重新赋值。 **参数** @@ -307,11 +307,11 @@ set_init_loss_scaling(new_init_loss_scaling) get_incr_ratio() ''''''''' -返回增大loss scaling时使用的乘数。 +返回增大 loss scaling 时使用的乘数。 **返回** -float,增大loss scaling时使用的乘数。 +float,增大 loss scaling 时使用的乘数。 **代码示例** @@ -331,11 +331,11 @@ float,增大loss scaling时使用的乘数。 set_incr_ratio(new_incr_ratio) ''''''''' -利用输入的new_incr_ratio对增大loss scaling时使用的乘数重新赋值。 +利用输入的 new_incr_ratio 对增大 loss scaling 时使用的乘数重新赋值。 **参数** -- **new_incr_ratio** (float) - 用于更新增大loss scaling时使用的乘数,该值需>1.0。 +- **new_incr_ratio** (float) - 用于更新增大 loss scaling 时使用的乘数,该值需>1.0。 **代码示例** @@ -357,11 +357,11 @@ set_incr_ratio(new_incr_ratio) get_decr_ratio() ''''''''' -返回缩小loss scaling时使用的乘数。 +返回缩小 loss scaling 时使用的乘数。 **返回** -float,缩小loss scaling时使用的乘数。 +float,缩小 loss scaling 时使用的乘数。 **代码示例** @@ -381,11 +381,11 @@ float,缩小loss 
scaling时使用的乘数。 set_decr_ratio(new_decr_ratio) ''''''''' -利用输入的new_decr_ratio对缩小loss scaling时使用的乘数重新赋值。 +利用输入的 new_decr_ratio 对缩小 loss scaling 时使用的乘数重新赋值。 **参数** -- **new_decr_ratio** (float) - 用于更新缩小loss scaling时使用的乘数,该值需<1.0。 +- **new_decr_ratio** (float) - 用于更新缩小 loss scaling 时使用的乘数,该值需<1.0。 **代码示例** @@ -407,11 +407,11 @@ set_decr_ratio(new_decr_ratio) get_incr_every_n_steps() ''''''''' -连续n个steps的梯度都是有限值时,增加loss scaling,返回对应的值n。 +连续 n 个 steps 的梯度都是有限值时,增加 loss scaling,返回对应的值 n。 **返回** -int,参数incr_every_n_steps。 +int,参数 incr_every_n_steps。 **代码示例** @@ -431,11 +431,11 @@ int,参数incr_every_n_steps。 set_incr_every_n_steps(new_incr_every_n_steps) ''''''''' -利用输入的new_incr_every_n_steps对参数incr_every_n_steps重新赋值。 +利用输入的 new_incr_every_n_steps 对参数 incr_every_n_steps 重新赋值。 **参数** -- **new_incr_every_n_steps** (int) - 用于更新参数incr_every_n_steps。 +- **new_incr_every_n_steps** (int) - 用于更新参数 incr_every_n_steps。 **代码示例** @@ -457,11 +457,11 @@ set_incr_every_n_steps(new_incr_every_n_steps) get_decr_every_n_nan_or_inf() ''''''''' -累计出现n个steps的梯度为nan或者inf时,减小loss scaling,返回对应的值n。 +累计出现 n 个 steps 的梯度为 nan 或者 inf 时,减小 loss scaling,返回对应的值 n。 **返回** -int,参数decr_every_n_nan_or_inf。 +int,参数 decr_every_n_nan_or_inf。 **代码示例** @@ -481,11 +481,11 @@ int,参数decr_every_n_nan_or_inf。 set_decr_every_n_nan_or_inf(new_decr_every_n_nan_or_inf) ''''''''' -利用输入的new_decr_every_n_nan_or_inf对参数decr_every_n_nan_or_inf重新赋值。 +利用输入的 new_decr_every_n_nan_or_inf 对参数 decr_every_n_nan_or_inf 重新赋值。 **参数** -- **new_decr_every_n_nan_or_inf** (int) - 用于更新参数decr_every_n_nan_or_inf。 +- **new_decr_every_n_nan_or_inf** (int) - 用于更新参数 decr_every_n_nan_or_inf。 **代码示例** @@ -507,11 +507,11 @@ set_decr_every_n_nan_or_inf(new_decr_every_n_nan_or_inf) state_dict() ''''''''' -以字典的形式存储GradScaler对象的状态参数,如果该对象的enable为False,则返回一个空的字典。 +以字典的形式存储 GradScaler 对象的状态参数,如果该对象的 enable 为 False,则返回一个空的字典。 **返回** -dict,字典存储的参数包括:scale(tensor):loss scaling因子、incr_ratio(float):增大loss scaling时使用的乘数、decr_ratio(float):减小loss 
scaling时使用的小于1的乘数、incr_every_n_steps(int):连续n个steps的梯度都是有限值时,增加loss scaling、decr_every_n_nan_or_inf(int):累计出现n个steps的梯度为nan或者inf时,减小loss scaling、incr_count(int):连续未跳过参数更新的次数、decr_count(int):连续跳过参数更新的次数、use_dynamic_loss_scaling(bool):是否使用动态loss scaling策略。 +dict,字典存储的参数包括:scale(tensor):loss scaling 因子、incr_ratio(float):增大 loss scaling 时使用的乘数、decr_ratio(float):减小 loss scaling 时使用的小于 1 的乘数、incr_every_n_steps(int):连续 n 个 steps 的梯度都是有限值时,增加 loss scaling、decr_every_n_nan_or_inf(int):累计出现 n 个 steps 的梯度为 nan 或者 inf 时,减小 loss scaling、incr_count(int):连续未跳过参数更新的次数、decr_count(int):连续跳过参数更新的次数、use_dynamic_loss_scaling(bool):是否使用动态 loss scaling 策略。 **代码示例** @@ -531,11 +531,11 @@ dict,字典存储的参数包括:scale(tensor):loss scaling因子、incr_rat load_state_dict(state_dict) ''''''''' -利用输入的state_dict设置或更新GradScaler对象的属性参数。 +利用输入的 state_dict 设置或更新 GradScaler 对象的属性参数。 **参数** -- **state_dict** (dict) - 用于设置或更新GradScaler对象的属性参数,dict需要是``GradScaler.state_dict()``的返回值。 +- **state_dict** (dict) - 用于设置或更新 GradScaler 对象的属性参数,dict 需要是``GradScaler.state_dict()``的返回值。 **代码示例** diff --git a/docs/api/paddle/amp/Overview_cn.rst b/docs/api/paddle/amp/Overview_cn.rst index d53c89e9121..76acbc24b6e 100644 --- a/docs/api/paddle/amp/Overview_cn.rst +++ b/docs/api/paddle/amp/Overview_cn.rst @@ -3,34 +3,34 @@ paddle.amp --------------------- -paddle.amp 目录下包含飞桨框架支持的动态图自动混合精度(AMP)相关的API。具体如下: +paddle.amp 目录下包含飞桨框架支持的动态图自动混合精度(AMP)相关的 API。具体如下: -- :ref:`AMP相关API ` -- :ref:`开启AMP后默认转化为float16计算的相关OP ` -- :ref:`开启AMP后默认使用float32计算的相关OP ` +- :ref:`AMP 相关 API ` +- :ref:`开启 AMP 后默认转化为 float16 计算的相关 OP ` +- :ref:`开启 AMP 后默认使用 float32 计算的相关 OP ` .. _about_amp: -AMP相关API +AMP 相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`auto_cast ` ", "创建AMP上下文环境" + " :ref:`auto_cast ` ", "创建 AMP 上下文环境" " :ref:`decorate ` ", "根据选定混合精度训练模式,改写神经网络参数数据类型" - " :ref:`GradScaler ` ", "控制loss的缩放比例" + " :ref:`GradScaler ` ", "控制 loss 的缩放比例" .. 
_about_amp_white_list_ops: -开启AMP后默认转化为float16计算的相关OP +开启 AMP 后默认转化为 float16 计算的相关 OP ::::::::::::::::::::::: .. csv-table:: - :header: "OP名称", "OP功能" + :header: "OP 名称", "OP 功能" :widths: 10, 30 "conv2d", "卷积计算" @@ -40,11 +40,11 @@ AMP相关API .. _about_amp_black_list_ops: -开启AMP后默认使用float32计算的相关OP +开启 AMP 后默认使用 float32 计算的相关 OP ::::::::::::::::::::::: .. csv-table:: - :header: "OP名称", "OP功能" + :header: "OP 名称", "OP 功能" :widths: 10, 30 "exp", "指数运算" @@ -53,8 +53,8 @@ AMP相关API "mean", "取平均值" "sum", "求和运算" "cos_sim", "余弦相似度" - "softmax", "softmax操作" - "softmax_with_cross_entropy", "softmax交叉熵损失函数" + "softmax", "softmax 操作" + "softmax_with_cross_entropy", "softmax 交叉熵损失函数" "sigmoid_cross_entropy_with_logits", "按元素的概率误差" "cross_entropy", "交叉熵" "cross_entropy2", "交叉熵" diff --git a/docs/api/paddle/amp/auto_cast_cn.rst b/docs/api/paddle/amp/auto_cast_cn.rst index 5eff47a9a6d..360e0092786 100644 --- a/docs/api/paddle/amp/auto_cast_cn.rst +++ b/docs/api/paddle/amp/auto_cast_cn.rst @@ -14,11 +14,11 @@ auto_cast 参数 ::::::::: - - **enable** (bool,可选) - 是否开启自动混合精度。默认值为True。 + - **enable** (bool,可选) - 是否开启自动混合精度。默认值为 True。 - **custom_white_list** (set|list,可选) - 自定义算子白名单。这个名单中的算子在支持 float16 计算时会被认为是数值安全的,并且对性能至关重要。如果设置了白名单,该名单中的算子会使用 float16 计算。 - **custom_black_list** (set|list,可选) - 自定义算子黑名单。这个名单中的算子在支持 float16 计算时会被认为是数值危险的,它们的影响也可能会在下游操作中观察到。这些算子通常不会转为 float16 计算。 - **level** (str,可选) - 混合精度训练模式,可为 ``O1`` 或 ``O2`` 模式,默认 ``O1`` 模式。 - - **dtype** (str,可选) - 使用的数据类型,可以是float16 或 bfloat16。默认为 float16。 + - **dtype** (str,可选) - 使用的数据类型,可以是 float16 或 bfloat16。默认为 float16。 代码示例 diff --git a/docs/api/paddle/amp/decorate_cn.rst b/docs/api/paddle/amp/decorate_cn.rst index a5ce298f6b5..a3205fb6af2 100644 --- a/docs/api/paddle/amp/decorate_cn.rst +++ b/docs/api/paddle/amp/decorate_cn.rst @@ -7,18 +7,18 @@ decorate 装饰神经网络参数,来支持动态图模式下执行的算子的自动混合精度策略(AMP)。 -在``O1``模式下,该函数不做任何处理,直接返回输入的models和optimizers。在``O2``模式下,将对输入的网络参数数据类型由float32转为float16,(除BatchNorm和LayerNorm)。 -通过该函数可为支持master 
weight策略的优化器开启master weight策略,以保证训练精度。通过 ``save_dtype`` 可指定 ``paddle.save`` 和 ``paddle.jit.save`` 存储的网络参数数据类型。 +在``O1``模式下,该函数不做任何处理,直接返回输入的 models 和 optimizers。在``O2``模式下,将对输入的网络参数数据类型由 float32 转为 float16,(除 BatchNorm 和 LayerNorm)。 +通过该函数可为支持 master weight 策略的优化器开启 master weight 策略,以保证训练精度。通过 ``save_dtype`` 可指定 ``paddle.save`` 和 ``paddle.jit.save`` 存储的网络参数数据类型。 参数 :::::::::::: - - **models** (Layer|list of Layer) - 网络模型。在``O2``模式下,输入的模型参数将由float32转为float16。 - - **optimizers** (Optimizer|list of Optimizer,可选) - 优化器,默认值为None,若传入优化器或由优化器组成的list列表,将依据master_weight对优化器的master_weight属性进行设置。 + - **models** (Layer|list of Layer) - 网络模型。在``O2``模式下,输入的模型参数将由 float32 转为 float16。 + - **optimizers** (Optimizer|list of Optimizer,可选) - 优化器,默认值为 None,若传入优化器或由优化器组成的 list 列表,将依据 master_weight 对优化器的 master_weight 属性进行设置。 - **level** (str,可选) - 混合精度训练模式,默认``O1``模式。 - - **master_weight** (bool|None,可选) - 是否使用master weight策略。支持master weight策略的优化器包括``adam``、``adamW``、``momentum``,默认值为None,在``O2``模式下使用master weight策略。 - - **save_dtype** (str|None,可选) - 网络存储类型,可为float16、float32、float64。通过 ``save_dtype`` 可指定通过 ``paddle.save`` 和 ``paddle.jit.save`` 存储的网络参数数据类型。默认为None,采用现有网络参数类型进行存储。 + - **master_weight** (bool|None,可选) - 是否使用 master weight 策略。支持 master weight 策略的优化器包括``adam``、``adamW``、``momentum``,默认值为 None,在``O2``模式下使用 master weight 策略。 + - **save_dtype** (str|None,可选) - 网络存储类型,可为 float16、float32、float64。通过 ``save_dtype`` 可指定通过 ``paddle.save`` 和 ``paddle.jit.save`` 存储的网络参数数据类型。默认为 None,采用现有网络参数类型进行存储。 代码示例 diff --git a/docs/api/paddle/angle_cn.rst b/docs/api/paddle/angle_cn.rst index 9f2f290f9ae..0d4ffc00a09 100644 --- a/docs/api/paddle/angle_cn.rst +++ b/docs/api/paddle/angle_cn.rst @@ -14,12 +14,12 @@ angle 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:complex64, complex128 或 float32, float64。 + - x (Tensor) - 输入的 Tensor,数据类型为:complex64, complex128 或 float32, float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出实数Tensor,与 ``x`` 的数值精度一致。 +输出实数 
Tensor,与 ``x`` 的数值精度一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/arange_cn.rst b/docs/api/paddle/arange_cn.rst index 6d740ea9a0a..2163a40f7df 100644 --- a/docs/api/paddle/arange_cn.rst +++ b/docs/api/paddle/arange_cn.rst @@ -5,21 +5,21 @@ arange .. py:function:: paddle.arange(start=0, end=None, step=1, dtype=None, name=None) -返回以步长 ``step`` 均匀分隔给定数值区间[ ``start`` , ``end`` )的1-D Tensor,数据类型为 ``dtype`` 。 +返回以步长 ``step`` 均匀分隔给定数值区间[ ``start`` , ``end`` )的 1-D Tensor,数据类型为 ``dtype`` 。 当 ``dtype`` 表示浮点类型时,为了避免浮点计算误差,建议给 ``end`` 加上一个极小值 epsilon,使边界可以更加明确。 参数 :::::::::: - - **start** (float|int|Tensor) - 区间起点(且区间包括此值)。当 ``start`` 类型是 Tensor 时,是形状为[1]且数据类型为 int32、int64、float32、float64 的 Tensor。如果仅指定 ``start``,而 ``end`` 为 None,则区间为[0, ``start``)。默认值为0。 + - **start** (float|int|Tensor) - 区间起点(且区间包括此值)。当 ``start`` 类型是 Tensor 时,是形状为[1]且数据类型为 int32、int64、float32、float64 的 Tensor。如果仅指定 ``start``,而 ``end`` 为 None,则区间为[0, ``start``)。默认值为 0。 - **end** (float|int|Tensor,可选) - 区间终点(且通常区间不包括此值)。当 ``end`` 类型是 Tensor 时,是形状为[1]且数据类型为 int32、int64、float32、float64 的 Tensor。默认值为 None。 - - **step** (float|int|Tensor,可选) - 均匀分割的步长。当 ``step`` 类型是 Tensor 时,是形状为[1]且数据类型为 int32、int64、float32、float64的Tensor。默认值为1。 - - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 int32、int64、float32、float64。当该参数值为 None 时,输出Tensor的数据类型为int64。默认值为None。 + - **step** (float|int|Tensor,可选) - 均匀分割的步长。当 ``step`` 类型是 Tensor 时,是形状为[1]且数据类型为 int32、int64、float32、float64 的 Tensor。默认值为 1。 + - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 int32、int64、float32、float64。当该参数值为 None 时,输出 Tensor 的数据类型为 int64。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - Tensor,以步长 ``step`` 均匀分割给定数值区间[start, end)后得到的1-D Tensor,数据类型为 ``dtype`` 。 + Tensor,以步长 ``step`` 均匀分割给定数值区间[start, end)后得到的 1-D Tensor,数据类型为 ``dtype`` 。 代码示例 diff --git a/docs/api/paddle/argmax_cn.rst b/docs/api/paddle/argmax_cn.rst index 115c3ea6688..2eccf2028f1 100644 --- a/docs/api/paddle/argmax_cn.rst +++ 
b/docs/api/paddle/argmax_cn.rst @@ -11,9 +11,9 @@ argmax 参数 :::::::: - **x** (Tensor) - 输入的多维 ``Tensor``,支持的数据类型:float32、float64、int16、int32、int64、uint8。 - - **axis** (int,可选) - 指定对输入Tensor进行运算的轴,``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的维度个数,``axis`` 为负数时,进行计算的 ``axis`` 与 ``axis`` + R 一致。默认值为None,将会对输入的 `x` 进行平铺展开,返回最大值的索引。 - - **keepdim** (bool,可选) - 是否在输出Tensor中保留减小的维度。如果 keepdim 为True,则输出Tensor和 x 具有相同的维度(减少的维度除外,减少的维度的大小为1),默认值为False。 - - **dtype** (np.dtype|str,可选) - 输出Tensor的数据类型,可选值为int32,int64,默认值为int64,将返回int64类型的结果。 + - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴,``axis`` 的有效范围是[-R, R),R 是输入 ``x`` 的维度个数,``axis`` 为负数时,进行计算的 ``axis`` 与 ``axis`` + R 一致。默认值为 None,将会对输入的 `x` 进行平铺展开,返回最大值的索引。 + - **keepdim** (bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 keepdim 为 True,则输出 Tensor 和 x 具有相同的维度(减少的维度除外,减少的维度的大小为 1),默认值为 False。 + - **dtype** (np.dtype|str,可选) - 输出 Tensor 的数据类型,可选值为 int32,int64,默认值为 int64,将返回 int64 类型的结果。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/argmin_cn.rst b/docs/api/paddle/argmin_cn.rst index 05bead0ba8a..48e34934ff5 100644 --- a/docs/api/paddle/argmin_cn.rst +++ b/docs/api/paddle/argmin_cn.rst @@ -11,7 +11,7 @@ argmin 参数 :::::::: - **x** (Tensor) - 输入的多维 ``Tensor``,支持的数据类型:float32、float64、int16、int32、int64、uint8。 - - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的维度个数, ``axis`` 为负数时,进行计算的 ``axis`` 与 ``axis`` + R 一致。默认值为 None,将会对输入的 `x` 进行平铺展开,返回最小值的索引。 + - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴, ``axis`` 的有效范围是[-R, R),R 是输入 ``x`` 的维度个数, ``axis`` 为负数时,进行计算的 ``axis`` 与 ``axis`` + R 一致。默认值为 None,将会对输入的 `x` 进行平铺展开,返回最小值的索引。 - **keepdim** (bool,可选) - 是否保留进行最小值索引操作的轴,默认值为 False。 - **dtype** (np.dtype|str,可选) - 输出 Tensor 的数据类型,可选值为 int32、int64,默认值为'int64',将返回 int64 类型的结果。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/asin_cn.rst b/docs/api/paddle/asin_cn.rst index 4ecd7380d62..38ac0590470 100644 --- a/docs/api/paddle/asin_cn.rst 
+++ b/docs/api/paddle/asin_cn.rst @@ -8,7 +8,7 @@ asin -arcsine函数。 +arcsine 函数。 .. math:: out = sin^{-1}(x) @@ -16,12 +16,12 @@ arcsine函数。 参数 :::::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、float16。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64、float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/asinh_cn.rst b/docs/api/paddle/asinh_cn.rst index 796c59fbeca..7fd46746915 100644 --- a/docs/api/paddle/asinh_cn.rst +++ b/docs/api/paddle/asinh_cn.rst @@ -5,19 +5,19 @@ asinh .. py:function:: paddle.asinh(x, name=None) -Arcsinh函数。 +Arcsinh 函数。 .. math:: out = asinh(x) 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 diff --git a/docs/api/paddle/atan2_cn.rst b/docs/api/paddle/atan2_cn.rst index 62cd3a59c89..dd1d5efd367 100644 --- a/docs/api/paddle/atan2_cn.rst +++ b/docs/api/paddle/atan2_cn.rst @@ -8,7 +8,7 @@ atan2 -对x/y进行逐元素的arctangent运算,通过符号确定象限 +对 x/y 进行逐元素的 arctangent 运算,通过符号确定象限 .. 
math:: atan2(x,y)=\left\{\begin{matrix} @@ -23,14 +23,14 @@ atan2 参数 ::::::::: -- **x** (Tensor) - 输入的Tensor,数据类型为:int32、int64、float16、float32、float64。 -- **y** (Tensor) - 输入的Tensor,数据类型为:int32、int64、float16、float32、float64。 -- **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 +- **x** (Tensor) - 输入的 Tensor,数据类型为:int32、int64、float16、float32、float64。 +- **y** (Tensor) - 输入的 Tensor,数据类型为:int32、int64、float16、float32、float64。 +- **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同(输入为int时,输出数据类型为float64)。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同(输入为 int 时,输出数据类型为 float64)。 代码示例 ::::::::: diff --git a/docs/api/paddle/atan_cn.rst b/docs/api/paddle/atan_cn.rst index 9e28b318a26..f3ade7fa423 100644 --- a/docs/api/paddle/atan_cn.rst +++ b/docs/api/paddle/atan_cn.rst @@ -8,7 +8,7 @@ atan -arctangent函数。 +arctangent 函数。 .. math:: out = tan^{-1}(x) @@ -16,12 +16,12 @@ arctangent函数。 参数 :::::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、float16。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64、float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/atanh_cn.rst b/docs/api/paddle/atanh_cn.rst index efd038ec3dd..83ea34b3cee 100644 --- a/docs/api/paddle/atanh_cn.rst +++ b/docs/api/paddle/atanh_cn.rst @@ -5,19 +5,19 @@ atanh .. py:function:: paddle.atanh(x, name=None) -Arctanh函数。 +Arctanh 函数。 .. 
math:: out = atanh(x) 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 diff --git a/docs/api/paddle/autograd/PyLayerContext_cn.rst b/docs/api/paddle/autograd/PyLayerContext_cn.rst index 51949d5d381..3f272a62998 100644 --- a/docs/api/paddle/autograd/PyLayerContext_cn.rst +++ b/docs/api/paddle/autograd/PyLayerContext_cn.rst @@ -40,7 +40,7 @@ save_for_backward(self, *tensors) 用于暂存 ``backward`` 需要的 ``Tensor``,在 ``backward`` 中调用 ``saved_tensor`` 获取这些 ``Tensor`` 。 .. note:: - 这个API只能被调用一次,且只能在 ``forward`` 中调用。 + 这个 API 只能被调用一次,且只能在 ``forward`` 中调用。 **参数** diff --git a/docs/api/paddle/autograd/PyLayer_cn.rst b/docs/api/paddle/autograd/PyLayer_cn.rst index 6715d8452a8..f8267ef0b6b 100644 --- a/docs/api/paddle/autograd/PyLayer_cn.rst +++ b/docs/api/paddle/autograd/PyLayer_cn.rst @@ -5,7 +5,7 @@ PyLayer .. py:class:: paddle.autograd.PyLayer -Paddle通过创建 ``PyLayer`` 子类的方式实现Python端自定义算子,这个子类必须遵守以下规则: +Paddle 通过创建 ``PyLayer`` 子类的方式实现 Python 端自定义算子,这个子类必须遵守以下规则: 1. 
子类必须包含静态的 ``forward`` 和 ``backward`` 函数,它们的第一个参数必须是 :ref:`cn_api_autograd_PyLayerContext`,如果 ``backward`` 的某个返回值在 ``forward`` 中对应的 ``Tensor`` 是需要梯度,这个返回值必须为 ``Tensor`` 。 @@ -67,7 +67,7 @@ forward(ctx, *args, **kwargs) **返回** -Tensor或至少包含一个Tensor的list/tuple +Tensor 或至少包含一个 Tensor 的 list/tuple **代码示例** @@ -141,7 +141,7 @@ apply(cls, *args, **kwargs) **返回** -Tensor或至少包含一个Tensor的list/tuple +Tensor 或至少包含一个 Tensor 的 list/tuple **代码示例** diff --git a/docs/api/paddle/autograd/backward_cn.rst b/docs/api/paddle/autograd/backward_cn.rst index 27ac7c5d6a8..b72e302e466 100644 --- a/docs/api/paddle/autograd/backward_cn.rst +++ b/docs/api/paddle/autograd/backward_cn.rst @@ -11,9 +11,9 @@ backward 参数 :::::::::::: - - **tensors** (list[Tensor]) – 将要计算梯度的Tensors列表。Tensors中不能包含有相同的Tensor。 - - **grad_tensors** (None|list[Tensor|None],可选) – ``tensors`` 的初始梯度值。如果非None,必须和 ``tensors`` 有相同的长度,并且如果其中某一Tensor元素为None,则该初始梯度值为填充1.0 的默认值;如果是None,所有的 ``tensors`` 的初始梯度值为填充1.0 的默认值。默认值:None。 - - **retain_graph** (bool,可选) – 如果为False,反向计算图将被释放。如果在backward()之后继续添加OP,需要设置为True,此时之前的反向计算图会保留。将其设置为False会更加节省内存。默认值:False。 + - **tensors** (list[Tensor]) – 将要计算梯度的 Tensors 列表。Tensors 中不能包含有相同的 Tensor。 + - **grad_tensors** (None|list[Tensor|None],可选) – ``tensors`` 的初始梯度值。如果非 None,必须和 ``tensors`` 有相同的长度,并且如果其中某一 Tensor 元素为 None,则该初始梯度值为填充 1.0 的默认值;如果是 None,所有的 ``tensors`` 的初始梯度值为填充 1.0 的默认值。默认值:None。 + - **retain_graph** (bool,可选) – 如果为 False,反向计算图将被释放。如果在 backward()之后继续添加 OP,需要设置为 True,此时之前的反向计算图会保留。将其设置为 False 会更加节省内存。默认值:False。 返回 diff --git a/docs/api/paddle/batch_cn.rst b/docs/api/paddle/batch_cn.rst index a9b9c936e6e..015cb8b5ec0 100644 --- a/docs/api/paddle/batch_cn.rst +++ b/docs/api/paddle/batch_cn.rst @@ -5,17 +5,17 @@ batch .. py:function:: paddle.batch(reader, batch_size, drop_last=False) -该接口是一个reader的装饰器。返回的reader将输入reader的数据打包成指定的batch_size大小的批处理数据(batched data)。 +该接口是一个 reader 的装饰器。返回的 reader 将输入 reader 的数据打包成指定的 batch_size 大小的批处理数据(batched data)。 .. 
warning:: - 不推荐使用这个API,如有数据加载需求推荐使用支持多进程并发加速的 ``paddle.io.DataLoader`` + 不推荐使用这个 API,如有数据加载需求推荐使用支持多进程并发加速的 ``paddle.io.DataLoader`` 参数 :::::::::::: - - **reader** (generator)- 读取数据的数据reader。 + - **reader** (generator)- 读取数据的数据 reader。 - **batch_size** (int)- 批尺寸。 - - **drop_last** (bool) - 若设置为True,则当最后一个batch不等于batch_size时,丢弃最后一个batch;若设置为False,则不会。默认值为False。 + - **drop_last** (bool) - 若设置为 True,则当最后一个 batch 不等于 batch_size 时,丢弃最后一个 batch;若设置为 False,则不会。默认值为 False。 返回 :::::::::::: diff --git a/docs/api/paddle/bincount_cn.rst b/docs/api/paddle/bincount_cn.rst index f866b241909..cf44eb97389 100644 --- a/docs/api/paddle/bincount_cn.rst +++ b/docs/api/paddle/bincount_cn.rst @@ -5,19 +5,19 @@ bincount .. py:function:: paddle.bincount(x, weights=None, minlength=0, name=None): -统计输入张量中每个元素出现的次数,如果传入weights张量则每次计数加一时会乘以weights张量对应的值 +统计输入张量中每个元素出现的次数,如果传入 weights 张量则每次计数加一时会乘以 weights 张量对应的值 参数 :::::::::::: - - **x** (Tensor) - 输入Tensor。必须是一维Tensor,其中元素必须大于等于0,数据类型为int32, int64。 - - **weights** (Tensor,可选) - weights Tensor,代表输入Tensor中每个元素的权重。长度必须与输入Tensor相同。数据类型为int32, int64, float32或float64。默认为None - - **minlength** (int,可选) - 输出Tensor的最小长度,如果大于输入Tensor中的最大值,则多出的位置补0。该值必须大于等于0。默认为0。 + - **x** (Tensor) - 输入 Tensor。必须是一维 Tensor,其中元素必须大于等于 0,数据类型为 int32, int64。 + - **weights** (Tensor,可选) - weights Tensor,代表输入 Tensor 中每个元素的权重。长度必须与输入 Tensor 相同。数据类型为 int32, int64, float32 或 float64。默认为 None + - **minlength** (int,可选) - 输出 Tensor 的最小长度,如果大于输入 Tensor 中的最大值,则多出的位置补 0。该值必须大于等于 0。默认为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,维度为1。 +Tensor,维度为 1。 代码示例: :::::::::::: diff --git a/docs/api/paddle/bitwise_and_cn.rst b/docs/api/paddle/bitwise_and_cn.rst index ea09f5c524e..8370d36efdc 100644 --- a/docs/api/paddle/bitwise_and_cn.rst +++ b/docs/api/paddle/bitwise_and_cn.rst @@ -5,20 +5,20 @@ bitwise_and .. 
py:function:: paddle.bitwise_and(x, y, out=None, name=None) -对Tensor ``x`` 和 ``y`` 逐元素进行 ``按位与`` 运算。 +对 Tensor ``x`` 和 ``y`` 逐元素进行 ``按位与`` 运算。 .. math:: Out = X \& Y .. note:: - ``paddle.bitwise_and`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.bitwise_and`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 :::::::::::: - **x** (Tensor)- 输入的 N-D `Tensor`,数据类型为:bool,uint8,int8,int16,int32,int64。 - **y** (Tensor)- 输入的 N-D `Tensor`,数据类型为:bool,uint8,int8,int16,int32,int64。 - - **out** (Tensor,可选)- 输出的结果 `Tensor`,是与输入数据类型相同的 N-D `Tensor`。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **out** (Tensor,可选)- 输出的结果 `Tensor`,是与输入数据类型相同的 N-D `Tensor`。默认值为 None,此时将创建新的 Tensor 来保存输出结果。 返回 :::::::::::: diff --git a/docs/api/paddle/bitwise_not_cn.rst b/docs/api/paddle/bitwise_not_cn.rst index 6ba22b14a81..6e782cd5576 100644 --- a/docs/api/paddle/bitwise_not_cn.rst +++ b/docs/api/paddle/bitwise_not_cn.rst @@ -5,19 +5,19 @@ bitwise_not .. py:function:: paddle.bitwise_not(x, out=None, name=None) -对Tensor ``x`` 逐元素进行 ``按位取反`` 运算。 +对 Tensor ``x`` 逐元素进行 ``按位取反`` 运算。 .. math:: Out = \sim X .. note:: - ``paddle.bitwise_not`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.bitwise_not`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 :::::::::::: - **x** (Tensor)- 输入的 N-D `Tensor`,数据类型为:bool,uint8,int8,int16,int32,int64。 - - **out** (Tensor,可选)- 输出的结果 `Tensor`,是与输入数据类型相同的 N-D `Tensor`。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **out** (Tensor,可选)- 输出的结果 `Tensor`,是与输入数据类型相同的 N-D `Tensor`。默认值为 None,此时将创建新的 Tensor 来保存输出结果。 返回 :::::::::::: diff --git a/docs/api/paddle/bitwise_or_cn.rst b/docs/api/paddle/bitwise_or_cn.rst index e36faf4d2b6..b3608df66b3 100644 --- a/docs/api/paddle/bitwise_or_cn.rst +++ b/docs/api/paddle/bitwise_or_cn.rst @@ -5,20 +5,20 @@ bitwise_or .. py:function:: paddle.bitwise_or(x, y, out=None, name=None) -对Tensor ``x`` 和 ``y`` 逐元素进行 ``按位或`` 运算。 +对 Tensor ``x`` 和 ``y`` 逐元素进行 ``按位或`` 运算。 .. 
math:: Out = X | Y .. note:: - ``paddle.bitwise_or`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.bitwise_or`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 :::::::::::: - **x** (Tensor)- 输入的 N-D `Tensor`,数据类型为:bool,uint8,int8,int16,int32,int64。 - **y** (Tensor)- 输入的 N-D `Tensor`,数据类型为:bool,uint8,int8,int16,int32,int64。 - - **out** (Tensor,可选)- 输出的结果 `Tensor`,是与输入数据类型相同的 N-D `Tensor`。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **out** (Tensor,可选)- 输出的结果 `Tensor`,是与输入数据类型相同的 N-D `Tensor`。默认值为 None,此时将创建新的 Tensor 来保存输出结果。 返回 :::::::::::: diff --git a/docs/api/paddle/bitwise_xor_cn.rst b/docs/api/paddle/bitwise_xor_cn.rst index fc44d812d0a..3b8a7eeabfe 100644 --- a/docs/api/paddle/bitwise_xor_cn.rst +++ b/docs/api/paddle/bitwise_xor_cn.rst @@ -5,20 +5,20 @@ bitwise_xor .. py:function:: paddle.bitwise_xor(x, y, out=None, name=None) -对Tensor ``x`` 和 ``y`` 逐元素进行 ``按位异或`` 运算。 +对 Tensor ``x`` 和 ``y`` 逐元素进行 ``按位异或`` 运算。 .. math:: Out = X ^\wedge Y .. note:: - ``paddle.bitwise_xor`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.bitwise_xor`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 :::::::::::: - **x** (Tensor)- 输入的 N-D `Tensor`,数据类型为:bool,uint8,int8,int16,int32,int64。 - **y** (Tensor)- 输入的 N-D `Tensor`,数据类型为:bool,uint8,int8,int16,int32,int64。 - - **out** (Tensor,可选)- 输出的结果 `Tensor`,是与输入数据类型相同的 N-D `Tensor`。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **out** (Tensor,可选)- 输出的结果 `Tensor`,是与输入数据类型相同的 N-D `Tensor`。默认值为 None,此时将创建新的 Tensor 来保存输出结果。 返回 :::::::::::: diff --git a/docs/api/paddle/bmm_cn.rst b/docs/api/paddle/bmm_cn.rst index 0981cf5127c..d3b6f4ecf77 100644 --- a/docs/api/paddle/bmm_cn.rst +++ b/docs/api/paddle/bmm_cn.rst @@ -8,9 +8,9 @@ bmm -对输入x及输入y进行矩阵相乘。 +对输入 x 及输入 y 进行矩阵相乘。 -两个输入的维度必须等于3,并且矩阵x和矩阵y的第一维必须相等。同时矩阵x的第二维必须等于矩阵y的第三维。 +两个输入的维度必须等于 3,并且矩阵 x 和矩阵 y 的第一维必须相等。同时矩阵 x 的第二维必须等于矩阵 y 的第三维。 例如:若 x 和 y 分别为 (b, m, k) 和 (b, k, n) 的矩阵,则函数的输出为一个 (b, m, n) 的矩阵。 diff --git 
a/docs/api/paddle/broadcast_shape_cn.rst b/docs/api/paddle/broadcast_shape_cn.rst index 56410d1a49a..a77f2c0ac00 100644 --- a/docs/api/paddle/broadcast_shape_cn.rst +++ b/docs/api/paddle/broadcast_shape_cn.rst @@ -6,16 +6,16 @@ broadcast_shape .. py:function:: paddle.broadcast_shape(x_shape, y_shape) -该函数返回对x_shape大小的张量和y_shape大小的张量做broadcast操作后得到的shape,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 +该函数返回对 x_shape 大小的张量和 y_shape 大小的张量做 broadcast 操作后得到的 shape,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 ::::::::: - - x_shape (list[int]|tuple[int]) - 输入Tensor的shape。 - - y_shape (list[int]|tuple[int]) - 输入Tensor的shape。 + - x_shape (list[int]|tuple[int]) - 输入 Tensor 的 shape。 + - y_shape (list[int]|tuple[int]) - 输入 Tensor 的 shape。 返回 ::::::::: -broadcast操作后的shape,返回类型为 list[int]。 +broadcast 操作后的 shape,返回类型为 list[int]。 代码示例 diff --git a/docs/api/paddle/broadcast_tensors_cn.rst b/docs/api/paddle/broadcast_tensors_cn.rst index c2280f91f9a..a234b86d531 100644 --- a/docs/api/paddle/broadcast_tensors_cn.rst +++ b/docs/api/paddle/broadcast_tensors_cn.rst @@ -5,20 +5,20 @@ broadcast_tensors .. py:function:: paddle.broadcast_tensors(inputs, name=None) -根据Broadcast规范对一组输入 ``inputs`` 进行Broadcast操作 -输入应符合Broadcast规范 +根据 Broadcast 规范对一组输入 ``inputs`` 进行 Broadcast 操作 +输入应符合 Broadcast 规范 .. 
note:: - 如想了解更多Broadcasting内容,请参见 :ref:`cn_user_guide_broadcasting` 。 + 如想了解更多 Broadcasting 内容,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 ::::::::: - - inputs (list(Tensor)|tuple(Tensor)) - 一组输入Tensor,数据类型为:bool、float32、float64、int32或int64。所有的输入Tensor均需要满足rank <= 5。 + - inputs (list(Tensor)|tuple(Tensor)) - 一组输入 Tensor,数据类型为:bool、float32、float64、int32 或 int64。所有的输入 Tensor 均需要满足 rank <= 5。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``list(Tensor)``,一组Broadcast后的 ``Tensor``,其顺序与 ``input`` 一一对应。 +``list(Tensor)``,一组 Broadcast 后的 ``Tensor``,其顺序与 ``input`` 一一对应。 代码示例 ::::::::: diff --git a/docs/api/paddle/broadcast_to_cn.rst b/docs/api/paddle/broadcast_to_cn.rst index 4b24d1c4db6..4aa6ccb5f15 100644 --- a/docs/api/paddle/broadcast_to_cn.rst +++ b/docs/api/paddle/broadcast_to_cn.rst @@ -7,12 +7,12 @@ broadcast_to 根据 ``shape`` 指定的形状广播 ``x``,广播后,``x`` 的形状和 ``shape`` 指定的形状一致。 -``x`` 的维数和 ``shape`` 的元素数应小于等于6,并且 ``shape`` 中的元素数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为1。 +``x`` 的维数和 ``shape`` 的元素数应小于等于 6,并且 ``shape`` 中的元素数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为 1。 参数 ::::::::: - - x (Tensor) - 输入的 Tensor,数据类型为:bool、float32、float64、int32或int64。 - - shape (tuple|list|Tensor) - 给定输入 ``x`` 扩展后的形状,若 ``shape`` 为 list 或者 tuple,则其中的元素值应该为整数或者1-D Tensor,若 ``shape`` 类型为 Tensor,则其应该为1-D Tensor。 + - x (Tensor) - 输入的 Tensor,数据类型为:bool、float32、float64、int32 或 int64。 + - shape (tuple|list|Tensor) - 给定输入 ``x`` 扩展后的形状,若 ``shape`` 为 list 或者 tuple,则其中的元素值应该为整数或者 1-D Tensor,若 ``shape`` 类型为 Tensor,则其应该为 1-D Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/callbacks/Callback_cn.rst b/docs/api/paddle/callbacks/Callback_cn.rst index 002ac1c8e86..5b32b2a8880 100644 --- a/docs/api/paddle/callbacks/Callback_cn.rst +++ b/docs/api/paddle/callbacks/Callback_cn.rst @@ -5,7 +5,7 @@ Callback .. 
py:class:: paddle.callbacks.Callback() - ``Callback`` 是一个基类,用于实现用户自定义的callback。如果想使用除 :ref:`EarlyStopping <_cn_api_paddle_callbacks_EarlyStopping>` 外的自定义策略终止训练,可以通过在自定义的callback类中设置 ``model.stop_training=True`` 来实现。 + ``Callback`` 是一个基类,用于实现用户自定义的 callback。如果想使用除 :ref:`EarlyStopping <_cn_api_paddle_callbacks_EarlyStopping>` 外的自定义策略终止训练,可以通过在自定义的 callback 类中设置 ``model.stop_training=True`` 来实现。 代码示例 :::::::::::: @@ -19,18 +19,18 @@ COPY-FROM: paddle.callbacks.Callback set_params(params) ''''''''' -设置参数,类型是dict,包含字段如下: +设置参数,类型是 dict,包含字段如下: - 'batch_size':整数,批大小 -- ‘epochs’:整数,总共epochs -- 'steps':整数,一个epoch内的step数 -- 'verbose':整数,0,1,2,表示输出信息的模式,0是静默模式,1是进度条模式,2是每次打印一行。 -- ‘metrics’:字符串数组,评估指标的名字,包含’loss‘,以及paddle.metric.Metric获取的名字。 +- ‘epochs’:整数,总共 epochs +- 'steps':整数,一个 epoch 内的 step 数 +- 'verbose':整数,0,1,2,表示输出信息的模式,0 是静默模式,1 是进度条模式,2 是每次打印一行。 +- ‘metrics’:字符串数组,评估指标的名字,包含’loss‘,以及 paddle.metric.Metric 获取的名字。 set_model(model) ''''''''' -设置paddle.Model实例。 +设置 paddle.Model 实例。 on_train_begin(logs=None) ''''''''' @@ -39,7 +39,7 @@ on_train_begin(logs=None) **参数** - - **logs** (dict|None):日志信息是dict或None。 + - **logs** (dict|None):日志信息是 dict 或 None。 on_train_end(logs=None) ''''''''' @@ -48,7 +48,7 @@ on_train_end(logs=None) **参数** - - **logs** (dict|None):日志信息是dict或None。通过paddle.Model传递的dict包含的字段有'loss',评估指标metric的名字,以及'batch_size'。 + - **logs** (dict|None):日志信息是 dict 或 None。通过 paddle.Model 传递的 dict 包含的字段有'loss',评估指标 metric 的名字,以及'batch_size'。 on_eval_begin(logs=None) @@ -58,7 +58,7 @@ on_eval_begin(logs=None) **参数** - - **logs** (dict|None):日志信息是dict或None。通过paddle.Model传递的dict包含的字段有'steps'和'metrics'。'steps'是验证集的总共步长数,'metrics'是一个list[str],包含'loss'和所设置的paddle.metric.Metric的名字。 + - **logs** (dict|None):日志信息是 dict 或 None。通过 paddle.Model 传递的 dict 包含的字段有'steps'和'metrics'。'steps'是验证集的总共步长数,'metrics'是一个 list[str],包含'loss'和所设置的 paddle.metric.Metric 的名字。 on_eval_end(logs=None) ''''''''' @@ -67,7 +67,7 @@ on_eval_end(logs=None) **参数** - - **logs** 
(dict|None):日志信息是dict或None。通过paddle.Model传递的dict包含的字段有'loss',评估指标metric的名字,以及'batch_size'。 + - **logs** (dict|None):日志信息是 dict 或 None。通过 paddle.Model 传递的 dict 包含的字段有'loss',评估指标 metric 的名字,以及'batch_size'。 on_predict_begin(logs=None) @@ -77,7 +77,7 @@ on_predict_begin(logs=None) **参数** - - **logs** (dict|None):日志信息是dict或None。 + - **logs** (dict|None):日志信息是 dict 或 None。 on_predict_end(logs=None) @@ -87,88 +87,88 @@ on_predict_end(logs=None) **参数** - - **logs** (dict|None):日志信息是dict或None。 + - **logs** (dict|None):日志信息是 dict 或 None。 on_epoch_begin(epoch, logs=None) ''''''''' -在每个epoch的一开始调用。 +在每个 epoch 的一开始调用。 **参数** - - **epoch** (int): epoch的索引。 - - **logs** (dict|None):日志信息是None。 + - **epoch** (int): epoch 的索引。 + - **logs** (dict|None):日志信息是 None。 on_epoch_end(epoch, logs=None) ''''''''' -在每个epoch的结束调用。 +在每个 epoch 的结束调用。 **参数** - - **epoch** (int): epoch的索引。 - - **logs** (dict|None):日志信息是dict或None。通过paddle.Model传递的dict包含的字段有'loss',评估指标metric的名字,以及'batch_size'。 + - **epoch** (int): epoch 的索引。 + - **logs** (dict|None):日志信息是 dict 或 None。通过 paddle.Model 传递的 dict 包含的字段有'loss',评估指标 metric 的名字,以及'batch_size'。 on_train_batch_begin(step, logs=None) ''''''''' -在训练阶段每个batch的开始调用。 +在训练阶段每个 batch 的开始调用。 **参数** - **step** (int):训练步长或迭代次数。 - - **logs** (dict|None):日志信息是dict或None。通过paddle.Model传递的是None。 + - **logs** (dict|None):日志信息是 dict 或 None。通过 paddle.Model 传递的是 None。 on_train_batch_end(step, logs=None) ''''''''' -在训练阶段每个batch的结束调用。 +在训练阶段每个 batch 的结束调用。 **参数** - **step** (int):训练步长或迭代次数。 - - **logs** (dict|None):日志信息是dict或None。通过paddle.Model传递的dict包含的字段有'loss',评估指标metric的名字,以及当前'batch_size'。 + - **logs** (dict|None):日志信息是 dict 或 None。通过 paddle.Model 传递的 dict 包含的字段有'loss',评估指标 metric 的名字,以及当前'batch_size'。 on_eval_batch_begin(step, logs=None) ''''''''' -在评估阶段每个batch的开始调用。 +在评估阶段每个 batch 的开始调用。 **参数** - **step** (int):评估步长或迭代次数。 - - **logs** (dict|None):日志信息是dict或None。通过paddle.Model传递的是None。 + - **logs** (dict|None):日志信息是 dict 或 None。通过 paddle.Model 传递的是 None。 
on_eval_batch_end(step, logs=None) ''''''''' -在评估阶段每个batch的结束调用。 +在评估阶段每个 batch 的结束调用。 **参数** - **step** (int):训练步长或迭代次数。 - - **logs** (dict|None):日志信息是dict或None。通过paddle.Model传递的dict包含的字段有'loss',评估指标metric的名字,以及当前'batch_size'。 + - **logs** (dict|None):日志信息是 dict 或 None。通过 paddle.Model 传递的 dict 包含的字段有'loss',评估指标 metric 的名字,以及当前'batch_size'。 on_predict_batch_begin(step, logs=None) ''''''''' -在推理阶段每个batch的开始调用。 +在推理阶段每个 batch 的开始调用。 **参数** - **step** (int):推理步长或迭代次数。 - - **logs** (dict|None):日志信息是dict或None。 + - **logs** (dict|None):日志信息是 dict 或 None。 on_predict_batch_end(step, logs=None) ''''''''' -在推理阶段每个batch的结束调用。 +在推理阶段每个 batch 的结束调用。 **参数** - **step** (int):训练步长或迭代次数。 - - **logs** (dict|None):日志信息是dict或None。 + - **logs** (dict|None):日志信息是 dict 或 None。 diff --git a/docs/api/paddle/callbacks/EarlyStopping_cn.rst b/docs/api/paddle/callbacks/EarlyStopping_cn.rst index c08cef3953b..6835dda76cb 100644 --- a/docs/api/paddle/callbacks/EarlyStopping_cn.rst +++ b/docs/api/paddle/callbacks/EarlyStopping_cn.rst @@ -11,12 +11,12 @@ EarlyStopping :::::::::::: - **monitor** (str,可选) - 监控量。该量作为模型是否停止学习的监控指标。默认值:'loss'。 - - **mode** (str,可选) - 可以是'auto'、'min'或者'max'。在min模式下,模型会在监控量的值不再减少时停止训练;max模式下,模型会在监控量的值不再增加时停止训练;auto模式下,实际的模式会从 ``monitor`` 推断出来。如果 ``monitor`` 中有'acc',将会认为是max模式,其它情况下,都会被推断为min模式。默认值:'auto'。 - - **patience** (int,可选) - 多少个epoch模型效果未提升会使模型提前停止训练。默认值:0。 - - **verbose** (int,可选) - 可以是0或者1,0代表不打印模型提前停止训练的日志,1代表打印日志。默认值:1。 - - **min_delta** (int|float,可选) - 监控量最小改变值。当evaluation的监控变量改变值小于 ``min_delta``,就认为模型没有变化。默认值:0。 - - **baseline** (int|float,可选) - 监控量的基线。如果模型在训练 ``patience`` 个epoch后效果对比基线没有提升,将会停止训练。如果是None,代表没有基线。默认值:None。 - - **save_best_model** (bool,可选) - 是否保存效果最好的模型(监控量的值最优)。文件会保存在 ``fit`` 中传入的参数 ``save_dir`` 下,前缀名为best_model,默认值:True。 + - **mode** (str,可选) - 可以是'auto'、'min'或者'max'。在 min 模式下,模型会在监控量的值不再减少时停止训练;max 模式下,模型会在监控量的值不再增加时停止训练;auto 模式下,实际的模式会从 ``monitor`` 推断出来。如果 ``monitor`` 中有'acc',将会认为是 max 模式,其它情况下,都会被推断为 min 模式。默认值:'auto'。 + - **patience** 
(int,可选) - 多少个 epoch 模型效果未提升会使模型提前停止训练。默认值:0。 + - **verbose** (int,可选) - 可以是 0 或者 1,0 代表不打印模型提前停止训练的日志,1 代表打印日志。默认值:1。 + - **min_delta** (int|float,可选) - 监控量最小改变值。当 evaluation 的监控变量改变值小于 ``min_delta``,就认为模型没有变化。默认值:0。 + - **baseline** (int|float,可选) - 监控量的基线。如果模型在训练 ``patience`` 个 epoch 后效果对比基线没有提升,将会停止训练。如果是 None,代表没有基线。默认值:None。 + - **save_best_model** (bool,可选) - 是否保存效果最好的模型(监控量的值最优)。文件会保存在 ``fit`` 中传入的参数 ``save_dir`` 下,前缀名为 best_model,默认值:True。 代码示例 :::::::::::: diff --git a/docs/api/paddle/callbacks/LRScheduler_cn.rst b/docs/api/paddle/callbacks/LRScheduler_cn.rst index 5c637c35b52..69c73aa1b54 100644 --- a/docs/api/paddle/callbacks/LRScheduler_cn.rst +++ b/docs/api/paddle/callbacks/LRScheduler_cn.rst @@ -10,8 +10,8 @@ LRScheduler 参数 :::::::::::: - - **by_step** (bool,可选) - 是否每个step都更新学习率。默认值:True。 - - **by_epoch** (bool,可选) - 是否每个epoch都更新学习率。默认值:False。 + - **by_step** (bool,可选) - 是否每个 step 都更新学习率。默认值:True。 + - **by_epoch** (bool,可选) - 是否每个 epoch 都更新学习率。默认值:False。 代码示例 diff --git a/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst b/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst index cc720717147..961965289ce 100644 --- a/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst +++ b/docs/api/paddle/callbacks/ModelCheckpoint_cn.rst @@ -5,14 +5,14 @@ ModelCheckpoint .. 
py:class:: paddle.callbacks.ModelCheckpoint(save_freq=1, save_dir=None) - ``ModelCheckpoint`` 回调类和model.fit联合使用,在训练阶段,保存模型权重和优化器状态信息。当前仅支持在固定的epoch间隔保存模型,不支持按照batch的间隔保存。 + ``ModelCheckpoint`` 回调类和 model.fit 联合使用,在训练阶段,保存模型权重和优化器状态信息。当前仅支持在固定的 epoch 间隔保存模型,不支持按照 batch 的间隔保存。 子方法可以参考基类。 参数 :::::::::::: - - **save_freq** (int,可选) - 间隔多少个epoch保存模型。默认值:1。 + - **save_freq** (int,可选) - 间隔多少个 epoch 保存模型。默认值:1。 - **save_dir** (int,可选) - 保存模型的文件夹。如果不设定,将不会保存模型。默认值:None。 diff --git a/docs/api/paddle/callbacks/Overview_cn.rst b/docs/api/paddle/callbacks/Overview_cn.rst index e1d9fe23bcc..7238cb6199e 100644 --- a/docs/api/paddle/callbacks/Overview_cn.rst +++ b/docs/api/paddle/callbacks/Overview_cn.rst @@ -3,17 +3,17 @@ paddle.callbacks --------------------- -paddle.callbacks 目录下包含飞桨框架支持的回调函数相关的API。具体如下: +paddle.callbacks 目录下包含飞桨框架支持的回调函数相关的 API。具体如下: -- :ref:`回调函数相关API ` +- :ref:`回调函数相关 API ` .. _about_callbacks: -回调函数相关API +回调函数相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`Callback ` ", "回调函数的基类,用于自定义回调函数" diff --git a/docs/api/paddle/callbacks/ProgBarLogger_cn.rst b/docs/api/paddle/callbacks/ProgBarLogger_cn.rst index 22db8f7dd3e..ac208d655b8 100644 --- a/docs/api/paddle/callbacks/ProgBarLogger_cn.rst +++ b/docs/api/paddle/callbacks/ProgBarLogger_cn.rst @@ -11,9 +11,9 @@ ProgBarLogger :::::::::::: - **log_freq** (int,可选) - 损失值和指标打印的频率。默认值:1。 - - **verbose** (int,可选) - 打印信息的模式。设置为0时,不打印信息; - 设置为1时,使用进度条的形式打印信息;设置为2时,使用行的形式打印信息。 - 设置为3时,会在2的基础上打印详细的计时信息,比如 ``average_reader_cost``。 + - **verbose** (int,可选) - 打印信息的模式。设置为 0 时,不打印信息; + 设置为 1 时,使用进度条的形式打印信息;设置为 2 时,使用行的形式打印信息。 + 设置为 3 时,会在 2 的基础上打印详细的计时信息,比如 ``average_reader_cost``。 默认值:2。 diff --git a/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst b/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst index d715e8f7f57..438d28c8463 100644 --- a/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst +++ b/docs/api/paddle/callbacks/ReduceLROnPlateau_cn.rst 
@@ -5,18 +5,18 @@ ReduceLROnPlateau .. py:class:: paddle.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.1, patience=10, verbose=1, mode='auto', min_delta=1e-4, cooldown=0, min_lr=0) - 该回调函数会在评估指标停止改善时,降低学习率。模型通常会因学习率降低2-10倍而受益。因此监视一个评价指标,如果这个指标在几个epoch内没有改善,就降低学习率。 + 该回调函数会在评估指标停止改善时,降低学习率。模型通常会因学习率降低 2-10 倍而受益。因此监视一个评价指标,如果这个指标在几个 epoch 内没有改善,就降低学习率。 参数 :::::::::::: - **monitor** (str,可选) - 监视的指标名称。默认值:'loss'。 - **factor** (float,可选) - 学习率减小的因子。`new_lr = lr * factor`。默认值:0.1。 - - **patience** (int,可选) - 多少个epoch监视的指标没有提升后就减小学习率。默认值:10。 - - **verbose** (int,可选) - 可视化的模式。0表示不打印任何信息,1表示打印信息。默认值:1。 - - **mode** (int,可选) - 必须是 `{'auto', 'min', 'max'}` 中的值。`'min'` 表示学习率会减少当监视的指标不再下降。`'max'` 表示学习率会减少当监视的指标不再上升。`'auto'` 会根据监视指标的名字来推理是使用min还是max模式,如果名字中包含acc则使用max模式,否则使用min模式。默认值:'auto'。 + - **patience** (int,可选) - 多少个 epoch 监视的指标没有提升后就减小学习率。默认值:10。 + - **verbose** (int,可选) - 可视化的模式。0 表示不打印任何信息,1 表示打印信息。默认值:1。 + - **mode** (int,可选) - 必须是 `{'auto', 'min', 'max'}` 中的值。`'min'` 表示学习率会减少当监视的指标不再下降。`'max'` 表示学习率会减少当监视的指标不再上升。`'auto'` 会根据监视指标的名字来推理是使用 min 还是 max 模式,如果名字中包含 acc 则使用 max 模式,否则使用 min 模式。默认值:'auto'。 - **min_delta** (float,可选) - 评判指标增大或减小的阈值。默认值:0。 - - **cooldown** (int,可选) - 学习率减少后至少经过多少个epoch在进行正常的减少策略。默认值:0。 + - **cooldown** (int,可选) - 学习率减少后至少经过多少个 epoch 再进行正常的减少策略。默认值:0。 - **min_lr** (int,可选) - 学习率减小后的下限。默认值:0。 diff --git a/docs/api/paddle/callbacks/VisualDL_cn.rst b/docs/api/paddle/callbacks/VisualDL_cn.rst index 0fb3dd46bf4..1a368a4f51a 100644 --- a/docs/api/paddle/callbacks/VisualDL_cn.rst +++ b/docs/api/paddle/callbacks/VisualDL_cn.rst @@ -5,7 +5,7 @@ VisualDL .. 
py:class:: paddle.callbacks.VisualDL(log_dir) - ``VisualDL`` 是一个visualdl( `飞桨可视化分析工具 `_ )的回调类。该类将训练过程中的损失值和评价指标储存至日志文件中后,启动面板即可查看可视化结果。 + ``VisualDL`` 是一个 visualdl( `飞桨可视化分析工具 `_ )的回调类。该类将训练过程中的损失值和评价指标储存至日志文件中后,启动面板即可查看可视化结果。 参数 :::::::::::: diff --git a/docs/api/paddle/cast_cn.rst b/docs/api/paddle/cast_cn.rst index 68dd5cdc29e..8680fa91419 100644 --- a/docs/api/paddle/cast_cn.rst +++ b/docs/api/paddle/cast_cn.rst @@ -13,8 +13,8 @@ cast 参数 :::::::::::: - - **x** (Tensor) - 输入多维Tensor,支持的数据类型为:bool、float16、float32、float64、uint8、int32、int64。 - - **dtype** (str|np.dtype) - 输出Tensor的数据类型。支持的数据类型为:bool、float16、float32、float64、int8、int32、int64、uint8。 + - **x** (Tensor) - 输入多维 Tensor,支持的数据类型为:bool、float16、float32、float64、uint8、int32、int64。 + - **dtype** (str|np.dtype) - 输出 Tensor 的数据类型。支持的数据类型为:bool、float16、float32、float64、int8、int32、int64、uint8。 返回 :::::::::::: diff --git a/docs/api/paddle/ceil_cn.rst b/docs/api/paddle/ceil_cn.rst index 1caba3897e6..51c9517b638 100644 --- a/docs/api/paddle/ceil_cn.rst +++ b/docs/api/paddle/ceil_cn.rst @@ -18,12 +18,12 @@ ceil 参数 :::::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64 、float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/chunk_cn.rst b/docs/api/paddle/chunk_cn.rst index e2b1faa1ed9..23bc23cb741 100644 --- a/docs/api/paddle/chunk_cn.rst +++ b/docs/api/paddle/chunk_cn.rst @@ -5,19 +5,19 @@ chunk .. 
py:function:: paddle.chunk(x, chunks, axis=0, name=None) -将输入Tensor分割成多个子Tensor。 +将输入 Tensor 分割成多个子 Tensor。 参数 ::::::::: - - **x** (Tensor) - 输入变量,数据类型为bool, float16, float32,float64,int32,int64的多维Tensor。 - - **chunks** (int) - ``chunks`` 是一个整数,表示将输入Tensor划分成多少个相同大小的子Tensor。 - - **axis** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``axis < 0``,则划分的维度为 ``rank(x) + axis``。默认值为0。 + - **x** (Tensor) - 输入变量,数据类型为 bool, float16, float32,float64,int32,int64 的多维 Tensor。 + - **chunks** (int) - ``chunks`` 是一个整数,表示将输入 Tensor 划分成多少个相同大小的子 Tensor。 + - **axis** (int|Tensor,可选) - 整数或者形状为[1]的 Tensor,数据类型为 int32 或 int64。表示需要分割的维度。如果 ``axis < 0``,则划分的维度为 ``rank(x) + axis``。默认值为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -分割后的Tensor列表。 +分割后的 Tensor 列表。 代码示例 diff --git a/docs/api/paddle/clip_cn.rst b/docs/api/paddle/clip_cn.rst index 3008aa6b18d..04e6132b256 100644 --- a/docs/api/paddle/clip_cn.rst +++ b/docs/api/paddle/clip_cn.rst @@ -18,13 +18,13 @@ clip :::::::::::: - **x** (Tensor) - 输入的 Tensor,数据类型为:float32、float64、int32、int64。 - - **min** (float32|Tensor,可选) - 裁剪的最小值,输入中小于该值的元素将由该元素代替,若参数为空,则不对输入的最小值做限制。数据类型可以是 float32 或形状为[1]的 Tensor,类型可以为int32、float32、float64,默认值为 None。 - - **max** (float32|Tensor,可选) - 裁剪的最大值,输入中大于该值的元素将由该元素代替,若参数为空,则不对输入的最大值做限制。数据类型可以是 float32 或形状为[1]的 Tensor,类型可以为int32、float32、float64,默认值为 None。 + - **min** (float32|Tensor,可选) - 裁剪的最小值,输入中小于该值的元素将由该元素代替,若参数为空,则不对输入的最小值做限制。数据类型可以是 float32 或形状为[1]的 Tensor,类型可以为 int32、float32、float64,默认值为 None。 + - **max** (float32|Tensor,可选) - 裁剪的最大值,输入中大于该值的元素将由该元素代替,若参数为空,则不对输入的最大值做限制。数据类型可以是 float32 或形状为[1]的 Tensor,类型可以为 int32、float32、float64,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/clone_cn.rst b/docs/api/paddle/clone_cn.rst index b9b80f27191..7ad75edc280 100644 --- 
a/docs/api/paddle/clone_cn.rst +++ b/docs/api/paddle/clone_cn.rst @@ -5,18 +5,18 @@ clone .. py:function:: paddle.clone(x, name=None) -对输入Tensor ``x`` 进行拷贝,并返回一个新的Tensor。 +对输入 Tensor ``x`` 进行拷贝,并返回一个新的 Tensor。 -除此之外,该API提供梯度计算,在计算反向时,输出Tensor的梯度将会回传给输入Tensor。 +除此之外,该 API 提供梯度计算,在计算反向时,输出 Tensor 的梯度将会回传给输入 Tensor。 参数 ::::::::: - - x (Tensor) - 输入Tensor。 + - x (Tensor) - 输入 Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,从输入拷贝的Tensor +``Tensor``,从输入拷贝的 Tensor 代码示例 ::::::::: diff --git a/docs/api/paddle/compat/floor_division_cn.rst b/docs/api/paddle/compat/floor_division_cn.rst index 461c2d71965..081d804741a 100644 --- a/docs/api/paddle/compat/floor_division_cn.rst +++ b/docs/api/paddle/compat/floor_division_cn.rst @@ -5,8 +5,8 @@ floor_division .. py:function:: paddle.compat.floor_division(x, y) -等价于Python3和Python2中的除法。 -在Python3中,结果为floor(x/y)的int值;在Python2中,结果为(x/y)的值。 +等价于 Python3 和 Python2 中的除法。 +在 Python3 中,结果为 floor(x/y)的 int 值;在 Python2 中,结果为(x/y)的值。 参数 :::::::::: @@ -17,4 +17,4 @@ floor_division 返回 :::::::::: - x//y的除法结果 + x//y 的除法结果 diff --git a/docs/api/paddle/compat/get_exception_message_cn.rst b/docs/api/paddle/compat/get_exception_message_cn.rst index 8e50609c413..5ce10295bc9 100644 --- a/docs/api/paddle/compat/get_exception_message_cn.rst +++ b/docs/api/paddle/compat/get_exception_message_cn.rst @@ -16,4 +16,4 @@ get_exception_message 返回 :::::::::: - exec的错误消息 + exec 的错误消息 diff --git a/docs/api/paddle/compat/long_type_cn.rst b/docs/api/paddle/compat/long_type_cn.rst index fe6001936e6..23be78bb237 100644 --- a/docs/api/paddle/compat/long_type_cn.rst +++ b/docs/api/paddle/compat/long_type_cn.rst @@ -5,4 +5,4 @@ long_type .. 
py:function:: paddle.compat.long_type() -builtins.int的别名 +builtins.int 的别名 diff --git a/docs/api/paddle/compat/round_cn.rst b/docs/api/paddle/compat/round_cn.rst index c9e987c54af..fe77a91c9d7 100644 --- a/docs/api/paddle/compat/round_cn.rst +++ b/docs/api/paddle/compat/round_cn.rst @@ -5,7 +5,7 @@ round .. py:function:: paddle.compat.round(x, d=0) -等价于Python3中的round函数。 +等价于 Python3 中的 round 函数。 参数 :::::::::: diff --git a/docs/api/paddle/compat/to_bytes_cn.rst b/docs/api/paddle/compat/to_bytes_cn.rst index f8383bc7c03..620815464a2 100644 --- a/docs/api/paddle/compat/to_bytes_cn.rst +++ b/docs/api/paddle/compat/to_bytes_cn.rst @@ -8,13 +8,13 @@ to_bytes 飞桨中的所有字符串都需要用文本字符串表示。 此函数将对象转换为具有特定编码的字节。特别是,如果对象类型是列表或集合容器,我们将迭代对象中的所有项并将其转换为字节。 -在Python3中: +在 Python3 中: - 使用特定编码将str type对象编码为bytes类型。 + 使用特定编码将 str type 对象编码为 bytes 类型。 -在Python2中: +在 Python2 中: - 使用特定的编码将unicode类型的对象编码为str类型,或者只返回object的8位字符串。 + 使用特定的编码将 unicode 类型的对象编码为 str 类型,或者只返回 object 的 8 位字符串。 参数 :::::::::: @@ -26,7 +26,7 @@ to_bytes 返回 :::::::::: - obj解码后的结果。 + obj 解码后的结果。 代码示例 ::::::::: diff --git a/docs/api/paddle/compat/to_text_cn.rst b/docs/api/paddle/compat/to_text_cn.rst index a6fbe7de613..8324357cd9b 100644 --- a/docs/api/paddle/compat/to_text_cn.rst +++ b/docs/api/paddle/compat/to_text_cn.rst @@ -8,13 +8,13 @@ to_text 飞桨中的所有字符串都需要用文本字符串表示。 此函数将对象转换为不带任何编码的文本字符串。特别是,如果对象类型是列表或集合容器,我们将迭代对象中的所有项并将其转换为文本字符串。 -在Python3中: +在 Python3 中: - 使用特定编码将bytes类型对象解码为str类型。 + 使用特定编码将 bytes 类型对象解码为 str 类型。 -在Python2中: +在 Python2 中: - 使用特定编码将str type对象解码为unicode类型。 + 使用特定编码将 str type 对象解码为 unicode 类型。 参数 :::::::::: @@ -26,7 +26,7 @@ to_text 返回 :::::::::: - obj解码后的结果。 + obj 解码后的结果。 代码示例 ::::::::: diff --git a/docs/api/paddle/complex_cn.rst b/docs/api/paddle/complex_cn.rst index 65caef9dc1b..56630f9151d 100644 --- a/docs/api/paddle/complex_cn.rst +++ b/docs/api/paddle/complex_cn.rst @@ -20,7 +20,7 @@ complex 输出 Tensor,数据类型是 complex64 或者 complex128,与 ``real`` 和 ``imag`` 的数值精度一致。 .. 
note:: - ``paddle.complex`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.complex`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/concat_cn.rst b/docs/api/paddle/concat_cn.rst index ad210156001..72886489744 100644 --- a/docs/api/paddle/concat_cn.rst +++ b/docs/api/paddle/concat_cn.rst @@ -12,7 +12,7 @@ concat :::::::::::: - **x** (list|tuple) - 待联结的 Tensor list 或者 Tensor tuple,支持的数据类型为:bool、float16、float32、float64、int32、int64、uint8, ``x`` 中所有 Tensor 的数据类型应该一致。 - - **axis** (int|Tensor,可选) - 指定对输入 ``x`` 进行运算的轴,可以是整数或者形状为[1]的Tensor,数据类型为 int32 或者 int64。 ``axis`` 的有效范围是 [-R, R),R是输入 ``x`` 中 Tensor 的维度,``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。 + - **axis** (int|Tensor,可选) - 指定对输入 ``x`` 进行运算的轴,可以是整数或者形状为[1]的 Tensor,数据类型为 int32 或者 int64。 ``axis`` 的有效范围是 [-R, R),R 是输入 ``x`` 中 Tensor 的维度,``axis`` 为负值时与 :math:`axis + R` 等价。默认值为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/cos_cn.rst b/docs/api/paddle/cos_cn.rst index 1a891b81ae7..28fe7184e58 100644 --- a/docs/api/paddle/cos_cn.rst +++ b/docs/api/paddle/cos_cn.rst @@ -19,12 +19,12 @@ cos 参数 :::::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64 、float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/cosh_cn.rst b/docs/api/paddle/cosh_cn.rst index 72e68ad98c3..8b2121ac7d0 100644 --- a/docs/api/paddle/cosh_cn.rst +++ b/docs/api/paddle/cosh_cn.rst @@ -19,12 +19,12 @@ cosh 参数 :::::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64 、float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: 
diff --git a/docs/api/paddle/crop_cn.rst b/docs/api/paddle/crop_cn.rst index 94d93cfbeab..05c8934c3a3 100644 --- a/docs/api/paddle/crop_cn.rst +++ b/docs/api/paddle/crop_cn.rst @@ -14,7 +14,7 @@ crop :: - * 示例1(输入为2-D Tensor): + * 示例 1(输入为 2-D Tensor): 输入: X.shape = [3, 5] @@ -31,7 +31,7 @@ crop Out.data = [[1, 2], [3, 4]] - * 示例2(输入为3-D Tensor): + * 示例 2(输入为 3-D Tensor): 输入: @@ -57,14 +57,14 @@ crop 参数 ::::::::: - - **x** (Tensor): 1-D到6-D Tensor,数据类型为float32、float64、int32或者int64。 - - **shape** (list|tuple|Tensor) - 输出Tensor的形状,数据类型为int32。如果是列表或元组,则其长度必须与x的维度大小相同,如果是Tensor,则其应该是1-D Tensor。当它是列表时,每一个元素可以是整数或者形状为[1]的Tensor。含有Variable的方式适用于每次迭代时需要改变输出形状的情况。 - - **offsets** (list|tuple|Tensor,可选) - 每个维度上裁剪的偏移量,数据类型为int32。如果是列表或元组,则其长度必须与x的维度大小相同,如果是Tensor,则其应是1-D Tensor。当它是列表时,每一个元素可以是整数或者形状为[1]的Variable。含有Variable的方式适用于每次迭代的偏移量(offset)都可能改变的情况。默认值:None,每个维度的偏移量为0。 + - **x** (Tensor): 1-D 到 6-D Tensor,数据类型为 float32、float64、int32 或者 int64。 + - **shape** (list|tuple|Tensor) - 输出 Tensor 的形状,数据类型为 int32。如果是列表或元组,则其长度必须与 x 的维度大小相同,如果是 Tensor,则其应该是 1-D Tensor。当它是列表时,每一个元素可以是整数或者形状为[1]的 Tensor。含有 Variable 的方式适用于每次迭代时需要改变输出形状的情况。 + - **offsets** (list|tuple|Tensor,可选) - 每个维度上裁剪的偏移量,数据类型为 int32。如果是列表或元组,则其长度必须与 x 的维度大小相同,如果是 Tensor,则其应是 1-D Tensor。当它是列表时,每一个元素可以是整数或者形状为[1]的 Variable。含有 Variable 的方式适用于每次迭代的偏移量(offset)都可能改变的情况。默认值:None,每个维度的偏移量为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -裁剪后的Tensor,数据类型与输入(x)相同。 +裁剪后的 Tensor,数据类型与输入(x)相同。 diff --git a/docs/api/paddle/cross_cn.rst b/docs/api/paddle/cross_cn.rst index 475f5098f14..ba6d6bb7c76 100644 --- a/docs/api/paddle/cross_cn.rst +++ b/docs/api/paddle/cross_cn.rst @@ -8,13 +8,13 @@ cross 计算张量 ``x`` 和 ``y`` 在 ``axis`` 维度上的向量积(叉积)。 -``x`` 和 ``y`` 必须有相同的形状,且指定的 ``axis`` 的长度必须为3。如果未指定 ``axis``,默认选取第一个长度为3的 ``axis`` 。 +``x`` 和 ``y`` 必须有相同的形状,且指定的 ``axis`` 的长度必须为 3。如果未指定 ``axis``,默认选取第一个长度为 3 的 ``axis`` 。 参数 ::::::::: - x (Tensor) – 第一个输入张量。 - y (Tensor) – 第二个输入张量。 - - axis (int,可选) – 
沿着此维进行向量积操作。默认值是9,意思是选取第一个长度为3的 ``axis`` 。 + - axis (int,可选) – 沿着此维进行向量积操作。默认值是 9,意思是选取第一个长度为 3 的 ``axis`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/cumprod_cn.rst b/docs/api/paddle/cumprod_cn.rst index a897aa6971e..135c391602f 100644 --- a/docs/api/paddle/cumprod_cn.rst +++ b/docs/api/paddle/cumprod_cn.rst @@ -7,15 +7,15 @@ cumprod -沿给定维度 ``dim`` 计算输入tensor ``x`` 的累乘。 +沿给定维度 ``dim`` 计算输入 tensor ``x`` 的累乘。 **注意**:结果的第一个元素和输入的第一个元素相同。 参数 ::::::::: - - x (Tensor) - 累乘的输入,需要进行累乘操作的tensor。 - - dim (int) - 指明需要累乘的维度,取值范围需在[-x.rank,x.rank)之间,其中x.rank表示输入tensor x的维度,-1代表最后一维。 - - dtype (str,可选) - 输出tensor的数据类型,支持int32、int64、float32、float64、complex64、complex128。如果指定了,那么在执行操作之前,输入的tensor将被转换为dtype类型。这对于防止数据类型溢出非常有用。默认为:None。 + - x (Tensor) - 累乘的输入,需要进行累乘操作的 tensor。 + - dim (int) - 指明需要累乘的维度,取值范围需在[-x.rank,x.rank)之间,其中 x.rank 表示输入 tensor x 的维度,-1 代表最后一维。 + - dtype (str,可选) - 输出 tensor 的数据类型,支持 int32、int64、float32、float64、complex64、complex128。如果指定了,那么在执行操作之前,输入的 tensor 将被转换为 dtype 类型。这对于防止数据类型溢出非常有用。默认为:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/cumsum_cn.rst b/docs/api/paddle/cumsum_cn.rst index fe93cdcce47..eaae09f013b 100644 --- a/docs/api/paddle/cumsum_cn.rst +++ b/docs/api/paddle/cumsum_cn.rst @@ -13,9 +13,9 @@ cumsum 参数 ::::::::: - - x (Tensor) - 累加的输入,需要进行累加操作的Tensor。 - - axis (int,可选) - 指明需要累加的维度。-1代表最后一维。默认:None,将输入展开为一维变量再进行累加计算。 - - dtype (str,可选) - 输出Tensor的数据类型,支持int32、int64、float32、float64。如果指定了,那么在执行操作之前,输入张量将被转换为dtype。这对于防止数据类型溢出非常有用。默认为:None。 + - x (Tensor) - 累加的输入,需要进行累加操作的 Tensor。 + - axis (int,可选) - 指明需要累加的维度。-1 代表最后一维。默认:None,将输入展开为一维变量再进行累加计算。 + - dtype (str,可选) - 输出 Tensor 的数据类型,支持 int32、int64、float32、float64。如果指定了,那么在执行操作之前,输入张量将被转换为 dtype。这对于防止数据类型溢出非常有用。默认为:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/deg2rad_cn.rst b/docs/api/paddle/deg2rad_cn.rst index 
e971832ce25..9da81016ad6 100644 --- a/docs/api/paddle/deg2rad_cn.rst +++ b/docs/api/paddle/deg2rad_cn.rst @@ -14,13 +14,13 @@ deg2rad 参数 ::::::::: -- **x** (Tensor) - 输入的Tensor,数据类型为:int32、int64、float32、float64。 -- **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 +- **x** (Tensor) - 输入的 Tensor,数据类型为:int32、int64、float32、float64。 +- **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同(输入为int时,输出数据类型为float32)。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同(输入为 int 时,输出数据类型为 float32)。 代码示例 ::::::::: diff --git a/docs/api/paddle/device/XPUPlace_cn.rst b/docs/api/paddle/device/XPUPlace_cn.rst index e2151c8579d..f0da7de5cdd 100644 --- a/docs/api/paddle/device/XPUPlace_cn.rst +++ b/docs/api/paddle/device/XPUPlace_cn.rst @@ -10,13 +10,13 @@ XPUPlace ``XPUPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 的 Baidu Kunlun XPU 设备。 -每个 ``XPUPlace`` 有一个 ``dev_id`` (设备id)来表明当前的 ``XPUPlace`` 所代表的显卡编号,编号从 0 开始。 +每个 ``XPUPlace`` 有一个 ``dev_id`` (设备 id)来表明当前的 ``XPUPlace`` 所代表的显卡编号,编号从 0 开始。 ``dev_id`` 不同的 ``XPUPlace`` 所对应的内存不可相互访问。 参数 :::::::::::: - - **id** (int,可选) - XPU的设备ID。如果为 ``None``,则默认会使用 id 为 0 的设备。默认值为 ``None``。 + - **id** (int,可选) - XPU 的设备 ID。如果为 ``None``,则默认会使用 id 为 0 的设备。默认值为 ``None``。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/cuda/Event_cn.rst b/docs/api/paddle/device/cuda/Event_cn.rst index a0c9f2019db..ab72e355fed 100644 --- a/docs/api/paddle/device/cuda/Event_cn.rst +++ b/docs/api/paddle/device/cuda/Event_cn.rst @@ -5,14 +5,14 @@ Event .. 
py:class:: paddle.device.cuda.Event(enable_timing=False, blocking=False, interprocess=False) -CUDA event的句柄。 +CUDA event 的句柄。 参数 :::::::::::: - - **enable_timing** (bool,可选) - Event 是否需要统计时间。默认值为False。 - - **blocking** (bool,可选) - wait()函数是否被阻塞。默认值为False。 - - **interprocess** (bool,可选) - Event是否能在进程间共享。默认值为False。 + - **enable_timing** (bool,可选) - Event 是否需要统计时间。默认值为 False。 + - **blocking** (bool,可选) - wait()函数是否被阻塞。默认值为 False。 + - **interprocess** (bool,可选) - Event 是否能在进程间共享。默认值为 False。 返回 :::::::::::: @@ -33,11 +33,11 @@ None record(CUDAStream=None) ''''''''' -记录event 到给定的stream。 +记录 event 到给定的 stream。 **参数** - - **stream** (CUDAStream,可选) - CUDA stream的句柄。如果为None,stream为当前的stream。默认值为False。 + - **stream** (CUDAStream,可选) - CUDA stream 的句柄。如果为 None,stream 为当前的 stream。默认值为 False。 **代码示例** @@ -51,11 +51,11 @@ record(CUDAStream=None) query() ''''''''' -查询event的状态。 +查询 event 的状态。 **返回** - 一个boolean 变量,用于标识当前event 获取的所有任务是否被完成。 + 一个 boolean 变量,用于标识当前 event 获取的所有任务是否被完成。 **代码示例** @@ -70,7 +70,7 @@ query() synchronize() ''''''''' -等待当前event 完成。 +等待当前 event 完成。 **代码示例** diff --git a/docs/api/paddle/device/cuda/Stream_cn.rst b/docs/api/paddle/device/cuda/Stream_cn.rst index 103be004b32..b61bae35d25 100644 --- a/docs/api/paddle/device/cuda/Stream_cn.rst +++ b/docs/api/paddle/device/cuda/Stream_cn.rst @@ -5,13 +5,13 @@ Stream .. 
py:class:: paddle.device.cuda.Stream(device=None, priority=None) -CUDA stream的句柄。 +CUDA stream 的句柄。 参数 :::::::::::: - - **device** (paddle.CUDAPlace()|int|None,可选) - 希望分配stream的设备。如果是None或者负数,则设备为当前的设备。如果是正数,则必须小于设备的个数。默认值为None。 - - **priority** (int|None,可选) - stream的优先级。优先级可以为1(高优先级)或者2(正常优先级)。如果优先级为None,优先级为2(正常优先级)。默认值为None。 + - **device** (paddle.CUDAPlace()|int|None,可选) - 希望分配 stream 的设备。如果是 None 或者负数,则设备为当前的设备。如果是正数,则必须小于设备的个数。默认值为 None。 + - **priority** (int|None,可选) - stream 的优先级。优先级可以为 1(高优先级)或者 2(正常优先级)。如果优先级为 None,优先级为 2(正常优先级)。默认值为 None。 代码示例 @@ -32,11 +32,11 @@ CUDA stream的句柄。 wait_event(event) ''''''''' -使所有将来提交到stream的任务等待event中已获取的任务。 +使所有将来提交到 stream 的任务等待 event 中已获取的任务。 **参数** - - **event** (CUDAEvent) - 要等待的event。 + - **event** (CUDAEvent) - 要等待的 event。 **代码示例** @@ -52,11 +52,11 @@ wait_event(event) wait_stream(stream) ''''''''' -和给定的stream 保持同步。 +和给定的 stream 保持同步。 **参数** - - **stream** (CUDAStream) - 要同步的stream。 + - **stream** (CUDAStream) - 要同步的 stream。 **代码示例** @@ -73,10 +73,10 @@ wait_stream(stream) query() ''''''''' -返回stream 中所有的操作是否完成的状态。 +返回 stream 中所有的操作是否完成的状态。 **返回** - 一个boolean值。 + 一个 boolean 值。 **代码示例** @@ -90,7 +90,7 @@ query() synchronize() ''''''''' -等待所有的stream的任务完成。 +等待所有的 stream 的任务完成。 **代码示例** @@ -104,14 +104,14 @@ synchronize() record_event(event=None) ''''''''' -标记一个CUDA event 到当前stream中。 +标记一个 CUDA event 到当前 stream 中。 **参数** - - **event** (CUDAEvent,可选) - 要标记的event。如果event 为None,新建一个event。默认值为None。 + - **event** (CUDAEvent,可选) - 要标记的 event。如果 event 为 None,新建一个 event。默认值为 None。 **返回** - 被标记的event。 + 被标记的 event。 **代码示例** diff --git a/docs/api/paddle/device/cuda/current_stream_cn.rst b/docs/api/paddle/device/cuda/current_stream_cn.rst index 3f3f33cc272..f27db97dad7 100644 --- a/docs/api/paddle/device/cuda/current_stream_cn.rst +++ b/docs/api/paddle/device/cuda/current_stream_cn.rst @@ -5,17 +5,17 @@ current_stream .. 
py:function:: paddle.device.cuda.current_stream(device=None) -通过device 返回当前的CUDA stream。 +通过 device 返回当前的 CUDA stream。 参数 :::::::::::: - - **device** (paddle.CUDAPlace()|int,可选) - 希望获取stream的设备或者设备ID。如果为None,则为当前的设备。默认值为None。 + - **device** (paddle.CUDAPlace()|int,可选) - 希望获取 stream 的设备或者设备 ID。如果为 None,则为当前的设备。默认值为 None。 返回 :::::::::::: - CUDAStream,设备的stream。 + CUDAStream,设备的 stream。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/cuda/device_count_cn.rst b/docs/api/paddle/device/cuda/device_count_cn.rst index 0bbd767529b..b9902c4a010 100644 --- a/docs/api/paddle/device/cuda/device_count_cn.rst +++ b/docs/api/paddle/device/cuda/device_count_cn.rst @@ -5,11 +5,11 @@ device_count .. py:function:: paddle.device.cuda.device_count() -返回值是int,表示当前程序可用的GPU数量。 +返回值是 int,表示当前程序可用的 GPU 数量。 返回 :::::::::::: - 返回一个整数,表示当前程序可用的GPU数量。 + 返回一个整数,表示当前程序可用的 GPU 数量。 代码示例 diff --git a/docs/api/paddle/device/cuda/get_device_capability_cn.rst b/docs/api/paddle/device/cuda/get_device_capability_cn.rst index a83b606ecfe..a8c7e1e4bbd 100644 --- a/docs/api/paddle/device/cuda/get_device_capability_cn.rst +++ b/docs/api/paddle/device/cuda/get_device_capability_cn.rst @@ -5,12 +5,12 @@ get_device_capability .. py:function:: paddle.device.cuda.get_device_capability(device=None) -返回从CUDA函数 `cudaDeviceProp `_ 获取到的定义设备计算能力的主要和次要修订号。 +返回从 CUDA 函数 `cudaDeviceProp `_ 获取到的定义设备计算能力的主要和次要修订号。 参数 :::::::::: - - **device** (paddle.CUDAPlace|int,可选) - 希望获取计算能力的设备或者设备ID。如果device为None(默认),则为当前的设备。 + - **device** (paddle.CUDAPlace|int,可选) - 希望获取计算能力的设备或者设备 ID。如果 device 为 None(默认),则为当前的设备。 返回 :::::::::: diff --git a/docs/api/paddle/device/cuda/get_device_name_cn.rst b/docs/api/paddle/device/cuda/get_device_name_cn.rst index fa1c83117fe..248977319bc 100644 --- a/docs/api/paddle/device/cuda/get_device_name_cn.rst +++ b/docs/api/paddle/device/cuda/get_device_name_cn.rst @@ -5,12 +5,12 @@ get_device_name .. 
py:function:: paddle.device.cuda.get_device_name(device=None) -返回从CUDA函数 `cudaDeviceProp `_ 获取到的设备名称。 +返回从 CUDA 函数 `cudaDeviceProp `_ 获取到的设备名称。 参数 :::::::::: - - **device** (paddle.CUDAPlace|int,可选) - 希望获取名称的设备或者设备ID。如果device为None(默认),则为当前的设备。 + - **device** (paddle.CUDAPlace|int,可选) - 希望获取名称的设备或者设备 ID。如果 device 为 None(默认),则为当前的设备。 返回 :::::::::: diff --git a/docs/api/paddle/device/cuda/get_device_properties_cn.rst b/docs/api/paddle/device/cuda/get_device_properties_cn.rst index 8f2e3bf6e0f..0192e8066dc 100644 --- a/docs/api/paddle/device/cuda/get_device_properties_cn.rst +++ b/docs/api/paddle/device/cuda/get_device_properties_cn.rst @@ -11,13 +11,13 @@ get_device_properties 参数 :::::::: -**device** (paddle.CUDAPlace or int or str) - 设备、设备ID和类似于 ``gpu:x`` 的设备名称。如果 ``device`` 为空,则 ``device`` 为当前的设备。默认值为None。 +**device** (paddle.CUDAPlace or int or str) - 设备、设备 ID 和类似于 ``gpu:x`` 的设备名称。如果 ``device`` 为空,则 ``device`` 为当前的设备。默认值为 None。 返回 :::::::: -_gpuDeviceProperties:设备属性,包括标识设备的ASCII字符串、设备计算能力的主版本号以及次版本号、全局显存总量、设备上多处理器的数量。 +_gpuDeviceProperties:设备属性,包括标识设备的 ASCII 字符串、设备计算能力的主版本号以及次版本号、全局显存总量、设备上多处理器的数量。 代码示例 diff --git a/docs/api/paddle/device/cuda/max_memory_allocated_cn.rst b/docs/api/paddle/device/cuda/max_memory_allocated_cn.rst index 948051f9547..5f73406f125 100644 --- a/docs/api/paddle/device/cuda/max_memory_allocated_cn.rst +++ b/docs/api/paddle/device/cuda/max_memory_allocated_cn.rst @@ -6,21 +6,21 @@ max_memory_allocated .. py:function:: paddle.device.cuda.max_memory_allocated(device=None) -返回给定设备上分配给Tensor的显存峰值。 +返回给定设备上分配给 Tensor 的显存峰值。 .. 
note:: - Paddle中分配给Tensor的显存块大小会进行256字节对齐,因此可能大于Tensor实际需要的显存大小。例如,一个shape为[1]的float32类型Tensor会占用256字节的显存,即使存储一个floatt32类型数据实际只需要4字节。 + Paddle 中分配给 Tensor 的显存块大小会进行 256 字节对齐,因此可能大于 Tensor 实际需要的显存大小。例如,一个 shape 为[1]的 float32 类型 Tensor 会占用 256 字节的显存,即使存储一个 floatt32 类型数据实际只需要 4 字节。 参数 :::::::: -**device** (paddle.CUDAPlace|int|str,可选) - 设备、设备ID或形如 ``gpu:x`` 的设备名称。如果 ``device`` 为None,则 ``device`` 为当前的设备。默认值为None。 +**device** (paddle.CUDAPlace|int|str,可选) - 设备、设备 ID 或形如 ``gpu:x`` 的设备名称。如果 ``device`` 为 None,则 ``device`` 为当前的设备。默认值为 None。 返回 :::::::: -一个整数,表示给定设备上分配给Tensor的显存峰值,以字节为单位。 +一个整数,表示给定设备上分配给 Tensor 的显存峰值,以字节为单位。 代码示例 :::::::: diff --git a/docs/api/paddle/device/cuda/max_memory_reserved_cn.rst b/docs/api/paddle/device/cuda/max_memory_reserved_cn.rst index a8fec1f7fc5..bf0827d65c9 100644 --- a/docs/api/paddle/device/cuda/max_memory_reserved_cn.rst +++ b/docs/api/paddle/device/cuda/max_memory_reserved_cn.rst @@ -6,18 +6,18 @@ max_memory_reserved .. py:function:: paddle.device.cuda.max_memory_reserved(device=None) -返回给定设备上由Allocator管理的显存峰值。 +返回给定设备上由 Allocator 管理的显存峰值。 参数 :::::::: -**device** (paddle.CUDAPlace|int|str,可选) - 设备、设备ID或形如 ``gpu:x`` 的设备名称。如果 ``device`` 为None,则 ``device`` 为当前的设备。默认值为None。 +**device** (paddle.CUDAPlace|int|str,可选) - 设备、设备 ID 或形如 ``gpu:x`` 的设备名称。如果 ``device`` 为 None,则 ``device`` 为当前的设备。默认值为 None。 返回 :::::::: -一个整数,表示给定设备上当前由Allocator管理的显存峰值,以字节为单位。 +一个整数,表示给定设备上当前由 Allocator 管理的显存峰值,以字节为单位。 代码示例 :::::::: diff --git a/docs/api/paddle/device/cuda/memory_allocated_cn.rst b/docs/api/paddle/device/cuda/memory_allocated_cn.rst index a3c328e82ed..830bf0bc8ee 100644 --- a/docs/api/paddle/device/cuda/memory_allocated_cn.rst +++ b/docs/api/paddle/device/cuda/memory_allocated_cn.rst @@ -6,21 +6,21 @@ memory_allocated .. py:function:: paddle.device.cuda.memory_allocated(device=None) -返回给定设备上当前分配给Tensor的显存大小。 +返回给定设备上当前分配给 Tensor 的显存大小。 .. 
note:: - Paddle中分配给Tensor的显存块大小会进行256字节对齐,因此可能大于Tensor实际需要的显存大小。例如,一个shape为[1]的float32类型Tensor会占用256字节的显存,即使存储一个floatt32类型数据实际只需要4字节。 + Paddle 中分配给 Tensor 的显存块大小会进行 256 字节对齐,因此可能大于 Tensor 实际需要的显存大小。例如,一个 shape 为[1]的 float32 类型 Tensor 会占用 256 字节的显存,即使存储一个 floatt32 类型数据实际只需要 4 字节。 参数 :::::::: -**device** (paddle.CUDAPlace|int|str,可选) - 设备、设备ID或形如 ``gpu:x`` 的设备名称。如果 ``device`` 为None,则 ``device`` 为当前的设备。默认值为None。 +**device** (paddle.CUDAPlace|int|str,可选) - 设备、设备 ID 或形如 ``gpu:x`` 的设备名称。如果 ``device`` 为 None,则 ``device`` 为当前的设备。默认值为 None。 返回 :::::::: -一个整数,表示给定设备上当前分配给Tensor的显存大小,以字节为单位。 +一个整数,表示给定设备上当前分配给 Tensor 的显存大小,以字节为单位。 代码示例 :::::::: diff --git a/docs/api/paddle/device/cuda/memory_reserved_cn.rst b/docs/api/paddle/device/cuda/memory_reserved_cn.rst index 5789a6d268c..87814c35509 100644 --- a/docs/api/paddle/device/cuda/memory_reserved_cn.rst +++ b/docs/api/paddle/device/cuda/memory_reserved_cn.rst @@ -6,18 +6,18 @@ memory_reserved .. py:function:: paddle.device.cuda.memory_reserved(device=None) -返回给定设备上当前由Allocator管理的显存大小。 +返回给定设备上当前由 Allocator 管理的显存大小。 参数 :::::::: -**device** (paddle.CUDAPlace|int|str,可选) - 设备、设备ID或形如 ``gpu:x`` 的设备名称。如果 ``device`` 为None,则 ``device`` 为当前的设备。默认值为None。 +**device** (paddle.CUDAPlace|int|str,可选) - 设备、设备 ID 或形如 ``gpu:x`` 的设备名称。如果 ``device`` 为 None,则 ``device`` 为当前的设备。默认值为 None。 返回 :::::::: -一个整数,表示给定设备上当前由Allocator管理的显存大小,以字节为单位。 +一个整数,表示给定设备上当前由 Allocator 管理的显存大小,以字节为单位。 代码示例 :::::::: diff --git a/docs/api/paddle/device/cuda/stream_guard_cn.rst b/docs/api/paddle/device/cuda/stream_guard_cn.rst index e30313d4bfb..deb7efab876 100644 --- a/docs/api/paddle/device/cuda/stream_guard_cn.rst +++ b/docs/api/paddle/device/cuda/stream_guard_cn.rst @@ -5,13 +5,13 @@ stream_guard .. 
py:function:: paddle.device.cuda.stream_guard(stream) -可以切换当前的CUDA stream为输入指定的stream。 +可以切换当前的 CUDA stream 为输入指定的 stream。 参数 :::::::::::: - - **stream** (paddle.device.cuda.Stream) - 指定的CUDA stream。如果为None,则不进行stream流切换。 + - **stream** (paddle.device.cuda.Stream) - 指定的 CUDA stream。如果为 None,则不进行 stream 流切换。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/cuda/synchronize_cn.rst b/docs/api/paddle/device/cuda/synchronize_cn.rst index e820d6cd061..b3ca3a22954 100644 --- a/docs/api/paddle/device/cuda/synchronize_cn.rst +++ b/docs/api/paddle/device/cuda/synchronize_cn.rst @@ -5,13 +5,13 @@ synchronize .. py:function:: paddle.device.cuda.synchronize(device=None) -等待给定的CUDA 设备上的计算完成。 +等待给定的 CUDA 设备上的计算完成。 参数 :::::::::::: - - **device** (paddle.CUDAPlace()|int,可选) - 设备或者设备ID。如果为None,则为当前的设备。默认值为None。 + - **device** (paddle.CUDAPlace()|int,可选) - 设备或者设备 ID。如果为 None,则为当前的设备。默认值为 None。 返回 :::::::::::: diff --git a/docs/api/paddle/device/get_cudnn_version_cn.rst b/docs/api/paddle/device/get_cudnn_version_cn.rst index 6a44d3474f0..d9585552813 100644 --- a/docs/api/paddle/device/get_cudnn_version_cn.rst +++ b/docs/api/paddle/device/get_cudnn_version_cn.rst @@ -6,11 +6,11 @@ get_cudnn_version .. py:function:: paddle.device.get_cudnn_version() -此函数返回cudnn的版本。返回值是int,它表示cudnn版本。例如,如果返回7600,则表示cudnn的版本为7.6。 +此函数返回 cudnn 的版本。返回值是 int,它表示 cudnn 版本。例如,如果返回 7600,则表示 cudnn 的版本为 7.6。 返回 :::::::::::: -返回一个整数,表示cudnn的版本。 +返回一个整数,表示 cudnn 的版本。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/get_device_cn.rst b/docs/api/paddle/device/get_device_cn.rst index 07504eec87a..6f3b55d1deb 100644 --- a/docs/api/paddle/device/get_device_cn.rst +++ b/docs/api/paddle/device/get_device_cn.rst @@ -6,7 +6,7 @@ get_device .. 
py:function:: paddle.device.get_device() -该功能返回当前程序运行的全局设备,返回的是一个类似于 ``cpu``、 ``gpu:x``、 ``xpu:x``、 ``mlu:x`` 或者 ``npu:x`` 字符串,如果没有设置全局设备,当cuda可用的时候返回 ``gpu:0``,当cuda不可用的时候返回 ``cpu`` 。 +该功能返回当前程序运行的全局设备,返回的是一个类似于 ``cpu``、 ``gpu:x``、 ``xpu:x``、 ``mlu:x`` 或者 ``npu:x`` 字符串,如果没有设置全局设备,当 cuda 可用的时候返回 ``gpu:0``,当 cuda 不可用的时候返回 ``cpu`` 。 返回 :::::::::::: diff --git a/docs/api/paddle/device/is_compiled_with_cinn_cn.rst b/docs/api/paddle/device/is_compiled_with_cinn_cn.rst index 76539f7ab48..ee45b163c97 100644 --- a/docs/api/paddle/device/is_compiled_with_cinn_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_cinn_cn.rst @@ -9,7 +9,7 @@ is_compiled_with_cinn 返回 :::::::::::: -bool,支持CINN则为True,否则为False。 +bool,支持 CINN 则为 True,否则为 False。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/is_compiled_with_cuda_cn.rst b/docs/api/paddle/device/is_compiled_with_cuda_cn.rst index d539af1b974..2cc13ba5818 100644 --- a/docs/api/paddle/device/is_compiled_with_cuda_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_cuda_cn.rst @@ -8,11 +8,11 @@ is_compiled_with_cuda -检查 ``whl`` 包是否可以被用来在GPU上运行模型。 +检查 ``whl`` 包是否可以被用来在 GPU 上运行模型。 返回 :::::::::::: -bool,支持GPU则为True,否则为False。 +bool,支持 GPU 则为 True,否则为 False。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/is_compiled_with_ipu_cn.rst b/docs/api/paddle/device/is_compiled_with_ipu_cn.rst index 882e8f7f2c5..b1992fd9246 100644 --- a/docs/api/paddle/device/is_compiled_with_ipu_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_ipu_cn.rst @@ -8,11 +8,11 @@ is_compiled_with_ipu -检查 ``whl`` 包是否可以被用来在Graphcore IPU上运行模型 +检查 ``whl`` 包是否可以被用来在 Graphcore IPU 上运行模型 返回 :::::::::: - bool,支持Graphcore IPU则为True,否则为False。 + bool,支持 Graphcore IPU 则为 True,否则为 False。 代码示例 :::::::::: diff --git a/docs/api/paddle/device/is_compiled_with_mlu_cn.rst b/docs/api/paddle/device/is_compiled_with_mlu_cn.rst index 258b655d3e7..98a54decc4a 100644 --- a/docs/api/paddle/device/is_compiled_with_mlu_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_mlu_cn.rst 
@@ -8,11 +8,11 @@ is_compiled_with_mlu -检查 ``whl`` 包是否可以被用来在Cambricon MLU上运行模型 +检查 ``whl`` 包是否可以被用来在 Cambricon MLU 上运行模型 返回 :::::::::: - bool,支持MLU则为True,否则为False。 + bool,支持 MLU 则为 True,否则为 False。 代码示例 :::::::::: diff --git a/docs/api/paddle/device/is_compiled_with_npu_cn.rst b/docs/api/paddle/device/is_compiled_with_npu_cn.rst index 37fe1ff2b48..352e27e7c6f 100644 --- a/docs/api/paddle/device/is_compiled_with_npu_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_npu_cn.rst @@ -9,7 +9,7 @@ is_compiled_with_npu 返回 :::::::::::: -bool,支持NPU则为True,否则为False。 +bool,支持 NPU 则为 True,否则为 False。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/is_compiled_with_rocm_cn.rst b/docs/api/paddle/device/is_compiled_with_rocm_cn.rst index 923058e339e..982d8fc2f0b 100644 --- a/docs/api/paddle/device/is_compiled_with_rocm_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_rocm_cn.rst @@ -8,11 +8,11 @@ is_compiled_with_rocm -检查 ``whl`` 包是否可以被用来在AMD或海光GPU(ROCm)上运行模型。 +检查 ``whl`` 包是否可以被用来在 AMD 或海光 GPU(ROCm)上运行模型。 返回 :::::::::::: -bool,支持GPU(ROCm)则为True,否则为False。 +bool,支持 GPU(ROCm)则为 True,否则为 False。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/is_compiled_with_xpu_cn.rst b/docs/api/paddle/device/is_compiled_with_xpu_cn.rst index 6590ebc3ce7..5de25bb2cba 100644 --- a/docs/api/paddle/device/is_compiled_with_xpu_cn.rst +++ b/docs/api/paddle/device/is_compiled_with_xpu_cn.rst @@ -8,11 +8,11 @@ is_compiled_with_xpu -检查 ``whl`` 包是否可以被用来在Baidu Kunlun XPU上运行模型。 +检查 ``whl`` 包是否可以被用来在 Baidu Kunlun XPU 上运行模型。 返回 :::::::::::: -bool,支持Baidu Kunlun XPU则为True,否则为False。 +bool,支持 Baidu Kunlun XPU 则为 True,否则为 False。 代码示例 :::::::::::: diff --git a/docs/api/paddle/device/set_device_cn.rst b/docs/api/paddle/device/set_device_cn.rst index bb42704f83e..6aab32c9c7d 100644 --- a/docs/api/paddle/device/set_device_cn.rst +++ b/docs/api/paddle/device/set_device_cn.rst @@ -6,16 +6,16 @@ set_device .. 
py:function:: paddle.device.set_device(device) -Paddle支持包括CPU和GPU在内的多种设备运行,设备可以通过字符串标识符表示,此功能可以指定OP运行的全局设备。 +Paddle 支持包括 CPU 和 GPU 在内的多种设备运行,设备可以通过字符串标识符表示,此功能可以指定 OP 运行的全局设备。 参数 :::::::::::: - - **device** (str)- 此参数确定特定的运行设备,它可以是 ``cpu``、 ``gpu``、 ``xpu``、 ``mlu``、 ``npu``、 ``gpu:x``、 ``xpu:x``、 ``mlu:x`` 或者是 ``npu:x``。其中,``x`` 是GPU、 XPU、 MLU 或者是 NPU 的编号。当 ``device`` 是 ``cpu`` 的时候,程序在CPU上运行,当device是 ``gpu:x`` 的时候,程序在GPU上运行,当device是 ``mlu:x`` 的时候,程序在MLU上运行,当device是 ``npu:x`` 的时候,程序在NPU上运行。 + - **device** (str)- 此参数确定特定的运行设备,它可以是 ``cpu``、 ``gpu``、 ``xpu``、 ``mlu``、 ``npu``、 ``gpu:x``、 ``xpu:x``、 ``mlu:x`` 或者是 ``npu:x``。其中,``x`` 是 GPU、 XPU、 MLU 或者是 NPU 的编号。当 ``device`` 是 ``cpu`` 的时候,程序在 CPU 上运行,当 device 是 ``gpu:x`` 的时候,程序在 GPU 上运行,当 device 是 ``mlu:x`` 的时候,程序在 MLU 上运行,当 device 是 ``npu:x`` 的时候,程序在 NPU 上运行。 返回 :::::::::::: -Place,设置的Place。 +Place,设置的 Place。 代码示例 :::::::::::: diff --git a/docs/api/paddle/diag_cn.rst b/docs/api/paddle/diag_cn.rst index 7f293b479ff..b0da9a9c46b 100644 --- a/docs/api/paddle/diag_cn.rst +++ b/docs/api/paddle/diag_cn.rst @@ -6,9 +6,9 @@ diag .. 
py:function:: paddle.diag(x, offset=0, padding_value=0, name=None) -如果 ``x`` 是向量(1-D张量),则返回带有 ``x`` 元素作为对角线的2-D方阵。 +如果 ``x`` 是向量(1-D 张量),则返回带有 ``x`` 元素作为对角线的 2-D 方阵。 -如果 ``x`` 是矩阵(2-D张量),则提取 ``x`` 的对角线元素,以1-D张量返回。 +如果 ``x`` 是矩阵(2-D 张量),则提取 ``x`` 的对角线元素,以 1-D 张量返回。 参数 ``offset`` 控制对角线偏移量: @@ -18,9 +18,9 @@ diag 参数 ::::::::: - - **x** (Tensor) - 输入的 `Tensor`。它的形状可以是一维或二维。其数据类型应为float32、float64、int32、int64。 - - **offset** (int,可选) - 对角线偏移量。正值表示上对角线,0表示主对角线,负值表示下对角线。 - - **padding_value** (int|float,可选) -使用此值来填充指定对角线以外的区域。仅在输入为一维张量时生效。默认值为0。 + - **x** (Tensor) - 输入的 `Tensor`。它的形状可以是一维或二维。其数据类型应为 float32、float64、int32、int64。 + - **offset** (int,可选) - 对角线偏移量。正值表示上对角线,0 表示主对角线,负值表示下对角线。 + - **padding_value** (int|float,可选) -使用此值来填充指定对角线以外的区域。仅在输入为一维张量时生效。默认值为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/diagflat_cn.rst b/docs/api/paddle/diagflat_cn.rst index de7fc5fa7b8..226c783cad2 100644 --- a/docs/api/paddle/diagflat_cn.rst +++ b/docs/api/paddle/diagflat_cn.rst @@ -18,8 +18,8 @@ diagflat 参数 ::::::::: - - x(Tensor):输入的 `Tensor`。它的形状可以是任意维度。其数据类型应为float32,float64,int32,int64。 - - offset(int,可选):对角线偏移量。正值表示上对角线,0表示主对角线,负值表示下对角线。 + - x(Tensor):输入的 `Tensor`。它的形状可以是任意维度。其数据类型应为 float32,float64,int32,int64。 + - offset(int,可选):对角线偏移量。正值表示上对角线,0 表示主对角线,负值表示下对角线。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/diagonal_cn.rst b/docs/api/paddle/diagonal_cn.rst index 1e13c4612b0..b2bcd92fc62 100644 --- a/docs/api/paddle/diagonal_cn.rst +++ b/docs/api/paddle/diagonal_cn.rst @@ -20,7 +20,7 @@ diagonal 参数 ::::::::: - - **x** (Tensor):输入变量,类型为Tensor,支持bool、int32、int64、float16、float32、float64数据类型。 + - **x** (Tensor):输入变量,类型为 Tensor,支持 bool、int32、int64、float16、float32、float64 数据类型。 - **offset** (int,可选)- 从指定的二维平面中获取对角线的位置,默认值为 0,既主对角线。 - **axis1** (int,可选)- 获取对角线的二维平面的第一维,默认值为 0。 - **axis2** (int,可选)- 获取对角线的二维平面的第二维,默认值为 1 diff --git a/docs/api/paddle/diff_cn.rst 
b/docs/api/paddle/diff_cn.rst index 8deaa8747d1..d28acb1b638 100644 --- a/docs/api/paddle/diff_cn.rst +++ b/docs/api/paddle/diff_cn.rst @@ -5,7 +5,7 @@ diff .. py:function:: paddle.diff(x, n=1, axis=-1, prepend=None, append=None, name=None) -沿着指定轴计算输入Tensor的n阶前向差值,一阶的前向差值计算公式如下: +沿着指定轴计算输入 Tensor 的 n 阶前向差值,一阶的前向差值计算公式如下: .. math:: out[i] = x[i+1] - x[i] @@ -17,15 +17,15 @@ diff :::::::::::: - **x** (Tensor) - 待计算前向差值的输入 `Tensor`。 - - **n** (int,可选) - 需要计算前向差值的次数,目前仅支持 `n=1`,默认值为1。 + - **n** (int,可选) - 需要计算前向差值的次数,目前仅支持 `n=1`,默认值为 1。 - **axis** (int,可选) - 沿着哪一维度计算前向差值,默认值为-1,也即最后一个维度。 - - **prepend** (Tensor,可选) - 在计算前向差值之前,沿着指定维度axis附加到输入x的前面,它的维度需要和输入一致,并且除了axis维外,其他维度的形状也要和输入一致,默认值为None。 - - **append** (Tensor,可选) - 在计算前向差值之前,沿着指定维度axis附加到输入x的后面,它的维度需要和输入一致,并且除了axis维外,其他维度的形状也要和输入一致,默认值为None。 + - **prepend** (Tensor,可选) - 在计算前向差值之前,沿着指定维度 axis 附加到输入 x 的前面,它的维度需要和输入一致,并且除了 axis 维外,其他维度的形状也要和输入一致,默认值为 None。 + - **append** (Tensor,可选) - 在计算前向差值之前,沿着指定维度 axis 附加到输入 x 的后面,它的维度需要和输入一致,并且除了 axis 维外,其他维度的形状也要和输入一致,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -前向差值计算后的Tensor,数据类型和输入一致。 +前向差值计算后的 Tensor,数据类型和输入一致。 代码示例: ::::::::: diff --git a/docs/api/paddle/digamma_cn.rst b/docs/api/paddle/digamma_cn.rst index d0f95e0d213..61fc8fa12cf 100644 --- a/docs/api/paddle/digamma_cn.rst +++ b/docs/api/paddle/digamma_cn.rst @@ -6,7 +6,7 @@ digamma .. py:function:: paddle.digamma(x, name=None) -逐元素计算输入Tensor的digamma函数值 +逐元素计算输入 Tensor 的 digamma 函数值 .. 
math:: \\Out = \Psi(x) = \frac{ \Gamma^{'}(x) }{ \Gamma(x) }\\ @@ -14,12 +14,12 @@ digamma 参数 ::::::::: - - **x** (Tensor) – 输入Tensor。数据类型为float32,float64。 + - **x** (Tensor) – 输入 Tensor。数据类型为 float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``, digamma函数计算结果,数据类型和维度大小与输入一致。 +``Tensor``, digamma 函数计算结果,数据类型和维度大小与输入一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/disable_signal_handler_cn.rst b/docs/api/paddle/disable_signal_handler_cn.rst index 70d789a9d77..7b5d4f3b011 100644 --- a/docs/api/paddle/disable_signal_handler_cn.rst +++ b/docs/api/paddle/disable_signal_handler_cn.rst @@ -5,17 +5,17 @@ disable_signal_handler .. py:function:: paddle.disable_signal_handler() -关闭Paddle系统信号处理方法 +关闭 Paddle 系统信号处理方法 -Paddle默认在C++层面注册了系统信号处理方法,用于优化报错信息。 -但是一些特定的Python module可能需要使用某些系统信号,引发冲突。 -您可以通过调用本函数来关闭Paddle的系统信号处理方法 +Paddle 默认在 C++层面注册了系统信号处理方法,用于优化报错信息。 +但是一些特定的 Python module 可能需要使用某些系统信号,引发冲突。 +您可以通过调用本函数来关闭 Paddle 的系统信号处理方法 -如果您在一个Python文件中同时使用了Paddle和下述框架的一种或多种, -则请在其他框架执行前首先调用paddle.disable_signal_handler() +如果您在一个 Python 文件中同时使用了 Paddle 和下述框架的一种或多种, +则请在其他框架执行前首先调用 paddle.disable_signal_handler() -1.TVM框架 -2.ADLIK框架 +1.TVM 框架 +2.ADLIK 框架 返回 ::::::::: diff --git a/docs/api/paddle/disable_static_cn.rst b/docs/api/paddle/disable_static_cn.rst index 41ca0020c46..497ecc2a0a8 100644 --- a/docs/api/paddle/disable_static_cn.rst +++ b/docs/api/paddle/disable_static_cn.rst @@ -6,7 +6,7 @@ disable_static .. py:function:: paddle.disable_static(place=None) .. 
note:: - 从2.0.0版本开始,Paddle默认开启动态图模式。 + 从 2.0.0 版本开始,Paddle 默认开启动态图模式。 该接口关闭静态图模式。可通过 :ref:`cn_api_paddle_enable_static` 开启静态图模式。 @@ -14,7 +14,7 @@ disable_static 参数 :::::::::::: - - **place** (paddle.CPUPlace|paddle.CUDAPlace,可选) - 动态图运行时的设备。默认值为 ``None``,此时,会根据paddle的版本自动判断。 + - **place** (paddle.CPUPlace|paddle.CUDAPlace,可选) - 动态图运行时的设备。默认值为 ``None``,此时,会根据 paddle 的版本自动判断。 返回 :::::::::::: diff --git a/docs/api/paddle/dist_cn.rst b/docs/api/paddle/dist_cn.rst index f0cf899f7ed..ae09e3cb863 100644 --- a/docs/api/paddle/dist_cn.rst +++ b/docs/api/paddle/dist_cn.rst @@ -5,16 +5,16 @@ dist .. py:function:: paddle.dist(x, y, p=2) -计算 `(x-y)` 的 p 范数(p-norm),需要注意这不是严格意义上的范数,仅作为距离的度量。输入 `x` 和 `y` 的形状(shape)必须是可广播的(broadcastable)。其含义如下,详情请参考 `numpy的广播概念 `_ : +计算 `(x-y)` 的 p 范数(p-norm),需要注意这不是严格意义上的范数,仅作为距离的度量。输入 `x` 和 `y` 的形状(shape)必须是可广播的(broadcastable)。其含义如下,详情请参考 `numpy 的广播概念 `_ : -- 每个输入都至少有1维 -- 对两个输入的维度从后向前匹配,两个输入每一维的大小需要满足3个条件中的任意一个:相等、其中一个为1或者其中一个不存在。 +- 每个输入都至少有 1 维 +- 对两个输入的维度从后向前匹配,两个输入每一维的大小需要满足 3 个条件中的任意一个:相等、其中一个为 1 或者其中一个不存在。 定义 `z = x - y` ,`x` 和 `y` 的形状是可广播的,那么 `z` 的形状可按照下列步骤得到: -(1) 如果 `x` 和 `y` 的维数不同,先对维数较少的这个输入的维度往前补1。 +(1) 如果 `x` 和 `y` 的维数不同,先对维数较少的这个输入的维度往前补 1。 -例如,`x` 的形状为[8, 1, 6, 1],`y` 的形状为[7, 1, 5],对 `y` 的维度补1, +例如,`x` 的形状为[8, 1, 6, 1],`y` 的形状为[7, 1, 5],对 `y` 的维度补 1, x (4-D Tensor): 8 x 1 x 6 x 1 @@ -24,7 +24,7 @@ y (4-D Tensor): 1 x 7 x 1 x 5 z (4-D Tensor): 8 x 7 x 6 x 5 -若两个输入的维数相同,则输出的大小可直接用步骤2确定。以下是 `p` 取不同值时,范数的计算公式: +若两个输入的维数相同,则输出的大小可直接用步骤 2 确定。以下是 `p` 取不同值时,范数的计算公式: 当 `p = 0`,定义 $0^0 = 0$,则 z 的零范数是 `z` 中非零元素的个数。 @@ -49,9 +49,9 @@ z (4-D Tensor): 8 x 7 x 6 x 5 参数 :::::::::::: - - **x** (Tensor): 1-D 到 6-D Tensor,数据类型为float32或float64。 - - **y** (Tensor): 1-D 到 6-D Tensor,数据类型为float32或float64。 - - **p** (float,optional):用于设置需要计算的范数,数据类型为float32或float64。默认值为2。 + - **x** (Tensor): 1-D 到 6-D Tensor,数据类型为 float32 或 float64。 + - **y** (Tensor): 1-D 到 6-D Tensor,数据类型为 float32 或 float64。 + - **p** (float,optional):用于设置需要计算的范数,数据类型为 float32 或 
float64。默认值为 2。 返回 :::::::::::: diff --git a/docs/api/paddle/distributed/InMemoryDataset_cn.rst b/docs/api/paddle/distributed/InMemoryDataset_cn.rst index 5dd22b996bd..85b4f92747d 100644 --- a/docs/api/paddle/distributed/InMemoryDataset_cn.rst +++ b/docs/api/paddle/distributed/InMemoryDataset_cn.rst @@ -26,21 +26,21 @@ init(**kwargs) **注意:** - **1. 该API只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -对InMemoryDataset的实例进行配置初始化。 +对 InMemoryDataset 的实例进行配置初始化。 **参数** - **kwargs** - 可选的关键字参数,由调用者提供,目前支持以下关键字配置。 - - **batch_size** (int) - batch size的大小。默认值为1。 - - **thread_num** (int) - 用于训练的线程数,默认值为1。 - - **use_var** (list) - 用于输入的variable列表,默认值为[]。 - - **input_type** (int) - 输入到模型训练样本的类型。0 代表一条样本,1 代表一个batch。默认值为0。 - - **fs_name** (str) - hdfs名称。默认值为""。 - - **fs_ugi** (str) - hdfs的ugi。默认值为""。 - - **pipe_command** (str) - 在当前的 ``dataset`` 中设置的pipe命令用于数据的预处理。pipe命令只能使用UNIX的pipe命令,默认为"cat"。 - - **download_cmd** (str) - 数据下载pipe命令。pipe命令只能使用UNIX的pipe命令,默认为"cat"。 + - **batch_size** (int) - batch size 的大小。默认值为 1。 + - **thread_num** (int) - 用于训练的线程数,默认值为 1。 + - **use_var** (list) - 用于输入的 variable 列表,默认值为[]。 + - **input_type** (int) - 输入到模型训练样本的类型。0 代表一条样本,1 代表一个 batch。默认值为 0。 + - **fs_name** (str) - hdfs 名称。默认值为""。 + - **fs_ugi** (str) - hdfs 的 ugi。默认值为""。 + - **pipe_command** (str) - 在当前的 ``dataset`` 中设置的 pipe 命令用于数据的预处理。pipe 命令只能使用 UNIX 的 pipe 命令,默认为"cat"。 + - **download_cmd** (str) - 数据下载 pipe 命令。pipe 命令只能使用 UNIX 的 pipe 命令,默认为"cat"。 **返回** @@ -103,21 +103,21 @@ _init_distributed_settings(**kwargs) **注意:** - **1. 该API只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** - **2. 本api需要在机大规模参数服务器训练下生效,敬请期待详细使用文档** + **1. 该 API 只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **2. 
本 api 需要在机大规模参数服务器训练下生效,敬请期待详细使用文档** -对InMemoryDataset的实例进行分布式训练相关配置的初始化。 +对 InMemoryDataset 的实例进行分布式训练相关配置的初始化。 **参数** - **kwargs** - 可选的关键字参数,由调用者提供,目前支持以下关键字配置。 - - **merge_size** (int) - 通过样本id来设置合并,相同id的样本将会在shuffle之后进行合并,你应该在一个data生成器里面解析样本id。merge_size表示合并的最小数量,默认值为-1,表示不做合并。 - - **parse_ins_id** (bool) - 是否需要解析每条样的id,默认值为False。 - - **parse_content** (bool) - 是否需要解析每条样本的content,默认值为False。 - - **fleet_send_batch_size** (int) - 设置发送batch的大小,默认值为1024。 - - **fleet_send_sleep_seconds** (int) - 设置发送batch后的睡眠时间,默认值为0。 - - **fea_eval** (bool) - 设置特征打乱特征验证模式,来修正特征级别的重要性,特征打乱需要 ``fea_eval`` 被设置为True。默认值为False。 - - **candidate_size** (int) - 特征打乱特征验证模式下,用于随机化特征的候选池大小。默认值为10000。 + - **merge_size** (int) - 通过样本 id 来设置合并,相同 id 的样本将会在 shuffle 之后进行合并,你应该在一个 data 生成器里面解析样本 id。merge_size 表示合并的最小数量,默认值为-1,表示不做合并。 + - **parse_ins_id** (bool) - 是否需要解析每条样的 id,默认值为 False。 + - **parse_content** (bool) - 是否需要解析每条样本的 content,默认值为 False。 + - **fleet_send_batch_size** (int) - 设置发送 batch 的大小,默认值为 1024。 + - **fleet_send_sleep_seconds** (int) - 设置发送 batch 后的睡眠时间,默认值为 0。 + - **fea_eval** (bool) - 设置特征打乱特征验证模式,来修正特征级别的重要性,特征打乱需要 ``fea_eval`` 被设置为 True。默认值为 False。 + - **candidate_size** (int) - 特征打乱特征验证模式下,用于随机化特征的候选池大小。默认值为 10000。 **返回** None。 @@ -149,28 +149,28 @@ update_settings(**kwargs) **注意:** - **1. 该API只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 
该 API 只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -对InMemoryDataset的实例通过init和_init_distributed_settings初始化的配置进行更新。 +对 InMemoryDataset 的实例通过 init 和_init_distributed_settings 初始化的配置进行更新。 **参数** - **kwargs** - 可选的关键字参数,由调用者提供,目前支持以下关键字配置。 - - **batch_size** (int) - batch size的大小。默认值为1。 - - **thread_num** (int) - 用于训练的线程数,默认值为1。 - - **use_var** (list) - 用于输入的variable列表,默认值为[]。 - - **input_type** (int) - 输入到模型训练样本的类型。0 代表一条样本,1 代表一个batch。默认值为0。 - - **fs_name** (str) - hdfs名称。默认值为""。 - - **fs_ugi** (str) - hdfs的ugi。默认值为""。 - - **pipe_command** (str) - 在当前的 ``dataset`` 中设置的pipe命令用于数据的预处理。pipe命令只能使用UNIX的pipe命令,默认为"cat"。 - - **download_cmd** (str) - 数据下载pipe命令。pipe命令只能使用UNIX的pipe命令,默认为"cat"。 - - **merge_size** (int) - 通过样本id来设置合并,相同id的样本将会在shuffle之后进行合并,你应该在一个data生成器里面解析样本id。merge_size表示合并的最小数量,默认值为-1,表示不做合并。 - - **parse_ins_id** (bool) - 是否需要解析每条样的id,默认值为False。 - - **parse_content** (bool) 是否需要解析每条样本的content,默认值为False。 - - **fleet_send_batch_size** (int) - 设置发送batch的大小,默认值为1024。 - - **fleet_send_sleep_seconds** (int) - 设置发送batch后的睡眠时间,默认值为0。 - - **fea_eval** (bool) - 设置特征打乱特征验证模式,来修正特征级别的重要性,特征打乱需要 ``fea_eval`` 被设置为True。默认值为False。 - - **candidate_size** (int) - 特征打乱特征验证模式下,用于随机化特征的候选池大小。默认值为10000。 + - **batch_size** (int) - batch size 的大小。默认值为 1。 + - **thread_num** (int) - 用于训练的线程数,默认值为 1。 + - **use_var** (list) - 用于输入的 variable 列表,默认值为[]。 + - **input_type** (int) - 输入到模型训练样本的类型。0 代表一条样本,1 代表一个 batch。默认值为 0。 + - **fs_name** (str) - hdfs 名称。默认值为""。 + - **fs_ugi** (str) - hdfs 的 ugi。默认值为""。 + - **pipe_command** (str) - 在当前的 ``dataset`` 中设置的 pipe 命令用于数据的预处理。pipe 命令只能使用 UNIX 的 pipe 命令,默认为"cat"。 + - **download_cmd** (str) - 数据下载 pipe 命令。pipe 命令只能使用 UNIX 的 pipe 命令,默认为"cat"。 + - **merge_size** (int) - 通过样本 id 来设置合并,相同 id 的样本将会在 shuffle 之后进行合并,你应该在一个 data 生成器里面解析样本 id。merge_size 表示合并的最小数量,默认值为-1,表示不做合并。 + - **parse_ins_id** (bool) - 是否需要解析每条样的 id,默认值为 False。 + - **parse_content** (bool) 是否需要解析每条样本的 content,默认值为 False。 + - **fleet_send_batch_size** 
(int) - 设置发送 batch 的大小,默认值为 1024。 + - **fleet_send_sleep_seconds** (int) - 设置发送 batch 后的睡眠时间,默认值为 0。 + - **fea_eval** (bool) - 设置特征打乱特征验证模式,来修正特征级别的重要性,特征打乱需要 ``fea_eval`` 被设置为 True。默认值为 False。 + - **candidate_size** (int) - 特征打乱特征验证模式下,用于随机化特征的候选池大小。默认值为 10000。 **返回** None。 @@ -202,7 +202,7 @@ load_into_memory() **注意:** - **1. 该API只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** 向内存中加载数据。 @@ -297,7 +297,7 @@ wait_preload_done() local_shuffle() ''''''''' -局部shuffle。加载到内存的训练样本进行单机节点内部的打乱 +局部 shuffle。加载到内存的训练样本进行单机节点内部的打乱 **代码示例** @@ -327,7 +327,7 @@ local_shuffle() global_shuffle(fleet=None, thread_num=12) ''''''''' -全局shuffle。只能用在分布式模式(单机多进程或多机多进程)中。您如果在分布式模式中运行,应当传递fleet而非None。 +全局 shuffle。只能用在分布式模式(单机多进程或多机多进程)中。您如果在分布式模式中运行,应当传递 fleet 而非 None。 **代码示例** @@ -356,13 +356,13 @@ global_shuffle(fleet=None, thread_num=12) **参数** - - **fleet** (Fleet) – fleet单例。默认为None。 - - **thread_num** (int) - 全局shuffle时的线程数。 + - **fleet** (Fleet) – fleet 单例。默认为 None。 + - **thread_num** (int) - 全局 shuffle 时的线程数。 release_memory() ''''''''' -当数据不再使用时,释放InMemoryDataset内存数据。 +当数据不再使用时,释放 InMemoryDataset 内存数据。 **代码示例** @@ -398,14 +398,14 @@ release_memory() get_memory_data_size(fleet=None) ''''''''' -用户可以调用此函数以了解加载进内存后所有workers中的样本数量。 +用户可以调用此函数以了解加载进内存后所有 workers 中的样本数量。 .. note:: - 该函数可能会导致性能不佳,因为它具有barrier。 + 该函数可能会导致性能不佳,因为它具有 barrier。 **参数** - - **fleet** (Fleet) – fleet对象。 + - **fleet** (Fleet) – fleet 对象。 **返回** 内存数据的大小。 @@ -439,17 +439,17 @@ get_memory_data_size(fleet=None) get_shuffle_data_size(fleet=None) ''''''''' -获取shuffle数据大小,用户可以调用此函数以了解局域/全局shuffle后所有workers中的样本数量。 +获取 shuffle 数据大小,用户可以调用此函数以了解局域/全局 shuffle 后所有 workers 中的样本数量。 .. 
note:: - 该函数可能会导致局域shuffle性能不佳,因为它具有barrier。但其不影响局域shuffle。 + 该函数可能会导致局域 shuffle 性能不佳,因为它具有 barrier。但其不影响局域 shuffle。 **参数** - - **fleet** (Fleet) – fleet对象。 + - **fleet** (Fleet) – fleet 对象。 **返回** -shuffle数据的大小。 +shuffle 数据的大小。 **代码示例** @@ -481,7 +481,7 @@ shuffle数据的大小。 slots_shuffle(slots) ''''''''' -该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较metric,比如auc,在一个或者多个有着baseline的特征上做特征打乱来验证特征level的重要性。 +该方法是在特征层次上的一个打乱方法,经常被用在有着较大缩放率实例的稀疏矩阵上,为了比较 metric,比如 auc,在一个或者多个有着 baseline 的特征上做特征打乱来验证特征 level 的重要性。 **参数** diff --git a/docs/api/paddle/distributed/Overview_cn.rst b/docs/api/paddle/distributed/Overview_cn.rst index e3adaa502fa..f93f8315efa 100644 --- a/docs/api/paddle/distributed/Overview_cn.rst +++ b/docs/api/paddle/distributed/Overview_cn.rst @@ -3,38 +3,38 @@ paddle.distributed ============================ -paddle.distributed目录包含的API支撑飞桨框架大规模分布式训练能力。具体如下: +paddle.distributed 目录包含的 API 支撑飞桨框架大规模分布式训练能力。具体如下: -- :ref:`Fleet分布式高层API <01>` +- :ref:`Fleet 分布式高层 API <01>` - :ref:`环境配置和训练启动管理 <02>` - :ref:`数据加载 <03>` -- :ref:`集合通信算法API <04>` +- :ref:`集合通信算法 API <04>` .. _01: -Fleet分布式高层API +Fleet 分布式高层 API :::::::::::::::::::::::::: -paddle.distributed.fleet是分布式训练的统一入口API,用于配置分布式训练。 +paddle.distributed.fleet 是分布式训练的统一入口 API,用于配置分布式训练。 .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 20, 50 " :ref:`UserDefinedRoleMaker ` ", "设置和获取用户自定义的集群信息,支持集合通信(Collective)及参数服务器(ParameterServer)两种训练架构的初始化" - " :ref:`PaddleCloudRoleMaker ` ", "设置和获取paddlecloud集群信息(百度内部集群使用),支持集合通信(Collective)及参数服务器(ParameterServer)两种训练架构的初始化" + " :ref:`PaddleCloudRoleMaker ` ", "设置和获取 paddlecloud 集群信息(百度内部集群使用),支持集合通信(Collective)及参数服务器(ParameterServer)两种训练架构的初始化" " :ref:`DistributedStrategy ` ", "配置分布式通信、计算和内存优化等策略" " :ref:`fleet.init ` ", "进行分布式训练配置并初始化 " - " :ref:`fleet.init_worker ` ", "集合通信架构下,worker节点初始化 " - " :ref:`fleet.stop_worker ` ", "集合通信架构下,停止正在运行的worker节点" - " :ref:`fleet.barrier_worker ` ", "集合通信架构下,强制要求所有的worker在此处相互等待一次,保持同步" - " :ref:`fleet.init_server ` ", "参数服务器架构下,server节点的初始化 " + " :ref:`fleet.init_worker ` ", "集合通信架构下,worker 节点初始化 " + " :ref:`fleet.stop_worker ` ", "集合通信架构下,停止正在运行的 worker 节点" + " :ref:`fleet.barrier_worker ` ", "集合通信架构下,强制要求所有的 worker 在此处相互等待一次,保持同步" + " :ref:`fleet.init_server ` ", "参数服务器架构下,server 节点的初始化 " " :ref:`fleet.run_server ` ", "参数服务器架构下的进程启动" " :ref:`fleet.save_inference_model ` ", "保存用于预测的模型" " :ref:`fleet.save_persistables ` ", "保存全量模型参数" " :ref:`fleet.distributed_optimizer ` ", "基于分布式并行策略进行模型拆分和优化计算" " :ref:`UtilBase ` ", "分布式训练工具的基类,用户集合通信、文件系统操作" - " :ref:`utils.HDFSClient ` ", "Hadoop文件系统查看和管理" + " :ref:`utils.HDFSClient ` ", "Hadoop 文件系统查看和管理" " :ref:`utils.LocalFS ` ", "本地文件系统查看和管理" .. _02: @@ -43,14 +43,14 @@ paddle.distributed.fleet是分布式训练的统一入口API,用于配置分 :::::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 20, 50 " :ref:`init_parallel_env ` ", "初始化并行训练环境,支持动态图模式" " :ref:`launch ` ", "启动分布式训练进程,支持集合通信及参数服务器架构" " :ref:`spawn ` ", "启动分布式训练进程,仅支持集合通信架构" - " :ref:`get_rank ` ", "获取当前进程的rank值" + " :ref:`get_rank ` ", "获取当前进程的 rank 值" " :ref:`get_world_size ` ", "获取当前进程数" .. _03: @@ -59,7 +59,7 @@ paddle.distributed.fleet是分布式训练的统一入口API,用于配置分 :::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 20, 50 @@ -68,21 +68,21 @@ paddle.distributed.fleet是分布式训练的统一入口API,用于配置分 .. _04: -集合通信算法API +集合通信算法 API :::::::::::::::::::::: -在集群上,对多设备的进程组的参数数据tensor进行计算处理。 +在集群上,对多设备的进程组的参数数据 tensor 进行计算处理。 .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 20, 50 - " :ref:`reduce ` ", "规约,规约进程组内的tensor,返回结果至指定进程" + " :ref:`reduce ` ", "规约,规约进程组内的 tensor,返回结果至指定进程" " :ref:`ReduceOP ` ", "规约,指定逐元素规约操作" - " :ref:`all_reduce ` ", "组规约,规约进程组内的tensor,结果广播至每个进程" - " :ref:`all_gather ` ", "组聚合,聚合进程组内的tensor,结果广播至每个进程" - " :ref:`broadcast ` ", "广播一个tensor到每个进程" - " :ref:`scatter ` ", "分发tensor到每个进程" + " :ref:`all_reduce ` ", "组规约,规约进程组内的 tensor,结果广播至每个进程" + " :ref:`all_gather ` ", "组聚合,聚合进程组内的 tensor,结果广播至每个进程" + " :ref:`broadcast ` ", "广播一个 tensor 到每个进程" + " :ref:`scatter ` ", "分发 tensor 到每个进程" " :ref:`split ` ", "切分参数到多个设备" " :ref:`barrier ` ", "同步路障,进行阻塞操作,实现组内所有进程的同步" diff --git a/docs/api/paddle/distributed/ParallelEnv_cn.rst b/docs/api/paddle/distributed/ParallelEnv_cn.rst index d6d85b6d084..9a90c221f60 100644 --- a/docs/api/paddle/distributed/ParallelEnv_cn.rst +++ b/docs/api/paddle/distributed/ParallelEnv_cn.rst @@ -6,7 +6,7 @@ ParallelEnv .. py:class:: paddle.distributed.ParallelEnv() .. 
note:: - 不推荐使用这个API,如果需要获取rank和world_size,建议使用 ``paddle.distributed.get_rank()`` 和 ``paddle.distributed.get_world_size()`` 。 + 不推荐使用这个 API,如果需要获取 rank 和 world_size,建议使用 ``paddle.distributed.get_rank()`` 和 ``paddle.distributed.get_world_size()`` 。 这个类用于获取动态图模型并行执行所需的环境变量值。 @@ -52,7 +52,7 @@ rank 当前训练进程的编号。 -此属性的值等于环境变量 `PADDLE_TRAINER_ID` 的值。默认值是0。 +此属性的值等于环境变量 `PADDLE_TRAINER_ID` 的值。默认值是 0。 **代码示例** @@ -69,9 +69,9 @@ rank world_size ''''''''' -参与训练进程的数量,一般也是训练所使用GPU卡的数量。 +参与训练进程的数量,一般也是训练所使用 GPU 卡的数量。 -此属性的值等于环境变量 `PADDLE_TRAINERS_NUM` 的值。默认值为1。 +此属性的值等于环境变量 `PADDLE_TRAINERS_NUM` 的值。默认值为 1。 **代码示例** @@ -88,9 +88,9 @@ world_size device_id ''''''''' -当前用于并行训练的GPU的编号。 +当前用于并行训练的 GPU 的编号。 -此属性的值等于环境变量 `FLAGS_selected_gpus` 的值。默认值是0。 +此属性的值等于环境变量 `FLAGS_selected_gpus` 的值。默认值是 0。 **代码示例** @@ -107,7 +107,7 @@ device_id current_endpoint ''''''''' -当前训练进程的终端节点IP与相应端口,形式为(机器节点IP:端口号)。例如:127.0.0.1:6170。 +当前训练进程的终端节点 IP 与相应端口,形式为(机器节点 IP:端口号)。例如:127.0.0.1:6170。 此属性的值等于环境变量 `PADDLE_CURRENT_ENDPOINT` 的值。默认值为空字符串""。 @@ -126,7 +126,7 @@ current_endpoint trainer_endpoints ''''''''' -当前任务所有参与训练进程的终端节点IP与相应端口,用于在NCCL2初始化的时候建立通信,广播NCCL ID。 +当前任务所有参与训练进程的终端节点 IP 与相应端口,用于在 NCCL2 初始化的时候建立通信,广播 NCCL ID。 此属性的值等于环境变量 `PADDLE_TRAINER_ENDPOINTS` 的值。默认值为空字符串""。 diff --git a/docs/api/paddle/distributed/QueueDataset_cn.rst b/docs/api/paddle/distributed/QueueDataset_cn.rst index 94bf6419ff4..78315d9bbea 100644 --- a/docs/api/paddle/distributed/QueueDataset_cn.rst +++ b/docs/api/paddle/distributed/QueueDataset_cn.rst @@ -9,7 +9,7 @@ QueueyDataset -QueueyDataset是流式处理数据使用Dataset类。与InmemoryDataset继承自同一父类,用于单机训练,不支持分布式大规模参数服务器相关配置和shuffle。此类由paddle.distributed.QueueDataset直接创建。 +QueueyDataset 是流式处理数据使用 Dataset 类。与 InmemoryDataset 继承自同一父类,用于单机训练,不支持分布式大规模参数服务器相关配置和 shuffle。此类由 paddle.distributed.QueueDataset 直接创建。 代码示例 :::::::::::: @@ -26,21 +26,21 @@ init(**kwargs) **注意:** - **1. 该API只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 
该 API 只在非** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -对QueueDataset的实例进行配置初始化。 +对 QueueDataset 的实例进行配置初始化。 **参数** - **kwargs** - 可选的关键字参数,由调用者提供,目前支持以下关键字配置。 - - **batch_size** (int) - batch size的大小。默认值为1。 - - **thread_num** (int) - 用于训练的线程数,默认值为1。 - - **use_var** (list) - 用于输入的variable列表,默认值为[]。 - - **input_type** (int) - 输入到模型训练样本的类型。0 代表一条样本,1 代表一个batch。默认值为0。 - - **fs_name** (str) - hdfs名称。默认值为""。 - - **fs_ugi** (str) - hdfs的ugi。默认值为""。 - - **pipe_command** (str) - 在当前的 ``dataset`` 中设置的pipe命令用于数据的预处理。pipe命令只能使用UNIX的pipe命令,默认为"cat"。 - - **download_cmd** (str) - 数据下载pipe命令。pipe命令只能使用UNIX的pipe命令,默认为"cat"。 + - **batch_size** (int) - batch size 的大小。默认值为 1。 + - **thread_num** (int) - 用于训练的线程数,默认值为 1。 + - **use_var** (list) - 用于输入的 variable 列表,默认值为[]。 + - **input_type** (int) - 输入到模型训练样本的类型。0 代表一条样本,1 代表一个 batch。默认值为 0。 + - **fs_name** (str) - hdfs 名称。默认值为""。 + - **fs_ugi** (str) - hdfs 的 ugi。默认值为""。 + - **pipe_command** (str) - 在当前的 ``dataset`` 中设置的 pipe 命令用于数据的预处理。pipe 命令只能使用 UNIX 的 pipe 命令,默认为"cat"。 + - **download_cmd** (str) - 数据下载 pipe 命令。pipe 命令只能使用 UNIX 的 pipe 命令,默认为"cat"。 **返回** @@ -104,7 +104,7 @@ None。 set_filelist(filelist) ''''''''' -在当前的worker中设置文件列表。 +在当前的 worker 中设置文件列表。 **代码示例** diff --git a/docs/api/paddle/distributed/all_gather_cn.rst b/docs/api/paddle/distributed/all_gather_cn.rst index 6665fc10239..1218d05c616 100644 --- a/docs/api/paddle/distributed/all_gather_cn.rst +++ b/docs/api/paddle/distributed/all_gather_cn.rst @@ -6,9 +6,9 @@ all_gather .. py:function:: paddle.distributed.all_gather(tensor_list, tensor, group=0) -进程组内所有进程的指定tensor进行聚合操作,并返回给所有进程聚合的结果。 -如下图所示,4个GPU分别开启4个进程,每张卡上的数据用卡号代表, -经过all_gather算子后,每张卡都会拥有所有卡的数据。 +进程组内所有进程的指定 tensor 进行聚合操作,并返回给所有进程聚合的结果。 +如下图所示,4 个 GPU 分别开启 4 个进程,每张卡上的数据用卡号代表, +经过 all_gather 算子后,每张卡都会拥有所有卡的数据。 .. 
image:: ./img/allgather.png :width: 800 @@ -17,9 +17,9 @@ all_gather 参数 ::::::::: - - tensor_list (list) - 操作的输出Tensor列表。列表中的每个元素均为Tensor,每个Tensor的数据类型为:float16、float32、float64、int32、int64。 - - tensor (Tensor) - 操作的输入Tensor。Tensor的数据类型为:float16、float32、float64、int32、int64。 - - group (int,可选) - 工作的进程组编号,默认为0。 + - tensor_list (list) - 操作的输出 Tensor 列表。列表中的每个元素均为 Tensor,每个 Tensor 的数据类型为:float16、float32、float64、int32、int64。 + - tensor (Tensor) - 操作的输入 Tensor。Tensor 的数据类型为:float16、float32、float64、int32、int64。 + - group (int,可选) - 工作的进程组编号,默认为 0。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/all_reduce_cn.rst b/docs/api/paddle/distributed/all_reduce_cn.rst index d80f169a460..8c9c126bc86 100644 --- a/docs/api/paddle/distributed/all_reduce_cn.rst +++ b/docs/api/paddle/distributed/all_reduce_cn.rst @@ -6,9 +6,9 @@ all_reduce .. py:function:: paddle.distributed.all_reduce(tensor, op=ReduceOp.SUM, group=0) -进程组内所有进程的指定tensor进行归约操作,并返回给所有进程归约的结果。 -如下图所示,4个GPU分别开启4个进程,每张卡上的数据用卡号代表,规约操作为求和, -经过all_reduce算子后,每张卡都会拥有所有卡数据的总和。 +进程组内所有进程的指定 tensor 进行归约操作,并返回给所有进程归约的结果。 +如下图所示,4 个 GPU 分别开启 4 个进程,每张卡上的数据用卡号代表,规约操作为求和, +经过 all_reduce 算子后,每张卡都会拥有所有卡数据的总和。 .. image:: ./img/allreduce.png :width: 800 @@ -17,9 +17,9 @@ all_reduce 参数 ::::::::: - - tensor (Tensor) - 操作的输入Tensor,同时也会将归约结果返回至此Tensor中。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - tensor (Tensor) - 操作的输入 Tensor,同时也会将归约结果返回至此 Tensor 中。Tensor 的数据类型为:float16、float32、float64、int32、int64。 - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.Min|ReduceOp.PROD,可选) - 归约的具体操作,比如求和,取最大值,取最小值和求乘积,默认为求和归约。 - - group (int,可选) - 工作的进程组编号,默认为0。 + - group (int,可选) - 工作的进程组编号,默认为 0。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/alltoall_cn.rst b/docs/api/paddle/distributed/alltoall_cn.rst index ba217b0dd10..2b54b6df534 100644 --- a/docs/api/paddle/distributed/alltoall_cn.rst +++ b/docs/api/paddle/distributed/alltoall_cn.rst @@ -6,10 +6,10 @@ alltoall .. 
py:function:: paddle.distributed.alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True) -将in_tensor_list里面的tensors按照卡数均分并按照卡的顺序分发到所有参与的卡并将结果tensors汇总到out_tensor_list。 -如下图所示,GPU0卡的in_tensor_list会按照两张卡拆分成0_0和0_1, GPU1卡的in_tensor_list同样拆分成1_0和1_1,经过alltoall算子后, -GPU0卡的0_0会发送给GPU0,GPU0卡的0_1会发送给GPU1,GPU1卡的1_0会发送给GPU0,GPU1卡的1_1会发送给GPU1,所以GPU0卡的out_tensor_list包含0_0和1_0, -GPU1卡的out_tensor_list包含0_1和1_1。 +将 in_tensor_list 里面的 tensors 按照卡数均分并按照卡的顺序分发到所有参与的卡并将结果 tensors 汇总到 out_tensor_list。 +如下图所示,GPU0 卡的 in_tensor_list 会按照两张卡拆分成 0_0 和 0_1, GPU1 卡的 in_tensor_list 同样拆分成 1_0 和 1_1,经过 alltoall 算子后, +GPU0 卡的 0_0 会发送给 GPU0,GPU0 卡的 0_1 会发送给 GPU1,GPU1 卡的 1_0 会发送给 GPU0,GPU1 卡的 1_1 会发送给 GPU1,所以 GPU0 卡的 out_tensor_list 包含 0_0 和 1_0, +GPU1 卡的 out_tensor_list 包含 0_1 和 1_1。 .. image:: ./img/alltoall.png :width: 800 @@ -18,9 +18,9 @@ GPU1卡的out_tensor_list包含0_1和1_1。 参数 ::::::::: - - in_tensor_list (list) - 包含所有输入Tensors的一个列表。在列表里面的所有元素都必须是一个Tensor,Tensor的数据类型必须是float16、float32、 float64、int32、int64。 - - out_tensor_list (Tensor) - 包含所有输出Tensors的一个列表。在列表里面的所有元素数据类型要和输入的Tensors数据类型一致。 - - group (Group,可选) - new_group返回的Group实例,或者设置为None表示默认地全局组。默认值:None。 + - in_tensor_list (list) - 包含所有输入 Tensors 的一个列表。在列表里面的所有元素都必须是一个 Tensor,Tensor 的数据类型必须是 float16、float32、 float64、int32、int64。 + - out_tensor_list (Tensor) - 包含所有输出 Tensors 的一个列表。在列表里面的所有元素数据类型要和输入的 Tensors 数据类型一致。 + - group (Group,可选) - new_group 返回的 Group 实例,或者设置为 None 表示默认地全局组。默认值:None。 - use_calc_stream (bool,可选) - 标识使用计算流还是通信流。默认值:True。 返回 diff --git a/docs/api/paddle/distributed/barrier_cn.rst b/docs/api/paddle/distributed/barrier_cn.rst index 8312fe6952e..eead4548499 100644 --- a/docs/api/paddle/distributed/barrier_cn.rst +++ b/docs/api/paddle/distributed/barrier_cn.rst @@ -10,7 +10,7 @@ barrier 参数 ::::::::: - - group (int,可选) - 工作的进程组编号,默认为0。 + - group (int,可选) - 工作的进程组编号,默认为 0。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/broadcast_cn.rst b/docs/api/paddle/distributed/broadcast_cn.rst index 
72f8c054975..a3756f2a0b7 100644 --- a/docs/api/paddle/distributed/broadcast_cn.rst +++ b/docs/api/paddle/distributed/broadcast_cn.rst @@ -6,8 +6,8 @@ broadcast .. py:function:: paddle.distributed.broadcast(tensor, src, group=0) -广播一个Tensor给其他所有进程。 -如下图所示,4个GPU分别开启4个进程,GPU0卡拥有数据,经过broadcast算子后,会将这个数据传播到所有卡上。 +广播一个 Tensor 给其他所有进程。 +如下图所示,4 个 GPU 分别开启 4 个进程,GPU0 卡拥有数据,经过 broadcast 算子后,会将这个数据传播到所有卡上。 .. image:: ./img/broadcast.png :width: 800 @@ -16,9 +16,9 @@ broadcast 参数 ::::::::: - - tensor (Tensor) - 如果当前进程编号是源,那么这个Tensor变量将被发送给其他进程,否则这个Tensor将接收源发送过来的数据。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - tensor (Tensor) - 如果当前进程编号是源,那么这个 Tensor 变量将被发送给其他进程,否则这个 Tensor 将接收源发送过来的数据。Tensor 的数据类型为:float16、float32、float64、int32、int64。 - src (int) - 发送源的进程编号。 - - group (int,可选) - 工作的进程组编号,默认为0。 + - group (int,可选) - 工作的进程组编号,默认为 0。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst b/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst index bc9aca4238a..79aa1bd0ea1 100755 --- a/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst +++ b/docs/api/paddle/distributed/fleet/DistributedStrategy_cn.rst @@ -13,7 +13,7 @@ DistributedStrategy save_to_prototxt ''''''''' -序列化当前的DistributedStrategy,并且保存到output文件中 +序列化当前的 DistributedStrategy,并且保存到 output 文件中 **代码示例** @@ -30,7 +30,7 @@ save_to_prototxt load_from_prototxt ''''''''' -加载已经序列化过的DistributedStrategy文件,并作为初始化DistributedStrategy返回 +加载已经序列化过的 DistributedStrategy 文件,并作为初始化 DistributedStrategy 返回 **代码示例** @@ -46,7 +46,7 @@ execution_strategy `Post Local SGD `__ -配置DistributedStrategy中的 `ExecutionStrategy `_ +配置 DistributedStrategy 中的 `ExecutionStrategy `_ **代码示例** @@ -65,7 +65,7 @@ execution_strategy build_strategy ''''''''' -配置DistributedStrategy中的 `BuildStrategy `_ +配置 DistributedStrategy 中的 `BuildStrategy `_ **代码示例** @@ -89,7 +89,7 @@ build_strategy auto ''''''''' -表示是否启用自动并行策略。此功能目前是实验性功能。目前,自动并行只有在用户只设置auto,不设置其它策略时才能生效。具体请参考示例代码。默认值:False 
+表示是否启用自动并行策略。此功能目前是实验性功能。目前,自动并行只有在用户只设置 auto,不设置其它策略时才能生效。具体请参考示例代码。默认值:False **代码示例** @@ -111,7 +111,7 @@ auto recompute ''''''''' -是否启用Recompute来优化内存空间,默认值:False +是否启用 Recompute 来优化内存空间,默认值:False **代码示例** @@ -131,19 +131,19 @@ recompute recompute_configs ''''''''' -设置Recompute策略的配置。目前来讲,用户使用Recompute策略时,必须配置 checkpoints 参数。 +设置 Recompute 策略的配置。目前来讲,用户使用 Recompute 策略时,必须配置 checkpoints 参数。 -**checkpoints(int):** Recompute策略的检查点,默认为空列表,也即不启用Recompute。 +**checkpoints(int):** Recompute 策略的检查点,默认为空列表,也即不启用 Recompute。 -**enable_offload(bool):** 是否开启recompute-offload 策略。该策略会在recompute的基础上,将原本驻留在显存中的checkpoints 卸载到Host 端的内存中,进一步更大的batch size。因为checkpoint 在内存和显存间的拷贝较慢,该策略是通过牺牲速度换取更大的batch size。默认值:False。 +**enable_offload(bool):** 是否开启 recompute-offload 策略。该策略会在 recompute 的基础上,将原本驻留在显存中的 checkpoints 卸载到 Host 端的内存中,进一步支持更大的 batch size。因为 checkpoint 在内存和显存间的拷贝较慢,该策略是通过牺牲速度换取更大的 batch size。默认值:False。 -**checkpoint_shape(list):** 该参数仅在 offload 开启时需要设置,用来指定 checkpoints 的各维度大小。目前offload 需要所有checkpoints 具有相同的 shape,并且各维度是确定的(不支持 -1 维度)。 +**checkpoint_shape(list):** 该参数仅在 offload 开启时需要设置,用来指定 checkpoints 的各维度大小。目前 offload 需要所有 checkpoints 具有相同的 shape,并且各维度是确定的(不支持 -1 维度)。 pipeline ''''''''' -是否启用Pipeline并行。目前,主要实现单机多GPU间的Pipeline并行和多机间的数据并行。Pipeline信息由用户定义程序中的device_guard确定。 +是否启用 Pipeline 并行。目前,主要实现单机多 GPU 间的 Pipeline 并行和多机间的数据并行。Pipeline 信息由用户定义程序中的 device_guard 确定。 **代码示例** @@ -157,9 +157,9 @@ pipeline pipeline_configs ''''''''' -设置Pipeline策略的配置。Pipeline策略下,神经网络的不同层在不同的GPU设备。相邻的GPU设备间有用于同步隐层Tensor的队列。Pipeline并行包含多种生产者-消费者形式的硬件对,如GPU-CPU、CPU-GPU、GPU-XPU。加速PIpeline并行的最佳方式是减少Tensor队列中的Tensor大小,这样生产者可以更快的为下游消费者提供数据。 +设置 Pipeline 策略的配置。Pipeline 策略下,神经网络的不同层在不同的 GPU 设备。相邻的 GPU 设备间有用于同步隐层 Tensor 的队列。Pipeline 并行包含多种生产者-消费者形式的硬件对,如 GPU-CPU、CPU-GPU、GPU-XPU。加速 Pipeline 并行的最佳方式是减少 Tensor 队列中的 Tensor 大小,这样生产者可以更快的为下游消费者提供数据。 -**micro_batch_size (int):** 每个用户定义的mini-batch中包含的更小的micro-batch的数量。 +**micro_batch_size (int):** 每个用户定义的 mini-batch 中包含的更小的 micro-batch 的数量。 **代码示例** @@ -174,9 +174,9 
@@ pipeline_configs gradient_merge ''''''''' -梯度累加,是一种大Batch训练的策略。添加这一策略后,模型的参数每过 **k_steps** 步更新一次, -**k_steps** 是用户定义的步数。在不更新参数的步数里,Paddle只进行前向、反向网络的计算; -在更新参数的步数里,Paddle执行优化网络,通过特定的优化器(比如SGD、Adam), +梯度累加,是一种大 Batch 训练的策略。添加这一策略后,模型的参数每过 **k_steps** 步更新一次, +**k_steps** 是用户定义的步数。在不更新参数的步数里,Paddle 只进行前向、反向网络的计算; +在更新参数的步数里,Paddle 执行优化网络,通过特定的优化器(比如 SGD、Adam), 将累加的梯度应用到模型参数上。 **代码示例** @@ -193,7 +193,7 @@ gradient_merge_configs 设置 **distribute_strategy** 策略的配置。 -**k_steps(int):** 参数更新的周期,默认为1 +**k_steps(int):** 参数更新的周期,默认为 1 **avg(bool):** 梯度的融合方式,有两种选择: @@ -204,7 +204,7 @@ gradient_merge_configs lars ''''''''' -是否使用LARS optimizer,默认值:False +是否使用 LARS optimizer,默认值:False **代码示例** @@ -223,13 +223,13 @@ lars lars_configs ''''''''' -设置LARS优化器的参数。用户可以配置 lars_coeff,lars_weight_decay,epsilon,exclude_from_weight_decay 参数。 +设置 LARS 优化器的参数。用户可以配置 lars_coeff,lars_weight_decay,epsilon,exclude_from_weight_decay 参数。 **lars_coeff(float):** lars 系数,`原论文 `_ 中的 trust coefficient。默认值是 0.001。 **lars_weight_decay(float):** lars 公式中 weight decay 系数。默认值是 0.0005。 -**exclude_from_weight_decay(list[str]):** 不应用 weight decay 的 layers 的名字列表,某一layer 的name 如果在列表中,这一layer 的 lars_weight_decay将被置为 0。默认值是 None。 +**exclude_from_weight_decay(list[str]):** 不应用 weight decay 的 layers 的名字列表,某一 layer 的 name 如果在列表中,这一 layer 的 lars_weight_decay 将被置为 0。默认值是 None。 **epsilon(float):** 一个小的浮点值,目的是维持数值稳定性,避免 lars 公式中的分母为零。默认值是 0。 @@ -237,7 +237,7 @@ lars_configs lamb ''''''''' -是否使用LAMB optimizer,默认值:False +是否使用 LAMB optimizer,默认值:False **代码示例** @@ -254,16 +254,16 @@ lamb lamb_configs ''''''''' -设置LAMB优化器的参数。用户可以配置 lamb_weight_decay,exclude_from_weight_decay 参数。 +设置 LAMB 优化器的参数。用户可以配置 lamb_weight_decay,exclude_from_weight_decay 参数。 **lamb_weight_decay(float):** lars 公式中 weight decay 系数。默认值是 0.01。 -**exclude_from_weight_decay(list[str]):** 不应用 weight decay 的 layers 的名字列表,某一layer 的name 如果在列表中,这一layer 的 lamb_weight_decay将被置为 0。默认值是 None。 +**exclude_from_weight_decay(list[str]):** 不应用 weight decay 的 layers 的名字列表,某一 
layer 的 name 如果在列表中,这一 layer 的 lamb_weight_decay 将被置为 0。默认值是 None。 localsgd ''''''''' -是否使用LocalSGD optimizer,默认值:False。更多的细节请参考 `Don't Use Large Mini-Batches, Use Local SGD `_ +是否使用 LocalSGD optimizer,默认值:False。更多的细节请参考 `Don't Use Large Mini-Batches, Use Local SGD `_ **代码示例** @@ -276,7 +276,7 @@ localsgd localsgd_configs ''''''''' -设置LocalSGD优化器的参数。用户可以配置k_steps和begin_step参数。 +设置 LocalSGD 优化器的参数。用户可以配置 k_steps 和 begin_step 参数。 **代码示例** @@ -288,13 +288,13 @@ localsgd_configs strategy.localsgd_configs = {"k_steps": 4, "begin_step": 30} -**k_steps(int):** 训练过程中的全局参数更新间隔,默认值1。 +**k_steps(int):** 训练过程中的全局参数更新间隔,默认值 1。 -**begin_step(int):** 指定从第几个step之后进行local SGD算法,默认值1。 +**begin_step(int):** 指定从第几个 step 之后进行 local SGD 算法,默认值 1。 adaptive_localsgd ''''''''' -是否使用AdaptiveLocalSGD optimizer,默认值:False。更多的细节请参考`Adaptive Communication Strategies to Achieve the Best Error-Runtime Trade-off in Local-Update SGD `_ +是否使用 AdaptiveLocalSGD optimizer,默认值:False。更多的细节请参考`Adaptive Communication Strategies to Achieve the Best Error-Runtime Trade-off in Local-Update SGD `_ **代码示例** @@ -306,7 +306,7 @@ adaptive_localsgd adaptive_localsgd_configs ''''''''' -设置AdaptiveLocalSGD优化器的参数。用户可以配置init_k_steps和begin_step参数。 +设置 AdaptiveLocalSGD 优化器的参数。用户可以配置 init_k_steps 和 begin_step 参数。 **代码示例** @@ -318,9 +318,9 @@ adaptive_localsgd_configs strategy.adaptive_localsgd_configs = {"init_k_steps": 1, "begin_step": 30} -**init_k_steps(int):** 自适应localsgd的初始训练步长。训练后,自适应localsgd方法将自动调整步长。默认值1。 +**init_k_steps(int):** 自适应 localsgd 的初始训练步长。训练后,自适应 localsgd 方法将自动调整步长。默认值 1。 -**begin_step(int):** 指定从第几个step之后进行Adaptive LocalSGD算法,默认值1。 +**begin_step(int):** 指定从第几个 step 之后进行 Adaptive LocalSGD 算法,默认值 1。 amp ''''''''' @@ -338,23 +338,23 @@ amp amp_configs ''''''''' -设置自动混合精度训练配置。为避免梯度inf或nan,amp会根据梯度值自动调整loss scale值。目前可以通过字典设置以下配置。 +设置自动混合精度训练配置。为避免梯度 inf 或 nan,amp 会根据梯度值自动调整 loss scale 值。目前可以通过字典设置以下配置。 -**init_loss_scaling(float):** 初始loss scaling值。默认值32768。 +**init_loss_scaling(float):** 初始 loss scaling 
值。默认值 32768。 -**use_dynamic_loss_scaling(bool):** 是否动态调整loss scale值。默认True。 +**use_dynamic_loss_scaling(bool):** 是否动态调整 loss scale 值。默认 True。 -**incr_every_n_steps(int):** 每经过n个连续的正常梯度值才会增大loss scaling值。默认值1000。 +**incr_every_n_steps(int):** 每经过 n 个连续的正常梯度值才会增大 loss scaling 值。默认值 1000。 -**decr_every_n_nan_or_inf(int):** 每经过n个连续的无效梯度值(nan或者inf)才会减小loss scaling值。默认值2。 +**decr_every_n_nan_or_inf(int):** 每经过 n 个连续的无效梯度值(nan 或者 inf)才会减小 loss scaling 值。默认值 2。 -**incr_ratio(float):** 每次增大loss scaling值的扩增倍数,其为大于1的浮点数。默认值2.0。 +**incr_ratio(float):** 每次增大 loss scaling 值的扩增倍数,其为大于 1 的浮点数。默认值 2.0。 -**decr_ratio(float):** 每次减小loss scaling值的比例系数,其为小于1的浮点数。默认值0.5。 +**decr_ratio(float):** 每次减小 loss scaling 值的比例系数,其为小于 1 的浮点数。默认值 0.5。 -**custom_white_list(list[str]):** 用户自定义OP开启fp16执行的白名单。 +**custom_white_list(list[str]):** 用户自定义 OP 开启 fp16 执行的白名单。 -**custom_black_list(list[str]):** 用户自定义OP禁止fp16执行的黑名单。 +**custom_black_list(list[str]):** 用户自定义 OP 禁止 fp16 执行的黑名单。 **代码示例** @@ -383,15 +383,15 @@ dgc dgc_configs ''''''''' -设置dgc策略的配置。目前用户可配置 rampup_begin_step,rampup_step,sparsity参数。 +设置 dgc 策略的配置。目前用户可配置 rampup_begin_step,rampup_step,sparsity 参数。 -**rampup_begin_step(int):** 梯度压缩的起点步。默认值0。 +**rampup_begin_step(int):** 梯度压缩的起点步。默认值 0。 -**rampup_step(int):** 使用稀疏预热的时间步长。默认值为1。例如:如果稀疏度为[0.75,0.9375,0.984375,0.996,0.999],\ -并且rampup_step为100,则在0~19步时使用0.75,在20~39步时使用0.9375,依此类推。当到达sparsity数组末尾时,此后将会使用0.999。 +**rampup_step(int):** 使用稀疏预热的时间步长。默认值为 1。例如:如果稀疏度为[0.75,0.9375,0.984375,0.996,0.999],\ +并且 rampup_step 为 100,则在 0~19 步时使用 0.75,在 20~39 步时使用 0.9375,依此类推。当到达 sparsity 数组末尾时,此后将会使用 0.999。 -**sparsity(list[float]):** 从梯度张量中获取top个重要元素,比率为(1-当前稀疏度)。默认值为[0.999]。\ -例如:如果sparsity为[0.99, 0.999],则将传输top [1%, 0.1%]的重要元素。 +**sparsity(list[float]):** 从梯度张量中获取 top 个重要元素,比率为(1-当前稀疏度)。默认值为[0.999]。\ +例如:如果 sparsity 为[0.99, 0.999],则将传输 top [1%, 0.1%]的重要元素。 **代码示例** @@ -405,7 +405,7 @@ dgc_configs fp16_allreduce ''''''''' -是否使用fp16梯度allreduce训练。默认值:False +是否使用 fp16 梯度 allreduce 训练。默认值:False **代码示例** @@ 
-419,8 +419,8 @@ fp16_allreduce sharding ''''''''' -是否开启sharding 策略。sharding 实现了[ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054) -中 ZeRO-DP 类似的功能,其通过将模型的参数和优化器状态在ranks 间分片来支持更大模型的训练。 +是否开启 sharding 策略。sharding 实现了[ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054) +中 ZeRO-DP 类似的功能,其通过将模型的参数和优化器状态在 ranks 间分片来支持更大模型的训练。 目前在混合并行(Hybrid parallelism) 模式下,sharding config 作为混合并行设置的统一入口来设置混合并行相关参数。 @@ -437,27 +437,27 @@ sharding sharding_configs ''''''''' -设置sharding策略的参数。 +设置 sharding 策略的参数。 -**sharding_segment_strategy(float, optional):** 选择sharding 中用来将前向反向program 切segments 的策略。目前可选策略有:"segment_broadcast_MB" 和 "segment_anchors"。 segment 是sharding中引入的一个内部概念,目的是用来让通信和计算相互重叠掩盖(overlap)。默认值是 segment_broadcast_MB。 +**sharding_segment_strategy(float, optional):** 选择 sharding 中用来将前向反向 program 切 segments 的策略。目前可选策略有:"segment_broadcast_MB" 和 "segment_anchors"。 segment 是 sharding 中引入的一个内部概念,目的是用来让通信和计算相互重叠掩盖(overlap)。默认值是 segment_broadcast_MB。 -**segment_broadcast_MB(float, optional):** 根据sharding 广播通信中的参数量来切segments,仅当 sharding_segment_strategy = segment_broadcast_MB时生效。sharding 会在前向和反向中引入参数广播,在该segment 策略下,每当参数广播量达到 “segment_broadcast_MB”时,在program 中切出一个segment。该参数是一个经验值,最优值会受模型大小和网咯拓扑的影响。默认值是 32。 +**segment_broadcast_MB(float, optional):** 根据 sharding 广播通信中的参数量来切 segments,仅当 sharding_segment_strategy = segment_broadcast_MB 时生效。sharding 会在前向和反向中引入参数广播,在该 segment 策略下,每当参数广播量达到 “segment_broadcast_MB”时,在 program 中切出一个 segment。该参数是一个经验值,最优值会受模型大小和网络拓扑的影响。默认值是 32。 -**segment_anchors(list):** 根据用户选定的锚点切割 segments,仅当 sharding_segment_strategy = segment_anchors 生效。该策略可以让用户更精确的控制program 的切分,目前还在实验阶段。 +**segment_anchors(list):** 根据用户选定的锚点切割 segments,仅当 sharding_segment_strategy = segment_anchors 生效。该策略可以让用户更精确的控制 program 的切分,目前还在实验阶段。 -**sharding_degree(int, optional):** sharding并行数。sharding_degree=1 时,sharding 策略会被关闭。默认值是 8。 +**sharding_degree(int, optional):** sharding 
并行数。sharding_degree=1 时,sharding 策略会被关闭。默认值是 8。 **gradient_merge_acc_step(int, optional):** 梯度累积中的累积步数。gradient_merge_acc_step=1 梯度累积会被关闭。默认值是 1。 -**optimize_offload(bool, optional):** 优化器状态卸载开关。开启后会将优化器中的状态(moment) 卸载到Host 的内存中,以到达节省GPU 显存、支持更大模型的目的。开启后,优化器状态会在训练的更新阶段经历:预取-计算-卸载(offload)三个阶段,更新阶段耗时会增加。这个策略需要权衡显存节省量和训练速度,仅推荐在开启梯度累积并且累积步数较大时开启。因为累积步数较大时,训练中更新阶段的比例将远小于前向&反向阶段,卸载引入的耗时将不明显。 +**optimize_offload(bool, optional):** 优化器状态卸载开关。开启后会将优化器中的状态(moment) 卸载到 Host 的内存中,以到达节省 GPU 显存、支持更大模型的目的。开启后,优化器状态会在训练的更新阶段经历:预取-计算-卸载(offload)三个阶段,更新阶段耗时会增加。这个策略需要权衡显存节省量和训练速度,仅推荐在开启梯度累积并且累积步数较大时开启。因为累积步数较大时,训练中更新阶段的比例将远小于前向&反向阶段,卸载引入的耗时将不明显。 -**dp_degree(int, optional):** 数据并行的路数。当dp_degree>=2 时,会在内层并行的基础上,再引入dp_degree路 数据并行。用户需要保证 global_world_size = mp_degree * sharding_degree * pp_degree * dp_degree。默认值是 1。 +**dp_degree(int, optional):** 数据并行的路数。当 dp_degree>=2 时,会在内层并行的基础上,再引入 dp_degree 路 数据并行。用户需要保证 global_world_size = mp_degree * sharding_degree * pp_degree * dp_degree。默认值是 1。 **mp_degree(int, optional):** [仅在混合并行中使用] megatron 并行数。mp_degree=1 时,mp 策略会被关闭。默认值是 1。 **pp_degree(int, optional):** [仅在混合并行中使用] pipeline 并行数。pp_degree=1 时,pipeline 策略会被关闭。默认值是 1。 -**pp_allreduce_in_optimize(bool, optional):** [仅在混合并行中使用] 在开启pipeline 并行后,将allreduce 操作从反向阶段移动到更新阶段。根据不同的网络拓扑,该选项会影响训练速度,该策略目前还在实验阶段。默认值是 False。 +**pp_allreduce_in_optimize(bool, optional):** [仅在混合并行中使用] 在开启 pipeline 并行后,将 allreduce 操作从反向阶段移动到更新阶段。根据不同的网络拓扑,该选项会影响训练速度,该策略目前还在实验阶段。默认值是 False。 .. code-block:: python diff --git a/docs/api/paddle/distributed/fleet/Fleet_cn.rst b/docs/api/paddle/distributed/fleet/Fleet_cn.rst index 45f8f87acdd..9d6fd786a93 100644 --- a/docs/api/paddle/distributed/fleet/Fleet_cn.rst +++ b/docs/api/paddle/distributed/fleet/Fleet_cn.rst @@ -6,7 +6,7 @@ Fleet .. 
py:class:: paddle.distributed.fleet.Fleet -Fleet是飞桨分布式训练统一API,只需要import fleet并简单初始化后即可快速开始使用飞桨大规模分布式训练 +Fleet 是飞桨分布式训练统一 API,只需要 import fleet 并简单初始化后即可快速开始使用飞桨大规模分布式训练 方法 @@ -14,14 +14,14 @@ Fleet是飞桨分布式训练统一API,只需要import fleet并简单初始化 init(role_maker=None, is_collective=False, strategy=None) ''''''''' -使用RoleMaker或其他配置初始化fleet。 +使用 RoleMaker 或其他配置初始化 fleet。 **参数** - - **role_maker** (RoleMakerBase) 已初始化好的PaddleCloudRoleMaker或UserDefineRoleMaker - - **is_collective** (bool) 在未指定role_maker的情况下,可由init方法自行初始化RoleMaker, is_collective为True则按照collective模式进行创建,is_collective=False则按照ParameterServer模式进行创建 - - **strategy** (DistributedStrategy):分布式训练的额外属性。详情请参阅paddle.distributed.fleet.DistributedStrategy。默认值:None。 + - **role_maker** (RoleMakerBase) 已初始化好的 PaddleCloudRoleMaker 或 UserDefineRoleMaker + - **is_collective** (bool) 在未指定 role_maker 的情况下,可由 init 方法自行初始化 RoleMaker, is_collective 为 True 则按照 collective 模式进行创建,is_collective=False 则按照 ParameterServer 模式进行创建 + - **strategy** (DistributedStrategy):分布式训练的额外属性。详情请参阅 paddle.distributed.fleet.DistributedStrategy。默认值:None。 **返回** None @@ -61,7 +61,7 @@ None is_first_worker() ''''''''' -返回当前节点是否为第一个`worker`节点,判断当前worker_index是否为0,如果为0则返回True,否则返回False。 +返回当前节点是否为第一个`worker`节点,判断当前 worker_index 是否为 0,如果为 0 则返回 True,否则返回 False。 **返回** True/False @@ -80,7 +80,7 @@ True/False worker_index() ''''''''' -返回当前节点的编号,每个`worker`节点被分配[0, worker_num-1]内的唯一的编码ID +返回当前节点的编号,每个`worker`节点被分配[0, worker_num-1]内的唯一的编码 ID **返回** int @@ -132,7 +132,7 @@ True/False worker_endpoints(to_string=False) ''''''''' -返回全部worker节点的ip及端口信息 +返回全部 worker 节点的 ip 及端口信息 **返回** list/string @@ -151,10 +151,10 @@ server_num() **注意:** - **该参数只在ParameterServer模式下生效** + **该参数只在 ParameterServer 模式下生效** -返回当前全部Server节点的个数 +返回当前全部 Server 节点的个数 **返回** int @@ -174,10 +174,10 @@ server_index() **注意:** - **该参数只在ParameterServer模式下生效** + **该参数只在 ParameterServer 模式下生效** -返回当前节点的编号,每个`server`节点被分配[0, server_num-1]内的唯一的编码ID +返回当前节点的编号,每个`server`节点被分配[0, server_num-1]内的唯一的编码 ID **返回** int 
@@ -198,10 +198,10 @@ server_endpoints(to_string=False) **注意:** - **该参数只在ParameterServer模式下生效** + **该参数只在 ParameterServer 模式下生效** -返回全部server节点的ip及端口信息 +返回全部 server 节点的 ip 及端口信息 **返回** list/string @@ -221,7 +221,7 @@ is_server() **注意:** - **该参数只在ParameterServer模式下生效** + **该参数只在 ParameterServer 模式下生效** 返回当前节点是否为`server`节点 @@ -241,7 +241,7 @@ True/False barrier_worker() ''''''''' -调用集合通信功能,强制要求所有的worker在此处相互等待一次 +调用集合通信功能,强制要求所有的 worker 在此处相互等待一次 **返回** 无 @@ -258,7 +258,7 @@ barrier_worker() init_worker() ''''''''' -worker节点在训练前的初始化,包括通信模块,参数同步等 +worker 节点在训练前的初始化,包括通信模块,参数同步等 **返回** 无 @@ -275,7 +275,7 @@ worker节点在训练前的初始化,包括通信模块,参数同步等 init_server(*args, **kwargs) ''''''''' -server节点的初始化,包括server端参数初始化,模型加载等 +server 节点的初始化,包括 server 端参数初始化,模型加载等 **返回** 无 @@ -292,7 +292,7 @@ server节点的初始化,包括server端参数初始化,模型加载等 run_server() ''''''''' -server节点的运行,此命令会将ParameterServer的进程启动并常驻直至训练结束 +server 节点的运行,此命令会将 ParameterServer 的进程启动并常驻直至训练结束 **返回** 无 @@ -310,7 +310,7 @@ server节点的运行,此命令会将ParameterServer的进程启动并常驻 stop_worker() ''''''''' -停止当前正在运行的worker节点 +停止当前正在运行的 worker 节点 **返回** 无 @@ -336,11 +336,11 @@ save_inference_model(executor, dirname, feeded_var_names, target_vars, main_prog - **executor** (Executor) – 用于保存预测模型的 ``executor``,详见 :ref:`api_guide_executor` 。 - **dirname** (str) – 指定保存预测模型结构和参数的文件目录。 - - **feeded_var_names** (list[str]) – 字符串列表,包含着Inference Program预测时所需提供数据的所有变量名称(即所有输入变量的名称)。 + - **feeded_var_names** (list[str]) – 字符串列表,包含着 Inference Program 预测时所需提供数据的所有变量名称(即所有输入变量的名称)。 - **target_vars** (list[Tensor]) – ``Tensor`` (详见 :ref:`api_guide_Program` )类型列表,包含着模型的所有输出变量。通过这些输出变量即可得到模型的预测结果。 - - **main_program** (Program,可选) – 通过该参数指定的 ``main_program`` 可构建一个专门用于预测的 ``Inference Program``。若为None,则使用全局默认的 ``_main_program_`` 。>默认值为None。 - - **export_for_deployment** (bool,可选) – 若为True,则 ``main_program`` 指定的Program将被修改为只支持直接预测部署的Program。否则,将存储更多的信息,方便优化和再训练。目前 -只支持设置为True,且默认值为True。 + - **main_program** (Program,可选) – 通过该参数指定的 ``main_program`` 可构建一个专门用于预测的 ``Inference 
Program``。若为 None,则使用全局默认的 ``_main_program_`` 。>默认值为 None。 + - **export_for_deployment** (bool,可选) – 若为 True,则 ``main_program`` 指定的 Program 将被修改为只支持直接预测部署的 Program。否则,将存储更多的信息,方便优化和再训练。目前 +只支持设置为 True,且默认值为 True。 **返回** @@ -374,7 +374,7 @@ save_persistables(executor, dirname, main_program=None) - **executor** (Executor) – 用于保存持久性变量的 ``executor``,详见 :ref:`api_guide_executor` 。 - **dirname** (str) – 用于储存持久性变量的文件目录。 - - **main_program** (Program,可选) – 需要保存持久性变量的Program( ``Program`` 含义详见 :ref:`api_guide_Program` )。如果为None,则使用default_main_Program。默认值为None>。 + - **main_program** (Program,可选) – 需要保存持久性变量的 Program( ``Program`` 含义详见 :ref:`api_guide_Program` )。如果为 None,则使用 default_main_Program。默认值为 None>。 **返回** 无 @@ -403,8 +403,8 @@ distributed_optimizer(optimizer, strategy=None) **参数** - - **optimizer** (optimizer) – paddle定义的优化器。 - - **strategy** (DistributedStrategy) – 分布式优化器的额外属性。建议在fleet.init()创建。这里的仅仅是为了兼容性。如果这里的参数strategy不是None,则它将覆盖在fleet.init()创建的DistributedStrategy,并在后续的分布式训练中生效。 + - **optimizer** (optimizer) – paddle 定义的优化器。 + - **strategy** (DistributedStrategy) – 分布式优化器的额外属性。建议在 fleet.init()创建。这里的仅仅是为了兼容性。如果这里的参数 strategy 不是 None,则它将覆盖在 fleet.init()创建的 DistributedStrategy,并在后续的分布式训练中生效。 **代码示例** @@ -424,16 +424,16 @@ distributed_model(model) **注意:** - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** 返回分布式数据并行模型。 **参数** - model (Layer) - 用户定义的模型,此处模型是指继承动态图Layer的网络。 + model (Layer) - 用户定义的模型,此处模型是指继承动态图 Layer 的网络。 **返回** -分布式数据并行模型,该模型同样继承动态图Layer。 +分布式数据并行模型,该模型同样继承动态图 Layer。 **代码示例** @@ -441,9 +441,9 @@ distributed_model(model) .. code-block:: python - # 这个示例需要由fleetrun启动,用法为: + # 这个示例需要由 fleetrun 启动,用法为: # fleetrun --gpus=0,1 example.py - # 脚本example.py中的代码是下面这个示例。 + # 脚本 example.py 中的代码是下面这个示例。 import paddle import paddle.nn as nn @@ -489,21 +489,21 @@ state_dict() **注意:** - **1. 
该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -以 ``dict`` 返回当前 ``optimizer`` 使用的所有Tensor。比如对于Adam优化器,将返回 beta1, beta2, momentum 等Tensor。 +以 ``dict`` 返回当前 ``optimizer`` 使用的所有 Tensor。比如对于 Adam 优化器,将返回 beta1, beta2, momentum 等 Tensor。 **返回** -dict,当前 ``optimizer`` 使用的所有Tensor。 +dict,当前 ``optimizer`` 使用的所有 Tensor。 **代码示例** .. code-block:: python - # 这个示例需要由fleetrun启动,用法为: + # 这个示例需要由 fleetrun 启动,用法为: # fleetrun --gpus=0,1 example.py - # 脚本example.py中的代码是下面这个示例。 + # 脚本 example.py 中的代码是下面这个示例。 import numpy as np import paddle @@ -527,9 +527,9 @@ set_state_dict(state_dict) **注意:** - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** -加载 ``optimizer`` 的Tensor字典给当前 ``optimizer`` 。 +加载 ``optimizer`` 的 Tensor 字典给当前 ``optimizer`` 。 **返回** None @@ -539,9 +539,9 @@ None .. code-block:: python - # 这个示例需要由fleetrun启动,用法为: + # 这个示例需要由 fleetrun 启动,用法为: # fleetrun --gpus=0,1 example.py - # 脚本example.py中的代码是下面这个示例。 + # 脚本 example.py 中的代码是下面这个示例。 import numpy as np import paddle @@ -568,7 +568,7 @@ set_lr(value) **注意:** - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** 手动设置当前 ``optimizer`` 的学习率。 @@ -584,9 +584,9 @@ None .. code-block:: python - # 这个示例需要由fleetrun启动,用法为: + # 这个示例需要由 fleetrun 启动,用法为: # fleetrun --gpus=0,1 example.py - # 脚本example.py中的代码是下面这个示例。 + # 脚本 example.py 中的代码是下面这个示例。 import numpy as np import paddle @@ -621,7 +621,7 @@ get_lr() **注意:** - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** 获取当前步骤的学习率。 @@ -634,9 +634,9 @@ float,当前步骤的学习率。 .. 
code-block:: python - # 这个示例需要由fleetrun启动,用法为: + # 这个示例需要由 fleetrun 启动,用法为: # fleetrun --gpus=0,1 example.py - # 脚本example.py中的代码是下面这个示例。 + # 脚本 example.py 中的代码是下面这个示例。 import numpy as np import paddle @@ -662,7 +662,7 @@ step() **注意:** - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** 执行一次优化器并进行参数更新。 @@ -674,9 +674,9 @@ None。 .. code-block:: python - # 这个示例需要由fleetrun启动,用法为: + # 这个示例需要由 fleetrun 启动,用法为: # fleetrun --gpus=0,1 example.py - # 脚本example.py中的代码是下面这个示例。 + # 脚本 example.py 中的代码是下面这个示例。 import paddle import paddle.nn as nn @@ -723,7 +723,7 @@ clear_grad() **注意:** - **1. 该API只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** + **1. 该 API 只在** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **模式下生效** 清除需要优化的参数的梯度。 @@ -736,9 +736,9 @@ None。 .. code-block:: python - # 这个示例需要由fleetrun启动,用法为: + # 这个示例需要由 fleetrun 启动,用法为: # fleetrun --gpus=0,1 example.py - # 脚本example.py中的代码是下面这个示例。 + # 脚本 example.py 中的代码是下面这个示例。 import paddle import paddle.nn as nn diff --git a/docs/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst b/docs/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst index 49f95d0b0c5..314ccbddf31 100644 --- a/docs/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst +++ b/docs/api/paddle/distributed/fleet/PaddleCloudRoleMaker_cn.rst @@ -5,8 +5,8 @@ PaddleCloudRoleMaker .. 
py:class:: paddle.distributed.fleet.PaddleCloudRoleMaker -PaddleCloudRoleMaker是基于从环境变量中获取分布式相关信息进行分布式配置初始化的接口。 -它会自动根据用户在环境变量中的配置进行分布式训练环境初始化,目前PaddleCloudRoleMaker支持ParameterServer分布式训练及Collective分布式训练两种模式的初始化。 +PaddleCloudRoleMaker 是基于从环境变量中获取分布式相关信息进行分布式配置初始化的接口。 +它会自动根据用户在环境变量中的配置进行分布式训练环境初始化,目前 PaddleCloudRoleMaker 支持 ParameterServer 分布式训练及 Collective 分布式训练两种模式的初始化。 代码示例 diff --git a/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst b/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst index 53246b552c3..a9d1cea57b2 100644 --- a/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst +++ b/docs/api/paddle/distributed/fleet/UserDefinedRoleMaker_cn.rst @@ -5,8 +5,8 @@ UserDefinedRoleMaker .. py:class:: paddle.distributed.fleet.UserDefinedRoleMaker -UserDefinedRoleMaker是基于从用户自定义的参数中获取分布式相关信息进行分布式配置初始化的接口 -它会自动根据用户的自定义配置进行分布式训练环境初始化,目前UserDefinedRoleMaker支持ParameterServer分布式训练及Collective分布式训练两种模式的初始化。 +UserDefinedRoleMaker 是基于从用户自定义的参数中获取分布式相关信息进行分布式配置初始化的接口 +它会自动根据用户的自定义配置进行分布式训练环境初始化,目前 UserDefinedRoleMaker 支持 ParameterServer 分布式训练及 Collective 分布式训练两种模式的初始化。 代码示例 diff --git a/docs/api/paddle/distributed/fleet/UtilBase_cn.rst b/docs/api/paddle/distributed/fleet/UtilBase_cn.rst index a3421b26fe9..ca91716f667 100644 --- a/docs/api/paddle/distributed/fleet/UtilBase_cn.rst +++ b/docs/api/paddle/distributed/fleet/UtilBase_cn.rst @@ -16,11 +16,11 @@ all_reduce(input, mode="sum", comm_world="worker") - **input** (list|numpy.array) – 归约操作的输入。 - **mode** (str) - 归约操作的模式,包含求和,取最大值和取最小值,默认为求和归约。 - - **comm_world** (str) - 归约操作的通信集合,包含:server集合(“server"),worker集合("worker")及所有节点集合("all"),默认为worker集合。 + - **comm_world** (str) - 归约操作的通信集合,包含:server 集合(“server"),worker 集合("worker")及所有节点集合("all"),默认为 worker 集合。 **返回** -Numpy.array|None:一个和 `input` 形状一致的numpy数组或None。 +Numpy.array|None:一个和 `input` 形状一致的 numpy 数组或 None。 **代码示例** @@ -64,7 +64,7 @@ barrier(comm_world="worker") **参数** - - **comm_world** (str) - 
阻塞操作的通信集合,包含:server集合(“server"),worker集合("worker")及所有节点集合("all"),默认为worker集合。 + - **comm_world** (str) - 阻塞操作的通信集合,包含:server 集合(“server"),worker 集合("worker")及所有节点集合("all"),默认为 worker 集合。 **代码示例** @@ -105,11 +105,11 @@ all_gather(input, comm_world="worker") **参数** - **input** (int|float) - 聚合操作的输入。 - - **comm_world** (str) - 聚合操作的通信集合,包含:server集合(“server"),worker集合("worker")及所有节点集合("all"),默认为worker集合。 + - **comm_world** (str) - 聚合操作的通信集合,包含:server 集合(“server"),worker 集合("worker")及所有节点集合("all"),默认为 worker 集合。 **返回** - - **output** (List): List格式的聚合结果。 + - **output** (List): List 格式的聚合结果。 **代码示例** @@ -153,8 +153,8 @@ get_file_shard(files) .. code-block:: text - 示例 1:原始所有文件列表 `files` = [a, b, c ,d, e],训练节点个数 `trainer_num` = 2,那么属于零号节点的训练文件为[a, b, c],属于1号节点的训练文件为[d, e]。 - 示例 2:原始所有文件列表 `files` = [a, b],训练节点个数 `trainer_num` = 3,那么属于零号节点的训练文件为[a],属于1号节点的训练文件为[b],属于2号节点的训练文件为[]。 + 示例 1:原始所有文件列表 `files` = [a, b, c ,d, e],训练节点个数 `trainer_num` = 2,那么属于零号节点的训练文件为[a, b, c],属于 1 号节点的训练文件为[d, e]。 + 示例 2:原始所有文件列表 `files` = [a, b],训练节点个数 `trainer_num` = 3,那么属于零号节点的训练文件为[a],属于 1 号节点的训练文件为[b],属于 2 号节点的训练文件为[]。 **参数** diff --git a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst index b7d6036a709..6d856f8b4e1 100644 --- a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst @@ -4,13 +4,13 @@ HDFSClient ------------------------------- .. 
py:class:: paddle.distributed.fleet.utils.HDFSClient -一个HADOOP文件系统工具类。 +一个 HADOOP 文件系统工具类。 参数 :::::::::::: - - **hadoop_home** (str):HADOOP HOME地址。 - - **configs** (dict): HADOOP文件系统配置。需包含 `fs.default.name` 和 `hadoop.job.ugi` 这两个字段。 + - **hadoop_home** (str):HADOOP HOME 地址。 + - **configs** (dict): HADOOP 文件系统配置。需包含 `fs.default.name` 和 `hadoop.job.ugi` 这两个字段。 代码示例 :::::::::::: @@ -36,11 +36,11 @@ ls_dir(fs_path) **参数** - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **返回** - - Tuple,一个包含所有子目录和文件名的2-Tuple,格式形如:([subdirname1, subdirname1, ...], [filename1, filename2, ...])。 + - Tuple,一个包含所有子目录和文件名的 2-Tuple,格式形如:([subdirname1, subdirname1, ...], [filename1, filename2, ...])。 **代码示例** @@ -63,7 +63,7 @@ mkdirs(fs_path) **参数** - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **代码示例** @@ -82,11 +82,11 @@ mkdirs(fs_path) delete(fs_path) ''''''''' -删除HADOOP文件(或目录)。 +删除 HADOOP 文件(或目录)。 **参数** - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **代码示例** @@ -109,7 +109,7 @@ is_file(fs_path) **参数** - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **返回** @@ -136,7 +136,7 @@ is_dir(fs_path) **参数** - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **返回** @@ -163,7 +163,7 @@ is_exist(fs_path) **参数** - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **返回** @@ -186,12 +186,12 @@ is_exist(fs_path) upload(local_path, fs_path) ''''''''' -上传本地文件至HADOOP文件系统。 +上传本地文件至 HADOOP 文件系统。 **参数** - **local_path** (str):本地文件路径。 - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **代码示例** @@ -210,12 +210,12 @@ upload(local_path, fs_path) download(fs_path, local_path) ''''''''' -下载HADOOP文件至本地文件系统。 +下载 HADOOP 文件至本地文件系统。 **参数** - **local_path** (str):本地文件路径。 - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **代码示例** @@ -235,11 +235,11 @@ download(fs_path, local_path) touch(fs_path, exist_ok=True) ''''''''' -创建一个HADOOP文件。 +创建一个 HADOOP 文件。 
**参数** - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 - **exist_ok** (bool):路径已存在时程序是否报错。若 `exist_ok = True`,则直接返回,反之则抛出文件存在的异常,默认不抛出异常。 **代码示例** @@ -259,7 +259,7 @@ touch(fs_path, exist_ok=True) mv(fs_src_path, fs_dst_path, overwrite=False) ''''''''' -HADOOP系统文件移动。 +HADOOP 系统文件移动。 **参数** @@ -284,11 +284,11 @@ HADOOP系统文件移动。 list_dirs(fs_path) ''''''''' -列出HADOOP文件路径下所有的子目录。 +列出 HADOOP 文件路径下所有的子目录。 **参数** - - **fs_path** (str): HADOOP文件路径。 + - **fs_path** (str): HADOOP 文件路径。 **返回** diff --git a/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst b/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst index fef496fd4f2..3d23cfd024d 100644 --- a/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst @@ -27,7 +27,7 @@ ls_dir(fs_path) **返回** - - Tuple,一个包含所有子目录和文件名的2-Tuple,格式形如:([subdirname1, subdirname1, ...], [filename1, filename2, ...])。 + - Tuple,一个包含所有子目录和文件名的 2-Tuple,格式形如:([subdirname1, subdirname1, ...], [filename1, filename2, ...])。 **代码示例** diff --git a/docs/api/paddle/distributed/fleet/utils/recompute_cn.rst b/docs/api/paddle/distributed/fleet/utils/recompute_cn.rst index d3d197cd199..0653978441b 100644 --- a/docs/api/paddle/distributed/fleet/utils/recompute_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/recompute_cn.rst @@ -12,12 +12,12 @@ recompute ::::::::: - function (paddle.nn.Sequential) - 模型前向传播的部分连续的层函数组成的序列, 它们的中间激活函数值将在前向传播过程中被释放掉来节省显存,并且在反向梯度计算的时候会重新被计算。 - - args (Tensor) - function的输入。 - - kwargs (Dict) - kwargs只应该包含preserve_rng_state的键值对,用来表示是否保存前向的rng,如果为True,那么在反向传播的重计算前向时会还原上次前向的rng值。默认preserve_rng_state为True。 + - args (Tensor) - function 的输入。 + - kwargs (Dict) - kwargs 只应该包含 preserve_rng_state 的键值对,用来表示是否保存前向的 rng,如果为 True,那么在反向传播的重计算前向时会还原上次前向的 rng 值。默认 preserve_rng_state 为 True。 返回 ::::::::: -function作用在输入的输出 +function 作用在输入的输出 代码示例 ::::::::: diff --git a/docs/api/paddle/distributed/get_rank_cn.rst b/docs/api/paddle/distributed/get_rank_cn.rst 
index 75ad8fc79ba..2b5fa024829 100644 --- a/docs/api/paddle/distributed/get_rank_cn.rst +++ b/docs/api/paddle/distributed/get_rank_cn.rst @@ -5,13 +5,13 @@ get_rank .. py:function:: paddle.distributed.get_rank() -返回当前进程的rank。 +返回当前进程的 rank。 -当前进程rank的值等于环境变量 ``PADDLE_TRAINER_ID`` 的值,默认值为0。 +当前进程 rank 的值等于环境变量 ``PADDLE_TRAINER_ID`` 的值,默认值为 0。 返回 ::::::::: -(int) 当前进程的rank。 +(int) 当前进程的 rank。 代码示例 ::::::::: diff --git a/docs/api/paddle/distributed/get_world_size_cn.rst b/docs/api/paddle/distributed/get_world_size_cn.rst index 5fadb7e882b..9f3276016bd 100644 --- a/docs/api/paddle/distributed/get_world_size_cn.rst +++ b/docs/api/paddle/distributed/get_world_size_cn.rst @@ -7,7 +7,7 @@ get_world_size 返回参与当前任务的进程数。 -当前进程数等于环境变量 ``PADDLE_TRAINERS_NUM`` 的值,默认值为1。 +当前进程数等于环境变量 ``PADDLE_TRAINERS_NUM`` 的值,默认值为 1。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/irecv_cn.rst b/docs/api/paddle/distributed/irecv_cn.rst index 742b13f654c..78af3d0649f 100644 --- a/docs/api/paddle/distributed/irecv_cn.rst +++ b/docs/api/paddle/distributed/irecv_cn.rst @@ -5,18 +5,18 @@ irecv .. 
py:function:: paddle.distributed.irecv(tensor, src=None, group=None) -异步接受发送来的tensor。 +异步接受发送来的 tensor。 参数 ::::::::: - tensor (Tensor) - 要接受的张量。其数据类型应为 float16、float32、float64、int32 或 int64。 - - src (int) - 接受节点的全局rank号。 - - group (Group,可选) - new_group返回的Group实例,或者设置为None表示默认的全局组。默认值:None。 + - src (int) - 接受节点的全局 rank 号。 + - group (Group,可选) - new_group 返回的 Group 实例,或者设置为 None 表示默认的全局组。默认值:None。 返回 ::::::::: -返回Task。 +返回 Task。 注意 ::::::::: diff --git a/docs/api/paddle/distributed/is_initialized_cn.rst b/docs/api/paddle/distributed/is_initialized_cn.rst index d54cbb8945c..319b52fc6c3 100644 --- a/docs/api/paddle/distributed/is_initialized_cn.rst +++ b/docs/api/paddle/distributed/is_initialized_cn.rst @@ -14,7 +14,7 @@ is_initialized 返回 ::::::::: -如果分布式环境初始化完成,默认通信组已完成建立,则返回True;反之则返回False。 +如果分布式环境初始化完成,默认通信组已完成建立,则返回 True;反之则返回 False。 代码示例 ::::::::: diff --git a/docs/api/paddle/distributed/isend_cn.rst b/docs/api/paddle/distributed/isend_cn.rst index cafe02f426c..6c58ef17da7 100644 --- a/docs/api/paddle/distributed/isend_cn.rst +++ b/docs/api/paddle/distributed/isend_cn.rst @@ -5,18 +5,18 @@ isend .. 
py:function:: paddle.distributed.isend(tensor, dst, group=None) -异步的将 ``tensor`` 发送到指定的rank进程上。 +异步的将 ``tensor`` 发送到指定的 rank 进程上。 参数 ::::::::: - tensor (Tensor) - 要发送的张量。其数据类型应为 float16、float32、float64、int32 或 int64。 - - dst (int) - 目标节点的全局rank号。 - - group (Group,可选) - new_group返回的Group实例,或者设置为None表示默认的全局组。默认值:None。 + - dst (int) - 目标节点的全局 rank 号。 + - group (Group,可选) - new_group 返回的 Group 实例,或者设置为 None 表示默认的全局组。默认值:None。 返回 ::::::::: -返回Task。 +返回 Task。 注意 diff --git a/docs/api/paddle/distributed/launch_cn.rst b/docs/api/paddle/distributed/launch_cn.rst index 00220dac88c..b3c1bdc73e9 100644 --- a/docs/api/paddle/distributed/launch_cn.rst +++ b/docs/api/paddle/distributed/launch_cn.rst @@ -55,13 +55,13 @@ Collective 参数 Parameter-Server 参数 ::::::::: - - ``--servers``:多机分布式任务中,指定参数服务器服务节点的IP和端口,例如 ``--servers="192.168.0.16:6170,192.168.0.17:6170"``。 + - ``--servers``:多机分布式任务中,指定参数服务器服务节点的 IP 和端口,例如 ``--servers="192.168.0.16:6170,192.168.0.17:6170"``。 - - ``--trainers``:多机分布式任务中,指定参数服务器训练节点的IP和端口,也可只指定IP,例如 ``--trainers="192.168.0.16:6171,192.168.0.16:6172,192.168.0.17:6171,192.168.0.17:6172"``。 + - ``--trainers``:多机分布式任务中,指定参数服务器训练节点的 IP 和端口,也可只指定 IP,例如 ``--trainers="192.168.0.16:6171,192.168.0.16:6172,192.168.0.17:6171,192.168.0.17:6172"``。 - ``--workers``: [DEPRECATED] 同 trainers。 - - ``--heter_workers``:在异构集群中启动分布式任务,指定参数服务器异构训练节点的IP和端口,例如 ``--heter_workers="192.168.0.16:6172,192.168.0.17:6172"``。 + - ``--heter_workers``:在异构集群中启动分布式任务,指定参数服务器异构训练节点的 IP 和端口,例如 ``--heter_workers="192.168.0.16:6172,192.168.0.17:6172"``。 - ``--trainer_num``:指定参数服务器训练节点的个数。 @@ -86,21 +86,21 @@ Elastic 参数 IPU 参数 ::::::::: - IPU分布式训练只需要3个参数:``--devices``,``training_script`` 和 ``training_script_args``。对于IPU的参数说明如下: - ``--devices`` 表示设备个数,例如 ``--devices=4`` 表示当前的训练程序需要4个IPUs。 + IPU 分布式训练只需要 3 个参数:``--devices``,``training_script`` 和 ``training_script_args``。对于 IPU 的参数说明如下: + ``--devices`` 表示设备个数,例如 ``--devices=4`` 表示当前的训练程序需要 4 个 IPUs。 ``training_script`` 只允许设置为 ``ipu`` 。 - 
``training_script_args`` 表示启动IPU分布式训练的相关参数。请参看如下各项参数说明。 + ``training_script_args`` 表示启动 IPU 分布式训练的相关参数。请参看如下各项参数说明。 请参考 ``代码实例十``。 - - ``--hosts``:IPU分布式训练的主机ip,一个主机可包含多个进程。 + - ``--hosts``:IPU 分布式训练的主机 ip,一个主机可包含多个进程。 - ``--nproc_per_host``: 每个主机的进程数量。一个进程可包含多个实例。 - - ``--ipus_per_replica``:每个实例包含的IPU数量。一个实例可包含多个IPUs。 + - ``--ipus_per_replica``:每个实例包含的 IPU 数量。一个实例可包含多个 IPUs。 - - ``--ipu_partition``:分布式训练中使用的IPU分区名称。 + - ``--ipu_partition``:分布式训练中使用的 IPU 分区名称。 - - ``--vipu_server``:IPU设备管理服务的ip。 + - ``--vipu_server``:IPU 设备管理服务的 ip。 - ``training_script``:分布式训练任务脚本的绝对路径,例如 ``training.py`` 。 @@ -142,7 +142,7 @@ IPU 参数 .. code-block:: bash :name: code-block-example-bash1 - # 启动单机4卡任务 + # 启动单机 4 卡任务 python -m paddle.distributed.launch --devices=0,1,2,3 train.py --lr=0.01 @@ -175,7 +175,7 @@ IPU 参数 .. code-block:: bash :name: code-block-example-bash4 - # 在多机上启动,例如在 192.168.0.16, 192.168.0.17 分别启动1个 server 和2个 trainer + # 在多机上启动,例如在 192.168.0.16, 192.168.0.17 分别启动 1 个 server 和 2 个 trainer # On 192.168.0.16: @@ -255,9 +255,9 @@ IPU 参数 .. code-block:: bash :name: code-block-example-bash10 - # 使用如下命令启动IPU分布式训练 + # 使用如下命令启动 IPU 分布式训练 # 要求 `devices` 表示分布式训练的设备数量 # 要求 `training_script` 设置为 `ipu` - # 要求 `training_script_args` 表示IPU分布式训练相关参数,非训练运行脚本参数 + # 要求 `training_script_args` 表示 IPU 分布式训练相关参数,非训练运行脚本参数 # 请参看上述 `IPU 参数` 说明 python -m paddle.distributed.launch --devices 4 ipu --hosts=localhost --nproc_per_host=2 --ipus_per_replica=1 --ipu_partition=pod16 --vipu_server=127.0.0.1 train.py diff --git a/docs/api/paddle/distributed/recv_cn.rst b/docs/api/paddle/distributed/recv_cn.rst index b93f2c91d16..f00d73f38be 100644 --- a/docs/api/paddle/distributed/recv_cn.rst +++ b/docs/api/paddle/distributed/recv_cn.rst @@ -6,13 +6,13 @@ recv .. 
py:function:: paddle.distributed.recv(tensor, src=0, group=None, use_calc_stream=True) -发送tensor到指定接收者。 +发送 tensor 到指定接收者。 参数 ::::::::: - - tensor (Tensor) - 接收数据的Tensor。数据类型为:float16、float32、float64、int32、int64。 + - tensor (Tensor) - 接收数据的 Tensor。数据类型为:float16、float32、float64、int32、int64。 - src (int) - 发送者的标识符。 - - group (Group,可选) - new_group返回的Group实例,或者设置为None表示默认地全局组。默认值:None。 + - group (Group,可选) - new_group 返回的 Group 实例,或者设置为 None 表示默认地全局组。默认值:None。 - use_calc_stream (bool,可选) - 标识使用计算流还是通信流。默认值:True。 返回 diff --git a/docs/api/paddle/distributed/reduce_cn.rst b/docs/api/paddle/distributed/reduce_cn.rst index 2c520e12ad5..b970f39d91c 100644 --- a/docs/api/paddle/distributed/reduce_cn.rst +++ b/docs/api/paddle/distributed/reduce_cn.rst @@ -6,9 +6,9 @@ reduce .. py:function:: paddle.distributed.reduce(tensor, dst, op=ReduceOp.SUM, group=0) -进程组内所有进程的指定tensor进行归约操作,并返回给所有进程归约的结果。 -如下图所示,4个GPU分别开启4个进程,每张卡上的数据用卡号代表,reduce的目标是第0张卡, -规约操作是求和,经过reduce操作后,第0张卡会得到所有卡数据的总和。 +进程组内所有进程的指定 tensor 进行归约操作,并返回给所有进程归约的结果。 +如下图所示,4 个 GPU 分别开启 4 个进程,每张卡上的数据用卡号代表,reduce 的目标是第 0 张卡, +规约操作是求和,经过 reduce 操作后,第 0 张卡会得到所有卡数据的总和。 .. 
image:: ./img/reduce.png :width: 800 @@ -17,10 +17,10 @@ reduce 参数 ::::::::: - - tensor (Tensor) - 操作的输入Tensor,结果返回至目标进程号的Tensor中。Tensor的数据类型为:float16、float32、float64、int32、int64。 + - tensor (Tensor) - 操作的输入 Tensor,结果返回至目标进程号的 Tensor 中。Tensor 的数据类型为:float16、float32、float64、int32、int64。 - dst (int) - 返回操作结果的目标进程编号。 - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.Min|ReduceOp.PROD,可选) - 归约的具体操作,比如求和,取最大值,取最小值和求乘积,默认为求和归约。 - - group (int,可选) - 工作的进程组编号,默认为0。 + - group (int,可选) - 工作的进程组编号,默认为 0。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/reduce_scatter_cn.rst b/docs/api/paddle/distributed/reduce_scatter_cn.rst index cb89b78384c..c3b2993188e 100644 --- a/docs/api/paddle/distributed/reduce_scatter_cn.rst +++ b/docs/api/paddle/distributed/reduce_scatter_cn.rst @@ -11,14 +11,14 @@ reduce_scatter ::::::::: - tensor (Tensor) – 输出的张量。 - tensor_list (list(Tensor)) – 归约和切分的张量列表。 - - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.Min|ReduceOp.PROD) – 操作类型,默认ReduceOp.SUM。 - - group: (Group, optional) – 通信组;如果是None,则使用默认通信组。 - - use_calc_stream: (bool, optional) – 决定是在计算流还是通信流上做该通信操作;默认为True,表示在计算流。 + - op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.Min|ReduceOp.PROD) – 操作类型,默认 ReduceOp.SUM。 + - group: (Group, optional) – 通信组;如果是 None,则使用默认通信组。 + - use_calc_stream: (bool, optional) – 决定是在计算流还是通信流上做该通信操作;默认为 True,表示在计算流。 返回 ::::::::: -返回Task。 +返回 Task。 注意 ::::::::: diff --git a/docs/api/paddle/distributed/scatter_cn.rst b/docs/api/paddle/distributed/scatter_cn.rst index d56bba74371..c19c3541e67 100644 --- a/docs/api/paddle/distributed/scatter_cn.rst +++ b/docs/api/paddle/distributed/scatter_cn.rst @@ -6,9 +6,9 @@ scatter .. py:function:: paddle.distributed.scatter(tensor, tensor_list=None, src=0, group=0) -进程组内指定进程源的tensor列表分发到其他所有进程中。 -如下图所示,4个GPU分别开启4个进程,scatter的源选择为第0张卡, -经过scatter算子后,会将第0张卡的数据平均分到所有卡上。 +进程组内指定进程源的 tensor 列表分发到其他所有进程中。 +如下图所示,4 个 GPU 分别开启 4 个进程,scatter 的源选择为第 0 张卡, +经过 scatter 算子后,会将第 0 张卡的数据平均分到所有卡上。 .. 
image:: ./img/scatter.png :width: 800 @@ -17,10 +17,10 @@ scatter 参数 ::::::::: - - tensor (Tensor) - 操作的输出Tensor。Tensor的数据类型为:float16、float32、float64、int32、int64。 - - tensor_list (list,可选) - 操作的输入Tensor列表,默认为None。列表中的每个元素均为Tensor,每个Tensor的数据类型为:float16、float32、float64、int32、int64。 - - src (int,可选) - 操作的源进程号,该进程号的Tensor列表将分发到其他进程中。默认为0。 - - group (int,可选) - 工作的进程组编号,默认为0。 + - tensor (Tensor) - 操作的输出 Tensor。Tensor 的数据类型为:float16、float32、float64、int32、int64。 + - tensor_list (list,可选) - 操作的输入 Tensor 列表,默认为 None。列表中的每个元素均为 Tensor,每个 Tensor 的数据类型为:float16、float32、float64、int32、int64。 + - src (int,可选) - 操作的源进程号,该进程号的 Tensor 列表将分发到其他进程中。默认为 0。 + - group (int,可选) - 工作的进程组编号,默认为 0。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/send_cn.rst b/docs/api/paddle/distributed/send_cn.rst index 5cad655da6f..72fa090814a 100644 --- a/docs/api/paddle/distributed/send_cn.rst +++ b/docs/api/paddle/distributed/send_cn.rst @@ -6,13 +6,13 @@ send .. py:function:: paddle.distributed.send(tensor, dst=0, group=None, use_calc_stream=True) -发送tensor到指定接收者。 +发送 tensor 到指定接收者。 参数 ::::::::: - - tensor (Tensor) - 需要发送的Tensor。数据类型为:float16、float32、float64、int32、int64。 + - tensor (Tensor) - 需要发送的 Tensor。数据类型为:float16、float32、float64、int32、int64。 - dst (int) - 接收者的标识符。 - - group (Group,可选) - new_group返回的Group实例,或者设置为None表示默认地全局组。默认值:None。 + - group (Group,可选) - new_group 返回的 Group 实例,或者设置为 None 表示默认地全局组。默认值:None。 - use_calc_stream (bool,可选) - 标识使用计算流还是通信流。默认值:True。 返回 diff --git a/docs/api/paddle/distributed/sharding/group_sharded_parallel_cn.rst b/docs/api/paddle/distributed/sharding/group_sharded_parallel_cn.rst index 85854ddd88d..982581bf822 100644 --- a/docs/api/paddle/distributed/sharding/group_sharded_parallel_cn.rst +++ b/docs/api/paddle/distributed/sharding/group_sharded_parallel_cn.rst @@ -6,26 +6,26 @@ group_sharded_parallel .. 
py:function:: paddle.distributed.sharding.group_sharded_parallel(model, optimizer, level, scaler=None, group=None, offload=False, sync_buffers=False, buffer_max_size=2**23, segment_size=2**20, sync_comm=False) -使用group_sharded_parallel可以对模型、优化器和GradScaler做group sharded配置。level有三个字符串选项,分别是'os','os_g','p_g_os',分别对应优化器状态切分、优化器状态+梯度切分、参数+梯度+优化器状态切分三种不同的使用场景。 +使用 group_sharded_parallel 可以对模型、优化器和 GradScaler 做 group sharded 配置。level 有三个字符串选项,分别是'os','os_g','p_g_os',分别对应优化器状态切分、优化器状态+梯度切分、参数+梯度+优化器状态切分三种不同的使用场景。 通常情况下优化器状态+梯度切分实际上是优化器状态切分的一种再优化,所以实现上可以用优化器状态+梯度切分实现优化器状态切分。 参数 ::::::::: - - model (Layer) - 需要使用group sharded的模型。 - - optimizer (Optimizer) - 需要使用group sharded的优化器。 - - level (str) - 选择group sharded的级别,分别有'os','os_g','p_g_os'。 - - scaler (GradScaler,可选) - 如果使用AMP混合精度,需要传入GradScaler,默认为None,表示不使用GradScaler。 - - group (Group,可选) - 工作的进程组编号,默认为None,表示采用默认环境Group。 - - offload (bool,可选) - 是否使用offload缓存功能,默认为False,表示不使用offload功能。 - - sync_buffers (bool,可选) - 是否需要同步模型buffers,一般在有注册模型buffers时才使用,默认为False,表示不同步模型buffers。 - - buffer_max_size (int,可选) - 在'os_g'模式中会对梯度进行聚合,此选项指定聚合buffer的大小,指定越大则占用显存也越多,默认为2**23,表示聚合buffer的维度为2**23。 - - segment_size (int,可选) - 在'p_g_os'模式中会对参数进行切分,此选项指定最小切分参数大小,默认为2**20,表示最小被切分参数的维度为2**20。 - - sync_comm (bool,可选) - 在'p_g_os'模式中是否采用同步通信,默认为False,表示使用异步通信流。 + - model (Layer) - 需要使用 group sharded 的模型。 + - optimizer (Optimizer) - 需要使用 group sharded 的优化器。 + - level (str) - 选择 group sharded 的级别,分别有'os','os_g','p_g_os'。 + - scaler (GradScaler,可选) - 如果使用 AMP 混合精度,需要传入 GradScaler,默认为 None,表示不使用 GradScaler。 + - group (Group,可选) - 工作的进程组编号,默认为 None,表示采用默认环境 Group。 + - offload (bool,可选) - 是否使用 offload 缓存功能,默认为 False,表示不使用 offload 功能。 + - sync_buffers (bool,可选) - 是否需要同步模型 buffers,一般在有注册模型 buffers 时才使用,默认为 False,表示不同步模型 buffers。 + - buffer_max_size (int,可选) - 在'os_g'模式中会对梯度进行聚合,此选项指定聚合 buffer 的大小,指定越大则占用显存也越多,默认为 2**23,表示聚合 buffer 的维度为 2**23。 + - segment_size (int,可选) - 在'p_g_os'模式中会对参数进行切分,此选项指定最小切分参数大小,默认为 2**20,表示最小被切分参数的维度为 2**20。 + - sync_comm 
(bool,可选) - 在'p_g_os'模式中是否采用同步通信,默认为 False,表示使用异步通信流。 返回 ::::::::: -group sharded配置后的model,optimizer和scaler +group sharded 配置后的 model,optimizer 和 scaler 代码示例 ::::::::: diff --git a/docs/api/paddle/distributed/sharding/save_group_sharded_model_cn.rst b/docs/api/paddle/distributed/sharding/save_group_sharded_model_cn.rst index bf1e2411fc9..24d3ee25551 100644 --- a/docs/api/paddle/distributed/sharding/save_group_sharded_model_cn.rst +++ b/docs/api/paddle/distributed/sharding/save_group_sharded_model_cn.rst @@ -6,17 +6,17 @@ save_group_sharded_model .. py:function:: paddle.distributed.sharding.save_group_sharded_model(model, output, optimizer=None) -使用save_group_sharded_model可以对group_sharded_parallel配置后的模型和优化器状态进行保存。 +使用 save_group_sharded_model 可以对 group_sharded_parallel 配置后的模型和优化器状态进行保存。 .. note:: - 此处需要注意,使用save_group_sharded_model保存模型,再次load时需要在调用group_sharded_parallel前对model和optimizer进行set_state_dict。 + 此处需要注意,使用 save_group_sharded_model 保存模型,再次 load 时需要在调用 group_sharded_parallel 前对 model 和 optimizer 进行 set_state_dict。 参数 ::::::::: - - model (Layer) - 使用group_sharded_parallel配置后的模型。 + - model (Layer) - 使用 group_sharded_parallel 配置后的模型。 - output (str) - 输出保存模型和优化器的文件夹路径。 - - optimizer (Optimizer,可选) - 使用group_sharded_parallel配置后的优化器,默认为None,表示不对优化器状态进行保存。 + - optimizer (Optimizer,可选) - 使用 group_sharded_parallel 配置后的优化器,默认为 None,表示不对优化器状态进行保存。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/spawn_cn.rst b/docs/api/paddle/distributed/spawn_cn.rst index 9a2e75c0771..a5d57844137 100644 --- a/docs/api/paddle/distributed/spawn_cn.rst +++ b/docs/api/paddle/distributed/spawn_cn.rst @@ -14,10 +14,10 @@ spawn ::::::::: - func (function) - 由 ``spawn`` 方法启动的进程所调用的目标函数。该目标函数需要能够被 ``pickled`` (序列化),所以目标函数必须定义为模块的一级函数,不能是内部子函数或者类方法。 - args (tuple,可选) - 传入目标函数 ``func`` 的参数。 - - nprocs (int,可选) - 启动进程的数目。默认值为-1。当 ``nproc`` 为-1时,模型执行时将会从环境变量中获取当前可用的所有设备进行使用:如果使用GPU执行任务,将会从环境变量 ``CUDA_VISIBLE_DEVICES`` 中获取当前所有可用的设备ID;如果使用XPU执行任务,将会从环境变量 ``XPU_VISIBLE_DEVICES`` 中获取当前所有可用的设备ID。 - 
- join (bool,可选) - 对所有启动的进程执行阻塞的 ``join``,等待进程执行结束。默认为True。 - - daemon (bool,可选) - 配置启动进程的 ``daemon`` 属性。默认为False。 - - **options (dict,可选) - 其他初始化并行执行环境的配置选项。目前支持以下选项:(1) start_method (string) - 启动子进程的方法。进程的启动方法可以是 ``spawn`` , ``fork`` , ``forkserver``。因为CUDA运行时环境不支持 ``fork`` 方法,当在子进程中使用CUDA时,需要使用 ``spawn`` 或者 ``forkserver`` 方法启动进程。默认方法为 ``spawn`` ; (2) gpus (string) - 指定训练使用的GPU ID,例如 "0,1,2,3",默认值为None ; (3) xpus (string) - 指定训练使用的XPU ID,例如 "0,1,2,3",默认值为None ; (4) ips (string) - 运行集群的节点(机器)IP,例如 "192.168.0.16,192.168.0.17",默认值为 "127.0.0.1" 。 + - nprocs (int,可选) - 启动进程的数目。默认值为-1。当 ``nproc`` 为-1 时,模型执行时将会从环境变量中获取当前可用的所有设备进行使用:如果使用 GPU 执行任务,将会从环境变量 ``CUDA_VISIBLE_DEVICES`` 中获取当前所有可用的设备 ID;如果使用 XPU 执行任务,将会从环境变量 ``XPU_VISIBLE_DEVICES`` 中获取当前所有可用的设备 ID。 + - join (bool,可选) - 对所有启动的进程执行阻塞的 ``join``,等待进程执行结束。默认为 True。 + - daemon (bool,可选) - 配置启动进程的 ``daemon`` 属性。默认为 False。 + - **options (dict,可选) - 其他初始化并行执行环境的配置选项。目前支持以下选项:(1) start_method (string) - 启动子进程的方法。进程的启动方法可以是 ``spawn`` , ``fork`` , ``forkserver``。因为 CUDA 运行时环境不支持 ``fork`` 方法,当在子进程中使用 CUDA 时,需要使用 ``spawn`` 或者 ``forkserver`` 方法启动进程。默认方法为 ``spawn`` ; (2) gpus (string) - 指定训练使用的 GPU ID,例如 "0,1,2,3",默认值为 None ; (3) xpus (string) - 指定训练使用的 XPU ID,例如 "0,1,2,3",默认值为 None ; (4) ips (string) - 运行集群的节点(机器)IP,例如 "192.168.0.16,192.168.0.17",默认值为 "127.0.0.1" 。 返回 ::::::::: diff --git a/docs/api/paddle/distributed/split_cn.rst b/docs/api/paddle/distributed/split_cn.rst index 65c6e4827ad..5a7b2896db3 100644 --- a/docs/api/paddle/distributed/split_cn.rst +++ b/docs/api/paddle/distributed/split_cn.rst @@ -10,12 +10,12 @@ split 当前,支持一下三种情形。 -情形1:并行Embedding - Embedding操作的参数是个NxM的矩阵,行数为N,列数为M。并行Embedding情形下,参数切分到num_partitions个设备,每个设备上的参数是 (N/num_partitions + 1)行、M列的矩阵。其中,最后一行作为padding idx。 +情形 1:并行 Embedding + Embedding 操作的参数是个 NxM 的矩阵,行数为 N,列数为 M。并行 Embedding 情形下,参数切分到 num_partitions 个设备,每个设备上的参数是 (N/num_partitions + 1)行、M 列的矩阵。其中,最后一行作为 padding idx。 - 
假设将NxM的参数矩阵切分到两个设备device_0和device_1。那么每个设备上的参数矩阵为(N/2+1)行和M列。device_0上,输入x中的值如果介于[0, N/2-1],则其值保持不变;否则值变更为N/2,经过embedding映射为全0值。类似地,device_1上,输入x中的值V如果介于[N/2, N-1]之间,那么这些值将变更为(V-N/2);否则,值变更为N/2,经过embedding映射为全0值。最后,使用all_reduce_sum操作汇聚各个卡上的结果。 + 假设将 NxM 的参数矩阵切分到两个设备 device_0 和 device_1。那么每个设备上的参数矩阵为(N/2+1)行和 M 列。device_0 上,输入 x 中的值如果介于[0, N/2-1],则其值保持不变;否则值变更为 N/2,经过 embedding 映射为全 0 值。类似地,device_1 上,输入 x 中的值 V 如果介于[N/2, N-1]之间,那么这些值将变更为(V-N/2);否则,值变更为 N/2,经过 embedding 映射为全 0 值。最后,使用 all_reduce_sum 操作汇聚各个卡上的结果。 - 单卡Embedding情况如下图所示 + 单卡 Embedding 情况如下图所示 .. image:: ./img/split_embedding_single.png :width: 800 @@ -23,17 +23,17 @@ split :alt: single_embedding :align: center - 并行Embedding情况如下图所示 + 并行 Embedding 情况如下图所示 .. image:: ./img/split_embedding_split.png :width: 800 :alt: split_embedding :align: center -情形2:行并行Linear - Linear操作是将输入变量X(N*N)与权重矩阵W(N*M)进行矩阵相乘。行并行Linear情形下,参数切分到num_partitions个设备,每个设备上的参数是N/num_partitions行、M列的矩阵。 +情形 2:行并行 Linear + Linear 操作是将输入变量 X(N*N)与权重矩阵 W(N*M)进行矩阵相乘。行并行 Linear 情形下,参数切分到 num_partitions 个设备,每个设备上的参数是 N/num_partitions 行、M 列的矩阵。 - 单卡Linear情况如下图所示,输入变量用X表示,权重矩阵用W表示,输出变量用O表示,单卡Linear就是一个简单的矩阵乘操作,O = X * W。 + 单卡 Linear 情况如下图所示,输入变量用 X 表示,权重矩阵用 W 表示,输出变量用 O 表示,单卡 Linear 就是一个简单的矩阵乘操作,O = X * W。 .. image:: ./img/split_single.png @@ -41,27 +41,27 @@ split :alt: single_linear :align: center - 行并行Linear情况如下图所示,顾名思义,行并行是按照权重矩阵W的行切分权重矩阵为 - [[W_row1], [W_row2]],对应的输入X也按照列切成了两份[X_col1, X_col2],分别与各自对应的权重矩阵相乘, - 最后通过AllReduce规约每张卡的输出得到最终输出。 + 行并行 Linear 情况如下图所示,顾名思义,行并行是按照权重矩阵 W 的行切分权重矩阵为 + [[W_row1], [W_row2]],对应的输入 X 也按照列切成了两份[X_col1, X_col2],分别与各自对应的权重矩阵相乘, + 最后通过 AllReduce 规约每张卡的输出得到最终输出。 .. 
image:: ./img/split_row.png :width: 800 :alt: split_row :align: center -情形3:列并行Linear - Linear操作是将输入变量X(N*N)与权重矩阵W(N*M)进行矩阵相乘。列并行Linear情形下,参数切分到num_partitions个设备,每个设备上的参数是N行、M/num_partitions列的矩阵。 +情形 3:列并行 Linear + Linear 操作是将输入变量 X(N*N)与权重矩阵 W(N*M)进行矩阵相乘。列并行 Linear 情形下,参数切分到 num_partitions 个设备,每个设备上的参数是 N 行、M/num_partitions 列的矩阵。 - 单卡并行Linear可以看上面对应的图,列并行Linear情况如下图所示。列并行是按照权重矩阵W的列切分权重矩阵为[W_col1, W_col2], - X分别与切分出来的矩阵相乘,最后通过AllGather拼接每张卡的输出得到最终输出。 + 单卡并行 Linear 可以看上面对应的图,列并行 Linear 情况如下图所示。列并行是按照权重矩阵 W 的列切分权重矩阵为[W_col1, W_col2], + X 分别与切分出来的矩阵相乘,最后通过 AllGather 拼接每张卡的输出得到最终输出。 .. image:: ./img/split_col.png :width: 800 :alt: split_col :align: center -我们观察到,可以把上述按列切分矩阵乘法和按行切分矩阵乘法串联起来,从而省略掉一次AllGather通信操作,如下图所示。同时,我们注意到Transformer的Attention和MLP组件中各种两次矩阵乘法操作。因此,我们可以按照这种串联方式分别把Attention和MLP组件中的两次矩阵乘法串联起来,从而进一步优化性能。 +我们观察到,可以把上述按列切分矩阵乘法和按行切分矩阵乘法串联起来,从而省略掉一次 AllGather 通信操作,如下图所示。同时,我们注意到 Transformer 的 Attention 和 MLP 组件中各种两次矩阵乘法操作。因此,我们可以按照这种串联方式分别把 Attention 和 MLP 组件中的两次矩阵乘法串联起来,从而进一步优化性能。 .. image:: ./img/split_col_row.png :width: 800 @@ -71,8 +71,8 @@ split 参数 ::::::::: - - x (Tensor) - 输入Tensor。Tensor的数据类型为:float16、float32、float64、int32、int64。 - - size (list|tuple) - 指定参数形状的列表或元组,包含2个元素。 + - x (Tensor) - 输入 Tensor。Tensor 的数据类型为:float16、float32、float64、int32、int64。 + - size (list|tuple) - 指定参数形状的列表或元组,包含 2 个元素。 - operation (str) - 指定操作名称,当前支持的操作名称为'embedding'或'linear'。 - axis (int,可选) - 指定沿哪个维度切分参数。默认值:0。 - num_partitions (int,可选) - 指定参数的划分数。默认值:1。 diff --git a/docs/api/paddle/distributed/utils/global_gather_cn.rst b/docs/api/paddle/distributed/utils/global_gather_cn.rst index c8e708d0273..de53a905794 100644 --- a/docs/api/paddle/distributed/utils/global_gather_cn.rst +++ b/docs/api/paddle/distributed/utils/global_gather_cn.rst @@ -6,25 +6,25 @@ global_gather .. 
py:function:: paddle.distributed.utils.global_gather(x, local_count, global_count, group=None, use_calc_stream=True) -global_gather根据global_count将x的数据收集到n_expert * world_size个expert,然后根据local_count接收数据。 -其中expert是用户定义的专家网络,n_expert是指每张卡拥有的专家网络数目,world_size是指运行网络的显卡数目。 +global_gather 根据 global_count 将 x 的数据收集到 n_expert * world_size 个 expert,然后根据 local_count 接收数据。 +其中 expert 是用户定义的专家网络,n_expert 是指每张卡拥有的专家网络数目,world_size 是指运行网络的显卡数目。 -如下图所示,world_size是2,n_expert是2,x的batch_size是4,local_count是[2, 0, 2, 0],0卡的global_count是[2, 0, , ], -1卡的global_count是[2, 0, ,](因为篇幅问题,这里只展示在0卡运算的数据),在global_gather算子里, -global_count和local_count的意义与其在global_scatter里正好相反, -global_count[i]代表向第 (i // n_expert)张卡的第 (i % n_expert)个expert发送local_expert[i]个数据, -local_count[i]代表从第 (i // n_expert)张卡接收global_count[i]个数据给本卡的 第(i % n_expert)个expert。 -发送的数据会按照每张卡的每个expert排列。图中的rank0代表第0张卡,rank1代表第1张卡。 +如下图所示,world_size 是 2,n_expert 是 2,x 的 batch_size 是 4,local_count 是[2, 0, 2, 0],0 卡的 global_count 是[2, 0, , ], +1 卡的 global_count 是[2, 0, ,](因为篇幅问题,这里只展示在 0 卡运算的数据),在 global_gather 算子里, +global_count 和 local_count 的意义与其在 global_scatter 里正好相反, +global_count[i]代表向第 (i // n_expert)张卡的第 (i % n_expert)个 expert 发送 local_expert[i]个数据, +local_count[i]代表从第 (i // n_expert)张卡接收 global_count[i]个数据给本卡的 第(i % n_expert)个 expert。 +发送的数据会按照每张卡的每个 expert 排列。图中的 rank0 代表第 0 张卡,rank1 代表第 1 张卡。 -global_gather发送数据的流程如下: +global_gather 发送数据的流程如下: -第0张卡的global_count[0]代表向第0张卡的第0个expert发送2个数据; +第 0 张卡的 global_count[0]代表向第 0 张卡的第 0 个 expert 发送 2 个数据; -第0张卡的global_count[1]代表向第0张卡的第1个expert发送0个数据; +第 0 张卡的 global_count[1]代表向第 0 张卡的第 1 个 expert 发送 0 个数据; -第1张卡的global_count[0]代表向第0张卡的第0个expert发送2个数据; +第 1 张卡的 global_count[0]代表向第 0 张卡的第 0 个 expert 发送 2 个数据; -第1张卡的global_count[1]代表向第0张卡的第1个expert发送0个数据。 +第 1 张卡的 global_count[1]代表向第 0 张卡的第 1 个 expert 发送 0 个数据。 .. 
image:: ../img/global_scatter_gather.png @@ -35,15 +35,15 @@ global_gather发送数据的流程如下: 参数 ::::::::: - - x (Tensor) - 输入Tensor。Tensor的数据类型必须是float16、float32、 float64、int32、int64。 - - local_count (Tensor) - 拥有n_expert * world_size个数据的Tensor,用于表示有多少数据接收。Tensor的数据类型必须是int64。 - - global_count (Tensor) - 拥有n_expert * world_size个数据的Tensor,用于表示有多少数据发送。Tensor的数据类型必须是int64。 - - group (Group,可选) - new_group返回的Group实例,或者设置为None表示默认地全局组。默认值:None。 + - x (Tensor) - 输入 Tensor。Tensor 的数据类型必须是 float16、float32、 float64、int32、int64。 + - local_count (Tensor) - 拥有 n_expert * world_size 个数据的 Tensor,用于表示有多少数据接收。Tensor 的数据类型必须是 int64。 + - global_count (Tensor) - 拥有 n_expert * world_size 个数据的 Tensor,用于表示有多少数据发送。Tensor 的数据类型必须是 int64。 + - group (Group,可选) - new_group 返回的 Group 实例,或者设置为 None 表示默认地全局组。默认值:None。 - use_calc_stream (bool,可选) - 标识使用计算流还是通信流。默认值:True,表示用计算流。 返回 ::::::::: -Tensor,从所有expert接收的数据。 +Tensor,从所有 expert 接收的数据。 代码示例 ::::::::: diff --git a/docs/api/paddle/distributed/utils/global_scatter_cn.rst b/docs/api/paddle/distributed/utils/global_scatter_cn.rst index 7f6d4b3486c..526062d89b7 100644 --- a/docs/api/paddle/distributed/utils/global_scatter_cn.rst +++ b/docs/api/paddle/distributed/utils/global_scatter_cn.rst @@ -6,31 +6,31 @@ global_scatter .. 
py:function:: paddle.distributed.utils.global_scatter(x, local_count, global_count, group=None, use_calc_stream=True) -global_scatter根据local_count将x的数据分发到n_expert * world_size个expert,然后根据global_count接收数据。 -其中expert是用户定义的专家网络,n_expert是指每张卡拥有的专家网络数目,world_size是指运行网络的显卡数目。 +global_scatter 根据 local_count 将 x 的数据分发到 n_expert * world_size 个 expert,然后根据 global_count 接收数据。 +其中 expert 是用户定义的专家网络,n_expert 是指每张卡拥有的专家网络数目,world_size 是指运行网络的显卡数目。 -如下图所示,world_size是2,n_expert是2,x的batch_size是4,local_count是[2, 0, 2, 0],0卡的global_count是[2, 0, , ], -1卡的global_count是[2, 0, ,](因为篇幅问题,这里只展示在0卡运算的数据),在global_scatter算子里, -local_count[i]代表向第 (i // n_expert)张卡的第 (i % n_expert)个expert发送local_expert[i]个数据, -global_count[i]代表从第 (i // n_expert)张卡接收global_count[i]个数据给本卡的 第(i % n_expert)个expert。 -图中的rank0代表第0张卡,rank1代表第1张卡。 -global_scatter发送数据的流程如下: +如下图所示,world_size 是 2,n_expert 是 2,x 的 batch_size 是 4,local_count 是[2, 0, 2, 0],0 卡的 global_count 是[2, 0, , ], +1 卡的 global_count 是[2, 0, ,](因为篇幅问题,这里只展示在 0 卡运算的数据),在 global_scatter 算子里, +local_count[i]代表向第 (i // n_expert)张卡的第 (i % n_expert)个 expert 发送 local_expert[i]个数据, +global_count[i]代表从第 (i // n_expert)张卡接收 global_count[i]个数据给本卡的 第(i % n_expert)个 expert。 +图中的 rank0 代表第 0 张卡,rank1 代表第 1 张卡。 +global_scatter 发送数据的流程如下: -local_count[0]代表从x里取出2个batch的数据向第0张卡的第0个expert发送2个数据; +local_count[0]代表从 x 里取出 2 个 batch 的数据向第 0 张卡的第 0 个 expert 发送 2 个数据; -local_count[1]代表从x里取出0个batch的数据向第0张卡的第1个expert发送0个数据; +local_count[1]代表从 x 里取出 0 个 batch 的数据向第 0 张卡的第 1 个 expert 发送 0 个数据; -local_count[2]代表从x里取出2个batch的数据向第1张卡的第0个expert发送2个数据; +local_count[2]代表从 x 里取出 2 个 batch 的数据向第 1 张卡的第 0 个 expert 发送 2 个数据; -local_count[3]代表从x里取出0个batch的数据向第1张卡的第1个expert发送0个数据; +local_count[3]代表从 x 里取出 0 个 batch 的数据向第 1 张卡的第 1 个 expert 发送 0 个数据; -所以第0张卡的global_count[0]等于2,代表从第0张卡接收2个batch的数据给第0个expert; +所以第 0 张卡的 global_count[0]等于 2,代表从第 0 张卡接收 2 个 batch 的数据给第 0 个 expert; -第0张卡的global_count[1]等于0,代表从第0张卡接收0个batch的数据给第1个expert; +第 0 张卡的 global_count[1]等于 0,代表从第 0 张卡接收 0 个 batch 的数据给第 1 个 
expert; -第1张卡的global_count[0]等于2,代表从第0张卡接收2个batch的数据给第0个expert; +第 1 张卡的 global_count[0]等于 2,代表从第 0 张卡接收 2 个 batch 的数据给第 0 个 expert; -第1张卡的global_count[1]等与0,代表从第0张卡接收0个batch的数据给第1个expert。 +第 1 张卡的 global_count[1]等与 0,代表从第 0 张卡接收 0 个 batch 的数据给第 1 个 expert。 .. image:: ../img/global_scatter_gather.png @@ -40,15 +40,15 @@ local_count[3]代表从x里取出0个batch的数据向第1张卡的第1个expert 参数 ::::::::: - - x (Tensor) - 输入Tensor。Tensor的数据类型必须是float16、float32、 float64、int32、int64。 - - local_count (Tensor) - 拥有n_expert * world_size个数据的Tensor,用于表示有多少数据发送。Tensor的数据类型必须是int64。 - - global_count (Tensor) - 拥有n_expert * world_size个数据的Tensor,用于表示有多少数据接收。Tensor的数据类型必须是int64。 - - group (Group,可选) - new_group返回的Group实例,或者设置为None表示默认地全局组。默认值:None。 + - x (Tensor) - 输入 Tensor。Tensor 的数据类型必须是 float16、float32、 float64、int32、int64。 + - local_count (Tensor) - 拥有 n_expert * world_size 个数据的 Tensor,用于表示有多少数据发送。Tensor 的数据类型必须是 int64。 + - global_count (Tensor) - 拥有 n_expert * world_size 个数据的 Tensor,用于表示有多少数据接收。Tensor 的数据类型必须是 int64。 + - group (Group,可选) - new_group 返回的 Group 实例,或者设置为 None 表示默认地全局组。默认值:None。 - use_calc_stream (bool,可选) - 标识使用计算流还是通信流。默认值:True,表示使用计算流。 返回 ::::::::: -Tensor,从所有expert接收的数据,按照每个expert排列。 +Tensor,从所有 expert 接收的数据,按照每个 expert 排列。 代码示例 ::::::::: diff --git a/docs/api/paddle/distribution/Beta_cn.rst b/docs/api/paddle/distribution/Beta_cn.rst index 2c8c60f0be3..df8abea1b34 100644 --- a/docs/api/paddle/distribution/Beta_cn.rst +++ b/docs/api/paddle/distribution/Beta_cn.rst @@ -6,7 +6,7 @@ Beta .. py:class:: paddle.distribution.Beta(alpha, beta) -在概率论中,Beta分布是指一组定义在 [0,1] 区间的连续概率分布,有两个参数 +在概率论中,Beta 分布是指一组定义在 [0,1] 区间的连续概率分布,有两个参数 :math:`\alpha,\beta>0`,是狄利克雷(:ref:`cn_api_paddle_distribution_Dirichlet`) 分布的一元形式。 @@ -16,7 +16,7 @@ Beta f(x; \alpha, \beta) = \frac{1}{B(\alpha, \beta)}x^{\alpha-1}(1-x)^{\beta-1} -其中,B为Beta函数,表示归一化因子: +其中,B 为 Beta 函数,表示归一化因子: .. 
math:: @@ -25,11 +25,11 @@ Beta 参数 ::::::::: -- **alpha** (float|Tensor) - 即上述公式中 :math:`\alpha` 参数,大于零,支持Broadcast - 语义。当参数类型为Tensor时,表示批量创建多个不同参数的分布,``batch_shape`` (参考 :ref:`cn_api_distribution_Distribution` 基类) 为参数 - Broadcast后的形状。 -- **beta** (float|Tensor) - 即上述公式中 :math:`\beta` 参数,大于零,支持Broadcast语 - 义。当参数类型为Tensor时,表示批量创建多个不同参数的分布,``batch_shape`` (参考 :ref:`cn_api_distribution_Distribution` 基类) 为参数Broadcast +- **alpha** (float|Tensor) - 即上述公式中 :math:`\alpha` 参数,大于零,支持 Broadcast + 语义。当参数类型为 Tensor 时,表示批量创建多个不同参数的分布,``batch_shape`` (参考 :ref:`cn_api_distribution_Distribution` 基类) 为参数 + Broadcast 后的形状。 +- **beta** (float|Tensor) - 即上述公式中 :math:`\beta` 参数,大于零,支持 Broadcast 语 + 义。当参数类型为 Tensor 时,表示批量创建多个不同参数的分布,``batch_shape`` (参考 :ref:`cn_api_distribution_Distribution` 基类) 为参数 Broadcast 后的形状。 代码示例 @@ -43,19 +43,19 @@ COPY-FROM: paddle.distribution.Beta mean() ''''''''' -计算Beta分布均值。 +计算 Beta 分布均值。 variance() ''''''''' -计算Beta分布方差。 +计算 Beta 分布方差。 prob(value) ''''''''' -计算value的概率。 +计算 value 的概率。 **参数** @@ -63,13 +63,13 @@ prob(value) **返回** -- Tensor: value的概率。 +- Tensor: value 的概率。 log_prob(value) ''''''''' -计算value的对数概率。 +计算 value 的对数概率。 **参数** @@ -77,13 +77,13 @@ log_prob(value) **返回** -- Tensor: value的对数概率。 +- Tensor: value 的对数概率。 sample() ''''''''' -从Beta分布中生成满足特定形状的样本数据。 +从 Beta 分布中生成满足特定形状的样本数据。 **参数** @@ -96,4 +96,4 @@ sample() entropy() ''''''''' -计算Beta分布的信息熵。 +计算 Beta 分布的信息熵。 diff --git a/docs/api/paddle/distribution/Categorical_cn.rst b/docs/api/paddle/distribution/Categorical_cn.rst index b323c735856..57b7a35aab3 100644 --- a/docs/api/paddle/distribution/Categorical_cn.rst +++ b/docs/api/paddle/distribution/Categorical_cn.rst @@ -8,7 +8,7 @@ Categorical -类别分布是一种离散概率分布,其随机变量可以取K个相互独立类别的其中一个。 +类别分布是一种离散概率分布,其随机变量可以取 K 个相互独立类别的其中一个。 概率质量函数(pmf)为: @@ -18,13 +18,13 @@ Categorical 上面公式中: - - :math:`[x = i]` 表示:如果 :math:`x==i`,则表达式取值为1,否则取值为0。 + - :math:`[x = i]` 表示:如果 :math:`x==i`,则表达式取值为 1,否则取值为 0。 参数 :::::::::::: - - **logits** (list|numpy.ndarray|Tensor) - 
类别分布对应的logits。数据类型为float32或float64。 + - **logits** (list|numpy.ndarray|Tensor) - 类别分布对应的 logits。数据类型为 float32 或 float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 代码示例 @@ -108,15 +108,15 @@ sample(shape) kl_divergence(other) ''''''''' -相对于另一个类别分布的KL散度。 +相对于另一个类别分布的 KL 散度。 **参数** - - **other** (Categorical) - 输入的另一个类别分布。数据类型为float32。 + - **other** (Categorical) - 输入的另一个类别分布。数据类型为 float32。 **返回** -相对于另一个类别分布的KL散度,数据类型为float32。 +相对于另一个类别分布的 KL 散度,数据类型为 float32。 **代码示例** @@ -150,7 +150,7 @@ entropy() **返回** -类别分布的信息熵,数据类型为float32。 +类别分布的信息熵,数据类型为 float32。 **代码示例** @@ -174,13 +174,13 @@ probs(value) ''''''''' 所选择类别的概率。 -如果 ``logtis`` 是2-D或更高阶的Tensor,那么其最后一个维度表示不同类别的概率,其它维度被看做不同的概率分布。 -同时,如果 ``value`` 是1-D Tensor,那么 ``value`` 会broadcast成与 ``logits`` 具有相同的概率分布数量。 -如果 ``value`` 为更高阶Tensor,那么 ``value`` 应该与 ``logits`` 具有相同的概率分布数量。也就是说,``value[:-1] = logits[:-1]`` 。 +如果 ``logtis`` 是 2-D 或更高阶的 Tensor,那么其最后一个维度表示不同类别的概率,其它维度被看做不同的概率分布。 +同时,如果 ``value`` 是 1-D Tensor,那么 ``value`` 会 broadcast 成与 ``logits`` 具有相同的概率分布数量。 +如果 ``value`` 为更高阶 Tensor,那么 ``value`` 应该与 ``logits`` 具有相同的概率分布数量。也就是说,``value[:-1] = logits[:-1]`` 。 **参数** - - **value** (Tensor) - 输入张量,表示选择的类别下标。数据类型为int32或int64。 + - **value** (Tensor) - 输入张量,表示选择的类别下标。数据类型为 int32 或 int64。 **返回** @@ -210,7 +210,7 @@ log_prob(value) **参数** - - **value** (Tensor) - 输入张量,表示选择的类别下标。数据类型为int32或int64。 + - **value** (Tensor) - 输入张量,表示选择的类别下标。数据类型为 int32 或 int64。 **返回** diff --git a/docs/api/paddle/distribution/Dirichlet_cn.rst b/docs/api/paddle/distribution/Dirichlet_cn.rst index 1ef625b9e0f..94f9e5a161b 100644 --- a/docs/api/paddle/distribution/Dirichlet_cn.rst +++ b/docs/api/paddle/distribution/Dirichlet_cn.rst @@ -6,7 +6,7 @@ Dirichlet .. 
py:class:: paddle.distribution.Dirichlet(concentration) -狄利克雷分布(Dirichlet distribution)是一类在实数域以正单纯形(standard simplex)为支撑集的高维连续概率分布,是Beta分布在高维情形的推广。 +狄利克雷分布(Dirichlet distribution)是一类在实数域以正单纯形(standard simplex)为支撑集的高维连续概率分布,是 Beta 分布在高维情形的推广。 对独立同分布(independent and identically distributed, iid)的连续随机变量 :math:`\boldsymbol X \in R_k`,和支撑集 :math:`\boldsymbol X \in (0,1), ||\boldsymbol X|| = 1`,其概率密度函数(pdf)为: @@ -15,21 +15,21 @@ Dirichlet f(\boldsymbol X; \boldsymbol \alpha) = \frac{1}{B(\boldsymbol \alpha)} \prod_{i=1}^{k}x_i^{\alpha_i-1} -其中,:math:`\boldsymbol \alpha = {\alpha_1,...,\alpha_k}, k \ge 2` 是无量纲分布参数,:math:`B(\boldsymbol \alpha)` 是多元Beta函数。 +其中,:math:`\boldsymbol \alpha = {\alpha_1,...,\alpha_k}, k \ge 2` 是无量纲分布参数,:math:`B(\boldsymbol \alpha)` 是多元 Beta 函数。 .. math:: B(\boldsymbol \alpha) = \frac{\prod_{i=1}^{k} \Gamma(\alpha_i)}{\Gamma(\alpha_0)} :math:`\alpha_0=\sum_{i=1}^{k} \alpha_i` 是分布参数的和,:math:`\Gamma(\alpha)` 为 -Gamma函数。 +Gamma 函数。 参数 ::::::::: - **concentration** (Tensor) - 浓度参数,即上述公式 :math:`\alpha` 参数。当 - concentration维度大于1时,最后一维表示参数,参数形状 - ``event_shape=concentration.shape[-1:]``,其余维为Batch维, + concentration 维度大于 1 时,最后一维表示参数,参数形状 + ``event_shape=concentration.shape[-1:]``,其余维为 Batch 维, ``batch_shape=concentration.shape[:-1]`` . 
@@ -56,7 +56,7 @@ variance prob(value) ''''''''' -计算value的概率。 +计算 value 的概率。 **参数** @@ -64,13 +64,13 @@ prob(value) **返回** -- Tensor: value的概率。 +- Tensor: value 的概率。 log_prob(value) ''''''''' -计算value的对数概率。 +计算 value 的对数概率。 **参数** @@ -78,13 +78,13 @@ log_prob(value) **返回** -- Tensor: value的对数概率。 +- Tensor: value 的对数概率。 sample() ''''''''' -从Beta分布中生成满足特定形状的样本数据。 +从 Beta 分布中生成满足特定形状的样本数据。 **参数** @@ -97,4 +97,4 @@ sample() entropy() ''''''''' -计算Beta分布的信息熵。 +计算 Beta 分布的信息熵。 diff --git a/docs/api/paddle/distribution/Distribution_cn.rst b/docs/api/paddle/distribution/Distribution_cn.rst index e3a0da2f4d4..c2a1b43b5f7 100644 --- a/docs/api/paddle/distribution/Distribution_cn.rst +++ b/docs/api/paddle/distribution/Distribution_cn.rst @@ -11,9 +11,9 @@ Distribution ::::::::: - **batch_shape** - 概率分布参数批量形状。一元分布 ``batch_shape=param.shape``,多元分 - 布 ``batch_shape=param.shape[:-1]``,其中param表示分布参数,支持broadcast语义。 + 布 ``batch_shape=param.shape[:-1]``,其中 param 表示分布参数,支持 broadcast 语义。 - **event_shape** - 多元概率分布维数形状。一元分布 ``event_shape=()``,多元分布 - ``event_shape=param.shape[-1:]``,其中param表示分布参数,支持broadcast语义。 + ``event_shape=param.shape[-1:]``,其中 param 表示分布参数,支持 broadcast 语义。 方法 @@ -50,8 +50,8 @@ probs(value) kl_divergence(other) ''''''''' -两个分布之间的KL散度。 +两个分布之间的 KL 散度。 **参数** - - **other** (Distribution) - Distribution的实例。 + - **other** (Distribution) - Distribution 的实例。 diff --git a/docs/api/paddle/distribution/Independent_cn.rst b/docs/api/paddle/distribution/Independent_cn.rst index 12f83e645a0..a95eeece471 100644 --- a/docs/api/paddle/distribution/Independent_cn.rst +++ b/docs/api/paddle/distribution/Independent_cn.rst @@ -38,7 +38,7 @@ property variance prob(value) ''''''''' -计算value的概率。 +计算 value 的概率。 **参数** @@ -46,13 +46,13 @@ prob(value) **返回** -- Tensor: value的概率。 +- Tensor: value 的概率。 log_prob(value) ''''''''' -计算value的对数概率。 +计算 value 的对数概率。 **参数** @@ -60,13 +60,13 @@ log_prob(value) **返回** -- Tensor: value的对数概率。 +- Tensor: value 的对数概率。 sample(shape=()) ''''''''' 
-从Beta分布中生成满足特定形状的样本数据。 +从 Beta 分布中生成满足特定形状的样本数据。 **参数** diff --git a/docs/api/paddle/distribution/Multinomial_cn.rst b/docs/api/paddle/distribution/Multinomial_cn.rst index 1f0afc5df8c..b10ada143be 100644 --- a/docs/api/paddle/distribution/Multinomial_cn.rst +++ b/docs/api/paddle/distribution/Multinomial_cn.rst @@ -28,7 +28,7 @@ Multinomial - **total_count** (int) - 实验次数。 - **probs** (Tensor) - 每个类别发生的概率。最后一维为事件维度,其它维为批维度。``probs`` 中 - 的每个元素取值范围为 ``[0,1]``。如果输入数据大于1,会沿着最后一维进行归一化操作。 + 的每个元素取值范围为 ``[0,1]``。如果输入数据大于 1,会沿着最后一维进行归一化操作。 代码示例 ::::::::: @@ -56,7 +56,7 @@ variance prob(value) ''''''''' -计算value的概率。 +计算 value 的概率。 **参数** @@ -64,13 +64,13 @@ prob(value) **返回** -- Tensor: value的概率。 +- Tensor: value 的概率。 log_prob(value) ''''''''' -计算value的对数概率。 +计算 value 的对数概率。 **参数** @@ -78,7 +78,7 @@ log_prob(value) **返回** -- Tensor: value的对数概率。 +- Tensor: value 的对数概率。 sample(shape=()) diff --git a/docs/api/paddle/distribution/Normal_cn.rst b/docs/api/paddle/distribution/Normal_cn.rst index 9d321f19059..1b9ff348354 100644 --- a/docs/api/paddle/distribution/Normal_cn.rst +++ b/docs/api/paddle/distribution/Normal_cn.rst @@ -27,8 +27,8 @@ Normal 参数 :::::::::::: - - **loc** (int|float|list|numpy.ndarray|Tensor) - 正态分布平均值。数据类型为int、float、list、numpy.ndarray或Tensor。 - - **scale** (int|float|list|numpy.ndarray|Tensor) - 正态分布标准差。数据类型为int、float、list、numpy.ndarray或Tensor。 + - **loc** (int|float|list|numpy.ndarray|Tensor) - 正态分布平均值。数据类型为 int、float、list、numpy.ndarray 或 Tensor。 + - **scale** (int|float|list|numpy.ndarray|Tensor) - 正态分布标准差。数据类型为 int、float、list、numpy.ndarray 或 Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 代码示例 @@ -47,12 +47,12 @@ sample(shape, seed=0) **参数** - - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **shape** (list) - 1 维列表,指定生成样本的维度。数据类型为 int32。 - **seed** (int) - 长整型数。 **返回** -Tensor,预先设计好维度的Tensor,数据类型为float32。 +Tensor,预先设计好维度的 Tensor,数据类型为 float32。 entropy() ''''''''' @@ -71,7 +71,7 @@ entropy() **返回** 
-Tensor,正态分布的信息熵,数据类型为float32。 +Tensor,正态分布的信息熵,数据类型为 float32。 log_prob(value) ''''''''' @@ -80,11 +80,11 @@ log_prob(value) **参数** - - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + - **value** (Tensor) - 输入张量。数据类型为 float32 或 float64。 **返回** -Tensor,对数概率,数据类型与value相同。 +Tensor,对数概率,数据类型与 value 相同。 probs(value) ''''''''' @@ -93,16 +93,16 @@ probs(value) **参数** - - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + - **value** (Tensor) - 输入张量。数据类型为 float32 或 float64。 **返回** -Tensor,概率,数据类型与value相同。 +Tensor,概率,数据类型与 value 相同。 kl_divergence(other) ''''''''' -两个正态分布之间的KL散度。 +两个正态分布之间的 KL 散度。 数学公式: @@ -125,8 +125,8 @@ kl_divergence(other) **参数** - - **other** (Normal) - Normal的实例。 + - **other** (Normal) - Normal 的实例。 **返回** -Tensor,两个正态分布之间的KL散度,数据类型为float32。 +Tensor,两个正态分布之间的 KL 散度,数据类型为 float32。 diff --git a/docs/api/paddle/distribution/Overview_cn.rst b/docs/api/paddle/distribution/Overview_cn.rst index 106eaa4cd39..ded6d5b77c9 100644 --- a/docs/api/paddle/distribution/Overview_cn.rst +++ b/docs/api/paddle/distribution/Overview_cn.rst @@ -3,12 +3,12 @@ paddle.distribution --------------------- -paddle.distribution 目录下包含飞桨框架支持的随机变量的概率分布、随机变量的变换、KL散度相关API。 +paddle.distribution 目录下包含飞桨框架支持的随机变量的概率分布、随机变量的变换、KL 散度相关 API。 具体如下: - :ref:`随机变量的概率分布 ` - :ref:`随机变量的变换 ` -- :ref:`KL散度相关API ` +- :ref:`KL 散度相关 API ` .. _about_distribution: @@ -17,19 +17,19 @@ paddle.distribution 目录下包含飞桨框架支持的随机变量的概率分 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`Distribution ` ", "Distribution概率分布抽象基类" - " :ref:`ExponentialFamily ` ", "ExponentialFamily指数型分布族基类" - " :ref:`Categorical ` ", "Categorical概率分布类" - " :ref:`Normal ` ", "Normal概率分布类" - " :ref:`Uniform ` ", "Uniform概率分布类" - " :ref:`Beta ` ", "Beta概率分布类" - " :ref:`Dirichlet ` ", "Dirichlet概率分布类" - " :ref:`Multinomial ` ", "Multinomial概率分布类" - " :ref:`Independent ` ", "Independent概率分布类" - " :ref:`TransfomedDistribution ` ", "TransformedDistribution概率分布类" + " :ref:`Distribution ` ", "Distribution 概率分布抽象基类" + " :ref:`ExponentialFamily ` ", "ExponentialFamily 指数型分布族基类" + " :ref:`Categorical ` ", "Categorical 概率分布类" + " :ref:`Normal ` ", "Normal 概率分布类" + " :ref:`Uniform ` ", "Uniform 概率分布类" + " :ref:`Beta ` ", "Beta 概率分布类" + " :ref:`Dirichlet ` ", "Dirichlet 概率分布类" + " :ref:`Multinomial ` ", "Multinomial 概率分布类" + " :ref:`Independent ` ", "Independent 概率分布类" + " :ref:`TransfomedDistribution ` ", "TransformedDistribution 概率分布类" .. _about_distribution_transform: @@ -37,7 +37,7 @@ paddle.distribution 目录下包含飞桨框架支持的随机变量的概率分 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`Transform ` ", "随机变量变换的基类" @@ -45,23 +45,23 @@ paddle.distribution 目录下包含飞桨框架支持的随机变量的概率分 " :ref:`AffineTransform ` ", "仿射变换" " :ref:`ChainTransform ` ", "链式组合变换" " :ref:`ExpTransform ` ", "指数变换" - " :ref:`IndependentTransform ` ", "Independent变换" + " :ref:`IndependentTransform ` ", "Independent 变换" " :ref:`PowerTransform ` ", "幂变换" - " :ref:`ReshapeTransform ` ", "Reshape变换" - " :ref:`SigmoidTransform ` ", "Sigmoid变换" - " :ref:`SoftmaxTransform ` ", "Softmax变换" - " :ref:`StackTransform ` ", "Stack变换" - " :ref:`StickBreakingTransform ` ", "StickBreaking变换" - " :ref:`TanhTransform ` ", "Tanh变换" + " :ref:`ReshapeTransform ` ", "Reshape 变换" + " :ref:`SigmoidTransform ` ", "Sigmoid 变换" + " :ref:`SoftmaxTransform ` ", "Softmax 变换" + " :ref:`StackTransform ` ", "Stack 变换" + " :ref:`StickBreakingTransform ` ", "StickBreaking 变换" + " :ref:`TanhTransform ` ", "Tanh 变换" .. _about_distribution_kl: -KL散度相关API +KL 散度相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`register_kl ` ", "注册KL散度" - " :ref:`kl_divergence ` ", "计算KL散度" + " :ref:`register_kl ` ", "注册 KL 散度" + " :ref:`kl_divergence ` ", "计算 KL 散度" diff --git a/docs/api/paddle/distribution/ReshapeTransform_cn.rst b/docs/api/paddle/distribution/ReshapeTransform_cn.rst index cb2ce1391dd..b89d51c3767 100644 --- a/docs/api/paddle/distribution/ReshapeTransform_cn.rst +++ b/docs/api/paddle/distribution/ReshapeTransform_cn.rst @@ -5,15 +5,15 @@ ReshapeTransform .. 
py:class:: paddle.distribution.ReshapeTransform(in_event_shape, out_event_shape) -``ReshapeTransform`` 将输入Tensor的事件形状 ``in_event_shape`` 改变为 ``out_event_shape`` 。 +``ReshapeTransform`` 将输入 Tensor 的事件形状 ``in_event_shape`` 改变为 ``out_event_shape`` 。 其中,``in_event_shape`` 、 ``out_event_shape`` 需要包含相同的元素个数。 参数 ::::::::: -- **in_event_shape** (Sequence[int]) - Reshape前的事件形状。 -- **out_event_shape** (float|Tensor) - Reshape后的事件形状。 +- **in_event_shape** (Sequence[int]) - Reshape 前的事件形状。 +- **out_event_shape** (float|Tensor) - Reshape 后的事件形状。 代码示例 diff --git a/docs/api/paddle/distribution/SigmoidTransform_cn.rst b/docs/api/paddle/distribution/SigmoidTransform_cn.rst index 54e42ba3496..b34e2117879 100644 --- a/docs/api/paddle/distribution/SigmoidTransform_cn.rst +++ b/docs/api/paddle/distribution/SigmoidTransform_cn.rst @@ -5,7 +5,7 @@ SigmoidTransform .. py:class:: paddle.distribution.SigmoidTransform() -Sigmoid变换 :math:`y = \frac{1}{1 + \exp(-x)}` , :math:`x = \text{logit}(y)` +Sigmoid 变换 :math:`y = \frac{1}{1 + \exp(-x)}` , :math:`x = \text{logit}(y)` 代码示例 diff --git a/docs/api/paddle/distribution/SoftmaxTransform_cn.rst b/docs/api/paddle/distribution/SoftmaxTransform_cn.rst index d5acd407e77..55ba1d2394f 100644 --- a/docs/api/paddle/distribution/SoftmaxTransform_cn.rst +++ b/docs/api/paddle/distribution/SoftmaxTransform_cn.rst @@ -5,12 +5,12 @@ SoftmaxTransform .. py:class:: paddle.distribution.SoftmaxTransform -Softmax变换,首先进行 :math:`y = exp(x)` 变换,然后归一化。 +Softmax 变换,首先进行 :math:`y = exp(x)` 变换,然后归一化。 -Softmax变换将向量变换为单纯形。 +Softmax 变换将向量变换为单纯形。 .. 
note:: -Softmax不是双射函数,所以 ``forward_log_det_jacobian`` 、 ``inverse_log_det_jacobian`` 未实现。 +Softmax 不是双射函数,所以 ``forward_log_det_jacobian`` 、 ``inverse_log_det_jacobian`` 未实现。 代码示例 ::::::::: diff --git a/docs/api/paddle/distribution/StackTransform_cn.rst b/docs/api/paddle/distribution/StackTransform_cn.rst index d57a12ac265..d7be31bd51c 100644 --- a/docs/api/paddle/distribution/StackTransform_cn.rst +++ b/docs/api/paddle/distribution/StackTransform_cn.rst @@ -5,13 +5,13 @@ StackTransform .. py:class:: paddle.distribution.StackTransform(transforms, axis=0) -``StackTransform`` 将一系列变换沿着某个特定轴作用于一个输入Tensor上。 +``StackTransform`` 将一系列变换沿着某个特定轴作用于一个输入 Tensor 上。 参数 ::::::::: - **transforms** (Sequence[Transform]) - 变换序列。 -- **axis** (int,可选) - 待变换的轴,默认值:0,表示沿着第0个轴变换。 +- **axis** (int,可选) - 待变换的轴,默认值:0,表示沿着第 0 个轴变换。 代码示例 ::::::::: diff --git a/docs/api/paddle/distribution/StickBreakingTransform_cn.rst b/docs/api/paddle/distribution/StickBreakingTransform_cn.rst index 5cbb0037257..9b709fcb2c8 100644 --- a/docs/api/paddle/distribution/StickBreakingTransform_cn.rst +++ b/docs/api/paddle/distribution/StickBreakingTransform_cn.rst @@ -5,7 +5,7 @@ StickBreakingTransform .. py:class:: paddle.distribution.StickBreakingTransform() -``StickBreakingTransform`` 将一个长度为K的向量通过StackBreaking构造过程变换为标准K-单纯形。 +``StickBreakingTransform`` 将一个长度为 K 的向量通过 StackBreaking 构造过程变换为标准 K-单纯形。 代码示例 diff --git a/docs/api/paddle/distribution/TanhTransform_cn.rst b/docs/api/paddle/distribution/TanhTransform_cn.rst index e76eac26bb5..8fa09478cf2 100644 --- a/docs/api/paddle/distribution/TanhTransform_cn.rst +++ b/docs/api/paddle/distribution/TanhTransform_cn.rst @@ -5,7 +5,7 @@ TanhTransform .. 
py:class:: paddle.distribution.TanhTransform() -Tanh变换 :math:`y = tanh(x)` +Tanh 变换 :math:`y = tanh(x)` 代码示例 diff --git a/docs/api/paddle/distribution/TransformedDistribution_cn.rst b/docs/api/paddle/distribution/TransformedDistribution_cn.rst index 0c6d1d1e903..8497a69dd31 100644 --- a/docs/api/paddle/distribution/TransformedDistribution_cn.rst +++ b/docs/api/paddle/distribution/TransformedDistribution_cn.rst @@ -25,7 +25,7 @@ COPY-FROM: paddle.distribution.TransformedDistribution prob(value) ''''''''' -计算value的概率。 +计算 value 的概率。 **参数** @@ -33,13 +33,13 @@ prob(value) **返回** -- Tensor: value的概率。 +- Tensor: value 的概率。 log_prob(value) ''''''''' -计算value的对数概率。 +计算 value 的对数概率。 **参数** @@ -47,7 +47,7 @@ log_prob(value) **返回** -- Tensor: value的对数概率。 +- Tensor: value 的对数概率。 sample(shape=()) diff --git a/docs/api/paddle/distribution/Uniform_cn.rst b/docs/api/paddle/distribution/Uniform_cn.rst index 0e0426f8a92..e6f8d17b31d 100644 --- a/docs/api/paddle/distribution/Uniform_cn.rst +++ b/docs/api/paddle/distribution/Uniform_cn.rst @@ -24,13 +24,13 @@ Uniform :math:`high = b` 。 :math:`Z`:正态分布常量。 -参数low和high的维度必须能够支持广播。 +参数 low 和 high 的维度必须能够支持广播。 参数 ::::::::: - - **low** (int|float|list|numpy.ndarray|Tensor) - 均匀分布的下边界。数据类型为int、float、list、numpy.ndarray或Tensor。 - - **high** (int|float|list|numpy.ndarray|Tensor) - 均匀分布的上边界。数据类型为int、float、list、numpy.ndarray或Tensor。 + - **low** (int|float|list|numpy.ndarray|Tensor) - 均匀分布的下边界。数据类型为 int、float、list、numpy.ndarray 或 Tensor。 + - **high** (int|float|list|numpy.ndarray|Tensor) - 均匀分布的上边界。数据类型为 int、float、list、numpy.ndarray 或 Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 代码示例 @@ -49,12 +49,12 @@ sample(shape, seed=0) **参数** - - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 + - **shape** (list) - 1 维列表,指定生成样本的维度。数据类型为 int32。 - **seed** (int) - 长整型数。 **返回** -Tensor,预先设计好维度的张量,数据类型为float32。 +Tensor,预先设计好维度的张量,数据类型为 float32。 entropy() ''''''''' @@ -67,7 +67,7 @@ entropy() **返回** -Tensor,均匀分布的信息熵,数据类型为float32。 
+Tensor,均匀分布的信息熵,数据类型为 float32。 log_prob(value) @@ -77,11 +77,11 @@ log_prob(value) **参数** - - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + - **value** (Tensor) - 输入张量。数据类型为 float32 或 float64。 **返回** -Tensor,对数概率,数据类型与value相同。 +Tensor,对数概率,数据类型与 value 相同。 probs(value) @@ -91,8 +91,8 @@ probs(value) **参数** - - **value** (Tensor) - 输入张量。数据类型为float32或float64。 + - **value** (Tensor) - 输入张量。数据类型为 float32 或 float64。 **返回** -Tensor,概率,数据类型与value相同。 +Tensor,概率,数据类型与 value 相同。 diff --git a/docs/api/paddle/distribution/kl_divergence_cn.rst b/docs/api/paddle/distribution/kl_divergence_cn.rst index ca396ead5ff..e8f9dc27080 100644 --- a/docs/api/paddle/distribution/kl_divergence_cn.rst +++ b/docs/api/paddle/distribution/kl_divergence_cn.rst @@ -5,7 +5,7 @@ kl_divergence .. py:function:: paddle.distribution.kl_divergence(p, q) -计算分布p和q之间的KL散度。 +计算分布 p 和 q 之间的 KL 散度。 .. math:: @@ -14,13 +14,13 @@ kl_divergence 参数 ::::::::: -- **p** (Distribution) - 概率分布实例,继承于Distribution基类。 -- **q** (Distribution) - 概率分布实例,继承于Distribution基类。 +- **p** (Distribution) - 概率分布实例,继承于 Distribution 基类。 +- **q** (Distribution) - 概率分布实例,继承于 Distribution 基类。 返回 ::::::::: -- Tensor - 分布p和分布q之间的KL散度。 +- Tensor - 分布 p 和分布 q 之间的 KL 散度。 代码示例 diff --git a/docs/api/paddle/distribution/register_kl_cn.rst b/docs/api/paddle/distribution/register_kl_cn.rst index 72ee483c893..69bb37685f0 100644 --- a/docs/api/paddle/distribution/register_kl_cn.rst +++ b/docs/api/paddle/distribution/register_kl_cn.rst @@ -5,15 +5,15 @@ register_kl .. 
py:function:: paddle.distribution.register_kl(cls_p, cls_q) -用于注册KL散度具体计算函数装饰器。 +用于注册 KL 散度具体计算函数装饰器。 -调用 ``kl_divergence(p,q)`` 计算KL散度时,会通过多重派发机制,即根据p和q的类型查找通过 ``register_kl`` 注册的实现函数,如果找到返回计算结果,否则,抛出 ``NotImplementError``。用户可通过该装饰器自行注册KL散度计算函数。 +调用 ``kl_divergence(p,q)`` 计算 KL 散度时,会通过多重派发机制,即根据 p 和 q 的类型查找通过 ``register_kl`` 注册的实现函数,如果找到返回计算结果,否则,抛出 ``NotImplementError``。用户可通过该装饰器自行注册 KL 散度计算函数。 参数 ::::::::: -- **cls_p** (Distribution) - 实例p的分布类型,继承于Distribution基类。 -- **cls_q** (Distribution) - 实例q的分布类型,继承于Distribution基类。 +- **cls_p** (Distribution) - 实例 p 的分布类型,继承于 Distribution 基类。 +- **cls_q** (Distribution) - 实例 q 的分布类型,继承于 Distribution 基类。 代码示例 ::::::::: diff --git a/docs/api/paddle/divide_cn.rst b/docs/api/paddle/divide_cn.rst index d8a30ec0250..91f8799baad 100644 --- a/docs/api/paddle/divide_cn.rst +++ b/docs/api/paddle/divide_cn.rst @@ -5,7 +5,7 @@ divide .. py:function:: paddle.divide(x, y, name=None) -该OP是逐元素相除算子,输入 ``x`` 与输入 ``y`` 逐元素相除,并将各个位置的输出元素保存到返回结果中。 +该 OP 是逐元素相除算子,输入 ``x`` 与输入 ``y`` 逐元素相除,并将各个位置的输出元素保存到返回结果中。 输入 ``x`` 与输入 ``y`` 必须可以广播为相同形状,关于广播规则,请参考 :ref:`cn_user_guide_broadcasting` 等式为: @@ -13,20 +13,20 @@ divide .. 
math:: Out = X / Y -- :math:`X`:多维Tensor。 -- :math:`Y`:多维Tensor。 +- :math:`X`:多维 Tensor。 +- :math:`Y`:多维 Tensor。 参数 ::::::::: - - x(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 - - y(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 - - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + - x(Tensor)- 多维 Tensor。数据类型为 float32 、float64、int32 或 int64。 + - y(Tensor)- 多维 Tensor。数据类型为 float32 、float64、int32 或 int64。 + - name(str,可选)- 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: - ``Tensor``,存储运算后的结果。如果x和y有不同的shape且是可以广播的,返回Tensor的shape是x和y经过广播后的shape。如果x和y有相同的shape,返回Tensor的shape与x,y相同。 + ``Tensor``,存储运算后的结果。如果 x 和 y 有不同的 shape 且是可以广播的,返回 Tensor 的 shape 是 x 和 y 经过广播后的 shape。如果 x 和 y 有相同的 shape,返回 Tensor 的 shape 与 x,y 相同。 diff --git a/docs/api/paddle/dot_cn.rst b/docs/api/paddle/dot_cn.rst index a50569b59a3..f62a177d15b 100644 --- a/docs/api/paddle/dot_cn.rst +++ b/docs/api/paddle/dot_cn.rst @@ -10,13 +10,13 @@ dot .. note:: - 支持1维和2维Tensor。如果是2维Tensor,矩阵的第一个维度是batch_size,将会在多个样本上进行点积计算。 + 支持 1 维和 2 维 Tensor。如果是 2 维 Tensor,矩阵的第一个维度是 batch_size,将会在多个样本上进行点积计算。 参数 ::::::::: - - **x** (Tensor)- 1维或2维 ``Tensor``。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **y** (Tensor)- 1维或2维 ``Tensor``。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **x** (Tensor)- 1 维或 2 维 ``Tensor``。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 1 维或 2 维 ``Tensor``。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/einsum_cn.rst b/docs/api/paddle/einsum_cn.rst index f16a484d092..014fc764b0d 100644 --- a/docs/api/paddle/einsum_cn.rst +++ b/docs/api/paddle/einsum_cn.rst @@ -28,8 +28,8 @@ Einstein 求和是一种采用 Einstein 标记法描述的张量求和,输入 **关于求和标记的约定** - - 维度分量下标:张量的维度分量下标使用英文字母表示,不区分大小写,如'ijk'表示张量维度分量为i,j,k - - 下标对应输入操作数:维度下标以`,`分段,按顺序1-1对应输入操作数 + - 维度分量下标:张量的维度分量下标使用英文字母表示,不区分大小写,如'ijk'表示张量维度分量为 i,j,k + - 
下标对应输入操作数:维度下标以`,`分段,按顺序 1-1 对应输入操作数 - 广播维度:省略号`...`表示维度的广播分量,例如,'i...j'表示首末分量除外的维度需进行广播对齐 - 自由标和哑标:输入标记中仅出现一次的下标为自由标,重复出现的下标为哑标,哑标对应的维度分量将被规约消去 - 输出:输出张量的维度分量既可由输入标记自动推导,也可以用输出标记定制化 @@ -44,8 +44,8 @@ Einstein 求和是一种采用 Einstein 标记法描述的张量求和,输入 - 哑标出现在输出标记中则自动提升为自由标 - 输出标记中未出现的自由标被降为哑标 - 例子 - - '...ij, ...jk',该标记中i,k为自由标,j为哑标,输出维度'...ik' - - 'ij -> i',i为自由标,j为哑标 + - '...ij, ...jk',该标记中 i,k 为自由标,j 为哑标,输出维度'...ik' + - 'ij -> i',i 为自由标,j 为哑标 - '...ij, ...jk -> ...ijk',i,j,k 均为自由标 - '...ij, ...jk -> ij',若输入张量中的广播维度不为空,则该标记为无效标记 @@ -58,11 +58,11 @@ Einsum 求和过程理论上等价于如下四步,但实现中实际执行的 - 第三步,维度规约:将哑标对应的维度分量求和消除 - 第四步,转置输出:若存在输出标记,则按标记进行转置,否则按广播维度+字母序自由标的顺序转置,返回转之后的张量作为输出 -**关于trace和diagonal的标记约定(待实现功能)** +**关于 trace 和 diagonal 的标记约定(待实现功能)** - 在单个输入张量的标记中重复出现的下标称为对角标,对角标对应的坐标轴需进行对角化操作,如'i...i'表示需对首尾坐标轴进行对角化 - - 若无输出标记或输出标记中不包含对角标,则对角标对应维度规约为标量,相应维度取消,等价于trace操作 - - 若输出标记中包含对角标,则保留对角标维度,等价于diagonal操作 + - 若无输出标记或输出标记中不包含对角标,则对角标对应维度规约为标量,相应维度取消,等价于 trace 操作 + - 若输出标记中包含对角标,则保留对角标维度,等价于 diagonal 操作 参数 ::::: diff --git a/docs/api/paddle/empty_like_cn.rst b/docs/api/paddle/empty_like_cn.rst index 5a61ad33273..5d4ec73f2bb 100644 --- a/docs/api/paddle/empty_like_cn.rst +++ b/docs/api/paddle/empty_like_cn.rst @@ -6,18 +6,18 @@ empty_like .. 
py:function:: paddle.empty_like(x, dtype=None, name=None) -根据参数 ``x`` 的shape和数据类型 ``dtype`` 创建未初始化的Tensor。如果 ``dtype`` 为None,则Tensor的数据类型与 ``x`` 相同。 +根据参数 ``x`` 的 shape 和数据类型 ``dtype`` 创建未初始化的 Tensor。如果 ``dtype`` 为 None,则 Tensor 的数据类型与 ``x`` 相同。 参数 :::::::::::: - - **x** (Tensor) – 输入Tensor,输出Tensor和x具有相同的形状,x的数据类型可以是bool、float16、float32、float64、int32、int64。 - - **dtype** (np.dtype|str,可选)- 输出变量的数据类型,可以是bool、float16、float32、float64、int32、int64。若参数为None,则输出变量的数据类型和输入变量相同,默认值为None。 + - **x** (Tensor) – 输入 Tensor,输出 Tensor 和 x 具有相同的形状,x 的数据类型可以是 bool、float16、float32、float64、int32、int64。 + - **dtype** (np.dtype|str,可选)- 输出变量的数据类型,可以是 bool、float16、float32、float64、int32、int64。若参数为 None,则输出变量的数据类型和输入变量相同,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -返回一个根据 ``x`` 和 ``dtype`` 创建并且尚未初始化的Tensor。 +返回一个根据 ``x`` 和 ``dtype`` 创建并且尚未初始化的 Tensor。 代码示例 :::::::::::: diff --git a/docs/api/paddle/enable_static_cn.rst b/docs/api/paddle/enable_static_cn.rst index b88c49137c4..5a2938e8606 100644 --- a/docs/api/paddle/enable_static_cn.rst +++ b/docs/api/paddle/enable_static_cn.rst @@ -6,7 +6,7 @@ enable_static .. py:function:: paddle.enable_static() .. 
note:: - 从2.0.0版本开始,Paddle默认开启动态图模式。 + 从 2.0.0 版本开始,Paddle 默认开启动态图模式。 开启静态图模式。可通过 :ref:`cn_api_paddle_disable_static` 关闭静态图模式。 diff --git a/docs/api/paddle/equal_all_cn.rst b/docs/api/paddle/equal_all_cn.rst index 4050dad0f43..b6a712c4d06 100644 --- a/docs/api/paddle/equal_all_cn.rst +++ b/docs/api/paddle/equal_all_cn.rst @@ -14,13 +14,13 @@ equal_all 参数 :::::::::::: - - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - - **y** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **x** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **y** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出结果为Tensor,Tensor数据类型为bool。 +输出结果为 Tensor,Tensor 数据类型为 bool。 代码示例 :::::::::::: diff --git a/docs/api/paddle/equal_cn.rst b/docs/api/paddle/equal_cn.rst index 738d788a128..3a7efa22b9c 100644 --- a/docs/api/paddle/equal_cn.rst +++ b/docs/api/paddle/equal_cn.rst @@ -13,8 +13,8 @@ equal 参数 :::::::::::: - - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - - **y** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **x** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **y** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/erfinv_cn.rst b/docs/api/paddle/erfinv_cn.rst index 28804eff72d..72d20c70ceb 100644 --- a/docs/api/paddle/erfinv_cn.rst +++ b/docs/api/paddle/erfinv_cn.rst @@ -4,8 +4,8 @@ erfinv ------------------------------- .. py:function:: paddle.erfinv(x) -计算输入矩阵x的逆误差函数。 -请参考erf计算公式 :ref:`cn_api_fluid_layers_erf` +计算输入矩阵 x 的逆误差函数。 +请参考 erf 计算公式 :ref:`cn_api_fluid_layers_erf` .. 
math:: erfinv(erf(x)) = x @@ -13,13 +13,13 @@ erfinv 参数 ::::::::: -- **x** (Tensor) - 输入的Tensor,数据类型为:float32、float64。 -- **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 +- **x** (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 +- **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: -输出Tensor,与 ``x`` 数据类型相同。 +输出 Tensor,与 ``x`` 数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/exp_cn.rst b/docs/api/paddle/exp_cn.rst index d710b6bf118..3a365281ea8 100644 --- a/docs/api/paddle/exp_cn.rst +++ b/docs/api/paddle/exp_cn.rst @@ -8,7 +8,7 @@ exp -对输入,逐元素进行以自然数e为底指数运算。 +对输入,逐元素进行以自然数 e 为底指数运算。 .. math:: out = e^x @@ -16,12 +16,12 @@ exp 参数 :::::::::::: - - **x** (Tensor) - 该OP的输入为多维Tensor。数据类型为float32、float64。 + - **x** (Tensor) - 该 OP 的输入为多维 Tensor。数据类型为 float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出为Tensor,与 ``x`` 维度相同、数据类型相同。 +输出为 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/expand_as_cn.rst b/docs/api/paddle/expand_as_cn.rst index 7e96e66dc98..08f35f3d69f 100644 --- a/docs/api/paddle/expand_as_cn.rst +++ b/docs/api/paddle/expand_as_cn.rst @@ -7,7 +7,7 @@ expand_as 根据 ``y`` 的形状扩展 ``x``,扩展后,``x`` 的形状和 ``y`` 的形状相同。 -``x`` 的维数和 ``y`` 的维数应小于等于6,并且 ``y`` 的维数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为1。 +``x`` 的维数和 ``y`` 的维数应小于等于 6,并且 ``y`` 的维数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为 1。 参数 ::::::::: diff --git a/docs/api/paddle/expand_cn.rst b/docs/api/paddle/expand_cn.rst index 776e962c485..096ed6f720e 100644 --- a/docs/api/paddle/expand_cn.rst +++ b/docs/api/paddle/expand_cn.rst @@ -7,11 +7,11 @@ expand 根据 ``shape`` 指定的形状扩展 ``x``,扩展后,``x`` 的形状和 ``shape`` 指定的形状一致。 -``x`` 的维数和 ``shape`` 的元素数应小于等于6,并且 ``shape`` 中的元素数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为 1。 +``x`` 的维数和 ``shape`` 的元素数应小于等于 6,并且 ``shape`` 中的元素数应该大于等于 ``x`` 的维数。扩展的维度的维度值应该为 1。 参数 ::::::::: - - x (Tensor) - 输入的 Tensor,数据类型为:bool、float32、float64、int32或int64。 + - x (Tensor) - 输入的 
Tensor,数据类型为:bool、float32、float64、int32 或 int64。 - shape (tuple|list|Tensor) - 给定输入 ``x`` 扩展后的形状,若 ``shape`` 为 list 或者 tuple,则其中的元素值应该为整数或者 1-D Tensor,若 ``shape`` 类型为 Tensor,则其应该为 1-D Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/expm1_cn.rst b/docs/api/paddle/expm1_cn.rst index fd0d068fb73..21714678c4f 100644 --- a/docs/api/paddle/expm1_cn.rst +++ b/docs/api/paddle/expm1_cn.rst @@ -8,7 +8,7 @@ expm1 -对输入,逐元素进行以自然数e为底指数运算并减1。 +对输入,逐元素进行以自然数 e 为底指数运算并减 1。 .. math:: out = e^x - 1 @@ -16,13 +16,13 @@ expm1 参数 ::::::::: -- **x** (Tensor) - 该OP的输入为多维Tensor。数据类型为:float16、float32、float64。 +- **x** (Tensor) - 该 OP 的输入为多维 Tensor。数据类型为:float16、float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出为Tensor,与 ``x`` 维度相同、数据类型相同。 +输出为 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/eye_cn.rst b/docs/api/paddle/eye_cn.rst index 990da3aa682..2c7fd17df2d 100644 --- a/docs/api/paddle/eye_cn.rst +++ b/docs/api/paddle/eye_cn.rst @@ -5,19 +5,19 @@ eye .. 
py:function:: paddle.eye(num_rows, num_columns=None, dtype=None, name=None) -构建二维Tensor(主对角线元素为1,其他元素为0)。 +构建二维 Tensor(主对角线元素为 1,其他元素为 0)。 参数 :::::::::::: - - **num_rows** (int) - 生成2-D Tensor的行数,数据类型为非负int32。 - - **num_columns** (int,可选) - 生成2-D Tensor的列数,数据类型为非负int32。若为None,则默认等于num_rows。 - - **dtype** (np.dtype|str,可选) - 返回Tensor的数据类型,可为float16、float32、float64、int32、int64。若为None,则默认等于float32。 + - **num_rows** (int) - 生成 2-D Tensor 的行数,数据类型为非负 int32。 + - **num_columns** (int,可选) - 生成 2-D Tensor 的列数,数据类型为非负 int32。若为 None,则默认等于 num_rows。 + - **dtype** (np.dtype|str,可选) - 返回 Tensor 的数据类型,可为 float16、float32、float64、int32、int64。若为 None,则默认等于 float32。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - ``shape`` 为 [num_rows, num_columns]的Tensor。 + ``shape`` 为 [num_rows, num_columns]的 Tensor。 代码示例 :::::::::::: diff --git a/docs/api/paddle/fft/Overview_cn.rst b/docs/api/paddle/fft/Overview_cn.rst index 1442ac0707c..37a4107355d 100644 --- a/docs/api/paddle/fft/Overview_cn.rst +++ b/docs/api/paddle/fft/Overview_cn.rst @@ -3,7 +3,7 @@ paddle.fft --------------------- -paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API。具体如下: +paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关 API。具体如下: - :ref:`标准快速傅里叶变换 ` - :ref:`实数傅里叶变换 ` @@ -16,7 +16,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API ========================== .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.fft.fft ` ", "一维离散傅里叶变换" " :ref:`paddle.fft.ifft ` ", "一维逆向离散傅里叶变换" @@ -31,7 +31,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API ========================== .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.fft.rfft ` ", "一维离散实数傅里叶变换" " :ref:`paddle.fft.irfft ` ", "一维离散实数傅里叶变换的逆变换" @@ -46,7 +46,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API ========================== .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.fft.hfft ` ", "一维离散厄米特傅里叶变换" " :ref:`paddle.fft.ihfft ` ", "一维离散厄米特傅里叶变换的逆变换" @@ -61,7 +61,7 @@ paddle.fft 目录下包含飞桨框架支持的快速傅里叶变换的相关API ========================== .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.fft.fftfreq ` ", "计算傅里叶变换采样频率" " :ref:`paddle.fft.rfftfreq ` ", "计算傅里叶变换采样频率,用于 ``rfft``, ``irfft``" diff --git a/docs/api/paddle/fft/fft_cn.rst b/docs/api/paddle/fft/fft_cn.rst index 12c5dbc94a7..27a716e0500 100644 --- a/docs/api/paddle/fft/fft_cn.rst +++ b/docs/api/paddle/fft/fft_cn.rst @@ -13,7 +13,7 @@ fft ::::::::: - **x** (Tensor) - 输入 Tensor,数据类型为实数或复数。 -- **n** (int,可选) - 输出 Tensor 中傅里叶变换轴的长度。如果 ``n`` 比输入Tensor 中对应 +- **n** (int,可选) - 输出 Tensor 中傅里叶变换轴的长度。如果 ``n`` 比输入 Tensor 中对应 轴的长度小,输入数据会被截断。如果 ``n`` 比输入 Tensor 中对应轴的长度大,则输入会被补零 。如果 ``n`` 没有被指定,则使用输入 Tensor 中由 ``axis`` 指定的轴的长度。 - **axis** (int,可选) - 傅里叶变换的轴。如果没有指定,默认使用最后一维。 diff --git a/docs/api/paddle/fft/ifft_cn.rst b/docs/api/paddle/fft/ifft_cn.rst index b0d411e3705..6f6c64e56fb 100644 --- a/docs/api/paddle/fft/ifft_cn.rst +++ b/docs/api/paddle/fft/ifft_cn.rst @@ -11,7 +11,7 @@ ifft ::::::::: - **x** (Tensor) - 输入 Tensor,数据类型为实数或复数。 -- **n** (int,可选) - 输出 Tensor 中在傅里叶变换轴的长度。如果 ``n`` 比输入Tensor 中 +- **n** (int,可选) - 输出 Tensor 中在傅里叶变换轴的长度。如果 ``n`` 比输入 Tensor 中 对应轴的长度小,输入数据会被截断。如果 ``n`` 比输入 Tensor 中对应轴的长度大,则输入会被 补零。如果 ``n`` 没有被指定,则使用输入 Tensor 中由 ``axis`` 指定的轴的长度。 - **axis** (int, optional) - 傅里叶变换的轴。如果没有指定,默认使用最后一维。 diff --git a/docs/api/paddle/fft/ihfft_cn.rst b/docs/api/paddle/fft/ihfft_cn.rst index 8a688c89db0..1021542011e 100644 --- a/docs/api/paddle/fft/ihfft_cn.rst +++ b/docs/api/paddle/fft/ihfft_cn.rst @@ -12,7 +12,7 @@ ihfft ::::::::: - **x** (Tensor) - 输入 Tensor,数据类型为实数。 -- **n** (int,可选) - 傅里叶变换点数。如果 ``n`` 比输入Tensor 中对应轴 +- **n** (int,可选) - 傅里叶变换点数。如果 ``n`` 比输入 Tensor 中对应轴 的长度小,输入数据会被截断。如果 ``n`` 比输入 Tensor 中对应轴的长度大,则输入会被补零。如果 ``n`` 没有被指定,则使用输入 Tensor 中由 ``axis`` 指定的轴的长度。 
- **axis** (int,可选) - 傅里叶变换的轴。如果没有指定,默认使用最后一维。 diff --git a/docs/api/paddle/fft/rfft_cn.rst b/docs/api/paddle/fft/rfft_cn.rst index dead78e8e0f..7917a4f9d54 100644 --- a/docs/api/paddle/fft/rfft_cn.rst +++ b/docs/api/paddle/fft/rfft_cn.rst @@ -12,7 +12,7 @@ rfft ::::::::: - **x** (Tensor) - 输入 Tensor,数据类型为实数。 -- **n** (int,可选) - 傅里叶变换点数。如果 ``n`` 比输入Tensor 中对应轴 +- **n** (int,可选) - 傅里叶变换点数。如果 ``n`` 比输入 Tensor 中对应轴 的长度小,输入数据会被截断。如果 ``n`` 比输入 Tensor 中对应轴的长度大,则输入会被补零。如果 ``n`` 没有被指定,则使用输入 Tensor 中由 ``axis`` 指定的轴的长度。 - **axis** (int,可选) - 傅里叶变换的轴。如果没有指定,默认使用最后一维。 diff --git a/docs/api/paddle/flatten_cn.rst b/docs/api/paddle/flatten_cn.rst index dfa934df5a1..926458b45a3 100644 --- a/docs/api/paddle/flatten_cn.rst +++ b/docs/api/paddle/flatten_cn.rst @@ -42,7 +42,7 @@ flatten 参数 :::::::::::: - - **x** (Tensor) - 多维 Tensor,数据类型可以为float32、float64、int8、int32或int64。 + - **x** (Tensor) - 多维 Tensor,数据类型可以为 float32、float64、int8、int32 或 int64。 - **start_axis** (int) - flatten 展开的起始维度。 - **stop_axis** (int) - flatten 展开的结束维度。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -65,6 +65,6 @@ flatten out = paddle.flatten(img, start_axis=1, stop_axis=2) # out shape is [2, 12, 4] - # 在动态图模式下,输出out与输入img共享数据 + # 在动态图模式下,输出 out 与输入 img 共享数据 img[0, 0, 0, 0] = -1 print(out[0, 0, 0]) # [-1] diff --git a/docs/api/paddle/flip_cn.rst b/docs/api/paddle/flip_cn.rst index 8d3ee9a6f3a..a03e27e7519 100644 --- a/docs/api/paddle/flip_cn.rst +++ b/docs/api/paddle/flip_cn.rst @@ -13,7 +13,7 @@ flip 参数 :::::::::::: - - **x** (Tensor) - 输入的 Tensor。维度为多维,数据类型为bool、int32、int64、float32或float64。 + - **x** (Tensor) - 输入的 Tensor。维度为多维,数据类型为 bool、int32、int64、float32 或 float64。 - **axis** (list|tuple|int) - 需要翻转的轴。当 axis 是 int 型时,且 axis<0,实际的计算维度为 ndim(x) + axis;axis 是 list 或 tuple 时,``axis[i] < 0`` 时,实际的计算维度为 ndim(x) + axis[i],其中 i 为 axis 的索引。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/floor_cn.rst b/docs/api/paddle/floor_cn.rst index 
262e916c44a..f1d0e9edf15 100644 --- a/docs/api/paddle/floor_cn.rst +++ b/docs/api/paddle/floor_cn.rst @@ -16,12 +16,12 @@ floor 参数 :::::::::::: - - **x** - 输入为多维Tensor。数据类型必须为float32或float64。 + - **x** - 输入为多维 Tensor。数据类型必须为 float32 或 float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出为Tensor,与 ``x`` 维度相同、数据类型相同。 +输出为 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/floor_divide_cn.rst b/docs/api/paddle/floor_divide_cn.rst index bf1e028c1b4..b6af9e822be 100644 --- a/docs/api/paddle/floor_divide_cn.rst +++ b/docs/api/paddle/floor_divide_cn.rst @@ -13,14 +13,14 @@ floor_divide .. math:: Out = X // Y -- :math:`X`:多维Tensor。 -- :math:`Y`:多维Tensor。 +- :math:`X`:多维 Tensor。 +- :math:`Y`:多维 Tensor。 参数 ::::::::: - - x(Tensor)- 多维Tensor。数据类型为int32或int64。 - - y(Tensor)- 多维Tensor。数据类型为int32或int64。 - - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + - x(Tensor)- 多维 Tensor。数据类型为 int32 或 int64。 + - y(Tensor)- 多维 Tensor。数据类型为 int32 或 int64。 + - name(str,可选)- 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 diff --git a/docs/api/paddle/flops_cn.rst b/docs/api/paddle/flops_cn.rst index 793563f5142..c6f17f3cd0c 100644 --- a/docs/api/paddle/flops_cn.rst +++ b/docs/api/paddle/flops_cn.rst @@ -56,7 +56,7 @@ int,网络模型的计算量。 return x lenet = LeNet() - # m 是 nn.Layer 的一个实类,x 是m的输入,y 是网络层的输出。 + # m 是 nn.Layer 的一个实类,x 是 m 的输入,y 是网络层的输出。 def count_leaky_relu(m, x, y): x = x[0] nelements = x.numel() diff --git a/docs/api/paddle/fmax_cn.rst b/docs/api/paddle/fmax_cn.rst index e3c665e8b43..0b5d6ffe183 100644 --- a/docs/api/paddle/fmax_cn.rst +++ b/docs/api/paddle/fmax_cn.rst @@ -6,7 +6,7 @@ fmax .. py:function:: paddle.fmax(x, y, name=None) -比较两个Tensor对应位置的元素,返回一个包含该元素最大值的新Tensor。如果两个元素其中一个是nan值,则直接返回另一个值,如果两者都是nan值,则返回第一个nan值。 +比较两个 Tensor 对应位置的元素,返回一个包含该元素最大值的新 Tensor。如果两个元素其中一个是 nan 值,则直接返回另一个值,如果两者都是 nan 值,则返回第一个 nan 值。 等式是: @@ -14,17 +14,17 @@ fmax out = fmax(x, y) .. 
note:: - ``paddle.fmax`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.fmax`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 ::::::::: - - **x** (Tensor)- 输入的Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **y** (Tensor)- 输入的Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **x** (Tensor)- 输入的 Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 输入的 Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - ``Tensor``,存储运算后的结果。如果x和y有不同的shape且是可以广播的,返回Tensor的shape是x和y经过广播后的shape。如果x和y有相同的shape,返回Tensor的shape与x,y相同。 + ``Tensor``,存储运算后的结果。如果 x 和 y 有不同的 shape 且是可以广播的,返回 Tensor 的 shape 是 x 和 y 经过广播后的 shape。如果 x 和 y 有相同的 shape,返回 Tensor 的 shape 与 x,y 相同。 代码示例 diff --git a/docs/api/paddle/fmin_cn.rst b/docs/api/paddle/fmin_cn.rst index 4f4ac58f8fe..ffb902579da 100644 --- a/docs/api/paddle/fmin_cn.rst +++ b/docs/api/paddle/fmin_cn.rst @@ -6,7 +6,7 @@ fmin .. py:function:: paddle.fmin(x, y, name=None) -比较两个Tensor对应位置的元素,返回一个包含该元素最小值的新Tensor。如果两个元素其中一个是nan值,则直接返回另一个值,如果两者都是nan值,则返回第一个nan值。 +比较两个 Tensor 对应位置的元素,返回一个包含该元素最小值的新 Tensor。如果两个元素其中一个是 nan 值,则直接返回另一个值,如果两者都是 nan 值,则返回第一个 nan 值。 等式是: @@ -14,17 +14,17 @@ fmin out = fmin(x, y) .. 
note:: - ``paddle.fmin`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.fmin`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 ::::::::: - - **x** (Tensor)- 输入的Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **y** (Tensor)- 输入的Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **x** (Tensor)- 输入的 Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 输入的 Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - ``Tensor``,存储运算后的结果。如果x和y有不同的shape且是可以广播的,返回Tensor的shape是x和y经过广播后的shape。如果x和y有相同的shape,返回Tensor的shape与x,y相同。 + ``Tensor``,存储运算后的结果。如果 x 和 y 有不同的 shape 且是可以广播的,返回 Tensor 的 shape 是 x 和 y 经过广播后的 shape。如果 x 和 y 有相同的 shape,返回 Tensor 的 shape 与 x,y 相同。 代码示例 diff --git a/docs/api/paddle/frac_cn.rst b/docs/api/paddle/frac_cn.rst index 0ece40fab53..b9b9d9ea88e 100644 --- a/docs/api/paddle/frac_cn.rst +++ b/docs/api/paddle/frac_cn.rst @@ -11,7 +11,7 @@ frac 参数 ::::::::: - - **x** (Tensor):输入变量,类型为 Tensor,支持int32、int64、float32、float64数据类型。 + - **x** (Tensor):输入变量,类型为 Tensor,支持 int32、int64、float32、float64 数据类型。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/full_cn.rst b/docs/api/paddle/full_cn.rst index d8832a44f9e..1a5cede2625 100644 --- a/docs/api/paddle/full_cn.rst +++ b/docs/api/paddle/full_cn.rst @@ -7,19 +7,19 @@ full -创建形状大小为 ``shape`` 并且数据类型为 ``dtype`` 的Tensor,其中元素值均为 ``fill_value`` 。 +创建形状大小为 ``shape`` 并且数据类型为 ``dtype`` 的 Tensor,其中元素值均为 ``fill_value`` 。 参数 :::::::::::: - - **shape** (list|tuple|Tensor) – 指定创建Tensor的形状(shape),数据类型为int32 或者int64。 - - **fill_value** (bool|float|int|Tensor) - 用于初始化输出Tensor的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 - - **dtype** (np.dtype|str,可选)- 输出变量的数据类型。若为None,则输出变量的数据类型和输入变量相同,默认值为None。 + - **shape** (list|tuple|Tensor) – 指定创建 Tensor 的形状(shape),数据类型为 int32 或者 int64。 + - 
**fill_value** (bool|float|int|Tensor) - 用于初始化输出 Tensor 的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 + - **dtype** (np.dtype|str,可选)- 输出变量的数据类型。若为 None,则输出变量的数据类型和输入变量相同,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -返回一个存储结果的Tensor,数据类型和dtype相同。 +返回一个存储结果的 Tensor,数据类型和 dtype 相同。 代码示例 diff --git a/docs/api/paddle/full_like_cn.rst b/docs/api/paddle/full_like_cn.rst index 2d1ae581dc5..823b7d66cb5 100644 --- a/docs/api/paddle/full_like_cn.rst +++ b/docs/api/paddle/full_like_cn.rst @@ -11,9 +11,9 @@ full_like 参数 :::::::::::: - - **x** (Tensor) – 输入 Tensor,输出 Tensor 和 x 具有相同的形状,x的数据类型可以是 bool、float16、float32、float64、int32、int64。 + - **x** (Tensor) – 输入 Tensor,输出 Tensor 和 x 具有相同的形状,x 的数据类型可以是 bool、float16、float32、float64、int32、int64。 - **fill_value** (bool|float|int) - 用于初始化输出张量的常量数据的值。注意:该参数不可超过输出变量数据类型的表示范围。 - - **dtype** (np.dtype|str,可选) - 输出变量的数据类型。若参数为None,则输出变量的数据类型和输入变量相同,默认值为 None。 + - **dtype** (np.dtype|str,可选) - 输出变量的数据类型。若参数为 None,则输出变量的数据类型和输入变量相同,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/gather_nd_cn.rst b/docs/api/paddle/gather_nd_cn.rst index df91a8ef00c..536eba76abf 100644 --- a/docs/api/paddle/gather_nd_cn.rst +++ b/docs/api/paddle/gather_nd_cn.rst @@ -5,7 +5,7 @@ gather_nd .. py:function:: paddle.gather_nd(x, index, name=None) -:code:`gather` 的高维推广,并且支持多轴同时索引。:code:`index` 是一个K维度的张量,它可以认为是从 :code:`x` 中取K-1维张量,每一个元素是一个切片: +:code:`gather` 的高维推广,并且支持多轴同时索引。:code:`index` 是一个 K 维度的张量,它可以认为是从 :code:`x` 中取 K-1 维张量,每一个元素是一个切片: .. 
math:: output[(i_0, ..., i_{K-2})] = x[index[(i_0, ..., i_{K-2})]] @@ -53,13 +53,13 @@ gather_nd :::::::::::: - **x** (Tensor) - 输入 Tensor,数据类型可以是 int32、int64、float32、float64、bool。 - - **index** (Tensor) - 输入的索引 Tensor,其数据类型 int32 或者 int64。它的维度 :code:`index.rank` 必须大于1,并且 :code:`index.shape[-1] <= x.rank` 。 + - **index** (Tensor) - 输入的索引 Tensor,其数据类型 int32 或者 int64。它的维度 :code:`index.rank` 必须大于 1,并且 :code:`index.shape[-1] <= x.rank` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -shape 为index.shape[:-1] + x.shape[index.shape[-1]:]的Tensor,数据类型与 :code:`x` 一致。 +shape 为 index.shape[:-1] + x.shape[index.shape[-1]:]的 Tensor,数据类型与 :code:`x` 一致。 代码示例 diff --git a/docs/api/paddle/gcd_cn.rst b/docs/api/paddle/gcd_cn.rst index 30e9e14a429..c02392747ae 100644 --- a/docs/api/paddle/gcd_cn.rst +++ b/docs/api/paddle/gcd_cn.rst @@ -11,20 +11,20 @@ gcd gcd(0,0)=0, gcd(0, y)=|y| - 如果x和y的shape不一致,会对两个shape进行广播操作,得到一致的shape(并作为输出结果的shape), + 如果 x 和 y 的 shape 不一致,会对两个 shape 进行广播操作,得到一致的 shape(并作为输出结果的 shape), 请参见 :ref:`cn_user_guide_broadcasting` 。 参数 ::::::::: -- **x** (Tensor) - 输入的Tensor,数据类型为:int32,int64。 -- **y** (Tensor) - 输入的Tensor,数据类型为:int32,int64。 -- **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 +- **x** (Tensor) - 输入的 Tensor,数据类型为:int32,int64。 +- **y** (Tensor) - 输入的 Tensor,数据类型为:int32,int64。 +- **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: -输出Tensor,与输入数据类型相同。 +输出 Tensor,与输入数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/get_cuda_rng_state_cn.rst b/docs/api/paddle/get_cuda_rng_state_cn.rst index 8c668f253fd..51721050957 100644 --- a/docs/api/paddle/get_cuda_rng_state_cn.rst +++ b/docs/api/paddle/get_cuda_rng_state_cn.rst @@ -4,7 +4,7 @@ get_cuda_rng_state ------------------------------- .. 
py:function:: paddle.get_cuda_rng_state() -获取cuda随机数生成器的状态信息。 +获取 cuda 随机数生成器的状态信息。 参数 diff --git a/docs/api/paddle/get_default_dtype_cn.rst b/docs/api/paddle/get_default_dtype_cn.rst index cb082ae3735..2df3f5e98a9 100644 --- a/docs/api/paddle/get_default_dtype_cn.rst +++ b/docs/api/paddle/get_default_dtype_cn.rst @@ -6,7 +6,7 @@ get_default_dtype .. py:function:: paddle.get_default_dtype() -得到当前全局的dtype。该值初始是float32。 +得到当前全局的 dtype。该值初始是 float32。 参数 @@ -17,7 +17,7 @@ get_default_dtype 返回 :::::::::::: - string,这个全局dtype仅支持float16、float32、float64。 + string,这个全局 dtype 仅支持 float16、float32、float64。 代码示例 :::::::::::: diff --git a/docs/api/paddle/get_flags_cn.rst b/docs/api/paddle/get_flags_cn.rst index 8e5f94383d0..cfdbbf86bba 100644 --- a/docs/api/paddle/get_flags_cn.rst +++ b/docs/api/paddle/get_flags_cn.rst @@ -6,13 +6,13 @@ get_flags .. py:function:: paddle.get_flags(flags) -获取指定的Paddle 环境变量FLAGS状态。详情请查看 :ref:`cn_guides_flags_flags`。 +获取指定的 Paddle 环境变量 FLAGS 状态。详情请查看 :ref:`cn_guides_flags_flags`。 参数 :::::::::::: - - **flags** (list of FLAGS [*]) - 想要获取的FLAGS标志列表。 + - **flags** (list of FLAGS [*]) - 想要获取的 FLAGS 标志列表。 返回 :::::::::::: diff --git a/docs/api/paddle/grad_cn.rst b/docs/api/paddle/grad_cn.rst index 00fb29bd5fa..ac8b7c235ba 100644 --- a/docs/api/paddle/grad_cn.rst +++ b/docs/api/paddle/grad_cn.rst @@ -7,26 +7,26 @@ grad .. py:function:: paddle.grad(outputs, inputs, grad_outputs=None, retain_graph=None, create_graph=False, only_inputs=True, allow_unused=False, no_grad_vars=None) .. 
note:: - 该API仅支持**动态图模式**。 + 该 API 仅支持**动态图模式**。 对于每个 `inputs`,计算所有 `outputs` 相对于其的梯度和。 参数 ::::::::: - - **outputs** (Tensor|list(Tensor)|tuple(Tensor)) – 用于计算梯度的图的输出变量,或多个输出变量构成的list/tuple。 - - **inputs** (Tensor|list(Tensor)|tuple(Tensor)) - 用于计算梯度的图的输入变量,或多个输入变量构成的list/tuple。该API的每个返回值对应每个 `inputs` 的梯度。 - - **grad_outputs** (Tensor|list(Tensor|None)|tuple(Tensor|None),可选) - `outputs` 变量梯度的初始值。若 `grad_outputs` 为None,则 `outputs` 梯度的初始值均为全1的Tensor。若 `grad_outputs` 不为None,它必须与 `outputs` 的长度相等,此时,若 `grad_outputs` 的第i个元素为None,则第i个 `outputs` 的梯度初始值为全1的Tensor;若 `grad_outputs` 的第i个元素为Tensor,则第i个 `outputs` 的梯度初始值为 `grad_outputs` 的第i个元素。默认值为None。 - - **retain_graph** (bool,可选) - 是否保留计算梯度的前向图。若值为True,则前向图会保留,用户可对同一张图求两次反向。若值为False,则前向图会释放。默认值为None,表示值与 `create_graph` 相等。 - - **create_graph** (bool,可选) - 是否创建计算过程中的反向图。若值为True,则可支持计算高阶导数。若值为False,则计算过程中的反向图会释放。默认值为False。 - - **only_inputs** (bool,可选) - 是否只计算 `inputs` 的梯度。若值为False,则图中所有叶节点变量的梯度均会计算,并进行累加。若值为True,则只会计算 `inputs` 的梯度。默认值为True。only_inputs=False功能正在开发中,目前尚不支持。 - - **allow_unused** (bool,可选) - 决定当某些 `inputs` 变量不在计算图中时抛出错误还是返回None。若某些 `inputs` 变量不在计算图中(即它们的梯度为None),则当allowed_unused=False时会抛出错误,当allow_unused=True时会返回None作为这些变量的梯度。默认值为False。 - - **no_grad_vars** (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor),可选) - 指明不需要计算梯度的变量。默认值为None。 + - **outputs** (Tensor|list(Tensor)|tuple(Tensor)) – 用于计算梯度的图的输出变量,或多个输出变量构成的 list/tuple。 + - **inputs** (Tensor|list(Tensor)|tuple(Tensor)) - 用于计算梯度的图的输入变量,或多个输入变量构成的 list/tuple。该 API 的每个返回值对应每个 `inputs` 的梯度。 + - **grad_outputs** (Tensor|list(Tensor|None)|tuple(Tensor|None),可选) - `outputs` 变量梯度的初始值。若 `grad_outputs` 为 None,则 `outputs` 梯度的初始值均为全 1 的 Tensor。若 `grad_outputs` 不为 None,它必须与 `outputs` 的长度相等,此时,若 `grad_outputs` 的第 i 个元素为 None,则第 i 个 `outputs` 的梯度初始值为全 1 的 Tensor;若 `grad_outputs` 的第 i 个元素为 Tensor,则第 i 个 `outputs` 的梯度初始值为 `grad_outputs` 的第 i 个元素。默认值为 None。 + - **retain_graph** (bool,可选) - 是否保留计算梯度的前向图。若值为 True,则前向图会保留,用户可对同一张图求两次反向。若值为 False,则前向图会释放。默认值为 None,表示值与 `create_graph` 
相等。 + - **create_graph** (bool,可选) - 是否创建计算过程中的反向图。若值为 True,则可支持计算高阶导数。若值为 False,则计算过程中的反向图会释放。默认值为 False。 + - **only_inputs** (bool,可选) - 是否只计算 `inputs` 的梯度。若值为 False,则图中所有叶节点变量的梯度均会计算,并进行累加。若值为 True,则只会计算 `inputs` 的梯度。默认值为 True。only_inputs=False 功能正在开发中,目前尚不支持。 + - **allow_unused** (bool,可选) - 决定当某些 `inputs` 变量不在计算图中时抛出错误还是返回 None。若某些 `inputs` 变量不在计算图中(即它们的梯度为 None),则当 allowed_unused=False 时会抛出错误,当 allow_unused=True 时会返回 None 作为这些变量的梯度。默认值为 False。 + - **no_grad_vars** (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor),可选) - 指明不需要计算梯度的变量。默认值为 None。 返回 ::::::::: -tuple(Tensor),其长度等于 `inputs` 中的变量个数,且第i个返回的变量是所有 `outputs` 相对于第i个 `inputs` 的梯度之和。 +tuple(Tensor),其长度等于 `inputs` 中的变量个数,且第 i 个返回的变量是所有 `outputs` 相对于第 i 个 `inputs` 的梯度之和。 代码示例 1 ::::::::: diff --git a/docs/api/paddle/greater_equal_cn.rst b/docs/api/paddle/greater_equal_cn.rst index 2484e3c03bc..668a7bf41d7 100644 --- a/docs/api/paddle/greater_equal_cn.rst +++ b/docs/api/paddle/greater_equal_cn.rst @@ -13,8 +13,8 @@ greater_equal 参数 :::::::::::: - - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - - **y** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **x** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **y** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/histogram_cn.rst b/docs/api/paddle/histogram_cn.rst index 10ab4640f5c..7d48f54dd92 100644 --- a/docs/api/paddle/histogram_cn.rst +++ b/docs/api/paddle/histogram_cn.rst @@ -5,15 +5,15 @@ histogram .. 
py:function:: paddle.histogram(input, bins=100, min=0, max=0, name=None) -计算输入 Tensor 的直方图。以 min 和 max 为 range 边界,将其均分成 bins 个直条,然后将排序好的数据划分到各个直条(bins)中。如果 min 和 max 都为0,则利用数据中的最大最小值作为边界。 +计算输入 Tensor 的直方图。以 min 和 max 为 range 边界,将其均分成 bins 个直条,然后将排序好的数据划分到各个直条(bins)中。如果 min 和 max 都为 0,则利用数据中的最大最小值作为边界。 参数 :::::::::::: - - **input** (Tensor) - 输入Tensor。维度为多维,数据类型为int32、int64、float32或float64。 - - **bins** (int,可选) - 直方图 bins(直条)的个数,默认为100。 - - **min** (int,可选) - range的下边界(包含),默认为0。 - - **max** (int,可选) - range的上边界(包含),默认为0。 + - **input** (Tensor) - 输入 Tensor。维度为多维,数据类型为 int32、int64、float32 或 float64。 + - **bins** (int,可选) - 直方图 bins(直条)的个数,默认为 100。 + - **min** (int,可选) - range 的下边界(包含),默认为 0。 + - **max** (int,可选) - range 的上边界(包含),默认为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/hub/Overview_cn.rst b/docs/api/paddle/hub/Overview_cn.rst index c07bdcb2be4..a1c06c1af9c 100644 --- a/docs/api/paddle/hub/Overview_cn.rst +++ b/docs/api/paddle/hub/Overview_cn.rst @@ -3,22 +3,22 @@ paddle.hub ------------------------------- -paddle.hub 是预训练模型库的集合,用来复用社区生产力,方便加载发布在github、gitee以及本地的预训练模型。飞桨提供框架模型拓展相关的API以及支持的模型库列表。具体如下: +paddle.hub 是预训练模型库的集合,用来复用社区生产力,方便加载发布在 github、gitee 以及本地的预训练模型。飞桨提供框架模型拓展相关的 API 以及支持的模型库列表。具体如下: -- :ref:`查看和加载API ` +- :ref:`查看和加载 API ` - :ref:`支持模型库列表 ` .. _about_hub_functions: -查看和加载API +查看和加载 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`list ` ", "查看Repo支持的模型列表" + " :ref:`list ` ", "查看 Repo 支持的模型列表" " :ref:`help ` ", "查看指定模型的文档" " :ref:`load ` ", "加载指定模型" diff --git a/docs/api/paddle/hub/help_cn.rst b/docs/api/paddle/hub/help_cn.rst index e82b1831cf4..b8170901de2 100644 --- a/docs/api/paddle/hub/help_cn.rst +++ b/docs/api/paddle/hub/help_cn.rst @@ -6,21 +6,21 @@ help .. 
py:function:: paddle.hub.help(repo_dir, model, source='github', force_reload=False) -用于查看repo提供的功能/模型的文档。 +用于查看 repo 提供的功能/模型的文档。 参数 ::::::::: - - **repo_dir** (str) - repo地址,支持git地址形式和local地址。git地址由repo拥有者/repo名字:repo分支组成,实例:PaddlePaddle/PaddleClas:develop;local地址为repo的本地路径。 + - **repo_dir** (str) - repo 地址,支持 git 地址形式和 local 地址。git 地址由 repo 拥有者/repo 名字:repo 分支组成,实例:PaddlePaddle/PaddleClas:develop;local 地址为 repo 的本地路径。 - **model** (str) - 模型的名字。 - - **source** (str,可选) - 指定repo托管的位置,支持github、gitee和local,默认值:github。 + - **source** (str,可选) - 指定 repo 托管的位置,支持 github、gitee 和 local,默认值:github。 - **force_reload** (bool,可选) - 指定是否强制拉取,默认值: False。 返回 ::::::::: - ``str`` ,repo提供的指定模型的文档。 + ``str`` ,repo 提供的指定模型的文档。 代码示例 diff --git a/docs/api/paddle/hub/list_cn.rst b/docs/api/paddle/hub/list_cn.rst index e304b9a2338..bfcc9ffad13 100644 --- a/docs/api/paddle/hub/list_cn.rst +++ b/docs/api/paddle/hub/list_cn.rst @@ -6,21 +6,21 @@ list .. py:function:: paddle.hub.list(repo_dir, source='github', force_reload=False) -用于查看指定repo提供的功能或者模型列表。 +用于查看指定 repo 提供的功能或者模型列表。 参数 ::::::::: - - **repo_dir** (str) - repo地址,支持git地址形式和local地址。git地址由repo拥有者/repo名字:repo分支组成,实例:PaddlePaddle/PaddleClas:develop;local地址为repo的本地路径。 - - **source** (str,可选) - 指定repo托管的位置,支持github、gitee和local,默认值:github。 + - **repo_dir** (str) - repo 地址,支持 git 地址形式和 local 地址。git 地址由 repo 拥有者/repo 名字:repo 分支组成,实例:PaddlePaddle/PaddleClas:develop;local 地址为 repo 的本地路径。 + - **source** (str,可选) - 指定 repo 托管的位置,支持 github、gitee 和 local,默认值:github。 - **force_reload** (bool,可选) - 指定是否强制拉取,默认值: False。 返回 ::::::::: - ``list`` ,repo提供的模型/功能列表。 + ``list`` ,repo 提供的模型/功能列表。 代码示例 diff --git a/docs/api/paddle/hub/load_cn.rst b/docs/api/paddle/hub/load_cn.rst index 32ab26ce9f3..584c20ce319 100644 --- a/docs/api/paddle/hub/load_cn.rst +++ b/docs/api/paddle/hub/load_cn.rst @@ -5,22 +5,22 @@ load .. 
py:function:: paddle.hub.load(repo_dir, model, source='github', force_reload=False, **kwargs) -用于加载repo提供的功能/模型列表。 +用于加载 repo 提供的功能/模型列表。 参数 ::::::::: - - **repo_dir** (str) - repo地址,支持git地址形式和local地址。git地址由repo拥有者/repo名字:repo分支组成,实例:PaddlePaddle/PaddleClas:develop;local地址为repo的本地路径。 + - **repo_dir** (str) - repo 地址,支持 git 地址形式和 local 地址。git 地址由 repo 拥有者/repo 名字:repo 分支组成,实例:PaddlePaddle/PaddleClas:develop;local 地址为 repo 的本地路径。 - **model** (str)- 模型的名字。 - - **source** (str,可选) - 指定repo托管的位置,支持github、gitee和local,默认值:github。 + - **source** (str,可选) - 指定 repo 托管的位置,支持 github、gitee 和 local,默认值:github。 - **force_reload** (bool,可选) - 指定是否强制拉取,默认值: False。 - **\*\*kwargs** (any,可选) - 模型参数。 返回 ::::::::: - ``paddle.nn.Layer`` ,repo提供的指定模型实例。 + ``paddle.nn.Layer`` ,repo 提供的指定模型实例。 代码示例 diff --git a/docs/api/paddle/in_dynamic_mode_cn.rst b/docs/api/paddle/in_dynamic_mode_cn.rst index 9051484c1bb..00582397e11 100644 --- a/docs/api/paddle/in_dynamic_mode_cn.rst +++ b/docs/api/paddle/in_dynamic_mode_cn.rst @@ -6,15 +6,15 @@ in_dynamic_mode .. py:function:: paddle.in_dynamic_mode() .. 
note:: - 从2.0.0版本开始,Paddle默认开启动态图模式。 + 从 2.0.0 版本开始,Paddle 默认开启动态图模式。 -该接口查看paddle当前是否在动态图模式中运行。 +该接口查看 paddle 当前是否在动态图模式中运行。 可以通过 :ref:`cn_api_paddle_enable_static` 开启静态图模式,:ref:`cn_api_paddle_disable_static` 关闭静态图模式。 返回 :::::::::::: -bool,如果paddle当前是在动态图模式运行,则返回 ``True``,否则返回 ``False``。 +bool,如果 paddle 当前是在动态图模式运行,则返回 ``True``,否则返回 ``False``。 代码示例 diff --git a/docs/api/paddle/increment_cn.rst b/docs/api/paddle/increment_cn.rst index d2a4376c82c..d33bafc8649 100644 --- a/docs/api/paddle/increment_cn.rst +++ b/docs/api/paddle/increment_cn.rst @@ -14,7 +14,7 @@ increment ::::::::: - **x** (Tensor) – 输入张量,必须始终只有一个元素。支持的数据类型:float32、float64、int32、int64。 - - **value** (float,可选) – ``x`` 的数值增量。默认值为1.0。 + - **value** (float,可选) – ``x`` 的数值增量。默认值为 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/incubate/LookAhead_cn.rst b/docs/api/paddle/incubate/LookAhead_cn.rst index 7fb6d276372..ea7ce3d2b2a 100644 --- a/docs/api/paddle/incubate/LookAhead_cn.rst +++ b/docs/api/paddle/incubate/LookAhead_cn.rst @@ -4,9 +4,9 @@ LookAhead ------------------------------- .. py:function:: class paddle.incubate.LookAhead(inner_optimizer, alpha=0.5, k=5, name=None) -此API为论文 `Lookahead Optimizer: k steps forward, 1 step back `_ 中Lookahead优化器的实现。 -Lookahead保留两组参数:fast_params和slow_params。每次训练迭代中inner_optimizer更新fast_params。 -Lookahead每k次训练迭代更新slow_params和fast_params,如下所示: +此 API 为论文 `Lookahead Optimizer: k steps forward, 1 step back `_ 中 Lookahead 优化器的实现。 +Lookahead 保留两组参数:fast_params 和 slow_params。每次训练迭代中 inner_optimizer 更新 fast_params。 +Lookahead 每 k 次训练迭代更新 slow_params 和 fast_params,如下所示: .. 
math:: @@ -17,9 +17,9 @@ Lookahead每k次训练迭代更新slow_params和fast_params,如下所示: 参数 ::::::::: - - **inner_optimizer** (inner_optimizer) - 每次迭代更新fast params的优化器。 - - **alpha** (float,可选) - Lookahead的学习率。默认值为0.5。 - - **k** (int,可选) - slow params每k次迭代更新一次。默认值为5。 + - **inner_optimizer** (inner_optimizer) - 每次迭代更新 fast params 的优化器。 + - **alpha** (float,可选) - Lookahead 的学习率。默认值为 0.5。 + - **k** (int,可选) - slow params 每 k 次迭代更新一次。默认值为 5。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 代码示例 @@ -127,13 +127,13 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) **参数** - **loss** (Tensor) - 包含要最小化的值的张量。 - - **startup_program** (Program,可选) - :ref:`cn_api_fluid_Program`。在 ``parameters`` 中初始化参数。默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` 。 - - **parameters** (list,可选) - 列出更新最小化 ``loss`` 的 ``Tensor`` 或 ``Tensor.name``。默认值为None,此时所有参数都会被更新。 - - **no_grad_set** (set,可选) - 不需要更新的 ``Tensor`` 或 ``Tensor.name`` 的集合。默认值为None。 + - **startup_program** (Program,可选) - :ref:`cn_api_fluid_Program`。在 ``parameters`` 中初始化参数。默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program` 。 + - **parameters** (list,可选) - 列出更新最小化 ``loss`` 的 ``Tensor`` 或 ``Tensor.name``。默认值为 None,此时所有参数都会被更新。 + - **no_grad_set** (set,可选) - 不需要更新的 ``Tensor`` 或 ``Tensor.name`` 的集合。默认值为 None。 **返回** -tuple: tuple (optimize_ops, params_grads),由 ``minimize`` 添加的操作列表和 ``(param, grad)`` 张量对的列表,其中param是参数,grad参数对应的梯度值。在静态图模式中,返回的元组可以传给 ``Executor.run()`` 中的 ``fetch_list`` 来表示程序剪枝。这样程序在运行之前会通过 ``feed`` 和 ``fetch_list`` 被剪枝,详情请参考 ``Executor`` 。 +tuple: tuple (optimize_ops, params_grads),由 ``minimize`` 添加的操作列表和 ``(param, grad)`` 张量对的列表,其中 param 是参数,grad 参数对应的梯度值。在静态图模式中,返回的元组可以传给 ``Executor.run()`` 中的 ``fetch_list`` 来表示程序剪枝。这样程序在运行之前会通过 ``feed`` 和 ``fetch_list`` 被剪枝,详情请参考 ``Executor`` 。 **代码示例** diff --git a/docs/api/paddle/incubate/ModelAverage_cn.rst b/docs/api/paddle/incubate/ModelAverage_cn.rst index 337b22c62d5..2bd598c7c14 100644 --- 
a/docs/api/paddle/incubate/ModelAverage_cn.rst +++ b/docs/api/paddle/incubate/ModelAverage_cn.rst @@ -46,7 +46,7 @@ minimize(loss, startup_program=None, parameters=None, no_grad_set=None) **返回** -tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由 (param, param_grad) 组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 +tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由 (param, param_grad) 组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -75,7 +75,7 @@ apply(executor=None, need_restore=True) **参数** - **executor** (Executor) – 静态图模式下当前网络的执行器;动态图模式下默认值为 None。 - - **need_restore** (bool) - 恢复标志变量;设为True 时,执行完成后会将网络的 ``Parameters``恢复为网络默认的值,设为 False 将不会恢复。默认值为 True。 + - **need_restore** (bool) - 恢复标志变量;设为 True 时,执行完成后会将网络的 ``Parameters``恢复为网络默认的值,设为 False 将不会恢复。默认值为 True。 **代码示例** diff --git a/docs/api/paddle/incubate/autograd/Hessian_cn.rst b/docs/api/paddle/incubate/autograd/Hessian_cn.rst index 054ab6aaff7..662cff20e6c 100644 --- a/docs/api/paddle/incubate/autograd/Hessian_cn.rst +++ b/docs/api/paddle/incubate/autograd/Hessian_cn.rst @@ -7,15 +7,15 @@ Hessian 计算函数 ``func`` 在 ``xs`` 处的海森矩阵。 -其中,函数 ``func`` 的输入可以为Tensor或Tensor序列,输出要求为只包含单个元素的Tensor, -``is_batched`` 表示是否支持batch, ``True`` 表示支持并默认第零维作为batch维。 +其中,函数 ``func`` 的输入可以为 Tensor 或 Tensor 序列,输出要求为只包含单个元素的 Tensor, +``is_batched`` 表示是否支持 batch, ``True`` 表示支持并默认第零维作为 batch 维。 -在计算海森矩阵时,所有输入Tensor会沿着batch维外的其它维度进行展平,且当输入为Tensor序列时, -所有展平后的Tensor会被拼接成一个新的Tensor。因此,``Hessian`` 最终的输出为一个二维(不包含 -batch)或三维(包含batch,第零维为batch)的Tensor。 +在计算海森矩阵时,所有输入 Tensor 会沿着 batch 维外的其它维度进行展平,且当输入为 Tensor 序列时, +所有展平后的 Tensor 会被拼接成一个新的 Tensor。因此,``Hessian`` 最终的输出为一个二维(不包含 +batch)或三维(包含 batch,第零维为 batch)的 Tensor。 -例如,假设 
``is_batched=True``,输入Tensor经过展平并拼接后的形状为 ``(B, M)``,输出 -Tensor形状为 ``(B, 1)``,则最终输出海森矩阵形状为 ``(B, M, M)``。其中,``B`` 为batch +例如,假设 ``is_batched=True``,输入 Tensor 经过展平并拼接后的形状为 ``(B, M)``,输出 +Tensor 形状为 ``(B, 1)``,则最终输出海森矩阵形状为 ``(B, M, M)``。其中,``B`` 为 batch 维大小,``M`` 为展平并拼接后的输入大小。 可以通过对 ``Hessian`` 对象多维索引获取整个矩阵或子矩阵的实际结果,子矩阵会以惰性求值方式计算, @@ -25,18 +25,18 @@ Tensor形状为 ``(B, 1)``,则最终输出海森矩阵形状为 ``(B, M, M)`` 当前暂不支持省略号索引。 .. warning:: - 该API目前为Beta版本,函数签名在未来版本可能发生变化。 + 该 API 目前为 Beta 版本,函数签名在未来版本可能发生变化。 参数 ::::::::: -- **func** (Callable) - Python函数,输入参数为 ``xs``,输出为只包含一个元素Tensor,即 - 如果 ``is_batched=True``,输出形状为 ``(B, 1)`` , ``B`` 表示batch大小, +- **func** (Callable) - Python 函数,输入参数为 ``xs``,输出为只包含一个元素 Tensor,即 + 如果 ``is_batched=True``,输出形状为 ``(B, 1)`` , ``B`` 表示 batch 大小, ``is_batched=False``,输出形状为 ``(1)`` 。 -- **xs** (Tensor|Sequence[Tensor]) - 函数 ``func`` 的输入参数,数据类型为Tensor或 - Tensor序列。 -- **is_batched** (bool) - ``True`` 表示包含batch维,且默认第零维为batch维,``False`` - 表示不包含batch。默认值为 ``False`` 。 +- **xs** (Tensor|Sequence[Tensor]) - 函数 ``func`` 的输入参数,数据类型为 Tensor 或 + Tensor 序列。 +- **is_batched** (bool) - ``True`` 表示包含 batch 维,且默认第零维为 batch 维,``False`` + 表示不包含 batch。默认值为 ``False`` 。 返回 ::::::::: diff --git a/docs/api/paddle/incubate/autograd/Jacobian_cn.rst b/docs/api/paddle/incubate/autograd/Jacobian_cn.rst index be8c4362428..8902f133f8e 100644 --- a/docs/api/paddle/incubate/autograd/Jacobian_cn.rst +++ b/docs/api/paddle/incubate/autograd/Jacobian_cn.rst @@ -7,16 +7,16 @@ Jacobian 计算函数 ``func`` 在 ``xs`` 处的雅可比矩阵。 -其中,函数 ``func`` 的输入、输出可以为Tensor或Tensor序列,``is_batched=True`` 表示是否支 -持batch, ``True`` 表示输入和输出的第零维是batch。 +其中,函数 ``func`` 的输入、输出可以为 Tensor 或 Tensor 序列,``is_batched=True`` 表示是否支 +持 batch, ``True`` 表示输入和输出的第零维是 batch。 -在计算雅可比矩阵时,输入Tensor batch维外的其它维度会被展平,且当输入为Tensor序列时, -所有展平后的Tensor会被拼接成一个新的Tensor。输出按照同样规则进行处理。因此,``Jacobian`` 最终 -的输出为一个二维(不包含batch)或三维(包含batch,第零维为batch)的Tensor。 +在计算雅可比矩阵时,输入 Tensor batch 维外的其它维度会被展平,且当输入为 Tensor 序列时, +所有展平后的 Tensor 会被拼接成一个新的 
Tensor。输出按照同样规则进行处理。因此,``Jacobian`` 最终 +的输出为一个二维(不包含 batch)或三维(包含 batch,第零维为 batch)的 Tensor。 -例如,假设 ``is_batched=True``,输入Tensor经过展平并拼接后的形状为 ``(B, M)``,输出 -Tensor经过展平并拼接后的形状为 ``(B, N)``,则最终输出雅可比矩阵形状为 ``(B, M, N)`` 。 -其中,``B`` 为batch维大小,``M`` 为展平并拼接后的输入大小,``N`` 为展平并拼接后的输出大小。 +例如,假设 ``is_batched=True``,输入 Tensor 经过展平并拼接后的形状为 ``(B, M)``,输出 +Tensor 经过展平并拼接后的形状为 ``(B, N)``,则最终输出雅可比矩阵形状为 ``(B, M, N)`` 。 +其中,``B`` 为 batch 维大小,``M`` 为展平并拼接后的输入大小,``N`` 为展平并拼接后的输出大小。 ``Jacobian`` 对象被创建后,并没有发生实际的计算过程,而是采用惰性求值方法进行计算,可以通过 对 ``Jacobian`` 多维索引获取整个雅可比矩阵或子矩阵的实际结果,并且实际计算也发生在这一过程,已 @@ -27,22 +27,22 @@ Tensor经过展平并拼接后的形状为 ``(B, N)``,则最终输出雅可比 第 ``1`` 行到第 ``3`` 进行求值,并且 ``1`` 到 ``3`` 行的计算结果会以行的粒度进行缓存,下次再 获取上述某一行或多行结果时不会发生重复计算。 -更多索引方式可以参考Paddle官网 `索引和切片 `_ 。 +更多索引方式可以参考 Paddle 官网 `索引和切片 `_ 。 .. note:: 当前暂不支持省略号索引。 .. warning:: - 该API目前为Beta版本,函数签名在未来版本可能发生变化。 + 该 API 目前为 Beta 版本,函数签名在未来版本可能发生变化。 参数 ::::::::: -- **func** (Callable) - Python函数,输入参数为 ``xs``,输出为Tensor或Tensor序列。 -- **xs** (Tensor|Sequence[Tensor]) - 函数 ``func`` 的输入参数,数据类型为Tensor或 - Tensor序列。 -- **is_batched** (bool) - ``True`` 表示包含batch维,且默认第零维为batch维,``False`` - 表示不包含batch。默认值为 ``False`` 。 +- **func** (Callable) - Python 函数,输入参数为 ``xs``,输出为 Tensor 或 Tensor 序列。 +- **xs** (Tensor|Sequence[Tensor]) - 函数 ``func`` 的输入参数,数据类型为 Tensor 或 + Tensor 序列。 +- **is_batched** (bool) - ``True`` 表示包含 batch 维,且默认第零维为 batch 维,``False`` + 表示不包含 batch。默认值为 ``False`` 。 返回 ::::::::: diff --git a/docs/api/paddle/incubate/autograd/Overview_cn.rst b/docs/api/paddle/incubate/autograd/Overview_cn.rst index 1bdf4ab6128..6d4dee7afac 100644 --- a/docs/api/paddle/incubate/autograd/Overview_cn.rst +++ b/docs/api/paddle/incubate/autograd/Overview_cn.rst @@ -3,20 +3,20 @@ paddle.incubate.autograd --------------------- -paddle.incubate.autograd 目录下包含飞桨框架提供的自动微分相关的一些探索性API。具体如下: +paddle.incubate.autograd 目录下包含飞桨框架提供的自动微分相关的一些探索性 API。具体如下: -- :ref:`自动微分机制切换API ` -- :ref:`自动微分基础算子与原生算子转换API ` -- :ref:`函数式自动微分API ` +- :ref:`自动微分机制切换 API ` +- :ref:`自动微分基础算子与原生算子转换 
API ` +- :ref:`函数式自动微分 API ` .. _mode_switching_apis: -自动微分机制切换API +自动微分机制切换 API ========================== .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.incubate.autograd.enable_prim ` ", "开启基于自动微分基础算子的自动微分机制" " :ref:`paddle.incubate.autograd.disable_prim ` ", "关闭基于自动微分基础算子的自动微分机制" @@ -25,22 +25,22 @@ paddle.incubate.autograd 目录下包含飞桨框架提供的自动微分相关 .. _transform_apis: -自动微分基础算子与原生算子转换API +自动微分基础算子与原生算子转换 API ========================== .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.incubate.autograd.prim2orig ` ", "自动微分基础算子转换为等价功能原生算子" .. _functional_apis: -函数式自动微分API +函数式自动微分 API ========================== .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.incubate.autograd.jvp ` ", "雅可比矩阵与向量乘积" " :ref:`paddle.incubate.autograd.vjp ` ", "向量与雅可比矩阵乘积" @@ -67,7 +67,7 @@ paddle.incubate.autograd 目录下包含飞桨框架提供的自动微分相关 原生算子体系和自动微分基础算子体系之间的转化: 一方面,原生算子体系中的算子语义往往比较复杂,需要拆分为多个自动微分基础算子的组合。 -另一方面,自动微分基础算子由于没有kernel实现,不能直接执行,在进行完自动微分变换之后,需要转化为同语义的原生算子才可以执行。 +另一方面,自动微分基础算子由于没有 kernel 实现,不能直接执行,在进行完自动微分变换之后,需要转化为同语义的原生算子才可以执行。 通过定义原生算子和自动微分基础算子之间的转化规则,在程序变换 orig2prim 和 prim2orig 中应用对应的规则,分别完成原生算子到自动微分基础算子和自动微分基础算子到原生算子之间的转化。 自动微分规则及其对应的程序变换: @@ -200,9 +200,9 @@ linearize 和 transpose 程序变换的想法来自 `JAX `_ 。 -fused_multi_head_attention 算子目前只支持在GPU下运行,其包含的计算功能如下: +fused_multi_head_attention 算子目前只支持在 GPU 下运行,其包含的计算功能如下: .. 
code-block:: ipython @@ -39,8 +39,8 @@ fused_multi_head_attention 算子目前只支持在GPU下运行,其包含的 out = layer_norm(x + dropout(linear_bias + out)) -值得注意的是,该API中,q, k, v 的 weight 被统一存储在一个权重张量中,形状为 `[3, num_heads, head_dim, embed_dim]` , -如果想得到单独的q, k 或v的 weight,可以通过转置和切分得到。 +值得注意的是,该 API 中,q, k, v 的 weight 被统一存储在一个权重张量中,形状为 `[3, num_heads, head_dim, embed_dim]` , +如果想得到单独的 q, k 或 v 的 weight,可以通过转置和切分得到。 参数 @@ -50,19 +50,19 @@ fused_multi_head_attention 算子目前只支持在GPU下运行,其包含的 - **x** (Tensor) - 输入的 ``Tensor``,代表 Query,是一个三维 tensor,形状为 ``[batch_size, sequence_length, embed_dim]``。其中,batch_size 是一次训练所处理的样本个数(句子个数);sequence_length 代表每一个样本序列(每句话)中的 word 个数;embed_dim 代表 word 经过 embedding 后得到的向量长度。 - **qkv_weight** (Tensor) - 代表 Attention 中计算 q, k, v 时的权重,是一个四维 tensor,形状为 ``[3, num_heads, head_dim, embed_dim]``。其中,3 代表 qkv_weight 是包含了 q, k, v 三个权重矩阵,num_heads 代表 multi-head attention 中的 head 数量,head_dim 代表 head 的维度。 - **linear_weight** (Tensor) - 代表 linear 的权重,二维 tensor,形状为 ``[embed_dim, embed_dim]`` 。 - - **normalize_before** (bool,可选) - 代表是采用 pre_layer_norm 的结构(True)还是 post_layer_norm 的结构(False)。若为True,则为 pre_layer_norm 结构,代表在 multi-head attention 和 ffn 之前各执行一次 ``layer_norm``。若为False,则为 post_layer_norm 结构,代表在 multi-head attention 和 ffn 之后各执行一次 ``layer_norm``。默认值:``False`` 。 - - **pre_ln_scale** (Tensor,可选) - 代表 normalize_before 为True 时,multi-head attention 中第一个 ``layer_norm`` 的权重,一维tensor,形状为 ``[embed_dim]`` 。 - - **pre_ln_bias** (Tensor,可选) - 代表 normalize_before 为True 时,multi_head attention 中第一个 ``layer_norm`` 的偏置,一维tensor,形状为 ``[embed_dim]`` 。 - - **ln_scale** (Tensor,可选) - 代表 normalize_before 为True 时,multi-head attention 中第二个 (False时的第一个) ``layer_norm`` 的权重,一维tensor,形状为 ``[embed_dim]`` 。 - - **ln_bias** (Tensor,可选) - 代表 normalize_before 为True 时,multi-head attention 中第二个 (False时的第一个) ``layer_norm`` 的偏置,一维tensor,形状为 ``[embed_dim]`` 。 - - **pre_ln_epsilon** (float,可选) - 代表 normalize_before 为True 时,multi-head attention 中第一个 ``layer_norm`` 为了数值稳定加在分母上的值。默认值为 1e-05 。 + - 
**normalize_before** (bool,可选) - 代表是采用 pre_layer_norm 的结构(True)还是 post_layer_norm 的结构(False)。若为 True,则为 pre_layer_norm 结构,代表在 multi-head attention 和 ffn 之前各执行一次 ``layer_norm``。若为 False,则为 post_layer_norm 结构,代表在 multi-head attention 和 ffn 之后各执行一次 ``layer_norm``。默认值:``False`` 。 + - **pre_ln_scale** (Tensor,可选) - 代表 normalize_before 为 True 时,multi-head attention 中第一个 ``layer_norm`` 的权重,一维 tensor,形状为 ``[embed_dim]`` 。 + - **pre_ln_bias** (Tensor,可选) - 代表 normalize_before 为 True 时,multi_head attention 中第一个 ``layer_norm`` 的偏置,一维 tensor,形状为 ``[embed_dim]`` 。 + - **ln_scale** (Tensor,可选) - 代表 normalize_before 为 True 时,multi-head attention 中第二个 (False 时的第一个) ``layer_norm`` 的权重,一维 tensor,形状为 ``[embed_dim]`` 。 + - **ln_bias** (Tensor,可选) - 代表 normalize_before 为 True 时,multi-head attention 中第二个 (False 时的第一个) ``layer_norm`` 的偏置,一维 tensor,形状为 ``[embed_dim]`` 。 + - **pre_ln_epsilon** (float,可选) - 代表 normalize_before 为 True 时,multi-head attention 中第一个 ``layer_norm`` 为了数值稳定加在分母上的值。默认值为 1e-05 。 - **qkv_bias** (Tensor,可选) - 代表 Attention 中计算 q, k, v 时的偏置,是一个三维 tensor,形状为 ``[3, num_heads, head_dim]`` 。 - - **linear_bias** (Tensor,可选) - 代表 ``linear`` 的偏置,一维tensor,形状为 ``[embed_dim]`` 。 - - **cache_kv** (Tensor,可选) - 代表自回归生成模型中cache结构的部分,五维tensor,形状为 ``[2, bsz, num_head, seq_len, head_dim]``。默认值为None。 - - **attn_mask** (Tensor,可选)- 用于限制 multi-head attention中对当前词产生影响的其他词的范围。形状会被广播为 ``[batch_size, num_heads, sequence_length, sequence_length ]`` 。 - - **dropout_rate** (float,可选) - 代表 multi-head attention 之后的 dropout 算子的 dropout 比例,默认为0.5。 - - **attn_dropout_rate** (float,可选) - 代表 multi-head attention 中的 dropout 算子的 dropout 比例,默认为0.5。 - - **ln_epsilon** (float,可选) - 代表 normalize_before 为True 时,multi-head attention 中第二个 (False时的第一个) ``layer_norm`` 为了数值稳定加在分母上的值。默认值为 1e-05 。 + - **linear_bias** (Tensor,可选) - 代表 ``linear`` 的偏置,一维 tensor,形状为 ``[embed_dim]`` 。 + - **cache_kv** (Tensor,可选) - 代表自回归生成模型中 cache 结构的部分,五维 tensor,形状为 ``[2, bsz, num_head, seq_len, head_dim]``。默认值为 None。 + - **attn_mask** 
(Tensor,可选)- 用于限制 multi-head attention 中对当前词产生影响的其他词的范围。形状会被广播为 ``[batch_size, num_heads, sequence_length, sequence_length ]`` 。 + - **dropout_rate** (float,可选) - 代表 multi-head attention 之后的 dropout 算子的 dropout 比例,默认为 0.5。 + - **attn_dropout_rate** (float,可选) - 代表 multi-head attention 中的 dropout 算子的 dropout 比例,默认为 0.5。 + - **ln_epsilon** (float,可选) - 代表 normalize_before 为 True 时,multi-head attention 中第二个 (False 时的第一个) ``layer_norm`` 为了数值稳定加在分母上的值。默认值为 1e-05 。 - **training** (bool):标记是否为训练阶段。默认:True。 - **mode** (str):丢弃单元的方式,有两种'upscale_in_train'和'downscale_in_infer',默认:'upscale_in_train'。计算方法如下: @@ -75,7 +75,7 @@ fused_multi_head_attention 算子目前只支持在GPU下运行,其包含的 - train: out = input * mask - inference: out = input * (1.0 - p) - - **ring_id** (int,可选) - 分布式tensor parallel运行下通讯所使用的NCCL id。默认值为 -1 。 + - **ring_id** (int,可选) - 分布式 tensor parallel 运行下通讯所使用的 NCCL id。默认值为 -1 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/incubate/optimizer/functional/minimize_bfgs_cn.rst b/docs/api/paddle/incubate/optimizer/functional/minimize_bfgs_cn.rst index 17aaf8dc99a..853c0350fb9 100644 --- a/docs/api/paddle/incubate/optimizer/functional/minimize_bfgs_cn.rst +++ b/docs/api/paddle/incubate/optimizer/functional/minimize_bfgs_cn.rst @@ -12,7 +12,7 @@ minimize_bfgs x_{k+1} = x_{k} + H_k \nabla{f_k} -如果 :math:`H_k` 是函数 :math:`f` 在 :math:`x_k`的逆海森矩阵,此时就是牛顿法。如果 :math:`H_k` 满足对称性和正定性,用来作为逆海森矩阵的近似,则为高斯-牛顿法。在实际算法中,近似逆海森矩阵是通过整个或部分搜索历史的梯度计算得到,前者对应BFGS,后者对应于L-BFGS。 +如果 :math:`H_k` 是函数 :math:`f` 在 :math:`x_k`的逆海森矩阵,此时就是牛顿法。如果 :math:`H_k` 满足对称性和正定性,用来作为逆海森矩阵的近似,则为高斯-牛顿法。在实际算法中,近似逆海森矩阵是通过整个或部分搜索历史的梯度计算得到,前者对应 BFGS,后者对应于 L-BFGS。 参考 @@ -21,16 +21,16 @@ minimize_bfgs 参数 ::::::::: - - **objective_func** (callable) - 待优化的目标函数,接受1维 Tensor 并返回一个标量。 + - **objective_func** (callable) - 待优化的目标函数,接受 1 维 Tensor 并返回一个标量。 - **initial_position** (Tensor) - 迭代的初始位置,与 ``objective_func`` 的输入形状相同。 - - **max_iters** (int,可选) - BFGS迭代的最大次数。默认值:50。 + - **max_iters** 
(int,可选) - BFGS 迭代的最大次数。默认值:50。 - **tolerance_grad** (float,可选) - 当梯度的范数小于该值时,终止迭代。当前使用正无穷范数。默认值:1e-7。 - - **tolerance_change** (float,可选) - 当函数值/x值/其他参数 两次迭代的改变量小于该值时,终止迭代。默认值:1e-9。 - - **initial_inverse_hessian_estimate** (Tensor,可选) - 函数在初始位置时的近似逆海森矩阵,必须满足对称性和正定性。当为None时,将使用N阶单位矩阵,其中N为 ``initial_position`` 的size。默认值:None。 + - **tolerance_change** (float,可选) - 当函数值/x 值/其他参数 两次迭代的改变量小于该值时,终止迭代。默认值:1e-9。 + - **initial_inverse_hessian_estimate** (Tensor,可选) - 函数在初始位置时的近似逆海森矩阵,必须满足对称性和正定性。当为 None 时,将使用 N 阶单位矩阵,其中 N 为 ``initial_position`` 的 size。默认值:None。 - **line_search_fn** (str,可选) - 指定要使用的线搜索方法,目前只支持值为'strong wolfe'方法,未来将支持'Hager Zhang'方法。默认值:'strong wolfe'。 - **max_line_search_iters** (int,可选) - 线搜索的最大迭代次数。默认值:50。 - - **initial_step_length** (float,可选) - 线搜索中第一次迭代时的步长,不同的初始步长可能会产生不同的优化结果。对于高斯牛顿类方法初始的试验步长应该总是1。默认值:1.0。 - - **dtype** ('float32' | 'float64',可选) - 在算法中使用的数据类型,输入参数的数据类型必须与dtype保持一致。默认值:'float32'。 + - **initial_step_length** (float,可选) - 线搜索中第一次迭代时的步长,不同的初始步长可能会产生不同的优化结果。对于高斯牛顿类方法初始的试验步长应该总是 1。默认值:1.0。 + - **dtype** ('float32' | 'float64',可选) - 在算法中使用的数据类型,输入参数的数据类型必须与 dtype 保持一致。默认值:'float32'。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/incubate/optimizer/functional/minimize_lbfgs_cn.rst b/docs/api/paddle/incubate/optimizer/functional/minimize_lbfgs_cn.rst index 2128af1eb04..51ed2f94652 100644 --- a/docs/api/paddle/incubate/optimizer/functional/minimize_lbfgs_cn.rst +++ b/docs/api/paddle/incubate/optimizer/functional/minimize_lbfgs_cn.rst @@ -10,7 +10,7 @@ minimize_lbfgs .. 
math:: x_{k+1} = x_{k} + H_k \nabla{f_k} -如果 :math:`H_k` 是函数 :math:`f` 在 :math:`x_k`的逆海森矩阵,此时就是牛顿法。如果 :math:`H_k` 满足对称性和正定性,用来作为逆海森矩阵的近似,则为高斯-牛顿法。在实际算法中,近似逆海森矩阵是通过整个或部分搜索历史的梯度计算得到,前者对应BFGS,后者对应于L-BFGS。 +如果 :math:`H_k` 是函数 :math:`f` 在 :math:`x_k`的逆海森矩阵,此时就是牛顿法。如果 :math:`H_k` 满足对称性和正定性,用来作为逆海森矩阵的近似,则为高斯-牛顿法。在实际算法中,近似逆海森矩阵是通过整个或部分搜索历史的梯度计算得到,前者对应 BFGS,后者对应于 L-BFGS。 参考 @@ -19,17 +19,17 @@ minimize_lbfgs 参数 ::::::::: - - **objective_func** (callable) - 待优化的目标函数,接受1维 Tensor 并返回一个标量。 + - **objective_func** (callable) - 待优化的目标函数,接受 1 维 Tensor 并返回一个标量。 - **initial_position** (Tensor) - 迭代的初始位置,与 ``objective_func`` 的输入形状相同。 - **history_size** (Scalar,可选) - 指定储存的向量对{si,yi}数量。默认值:100。 - - **max_iters** (int,可选) - BFGS迭代的最大次数。默认值:50。 + - **max_iters** (int,可选) - BFGS 迭代的最大次数。默认值:50。 - **tolerance_grad** (float,可选) - 当梯度的范数小于该值时,终止迭代。当前使用正无穷范数。默认值:1e-7。 - - **tolerance_change** (float,可选) - 当函数值/x值/其他参数 两次迭代的改变量小于该值时,终止迭代。默认值:1e-9。 - - **initial_inverse_hessian_estimate** (Tensor,可选) - 函数在初始位置时的近似逆海森矩阵,必须满足对称性和正定性。当为None时,将使用N阶单位矩阵,其中N为 ``initial_position`` 的size。默认值:None。 + - **tolerance_change** (float,可选) - 当函数值/x 值/其他参数 两次迭代的改变量小于该值时,终止迭代。默认值:1e-9。 + - **initial_inverse_hessian_estimate** (Tensor,可选) - 函数在初始位置时的近似逆海森矩阵,必须满足对称性和正定性。当为 None 时,将使用 N 阶单位矩阵,其中 N 为 ``initial_position`` 的 size。默认值:None。 - **line_search_fn** (str,可选) - 指定要使用的线搜索方法,目前只支持值为'strong wolfe'方法,未来将支持'Hager Zhang'方法。默认值:'strong wolfe'。 - **max_line_search_iters** (int,可选) - 线搜索的最大迭代次数。默认值:50。 - - **initial_step_length** (float,可选) - 线搜索中第一次迭代时的步长,不同的初始步长可能会产生不同的优化结果。对于高斯牛顿类方法初始的试验步长应该总是1。默认值:1.0。 - - **dtype** ('float32' | 'float64',可选) - 在算法中使用的数据类型,输入参数的数据类型必须与dtype保持一致。默认值:'float32'。 + - **initial_step_length** (float,可选) - 线搜索中第一次迭代时的步长,不同的初始步长可能会产生不同的优化结果。对于高斯牛顿类方法初始的试验步长应该总是 1。默认值:1.0。 + - **dtype** ('float32' | 'float64',可选) - 在算法中使用的数据类型,输入参数的数据类型必须与 dtype 保持一致。默认值:'float32'。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git 
a/docs/api/paddle/incubate/segment_max_cn.rst b/docs/api/paddle/incubate/segment_max_cn.rst index c585ec1308a..835d756772e 100644 --- a/docs/api/paddle/incubate/segment_max_cn.rst +++ b/docs/api/paddle/incubate/segment_max_cn.rst @@ -9,7 +9,7 @@ segment_max 分段求最大值函数。 此运算符,将 ``segment_ids`` 中相同索引对应的 ``data`` 的元素,进行求最大值操作。其中 ``segment_ids`` 是一个单调非减序列。 -具体而言,该算子计算一个Tensor ``out``,使得 +具体而言,该算子计算一个 Tensor ``out``,使得 .. math:: @@ -26,7 +26,7 @@ segment_max 返回 ::::::::: - Tensor,分段求最大值的结果。空的segment_id对应的默认值为0。 + Tensor,分段求最大值的结果。空的 segment_id 对应的默认值为 0。 代码示例 ::::::::: diff --git a/docs/api/paddle/incubate/segment_mean_cn.rst b/docs/api/paddle/incubate/segment_mean_cn.rst index 200e549c491..d3de3cbe934 100644 --- a/docs/api/paddle/incubate/segment_mean_cn.rst +++ b/docs/api/paddle/incubate/segment_mean_cn.rst @@ -9,7 +9,7 @@ segment_mean 分段求均值函数。 此运算符,将 ``segment_ids`` 中相同索引对应的 ``data`` 的元素,进行求均值操作。其中 ``segment_ids`` 是一个单调非减序列。 -具体而言,该算子计算一个Tensor ``out``,使得 +具体而言,该算子计算一个 Tensor ``out``,使得 .. math:: @@ -26,7 +26,7 @@ segment_mean 返回 ::::::::: - Tensor,分段求均值的结果。空的segment_id对应的默认值为0。 + Tensor,分段求均值的结果。空的 segment_id 对应的默认值为 0。 代码示例 ::::::::: diff --git a/docs/api/paddle/incubate/segment_min_cn.rst b/docs/api/paddle/incubate/segment_min_cn.rst index 8abc34e9c38..bf4df0e2149 100644 --- a/docs/api/paddle/incubate/segment_min_cn.rst +++ b/docs/api/paddle/incubate/segment_min_cn.rst @@ -9,7 +9,7 @@ segment_min 分段求最小值函数。 此运算符,将 ``segment_ids`` 中相同索引对应的 ``data`` 的元素,进行求最小值操作。其中 ``segment_ids`` 是一个单调非减序列。 -具体而言,该算子计算一个Tensor ``out``,使得 +具体而言,该算子计算一个 Tensor ``out``,使得 .. 
math:: @@ -26,7 +26,7 @@ segment_min 返回 ::::::::: - Tensor,分段求最小值的结果。空的segment_id对应的默认值为0。 + Tensor,分段求最小值的结果。空的 segment_id 对应的默认值为 0。 代码示例 ::::::::: diff --git a/docs/api/paddle/incubate/segment_sum_cn.rst b/docs/api/paddle/incubate/segment_sum_cn.rst index 5737674dbbf..37f7785e264 100644 --- a/docs/api/paddle/incubate/segment_sum_cn.rst +++ b/docs/api/paddle/incubate/segment_sum_cn.rst @@ -9,7 +9,7 @@ segment_sum 分段求和函数。 此运算符,将 ``segment_ids`` 中相同索引对应的 ``data`` 的元素,进行求和操作。其中 ``segment_ids`` 是一个单调非减序列。 -具体而言,该算子计算一个Tensor ``out``,使得 +具体而言,该算子计算一个 Tensor ``out``,使得 .. math:: @@ -28,7 +28,7 @@ segment_sum 返回 ::::::::: - Tensor,分段求和的结果。空的segment_id对应的默认值为0。 + Tensor,分段求和的结果。空的 segment_id 对应的默认值为 0。 代码示例 ::::::::: diff --git a/docs/api/paddle/incubate/softmax_mask_fuse_cn.rst b/docs/api/paddle/incubate/softmax_mask_fuse_cn.rst index cea5652eae3..82029a1af09 100644 --- a/docs/api/paddle/incubate/softmax_mask_fuse_cn.rst +++ b/docs/api/paddle/incubate/softmax_mask_fuse_cn.rst @@ -5,18 +5,18 @@ softmax_mask_fuse .. py:function:: paddle.incubate.softmax_mask_fuse(x, mask, name=None) -该op是对输入 ``x`` 进行被输入 ``mask`` mask后的softmax操作。该op主要针对加速Transformer架构而设计。将 ``tmp = x + mask, rst = softmax(tmp)`` 两个操作合为一个操作。计算公式为: +该 op 是对输入 ``x`` 进行被输入 ``mask`` mask 后的 softmax 操作。该 op 主要针对加速 Transformer 架构而设计。将 ``tmp = x + mask, rst = softmax(tmp)`` 两个操作合为一个操作。计算公式为: .. math:: out = softmax(x + mask) .. 
note:: - 该API只可在GPU上运行 + 该 API 只可在 GPU 上运行 参数 ::::::::: - - x (4-D Tensor) - 输入的Tensor,必须为4D的shape,数据类型为:float16、float32。x的第四维必须大于等于32,并且小于8192。 - - mask (4-D Tensor) - 输入的Tensor,必须为4D的shape,数据类型为:float16、float32。mask的第二维必须为1,其余维度必须与x的对应维度相同。 + - x (4-D Tensor) - 输入的 Tensor,必须为 4D 的 shape,数据类型为:float16、float32。x 的第四维必须大于等于 32,并且小于 8192。 + - mask (4-D Tensor) - 输入的 Tensor,必须为 4D 的 shape,数据类型为:float16、float32。mask 的第二维必须为 1,其余维度必须与 x 的对应维度相同。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/incubate/softmax_mask_fuse_upper_triangle_cn.rst b/docs/api/paddle/incubate/softmax_mask_fuse_upper_triangle_cn.rst index e04de7f4c9c..2ec227d895a 100644 --- a/docs/api/paddle/incubate/softmax_mask_fuse_upper_triangle_cn.rst +++ b/docs/api/paddle/incubate/softmax_mask_fuse_upper_triangle_cn.rst @@ -5,17 +5,17 @@ softmax_mask_fuse_upper_triangle .. py:function:: paddle.incubate.softmax_mask_fuse_upper_triangle(x) -该op是对输入 ``x`` 进行被mask的softmax操作,该op总是mask住x的上三角矩阵部分(不包含对角线部分)。该op主要针对加速Transformer架构而设计。将 ``tmp = x + mask, rst = softmax(tmp)`` 两个操作合为一个操作。计算公式为: +该 op 是对输入 ``x`` 进行被 mask 的 softmax 操作,该 op 总是 mask 住 x 的上三角矩阵部分(不包含对角线部分)。该 op 主要针对加速 Transformer 架构而设计。将 ``tmp = x + mask, rst = softmax(tmp)`` 两个操作合为一个操作。计算公式为: .. math:: out = softmax(LowerTriangular(x)) .. 
note:: - 该API只可在GPU上运行 + 该 API 只可在 GPU 上运行 参数 ::::::::: - - x (4-D Tensor) - 输入的Tensor,必须为4D的shape,数据类型为:float16、float32。x的第四维必须大于等于32,并且小于8192。第三维与第四维必须相同。 + - x (4-D Tensor) - 输入的 Tensor,必须为 4D 的 shape,数据类型为:float16、float32。x 的第四维必须大于等于 32,并且小于 8192。第三维与第四维必须相同。 返回 ::::::::: diff --git a/docs/api/paddle/index_sample_cn.rst b/docs/api/paddle/index_sample_cn.rst index 93d7a4221cc..ac04e011f4c 100644 --- a/docs/api/paddle/index_sample_cn.rst +++ b/docs/api/paddle/index_sample_cn.rst @@ -8,7 +8,7 @@ index_sample -对输入 ``x`` 中的元素进行批量抽样,取 ``index`` 指定的对应下标的元素,按index中出现的先后顺序组织,填充为一个新的张量。 +对输入 ``x`` 中的元素进行批量抽样,取 ``index`` 指定的对应下标的元素,按 index 中出现的先后顺序组织,填充为一个新的张量。 ``x`` 与 ``index`` 都是 ``2-D`` 张量。``index`` 的第一维度与输入 ``x`` 的第一维度必须相同,``index`` 的第二维度没有大小要求,可以重复索引相同下标元素。 diff --git a/docs/api/paddle/index_select_cn.rst b/docs/api/paddle/index_select_cn.rst index 7d05ab5b580..e7c7e165c47 100644 --- a/docs/api/paddle/index_select_cn.rst +++ b/docs/api/paddle/index_select_cn.rst @@ -7,20 +7,20 @@ index_select -沿着指定轴 ``axis`` 对输入 ``x`` 进行索引,取 ``index`` 中指定的相应项,创建并返回到一个新的Tensor。这里 ``index`` 是一个 ``1-D`` Tensor。除 ``axis`` 轴外,返回的Tensor其余维度大小和输入 ``x`` 相等,``axis`` 维度的大小等于 ``index`` 的大小。 +沿着指定轴 ``axis`` 对输入 ``x`` 进行索引,取 ``index`` 中指定的相应项,创建并返回到一个新的 Tensor。这里 ``index`` 是一个 ``1-D`` Tensor。除 ``axis`` 轴外,返回的 Tensor 其余维度大小和输入 ``x`` 相等,``axis`` 维度的大小等于 ``index`` 的大小。 参数 ::::::::: - - **x** (Tensor)– 输入Tensor。 ``x`` 的数据类型可以是float32,float64,int32,int64。 - - **index** (Tensor)– 包含索引下标的1-D Tensor。 - - **axis** (int,可选) – 索引轴,若未指定,则默认选取第0维。 + - **x** (Tensor)– 输入 Tensor。 ``x`` 的数据类型可以是 float32,float64,int32,int64。 + - **index** (Tensor)– 包含索引下标的 1-D Tensor。 + - **axis** (int,可选) – 索引轴,若未指定,则默认选取第 0 维。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor,返回一个数据类型同输入的Tensor。 +Tensor,返回一个数据类型同输入的 Tensor。 代码示例 diff --git a/docs/api/paddle/inner_cn.rst b/docs/api/paddle/inner_cn.rst index 883bd6e6f60..88db9c2735f 100644 --- a/docs/api/paddle/inner_cn.rst +++ 
b/docs/api/paddle/inner_cn.rst @@ -6,23 +6,23 @@ inner .. py:function:: paddle.inner(x, y, name=None) -计算两个Tensor的内积。 +计算两个 Tensor 的内积。 -对于1维Tensor计算普通内积,对于大于1维的Tensor计算最后一个维度的乘积和,此时两个输入Tensor最后一个维度长度需要相等。 +对于 1 维 Tensor 计算普通内积,对于大于 1 维的 Tensor 计算最后一个维度的乘积和,此时两个输入 Tensor 最后一个维度长度需要相等。 参数 :::::::::::: ::::::::: - - **x** (Tensor) - 一个N维Tensor或者标量Tensor,如果是N维Tensor最后一个维度长度需要跟y保持一致。 - - **y** (Tensor) - 一个N维Tensor或者标量Tensor,如果是N维Tensor最后一个维度长度需要跟x保持一致。 + - **x** (Tensor) - 一个 N 维 Tensor 或者标量 Tensor,如果是 N 维 Tensor 最后一个维度长度需要跟 y 保持一致。 + - **y** (Tensor) - 一个 N 维 Tensor 或者标量 Tensor,如果是 N 维 Tensor 最后一个维度长度需要跟 x 保持一致。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: ::::::::: - - Tensor, x、y的内积结果,Tensor shape为 x.shape[:-1] + y.shape[:-1]。 + - Tensor, x、y 的内积结果,Tensor shape 为 x.shape[:-1] + y.shape[:-1]。 代码示例: :::::::::: diff --git a/docs/api/paddle/io/BatchSampler_cn.rst b/docs/api/paddle/io/BatchSampler_cn.rst index 814a73df440..d2f276b2093 100644 --- a/docs/api/paddle/io/BatchSampler_cn.rst +++ b/docs/api/paddle/io/BatchSampler_cn.rst @@ -5,22 +5,22 @@ BatchSampler .. 
py:class:: paddle.io.BatchSampler(dataset=None, sampler=None, shuffle=False, batch_size=1, drop_last=False) -批采样器的基础实现,用于 ``paddle.io.DataLoader`` 中迭代式获取mini-batch的样本下标数组,数组长度与 ``batch_size`` 一致。 +批采样器的基础实现,用于 ``paddle.io.DataLoader`` 中迭代式获取 mini-batch 的样本下标数组,数组长度与 ``batch_size`` 一致。 所有用于 ``paddle.io.DataLoader`` 中的批采样器都必须是 ``paddle.io.BatchSampler`` 的子类并实现以下方法: ``__iter__``:迭代式返回批样本下标数组。 -``__len__``:每epoch中mini-batch数。 +``__len__``:每 epoch 中 mini-batch 数。 参数 :::::::::::: - - **dataset** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 - - **sampler** (Sampler) - 此参数必须是 ``paddle.io.Sampler`` 的子类实例,用于迭代式获取样本下标。``dataset`` 和 ``sampler`` 参数只能设置一个。默认值为None。 - - **shuffle** (bool) - 是否需要在生成样本下标时打乱顺序。默认值为False。 - - **batch_size** (int) - 每mini-batch中包含的样本数。默认值为1。 - - **drop_last** (bool) - 是否需要丢弃最后无法凑整一个mini-batch的样本。默认值为False。 + - **dataset** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的 Python 对象,用于生成样本下标。默认值为 None。 + - **sampler** (Sampler) - 此参数必须是 ``paddle.io.Sampler`` 的子类实例,用于迭代式获取样本下标。``dataset`` 和 ``sampler`` 参数只能设置一个。默认值为 None。 + - **shuffle** (bool) - 是否需要在生成样本下标时打乱顺序。默认值为 False。 + - **batch_size** (int) - 每 mini-batch 中包含的样本数。默认值为 1。 + - **drop_last** (bool) - 是否需要丢弃最后无法凑整一个 mini-batch 的样本。默认值为 False。 见 ``paddle.io.DataLoader`` 。 diff --git a/docs/api/paddle/io/DataLoader_cn.rst b/docs/api/paddle/io/DataLoader_cn.rst index 05419163b15..af2ad234912 100644 --- a/docs/api/paddle/io/DataLoader_cn.rst +++ b/docs/api/paddle/io/DataLoader_cn.rst @@ -5,48 +5,48 @@ DataLoader .. 
py:class:: paddle.io.DataLoader(dataset, feed_list=None, places=None, return_list=False, batch_sampler=None, batch_size=1, shuffle=False, drop_last=False, collate_fn=None, num_workers=0, use_buffer_reader=True, use_shared_memory=True, prefetch_factor=2, timeout=0, worker_init_fn=None) -DataLoader返回一个迭代器,该迭代器根据 ``batch_sampler`` 给定的顺序迭代一次给定的 ``dataset`` +DataLoader 返回一个迭代器,该迭代器根据 ``batch_sampler`` 给定的顺序迭代一次给定的 ``dataset`` -DataLoader支持单进程和多进程的数据加载方式,当 ``num_workers`` 大于0时,将使用多进程方式异步加载数据。 +DataLoader 支持单进程和多进程的数据加载方式,当 ``num_workers`` 大于 0 时,将使用多进程方式异步加载数据。 -DataLoader当前支持 ``map-style`` 和 ``iterable-style`` 的数据集,``map-style`` 的数据集可通过下标索引样本,请参考 ``paddle.io.Dataset`` ; ``iterable-style`` 数据集只能迭代式地获取样本,类似Python迭代器,请参考 ``paddle.io.IterableDataset`` 。 +DataLoader 当前支持 ``map-style`` 和 ``iterable-style`` 的数据集,``map-style`` 的数据集可通过下标索引样本,请参考 ``paddle.io.Dataset`` ; ``iterable-style`` 数据集只能迭代式地获取样本,类似 Python 迭代器,请参考 ``paddle.io.IterableDataset`` 。 .. note:: - 当前还不支持在子进程中进行GPU Tensor的操作,请不要在子进程流程中使用GPU Tensor,例如 ``dataset`` 中的预处理,``collate_fn`` 等,``numpy array`` 和CPU Tensor操作已支持。 + 当前还不支持在子进程中进行 GPU Tensor 的操作,请不要在子进程流程中使用 GPU Tensor,例如 ``dataset`` 中的预处理,``collate_fn`` 等,``numpy array`` 和 CPU Tensor 操作已支持。 ``batch_sampler`` 请参考 ``paddle.io.BatchSampler`` -**禁用自动组batch** +**禁用自动组 batch** -在如NLP等任务中,用户需求自定义组batch的方式,不希望 ``DataLoader`` 自动组batch, ``DataLoader`` 支持在 ``batch_size`` 和 ``batch_sampler`` 均为None的时候禁用自动组batch功能,此时需求从 ``dataset`` 中获取的数据为已经组好batch的数据,该数据将不做任何处理直接传到 ``collate_fn`` 或 ``default_collate_fn`` 中。 +在如 NLP 等任务中,用户需求自定义组 batch 的方式,不希望 ``DataLoader`` 自动组 batch, ``DataLoader`` 支持在 ``batch_size`` 和 ``batch_sampler`` 均为 None 的时候禁用自动组 batch 功能,此时需求从 ``dataset`` 中获取的数据为已经组好 batch 的数据,该数据将不做任何处理直接传到 ``collate_fn`` 或 ``default_collate_fn`` 中。 .. 
note:: - 当禁用自动组batch时,``default_collate_fn`` 将不对输入数据做任何处理。 + 当禁用自动组 batch 时,``default_collate_fn`` 将不对输入数据做任何处理。 参数 :::::::::::: - - **dataset** (Dataset) - DataLoader从此参数给定数据集中加载数据,此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例。 - - **feed_list** (list(Tensor)|tuple(Tensor),可选) - feed变量列表,由 ``paddle.static.data()`` 创建。当 ``return_list`` 为False时,此参数必须设置。默认值为None。 - - **places** (list(Place)|tuple(Place),可选) - 数据需要放置到的Place列表。在静态图和动态图模式中,此参数均必须设置。在动态图模式中,此参数列表长度必须是1。默认值为None。 - - **return_list** (bool,可选) - 每个设备上的数据是否以list形式返回。若return_list = False,每个设备上的返回数据均是str -> Tensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(Tensor)。在动态图模式下,此参数必须为True。默认值为False。 - - **batch_sampler** (BatchSampler,可选) - ``paddle.io.BatchSampler`` 或其子类的实例,DataLoader通过 ``batch_sampler`` 产生的mini-batch索引列表来 ``dataset`` 中索引样本并组成mini-batch。默认值为None。 - - **batch_size** (int|None,可选) - 每mini-batch中样本个数,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``paddle.io.BatchSampler``。默认值为1。 - - **shuffle** (bool,可选) - 生成mini-batch索引列表时是否对索引打乱顺序,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``paddle.io.BatchSampler``。默认值为False。 - - **drop_last** (bool,可选) - 是否丢弃因数据集样本数不能被 ``batch_size`` 整除而产生的最后一个不完整的mini-batch,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``paddle.io.BatchSampler``。默认值为False。 - - **collate_fn** (callable,可选) - 通过此参数指定如果将样本列表组合为mini-batch数据,当 ``collate_fn`` 为None时,默认为将样本个字段在第0维上堆叠(同 ``np.stack(..., axis=0)`` )为mini-batch的数据。默认值为None。 - - **num_workers** (int,可选) - 用于加载数据的子进程个数,若为0即为不开启子进程,在主进程中进行数据加载。默认值为0。 - - **use_buffer_reader** (bool,可选) - 是否使用缓存读取器。若 ``use_buffer_reader`` 为True,DataLoader会异步地预读取一定数量(默认读取下一个)的 mini-batch 的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。默认值为True。 - - **prefetch_factor** (int,可选) - 缓存的mini-batch的个数。若 ``use_buffer_reader`` 为True,DataLoader会异步地预读取 
``prefetch_factor`` 个mini-batch。默认值为2。 - - **use_shared_memory** (bool,可选) - 是否使用共享内存来提升子进程将数据放入进程间队列的速度,该参数尽在多进程模式下有效(即 ``num_workers > 0`` ),请确认机器上有足够的共享内存空间(如Linux系统下 ``/dev/shm/`` 目录空间大小)再设置此参数。默认为True。 - - **timeout** (int,可选) - 从子进程输出队列获取mini-batch数据的超时时间。默认值为0。 - - **worker_init_fn** (callable,可选) - 子进程初始化函数,此函数会被子进程初始化时被调用,并传递 ``worker id`` 作为参数。默认值为None。 + - **dataset** (Dataset) - DataLoader 从此参数给定数据集中加载数据,此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例。 + - **feed_list** (list(Tensor)|tuple(Tensor),可选) - feed 变量列表,由 ``paddle.static.data()`` 创建。当 ``return_list`` 为 False 时,此参数必须设置。默认值为 None。 + - **places** (list(Place)|tuple(Place),可选) - 数据需要放置到的 Place 列表。在静态图和动态图模式中,此参数均必须设置。在动态图模式中,此参数列表长度必须是 1。默认值为 None。 + - **return_list** (bool,可选) - 每个设备上的数据是否以 list 形式返回。若 return_list = False,每个设备上的返回数据均是 str -> Tensor 的映射表,其中映射表的 key 是每个输入变量的名称。若 return_list = True,则每个设备上的返回数据均是 list(Tensor)。在动态图模式下,此参数必须为 True。默认值为 False。 + - **batch_sampler** (BatchSampler,可选) - ``paddle.io.BatchSampler`` 或其子类的实例,DataLoader 通过 ``batch_sampler`` 产生的 mini-batch 索引列表来 ``dataset`` 中索引样本并组成 mini-batch。默认值为 None。 + - **batch_size** (int|None,可选) - 每 mini-batch 中样本个数,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``paddle.io.BatchSampler``。默认值为 1。 + - **shuffle** (bool,可选) - 生成 mini-batch 索引列表时是否对索引打乱顺序,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``paddle.io.BatchSampler``。默认值为 False。 + - **drop_last** (bool,可选) - 是否丢弃因数据集样本数不能被 ``batch_size`` 整除而产生的最后一个不完整的 mini-batch,为 ``batch_sampler`` 的替代参数,若 ``batch_sampler`` 未设置,会根据 ``batch_size`` ``shuffle`` ``drop_last`` 创建一个 ``paddle.io.BatchSampler``。默认值为 False。 + - **collate_fn** (callable,可选) - 通过此参数指定如何将样本列表组合为 mini-batch 数据,当 ``collate_fn`` 为 None 时,默认为将样本各字段在第 0 维上堆叠(同 ``np.stack(..., axis=0)`` )为 mini-batch 的数据。默认值为 None。 + - **num_workers** (int,可选) - 用于加载数据的子进程个数,若为 0 即为不开启子进程,在主进程中进行数据加载。默认值为 0。 + - 
**use_buffer_reader** (bool,可选) - 是否使用缓存读取器。若 ``use_buffer_reader`` 为 True,DataLoader 会异步地预读取一定数量(默认读取下一个)的 mini-batch 的数据,可加速数据读取过程,但同时会占用少量的 CPU/GPU 存储,即一个 batch 输入数据的存储空间。默认值为 True。 + - **prefetch_factor** (int,可选) - 缓存的 mini-batch 的个数。若 ``use_buffer_reader`` 为 True,DataLoader 会异步地预读取 ``prefetch_factor`` 个 mini-batch。默认值为 2。 + - **use_shared_memory** (bool,可选) - 是否使用共享内存来提升子进程将数据放入进程间队列的速度,该参数仅在多进程模式下有效(即 ``num_workers > 0`` ),请确认机器上有足够的共享内存空间(如 Linux 系统下 ``/dev/shm/`` 目录空间大小)再设置此参数。默认为 True。 + - **timeout** (int,可选) - 从子进程输出队列获取 mini-batch 数据的超时时间。默认值为 0。 + - **worker_init_fn** (callable,可选) - 子进程初始化函数,此函数会被子进程初始化时被调用,并传递 ``worker id`` 作为参数。默认值为 None。 返回 :::::::::::: -DataLoader,迭代 ``dataset`` 数据的迭代器,迭代器返回的数据中的每个元素都是一个Tensor。 +DataLoader,迭代 ``dataset`` 数据的迭代器,迭代器返回的数据中的每个元素都是一个 Tensor。 代码示例 @@ -119,33 +119,33 @@ from_generator(feed_list=None, capacity=None, use_double_buffer=True, iterable=T ''''''''' .. warning:: - 这个API将在未来版本废弃,推荐使用支持多进程并发加速的 ``paddle.io.DataLoader`` + 这个 API 将在未来版本废弃,推荐使用支持多进程并发加速的 ``paddle.io.DataLoader`` .. 
note:: - 框架保证DataLoader的数据加载顺序与用户提供的数据源读取顺序一致。 + 框架保证 DataLoader 的数据加载顺序与用户提供的数据源读取顺序一致。 -创建一个DataLoader对象用于加载Python生成器产生的数据。数据会由Python线程预先读取,并异步送入一个队列中。 +创建一个 DataLoader 对象用于加载 Python 生成器产生的数据。数据会由 Python 线程预先读取,并异步送入一个队列中。 -本方法创建的DataLoader对象提供了3个方法设置数据源,分别是 :code:`set_sample_generator` , :code:`set_sample_list_generator` 和 +本方法创建的 DataLoader 对象提供了 3 个方法设置数据源,分别是 :code:`set_sample_generator` , :code:`set_sample_list_generator` 和 :code:`set_batch_generator`。请查阅下述示例代码了解它们的使用方法。 -如果iterable = True,本方法创建的DataLoader对象是一个Python生成器,可以for-range的方法循环迭代。 +如果 iterable = True,本方法创建的 DataLoader 对象是一个 Python 生成器,可以 for-range 的方法循环迭代。 -如果iterable = False,本方法创建的DataLoader对象提供 :code:`start()` 和 :code:`reset()` 方法控制数据读取过程。 +如果 iterable = False,本方法创建的 DataLoader 对象提供 :code:`start()` 和 :code:`reset()` 方法控制数据读取过程。 **参数** - - **feed_list** (list(Tensor)|tuple(Tensor)) - feed变量列表,由 ``paddle.static.data()`` 创建。 - - **capacity** (int) - DataLoader对象内部维护队列的容量大小。单位是batch数量。若reader读取速度较快,建议设置较大的capacity值。 - - **use_double_buffer** (bool) - 是否使用 ``double_buffer_reader``。若use_double_buffer=True,DataLoader会异步地预读取下一个batch的数据,可加速数据读取过程,但同时会占用少量的CPU/GPU存储,即一个batch输入数据的存储空间。 - - **iterable** (bool) - 所创建的DataLoader对象是否可迭代。 - - **return_list** (bool) - 每个设备上的数据是否以list形式返回。仅在iterable = True模式下有效。若return_list = False,每个设备上的返回数据均是str -> LoDTensor的映射表,其中映射表的key是每个输入变量的名称。若return_list = True,则每个设备上的返回数据均是list(LoDTensor)。推荐在静态图模式下使用return_list = False,在动态图模式下使用return_list = True。 - - **use_multiprocess** (bool) - 设置是否是用多进程加速动态图的数据载入过程。注意:该参数的设置仅在动态图模式下有效,在静态图模式下,该参数设置与否均无任何影响。默认值为False。 - - **drop_last** (bool):是否丢弃最后的不足CPU/GPU设备数的批次。默认值为True。在网络训练时,用户不能设置drop_last=False,此时所有CPU/GPU设备均应从DataLoader中读取到数据。在网络预测时,用户可以设置drop_last=False,此时最后不足CPU/GPU设备数的批次可以进行预测。 + - **feed_list** (list(Tensor)|tuple(Tensor)) - feed 变量列表,由 ``paddle.static.data()`` 创建。 + - **capacity** (int) - DataLoader 对象内部维护队列的容量大小。单位是 batch 数量。若 reader 读取速度较快,建议设置较大的 capacity 值。 + - **use_double_buffer** (bool) - 是否使用 
``double_buffer_reader``。若 use_double_buffer=True,DataLoader 会异步地预读取下一个 batch 的数据,可加速数据读取过程,但同时会占用少量的 CPU/GPU 存储,即一个 batch 输入数据的存储空间。 + - **iterable** (bool) - 所创建的 DataLoader 对象是否可迭代。 + - **return_list** (bool) - 每个设备上的数据是否以 list 形式返回。仅在 iterable = True 模式下有效。若 return_list = False,每个设备上的返回数据均是 str -> LoDTensor 的映射表,其中映射表的 key 是每个输入变量的名称。若 return_list = True,则每个设备上的返回数据均是 list(LoDTensor)。推荐在静态图模式下使用 return_list = False,在动态图模式下使用 return_list = True。 + - **use_multiprocess** (bool) - 设置是否使用多进程加速动态图的数据载入过程。注意:该参数的设置仅在动态图模式下有效,在静态图模式下,该参数设置与否均无任何影响。默认值为 False。 + - **drop_last** (bool):是否丢弃最后的不足 CPU/GPU 设备数的批次。默认值为 True。在网络训练时,用户不能设置 drop_last=False,此时所有 CPU/GPU 设备均应从 DataLoader 中读取到数据。在网络预测时,用户可以设置 drop_last=False,此时最后不足 CPU/GPU 设备数的批次可以进行预测。 **返回** - 被创建的DataLoader对象。 + 被创建的 DataLoader 对象。 **代码示例 1** @@ -403,19 +403,19 @@ from_dataset(dataset, places, drop_last=True) ''''''''' .. warning:: - 这个API将在未来版本废弃,推荐使用支持多进程并发加速的 ``paddle.io.DataLoader`` + 这个 API 将在未来版本废弃,推荐使用支持多进程并发加速的 ``paddle.io.DataLoader`` -创建一个DataLoader对象用于加载Dataset产生的数据。目前,Dataset仅支持Linux系统下使用。 +创建一个 DataLoader 对象用于加载 Dataset 产生的数据。目前,Dataset 仅支持 Linux 系统下使用。 **参数** - - **dataset** (InMemoryDataset|QueueDataset) - Dataset对象。 - - **places** (list(CUDAPlace)|list(CPUPlace)) - DataLoader对象返回数据所在的place。 - - **drop_last** (bool) - 是否丢弃最后样本数量不足batch size的batch。若drop_last = True则丢弃,若drop_last = False则不丢弃。 + - **dataset** (InMemoryDataset|QueueDataset) - Dataset 对象。 + - **places** (list(CUDAPlace)|list(CPUPlace)) - DataLoader 对象返回数据所在的 place。 + - **drop_last** (bool) - 是否丢弃最后样本数量不足 batch size 的 batch。若 drop_last = True 则丢弃,若 drop_last = False 则不丢弃。 **返回** - 被创建的DataLoader对象,可以for-range的方式循环迭代。 + 被创建的 DataLoader 对象,可以 for-range 的方式循环迭代。 **代码示例** diff --git a/docs/api/paddle/io/Dataset_cn.rst b/docs/api/paddle/io/Dataset_cn.rst index 0440de3926e..0c03fa8d1e4 100644 --- a/docs/api/paddle/io/Dataset_cn.rst +++ b/docs/api/paddle/io/Dataset_cn.rst @@ -5,7 +5,7 @@ Dataset .. 
py:class:: paddle.io.Dataset -概述Dataset的方法和行为的抽象类。 +概述 Dataset 的方法和行为的抽象类。 映射式(map-style)数据集需要继承这个基类,映射式数据集为可以通过一个键值索引并获取指定样本的数据集,所有映射式数据集须实现以下方法: diff --git a/docs/api/paddle/io/DistributedBatchSampler_cn.rst b/docs/api/paddle/io/DistributedBatchSampler_cn.rst index a2d205b5df3..757f3834108 100644 --- a/docs/api/paddle/io/DistributedBatchSampler_cn.rst +++ b/docs/api/paddle/io/DistributedBatchSampler_cn.rst @@ -5,21 +5,21 @@ DistributedBatchSampler .. py:class:: paddle.io.DistributedBatchSampler(dataset=None, batch_size, num_replicas=None, rank=None, shuffle=False, drop_last=False) -分布式批采样器加载数据的一个子集。每个进程可以传递给DataLoader一个DistributedBatchSampler的实例,每个进程加载原始数据的一个子集。 +分布式批采样器加载数据的一个子集。每个进程可以传递给 DataLoader 一个 DistributedBatchSampler 的实例,每个进程加载原始数据的一个子集。 .. note:: - 假定Dataset的大小是固定的。 + 假定 Dataset 的大小是固定的。 参数 :::::::::::: - - **dataset** (paddle.io.Dataset) - 此参数必须是 ``paddle.io.Dataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 - - **batch_size** (int) - 每mini-batch中包含的样本数。 - - **num_replicas** (int, optional) - 分布式训练时的进程个数。如果是None,会依据 ``paddle.distributed.ParallenEnv`` 获取值。默认是None。 - - **rank** (int, optional) - num_replicas个进程中的进程序号。如果是None,会依据 ``paddle.distributed.ParallenEnv`` 获取值。默认是None。 - - **shuffle** (bool) - 是否需要在生成样本下标时打乱顺序。默认值为False。 - - **drop_last** (bool) - 是否需要丢弃最后无法凑整一个mini-batch的样本。默认值为False。 + - **dataset** (paddle.io.Dataset) - 此参数必须是 ``paddle.io.Dataset`` 的一个子类实例或实现了 ``__len__`` 的 Python 对象,用于生成样本下标。默认值为 None。 + - **batch_size** (int) - 每 mini-batch 中包含的样本数。 + - **num_replicas** (int, optional) - 分布式训练时的进程个数。如果是 None,会依据 ``paddle.distributed.ParallenEnv`` 获取值。默认是 None。 + - **rank** (int, optional) - num_replicas 个进程中的进程序号。如果是 None,会依据 ``paddle.distributed.ParallenEnv`` 获取值。默认是 None。 + - **shuffle** (bool) - 是否需要在生成样本下标时打乱顺序。默认值为 False。 + - **drop_last** (bool) - 是否需要丢弃最后无法凑整一个 mini-batch 的样本。默认值为 False。 返回 @@ -61,11 +61,11 @@ DistributedBatchSampler,返回样本下标数组的迭代器。 set_epoch(epoch) ''''''''' 
-设置epoch数。当设置``shuffle=True``时,此epoch被用作随机种子。默认情况下,用户可以不用此接口设置,每个epoch时,所有的进程(workers)使用不同的顺序。如果每个epoch设置相同的数字,每个epoch数据的读取顺序将会相同。 +设置 epoch 数。当设置``shuffle=True``时,此 epoch 被用作随机种子。默认情况下,用户可以不用此接口设置,每个 epoch 时,所有的进程(workers)使用不同的顺序。如果每个 epoch 设置相同的数字,每个 epoch 数据的读取顺序将会相同。 **参数** - - **epoch** (int) - epoch数。 + - **epoch** (int) - epoch 数。 **代码示例** diff --git a/docs/api/paddle/io/IterableDataset_cn.rst b/docs/api/paddle/io/IterableDataset_cn.rst index a90a182856e..670d5dcd893 100644 --- a/docs/api/paddle/io/IterableDataset_cn.rst +++ b/docs/api/paddle/io/IterableDataset_cn.rst @@ -7,7 +7,7 @@ IterableDataset 概述迭代式数据集的方法和行为的抽象类。 -迭代式(iterable style)数据集需要继承这个基类,迭代式数据集为只能依次迭代式获取样本的数据集,类似Python中的迭代器,所有迭代式数据集须实现以下方法: +迭代式(iterable style)数据集需要继承这个基类,迭代式数据集为只能依次迭代式获取样本的数据集,类似 Python 中的迭代器,所有迭代式数据集须实现以下方法: ``__iter__``:依次返回数据赝本。 diff --git a/docs/api/paddle/io/Overview_cn.rst b/docs/api/paddle/io/Overview_cn.rst index f36069a9d97..6ddcb620d8f 100644 --- a/docs/api/paddle/io/Overview_cn.rst +++ b/docs/api/paddle/io/Overview_cn.rst @@ -3,23 +3,23 @@ paddle.io --------------------- -paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的API。具体如下: +paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的 API。具体如下: -- :ref:`多进程数据读取器相关API ` -- :ref:`数据集定义相关API ` -- :ref:`数据集操作相关API ` -- :ref:`采样器相关API ` -- :ref:`批采样器相关API ` +- :ref:`多进程数据读取器相关 API ` +- :ref:`数据集定义相关 API ` +- :ref:`数据集操作相关 API ` +- :ref:`采样器相关 API ` +- :ref:`批采样器相关 API ` .. _about_dataloader: -多进程数据读取器相关API +多进程数据读取器相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`DataLoader ` ", "多进程数据读取器" @@ -27,11 +27,11 @@ paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的API .. _about_dataset_define: -数据集定义相关API +数据集定义相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`Dataset ` ", "映射式(map-style)数据集基类定义接口" @@ -40,25 +40,25 @@ paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的API .. _about_dataset_operate: -数据集操作相关API +数据集操作相关 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`ChainDataset ` ", "数据集样本级联接口" " :ref:`ComposeDataset ` ", "数据集字段组合接口" " :ref:`Subset ` ", "数据集取子集接口" - " :ref:`random_split ` ", "给定子集合dataset的长度数组,随机切分出原数据集合的非重复子集合" + " :ref:`random_split ` ", "给定子集合 dataset 的长度数组,随机切分出原数据集合的非重复子集合" .. _about_sampler: -采样器相关API +采样器相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`Sampler ` ", "采样器基类定义接口" @@ -68,11 +68,11 @@ paddle.io 目录下包含飞桨框架数据集定义、数据读取相关的API .. _about_batch_sampler: -批采样器相关API +批采样器相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`BatchSampler ` ", "批采样器接口" diff --git a/docs/api/paddle/io/RandomSampler_cn.rst b/docs/api/paddle/io/RandomSampler_cn.rst index ae14652f706..c18664f8e0f 100644 --- a/docs/api/paddle/io/RandomSampler_cn.rst +++ b/docs/api/paddle/io/RandomSampler_cn.rst @@ -9,10 +9,10 @@ RandomSampler 参数 ::::::::: - - **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 + - **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的 Python 对象,用于生成样本下标。默认值为 None。 - **replacement** (bool) - 如果为 ``False`` 则会采样整个数据集,如果为 ``True`` 则会按 ``num_samples`` 指定的样本数采集。默认值为 ``False`` 。 - - **num_samples** (int) - 如果 ``replacement`` 设置为 ``True`` 则按此参数采集对应的样本数。默认值为None。 - - **generator** (Generator) - 指定采样 ``data_source`` 的采样器。默认值为None。 + - **num_samples** (int) - 如果 ``replacement`` 设置为 ``True`` 则按此参数采集对应的样本数。默认值为 None。 + - **generator** (Generator) - 指定采样 ``data_source`` 的采样器。默认值为 None。 返回 ::::::::: diff --git a/docs/api/paddle/io/Sampler_cn.rst b/docs/api/paddle/io/Sampler_cn.rst index 45b3c8ef239..42fc350f95d 100644 --- a/docs/api/paddle/io/Sampler_cn.rst +++ b/docs/api/paddle/io/Sampler_cn.rst @@ -16,7 +16,7 @@ Sampler 参数 :::::::::::: - 
- **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 + - **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的 Python 对象,用于生成样本下标。默认值为 None。 可见 ``paddle.io.BatchSampler`` 和 ``paddle.io.DataLoader`` diff --git a/docs/api/paddle/io/SequenceSampler_cn.rst b/docs/api/paddle/io/SequenceSampler_cn.rst index b1ebd77e94b..1c12b78f2c0 100644 --- a/docs/api/paddle/io/SequenceSampler_cn.rst +++ b/docs/api/paddle/io/SequenceSampler_cn.rst @@ -10,7 +10,7 @@ SequenceSampler 参数 :::::::::::: - - **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 + - **data_source** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的 Python 对象,用于生成样本下标。默认值为 None。 返回 :::::::::::: diff --git a/docs/api/paddle/io/TensorDataset_cn.rst b/docs/api/paddle/io/TensorDataset_cn.rst index 01b0d0a24bb..1e63689a474 100644 --- a/docs/api/paddle/io/TensorDataset_cn.rst +++ b/docs/api/paddle/io/TensorDataset_cn.rst @@ -7,12 +7,12 @@ TensorDataset 由张量列表定义的数据集。 -每个张量的形状应为[N,...],而N是样本数,每个张量表示样本中一个字段,TensorDataset中通过在第一维索引张量来获取每个样本。 +每个张量的形状应为[N,...],而 N 是样本数,每个张量表示样本中一个字段,TensorDataset 中通过在第一维索引张量来获取每个样本。 参数 :::::::::::: - - **tensors** (list of Tensors) - Tensor列表,这些Tensor的第一维形状相同 + - **tensors** (list of Tensors) - Tensor 列表,这些 Tensor 的第一维形状相同 返回 :::::::::::: diff --git a/docs/api/paddle/io/WeightedRandomSampler_cn.rst b/docs/api/paddle/io/WeightedRandomSampler_cn.rst index 4d9ee2b2b2b..425443aefd9 100644 --- a/docs/api/paddle/io/WeightedRandomSampler_cn.rst +++ b/docs/api/paddle/io/WeightedRandomSampler_cn.rst @@ -10,9 +10,9 @@ WeightedRandomSampler 参数 ::::::::: - - **weights** (numpy.ndarray|paddle.Tensor|tuple|list) - 权重序列,需要是numpy数组,paddle.Tensor,list或者tuple类型。 + - **weights** (numpy.ndarray|paddle.Tensor|tuple|list) - 
权重序列,需要是 numpy 数组,paddle.Tensor,list 或者 tuple 类型。 - **num_samples** (int) - 采样样本数。 - - **replacement** (bool) - 是否采用有放回的采样,默认值为True + - **replacement** (bool) - 是否采用有放回的采样,默认值为 True 返回 ::::::::: diff --git a/docs/api/paddle/io/get_worker_info_cn.rst b/docs/api/paddle/io/get_worker_info_cn.rst index f81a75dba66..eb4ce9afef1 100644 --- a/docs/api/paddle/io/get_worker_info_cn.rst +++ b/docs/api/paddle/io/get_worker_info_cn.rst @@ -9,7 +9,7 @@ get_worker_info ``num_workers``:子进程数。 -``id``:子进程逻辑序号,从0到 ``num_workers - 1`` +``id``:子进程逻辑序号,从 0 到 ``num_workers - 1`` ``dataset``:各子进程中数据集实例。 diff --git a/docs/api/paddle/io/random_split_cn.rst b/docs/api/paddle/io/random_split_cn.rst index d304f5e536a..2f11ee114e1 100644 --- a/docs/api/paddle/io/random_split_cn.rst +++ b/docs/api/paddle/io/random_split_cn.rst @@ -5,14 +5,14 @@ random_split .. py:class:: paddle.io.random_split(dataset, lengths, generator=None) -给定子集合dataset的长度数组,随机切分出原数据集合的非重复子集合。 +给定子集合 dataset 的长度数组,随机切分出原数据集合的非重复子集合。 参数 :::::::::::: - - **dataset** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的Python对象,用于生成样本下标。默认值为None。 + - **dataset** (Dataset) - 此参数必须是 ``paddle.io.Dataset`` 或 ``paddle.io.IterableDataset`` 的一个子类实例或实现了 ``__len__`` 的 Python 对象,用于生成样本下标。默认值为 None。 - **lengths** (list) - 总和为原数组长度的,子集合长度数组。 - - **generator** (Generator) - 指定采样 ``data_source`` 的采样器。默认值为None。 + - **generator** (Generator) - 指定采样 ``data_source`` 的采样器。默认值为 None。 返回 :::::::::::: diff --git a/docs/api/paddle/is_floating_point_cn.rst b/docs/api/paddle/is_floating_point_cn.rst index 7d451ddfeeb..64dd3354508 100644 --- a/docs/api/paddle/is_floating_point_cn.rst +++ b/docs/api/paddle/is_floating_point_cn.rst @@ -4,17 +4,17 @@ is_floating_point ------------------------------- .. 
py:function:: paddle.is_floating_point(x) -判断输入Tensor的数据类型是否为浮点类型。 +判断输入 Tensor 的数据类型是否为浮点类型。 参数 ::::::::: -- **x** (Tensor) - 输入的Tensor。 +- **x** (Tensor) - 输入的 Tensor。 返回 ::::::::: -输入Tensor是否为浮点类型。 +输入 Tensor 是否为浮点类型。 代码示例 ::::::::: diff --git a/docs/api/paddle/isclose_cn.rst b/docs/api/paddle/isclose_cn.rst index b17ddd22e2e..db066282bf8 100644 --- a/docs/api/paddle/isclose_cn.rst +++ b/docs/api/paddle/isclose_cn.rst @@ -5,26 +5,26 @@ isclose .. py:function:: paddle.isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None) -逐个检查x和y的所有元素是否均满足如下条件: +逐个检查 x 和 y 的所有元素是否均满足如下条件: .. math:: \left| x - y \right| \leq atol + rtol \times \left| y \right| -该API的行为类似于 :math:`numpy.isclose`,即逐个比较两个Tensor的所有元素是否在一定容忍误差范围内视为相等。 +该 API 的行为类似于 :math:`numpy.isclose`,即逐个比较两个 Tensor 的所有元素是否在一定容忍误差范围内视为相等。 参数 ::::::::: - **x** (Tensor) - 输入的 `Tensor`,数据类型为:float32、float64。 - **y** (Tensor) - 输入的 `Tensor`,数据类型为:float32、float64。 - - **rtol** (float,可选) - 相对容忍误差,默认值为1e-5。 - - **atol** (float,可选) - 绝对容忍误差,默认值为1e-8。 - - **equal_nan** (bool,可选) - 如果设置为True,则两个NaN数值将被视为相等,默认值为False。 + - **rtol** (float,可选) - 相对容忍误差,默认值为 1e-5。 + - **atol** (float,可选) - 绝对容忍误差,默认值为 1e-8。 + - **equal_nan** (bool,可选) - 如果设置为 True,则两个 NaN 数值将被视为相等,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -计算得到的布尔类型Tensor。 +计算得到的布尔类型 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/isinf_cn.rst b/docs/api/paddle/isinf_cn.rst index 80385cbae0c..e676b507fe9 100644 --- a/docs/api/paddle/isinf_cn.rst +++ b/docs/api/paddle/isinf_cn.rst @@ -5,7 +5,7 @@ isinf .. 
py:function:: paddle.isinf(x, name=None) -返回输入tensor的每一个值是否为 `+/-INF` 。 +返回输入 tensor 的每一个值是否为 `+/-INF` 。 参数 ::::::::: @@ -14,7 +14,7 @@ isinf 返回 ::::::::: -``Tensor``,每个元素是一个bool值,表示输入 `x` 的每个元素是否为 `+/-INF` 。 +``Tensor``,每个元素是一个 bool 值,表示输入 `x` 的每个元素是否为 `+/-INF` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/isnan_cn.rst b/docs/api/paddle/isnan_cn.rst index 596118dab52..725c0845035 100644 --- a/docs/api/paddle/isnan_cn.rst +++ b/docs/api/paddle/isnan_cn.rst @@ -5,7 +5,7 @@ isnan .. py:function:: paddle.isnan(x, name=None) -返回输入tensor的每一个值是否为 `+/-NaN` 。 +返回输入 tensor 的每一个值是否为 `+/-NaN` 。 参数 ::::::::: @@ -14,7 +14,7 @@ isnan 返回 ::::::::: -``Tensor``,每个元素是一个bool值,表示输入 `x` 的每个元素是否为 `+/-NaN` 。 +``Tensor``,每个元素是一个 bool 值,表示输入 `x` 的每个元素是否为 `+/-NaN` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/jit/Overview_cn.rst b/docs/api/paddle/jit/Overview_cn.rst index 82df69dc7b0..ae2b6ca7056 100644 --- a/docs/api/paddle/jit/Overview_cn.rst +++ b/docs/api/paddle/jit/Overview_cn.rst @@ -3,37 +3,37 @@ paddle.jit -------------- -paddle.jit 目录下包含飞桨框架支持动态图转静态图相关的API。具体如下: +paddle.jit 目录下包含飞桨框架支持动态图转静态图相关的 API。具体如下: -- :ref:`动态图转静态图相关API ` -- :ref:`Debug动态图转静态图相关 ` +- :ref:`动态图转静态图相关 API ` +- :ref:`Debug 动态图转静态图相关 ` .. _about_dygraph_to_static: -动态图转静态图相关API +动态图转静态图相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`to_static ` ", "动转静to_static装饰器" + " :ref:`to_static ` ", "动转静 to_static 装饰器" " :ref:`save ` ", "动转静模型存储接口" " :ref:`load ` ", "动转静模型载入接口" - " :ref:`ProgramTranslator ` ", "动转静控制主类ProgramTranslator" - " :ref:`TracedLayer ` ", "备选根据trace动转静的接口TracedLayer" + " :ref:`ProgramTranslator ` ", "动转静控制主类 ProgramTranslator" + " :ref:`TracedLayer ` ", "备选根据 trace 动转静的接口 TracedLayer" " :ref:`TranslatedLayer ` ", "是一个命令式编程模式 :ref:`cn_api_fluid_dygraph_Layer` 的继承类" .. _about_debug: -Debug动态图转静态图相关 +Debug 动态图转静态图相关 :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`set_code_level ` ", "设置代码级别,打印该级别动转静转化后的代码" diff --git a/docs/api/paddle/jit/ProgramTranslator_cn.rst b/docs/api/paddle/jit/ProgramTranslator_cn.rst index c461669962b..069e5f4c001 100644 --- a/docs/api/paddle/jit/ProgramTranslator_cn.rst +++ b/docs/api/paddle/jit/ProgramTranslator_cn.rst @@ -23,7 +23,7 @@ ProgramTranslator 单例对象。 import paddle - # 以下两种调用方法得到同一个对象,因为ProgramTranslator是个单例 + # 以下两种调用方法得到同一个对象,因为 ProgramTranslator 是个单例 paddle.jit.ProgramTranslator() paddle.jit.ProgramTranslator.get_instance() @@ -36,7 +36,7 @@ enable(enable_static) **参数** - - **enable_static** (bool) - 设置True或者False来打开或关闭动静转化。 + - **enable_static** (bool) - 设置 True 或者 False 来打开或关闭动静转化。 **返回** @@ -62,13 +62,13 @@ None。 prog_trans.enable(False) x = paddle.ones([1, 2]) - # ProgramTranslator被关闭所以func会以动态图模式运行 + # ProgramTranslator 被关闭所以 func 会以动态图模式运行 print(func(x)) # [[0. 0.]] get_output(dygraph_func, *args, **kwargs) ''''''''' -返回动态图函数输出的Tensor,但是该动态图函数的数值计算过程会被转化为静态图模式运行。 +返回动态图函数输出的 Tensor,但是该动态图函数的数值计算过程会被转化为静态图模式运行。 **参数** @@ -77,7 +77,7 @@ get_output(dygraph_func, *args, **kwargs) **返回** -包含数值结果的Tensor或者Tensor的元组,是输入动态图函数的返回值。 +包含数值结果的 Tensor 或者 Tensor 的元组,是输入动态图函数的返回值。 **代码示例** @@ -103,7 +103,7 @@ get_output(dygraph_func, *args, **kwargs) get_func(dygraph_func) ''''''''' -返回一个可调用函数,该函数将输入动态图函数接口转化为静态图组网接口。组网接口不像动态图接口,其并不直接返回数据结果。用户需要自行处理对应的Program和Eexecutor。 +返回一个可调用函数,该函数将输入动态图函数接口转化为静态图组网接口。组网接口不像动态图接口,其并不直接返回数据结果。用户需要自行处理对应的 Program 和 Eexecutor。 **参数** @@ -135,7 +135,7 @@ get_func(dygraph_func) get_program(dygraph_func, *args, **kwargs) ''''''''' -返回动态图函数转化后的静态图Program和输入输出Varaible。用户可以使用Executor来执行该Program。 +返回动态图函数转化后的静态图 Program 和输入输出 Varaible。用户可以使用 Executor 来执行该 Program。 **参数** @@ -146,10 +146,10 @@ get_program(dygraph_func, *args, **kwargs) 元组(main_program, startup_program, inputs, outputs) - - main_program:转化后的main program。 - - startup_program:转化后的startup program。 - - 
inputs:输入Tensor的列表,这些Tensor可以在执行去feed。 - - outputs:输出Tensor的列表,这些Tensor可以在运行时被fetch。 + - main_program:转化后的 main program。 + - startup_program:转化后的 startup program。 + - inputs:输入 Tensor 的列表,这些 Tensor 可以在执行去 feed。 + - outputs:输出 Tensor 的列表,这些 Tensor 可以在运行时被 fetch。 **代码示例** @@ -171,9 +171,9 @@ get_program(dygraph_func, *args, **kwargs) x = paddle.ones([1, 2]) main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x) print([i.name for i in inputs]) - # [u'generated_tensor_0'] 需要被feed的输入Tensor名字,对应x + # [u'generated_tensor_0'] 需要被 feed 的输入 Tensor 名字,对应 x print([o.name for o in outputs]) - # [u'_generated_var_4'] 需要被fetch的输出Tensor名字,对应x_v + # [u'_generated_var_4'] 需要被 fetch 的输出 Tensor 名字,对应 x_v get_code(dygraph_func) ''''''''' @@ -212,11 +212,11 @@ get_code(dygraph_func) get_program_cache() ''''''''' -返回ProgramCache单例。这个方法是PaddlePaddle开发者用来管理ProgramTranslator中的Program缓存,普通用户不需要使用这个方法。 +返回 ProgramCache 单例。这个方法是 PaddlePaddle 开发者用来管理 ProgramTranslator 中的 Program 缓存,普通用户不需要使用这个方法。 **返回** -ProgramTranslator中的ProgramCache。 +ProgramTranslator 中的 ProgramCache。 **代码示例** diff --git a/docs/api/paddle/jit/TracedLayer_cn.rst b/docs/api/paddle/jit/TracedLayer_cn.rst index 86e0dca6515..1e7bc731060 100644 --- a/docs/api/paddle/jit/TracedLayer_cn.rst +++ b/docs/api/paddle/jit/TracedLayer_cn.rst @@ -9,13 +9,13 @@ TracedLayer -TracedLayer用于将前向动态图模型转换为静态图模型,主要用于将动态图保存后做在线C++预测。除此以外,用户也可使用转换后的静态图模型在Python端做预测,通常比原先的动态图性能更好。 +TracedLayer 用于将前向动态图模型转换为静态图模型,主要用于将动态图保存后做在线 C++预测。除此以外,用户也可使用转换后的静态图模型在 Python 端做预测,通常比原先的动态图性能更好。 -TracedLayer使用 ``Executor`` 和 ``CompiledProgram`` 运行静态图模型。转换后的静态图模型与原动态图模型共享参数。 +TracedLayer 使用 ``Executor`` 和 ``CompiledProgram`` 运行静态图模型。转换后的静态图模型与原动态图模型共享参数。 -所有的TracedLayer对象均不应通过构造函数创建,而应通过调用静态方法 ``TracedLayer.trace(layer, inputs)`` 创建。 +所有的 TracedLayer 对象均不应通过构造函数创建,而应通过调用静态方法 ``TracedLayer.trace(layer, inputs)`` 创建。 -TracedLayer只能用于将data independent的动态图模型转换为静态图模型,即待转换的动态图模型不应随tensor数据或维度的变化而变化。 +TracedLayer 只能用于将 data independent 
的动态图模型转换为静态图模型,即待转换的动态图模型不应随 tensor 数据或维度的变化而变化。 方法 :::::::::::: @@ -23,16 +23,16 @@ TracedLayer只能用于将data independent的动态图模型转换为静态图 **static** trace(layer, inputs) ''''''''' -创建TracedLayer对象的唯一接口,该接口会调用 ``layer(*inputs)`` 方法运行动态图模型并将其转换为静态图模型。 +创建 TracedLayer 对象的唯一接口,该接口会调用 ``layer(*inputs)`` 方法运行动态图模型并将其转换为静态图模型。 **参数** - - **layer** (dygraph.Layer) - 待追踪的动态图layer对象。 - - **inputs** (list(Variable)) - 动态图layer对象的输入变量列表。 + - **layer** (dygraph.Layer) - 待追踪的动态图 layer 对象。 + - **inputs** (list(Variable)) - 动态图 layer 对象的输入变量列表。 **返回** -tuple,包含2个元素,其中第一个元素是 ``layer(*inputs)`` 的输出结果,第二个元素是转换后得到的TracedLayer对象。 +tuple,包含 2 个元素,其中第一个元素是 ``layer(*inputs)`` 的输出结果,第二个元素是转换后得到的 TracedLayer 对象。 **代码示例** @@ -53,7 +53,7 @@ tuple,包含2个元素,其中第一个元素是 ``layer(*inputs)`` 的输出 in_var = paddle.uniform(shape=[2, 3], dtype='float32') out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var]) - # 内部使用Executor运行静态图模型 + # 内部使用 Executor 运行静态图模型 out_static_graph = static_layer([in_var]) print(len(out_static_graph)) # 1 print(out_static_graph[0].shape) # (2, 10) @@ -68,8 +68,8 @@ set_strategy(build_strategy=None, exec_strategy=None) **参数** - - **build_strategy** (BuildStrategy,可选) - TracedLayer内部 ``CompiledProgram`` 的构建策略。 - - **exec_strategy** (ExecutionStrategy,可选) - TracedLayer内部 ``CompiledProgram`` 的执行策略。 + - **build_strategy** (BuildStrategy,可选) - TracedLayer 内部 ``CompiledProgram`` 的构建策略。 + - **exec_strategy** (ExecutionStrategy,可选) - TracedLayer 内部 ``CompiledProgram`` 的执行策略。 **返回** @@ -106,15 +106,15 @@ set_strategy(build_strategy=None, exec_strategy=None) save_inference_model(path, feed=None, fetch=None) ''''''''' -将TracedLayer保存为用于预测部署的模型。保存的预测模型可被C++预测接口加载。 +将 TracedLayer 保存为用于预测部署的模型。保存的预测模型可被 C++预测接口加载。 ``path`` 是存储目标的前缀,存储的模型结构 ``Program`` 文件的后缀为 ``.pdmodel``,存储的持久参数变量文件的后缀为 ``.pdiparams``。 **参数** - **path** (str) - 存储模型的路径前缀。格式为 ``dirname/file_prefix`` 或者 ``file_prefix`` 。 - - **feed** (list(int),可选) - 预测模型输入变量的索引。若为None,则TracedLayer的所有输入变量均会作为预测模型的输入。默认值为None。 - - 
**fetch** (list(int),可选) - 预测模型输出变量的索引。若为None,则TracedLayer的所有输出变量均会作为预测模型的输出。默认值为None。 + - **feed** (list(int),可选) - 预测模型输入变量的索引。若为 None,则 TracedLayer 的所有输入变量均会作为预测模型的输入。默认值为 None。 + - **fetch** (list(int),可选) - 预测模型输出变量的索引。若为 None,则 TracedLayer 的所有输出变量均会作为预测模型的输出。默认值为 None。 **返回** diff --git a/docs/api/paddle/jit/TranslatedLayer_cn.rst b/docs/api/paddle/jit/TranslatedLayer_cn.rst index eaa756a157e..9f0ca6a9784 100644 --- a/docs/api/paddle/jit/TranslatedLayer_cn.rst +++ b/docs/api/paddle/jit/TranslatedLayer_cn.rst @@ -6,7 +6,7 @@ TranslatedLayer .. py:class:: paddle.jit.TranslatedLayer(programs, persistable_vars) ``TranslatedLayer`` 是一个命令式编程模式 :ref:`cn_api_fluid_dygraph_Layer` 的继承类, -通过 :ref:`cn_api_paddle_jit_load` 载入构建。能够像一般 ``Layer`` 一样在train或者eval模式下使用。 +通过 :ref:`cn_api_paddle_jit_load` 载入构建。能够像一般 ``Layer`` 一样在 train 或者 eval 模式下使用。 .. note:: ``TranslatedLayer`` 对象不能够通过构造函数创建,仅能够通过 :ref:`cn_api_paddle_jit_load` 接口载入构建。 @@ -103,11 +103,11 @@ TranslatedLayer program(method_name='forward'): ''''''''' -获取TranslatedLayer中指定方法对应的Program。 +获取 TranslatedLayer 中指定方法对应的 Program。 **参数** - - **method_name** (string) - 要获取的Porgram对应的方法名。默认值为"forward"。 + - **method_name** (string) - 要获取的 Porgram 对应的方法名。默认值为"forward"。 **返回** Program diff --git a/docs/api/paddle/jit/load_cn.rst b/docs/api/paddle/jit/load_cn.rst index 4810cd5d351..fb70a159243 100644 --- a/docs/api/paddle/jit/load_cn.rst +++ b/docs/api/paddle/jit/load_cn.rst @@ -6,12 +6,12 @@ load .. py:function:: paddle.jit.load(path, **configs) -将接口 ``paddle.jit.save`` 或者 ``paddle.static.save_inference_model`` 存储的模型载入为 ``paddle.jit.TranslatedLayer``,用于预测推理或者fine-tune训练。 +将接口 ``paddle.jit.save`` 或者 ``paddle.static.save_inference_model`` 存储的模型载入为 ``paddle.jit.TranslatedLayer``,用于预测推理或者 fine-tune 训练。 .. note:: - 如果载入的模型是通过 ``paddle.static.save_inference_model`` 存储的,在使用它进行fine-tune训练时会存在一些局限: - 1. 命令式编程模式不支持 ``LoDTensor``,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用; - 2. 
所有存储模型的feed变量都需要被传入 ``Translatedlayer`` 的forward方法; + 如果载入的模型是通过 ``paddle.static.save_inference_model`` 存储的,在使用它进行 fine-tune 训练时会存在一些局限: + 1. 命令式编程模式不支持 ``LoDTensor``,所有原先输入变量或者参数依赖于 LoD 信息的模型暂时无法使用; + 2. 所有存储模型的 feed 变量都需要被传入 ``Translatedlayer`` 的 forward 方法; 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复; 4. 原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。 @@ -19,8 +19,8 @@ load ::::::::: - **path** (str) - 载入模型的路径前缀。格式为 ``dirname/file_prefix`` 或者 ``file_prefix`` 。 - **config** (dict,可选) - 其他用于兼容的载入配置选项。这些选项将来可能被移除,如果不是必须使用,不推荐使用这些配置选项。默认为 ``None``。目前支持以下配置选项: - (1) model_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的预测模型文件名,原默认文件名为 ``__model__`` ; - (2) params_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的参数文件名,没有默认文件名,默认将各个参数分散存储为单独的文件。 + (1) model_filename (str) - paddle 1.x 版本 ``save_inference_model`` 接口存储格式的预测模型文件名,原默认文件名为 ``__model__`` ; + (2) params_filename (str) - paddle 1.x 版本 ``save_inference_model`` 接口存储格式的参数文件名,没有默认文件名,默认将各个参数分散存储为单独的文件。 返回 ::::::::: @@ -29,7 +29,7 @@ TranslatedLayer,一个能够执行存储模型的 ``Layer`` 对象。 代码示例 ::::::::: -1. 载入由接口 ``paddle.jit.save`` 存储的模型进行预测推理及fine-tune训练。 +1. 载入由接口 ``paddle.jit.save`` 存储的模型进行预测推理及 fine-tune 训练。 .. code-block:: python @@ -117,7 +117,7 @@ TranslatedLayer,一个能够执行存储模型的 ``Layer`` 对象。 -2. 兼容载入由接口 ``paddle.fluid.io.save_inference_model`` 存储的模型进行预测推理及fine-tune训练。 +2. 兼容载入由接口 ``paddle.fluid.io.save_inference_model`` 存储的模型进行预测推理及 fine-tune 训练。 .. code-block:: python diff --git a/docs/api/paddle/jit/save_cn.rst b/docs/api/paddle/jit/save_cn.rst index 7fe0518288c..51bbeddde63 100644 --- a/docs/api/paddle/jit/save_cn.rst +++ b/docs/api/paddle/jit/save_cn.rst @@ -5,27 +5,27 @@ save .. 
py:function:: paddle.jit.save(layer, path, input_spec=None, **configs) -将输入的 ``Layer`` 或 ``function`` 存储为 ``paddle.jit.TranslatedLayer`` 格式的模型,载入后可用于预测推理或者fine-tune训练。 +将输入的 ``Layer`` 或 ``function`` 存储为 ``paddle.jit.TranslatedLayer`` 格式的模型,载入后可用于预测推理或者 fine-tune 训练。 该接口会将输入 ``Layer`` 转写后的模型结构 ``Program`` 和所有必要的持久参数变量存储至输入路径 ``path`` 。 -``path`` 是存储目标的前缀,存储的模型结构 ``Program`` 文件的后缀为 ``.pdmodel`` ,存储的持久参数变量文件的后缀为 ``.pdiparams``,同时这里也会将一些变量描述信息存储至文件,文件后缀为 ``.pdiparams.info``,这些额外的信息将在fine-tune训练中使用。 +``path`` 是存储目标的前缀,存储的模型结构 ``Program`` 文件的后缀为 ``.pdmodel`` ,存储的持久参数变量文件的后缀为 ``.pdiparams``,同时这里也会将一些变量描述信息存储至文件,文件后缀为 ``.pdiparams.info``,这些额外的信息将在 fine-tune 训练中使用。 -存储的模型能够被以下API完整地载入使用: +存储的模型能够被以下 API 完整地载入使用: - ``paddle.jit.load`` - ``paddle.static.load_inference_model`` - - 其他预测库API + - 其他预测库 API .. note:: - 当使用 ``paddle.jit.save`` 保存 ``function`` 时,``function`` 不能包含参数变量。如果必须保存参数变量,请用Layer封装function,然后按照处理Layer的方式调用相应的API。 + 当使用 ``paddle.jit.save`` 保存 ``function`` 时,``function`` 不能包含参数变量。如果必须保存参数变量,请用 Layer 封装 function,然后按照处理 Layer 的方式调用相应的 API。 参数 ::::::::: - layer (Layer|function) - 需要存储的 ``Layer`` 对象或者 ``function``。 - path (str) - 存储模型的路径前缀。格式为 ``dirname/file_prefix`` 或者 ``file_prefix`` 。 - - input_spec (list[InputSpec|Tensor],可选) - 描述存储模型forward方法的输入,可以通过InputSpec或者示例Tensor进行描述。如果为 ``None``,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 - - **configs (dict,可选) - 其他用于兼容的存储配置选项。这些选项将来可能被移除,如果不是必须使用,不推荐使用这些配置选项。默认为 ``None``。目前支持以下配置选项:(1) output_spec (list[Tensor]) - 选择存储模型的输出目标。默认情况下,所有原 ``Layer`` forward方法的返回值均会作为存储模型的输出。如果传入的 ``output_spec`` 列表不是所有的输出变量,存储的模型将会根据 ``output_spec`` 所包含的结果被裁剪。 + - input_spec (list[InputSpec|Tensor],可选) - 描述存储模型 forward 方法的输入,可以通过 InputSpec 或者示例 Tensor 进行描述。如果为 ``None``,所有原 ``Layer`` forward 方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 + - **configs (dict,可选) - 其他用于兼容的存储配置选项。这些选项将来可能被移除,如果不是必须使用,不推荐使用这些配置选项。默认为 ``None``。目前支持以下配置选项:(1) output_spec (list[Tensor]) - 选择存储模型的输出目标。默认情况下,所有原 ``Layer`` forward 方法的返回值均会作为存储模型的输出。如果传入的 
``output_spec`` 列表不是所有的输出变量,存储的模型将会根据 ``output_spec`` 所包含的结果被裁剪。 返回 ::::::::: diff --git a/docs/api/paddle/jit/set_code_level_cn.rst b/docs/api/paddle/jit/set_code_level_cn.rst index 0f40c7e763f..af5af237224 100644 --- a/docs/api/paddle/jit/set_code_level_cn.rst +++ b/docs/api/paddle/jit/set_code_level_cn.rst @@ -18,7 +18,7 @@ set_code_level 参数 :::::::::::: - - **level** (int) - 打印的代码级别。默认值为100,这意味着打印的是所有 AST Transformer 转化后的代码。 + - **level** (int) - 打印的代码级别。默认值为 100,这意味着打印的是所有 AST Transformer 转化后的代码。 - **also_to_stdout** (bool) - 表示是否也将代码输出到 ``sys.stdout``。默认值 False,表示仅输出到 ``sys.stderr``。 diff --git a/docs/api/paddle/jit/set_verbosity_cn.rst b/docs/api/paddle/jit/set_verbosity_cn.rst index 19e0a3f7c3a..877b7b7836a 100644 --- a/docs/api/paddle/jit/set_verbosity_cn.rst +++ b/docs/api/paddle/jit/set_verbosity_cn.rst @@ -18,7 +18,7 @@ set_verbosity 参数 :::::::::::: - - **level** (int) - 日志详细级别。值越大,表示越详细。默认值为0,表示不显示日志。 + - **level** (int) - 日志详细级别。值越大,表示越详细。默认值为 0,表示不显示日志。 - **also_to_stdout** (bool) - 表示是否也将日志信息输出到 ``sys.stdout``。默认值 False,表示仅输出到 ``sys.stderr``。 代码示例 diff --git a/docs/api/paddle/jit/to_static_cn.rst b/docs/api/paddle/jit/to_static_cn.rst index 4784abf35e3..cdfc3b20ce1 100644 --- a/docs/api/paddle/jit/to_static_cn.rst +++ b/docs/api/paddle/jit/to_static_cn.rst @@ -5,7 +5,7 @@ to_static .. 
py:decorator:: paddle.jit.to_static -本装饰器将函数内的动态图API转化为静态图API。此装饰器自动处理静态图模式下的Program和Executor,并将结果作为动态图Tensor返回。输出的动态图Tensor可以继续进行动态图训练、预测或其他运算。如果被装饰的函数里面调用其他动态图函数,被调用的函数也会被转化为静态图函数。 +本装饰器将函数内的动态图 API 转化为静态图 API。此装饰器自动处理静态图模式下的 Program 和 Executor,并将结果作为动态图 Tensor 返回。输出的动态图 Tensor 可以继续进行动态图训练、预测或其他运算。如果被装饰的函数里面调用其他动态图函数,被调用的函数也会被转化为静态图函数。 参数 diff --git a/docs/api/paddle/kron_cn.rst b/docs/api/paddle/kron_cn.rst index fa72e073740..b1bac9746d9 100644 --- a/docs/api/paddle/kron_cn.rst +++ b/docs/api/paddle/kron_cn.rst @@ -14,7 +14,7 @@ Kronecker Product 算子。 计算两个张量的克罗内克积,结果是一个合成的张量,由第二个张量经过第一个张量中的元素缩放后的组块构成。 -预设两个张量 $X$ 和 $Y$ 的秩 (rank) 相同,如有必要,将会在秩较小的张量的形状前面补上1。令 $X$ 的形状是 [$r_0$, $r_1$, ..., $r_N$],$Y$ 的形状是 +预设两个张量 $X$ 和 $Y$ 的秩 (rank) 相同,如有必要,将会在秩较小的张量的形状前面补上 1。令 $X$ 的形状是 [$r_0$, $r_1$, ..., $r_N$],$Y$ 的形状是 [$s_0$, $s_1$, ..., $s_N$],那么输出张量的形状是 [$r_{0}s_{0}$, $r_{1}s_{1}$, ..., $r_{N}s_{N}$],其中的元素是 $X$ 和 $Y$ 中的元素的乘积。 公式为 diff --git a/docs/api/paddle/kthvalue_cn.rst b/docs/api/paddle/kthvalue_cn.rst index f27d9a8d58d..a50f2048a2d 100644 --- a/docs/api/paddle/kthvalue_cn.rst +++ b/docs/api/paddle/kthvalue_cn.rst @@ -5,19 +5,19 @@ kthvalue .. 
py:function:: paddle.kthvalue(x, k, axis=None, keepdim=False, name=None) -在指定的轴上查找第k小的元素和其对应所在的索引信息。 +在指定的轴上查找第 k 小的元素和其对应所在的索引信息。 参数 ::::::::: - - **x** (Tensor) - 一个输入的N-D ``Tensor``,支持的数据类型:float32、float64、int32、int64。 + - **x** (Tensor) - 一个输入的 N-D ``Tensor``,支持的数据类型:float32、float64、int32、int64。 - **k** (int,Tensor) - 需要沿轴查找的第 ``k`` 小,所对应的 ``k`` 值。 - - **axis** (int,可选) - 指定对输入Tensor进行运算的轴,``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 - - **keepdim** (bool,可选)- 是否保留指定的轴。如果是True,维度会与输入x一致,对应所指定的轴的size为1。否则,由于对应轴被展开,输出的维度会比输入小1。默认值为1。 + - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴,``axis`` 的有效范围是[-R, R),R 是输入 ``x`` 的 Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 + - **keepdim** (bool,可选)- 是否保留指定的轴。如果是 True,维度会与输入 x 一致,对应所指定的轴的 size 为 1。否则,由于对应轴被展开,输出的维度会比输入小 1。默认值为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -tuple(Tensor),返回第k小的元素和对应的索引信息。结果的数据类型和输入 ``x`` 一致。索引的数据类型是int64。 +tuple(Tensor),返回第 k 小的元素和对应的索引信息。结果的数据类型和输入 ``x`` 一致。索引的数据类型是 int64。 代码示例 ::::::::: diff --git a/docs/api/paddle/lcm_cn.rst b/docs/api/paddle/lcm_cn.rst index f64500245b7..dae84f713d6 100644 --- a/docs/api/paddle/lcm_cn.rst +++ b/docs/api/paddle/lcm_cn.rst @@ -11,20 +11,20 @@ lcm lcm(0,0)=0, lcm(0, y)=0 - 如果x和y的shape不一致,会对两个shape进行广播操作,得到一致的shape(并作为输出结果的shape), + 如果 x 和 y 的 shape 不一致,会对两个 shape 进行广播操作,得到一致的 shape(并作为输出结果的 shape), 请参见 :ref:`cn_user_guide_broadcasting` 。 参数 ::::::::: -- **x** (Tensor) - 输入的Tensor,数据类型为:int32,int64。 -- **y** (Tensor) - 输入的Tensor,数据类型为:int32,int64。 -- **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 +- **x** (Tensor) - 输入的 Tensor,数据类型为:int32,int64。 +- **y** (Tensor) - 输入的 Tensor,数据类型为:int32,int64。 +- **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: -输出Tensor,与输入数据类型相同。 +输出 Tensor,与输入数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/lerp_cn.rst b/docs/api/paddle/lerp_cn.rst index 3ebe34b004f..84c095844d6 100644 --- 
a/docs/api/paddle/lerp_cn.rst +++ b/docs/api/paddle/lerp_cn.rst @@ -11,15 +11,15 @@ lerp 参数 ::::::::: -- **x** (Tensor) - 输入的Tensor,作为线性插值开始的点,数据类型为:float32、float64。 -- **y** (Tensor) - 输入的Tensor,作为线性插值结束的点,数据类型为:float32、float64。 -- **weight** (float|Tensor) - 给定的权重值,weight为Tensor时数据类型为:float32、float64。 -- **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 +- **x** (Tensor) - 输入的 Tensor,作为线性插值开始的点,数据类型为:float32、float64。 +- **y** (Tensor) - 输入的 Tensor,作为线性插值结束的点,数据类型为:float32、float64。 +- **weight** (float|Tensor) - 给定的权重值,weight 为 Tensor 时数据类型为:float32、float64。 +- **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: -输出Tensor,与 ``x`` 数据类型相同。 +输出 Tensor,与 ``x`` 数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/less_than_cn.rst b/docs/api/paddle/less_than_cn.rst index f6e42d97490..c29e1ec244c 100644 --- a/docs/api/paddle/less_than_cn.rst +++ b/docs/api/paddle/less_than_cn.rst @@ -13,8 +13,8 @@ less_than 参数 :::::::::::: - - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - - **y** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **x** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **y** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/lgamma_cn.rst b/docs/api/paddle/lgamma_cn.rst index 6a0bf674f64..e58e3692709 100644 --- a/docs/api/paddle/lgamma_cn.rst +++ b/docs/api/paddle/lgamma_cn.rst @@ -15,12 +15,12 @@ lgamma 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/linalg/Overview_cn.rst b/docs/api/paddle/linalg/Overview_cn.rst index 33618d18b49..b05614cf736 100644 --- 
a/docs/api/paddle/linalg/Overview_cn.rst +++ b/docs/api/paddle/linalg/Overview_cn.rst @@ -3,21 +3,21 @@ paddle.linalg --------------------- -paddle.linalg 目录下包含飞桨框架支持的线性代数相关API。具体如下: +paddle.linalg 目录下包含飞桨框架支持的线性代数相关 API。具体如下: -- :ref:`矩阵属性相关API ` -- :ref:`矩阵计算相关API ` -- :ref:`矩阵分解相关API ` -- :ref:`线性方程求解相关API ` +- :ref:`矩阵属性相关 API ` +- :ref:`矩阵计算相关 API ` +- :ref:`矩阵分解相关 API ` +- :ref:`线性方程求解相关 API ` .. _about_matrix_property: -矩阵属性相关API +矩阵属性相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.linalg.det ` ", "计算方阵的行列式" @@ -29,15 +29,15 @@ paddle.linalg 目录下包含飞桨框架支持的线性代数相关API。具体 .. _about_matrix_functions: -矩阵计算相关API +矩阵计算相关 API ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`paddle.linalg.multi_dot ` ", "2个或更多矩阵的乘法,会自动选择计算量最少的乘法顺序" - " :ref:`paddle.linalg.matrix_power ` ", "计算方阵的n次幂" + " :ref:`paddle.linalg.multi_dot ` ", "2 个或更多矩阵的乘法,会自动选择计算量最少的乘法顺序" + " :ref:`paddle.linalg.matrix_power ` ", "计算方阵的 n 次幂" " :ref:`paddle.linalg.inv ` ", "计算方阵的逆矩阵" " :ref:`paddle.linalg.pinv ` ", "计算矩阵的广义逆" " :ref:`paddle.linalg.cov ` ", "计算矩阵的协方差矩阵" @@ -45,34 +45,34 @@ paddle.linalg 目录下包含飞桨框架支持的线性代数相关API。具体 .. _about_matrix_decompositions: -矩阵分解相关API +矩阵分解相关 API ::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.linalg.eig ` ", "计算一般方阵的特征值与特征向量" " :ref:`paddle.linalg.eigvals ` ", "计算一般方阵的特征值" " :ref:`paddle.linalg.eigh ` ", "计算厄米特矩阵或者实数对称矩阵的特征值和特征向量" " :ref:`paddle.linalg.eigvalsh ` ", "计算厄米特矩阵或者实数对称矩阵的特征值" - " :ref:`paddle.linalg.cholesky ` ", "计算一个实数对称正定矩阵的Cholesky分解" + " :ref:`paddle.linalg.cholesky ` ", "计算一个实数对称正定矩阵的 Cholesky 分解" " :ref:`paddle.linalg.svd ` ", "计算矩阵的奇异值分解" - " :ref:`paddle.linalg.qr ` ", "计算矩阵的正交三角分解(也称QR分解)" - " :ref:`paddle.linalg.lu ` ", "计算矩阵的LU分解" - " :ref:`paddle.linalg.lu_unpack ` ", "对矩阵的LU分解结果进行展开得到各单独矩阵" + " :ref:`paddle.linalg.qr ` ", "计算矩阵的正交三角分解(也称 QR 分解)" + " :ref:`paddle.linalg.lu ` ", "计算矩阵的 LU 分解" + " :ref:`paddle.linalg.lu_unpack ` ", "对矩阵的 LU 分解结果进行展开得到各单独矩阵" .. _about_solvers: -线性方程求解相关API +线性方程求解相关 API ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`paddle.linalg.lstsq ` ", "求解线性方程组的最小二乘问题" " :ref:`paddle.linalg.solve ` ", "计算具有唯一解的线性方程组,方程左边为方阵,右边为矩阵" " :ref:`paddle.linalg.triangular_solve ` ", "计算具有唯一解的线性方程组,方程左边为上(下)三角方阵,右边为矩阵" - " :ref:`paddle.linalg.cholesky_solve ` ", "通过Cholesky分解矩阵,计算具有唯一解的线性方程组" + " :ref:`paddle.linalg.cholesky_solve ` ", "通过 Cholesky 分解矩阵,计算具有唯一解的线性方程组" diff --git a/docs/api/paddle/linalg/cholesky_cn.rst b/docs/api/paddle/linalg/cholesky_cn.rst index 890dcc88883..f9647a10133 100644 --- a/docs/api/paddle/linalg/cholesky_cn.rst +++ b/docs/api/paddle/linalg/cholesky_cn.rst @@ -8,20 +8,20 @@ cholesky -计算一个对称正定矩阵或一批对称正定矩阵的Cholesky分解。如果 `upper` 是 `True`, -则分解形式为 :math:`A = U ^ {T} U`,返回的矩阵U是上三角矩阵。 +计算一个对称正定矩阵或一批对称正定矩阵的 Cholesky 分解。如果 `upper` 是 `True`, +则分解形式为 :math:`A = U ^ {T} U`,返回的矩阵 U 是上三角矩阵。 否则,分解形式为 :math:`A = LL ^ {T}`,并返回矩阵 :math:`L` 是下三角矩阵。 参数 :::::::::::: - - **x** (Tensor)- 输入变量为多维Tensor,它的维度应该为 `[*, M, N]`,其中*为零或更大的批次尺寸,并且最里面的两个维度上的矩阵都应为对称的正定矩阵,支持数据类型为float32、float64。 + - **x** (Tensor)- 输入变量为多维 Tensor,它的维度应该为 `[*, M, 
N]`,其中*为零或更大的批次尺寸,并且最里面的两个维度上的矩阵都应为对称的正定矩阵,支持数据类型为 float32、float64。 - **upper** (bool)- 指示是否返回上三角矩阵或下三角矩阵。默认值:False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,与 `x` 具有相同形状和数据类型。它代表了Cholesky分解生成的三角矩阵。 +Tensor,与 `x` 具有相同形状和数据类型。它代表了 Cholesky 分解生成的三角矩阵。 代码示例 :::::::::::: diff --git a/docs/api/paddle/linalg/cholesky_solve_cn.rst b/docs/api/paddle/linalg/cholesky_solve_cn.rst index a4ee74caada..cb727dce7f7 100644 --- a/docs/api/paddle/linalg/cholesky_solve_cn.rst +++ b/docs/api/paddle/linalg/cholesky_solve_cn.rst @@ -5,21 +5,21 @@ cholesky_solve .. py:function:: paddle.linalg.cholesky_solve(x, y, upper=False, name=None) -对 A @ X = B 的线性方程求解,其中A是方阵,输入x、y分别是矩阵B和矩阵A的Cholesky分解矩阵u。 +对 A @ X = B 的线性方程求解,其中 A 是方阵,输入 x、y 分别是矩阵 B 和矩阵A 的 Cholesky 分解矩阵 u。 -输入x、y是2维矩阵,或者2维矩阵以batch形式组成的3维矩阵。如果输入是batch形式的3维矩阵,则输出也是batch形式的3维矩阵。 +输入 x、y 是 2 维矩阵,或者 2 维矩阵以 batch 形式组成的 3 维矩阵。如果输入是 batch 形式的 3 维矩阵,则输出也是 batch 形式的 3 维矩阵。 参数 :::::::::::: - - **x** (Tensor) - 线性方程中的B矩阵。是2维矩阵或者2维矩阵以batch形式组成的3维矩阵。 - - **y** (Tensor) - 线性方程中A矩阵的Cholesky分解矩阵u,上三角或者下三角矩阵。是2维矩阵或者2维矩阵以batch形式组成的3维矩阵。 - - **upper** (bool,可选) - 输入x是否是上三角矩阵,True为上三角矩阵,False为下三角矩阵。默认值False。 + - **x** (Tensor) - 线性方程中的 B 矩阵。是 2 维矩阵或者 2 维矩阵以 batch 形式组成的 3 维矩阵。 + - **y** (Tensor) - 线性方程中 A 矩阵的 Cholesky 分解矩阵 u,上三角或者下三角矩阵。是 2 维矩阵或者 2 维矩阵以 batch 形式组成的 3 维矩阵。 + - **upper** (bool,可选) - 输入 x 是否是上三角矩阵,True 为上三角矩阵,False 为下三角矩阵。默认值 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,线性方程的解X。 +Tensor,线性方程的解 X。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/cond_cn.rst b/docs/api/paddle/linalg/cond_cn.rst index b6a91ba5554..a58386f7a09 100644 --- a/docs/api/paddle/linalg/cond_cn.rst +++ b/docs/api/paddle/linalg/cond_cn.rst @@ -11,7 +11,7 @@ cond 参数 :::::::::::: - - **x** (Tensor):输入可以是形状为 ``(*, m, n)`` 的矩阵Tensor, ``*`` 为零或更大的批次维度,此时 ``p`` 为 `2` 或 `-2`;也可以是形状为 ``(*, n, n)`` 的可逆(批)方阵Tensor,此时 ``p`` 为任意已支持的值。数据类型为 float32 或 float64 。 + - **x** 
(Tensor):输入可以是形状为 ``(*, m, n)`` 的矩阵 Tensor, ``*`` 为零或更大的批次维度,此时 ``p`` 为 `2` 或 `-2`;也可以是形状为 ``(*, n, n)`` 的可逆(批)方阵 Tensor,此时 ``p`` 为任意已支持的值。数据类型为 float32 或 float64 。 - **p** (float|string,可选):范数种类。目前支持的值为 `fro` 、 `nuc` 、 `1` 、 `-1` 、 `2` 、 `-2` 、 `inf` 、 `-inf`。默认值为 `None`,即范数种类为 `2` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/linalg/corrcoef_cn.rst b/docs/api/paddle/linalg/corrcoef_cn.rst index 5b86e82739b..e8bcdf99c38 100644 --- a/docs/api/paddle/linalg/corrcoef_cn.rst +++ b/docs/api/paddle/linalg/corrcoef_cn.rst @@ -6,24 +6,24 @@ corrcoef .. py:function:: paddle.linalg.corrcoef(x, rowvar=True, name=None) -相关系数矩阵表示输入矩阵中每对变量的相关性。例如,对于N维样本X=[x1,x2,…xN]T,则相关系数矩阵 +相关系数矩阵表示输入矩阵中每对变量的相关性。例如,对于 N 维样本 X=[x1,x2,…xN]T,则相关系数矩阵 元素 `Rij` 是 `xi` 和 `xj` 的相关性。元素 `Rii` 是 `xi` 本身的协方差。 皮尔逊积矩相关系数 `R` 和协方差矩阵 `C` 的关系如下: .. math:: R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } } - `R` 的值在-1到1之间。 + `R` 的值在-1 到 1 之间。 参数 ::::::::: - - **x** (Tensor) - 一个N(N<=2)维矩阵,包含多个变量。默认矩阵的每行是一个观测变量,由参数rowvar设置。 - - **rowvar** (bool,可选) - 若是True,则每行作为一个观测变量;若是False,则每列作为一个观测变量。默认True。 + - **x** (Tensor) - 一个 N(N<=2)维矩阵,包含多个变量。默认矩阵的每行是一个观测变量,由参数 rowvar 设置。 + - **rowvar** (bool,可选) - 若是 True,则每行作为一个观测变量;若是 False,则每列作为一个观测变量。默认 True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - 输入x的皮尔逊积矩相关系数矩阵。 + 输入 x 的皮尔逊积矩相关系数矩阵。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/cov_cn.rst b/docs/api/paddle/linalg/cov_cn.rst index ae70e269a66..453a36ee701 100644 --- a/docs/api/paddle/linalg/cov_cn.rst +++ b/docs/api/paddle/linalg/cov_cn.rst @@ -6,25 +6,25 @@ cov .. 
py:function:: paddle.linalg.cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None) -给定输入Tensor和权重,计算输入Tensor的协方差矩阵。 +给定输入 Tensor 和权重,计算输入 Tensor 的协方差矩阵。 协方差矩阵是一个方阵,用于指示每两个输入元素之间的协方差值。 -例如对于有N个元素的输入X=[x1,x2,…xN]T,协方差矩阵的元素Cij表示输入xi和xj之间的协方差,Cij表示xi其自身的协方差。 +例如对于有 N 个元素的输入 X=[x1,x2,…xN]T,协方差矩阵的元素 Cij 表示输入 xi 和 xj 之间的协方差,Cij 表示 xi 其自身的协方差。 参数 :::::::::::: - - **x** (Tensor) - 一个N(N<=2)维矩阵,包含多个变量。默认矩阵的每行是一个观测变量,由参数rowvar设置。 - - **rowvar** (bool,可选) - 若是True,则每行作为一个观测变量;若是False,则每列作为一个观测变量。默认True。 - - **ddof** (bool,可选) - 若是True,返回无偏估计结果;若是False,返回普通平均值计算结果。默认True。 - - **fweights** (Tensor,可选) - 包含整数频率权重的1维Tensor,表示每一个观测向量的重复次数。其维度值应该与输入x的观测维度值相等,为None则不起作用,默认None。 - - **aweights** (Tensor,可选) - 包含整数观测权重的1维Tensor,表示每一个观测向量的重要性,重要性越高对应值越大。其维度值应该与输入x的观测维度值相等,为None则不起作用,默认None。 + - **x** (Tensor) - 一个 N(N<=2)维矩阵,包含多个变量。默认矩阵的每行是一个观测变量,由参数 rowvar 设置。 + - **rowvar** (bool,可选) - 若是 True,则每行作为一个观测变量;若是 False,则每列作为一个观测变量。默认 True。 + - **ddof** (bool,可选) - 若是 True,返回无偏估计结果;若是 False,返回普通平均值计算结果。默认 True。 + - **fweights** (Tensor,可选) - 包含整数频率权重的 1 维 Tensor,表示每一个观测向量的重复次数。其维度值应该与输入 x 的观测维度值相等,为 None 则不起作用,默认 None。 + - **aweights** (Tensor,可选) - 包含整数观测权重的 1 维 Tensor,表示每一个观测向量的重要性,重要性越高对应值越大。其维度值应该与输入 x 的观测维度值相等,为 None 则不起作用,默认 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,输入x的协方差矩阵。假设x是[m,n]的矩阵,rowvar=True,则输出为[m,m]的矩阵。 +Tensor,输入 x 的协方差矩阵。假设 x 是[m,n]的矩阵,rowvar=True,则输出为[m,m]的矩阵。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/det_cn.rst b/docs/api/paddle/linalg/det_cn.rst index 9b5caad8321..cb42c1d1ad3 100644 --- a/docs/api/paddle/linalg/det_cn.rst +++ b/docs/api/paddle/linalg/det_cn.rst @@ -9,12 +9,12 @@ det 参数 :::::::::::: - - **x** (Tensor):输入一个或批量矩阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型支持float32、float64。 + - **x** (Tensor):输入一个或批量矩阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型支持 float32、float64。 返回 :::::::::::: -Tensor,输出矩阵的行列式值 Shape为 ``[*]`` 。 +Tensor,输出矩阵的行列式值 Shape 为 ``[*]`` 。 代码示例 
:::::::::: diff --git a/docs/api/paddle/linalg/eig_cn.rst b/docs/api/paddle/linalg/eig_cn.rst index 3f01779f929..8a6458ed44b 100644 --- a/docs/api/paddle/linalg/eig_cn.rst +++ b/docs/api/paddle/linalg/eig_cn.rst @@ -8,23 +8,23 @@ eig 计算一般方阵 ``x`` 的的特征值和特征向量。 .. note:: - - 如果输入矩阵 ``x`` 为Hermitian矩阵或实对称阵,请使用更快的API :ref:`cn_api_linalg_eigh` 。 + - 如果输入矩阵 ``x`` 为 Hermitian 矩阵或实对称阵,请使用更快的 API :ref:`cn_api_linalg_eigh` 。 - 如果只计算特征值,请使用 :ref:`cn_api_linalg_eigvals` 。 - 如果矩阵 ``x`` 不是方阵,请使用 :ref:`cn_api_linalg_svd` 。 - - 该API当前只能在CPU上执行。 + - 该 API 当前只能在 CPU 上执行。 - 对于输入是实数和复数类型,输出的数据类型均为复数。 参数 :::::::::::: - - **x** (Tensor) - 输入一个或一批矩阵。``x`` 的形状应为 ``[*, M, M]``,数据类型支持float32、float64、complex64和complex128。 + - **x** (Tensor) - 输入一个或一批矩阵。``x`` 的形状应为 ``[*, M, M]``,数据类型支持 float32、float64、complex64 和 complex128。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - - Tensor Eigenvalues,输出Shape为 ``[*, M]`` 的矩阵,表示特征值。 - - Tensor Eigenvectors,输出Shape为 ``[*, M, M]`` 矩阵,表示特征向量。 + - Tensor Eigenvalues,输出 Shape 为 ``[*, M]`` 的矩阵,表示特征值。 + - Tensor Eigenvectors,输出 Shape 为 ``[*, M, M]`` 矩阵,表示特征向量。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/eigh_cn.rst b/docs/api/paddle/linalg/eigh_cn.rst index 81511d85227..3b08bd21edf 100644 --- a/docs/api/paddle/linalg/eigh_cn.rst +++ b/docs/api/paddle/linalg/eigh_cn.rst @@ -9,15 +9,15 @@ eigh 参数 :::::::::::: - - **x** (Tensor):输入一个或一批厄米特矩阵或者实数对称矩阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型支持float32、float64、complex64、complex128。 + - **x** (Tensor):输入一个或一批厄米特矩阵或者实数对称矩阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型支持 float32、float64、complex64、complex128。 - **UPLO** (str,可选):表示计算上三角或者下三角矩阵,默认值为 'L',表示计算下三角矩阵的特征值和特征向量,'U'表示计算上三角矩阵的特征值和特征向量。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - - Tensor out_value,输出矩阵的特征值,输出顺序按照从小到大进行排序。Shape为 ``[*, M]`` 。 - - Tensor out_vector,输出矩阵的特征向量,与特征值一一对应,Shape为 ``[*, M, M]`` 。 + - Tensor out_value,输出矩阵的特征值,输出顺序按照从小到大进行排序。Shape 为 
``[*, M]`` 。 + - Tensor out_vector,输出矩阵的特征向量,与特征值一一对应,Shape 为 ``[*, M, M]`` 。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/eigvals_cn.rst b/docs/api/paddle/linalg/eigvals_cn.rst index 0f9b50fb834..ad90ea3518a 100644 --- a/docs/api/paddle/linalg/eigvals_cn.rst +++ b/docs/api/paddle/linalg/eigvals_cn.rst @@ -8,19 +8,19 @@ eigvals .. note:: -该API的反向实现尚未完成,若你的代码需要对其进行反向传播,请使用ref:`cn_api_linalg_eig`。 +该 API 的反向实现尚未完成,若你的代码需要对其进行反向传播,请使用 :ref:`cn_api_linalg_eig`。 参数 ::::::::: - - **x** (Tensor)- 需要计算特征值的方阵。输入的Tensor维度为 ``[*, M, M]``,其中 ``*`` 表示矩阵的批次维度。支持 ``float32`` 、 ``float64`` 、 ``complex64`` 和 ``complex128`` 四种数据类型。 + - **x** (Tensor)- 需要计算特征值的方阵。输入的 Tensor 维度为 ``[*, M, M]``,其中 ``*`` 表示矩阵的批次维度。支持 ``float32`` 、 ``float64`` 、 ``complex64`` 和 ``complex128`` 四种数据类型。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,包含x的所有未排序特征值。返回的Tensor具有与x相同的批次维度。即使输入的x是实数tensor,返回的也会是复数的结果。 +``Tensor``,包含 x 的所有未排序特征值。返回的 Tensor 具有与 x 相同的批次维度。即使输入的 x 是实数 tensor,返回的也会是复数的结果。 代码示例 diff --git a/docs/api/paddle/linalg/eigvalsh_cn.rst b/docs/api/paddle/linalg/eigvalsh_cn.rst index 1d813c3749b..92ee2d66b42 100644 --- a/docs/api/paddle/linalg/eigvalsh_cn.rst +++ b/docs/api/paddle/linalg/eigvalsh_cn.rst @@ -9,14 +9,14 @@ eigvalsh 参数 :::::::::::: - - **x** (Tensor):输入一个或一批厄米特矩阵或者实数对称矩阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型支持float32, float64,complex64,complex128。 + - **x** (Tensor):输入一个或一批厄米特矩阵或者实数对称矩阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型支持 float32, float64,complex64,complex128。 - **UPLO** (str,可选):表示计算上三角或者下三角矩阵,默认值为 'L',表示计算下三角矩阵的特征值,'U'表示计算上三角矩阵的特征值。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,输出矩阵的特征值,输出顺序按照从小到大进行排序。Shape为 ``[*, M]`` 。 +Tensor,输出矩阵的特征值,输出顺序按照从小到大进行排序。Shape 为 ``[*, M]`` 。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/inv_cn.rst b/docs/api/paddle/linalg/inv_cn.rst index a01ede4ebbc..802beb0e874 100644 --- a/docs/api/paddle/linalg/inv_cn.rst +++ 
b/docs/api/paddle/linalg/inv_cn.rst @@ -6,11 +6,11 @@ inv .. py:function:: paddle.linalg.inv(x, name=None) -计算方阵的逆。方阵是行数和列数相等的矩阵。输入可以是一个方阵(2-D张量),或者是批次方阵(维数大于2时)。 +计算方阵的逆。方阵是行数和列数相等的矩阵。输入可以是一个方阵(2-D 张量),或者是批次方阵(维数大于 2 时)。 参数 ::::::::: - - **x** (Tensor) – 输入张量,最后两维的大小必须相等。如果输入张量的维数大于2,则被视为2-D矩阵的批次(batch)。支持的数据类型:float32,float64。 + - **x** (Tensor) – 输入张量,最后两维的大小必须相等。如果输入张量的维数大于 2,则被视为 2-D 矩阵的批次(batch)。支持的数据类型:float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/linalg/lstsq_cn.rst b/docs/api/paddle/linalg/lstsq_cn.rst index 77fb9703df9..dac046859ed 100644 --- a/docs/api/paddle/linalg/lstsq_cn.rst +++ b/docs/api/paddle/linalg/lstsq_cn.rst @@ -11,8 +11,8 @@ lstsq 参数 :::::::::::: - - **x** (Tensor):形状为 ``(*, M, N)`` 的矩阵Tensor, ``*`` 为零或更大的批次维度。数据类型为 float32 或 float64 。 - - **y** (Tensor):形状为 ``(*, M, K)`` 的矩阵Tensor, ``*`` 为零或更大的批次维度。数据类型为 float32 或 float64 。 + - **x** (Tensor):形状为 ``(*, M, N)`` 的矩阵 Tensor, ``*`` 为零或更大的批次维度。数据类型为 float32 或 float64 。 + - **y** (Tensor):形状为 ``(*, M, K)`` 的矩阵 Tensor, ``*`` 为零或更大的批次维度。数据类型为 float32 或 float64 。 - **rcond** (float,可选):默认值为 `None`,用来决定 ``x`` 有效秩的 float 型浮点数。当 ``rcond`` 为 `None` 时,该值会被设为 ``max(M, N)`` 乘 ``x`` 数据类型对应的机器精度。 - **driver** (str,可选):默认值为 `None`,用来指定计算使用的 LAPACK 库方法。CPU 下该参数的合法值为 'gels','gelsy' (默认),'gelsd','gelss';CUDA 下该参数的合法值为 'gels' (默认) 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/linalg/lu_cn.rst b/docs/api/paddle/linalg/lu_cn.rst index 1bd61d1cfbb..4886eed07cc 100644 --- a/docs/api/paddle/linalg/lu_cn.rst +++ b/docs/api/paddle/linalg/lu_cn.rst @@ -5,11 +5,11 @@ lu .. 
py:function:: paddle.linalg.lu(x, pivot=True, get_infos=False, name=None) -对输入的N维(N>=2)矩阵x进行LU分解。 +对输入的 N 维(N>=2)矩阵 x 进行 LU 分解。 -返回LU分解矩阵L、U和旋转矩阵P。L是下三角矩阵,U是上三角矩阵,拼接成单个矩阵LU,函数直接返回LU。 +返回 LU 分解矩阵 L、U 和旋转矩阵 P。L 是下三角矩阵,U 是上三角矩阵,拼接成单个矩阵 LU,函数直接返回 LU。 -如果pivot为True则返回旋转矩阵P对应序列pivot,序列pivot转换到矩阵P可以经如下伪代码实现: +如果 pivot 为 True 则返回旋转矩阵 P 对应序列 pivot,序列 pivot 转换到矩阵 P 可以经如下伪代码实现: .. code-block:: text @@ -20,24 +20,24 @@ lu .. note:: - pivot选项只在gpu下起作用,cpu下暂不支持为False,会报错。 + pivot 选项只在 gpu 下起作用,cpu 下暂不支持为 False,会报错。 -LU和pivot可以通过调用paddle.linalg.lu_unpack展开获得L、U、P矩阵。 +LU 和 pivot 可以通过调用 paddle.linalg.lu_unpack 展开获得 L、U、P 矩阵。 参数 :::::::::::: - - **x** (Tensor) - 需要进行LU分解的输入矩阵x,x是维度大于2维的矩阵。 - - **pivot** (bool,可选) - LU分解时是否进行旋转。若为True则执行旋转操作,若为False则不执行旋转操作,该选项只在gpu下起作用,cpu下暂不支持为False,会报错。默认True。 - - **get_infos** (bool,可选) - 是否返回分解状态信息,若为True,则返回分解状态Tensor,否则不返回。默认False。 + - **x** (Tensor) - 需要进行 LU 分解的输入矩阵 x,x 是维度大于 2 维的矩阵。 + - **pivot** (bool,可选) - LU 分解时是否进行旋转。若为 True 则执行旋转操作,若为 False 则不执行旋转操作,该选项只在 gpu 下起作用,cpu 下暂不支持为 False,会报错。默认 True。 + - **get_infos** (bool,可选) - 是否返回分解状态信息,若为 True,则返回分解状态 Tensor,否则不返回。默认 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - - Tensor LU, LU分解结果矩阵LU,由L、U拼接组成。 - - Tensor(dtype=int) Pivots,旋转矩阵对应的旋转序列,详情见说明部分pivot部分,对于输入[*,m,n]的x,Pivots shape为[*, m]。 - - Tensor(dtype=int) Infos,矩阵分解状态信息矩阵,对于输入[*,m,n],Infos shape为[*]。每个元素表示每组矩阵的LU分解是否成功,0表示分解成功。 + - Tensor LU, LU 分解结果矩阵 LU,由 L、U 拼接组成。 + - Tensor(dtype=int) Pivots,旋转矩阵对应的旋转序列,详情见说明部分 pivot 部分,对于输入[*,m,n]的 x,Pivots shape 为[*, m]。 + - Tensor(dtype=int) Infos,矩阵分解状态信息矩阵,对于输入[*,m,n],Infos shape 为[*]。每个元素表示每组矩阵的 LU 分解是否成功,0 表示分解成功。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/lu_unpack_cn.rst b/docs/api/paddle/linalg/lu_unpack_cn.rst index 0edfb289638..0217d1a9362 100644 --- a/docs/api/paddle/linalg/lu_unpack_cn.rst +++ b/docs/api/paddle/linalg/lu_unpack_cn.rst @@ -5,10 +5,10 @@ lu_unpack .. 
py:function:: paddle.linalg.lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None) -对paddle.linalg.lu返回结果的LU、pivot进行展开得到原始的单独矩阵L、U、P。 +对 paddle.linalg.lu 返回结果的 LU、pivot 进行展开得到原始的单独矩阵 L、U、P。 -从LU中获得下三角矩阵L,上三角矩阵U。 -从序列pivot转换得到矩阵P,其转换过程原理如下伪代码所示: +从 LU 中获得下三角矩阵 L,上三角矩阵 U。 +从序列 pivot 转换得到矩阵 P,其转换过程原理如下伪代码所示: .. code-block:: text @@ -20,18 +20,18 @@ lu_unpack 参数 :::::::::::: - - **x** (Tensor) - paddle.linalg.lu返回结果的LU矩阵。 - - **y** (Tensor) - paddle.linalg.lu返回结果的pivot序列。 - - **unpack_ludata** (bool,可选) - 若为True,则对输入x(LU)进行展开得到L、U,否则。默认True。 - - **unpack_pivots** (bool,可选) - 若为True,则对输入y(pivots)序列进行展开,得到转换矩阵P。默认True。 + - **x** (Tensor) - paddle.linalg.lu 返回结果的 LU 矩阵。 + - **y** (Tensor) - paddle.linalg.lu 返回结果的 pivot 序列。 + - **unpack_ludata** (bool,可选) - 若为 True,则对输入 x(LU)进行展开得到 L、U,否则。默认 True。 + - **unpack_pivots** (bool,可选) - 若为 True,则对输入 y(pivots)序列进行展开,得到转换矩阵 P。默认 True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - - Tensor L,由LU展开得到的L矩阵,若unpack_ludata为False,则为None。 - - Tensor U,由LU展开得到的U矩阵,若unpack_ludata为False,则为None。 - - Tensor P,由序列pivots展开得到的旋转矩阵P,若unpack_pivots为False,则为None。 + - Tensor L,由 LU 展开得到的 L 矩阵,若 unpack_ludata 为 False,则为 None。 + - Tensor U,由 LU 展开得到的 U 矩阵,若 unpack_ludata 为 False,则为 None。 + - Tensor P,由序列 pivots 展开得到的旋转矩阵 P,若 unpack_pivots 为 False,则为 None。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/matrix_power_cn.rst b/docs/api/paddle/linalg/matrix_power_cn.rst index 2db17c6e478..1fadb60ba13 100644 --- a/docs/api/paddle/linalg/matrix_power_cn.rst +++ b/docs/api/paddle/linalg/matrix_power_cn.rst @@ -23,7 +23,7 @@ matrix_power 参数 ::::::::: - - **x** (Tensor):输入的欲进行 ``n`` 次幂运算的一个或一批方阵,类型为 Tensor。 ``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型为float32, float64。 + - **x** (Tensor):输入的欲进行 ``n`` 次幂运算的一个或一批方阵,类型为 Tensor。 ``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型为 float32, float64。 - **n** (int):输入的幂次,类型为 int。它可以是任意整数。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 
None。 diff --git a/docs/api/paddle/linalg/matrix_rank_cn.rst b/docs/api/paddle/linalg/matrix_rank_cn.rst index 943eadf96cf..a9551e0f20d 100644 --- a/docs/api/paddle/linalg/matrix_rank_cn.rst +++ b/docs/api/paddle/linalg/matrix_rank_cn.rst @@ -8,18 +8,18 @@ matrix_rank 计算矩阵的秩。 -当hermitian=False时,矩阵的秩是大于指定的 ``tol`` 阈值的奇异值的数量;当hermitian=True时,矩阵的秩是大于指定 ``tol`` 阈值的特征值绝对值的数量。 +当 hermitian=False 时,矩阵的秩是大于指定的 ``tol`` 阈值的奇异值的数量;当 hermitian=True 时,矩阵的秩是大于指定 ``tol`` 阈值的特征值绝对值的数量。 参数 ::::::::: - - **x** (Tensor) - 输入tensor。它的形状应该是 ``[..., m, n]``,其中 ``...`` 是零或者更大的批次维度。如果 ``x`` 是一批矩阵,则输出具有相同的批次尺寸。``x`` 的数据类型应该为float32或float64。 - - **tol** (float|Tensor,可选) - 阈值。默认值:None。如果未指定 ``tol`` , ``sigma`` 为所计算奇异值中的最大值(或特征值绝对值的最大值), ``eps`` 为 ``x`` 的类型的epsilon值,使用公式 ``tol=sigma * max(m,n) * eps`` 来计算 ``tol``。请注意,如果 ``x`` 是一批矩阵,以这种方式为每批矩阵计算 ``tol`` 。 - - **hermitian** (bool,可选) - 表示 ``x`` 是否是Hermitian矩阵。默认值:False。当hermitian=True时,``x`` 被假定为Hermitian矩阵,这时函数内会使用更高效的算法来求解特征值,但在函数内部不会对 ``x`` 进行检查。我们仅仅使用矩阵的下三角来进行计算。 + - **x** (Tensor) - 输入 tensor。它的形状应该是 ``[..., m, n]``,其中 ``...`` 是零或者更大的批次维度。如果 ``x`` 是一批矩阵,则输出具有相同的批次尺寸。``x`` 的数据类型应该为 float32 或 float64。 + - **tol** (float|Tensor,可选) - 阈值。默认值:None。如果未指定 ``tol`` , ``sigma`` 为所计算奇异值中的最大值(或特征值绝对值的最大值), ``eps`` 为 ``x`` 的类型的 epsilon 值,使用公式 ``tol=sigma * max(m,n) * eps`` 来计算 ``tol``。请注意,如果 ``x`` 是一批矩阵,以这种方式为每批矩阵计算 ``tol`` 。 + - **hermitian** (bool,可选) - 表示 ``x`` 是否是 Hermitian 矩阵。默认值:False。当 hermitian=True 时,``x`` 被假定为 Hermitian 矩阵,这时函数内会使用更高效的算法来求解特征值,但在函数内部不会对 ``x`` 进行检查。我们仅仅使用矩阵的下三角来进行计算。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor, ``x`` 的秩,数据类型为int64。 +Tensor, ``x`` 的秩,数据类型为 int64。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/multi_dot_cn.rst b/docs/api/paddle/linalg/multi_dot_cn.rst index deb4f484377..1cf1f3ea466 100755 --- a/docs/api/paddle/linalg/multi_dot_cn.rst +++ b/docs/api/paddle/linalg/multi_dot_cn.rst @@ -5,24 +5,24 @@ multi_dot .. 
py:function:: paddle.linalg.multi_dot(x, name=None) -Multi_dot是一个计算多个矩阵乘法的算子。 +Multi_dot 是一个计算多个矩阵乘法的算子。 -算子支持float16(仅限GPU)、float32和float64三种类型。该算子不支持批量输入。 +算子支持 float16(仅限 GPU)、float32 和 float64 三种类型。该算子不支持批量输入。 -输入[x]的每个tensor的shape必须是二维的,除了第一个和最后一个tensor可以是一维的。如果第一个tensor是shape为(n, )的一维向量,该tensor将被当作是shape为(1, n)的行向量处理,同样的,如果最后一个tensor的shape是(n, ),将被当作是shape为(n, 1)的列向量处理。 +输入[x]的每个 tensor 的 shape 必须是二维的,除了第一个和最后一个 tensor 可以是一维的。如果第一个 tensor 是 shape 为(n, )的一维向量,该 tensor 将被当作是 shape 为(1, n)的行向量处理,同样的,如果最后一个 tensor 的 shape 是(n, ),将被当作是 shape 为(n, 1)的列向量处理。 -如果第一个和最后一个tensor是二维矩阵,那么输出也是一个二维矩阵,否则输出是一维的向量。 +如果第一个和最后一个 tensor 是二维矩阵,那么输出也是一个二维矩阵,否则输出是一维的向量。 -Multi_dot会选择计算量最小的乘法顺序进行计算。(a, b)和(b, c)这样两个矩阵相乘的计算量是a * b * c。给定矩阵A, B, C的shape分别为(20, 5), (5, 100),(100, 10),我们可以计算不同乘法顺序的计算量: +Multi_dot 会选择计算量最小的乘法顺序进行计算。(a, b)和(b, c)这样两个矩阵相乘的计算量是 a * b * c。给定矩阵 A, B, C 的 shape 分别为(20, 5), (5, 100),(100, 10),我们可以计算不同乘法顺序的计算量: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 -在这个例子中,先算B乘以C再乘A的计算量比按顺序乘少5倍。 +在这个例子中,先算 B 乘以 C 再乘 A 的计算量比按顺序乘少 5 倍。 参数 ::::::::: - - **x** ([tensor]):输入的是一个tensor列表。 + - **x** ([tensor]):输入的是一个 tensor 列表。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/linalg/norm_cn.rst b/docs/api/paddle/linalg/norm_cn.rst index 8bbac12fb5c..888e0956b11 100644 --- a/docs/api/paddle/linalg/norm_cn.rst +++ b/docs/api/paddle/linalg/norm_cn.rst @@ -8,25 +8,25 @@ norm -将计算给定Tensor的矩阵范数(Frobenius 范数)和向量范数(向量1范数、2范数、或者通常的p范数)。 +将计算给定 Tensor 的矩阵范数(Frobenius 范数)和向量范数(向量 1 范数、2 范数、或者通常的 p 范数)。 .. 
note:: - 此API与 ``numpy.linalg.norm`` 存在差异。此API支持高阶张量(rank>=3)作为输入,输入 ``axis`` 对应的轴就可以计算出norm的值。但是 ``numpy.linalg.norm`` 仅支持一维向量和二维矩阵作为输入。特别需要注意的是,此API的P阶矩阵范数,实际上将矩阵摊平成向量计算。实际计算的是向量范数,而不是真正的矩阵范数。 + 此 API 与 ``numpy.linalg.norm`` 存在差异。此 API 支持高阶张量(rank>=3)作为输入,输入 ``axis`` 对应的轴就可以计算出 norm 的值。但是 ``numpy.linalg.norm`` 仅支持一维向量和二维矩阵作为输入。特别需要注意的是,此 API 的 P 阶矩阵范数,实际上将矩阵摊平成向量计算。实际计算的是向量范数,而不是真正的矩阵范数。 参数 ::::::::: - - **x** (Tensor) - 输入Tensor。维度为多维,数据类型为float32或float64。 - - **p** (float|string,可选) - 范数(ord)的种类。目前支持的值为 `fro`、`inf`、`-inf`、`0`、`1`、`2`,和任何正实数p对应的p范数。默认值为 `fro` 。 - - **axis** (int|list|tuple,可选) - 使用范数计算的轴。如果 ``axis`` 为None,则忽略input的维度,将其当做向量来计算。如果 ``axis`` 为int或者只有一个元素的list|tuple,``norm`` API会计算输入Tensor的向量范数。如果axis为包含两个元素的list,API会计算输入Tensor的矩阵范数。当 ``axis < 0`` 时,实际的计算维度为 rank(input) + axis。默认值为 `None` 。 - - **keepdim** (bool,可选) - 是否在输出的Tensor中保留和输入一样的维度,默认值为False。当 :attr:`keepdim` 为False时,输出的Tensor会比输入 :attr:`input` 的维度少一些。 + - **x** (Tensor) - 输入 Tensor。维度为多维,数据类型为 float32 或 float64。 + - **p** (float|string,可选) - 范数(ord)的种类。目前支持的值为 `fro`、`inf`、`-inf`、`0`、`1`、`2`,和任何正实数 p 对应的 p 范数。默认值为 `fro` 。 + - **axis** (int|list|tuple,可选) - 使用范数计算的轴。如果 ``axis`` 为 None,则忽略 input 的维度,将其当做向量来计算。如果 ``axis`` 为 int 或者只有一个元素的 list|tuple,``norm`` API 会计算输入 Tensor 的向量范数。如果 axis 为包含两个元素的 list,API 会计算输入 Tensor 的矩阵范数。当 ``axis < 0`` 时,实际的计算维度为 rank(input) + axis。默认值为 `None` 。 + - **keepdim** (bool,可选) - 是否在输出的 Tensor 中保留和输入一样的维度,默认值为 False。当 :attr:`keepdim` 为 False 时,输出的 Tensor 会比输入 :attr:`input` 的维度少一些。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - Tensor,在指定axis上进行范数计算的结果,与输入input数据类型相同。 + Tensor,在指定 axis 上进行范数计算的结果,与输入 input 数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/linalg/pinv_cn.rst b/docs/api/paddle/linalg/pinv_cn.rst index 7d9a458b52a..b31acd9f0d2 100644 --- a/docs/api/paddle/linalg/pinv_cn.rst +++ b/docs/api/paddle/linalg/pinv_cn.rst @@ -5,16 +5,16 @@ pinv .. 
py:function:: paddle.linalg.pinv(x, rcond=1e-15, hermitian=False, name=None) -该API通过奇异值分解(``svd``)来计算伪逆矩阵,支持单个矩阵或批量矩阵。 +该 API 通过奇异值分解(``svd``)来计算伪逆矩阵,支持单个矩阵或批量矩阵。 - - 如果 ``hermitian`` 为假,那么该API会利用奇异值分解(``svd``)进行伪逆矩阵的求解。 - - 如果 ``hermitian`` 为真,那么该API会利用特征值分解(``eigh``)进行伪逆矩阵的求解。同时输入需要满足以下条件:如果数据类型为实数,那么输入需要为对称矩阵;如果数据类型为复数,那么输入需要为 ``hermitian`` 矩阵。 + - 如果 ``hermitian`` 为假,那么该 API 会利用奇异值分解(``svd``)进行伪逆矩阵的求解。 + - 如果 ``hermitian`` 为真,那么该 API 会利用特征值分解(``eigh``)进行伪逆矩阵的求解。同时输入需要满足以下条件:如果数据类型为实数,那么输入需要为对称矩阵;如果数据类型为复数,那么输入需要为 ``hermitian`` 矩阵。 参数 ::::::::: - - **x** (Tensor):输入变量,类型为 Tensor,数据类型为float32、float64、complex64、complex12,形状为(M, N)或(B, M, N)。 - - **rcond** (float64,可选):奇异值(特征值)被截断的阈值,奇异值(特征值)小于rcond * 最大奇异值时会被置为0,默认值为1e-15。 - - **hermitian** (bool,可选):是否为 ``hermitian`` 矩阵或者实对称矩阵,默认值为False。 + - **x** (Tensor):输入变量,类型为 Tensor,数据类型为 float32、float64、complex64、complex128,形状为(M, N)或(B, M, N)。 + - **rcond** (float64,可选):奇异值(特征值)被截断的阈值,奇异值(特征值)小于 rcond * 最大奇异值时会被置为 0,默认值为 1e-15。 + - **hermitian** (bool,可选):是否为 ``hermitian`` 矩阵或者实对称矩阵,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/linalg/qr_cn.rst b/docs/api/paddle/linalg/qr_cn.rst index 00c5ba28926..4d2d7e8c0b3 100644 --- a/docs/api/paddle/linalg/qr_cn.rst +++ b/docs/api/paddle/linalg/qr_cn.rst @@ -6,9 +6,9 @@ qr .. py:function:: paddle.linalg.qr(x, mode="reduced", name=None) -计算一个或一批矩阵的正交三角分解,也称QR分解(暂不支持反向)。 +计算一个或一批矩阵的正交三角分解,也称 QR 分解(暂不支持反向)。 -记 :math:`X` 为一个矩阵,则计算的结果为2个矩阵 :math:`Q` 和 :math:`R`,则满足公式: +记 :math:`X` 为一个矩阵,则计算的结果为 2 个矩阵 :math:`Q` 和 :math:`R`,则满足公式: ..
math:: X = Q * R @@ -19,15 +19,15 @@ qr 参数 :::::::::::: - - **x** (Tensor):输入进行正交三角分解的一个或一批方阵,类型为 Tensor。 ``x`` 的形状应为 ``[*, M, N]``,其中 ``*`` 为零或更大的批次维度,数据类型支持float32, float64。 + - **x** (Tensor):输入进行正交三角分解的一个或一批方阵,类型为 Tensor。 ``x`` 的形状应为 ``[*, M, N]``,其中 ``*`` 为零或更大的批次维度,数据类型支持 float32, float64。 - **mode** (str,可选):控制正交三角分解的行为,默认是 ``reduced``,假设 ``x`` 形状应为 ``[*, M, N]`` 和 ``K = min(M, N)``:如果 ``mode = "reduced"``,则 :math:`Q` 形状为 ``[*, M, K]`` 和 :math:`R` 形状为 ``[*, K, N]``;如果 ``mode = "complete"``,则 :math:`Q` 形状为 ``[*, M, M]`` 和 :math:`R` 形状为 ``[*, M, N]``;如果 ``mode = "r"``,则不返回 :math:`Q`,只返回 :math:`R` 且形状为 ``[*, K, N]`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - - Tensor Q,正交三角分解的Q正交矩阵,需注意如果 ``mode = "reduced"``,则不返回Q矩阵,只返回R矩阵。 - - Tensor R,正交三角分解的R上三角矩阵。 + - Tensor Q,正交三角分解的 Q 正交矩阵,需注意如果 ``mode = "reduced"``,则不返回 Q 矩阵,只返回 R 矩阵。 + - Tensor R,正交三角分解的 R 上三角矩阵。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/slogdet_cn.rst b/docs/api/paddle/linalg/slogdet_cn.rst index 3d6998c68cc..ed6288e8d32 100644 --- a/docs/api/paddle/linalg/slogdet_cn.rst +++ b/docs/api/paddle/linalg/slogdet_cn.rst @@ -4,17 +4,17 @@ slogdet ------------------------------- .. 
py:function:: paddle.linalg.slogdet(x) -计算批量矩阵的行列式值的符号值和行列式值绝对值的自然对数值。如果行列式值为0,则符号值为0,自然对数值为-inf。 +计算批量矩阵的行列式值的符号值和行列式值绝对值的自然对数值。如果行列式值为 0,则符号值为 0,自然对数值为-inf。 参数 :::::::::::: - - **x** (Tensor):输入一个或批量矩阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型支持float32、float64。 + - **x** (Tensor):输入一个或批量矩阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型支持 float32、float64。 返回 :::::::::::: -Tensor,输出矩阵的行列式值 Shape为 ``[2, *]``。 +Tensor,输出矩阵的行列式值 Shape 为 ``[2, *]``。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/solve_cn.rst b/docs/api/paddle/linalg/solve_cn.rst index 627fe4ee788..e80151aa31a 100644 --- a/docs/api/paddle/linalg/solve_cn.rst +++ b/docs/api/paddle/linalg/solve_cn.rst @@ -20,7 +20,7 @@ solve 参数 ::::::::: - - **x** (Tensor):输入的欲进行线性方程组求解的一个或一批方阵(系数矩阵),类型为 Tensor。 ``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型为float32, float64。 + - **x** (Tensor):输入的欲进行线性方程组求解的一个或一批方阵(系数矩阵),类型为 Tensor。 ``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型为 float32, float64。 - **y** (Tensor):输入的欲进行线性方程组求解的右值,类型为 Tensor。 ``y`` 的形状应为 ``[*, M, K]``,其中 ``*`` 为零或更大的批次维度,数据类型和 ``x`` 相同。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/linalg/svd_cn.rst b/docs/api/paddle/linalg/svd_cn.rst index e0526e643f7..dadffbfc452 100644 --- a/docs/api/paddle/linalg/svd_cn.rst +++ b/docs/api/paddle/linalg/svd_cn.rst @@ -8,27 +8,27 @@ svd 计算一个或一批矩阵的奇异值分解。 -记 :math:`X` 为一个矩阵,则计算的结果为2个矩阵 :math:`U`, :math:`VH` 和一个向量 :math:`S`。则分解后满足公式: +记 :math:`X` 为一个矩阵,则计算的结果为 2 个矩阵 :math:`U`, :math:`VH` 和一个向量 :math:`S`。则分解后满足公式: .. 
math:: X = U * diag(S) * VH -值得注意的是,:math:`S` 是向量,从大到小表示每个奇异值。而 :math:`VH` 则是V的共轭转置。 +值得注意的是,:math:`S` 是向量,从大到小表示每个奇异值。而 :math:`VH` 则是 V 的共轭转置。 参数 :::::::::::: - - **x** (Tensor):输入的欲进行奇异值分解的一个或一批方阵,类型为 Tensor。 ``x`` 的形状应为 ``[*, M, N]``,其中 ``*`` 为零或更大的批次维度,数据类型支持float32, float64。 - - **full_matrics** (bool):是否计算完整的U和V矩阵,类型为 bool 默认为 False。这个参数会影响U和V生成的Shape。 + - **x** (Tensor):输入的欲进行奇异值分解的一个或一批方阵,类型为 Tensor。 ``x`` 的形状应为 ``[*, M, N]``,其中 ``*`` 为零或更大的批次维度,数据类型支持 float32, float64。 + - **full_matrics** (bool):是否计算完整的 U 和 V 矩阵,类型为 bool 默认为 False。这个参数会影响 U 和 V 生成的 Shape。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - - Tensor U,奇异值分解的U矩阵。如果full_matrics设置为False,则Shape为 ``[*, M, K]``,如果full_metrics设置为True,那么Shape为 ``[*, M, M]``。其中K为M和N的最小值。 - - Tensor S,奇异值向量,Shape为 ``[*, K]`` 。 - - Tensor VH,奇异值分解的VH矩阵。如果full_matrics设置为False,则Shape为 ``[*, K, N]``,如果full_metrics设置为True,那么Shape为 ``[*, N, N]``。其中K为M和N的最小值。 + - Tensor U,奇异值分解的 U 矩阵。如果 full_matrics 设置为 False,则 Shape 为 ``[*, M, K]``,如果 full_metrics 设置为 True,那么 Shape 为 ``[*, M, M]``。其中 K 为 M 和 N 的最小值。 + - Tensor S,奇异值向量,Shape 为 ``[*, K]`` 。 + - Tensor VH,奇异值分解的 VH 矩阵。如果 full_matrics 设置为 False,则 Shape 为 ``[*, K, N]``,如果 full_metrics 设置为 True,那么 Shape 为 ``[*, N, N]``。其中 K 为 M 和 N 的最小值。 代码示例 :::::::::: diff --git a/docs/api/paddle/linalg/triangular_solve_cn.rst b/docs/api/paddle/linalg/triangular_solve_cn.rst index 78c3bf353df..e2720216e13 100644 --- a/docs/api/paddle/linalg/triangular_solve_cn.rst +++ b/docs/api/paddle/linalg/triangular_solve_cn.rst @@ -26,11 +26,11 @@ triangular_solve 参数 ::::::::: - - **x** (Tensor):线性方程组左边的系数方阵,其为一个或一批方阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型为float32, float64。 - - **y** (Tensor):线性方程组右边的矩阵,其为一个或一批矩阵。``y`` 的形状应为 ``[*, M, K]``,其中 ``*`` 为零或更大的批次维度,数据类型为float32, float64。 - - **upper** (bool,可选) - 对系数矩阵 ``x`` 取上三角还是下三角。默认为True,表示取上三角。 - - **transpose** (bool,可选) - 是否对系数矩阵 ``x`` 进行转置。默认为False,不进行转置。 - - **unitriangular** (bool,可选) - 如果为True,则将系数矩阵 
``x`` 对角线元素假设为1来求解方程。默认为False。 + - **x** (Tensor):线性方程组左边的系数方阵,其为一个或一批方阵。``x`` 的形状应为 ``[*, M, M]``,其中 ``*`` 为零或更大的批次维度,数据类型为 float32, float64。 + - **y** (Tensor):线性方程组右边的矩阵,其为一个或一批矩阵。``y`` 的形状应为 ``[*, M, K]``,其中 ``*`` 为零或更大的批次维度,数据类型为 float32, float64。 + - **upper** (bool,可选) - 对系数矩阵 ``x`` 取上三角还是下三角。默认为 True,表示取上三角。 + - **transpose** (bool,可选) - 是否对系数矩阵 ``x`` 进行转置。默认为 False,不进行转置。 + - **unitriangular** (bool,可选) - 如果为 True,则将系数矩阵 ``x`` 对角线元素假设为 1 来求解方程。默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/linspace_cn.rst b/docs/api/paddle/linspace_cn.rst index ae6c5a78ad5..faa1f63f967 100644 --- a/docs/api/paddle/linspace_cn.rst +++ b/docs/api/paddle/linspace_cn.rst @@ -5,21 +5,21 @@ linspace .. py:function:: paddle.linspace(start, stop, num, dtype=None, name=None) -返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num。 +返回一个 Tensor,Tensor 的值为在区间 start 和 stop 上均匀间隔的 num 个值,输出 Tensor 的长度为 num。 **注意:不进行梯度计算** 参数 :::::::::::: - - **start** (int|float|Tensor) – ``start`` 是区间开始的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32 或者int64。 - - **stop** (int|float|Tensor) – ``stop`` 是区间结束的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32,float64,int32或者int64。 - - **num** (int|Tensor) – ``num`` 是给定区间内需要划分的区间数,可以是一个整型标量,或是一个shape为[1]的Tensor,该Tensor的数据类型需为int32。 - - **dtype** (np.dtype|str,可选) – 输出Tensor的数据类型,可以是float32,float64, int32或者int64。如果dtype的数据类型为None,输出Tensor数据类型为float32。 + - **start** (int|float|Tensor) – ``start`` 是区间开始的变量,可以是一个浮点标量,或是一个 shape 为[1]的 Tensor,该 Tensor 的数据类型可以是 float32,float64,int32 或者 int64。 + - **stop** (int|float|Tensor) – ``stop`` 是区间结束的变量,可以是一个浮点标量,或是一个 shape 为[1]的 Tensor,该 Tensor 的数据类型可以是 float32,float64,int32 或者 int64。 + - **num** (int|Tensor) – ``num`` 是给定区间内需要划分的区间数,可以是一个整型标量,或是一个 shape 为[1]的 Tensor,该 Tensor 的数据类型需为 int32。 + - **dtype** (np.dtype|str,可选) – 输出 Tensor 的数据类型,可以是 float32,float64, int32 或者 int64。如果 dtype 的数据类型为 None,输出 Tensor 数据类型为 float32。 
- **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -表示等间隔划分结果的1-D Tensor,该Tensor的shape大小为 :math:`[num]`,在mum为1的情况下,仅返回包含start元素值的Tensor。 +表示等间隔划分结果的 1-D Tensor,该 Tensor 的 shape 大小为 :math:`[num]`,在 num 为 1 的情况下,仅返回包含 start 元素值的 Tensor。 代码示例 diff --git a/docs/api/paddle/load_cn.rst b/docs/api/paddle/load_cn.rst index 9680f2bae48..f35653f3600 100644 --- a/docs/api/paddle/load_cn.rst +++ b/docs/api/paddle/load_cn.rst @@ -23,9 +23,9 @@ load - **path** (str|BytesIO) - 载入目标对象实例的路径/内存对象。通常该路径是目标文件的路径,当从用于存储预测模型 API 的存储结果中载入 state_dict 时,该路径可能是一个文件前缀或者目录。 - **\*\*configs** (dict,可选) - 其他用于兼容的载入配置选项。这些选项将来可能被移除,如果不是必须使用,不推荐使用这些配置选项。默认为 ``None``。目前支持以下配置选项: - - (1) model_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的预测模型文件名,原默认文件名为 ``__model__`` ; - - (2) params_filename (str) - paddle 1.x版本 ``save_inference_model`` 接口存储格式的参数文件名,没有默认文件名,默认将各个参数分散存储为单独的文件; - - (3) return_numpy(bool) - 如果被指定为 ``True`` ,``load`` 的结果中的Tensor会被转化为 ``numpy.ndarray``,默认为 ``False`` 。 + - (1) model_filename (str) - paddle 1.x 版本 ``save_inference_model`` 接口存储格式的预测模型文件名,原默认文件名为 ``__model__`` ; + - (2) params_filename (str) - paddle 1.x 版本 ``save_inference_model`` 接口存储格式的参数文件名,没有默认文件名,默认将各个参数分散存储为单独的文件; + - (3) return_numpy(bool) - 如果被指定为 ``True`` ,``load`` 的结果中的 Tensor 会被转化为 ``numpy.ndarray``,默认为 ``False`` 。 返回 ::::::::: diff --git a/docs/api/paddle/log10_cn.rst b/docs/api/paddle/log10_cn.rst index 6cb128a5f52..b6e023d8c54 100755 --- a/docs/api/paddle/log10_cn.rst +++ b/docs/api/paddle/log10_cn.rst @@ -9,7 +9,7 @@ log10 -Log10激活函数(计算底为10的对数) +Log10 激活函数(计算底为 10 的对数) ..
math:: \\Out=log_{10} x\\ @@ -18,12 +18,12 @@ Log10激活函数(计算底为10的对数) 参数 :::::::::::: - - **x** (Tensor) – 输入的 Tensor。数据类型为float32、float64。 + - **x** (Tensor) – 输入的 Tensor。数据类型为 float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - Tensor,Log10算子底为10对数输出,数据类型与输入一致。 + Tensor,Log10 算子底为 10 对数输出,数据类型与输入一致。 代码示例 diff --git a/docs/api/paddle/log1p_cn.rst b/docs/api/paddle/log1p_cn.rst index 39d633451f6..d945c93f5a4 100644 --- a/docs/api/paddle/log1p_cn.rst +++ b/docs/api/paddle/log1p_cn.rst @@ -20,7 +20,7 @@ log1p 返回 :::::::::::: -计算 ``x`` 的自然对数 + 1后的 Tensor,数据类型,形状与 ``x`` 一致。 +计算 ``x`` 的自然对数 + 1 后的 Tensor,数据类型,形状与 ``x`` 一致。 代码示例 :::::::::::: diff --git a/docs/api/paddle/log2_cn.rst b/docs/api/paddle/log2_cn.rst index 413ec7f646e..31e257bc52b 100755 --- a/docs/api/paddle/log2_cn.rst +++ b/docs/api/paddle/log2_cn.rst @@ -9,7 +9,7 @@ log2 -Log2激活函数(计算底为2的对数) +Log2 激活函数(计算底为 2 的对数) .. math:: \\Out=log_2x\\ @@ -17,12 +17,12 @@ Log2激活函数(计算底为2的对数) 参数 ::::::::: - - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 + - **x** (Tensor) – 该 OP 的输入为 Tensor。数据类型为 float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor,Log2算子底为2对数输出,数据类型与输入一致。 +Tensor,Log2 算子底为 2 对数输出,数据类型与输入一致。 代码示例 diff --git a/docs/api/paddle/log_cn.rst b/docs/api/paddle/log_cn.rst index fc654706ab9..270bef6dfe4 100644 --- a/docs/api/paddle/log_cn.rst +++ b/docs/api/paddle/log_cn.rst @@ -9,7 +9,7 @@ log -Log激活函数(计算自然对数) +Log 激活函数(计算自然对数) .. 
math:: \\Out=ln(x)\\ @@ -18,12 +18,12 @@ Log激活函数(计算自然对数) 参数 :::::::::::: - - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 + - **x** (Tensor) – 该 OP 的输入为 Tensor。数据类型为 float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor, Log算子自然对数输出,数据类型与输入一致。 +Tensor, Log 算子自然对数输出,数据类型与输入一致。 代码示例 :::::::::::: diff --git a/docs/api/paddle/logcumsumexp_cn.rst b/docs/api/paddle/logcumsumexp_cn.rst index 49b5acda1e1..d2496bfe57f 100644 --- a/docs/api/paddle/logcumsumexp_cn.rst +++ b/docs/api/paddle/logcumsumexp_cn.rst @@ -18,9 +18,9 @@ logcumsumexp 参数 ::::::::: - - **x** (Tensor) - 需要进行操作的Tensor。 - - **axis** (int,可选) - 指明需要计算的维度。-1代表最后一维。默认:None,将输入展开为一维变量再进行计算。 - - **dtype** (str,可选) - 输出Tensor的数据类型,支持float32、float64。如果指定了,那么在执行操作之前,输入张量将被转换为dtype。这对于防止数据类型溢出非常有用。默认为:None。 + - **x** (Tensor) - 需要进行操作的 Tensor。 + - **axis** (int,可选) - 指明需要计算的维度。-1 代表最后一维。默认:None,将输入展开为一维变量再进行计算。 + - **dtype** (str,可选) - 输出 Tensor 的数据类型,支持 float32、float64。如果指定了,那么在执行操作之前,输入张量将被转换为 dtype。这对于防止数据类型溢出非常有用。默认为:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/logical_and_cn.rst b/docs/api/paddle/logical_and_cn.rst index 65690be6b50..3b2b5055b1d 100644 --- a/docs/api/paddle/logical_and_cn.rst +++ b/docs/api/paddle/logical_and_cn.rst @@ -11,14 +11,14 @@ logical_and Out = X \&\& Y .. 
note:: - ``paddle.logical_and`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.logical_and`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 :::::::::::: - - **x** (Tensor)- 输入的 `Tensor`,支持的数据类型为bool, int8, int16, in32, in64, float32, float64。 - - **y** (Tensor)- 输入的 `Tensor`,支持的数据类型为bool, int8, int16, in32, in64, float32, float64。 - - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor`,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **x** (Tensor)- 输入的 `Tensor`,支持的数据类型为 bool, int8, int16, in32, in64, float32, float64。 + - **y** (Tensor)- 输入的 `Tensor`,支持的数据类型为 bool, int8, int16, in32, in64, float32, float64。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor`,可以是程序中已经创建的任何 Tensor。默认值为 None,此时将创建新的 Tensor 来保存输出结果。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/logical_not_cn.rst b/docs/api/paddle/logical_not_cn.rst index 48c09f9d7a1..7b1174becf3 100644 --- a/docs/api/paddle/logical_not_cn.rst +++ b/docs/api/paddle/logical_not_cn.rst @@ -8,7 +8,7 @@ logical_not -逐元素的对 ``X`` Tensor进行逻辑非运算 +逐元素的对 ``X`` Tensor 进行逻辑非运算 .. math:: Out = !X @@ -16,8 +16,8 @@ logical_not 参数 :::::::::::: - - **x** (Tensor)- 逻辑非运算的输入,是一个 Tensor,支持的数据类型为bool, int8, int16, in32, in64, float32, float64。 - - **out** (Tensor,可选)- 指定算子输出结果的 Tensor,可以是程序中已经创建的任何 Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **x** (Tensor)- 逻辑非运算的输入,是一个 Tensor,支持的数据类型为 bool, int8, int16, in32, in64, float32, float64。 + - **out** (Tensor,可选)- 指定算子输出结果的 Tensor,可以是程序中已经创建的任何 Tensor。默认值为 None,此时将创建新的 Tensor 来保存输出结果。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/logical_or_cn.rst b/docs/api/paddle/logical_or_cn.rst index 653a5e72e24..a8a2edbdec8 100644 --- a/docs/api/paddle/logical_or_cn.rst +++ b/docs/api/paddle/logical_or_cn.rst @@ -11,14 +11,14 @@ logical_or Out = X || Y .. 
note:: - ``paddle.logical_or`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.logical_or`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 :::::::::::: - - **x** (Tensor)- 输入的 `Tensor`,支持的数据类型为bool, int8, int16, in32, in64, float32, float64。 - - **y** (Tensor)- 输入的 `Tensor`,支持的数据类型为bool, int8, int16, in32, in64, float32, float64。 - - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor`,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **x** (Tensor)- 输入的 `Tensor`,支持的数据类型为 bool, int8, int16, in32, in64, float32, float64。 + - **y** (Tensor)- 输入的 `Tensor`,支持的数据类型为 bool, int8, int16, in32, in64, float32, float64。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor`,可以是程序中已经创建的任何 Tensor。默认值为 None,此时将创建新的 Tensor 来保存输出结果。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/logical_xor_cn.rst b/docs/api/paddle/logical_xor_cn.rst index 289f67c4af3..a4c5a5e84c6 100644 --- a/docs/api/paddle/logical_xor_cn.rst +++ b/docs/api/paddle/logical_xor_cn.rst @@ -11,14 +11,14 @@ logical_xor Out = (X || Y) \&\& !(X \&\& Y) .. 
note:: - ``paddle.logical_xor`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.logical_xor`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 :::::::::::: - - **x** (Tensor)- 输入的 `Tensor`,支持的数据类型为bool, int8, int16, in32, in64, float32, float64。 - - **y** (Tensor)- 输入的 `Tensor`,支持的数据类型为bool, int8, int16, in32, in64, float32, float64。 - - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor`,可以是程序中已经创建的任何Tensor。默认值为None,此时将创建新的Tensor来保存输出结果。 + - **x** (Tensor)- 输入的 `Tensor`,支持的数据类型为 bool, int8, int16, int32, int64, float32, float64。 + - **y** (Tensor)- 输入的 `Tensor`,支持的数据类型为 bool, int8, int16, int32, int64, float32, float64。 + - **out** (Tensor,可选)- 指定算子输出结果的 `Tensor`,可以是程序中已经创建的任何 Tensor。默认值为 None,此时将创建新的 Tensor 来保存输出结果。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/logit_cn.rst b/docs/api/paddle/logit_cn.rst index 87b60e9cfbb..9ac240efb8f 100644 --- a/docs/api/paddle/logit_cn.rst +++ b/docs/api/paddle/logit_cn.rst @@ -5,12 +5,12 @@ logit -实现了logit层。若eps为默认值None,并且 ``x`` < 0 或者 ``x`` > 1,该函数将返回NaN,OP的计算公式如下: +实现了 logit 层。若 eps 为默认值 None,并且 ``x`` < 0 或者 ``x`` > 1,该函数将返回 NaN,OP 的计算公式如下: .. math:: logit(x) = ln(\frac{x}{1-x}) -其中,:math:`x`` 为输入的 Tensor,且和eps有着如下关系: +其中,:math:`x` 为输入的 Tensor,且和 eps 有着如下关系: ..
math:: x_i=\left\{ diff --git a/docs/api/paddle/logsumexp_cn.rst b/docs/api/paddle/logsumexp_cn.rst index 4996c3985ce..2daefe76924 100644 --- a/docs/api/paddle/logsumexp_cn.rst +++ b/docs/api/paddle/logsumexp_cn.rst @@ -12,9 +12,9 @@ logsumexp 参数 :::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64,维度不超过4 。 - - axis (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算 logsumexp。默认值为 None。 - - keepdim (bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 ``keepdim`` 为True,则输出 Tensor 和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出 Tensor 的形状会在 ``axis`` 上进行 squeeze 操作。默认值为 False。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64,维度不超过 4 。 + - axis (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D 是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算 logsumexp。默认值为 None。 + - keepdim (bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 ``keepdim`` 为 True,则输出 Tensor 和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为 1)。否则,输出 Tensor 的形状会在 ``axis`` 上进行 squeeze 操作。默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/masked_select_cn.rst b/docs/api/paddle/masked_select_cn.rst index 6eb9e46d32e..1f4997d8a58 100644 --- a/docs/api/paddle/masked_select_cn.rst +++ b/docs/api/paddle/masked_select_cn.rst @@ -7,12 +7,12 @@ masked_select -返回一个1-D 的 Tensor,Tensor 的值是根据 ``mask`` 对输入 ``x`` 进行选择的,``mask`` 的数据类型是 bool。 +返回一个 1-D 的 Tensor,Tensor 的值是根据 ``mask`` 对输入 ``x`` 进行选择的,``mask`` 的数据类型是 bool。 参数 :::::::::::: - - **x** (Tensor) - 输入Tensor,数据类型为 float32,float64,int32 或者 int64。 + - **x** (Tensor) - 输入 Tensor,数据类型为 float32,float64,int32 或者 int64。 - **mask** (Tensor) - 用于索引的二进制掩码的 Tensor,数据类型为 bool。 - **name** (str,可选) - 具体用法请参见 
:ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/matmul_cn.rst b/docs/api/paddle/matmul_cn.rst index e8be4dbf630..970b871e5c5 100644 --- a/docs/api/paddle/matmul_cn.rst +++ b/docs/api/paddle/matmul_cn.rst @@ -5,12 +5,12 @@ matmul .. py:function:: paddle.matmul(x, y, transpose_x=False, transpose_y=False, name=None) -计算两个Tensor的乘积,遵循完整的广播规则,关于广播规则,请参考 :ref:`cn_user_guide_broadcasting` 。 +计算两个 Tensor 的乘积,遵循完整的广播规则,关于广播规则,请参考 :ref:`cn_user_guide_broadcasting` 。 并且其行为与 ``numpy.matmul`` 一致。目前,输入张量的维数可以是任意数量,``matmul`` 可以用于 实现 ``dot`` , ``matmul`` 和 ``batchmatmul``。实际行为取决于输入 ``x`` 、输入 ``y`` 、 ``transpose_x`` , ``transpose_y``。具体如下: -- 如果 ``transpose`` 为真,则对应 Tensor 的后两维会转置。如果Tensor的一维,则转置无效。假定 ``x`` 是一个 shape=[D] 的一维 Tensor,则 ``x`` 视为 [1, D]。然而,``y`` 是一个shape=[D]的一维Tensor,则视为[D, 1]。 +- 如果 ``transpose`` 为真,则对应 Tensor 的后两维会转置。如果 Tensor 的一维,则转置无效。假定 ``x`` 是一个 shape=[D] 的一维 Tensor,则 ``x`` 视为 [1, D]。然而,``y`` 是一个 shape=[D]的一维 Tensor,则视为[D, 1]。 乘法行为取决于 ``x`` 和 ``y`` 的尺寸。具体如下: @@ -18,18 +18,18 @@ matmul - 如果两个张量都是二维的,则获得矩阵与矩阵的乘积。 -- 如果 ``x`` 是1维的,而 ``y`` 是2维的,则将1放在 ``x`` 维度之前,以进行矩阵乘法。矩阵相乘后,将删除前置尺寸。 +- 如果 ``x`` 是 1 维的,而 ``y`` 是 2 维的,则将 1 放在 ``x`` 维度之前,以进行矩阵乘法。矩阵相乘后,将删除前置尺寸。 -- 如果 ``x`` 是2维的,而 ``y`` 是1维的,获得矩阵与向量的乘积。 +- 如果 ``x`` 是 2 维的,而 ``y`` 是 1 维的,获得矩阵与向量的乘积。 -- 如果两个输入至少为一维,且至少一个输入为N维(其中N> 2),则将获得批矩阵乘法。如果第一个自变量是一维的,则将1放在其维度的前面,以便进行批量矩阵的乘法运算,然后将其删除。如果第二个参数为一维,则将1附加到其维度后面,以实现成批矩阵倍数的目的,然后将其删除。根据广播规则广播非矩阵维度(不包括最后两个维度)。例如,如果输入 ``x`` 是(j,1,n,m)Tensor,另一个 ``y`` 是(k,m,p)Tensor,则out将是(j,k,n,p)张量。 +- 如果两个输入至少为一维,且至少一个输入为 N 维(其中 N> 2),则将获得批矩阵乘法。如果第一个自变量是一维的,则将 1 放在其维度的前面,以便进行批量矩阵的乘法运算,然后将其删除。如果第二个参数为一维,则将 1 附加到其维度后面,以实现成批矩阵倍数的目的,然后将其删除。根据广播规则广播非矩阵维度(不包括最后两个维度)。例如,如果输入 ``x`` 是(j,1,n,m)Tensor,另一个 ``y`` 是(k,m,p)Tensor,则 out 将是(j,k,n,p)张量。 参数 ::::::::: - - **x** (Tensor):输入变量,类型为 Tensor,数据类型为float32, float64。 - - **y** (Tensor):输入变量,类型为 Tensor,数据类型为float32, float64。 - - **transpose_x** (bool,可选):相乘前是否转置 x,默认值为False。 - - **transpose_y** (bool,可选):相乘前是否转置 y,默认值为False。 + - 
**x** (Tensor):输入变量,类型为 Tensor,数据类型为 float32, float64。 + - **y** (Tensor):输入变量,类型为 Tensor,数据类型为 float32, float64。 + - **transpose_x** (bool,可选):相乘前是否转置 x,默认值为 False。 + - **transpose_y** (bool,可选):相乘前是否转置 y,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/maximum_cn.rst b/docs/api/paddle/maximum_cn.rst index 2c534f42114..81b4dfd4ac2 100644 --- a/docs/api/paddle/maximum_cn.rst +++ b/docs/api/paddle/maximum_cn.rst @@ -6,7 +6,7 @@ maximum .. py:function:: paddle.maximum(x, y, name=None) -逐元素对比输入的两个Tensor,并且把各个位置更大的元素保存到返回结果中。 +逐元素对比输入的两个 Tensor,并且把各个位置更大的元素保存到返回结果中。 等式是: @@ -14,17 +14,17 @@ maximum out = max(x, y) .. note:: - ``paddle.maximum`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.maximum`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 ::::::::: - - **x** (Tensor)- 输入的Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **y** (Tensor)- 输入的Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **x** (Tensor)- 输入的 Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 + - **y** (Tensor)- 输入的 Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - ``Tensor``,存储运算后的结果。如果x和y有不同的shape且是可以广播的,返回Tensor的shape是x和y经过广播后的shape。如果x和y有相同的shape,返回Tensor的shape与x,y相同。 + ``Tensor``,存储运算后的结果。如果 x 和 y 有不同的 shape 且是可以广播的,返回 Tensor 的 shape 是 x 和 y 经过广播后的 shape。如果 x 和 y 有相同的 shape,返回 Tensor 的 shape 与 x,y 相同。 代码示例 diff --git a/docs/api/paddle/mean_cn.rst b/docs/api/paddle/mean_cn.rst index f20adbd455e..dea06a2342c 100644 --- a/docs/api/paddle/mean_cn.rst +++ b/docs/api/paddle/mean_cn.rst @@ -11,9 +11,9 @@ mean 参数 :::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 - - axis (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 
的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D`。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算平均值。默认值为None。 - - keepdim (bool,可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 + - axis (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D 是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算平均值。默认值为 None。 + - keepdim (bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 ``keepdim`` 为 True,则输出 Tensor 和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为 1)。否则,输出 Tensor 的形状会在 ``axis`` 上进行 squeeze 操作。默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/median_cn.rst b/docs/api/paddle/median_cn.rst index 19251330461..75996822639 100644 --- a/docs/api/paddle/median_cn.rst +++ b/docs/api/paddle/median_cn.rst @@ -10,13 +10,13 @@ median 参数 :::::::::: - **x** (Tensor) - 输入的 Tensor,数据类型为:bool、float16、float32、float64、int32、int64。 - - **axis** (int,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int。``axis`` 值应该在范围 [-D, D) 内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算中位数。默认值为 None。 + - **axis** (int,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int。``axis`` 值应该在范围 [-D, D) 内,D 是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算中位数。默认值为 None。 - **keepdim** (bool,可选) - 是否在输出 Tensor 中保留输入的维度。除非 keepdim 为 True,否则输出 Tensor 的维度将比输入 Tensor 小一维,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - ``Tensor``,沿着 ``axis`` 进行中位数计算的结果。如果 ``x`` 的数据类型为float64,则返回值的数据类型为float64,反之返回值数据类型为float32。 + ``Tensor``,沿着 ``axis`` 进行中位数计算的结果。如果 ``x`` 的数据类型为 float64,则返回值的数据类型为 float64,反之返回值数据类型为 float32。 代码示例 :::::::::: diff --git a/docs/api/paddle/meshgrid_cn.rst b/docs/api/paddle/meshgrid_cn.rst 
index e0d6e8da389..b510cbfe9d8 100644 --- a/docs/api/paddle/meshgrid_cn.rst +++ b/docs/api/paddle/meshgrid_cn.rst @@ -13,8 +13,8 @@ meshgrid 参数 :::::::::::: - - \* **args** (Tensor|Tensor数组)- 输入变量为 k 个一维张量,形状分别为(N1,), (N2,), ..., (Nk, )。支持数据类型为float32,float64,int32,int64。 - - ** **kargs** (可选)- 目前只接受name参数(str),具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + - \* **args** (Tensor|Tensor 数组)- 输入变量为 k 个一维张量,形状分别为(N1,), (N2,), ..., (Nk, )。支持数据类型为 float32,float64,int32,int64。 + - ** **kargs** (可选)- 目前只接受 name 参数(str),具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: diff --git a/docs/api/paddle/metric/Accuracy_cn.rst b/docs/api/paddle/metric/Accuracy_cn.rst index 0707af0c27f..932e9c8d4be 100644 --- a/docs/api/paddle/metric/Accuracy_cn.rst +++ b/docs/api/paddle/metric/Accuracy_cn.rst @@ -7,22 +7,22 @@ accuracy accuracy layer。参考 https://en.wikipedia.org/wiki/Precision_and_recall -使用输入和标签计算准确率。如果正确的标签在topk个预测值里,则计算结果加1。注意:输出正确率的类型由input类型决定,input和lable的类型可以不一样。 +使用输入和标签计算准确率。如果正确的标签在 topk 个预测值里,则计算结果加 1。注意:输出正确率的类型由 input 类型决定,input 和 lable 的类型可以不一样。 参数 ::::::::: - - **input** (Tensor)-数据类型为float32,float64。输入为网络的预测值。shape为 ``[sample_number, class_dim]`` 。 - - **label** (Tensor)-数据类型为int64。输入为数据集的标签。shape为 ``[sample_number, 1]`` 。 - - **k** (int64|int32,可选) - 取每个类别中k个预测值用于计算,默认值为1。 - - **correct** (int64|int32, 可选)-正确预测值的个数,默认值为None。 - - **total** (int64|int32,可选)-总共的预测值,默认值为None。 + - **input** (Tensor)-数据类型为 float32,float64。输入为网络的预测值。shape 为 ``[sample_number, class_dim]`` 。 + - **label** (Tensor)-数据类型为 int64。输入为数据集的标签。shape 为 ``[sample_number, 1]`` 。 + - **k** (int64|int32,可选) - 取每个类别中 k 个预测值用于计算,默认值为 1。 + - **correct** (int64|int32, 可选)-正确预测值的个数,默认值为 None。 + - **total** (int64|int32,可选)-总共的预测值,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - ``Tensor``,计算出来的正确率,数据类型为float32的Tensor。 + ``Tensor``,计算出来的正确率,数据类型为 float32 的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/metric/Auc_cn.rst 
b/docs/api/paddle/metric/Auc_cn.rst index 7c74dd9cad8..87a4cc5e121 100644 --- a/docs/api/paddle/metric/Auc_cn.rst +++ b/docs/api/paddle/metric/Auc_cn.rst @@ -6,11 +6,11 @@ Auc .. py:class:: paddle.metric.Auc() .. note:: -目前只用Python实现Auc,可能速度略慢。 +目前只用 Python 实现 Auc,可能速度略慢。 -该接口计算Auc,在二分类(binary classification)中广泛使用。 +该接口计算 Auc,在二分类(binary classification)中广泛使用。 -该接口创建四个局部变量true_positives,true_negatives,false_positives和false_negatives,用于计算Auc。为了离散化AUC曲线,使用临界值的线性间隔来计算召回率和准确率的值。用false positive的召回值高度计算ROC曲线面积,用recall的准确值高度计算PR曲线面积。 +该接口创建四个局部变量 true_positives,true_negatives,false_positives 和 false_negatives,用于计算 Auc。为了离散化 AUC 曲线,使用临界值的线性间隔来计算召回率和准确率的值。用 false positive 的召回值高度计算 ROC 曲线面积,用 recall 的准确值高度计算 PR 曲线面积。 参考链接:https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve @@ -18,8 +18,8 @@ Auc :::::::::::: - **curve** (str) - 将要计算的曲线名的模式,包括'ROC'(默认)或者'PR'(Precision-Recall-curve)。 - - **num_thresholds** (int) - 离散化AUC曲线的整数阈值数,默认是4095。 - - **name** (str,可选) – metric实例的名字,默认是'auc'。 + - **num_thresholds** (int) - 离散化 AUC 曲线的整数阈值数,默认是 4095。 + - **name** (str,可选) – metric 实例的名字,默认是'auc'。 代码示例 1 :::::::::::: @@ -46,7 +46,7 @@ Auc 代码示例 2 :::::::::::: -在Model API中的示例 +在 Model API 中的示例 .. 
code-block:: python @@ -89,12 +89,12 @@ Auc update(pred, label, *args) ''''''''' -更新AUC计算的状态。 +更新 AUC 计算的状态。 **参数** - - **preds** (numpy.array | Tensor):一个shape为[batch_size, 2]的Numpy数组或Tensor,preds[i][j]表示第i个样本类别为j的概率。 - - **labels** (numpy.array | Tensor):一个shape为[batch_size, 1]的Numpy数组或Tensor,labels[i]是0或1,表示第i个样本的类别。 + - **preds** (numpy.array | Tensor):一个 shape 为[batch_size, 2]的 Numpy 数组或 Tensor,preds[i][j]表示第 i 个样本类别为 j 的概率。 + - **labels** (numpy.array | Tensor):一个 shape 为[batch_size, 1]的 Numpy 数组或 Tensor,labels[i]是 0 或 1,表示第 i 个样本的类别。 **返回** @@ -114,18 +114,18 @@ reset() accumulate() ''''''''' -累积的统计指标,计算和返回AUC值。 +累积的统计指标,计算和返回 AUC 值。 **返回** -AUC值,一个标量。 +AUC 值,一个标量。 name() ''''''''' -返回Metric实例的名字,参考上述的name,默认是'auc'。 +返回 Metric 实例的名字,参考上述的 name,默认是'auc'。 **返回** - 评估的名字,string类型。 + 评估的名字,string 类型。 diff --git a/docs/api/paddle/metric/Metric_cn.rst b/docs/api/paddle/metric/Metric_cn.rst index a616acc2c0b..16728d334cb 100644 --- a/docs/api/paddle/metric/Metric_cn.rst +++ b/docs/api/paddle/metric/Metric_cn.rst @@ -6,7 +6,7 @@ Metric .. py:class:: paddle.metric.Metric() -评估器metric的基类。 +评估器 metric 的基类。 用法: @@ -19,9 +19,9 @@ Metric `compute` 接口的进阶用法: -在 `compute` 中可以使用PaddlePaddle内置的算子进行评估器的状态,而不是通过 +在 `compute` 中可以使用 PaddlePaddle 内置的算子进行评估器的状态,而不是通过 Python/NumPy,这样可以加速计算。`update` 接口将 `compute` 的输出作为 -输入,内部采用Python/NumPy计算。 +输入,内部采用 Python/NumPy 计算。 `Metric` 计算流程如下 (在{}中的表示模型和评估器的计算): @@ -47,8 +47,8 @@ Python/NumPy,这样可以加速计算。`update` 接口将 `compute` 的输出 :::::::::::: 以 计算正确率的 `Accuracy` 为例,该评估器的输入为 `pred` 和 `label`,可以在 `compute` 中通过 `pred` 和 `label`先计算正确预测的矩阵。 -例如,预测结果包含10类,`pred` 的shape是[N, 10],`label` 的shape是[N, 1],N是batch size,我们需要计算top-1和top-5的准确率, -可以在 `compute` 中计算每个样本的top-5得分,正确预测的矩阵的shape是[N, 5]。 +例如,预测结果包含 10 类,`pred` 的 shape 是[N, 10],`label` 的 shape 是[N, 1],N 是 batch size,我们需要计算 top-1 和 top-5 的准确率, +可以在 `compute` 中计算每个样本的 top-5 得分,正确预测的矩阵的 shape 是[N, 5]。 .. 
code-block:: python @@ -63,7 +63,7 @@ Python/NumPy,这样可以加速计算。`update` 接口将 `compute` 的输出 代码示例 2 :::::::::::: -在 `compute` 中的计算,使用内置的算子(可以跑在GPU上,使得速度更快)。作为 `update` 的输入,该接口计算如下: +在 `compute` 中的计算,使用内置的算子(可以跑在 GPU 上,使得速度更快)。作为 `update` 的输入,该接口计算如下: .. code-block:: python @@ -111,17 +111,17 @@ accumulate() name() ''''''''' -返回Metric的名字,一般通过__init__构造函数传入。 +返回 Metric 的名字,一般通过__init__构造函数传入。 **返回** - 评估的名字,string类型。 + 评估的名字,string 类型。 compute() ''''''''' -此接口可以通过PaddlePaddle内置的算子计算metric的状态,可以加速metric的计算,为可选的高阶接口。 +此接口可以通过 PaddlePaddle 内置的算子计算 metric 的状态,可以加速 metric 的计算,为可选的高阶接口。 - 如果这个接口定义了,输入是网络的输出 **outputs** 和 标签 **labels**,定义如:`compute(output1, output2, ..., label1, label2,...)` 。 - 如果这个接口没有定义,默认的行为是直接将输入参数返回给 `update`,则其定义如:`update(output1, output2, ..., label1, label2,...)` 。 diff --git a/docs/api/paddle/metric/Overview_cn.rst b/docs/api/paddle/metric/Overview_cn.rst index ab13e91825b..cdacdffbb59 100644 --- a/docs/api/paddle/metric/Overview_cn.rst +++ b/docs/api/paddle/metric/Overview_cn.rst @@ -3,34 +3,34 @@ paddle.metric --------------------- -paddle.metric 目录下包含飞桨框架支持的评估器API。具体如下: +paddle.metric 目录下包含飞桨框架支持的评估器 API。具体如下: -- :ref:`评估器类相关API ` -- :ref:`评估器函数相关API ` +- :ref:`评估器类相关 API ` +- :ref:`评估器函数相关 API ` .. _about_metric_class: -评估器类相关API +评估器类相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`Metric ` ", "评估器基类" " :ref:`Accuracy ` ", "准确率评估器类" - " :ref:`Auc ` ", "auc评估器类" + " :ref:`Auc ` ", "auc 评估器类" " :ref:`Precision ` ", "精确率评估器类" " :ref:`Recall ` ", "召回率评估器类" .. _about_metric_method: -评估器函数相关API +评估器函数相关 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`accuracy ` ", "准确率评估函数" diff --git a/docs/api/paddle/metric/Precision_cn.rst b/docs/api/paddle/metric/Precision_cn.rst index af43d7d253d..4fb49df20c3 100644 --- a/docs/api/paddle/metric/Precision_cn.rst +++ b/docs/api/paddle/metric/Precision_cn.rst @@ -6,17 +6,17 @@ Precision .. py:class:: paddle.metric.Precision() -精确率Precision(也称为 positive predictive value,正预测值)是被预测为正样例中实际为正的比例。该类管理二分类任务的precision分数。 +精确率 Precision(也称为 positive predictive value,正预测值)是被预测为正样例中实际为正的比例。该类管理二分类任务的 precision 分数。 相关链接:https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers .. note:: -这个metric只能用来评估二分类。 +这个 metric 只能用来评估二分类。 参数 :::::::::::: - - **name** (str,可选) – metric实例的名字,默认是'precision'。 + - **name** (str,可选) – metric 实例的名字,默认是'precision'。 代码示例 1 @@ -40,7 +40,7 @@ Precision 代码示例 2 :::::::::::: -在Model API中的示例 +在 Model API 中的示例 .. code-block:: python @@ -81,12 +81,12 @@ Precision update(preds, labels, *args) ''''''''' -更新Precision的状态。 +更新 Precision 的状态。 **参数** - - **preds** (numpy.array | Tensor):预测输出结果通常是sigmoid函数的输出,是一个数据类型为float64或float32的向量。 - - **labels** (numpy.array | Tensor):真实标签的shape和:code: `preds` 相同,数据类型为int32或int64。 + - **preds** (numpy.array | Tensor):预测输出结果通常是 sigmoid 函数的输出,是一个数据类型为 float64 或 float32 的向量。 + - **labels** (numpy.array | Tensor):真实标签的 shape 和:code: `preds` 相同,数据类型为 int32 或 int64。 **返回** @@ -105,18 +105,18 @@ reset() accumulate() ''''''''' -累积的统计指标,计算和返回precision值。 +累积的统计指标,计算和返回 precision 值。 **返回** -precision值,一个标量。 +precision 值,一个标量。 name() ''''''''' -返回Metric实例的名字,参考上述的name,默认是'precision'。 +返回 Metric 实例的名字,参考上述的 name,默认是'precision'。 **返回** -评估的名字,string类型。 +评估的名字,string 类型。 diff --git a/docs/api/paddle/metric/Recall_cn.rst b/docs/api/paddle/metric/Recall_cn.rst index 84257a7ca67..a863da90ba8 100644 --- a/docs/api/paddle/metric/Recall_cn.rst +++ b/docs/api/paddle/metric/Recall_cn.rst @@ -6,18 +6,18 @@ Recall .. 
py:class:: paddle.metric.Recall() -召回率Recall(也称为敏感度)是指得到的相关实例数占相关实例总数的比例。该类管理二分类任务的召回率。 +召回率 Recall(也称为敏感度)是指得到的相关实例数占相关实例总数的比例。该类管理二分类任务的召回率。 相关链接:https://en.wikipedia.org/wiki/Precision_and_recall .. note:: -这个metric只能用来评估二分类。 +这个 metric 只能用来评估二分类。 参数 :::::::::::: - - **name** (str,可选) – metric实例的名字,默认是'recall'。 + - **name** (str,可选) – metric 实例的名字,默认是'recall'。 代码示例 1 @@ -40,7 +40,7 @@ Recall 代码示例 2 :::::::::::: -在Model API中的示例 +在 Model API 中的示例 .. code-block:: python @@ -81,12 +81,12 @@ Recall update(preds, labels, *args) ''''''''' -更新Recall的状态。 +更新 Recall 的状态。 **参数** - - **preds** (numpy.array | Tensor):预测输出结果通常是sigmoid函数的输出,是一个数据类型为float64或float32的向量。 - - **labels** (numpy.array | Tensor):真实标签的shape和:code: `preds` 相同,数据类型为int32或int64。 + - **preds** (numpy.array | Tensor):预测输出结果通常是 sigmoid 函数的输出,是一个数据类型为 float64 或 float32 的向量。 + - **labels** (numpy.array | Tensor):真实标签的 shape 和:code: `preds` 相同,数据类型为 int32 或 int64。 **返回** @@ -106,18 +106,18 @@ reset() accumulate() ''''''''' -累积的统计指标,计算和返回recall值。 +累积的统计指标,计算和返回 recall 值。 **返回** -precision值,一个标量。 +precision 值,一个标量。 name() ''''''''' -返回Metric实例的名字,参考上述的name,默认是'recall'。 +返回 Metric 实例的名字,参考上述的 name,默认是'recall'。 **返回** - 评估的名字,string类型。 + 评估的名字,string 类型。 diff --git a/docs/api/paddle/metric/accuracy_cn.rst b/docs/api/paddle/metric/accuracy_cn.rst index 0707af0c27f..932e9c8d4be 100644 --- a/docs/api/paddle/metric/accuracy_cn.rst +++ b/docs/api/paddle/metric/accuracy_cn.rst @@ -7,22 +7,22 @@ accuracy accuracy layer。参考 https://en.wikipedia.org/wiki/Precision_and_recall -使用输入和标签计算准确率。如果正确的标签在topk个预测值里,则计算结果加1。注意:输出正确率的类型由input类型决定,input和lable的类型可以不一样。 +使用输入和标签计算准确率。如果正确的标签在 topk 个预测值里,则计算结果加 1。注意:输出正确率的类型由 input 类型决定,input 和 lable 的类型可以不一样。 参数 ::::::::: - - **input** (Tensor)-数据类型为float32,float64。输入为网络的预测值。shape为 ``[sample_number, class_dim]`` 。 - - **label** (Tensor)-数据类型为int64。输入为数据集的标签。shape为 ``[sample_number, 1]`` 。 - - **k** (int64|int32,可选) - 取每个类别中k个预测值用于计算,默认值为1。 - - **correct** (int64|int32, 
可选)-正确预测值的个数,默认值为None。 - - **total** (int64|int32,可选)-总共的预测值,默认值为None。 + - **input** (Tensor)-数据类型为 float32,float64。输入为网络的预测值。shape 为 ``[sample_number, class_dim]`` 。 + - **label** (Tensor)-数据类型为 int64。输入为数据集的标签。shape 为 ``[sample_number, 1]`` 。 + - **k** (int64|int32,可选) - 取每个类别中 k 个预测值用于计算,默认值为 1。 + - **correct** (int64|int32, 可选)-正确预测值的个数,默认值为 None。 + - **total** (int64|int32,可选)-总共的预测值,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - ``Tensor``,计算出来的正确率,数据类型为float32的Tensor。 + ``Tensor``,计算出来的正确率,数据类型为 float32 的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/mm_cn.rst b/docs/api/paddle/mm_cn.rst index b9d6aee3358..73ca16a1485 100644 --- a/docs/api/paddle/mm_cn.rst +++ b/docs/api/paddle/mm_cn.rst @@ -10,7 +10,7 @@ mm 用于两个输入矩阵的相乘。 -两个输入的形状可为任意维度,但当任一输入维度大于3时,两个输入的维度必须相等。 +两个输入的形状可为任意维度,但当任一输入维度大于 3 时,两个输入的维度必须相等。 如果原始 Tensor input 或 mat2 的秩为 1 且未转置,则矩阵相乘后的前置或附加维度 1 将移除。 @@ -19,7 +19,7 @@ mm - **input** (Tensor):输入变量,类型为 Tensor 或 LoDTensor。 - **mat2** (Tensor):输入变量,类型为 Tensor 或 LoDTensor。 - - **out** (Tensor,可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 + - **out** (Tensor,可选) – 指定存储运算结果的 Tensor。如果设置为 None 或者不设置,将创建新的 Tensor 存储运算结果,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/mod_cn.rst b/docs/api/paddle/mod_cn.rst index 819fb1c932f..0592b08566e 100644 --- a/docs/api/paddle/mod_cn.rst +++ b/docs/api/paddle/mod_cn.rst @@ -15,13 +15,13 @@ mod 参数 ::::::::: - - x(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 - - y(Tensor)- 多维Tensor。数据类型为float32 、float64、int32或int64。 - - name(str,可选)- 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + - x(Tensor)- 多维 Tensor。数据类型为 float32 、float64、int32 或 int64。 + - y(Tensor)- 多维 Tensor。数据类型为 float32 、float64、int32 或 int64。 + - name(str,可选)- 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: 
-``Tensor``,存储运算后的结果。如果x和y有不同的shape且是可以广播的,返回Tensor的shape是x和y经过广播后的shape。如果x和y有相同的shape,返回Tensor的shape与x,y相同。 +``Tensor``,存储运算后的结果。如果 x 和 y 有不同的 shape 且是可以广播的,返回 Tensor 的 shape 是 x 和 y 经过广播后的 shape。如果 x 和 y 有相同的 shape,返回 Tensor 的 shape 与 x,y 相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/mode_cn.rst b/docs/api/paddle/mode_cn.rst index e09d15fdf3a..b5671014e56 100644 --- a/docs/api/paddle/mode_cn.rst +++ b/docs/api/paddle/mode_cn.rst @@ -10,13 +10,13 @@ mode 参数 ::::::::: - **x** (Tensor) - 输入的多维 ``Tensor``,支持的数据类型:float32、float64、int32、int64。 - - **axis** (int,可选) - 指定对输入Tensor进行运算的轴,``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 - - **keepdim** (bool,可选)- 是否保留指定的轴。如果是True,维度会与输入x一致,对应所指定的轴的size为1。否则,由于对应轴被展开,输出的维度会比输入小1。默认值为1。 + - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴,``axis`` 的有效范围是[-R, R),R 是输入 ``x`` 的 Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 + - **keepdim** (bool,可选)- 是否保留指定的轴。如果是 True,维度会与输入 x 一致,对应所指定的轴的 size 为 1。否则,由于对应轴被展开,输出的维度会比输入小 1。默认值为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -tuple(Tensor),返回检索到的众数结果和对应索引信息。结果的数据类型和输入 ``x`` 一致。索引的数据类型是int64。 +tuple(Tensor),返回检索到的众数结果和对应索引信息。结果的数据类型和输入 ``x`` 一致。索引的数据类型是 int64。 代码示例 ::::::::: diff --git a/docs/api/paddle/moveaxis_cn.rst b/docs/api/paddle/moveaxis_cn.rst index 9f80b749407..e7897341947 100644 --- a/docs/api/paddle/moveaxis_cn.rst +++ b/docs/api/paddle/moveaxis_cn.rst @@ -5,18 +5,18 @@ moveaxis .. 
py:function:: paddle.moveaxis(x, source, destination, name=None) -将输入Tensor ``x`` 的轴从 ``source`` 位置移动到 ``destination`` 位置,其他轴按原来顺序排布。同时根据新的shape,重排Tensor中的数据。 +将输入 Tensor ``x`` 的轴从 ``source`` 位置移动到 ``destination`` 位置,其他轴按原来顺序排布。同时根据新的 shape,重排 Tensor 中的数据。 参数 ::::::::: - - x (Tensor) - 输入的N-D Tensor,数据类型为:bool、int32、int64、float32、float64、complex64、complex128。 + - x (Tensor) - 输入的 N-D Tensor,数据类型为:bool、int32、int64、float32、float64、complex64、complex128。 - source(int|tuple|list) - 将被移动的轴的位置,其每个元素必须为不同的整数。 - destination(int|tuple|list) - 轴被移动后的目标位置,其每个元素必须为不同的整数。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``:将轴移动后的Tensor +``Tensor``:将轴移动后的 Tensor 代码示例 ::::::::: diff --git a/docs/api/paddle/multinomial_cn.rst b/docs/api/paddle/multinomial_cn.rst index 80c66be883b..a5e811dbf6c 100644 --- a/docs/api/paddle/multinomial_cn.rst +++ b/docs/api/paddle/multinomial_cn.rst @@ -9,14 +9,14 @@ multinomial 以输入 ``x`` 为概率,生成一个多项分布的 Tensor。 -输入 ``x`` 是用来随机采样的概率分布,``x`` 中每个元素都应该大于等于0,且不能都为0。 +输入 ``x`` 是用来随机采样的概率分布,``x`` 中每个元素都应该大于等于 0,且不能都为 0。 参数 ``replacement`` 表示它是否是一个可放回的采样,如果 ``replacement`` 为 True,能重复对一种类别采样。 参数 :::::::::::: - **x** (Tensor) - 输入的概率值。数据类型为 ``float32`` 、``float64`` 。 - - **num_samples** (int,可选) - 采样的次数(可选,默认值为1)。 + - **num_samples** (int,可选) - 采样的次数(可选,默认值为 1)。 - **replacement** (bool,可选) - 是否是可放回的采样(可选,默认值为 False)。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/multiplex_cn.rst b/docs/api/paddle/multiplex_cn.rst index e04bed1c60d..ec775a79bec 100644 --- a/docs/api/paddle/multiplex_cn.rst +++ b/docs/api/paddle/multiplex_cn.rst @@ -7,26 +7,26 @@ multiplex -根据给定的index参数,从每个输入Tensor中选择特定行构造输出Tensor。 +根据给定的 index 参数,从每个输入 Tensor 中选择特定行构造输出 Tensor。 -设输入包含 :math:`m` 个Tensor,其中 :math:`I_{i}` 代表第i个输入Tensor,:math:`i` 处于区间 :math:`[0,m)`。 +设输入包含 :math:`m` 个 Tensor,其中 :math:`I_{i}` 代表第 i 个输入 Tensor,:math:`i` 处于区间 :math:`[0,m)`。 -设输出为 :math:`O`,其中 :math:`O[i]` 
为输出的第i行,则输出满足::math:`O[i] = I_{index[i]}[i]` +设输出为 :math:`O`,其中 :math:`O[i]` 为输出的第 i 行,则输出满足::math:`O[i] = I_{index[i]}[i]` 示例: .. code-block:: text - # 输入为4个shape为[4,4]的Tensor + # 输入为 4 个 shape 为[4,4]的 Tensor inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]], [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]], [[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]], [[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]] - # index为shape为[4,1]的Tensor + # index 为 shape 为[4,1]的 Tensor index = [[3],[0],[1],[2]] - # 输出shape为[4,4] + # 输出 shape 为[4,4] out = [[3,0,3,4] // out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4] [0,1,3,4] // out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4] [1,2,4,2] // out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2] @@ -35,13 +35,13 @@ multiplex 参数 :::::::::::: - - **inputs** (list) - 为输入Tensor列表,列表元素为数据类型为float32、float64、int32、int64的多维Tensor。所有输入Tensor的shape应相同,秩必须至少为2。 - - **index** (Tensor)- 用来选择输入Tensor中的某些行构建输出Tensor的索引,为数据类型为int32或int64、shape为[M, 1]的2-D Tensor,其中M为输入Tensor个数。 + - **inputs** (list) - 为输入 Tensor 列表,列表元素为数据类型为 float32、float64、int32、int64 的多维 Tensor。所有输入 Tensor 的 shape 应相同,秩必须至少为 2。 + - **index** (Tensor)- 用来选择输入 Tensor 中的某些行构建输出 Tensor 的索引,为数据类型为 int32 或 int64、shape 为[M, 1]的 2-D Tensor,其中 M 为输入 Tensor 个数。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,进行Multiplex运算后的输出Tensor。 +Tensor,进行 Multiplex 运算后的输出 Tensor。 代码示例 :::::::::::: diff --git a/docs/api/paddle/multiply_cn.rst b/docs/api/paddle/multiply_cn.rst index 7b71a72b057..4932fd0cf17 100755 --- a/docs/api/paddle/multiply_cn.rst +++ b/docs/api/paddle/multiply_cn.rst @@ -15,10 +15,10 @@ multiply .. math:: Out = X \odot Y -- :math:`X`:多维Tensor。 -- :math:`Y`:维度必须小于等于X维度的Tensor。 +- :math:`X`:多维 Tensor。 +- :math:`Y`:维度必须小于等于 X 维度的 Tensor。 -对于这个运算算子有2种情况: +对于这个运算算子有 2 种情况: 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 2. 
:math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 @@ -34,7 +34,7 @@ multiply 返回 ::::::::: - ``Tensor``,存储运算后的结果。如果x和y有不同的shape且是可以广播的,返回Tensor的shape是x和y经过广播后的shape。如果x和y有相同的shape,返回Tensor的shape与x,y相同。 + ``Tensor``,存储运算后的结果。如果 x 和 y 有不同的 shape 且是可以广播的,返回 Tensor 的 shape 是 x 和 y 经过广播后的 shape。如果 x 和 y 有相同的 shape,返回 Tensor 的 shape 与 x,y 相同。 代码示例 diff --git a/docs/api/paddle/mv_cn.rst b/docs/api/paddle/mv_cn.rst index f6c44a22660..1cada6dc308 100644 --- a/docs/api/paddle/mv_cn.rst +++ b/docs/api/paddle/mv_cn.rst @@ -9,8 +9,8 @@ mv 参数 ::::::::: - - **x** (Tensor) - 输入变量,类型为 Tensor,形状为 :math:`[M, N]`,数据类型为float32、float64。 - - **vec** (Tensor) - 输入变量,类型为 Tensor,形状为 :math:`[N]`,数据类型为float32、float64。 + - **x** (Tensor) - 输入变量,类型为 Tensor,形状为 :math:`[M, N]`,数据类型为 float32、float64。 + - **vec** (Tensor) - 输入变量,类型为 Tensor,形状为 :math:`[N]`,数据类型为 float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nanmean_cn.rst b/docs/api/paddle/nanmean_cn.rst index 7d3c95856c1..a4a702ca32d 100644 --- a/docs/api/paddle/nanmean_cn.rst +++ b/docs/api/paddle/nanmean_cn.rst @@ -11,9 +11,9 @@ paddle.nanmean 参数 :::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:unit16、float16、float32、float64。 - - axis (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D`。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算平均值。默认值为None。 - - keepdim (bool,可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - x (Tensor) - 输入的 Tensor,数据类型为:unit16、float16、float32、float64。 + - axis (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D 是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算平均值。默认值为 None。 + - keepdim 
(bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 ``keepdim`` 为 True,则输出 Tensor 和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为 1)。否则,输出 Tensor 的形状会在 ``axis`` 上进行 squeeze 操作。默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nanmedian_cn.rst b/docs/api/paddle/nanmedian_cn.rst index 4c2dc22f94b..60eb64e5b24 100644 --- a/docs/api/paddle/nanmedian_cn.rst +++ b/docs/api/paddle/nanmedian_cn.rst @@ -5,18 +5,18 @@ nanmedian .. py:function:: paddle.nanmedian(x, axis=None, keepdim=True, name=None) -沿给定的轴 ``axis`` 忽略NAN元素计算 ``x`` 中元素的中位数。 +沿给定的轴 ``axis`` 忽略 NAN 元素计算 ``x`` 中元素的中位数。 参数 :::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float16、float32、float64、int32、int64。 - - axis (None|int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int或者int元素的列表。``axis`` 值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D`。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算中位数。默认值为None。 - - keepdim (bool,可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为True。 + - x (Tensor) - 输入的 Tensor,数据类型为:float16、float32、float64、int32、int64。 + - axis (None|int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int 或者 int 元素的列表。``axis`` 值应该在范围[-D, D)内,D 是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算中位数。默认值为 None。 + - keepdim (bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 ``keepdim`` 为 True,则输出 Tensor 和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为 1)。否则,输出 Tensor 的形状会在 ``axis`` 上进行 squeeze 操作。默认值为 True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - ``Tensor``,沿着 ``axis`` 忽略NAN元素进行中位数计算的结果。 + ``Tensor``,沿着 ``axis`` 忽略 NAN 元素进行中位数计算的结果。 代码示例 :::::::::: diff --git a/docs/api/paddle/neg_cn.rst b/docs/api/paddle/neg_cn.rst index 0d81802e102..92951c02a38 100644 --- a/docs/api/paddle/neg_cn.rst +++ b/docs/api/paddle/neg_cn.rst @@ -15,12 +15,12 @@ neg 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:int8、int16、int32、int64、float32、float64。 
+ - x (Tensor) - 输入的 Tensor,数据类型为:int8、int16、int32、int64、float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst b/docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst index 577451da348..8883fdb775a 100755 --- a/docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveAvgPool2D_cn.rst @@ -5,7 +5,7 @@ AdaptiveAvgPool2D .. py:function:: paddle.nn.AdaptiveAvgPool2D(output_size, data_format="NCHW", name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算2D的自适应平均池化。输入和输出都是4-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 2D 的自适应平均池化。输入和输出都是 4-D Tensor, 默认是以 `NCHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`H` 是输入特征的高度,`W` 是输入特征的宽度。 计算公式如下: @@ -25,19 +25,19 @@ AdaptiveAvgPool2D 参数 ::::::::: - - **output_size** (int|list|tuple):算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含两个元素,H和W。H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 - - **data_format** (str,可选):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **output_size** (int|list|tuple):算子输出特征图的尺寸,如果其是 list 或 turple 类型的数值,必须包含两个元素,H 和 W。H 和 W 既可以是 int 类型值也可以是 None,None 表示与输入特征尺寸相同。 + - **data_format** (str,可选):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。其数据类型为float16, float32, float64, int32或int64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor。其数据类型与输入相同。 + - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即 NCHW 格式的 4-D Tensor。其数据类型为 float16, float32, float64, int32 或 int64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即 NCHW 格式的 4-D Tensor。其数据类型与输入相同。 返回 ::::::::: -计算AdaptiveAvgPool2D的可调用对象 +计算 AdaptiveAvgPool2D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst b/docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst index 
dfe3fe57cc9..caa7f572edb 100755 --- a/docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveAvgPool3D_cn.rst @@ -5,7 +5,7 @@ AdaptiveAvgPool3D .. py:function:: paddle.nn.AdaptiveAvgPool3D(output_size, data_format="NCDHW", name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算3D的自适应平均池化。输入和输出都是5-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 3D 的自适应平均池化。输入和输出都是 5-D Tensor, 默认是以 `NCDHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`D` 是特征图长度,`H` 是输入特征的高度,`W` 是输入特征的宽度。 计算公式如下: @@ -28,19 +28,19 @@ AdaptiveAvgPool3D 参数 ::::::::: - - **output_size** (int|list|tuple):算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含三个元素,D,H和W。D,H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 - - **data_format** (str,可选):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征长度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **output_size** (int|list|tuple):算子输出特征图的尺寸,如果其是 list 或 tuple 类型的数值,必须包含三个元素,D,H 和 W。D,H 和 W 既可以是 int 类型值也可以是 None,None 表示与输入特征尺寸相同。 + - **data_format** (str,可选):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征长度,H 是特征高度,W 是特征宽度。默认值:"NCDHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,长度,高度,宽度),即NCDHW格式的5-D Tensor。其数据类型为float16, float32, float64, int32或int64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor。其数据类型与输入相同。 + - **x** (Tensor):默认形状为(批大小,通道数,长度,高度,宽度),即 NCDHW 格式的 5-D Tensor。其数据类型为 float16, float32, float64, int32 或 int64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即 NCDHW 格式的 5-D Tensor。其数据类型与输入相同。 返回 ::::::::: -计算AdaptiveAvgPool3D的可调用对象 +计算 AdaptiveAvgPool3D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst b/docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst index 42b03d9b755..46ea49c434e 100755 --- a/docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveMaxPool1D_cn.rst @@ -6,7 +6,7 @@ AdaptiveMaxPool1D ..
py:function:: paddle.nn.AdaptiveMaxPool1D(output_size, return_mask=False, name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应最大池化。输入和输出都是3-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 1D 的自适应最大池化。输入和输出都是 3-D Tensor, 默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。 计算公式如下: @@ -22,18 +22,18 @@ AdaptiveMaxPool1D 参数 ::::::::: - - **output_size** (int|list|tuple):算子输出特征图的长度,其数据类型为int,list或tuple。 - - **return_mask** (bool,可选):如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **output_size** (int|list|tuple):算子输出特征图的长度,其数据类型为 int,list 或 tuple。 + - **return_mask** (bool,可选):如果设置为 True,则会与输出一起返回最大值的索引,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。其数据类型为float32或者float64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。其数据类型与输入x相同。 + - **x** (Tensor):默认形状为(批大小,通道数,输出特征长度),即 NCL 格式的 3-D Tensor。其数据类型为 float32 或者 float64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度),即 NCL 格式的 3-D Tensor。其数据类型与输入 x 相同。 返回 ::::::::: -计算AdaptiveMaxPool1D的可调用对象 +计算 AdaptiveMaxPool1D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst b/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst index 2c396776bf2..9ec0741b6a8 100644 --- a/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveMaxPool2D_cn.rst @@ -5,7 +5,7 @@ AdaptiveMaxPool2D ------------------------------- .. 
py:class:: paddle.nn.AdaptiveMaxPool2D(output_size, return_mask=False, name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算2D的自适应最大池化。输入和输出都是4-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 2D 的自适应最大池化。输入和输出都是 4-D Tensor, 默认是以 `NCHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`H` 是输入特征的高度,`W` 是输入特征的宽度。 计算公式如下: @@ -31,20 +31,20 @@ AdaptiveMaxPool2D 参数 ::::::::: - - **output_size** (int|list|tuple):算子输出特征图的高和宽大小,其数据类型为int,list或tuple。 - - **return_mask** (bool,可选):如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **output_size** (int|list|tuple):算子输出特征图的高和宽大小,其数据类型为 int,list 或 tuple。 + - **return_mask** (bool,可选):如果设置为 True,则会与输出一起返回最大值的索引,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,输出特征长度,宽度),即NCHW格式的4-D Tensor。其数据类型为float32或者float64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度,宽度),即NCHW格式的4-D Tensor。其数据类型与输入x相同。 + - **x** (Tensor):默认形状为(批大小,通道数,输出特征长度,宽度),即 NCHW 格式的 4-D Tensor。其数据类型为 float32 或者 float64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度,宽度),即 NCHW 格式的 4-D Tensor。其数据类型与输入 x 相同。 返回 ::::::::: -计算AdaptiveMaxPool2D的可调用对象 +计算 AdaptiveMaxPool2D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst b/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst index 8e3fff9d4e8..47c31bf19d0 100644 --- a/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveMaxPool3D_cn.rst @@ -5,7 +5,7 @@ AdaptiveMaxPool3D ------------------------------- .. 
py:function:: paddle.nn.AdaptiveMaxPool3D(output_size, return_mask=False, name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算3D的自适应最大池化。输入和输出都是5-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 3D 的自适应最大池化。输入和输出都是 5-D Tensor, 默认是以 `NCDHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`D` , `H` , `W` 分别是输入特征的深度,高度,宽度。 计算公式如下: @@ -28,18 +28,18 @@ AdaptiveMaxPool3D 参数 ::::::::: - - **output_size** (int|list|tuple):算子输出特征图的高宽长大小,其数据类型为int,list或tuple。 - - **return_mask** (bool,可选):如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **output_size** (int|list|tuple):算子输出特征图的高宽长大小,其数据类型为 int,list 或 tuple。 + - **return_mask** (bool,可选):如果设置为 True,则会与输出一起返回最大值的索引,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,输出特征深度,高度,宽度),即NCDHW格式的5-D Tensor。其数据类型为float32或者float64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征深度,高度,宽度),即NCDHW格式的5-D Tensor。其数据类型与输入x相同。 + - **x** (Tensor):默认形状为(批大小,通道数,输出特征深度,高度,宽度),即 NCDHW 格式的 5-D Tensor。其数据类型为 float32 或者 float64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征深度,高度,宽度),即 NCDHW 格式的 5-D Tensor。其数据类型与输入 x 相同。 返回 ::::::::: -计算AdaptiveMaxPool3D的可调用对象 +计算 AdaptiveMaxPool3D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/AlphaDropout_cn.rst b/docs/api/paddle/nn/AlphaDropout_cn.rst index fbd54bf231a..b888e7e4af1 100644 --- a/docs/api/paddle/nn/AlphaDropout_cn.rst +++ b/docs/api/paddle/nn/AlphaDropout_cn.rst @@ -5,21 +5,21 @@ AlphaDropout .. py:function:: paddle.nn.AlphaDropout(p=0.5, name=None) -AlphaDropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过AlphaDropout计算之后,输出的均值和方差与输入保持一致。AlphaDropout通常与SELU激活函数组合使用。论文请参考:`Self-Normalizing Neural Networks `_ +AlphaDropout 是一种具有自归一化性质的 dropout。均值为 0,方差为 1 的输入,经过 AlphaDropout 计算之后,输出的均值和方差与输入保持一致。AlphaDropout 通常与 SELU 激活函数组合使用。论文请参考:`Self-Normalizing Neural Networks `_ 在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 .. 
note:: - 对应的 `functional方法` 请参考::ref:`cn_api_nn_functional_alpha_dropout` 。 + 对应的 `functional 方法` 请参考::ref:`cn_api_nn_functional_alpha_dropout` 。 参数 ::::::::: - - **p** (float):将输入节点置0的概率,即丢弃概率。默认:0.5。 + - **p** (float):将输入节点置 0 的概率,即丢弃概率。默认:0.5。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -经过AlphaDropout之后的结果,与输入x形状相同的 `Tensor` 。 +经过 AlphaDropout 之后的结果,与输入 x 形状相同的 `Tensor` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/AvgPool1D_cn.rst b/docs/api/paddle/nn/AvgPool1D_cn.rst index b818c7f418d..21d5f296e3a 100755 --- a/docs/api/paddle/nn/AvgPool1D_cn.rst +++ b/docs/api/paddle/nn/AvgPool1D_cn.rst @@ -6,10 +6,10 @@ AvgPool1D .. py:function:: paddle.nn.AvgPool1D(kernel_size, stride=None, padding=0, exclusive=True, ceil_mode=False, name=None) -该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的平均池化。输入和输出都是3-D Tensor, +该算子根据输入 `x` , `kernel_size` 等参数对一个输入 Tensor 计算 1D 的平均池化。输入和输出都是 3-D Tensor, 默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。 -假设输入形状是(N, C, L),输出形状是 (N, C, L_{out}),卷积核尺寸是k, 1d平均池化计算公式如下: +假设输入形状是(N, C, L),输出形状是 (N, C, L_{out}),卷积核尺寸是 k, 1d 平均池化计算公式如下: .. 
math:: @@ -17,22 +17,22 @@ AvgPool1D 参数 ::::::::: - - **kernel_size** (int|list|tuple):池化核的尺寸大小。如果kernel_size为list或tuple类型,其必须包含一个整数,最终池化核的大小为该数值。 - - **stride** (int|list|tuple,可选):池化操作步长。如果stride为list或tuple类型,其必须包含一个整数,最终池化操作的步长为该数值。默认值为None,这时会使用kernel_size作为stride。 - - **padding** (str|int|list|tuple,可选):池化补零的方式。如果padding是一个字符串,则必须为 `SAME` 或者 `VALID`。如果是turple或者list类型,则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。默认值为0。 - - **exclusive** (bool,可选):是否用额外padding的值计算平均池化结果,默认为True。 - - **ceil_mode** (bool,可选):是否用ceil函数计算输出的height和width,如果设置为False,则使用floor函数来计算,默认为False。 + - **kernel_size** (int|list|tuple):池化核的尺寸大小。如果 kernel_size 为 list 或 tuple 类型,其必须包含一个整数,最终池化核的大小为该数值。 + - **stride** (int|list|tuple,可选):池化操作步长。如果 stride 为 list 或 tuple 类型,其必须包含一个整数,最终池化操作的步长为该数值。默认值为 None,这时会使用 kernel_size 作为 stride。 + - **padding** (str|int|list|tuple,可选):池化补零的方式。如果 padding 是一个字符串,则必须为 `SAME` 或者 `VALID`。如果是 tuple 或者 list 类型,则应是 `[pad_left, pad_right]` 形式。如果 padding 是一个非 0 值,那么表示会在输入的两端都 padding 上同样长度的 0。默认值为 0。 + - **exclusive** (bool,可选):是否用额外 padding 的值计算平均池化结果,默认为 True。 + - **ceil_mode** (bool,可选):是否用 ceil 函数计算输出的 height 和 width,如果设置为 False,则使用 floor 函数来计算,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,长度),即NCL格式的3-D Tensor。其数据类型为float32或float64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。其数据类型与输入x相同。 + - **x** (Tensor):默认形状为(批大小,通道数,长度),即 NCL 格式的 3-D Tensor。其数据类型为 float32 或 float64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度),即 NCL 格式的 3-D Tensor。其数据类型与输入 x 相同。 返回 ::::::::: -计算AvgPool1D的可调用对象 +计算 AvgPool1D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/AvgPool2D_cn.rst b/docs/api/paddle/nn/AvgPool2D_cn.rst index 787432c2623..4cf51a0915c 100644 --- a/docs/api/paddle/nn/AvgPool2D_cn.rst +++ b/docs/api/paddle/nn/AvgPool2D_cn.rst @@ -29,25 +29,25 @@ AvgPool2D 参数 ::::::::: - - **kernel_size**
(int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_size_Height, pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若pool_size=2,则池化核大小为2x2。 - - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示H和W维度上stride均为该值。默认值为None,这时会使用kernel_size作为stride。 - - **padding** (str|int|list|tuple,可选) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 pool_padding = "SAME"或 pool_padding = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含2个整数值:[pad_height, pad_width];(2)包含4个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含4个二元组:当 data_format 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示H和W维度上均为该值。默认值:0。 - - **ceil_mode** (bool,可选):是否用ceil函数计算输出高度和宽度。如果是True,则使用 `ceil` 计算输出形状的大小。默认为False。 + - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_size_Height, pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若 pool_size=2,则池化核大小为 2x2。 + - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示 H 和 W 维度上 stride 均为该值。默认值为 None,这时会使用 kernel_size 作为 stride。 + - **padding** (str|int|list|tuple,可选) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 pool_padding = "SAME"或 pool_padding = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 2 个整数值:[pad_height, pad_width];(2)包含 4 个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 4 个二元组:当 data_format 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示 H 和 W 维度上均为该值。默认值:0。 + - **ceil_mode** (bool,可选):是否用 ceil 函数计算输出高度和宽度。如果是 True,则使用 `ceil` 计算输出形状的大小。默认为 False。 - **exclusive** (bool,可选):是否在平均池化模式忽略填充值,默认是 `True`。 - **divisor_override** 
(int|float,可选):如果指定,它将用作除数,否则根据`kernel_size`计算除数。默认`None`。 - - **data_format** (str,可选):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW" + - **data_format** (str,可选):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。其数据类型为float16, float32, float64, int32或int64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor。其数据类型与输入相同。 + - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即 NCHW 格式的 4-D Tensor。其数据类型为 float16, float32, float64, int32 或 int64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即 NCHW 格式的 4-D Tensor。其数据类型与输入相同。 返回 ::::::::: -计算AvgPool2D的可调用对象 +计算 AvgPool2D 的可调用对象 diff --git a/docs/api/paddle/nn/AvgPool3D_cn.rst b/docs/api/paddle/nn/AvgPool3D_cn.rst index 39c9f9f1b81..0bb52cc3279 100644 --- a/docs/api/paddle/nn/AvgPool3D_cn.rst +++ b/docs/api/paddle/nn/AvgPool3D_cn.rst @@ -31,26 +31,26 @@ AvgPool3D 参数 ::::::::: - - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含三个整数值,(pool_size_Depth, pool_size_Height, pool_size_Width)。若为一个整数,则表示D,H和W维度上均为该值,比如若pool_size=2,则池化核大小为[2,2,2]。 - - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示H和W维度上stride均为该值。默认值为None,这时会使用kernel_size作为stride。 - - **padding** (str|int|list|tuple,可选) 池化填充。如果它是一个元组或列表,它可以有3种格式:(1)包含3个整数值:[pad_depth, pad_height, pad_width];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含5个二元组:当 data_format 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示D、H和W维度上均为该值。默认值:0。 - - **ceil_mode** (bool,可选):是否用ceil函数计算输出高度和宽度。如果是True,则使用 `ceil` 
计算输出形状的大小。默认为False + - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含三个整数值,(pool_size_Depth, pool_size_Height, pool_size_Width)。若为一个整数,则表示 D,H 和 W 维度上均为该值,比如若 pool_size=2,则池化核大小为[2,2,2]。 + - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示 H 和 W 维度上 stride 均为该值。默认值为 None,这时会使用 kernel_size 作为 stride。 + - **padding** (str|int|list|tuple,可选) 池化填充。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 3 个整数值:[pad_depth, pad_height, pad_width];(2)包含 6 个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 5 个二元组:当 data_format 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示 D、H 和 W 维度上均为该值。默认值:0。 + - **ceil_mode** (bool,可选):是否用 ceil 函数计算输出高度和宽度。如果是 True,则使用 `ceil` 计算输出形状的大小。默认为 False - **exclusive** (bool,可选):是否在平均池化模式忽略填充值,默认是 `True`。 - **divisor_override** (int|float,可选):如果指定,它将用作除数,否则根据`kernel_size`计算除数。默认`None`。 - - **data_format** (str,可选):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NDCHW"。 + - **data_format** (str,可选):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NCDHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,长度,高度,宽度),即NCDHW格式的5-D Tensor。其数据类型为float16, float32, float64, int32或int64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor。其数据类型与输入相同。 + - **x** (Tensor):默认形状为(批大小,通道数,长度,高度,宽度),即 NCDHW 格式的 5-D Tensor。其数据类型为 float16, float32, float64, int32 或 int64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即 NCDHW 格式的 5-D Tensor。其数据类型与输入相同。 返回 ::::::::: -计算AvgPool3D的可调用对象 +计算 AvgPool3D 的可调用对象 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/BCELoss_cn.rst
b/docs/api/paddle/nn/BCELoss_cn.rst index df14bf312b8..b5481255a99 100644 --- a/docs/api/paddle/nn/BCELoss_cn.rst +++ b/docs/api/paddle/nn/BCELoss_cn.rst @@ -5,7 +5,7 @@ BCELoss .. py:class:: paddle.nn.BCELoss(weight=None, reduction='mean', name=None) -该接口用于创建一个BCELoss的可调用类,用于计算输入 ``input`` 和标签 ``label`` 之间的二值交叉熵损失值。二值交叉熵损失函数公式如下: +该接口用于创建一个 BCELoss 的可调用类,用于计算输入 ``input`` 和标签 ``label`` 之间的二值交叉熵损失值。二值交叉熵损失函数公式如下: 当 `weight` 不为空时,公式为: @@ -31,23 +31,23 @@ BCELoss .. note:: - 输入数据 ``input`` 一般是 ``sigmoid`` 的输出。因为是二分类,所以标签值 ``label`` 应该是0或者1。 + 输入数据 ``input`` 一般是 ``sigmoid`` 的输出。因为是二分类,所以标签值 ``label`` 应该是 0 或者 1。 参数 ::::::::: - - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 - - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回bce_loss。 + - **weight** (Tensor,可选) - 手动指定每个 batch 二值交叉熵的权重,如果指定的话,维度必须是一个 batch 的数据的维度。数据类型是 float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回 bce_loss。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **input** (Tensor) - :math:`(N, *)`,其中N是batch_size, `*` 是任意其他维度。输入数据 ``input`` 一般是 ``sigmoid`` 的输出。数据类型是float32、float64。 + - **input** (Tensor) - :math:`(N, *)`,其中 N 是 batch_size, `*` 是任意其他维度。输入数据 ``input`` 一般是 ``sigmoid`` 的输出。数据类型是 float32、float64。 - **label** (Tensor) - :math:`(N, *)`,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 - - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`(N, *)`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`(N, *)`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 返回 ::::::::: - 
返回计算BCELoss的可调用对象。 + 返回计算 BCELoss 的可调用对象。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst b/docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst index 66139401844..5a47e1a0c1b 100644 --- a/docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst +++ b/docs/api/paddle/nn/BCEWithLogitsLoss_cn.rst @@ -5,13 +5,13 @@ BCEWithLogitsLoss .. py:class:: paddle.nn.BCEWithLogitsLoss(weight=None, reduction='mean', pos_weight=None, name=None) -该OP可创建一个BCEWithLogitsLoss的可调用类,计算输入 `logit` 和标签 `label` 间的 `binary cross entropy with logits loss` 损失。 +该 OP 可创建一个 BCEWithLogitsLoss 的可调用类,计算输入 `logit` 和标签 `label` 间的 `binary cross entropy with logits loss` 损失。 -该OP结合了 `sigmoid` 操作和 :ref:`api_nn_loss_BCELoss` 操作。同时,我们也可以认为该OP是 ``sigmoid_cross_entrop_with_logits`` 和一些 `reduce` 操作的组合。 +该 OP 结合了 `sigmoid` 操作和 :ref:`api_nn_loss_BCELoss` 操作。同时,我们也可以认为该 OP 是 ``sigmoid_cross_entropy_with_logits`` 和一些 `reduce` 操作的组合。 -在每个类别独立的分类任务中,该OP可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 +在每个类别独立的分类任务中,该 OP 可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 -首先,该OP可通过下式计算损失函数: +首先,该 OP 可通过下式计算损失函数: .. math:: Out = -Labels * \log(\sigma(Logit)) - (1 - Labels) * \log(1 - \sigma(Logit)) @@ -21,33 +21,33 @@ BCEWithLogitsLoss .. math:: Out = Logit - Logit * Labels + \log(1 + e^{-Logit}) -为了计算稳定性,防止当 :math:`Logit<0` 时,:math:`e^{-Logit}` 溢出,loss将采用以下公式计算: +为了计算稳定性,防止当 :math:`Logit<0` 时,:math:`e^{-Logit}` 溢出,loss 将采用以下公式计算: ..
math:: Out = \max(Logit, 0) - Logit * Labels + \log(1 + e^{-\|Logit\|}) -然后,当 ``weight`` or ``pos_weight`` 不为None的时候,该算子会在输出Out上乘以相应的权重。张量 ``weight`` 给Batch中的每一条数据赋予不同权重,张量 ``pos_weight`` 给每一类的正例添加相应的权重。 +然后,当 ``weight`` or ``pos_weight`` 不为 None 的时候,该算子会在输出 Out 上乘以相应的权重。张量 ``weight`` 给 Batch 中的每一条数据赋予不同权重,张量 ``pos_weight`` 给每一类的正例添加相应的权重。 -最后,该算子会添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)`。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 +最后,该算子会添加 `reduce` 操作到前面的输出 Out 上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)`。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 -**注意:因为是二分类任务,所以标签值应该是0或者1。 +**注意:因为是二分类任务,所以标签值应该是 0 或者 1。 参数 ::::::::: - - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 - - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回原始loss。 - - **pos_weight** (Tensor,可选) - 手动指定正类的权重,必须是与类别数相等长度的向量。数据类型是float32, float64。默认值是:None。 + - **weight** (Tensor,可选) - 手动指定每个 batch 二值交叉熵的权重,如果指定的话,维度必须是一个 batch 的数据的维度。数据类型是 float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回原始 loss。 + - **pos_weight** (Tensor,可选) - 手动指定正类的权重,必须是与类别数相等长度的向量。数据类型是 float32, float64。默认值是:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **logit** (Tensor) - :math:`[N, *]`,其中N是batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是线性层的输出,不需要经过 ``sigmoid`` 层。数据类型是float32、float64。 + - **logit** (Tensor) - :math:`[N, *]`,其中 N 是 batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是线性层的输出,不需要经过 ``sigmoid`` 层。数据类型是 float32、float64。 - **label** (Tensor) - :math:`[N, *]`,标签 ``label`` 的维度、数据类型与输入 ``logit`` 相同。 - - 
**output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 返回 ::::::::: - 返回计算BCEWithLogitsLoss的可调用对象。 + 返回计算 BCEWithLogitsLoss 的可调用对象。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/BatchNorm1D_cn.rst b/docs/api/paddle/nn/BatchNorm1D_cn.rst index 4f9ce8432a5..80094fefa3e 100644 --- a/docs/api/paddle/nn/BatchNorm1D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm1D_cn.rst @@ -6,9 +6,9 @@ BatchNorm1D .. py:class:: paddle.nn.BatchNorm1D(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCL', name=None) -该接口用于构建 ``BatchNorm1D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理2D或者3D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``BatchNorm1D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理 2D 或者 3D 的 Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ -当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是 minibatch 的统计数据。计算公式如下: .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ @@ -17,7 +17,7 @@ BatchNorm1D - :math:`x`:批输入数据 - :math:`m`:当前批次数据的大小 -当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: +当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean 和 moving_variance),通常来自预先训练好的模型。计算公式如下: .. 
math:: @@ -41,8 +41,8 @@ BatchNorm1D - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为False,则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为False,则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为 False,则表示每个通道的伸缩固定为 1,不可改变。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为 False,则表示每一个通道的偏移固定为 0,不可改变。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - **data_format** (string,可选) - 指定输入数据格式,数据格式可以为“NC"或者"NCL"。默认值:“NCL”。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -54,11 +54,11 @@ BatchNorm1D 形状 :::::::::::: - - input:形状为(批大小,通道数)的2-D Tensor 或(批大小,通道数,长度)的3-D Tensor。 + - input:形状为(批大小,通道数)的 2-D Tensor 或(批大小,通道数,长度)的 3-D Tensor。 - output:和输入形状一样。 .. note:: -目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 +目前训练时设置 track_running_stats 为 False 是无效的,实际还是会按照 True 的方案保存全局均值和方差。之后的版本会修复此问题。 代码示例 diff --git a/docs/api/paddle/nn/BatchNorm2D_cn.rst b/docs/api/paddle/nn/BatchNorm2D_cn.rst index b53d09a0ce2..c90ad45ce06 100644 --- a/docs/api/paddle/nn/BatchNorm2D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm2D_cn.rst @@ -6,9 +6,9 @@ BatchNorm2D .. 
py:class:: paddle.nn.BatchNorm2D(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCHW', name=None): -该接口用于构建 ``BatchNorm2D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理4D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``BatchNorm2D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理 4D 的 Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ -当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是 minibatch 的统计数据。计算公式如下: .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ @@ -17,7 +17,7 @@ BatchNorm2D - :math:`x`:批输入数据 - :math:`m`:当前批次数据的大小 -当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: +当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean 和 moving_variance),通常来自预先训练好的模型。计算公式如下: .. 
math:: @@ -41,8 +41,8 @@ BatchNorm2D - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为False,则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为False,则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为 False,则表示每个通道的伸缩固定为 1,不可改变。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为 False,则表示每一个通道的偏移固定为 0,不可改变。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - **data_format** (string,可选) - 指定输入数据格式,数据格式可以为"NCHW"。默认值:“NCHW”。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -54,11 +54,11 @@ BatchNorm2D 形状 :::::::::::: - - input:形状为(批大小,通道数,高度,宽度)的4-D Tensor 或(批大小,通道数,宽度,高度)的4-D Tensor。 + - input:形状为(批大小,通道数,高度,宽度)的 4-D Tensor 或(批大小,通道数,宽度,高度)的 4-D Tensor。 - output:和输入形状一样。 .. note:: -目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 +目前训练时设置 track_running_stats 为 False 是无效的,实际还是会按照 True 的方案保存全局均值和方差。之后的版本会修复此问题。 代码示例 diff --git a/docs/api/paddle/nn/BatchNorm3D_cn.rst b/docs/api/paddle/nn/BatchNorm3D_cn.rst index c3f2eec74b1..af3cb2e96de 100644 --- a/docs/api/paddle/nn/BatchNorm3D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm3D_cn.rst @@ -6,9 +6,9 @@ BatchNorm3D .. 
py:class:: paddle.nn.BatchNorm3D(num_features, momentum=0.9, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCDHW', name=None): -该接口用于构建 ``BatchNorm3D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理4D的Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``BatchNorm3D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理 5D 的 Tensor,实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ -当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: +当训练时 :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是 minibatch 的统计数据。计算公式如下: .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ @@ -17,7 +17,7 @@ BatchNorm3D - :math:`x`:批输入数据 - :math:`m`:当前批次数据的大小 -当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: +当预测时,track_running_stats = True :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean 和 moving_variance),通常来自预先训练好的模型。计算公式如下: ..
math:: @@ -41,8 +41,8 @@ BatchNorm3D - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为False,则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为False,则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为 False,则表示每个通道的伸缩固定为 1,不可改变。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为 False,则表示每一个通道的偏移固定为 0,不可改变。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - **data_format** (string,可选) - 指定输入数据格式,数据格式可以为“NCDHW"。默认值:“NCDHW”。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -54,11 +54,11 @@ BatchNorm3D 形状 :::::::::::: - - input:形状为(批大小,通道数,维度,高度,宽度)的5-D Tensor。 + - input:形状为(批大小,通道数,维度,高度,宽度)的 5-D Tensor。 - output:和输入形状一样。 .. note:: -目前训练时设置track_running_stats为False是无效的,实际还是会按照True的方案保存全局均值和方差。之后的版本会修复此问题。 +目前训练时设置 track_running_stats 为 False 是无效的,实际还是会按照 True 的方案保存全局均值和方差。之后的版本会修复此问题。 代码示例 diff --git a/docs/api/paddle/nn/BatchNorm_cn.rst b/docs/api/paddle/nn/BatchNorm_cn.rst index 28ac76b3a94..8a8b2703afd 100644 --- a/docs/api/paddle/nn/BatchNorm_cn.rst +++ b/docs/api/paddle/nn/BatchNorm_cn.rst @@ -10,7 +10,7 @@ BatchNorm 该接口用于构建 ``BatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。其中实现了批归一化层(Batch Normalization Layer)的功能,可用作卷积和全连接操作的批归一化函数,根据当前批次数据按通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ -当use_global_stats = False时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是minibatch的统计数据。计算公式如下: +当 use_global_stats = False 时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是 minibatch 的统计数据。计算公式如下: .. 
math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ @@ -19,7 +19,7 @@ BatchNorm - :math:`x`:批输入数据 - :math:`m`:当前批次数据的大小 -当use_global_stats = True时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。计算公式如下: +当 use_global_stats = True 时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean 和 moving_variance),通常来自预先训练好的模型。计算公式如下: .. math:: @@ -41,20 +41,20 @@ BatchNorm :::::::::::: - **num_channels** (int) - 指明输入 ``Tensor`` 的通道数量。 - - **act** (str,可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations`,默认值为None。 + - **act** (str,可选) - 应用于输出上的激活函数,如 tanh、softmax、sigmoid,relu 等,支持列表请参考 :ref:`api_guide_activations`,默认值为 None。 - **is_test** (bool,可选) - 指示是否在测试阶段,非训练阶段使用训练过程中统计到的全局均值和全局方差。默认值:False。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **dtype** (str,可选) - 指明输入 ``Tensor`` 的数据类型,可以为float32或float64。默认值:float32。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **dtype** (str,可选) - 指明输入 ``Tensor`` 的数据类型,可以为 float32 或 float64。默认值:float32。 - **data_layout** (string,可选) - 指定输入数据格式,数据格式可以为“NCHW”或者“NHWC”。默认值:“NCHW”。 - **in_place** (bool,可选) - 指示 ``batch_norm`` 的输出是否可以复用输入内存。默认值:False。 - - **moving_mean_name** (str,可选) - ``moving_mean`` 的名称,存储全局均值。如果将其设置为None, ``batch_norm`` 将随机命名全局均值;否则,``batch_norm`` 将命名全局均值为 ``moving_mean_name``。默认值:None。 - - **moving_variance_name** (string,可选) - ``moving_var`` 的名称,存储全局方差。如果将其设置为None, ``batch_norm`` 
将随机命名全局方差;否则,``batch_norm`` 将命名全局方差为 ``moving_variance_name``。默认值:None。 - - **do_model_average_for_mean_and_var** (bool,可选) - 指示是否为mean和variance做模型均值。默认值:False。 - - **use_global_stats** (bool,可选) – 指示是否使用全局均值和方差。在预测或测试模式下,将 ``use_global_stats`` 设置为true或将 ``is_test`` 设置为true,这两种行为是等效的。在训练模式中,当设置 ``use_global_stats`` 为True时,在训练期间也将使用全局均值和方差。默认值:False。 - - **trainable_statistics** (bool,可选) - eval模式下是否计算mean均值和var方差。eval模式下,trainable_statistics为True时,由该批数据计算均值和方差。默认值:False。 + - **moving_mean_name** (str,可选) - ``moving_mean`` 的名称,存储全局均值。如果将其设置为 None, ``batch_norm`` 将随机命名全局均值;否则,``batch_norm`` 将命名全局均值为 ``moving_mean_name``。默认值:None。 + - **moving_variance_name** (string,可选) - ``moving_var`` 的名称,存储全局方差。如果将其设置为 None, ``batch_norm`` 将随机命名全局方差;否则,``batch_norm`` 将命名全局方差为 ``moving_variance_name``。默认值:None。 + - **do_model_average_for_mean_and_var** (bool,可选) - 指示是否为 mean 和 variance 做模型均值。默认值:False。 + - **use_global_stats** (bool,可选) – 指示是否使用全局均值和方差。在预测或测试模式下,将 ``use_global_stats`` 设置为 true 或将 ``is_test`` 设置为 true,这两种行为是等效的。在训练模式中,当设置 ``use_global_stats`` 为 True 时,在训练期间也将使用全局均值和方差。默认值:False。 + - **trainable_statistics** (bool,可选) - eval 模式下是否计算 mean 均值和 var 方差。eval 模式下,trainable_statistics 为 True 时,由该批数据计算均值和方差。默认值:False。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/BeamSearchDecoder_cn.rst b/docs/api/paddle/nn/BeamSearchDecoder_cn.rst index 6622693d09a..298a3ff887f 100644 --- a/docs/api/paddle/nn/BeamSearchDecoder_cn.rst +++ b/docs/api/paddle/nn/BeamSearchDecoder_cn.rst @@ -10,19 +10,19 @@ BeamSearchDecoder -带beam search解码策略的解码器。该接口包装一个cell来计算概率,然后执行一个beam search步骤计算得分,并为每个解码步骤选择候选输出。更多详细信息请参阅 `Beam search `_ +带 beam search 解码策略的解码器。该接口包装一个 cell 来计算概率,然后执行一个 beam search 步骤计算得分,并为每个解码步骤选择候选输出。更多详细信息请参阅 `Beam search `_ -**注意** 在使用beam search解码时,cell的输入和状态将被扩展到 :math:`beam\_size`,得到 :math:`[batch\_size * beam\_size, ...]` 一样的形状,这个操作在BeamSearchDecoder中自动完成,因此,其他任何在 :code:`cell.call` 中使用的Tensor,如果形状为 :math:`[batch\_size, ...]`,都必须先手动使用 
:code:`BeamSearchDecoder.tile_beam_merge_with_batch` 接口扩展。最常见的情况是带注意机制的编码器输出。 +**注意** 在使用 beam search 解码时,cell 的输入和状态将被扩展到 :math:`beam\_size`,得到 :math:`[batch\_size * beam\_size, ...]` 一样的形状,这个操作在 BeamSearchDecoder 中自动完成,因此,其他任何在 :code:`cell.call` 中使用的 Tensor,如果形状为 :math:`[batch\_size, ...]`,都必须先手动使用 :code:`BeamSearchDecoder.tile_beam_merge_with_batch` 接口扩展。最常见的情况是带注意机制的编码器输出。 参数 :::::::::::: - - **cell** (RNNCell) - RNNCell的实例或者具有相同接口定义的对象。 - - **start_token** (int) - 起始标记id。 - - **end_token** (int) - 结束标记id。 - - **beam_size** (int) - 在beam search中使用的beam宽度。 - - **embedding_fn** (可选) - 处理选中的候选id的接口。它通常是一个将词id转换为词嵌入的嵌入层,其返回值将作为 :code:`cell.call` 接口的 :code:`input` 参数。**注意**,这里要使用 :ref:`cn_api_nn_Embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的id的形状是 :math:`[batch\_size, beam\_size]`,如果使用后者则还需要在这里提供unsqueeze。如果 :code:`embedding_fn` 未提供,则必须在 :code:`cell.call` 中实现词嵌入转换。默认值None。 - - **output_fn** (可选) - 处理cell输出的接口,在计算得分和选择候选标记id之前使用。默认值None。 + - **cell** (RNNCell) - RNNCell 的实例或者具有相同接口定义的对象。 + - **start_token** (int) - 起始标记 id。 + - **end_token** (int) - 结束标记 id。 + - **beam_size** (int) - 在 beam search 中使用的 beam 宽度。 + - **embedding_fn** (可选) - 处理选中的候选 id 的接口。它通常是一个将词 id 转换为词嵌入的嵌入层,其返回值将作为 :code:`cell.call` 接口的 :code:`input` 参数。**注意**,这里要使用 :ref:`cn_api_nn_Embedding` 而非 :ref:`cn_api_fluid_layers_embedding`,因为选中的 id 的形状是 :math:`[batch\_size, beam\_size]`,如果使用后者则还需要在这里提供 unsqueeze。如果 :code:`embedding_fn` 未提供,则必须在 :code:`cell.call` 中实现词嵌入转换。默认值 None。 + - **output_fn** (可选) - 处理 cell 输出的接口,在计算得分和选择候选标记 id 之前使用。默认值 None。 返回 :::::::::::: @@ -39,83 +39,83 @@ COPY-FROM: paddle.nn.BeamSearchDecoder tile_beam_merge_with_batch(x, beam_size) ''''''''' -扩展Tensor的batch维度。此函数的输入是形状为 :math:`[batch\_size, s_0, s_1, ...]` 的Tensor t,由minibatch中的样本 :math:`t[0], ..., t[batch\_size - 1]` 组成。将其扩展为形状是 :math:`[batch\_size * beam\_size, s_0, s_1, ...]` 的Tensor,由 :math:`t[0], t[0], ..., t[1], t[1], ...` 组成,每个minibatch中的样本重复 :math:`beam\_size` 次。 +扩展 Tensor 的 batch 维度。此函数的输入是形状为 
:math:`[batch\_size, s_0, s_1, ...]` 的 Tensor t,由 minibatch 中的样本 :math:`t[0], ..., t[batch\_size - 1]` 组成。将其扩展为形状是 :math:`[batch\_size * beam\_size, s_0, s_1, ...]` 的 Tensor,由 :math:`t[0], t[0], ..., t[1], t[1], ...` 组成,每个 minibatch 中的样本重复 :math:`beam\_size` 次。 **参数** - - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 - - **beam_size** (int) - 在beam search中使用的beam宽度。 + - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的 tenosr。数据类型应为 float32,float64,int32,int64 或 bool。 + - **beam_size** (int) - 在 beam search 中使用的 beam 宽度。 **返回** -Tensor,形状为 :math:`[batch\_size * beam\_size, ...]` 的Tensor,其数据类型与 :code:`x` 相同。 +Tensor,形状为 :math:`[batch\_size * beam\_size, ...]` 的 Tensor,其数据类型与 :code:`x` 相同。 _split_batch_beams(x) ''''''''' -将形状为 :math:`[batch\_size * beam\_size, ...]` 的Tensor变换为形状为 :math:`[batch\_size, beam\_size, ...]` 的新Tensor。 +将形状为 :math:`[batch\_size * beam\_size, ...]` 的 Tensor 变换为形状为 :math:`[batch\_size, beam\_size, ...]` 的新 Tensor。 **参数** - - **x** (Variable) - 形状为 :math:`[batch\_size * beam\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + - **x** (Variable) - 形状为 :math:`[batch\_size * beam\_size, ...]` 的 tenosr。数据类型应为 float32,float64,int32,int64 或 bool。 **返回** -Tensor,形状为 :math:`[batch\_size, beam\_size, ...]` 的Tensor,其数据类型与 :code:`x` 相同。 +Tensor,形状为 :math:`[batch\_size, beam\_size, ...]` 的 Tensor,其数据类型与 :code:`x` 相同。 _merge_batch_beams(x) ''''''''' -将形状为 :math:`[batch\_size, beam\_size, ...]` 的Tensor变换为形状为 :math:`[batch\_size * beam\_size,...]` 的新Tensor。 +将形状为 :math:`[batch\_size, beam\_size, ...]` 的 Tensor 变换为形状为 :math:`[batch\_size * beam\_size,...]` 的新 Tensor。 **参数** - - **x** (Variable) - 形状为 :math:`[batch\_size, beam_size,...]` 的Tenosr。数据类型应为float32,float64,int32,int64或bool。 + - **x** (Variable) - 形状为 :math:`[batch\_size, beam_size,...]` 的 Tenosr。数据类型应为 float32,float64,int32,int64 或 bool。 **返回** -Tensor,形状为 :math:`[batch\_size * beam\_size, ...]` 的Tensor,其数据类型与 :code:`x` 相同。 +Tensor,形状为 
:math:`[batch\_size * beam\_size, ...]` 的 Tensor,其数据类型与 :code:`x` 相同。 _expand_to_beam_size(x) ''''''''' -此函数输入形状为 :math:`[batch\_size,s_0,s_1,...]` 的Tensor t,由minibatch中的样本 :math:`t[0],...,t[batch\_size-1]` 组成。将其扩展为形状 :math:`[ batch\_size,beam\_size,s_0,s_1,...]` 的Tensor,由 :math:`t[0],t[0],...,t[1],t[1],...` 组成,其中每个minibatch中的样本重复 :math:`beam\_size` 次。 +此函数输入形状为 :math:`[batch\_size,s_0,s_1,...]` 的 Tensor t,由 minibatch 中的样本 :math:`t[0],...,t[batch\_size-1]` 组成。将其扩展为形状 :math:`[ batch\_size,beam\_size,s_0,s_1,...]` 的 Tensor,由 :math:`t[0],t[0],...,t[1],t[1],...` 组成,其中每个 minibatch 中的样本重复 :math:`beam\_size` 次。 **参数** - - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的tenosr。数据类型应为float32,float64,int32,int64或bool。 + - **x** (Variable) - 形状为 :math:`[batch\_size, ...]` 的 Tensor。数据类型应为 float32,float64,int32,int64 或 bool。 **返回** -Tensor,具有与 :code:`x` 相同的形状和数据类型的Tensor,其中未完成的beam保持不变,而已完成的beam被替换成特殊的Tensor(Tensor中所有概率质量被分配给EOS标记)。 +Tensor,具有与 :code:`x` 相同的形状和数据类型的 Tensor,其中未完成的 beam 保持不变,而已完成的 beam 被替换成特殊的 Tensor(Tensor 中所有概率质量被分配给 EOS 标记)。 _mask_probs(probs, finished) ''''''''' -屏蔽对数概率。该函数使已完成的beam将所有概率质量分配给EOS标记,而未完成的beam保持不变。 +屏蔽对数概率。该函数使已完成的 beam 将所有概率质量分配给 EOS 标记,而未完成的 beam 保持不变。 **参数** - - **probs** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的 Tensor,表示对数概率。其数据类型应为float32。 - - **finish** (Variable) - 形状为 :math:`[batch\_size,beam\_size]` 的 Tensor,表示所有beam的完成状态。其数据类型应为bool。 + - **probs** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的 Tensor,表示对数概率。其数据类型应为 float32。 + - **finish** (Variable) - 形状为 :math:`[batch\_size,beam\_size]` 的 Tensor,表示所有 beam 的完成状态。其数据类型应为 bool。 **返回** -Tensor,具有与 :code:`x` 相同的形状和数据类型的Tensor,其中未完成的beam保持不变,而已完成的beam被替换成特殊的Tensor(Tensor中所有概率质量被分配给EOS标记)。 +Tensor,具有与 :code:`x` 相同的形状和数据类型的 Tensor,其中未完成的 beam 保持不变,而已完成的 beam 被替换成特殊的 Tensor(Tensor 中所有概率质量被分配给 EOS 标记)。 _gather(x, indices, batch_size) ''''''''' -对Tensor :code:`x` 根据索引 :code:`indices` 收集。 +对 Tensor :code:`x` 根据索引 :code:`indices` 收集。 **参数** - - **x** 
(Variable) - 形状为 :math:`[batch\_size, beam\_size,...]` 的Tensor。 - - **index** (Variable) - 一个形状为 :math:`[batch\_size, beam\_size]` 的int64 Tensor,表示我们用来收集的索引。 - - **batch_size** (Variable) - 形状为 :math:`[1]` 的Tensor。其数据类型应为int32或int64。 + - **x** (Variable) - 形状为 :math:`[batch\_size, beam\_size,...]` 的 Tensor。 + - **index** (Variable) - 一个形状为 :math:`[batch\_size, beam\_size]` 的 int64 Tensor,表示我们用来收集的索引。 + - **batch_size** (Variable) - 形状为 :math:`[1]` 的 Tensor。其数据类型应为 int32 或 int64。 **返回** @@ -125,62 +125,62 @@ Tensor,具有与 :code:``x` 相同的形状和数据类型的 Tensor,表示 initialize(initial_cell_states) ''''''''' -初始化BeamSearchDecoder。 +初始化 BeamSearchDecoder。 **参数** - - **initial_cell_states** (Variable) - 单个Ten'so'r变量或Tensor变量组成的嵌套结构。调用者提供的参数。 + - **initial_cell_states** (Variable) - 单个 Tensor 变量或 Tensor 变量组成的嵌套结构。调用者提供的参数。 **返回** -tuple,一个元组 :code:`(initial_inputs, initial_states, finished)`。:code:`initial_inputs` 是一个Tensor,当 :code:`embedding_fn` 为None时,该Tensor t的形状为 :math:`[batch\_size,beam\_size]`,值为 :code:`start_token`;否则使用 :code:`embedding_fn(t)` 返回的值。:code:`initial_states` 是Tensor变量的嵌套结构(命名元组,字段包括 :code:`cell_states,log_probs,finished,lengths`),其中 :code:`log_probs,finished,lengths` 都含有一个Tensor,形状为 :math:`[batch\_size, beam\_size]`,数据类型为float32,bool,int64。:code:`cell_states` 具有与输入参数 :code:`initial_cell_states` 相同结构的值,但形状扩展为 :math:`[batch\_size,beam\_size,...]`。 :code:`finished` 是一个布尔型Tensor,由False填充,形状为 :math:`[batch\_size,beam\_size]`。 +tuple,一个元组 :code:`(initial_inputs, initial_states, finished)`。:code:`initial_inputs` 是一个 Tensor,当 :code:`embedding_fn` 为 None 时,该 Tensor t 的形状为 :math:`[batch\_size,beam\_size]`,值为 :code:`start_token`;否则使用 :code:`embedding_fn(t)` 返回的值。:code:`initial_states` 是 Tensor 变量的嵌套结构(命名元组,字段包括 :code:`cell_states,log_probs,finished,lengths`),其中 :code:`log_probs,finished,lengths` 都含有一个 Tensor,形状为 :math:`[batch\_size, beam\_size]`,数据类型为 float32,bool,int64。:code:`cell_states` 具有与输入参数 :code:`initial_cell_states` 相同结构的值,但形状扩展为 
:math:`[batch\_size,beam\_size,...]`。 :code:`finished` 是一个布尔型 Tensor,由 False 填充,形状为 :math:`[batch\_size,beam\_size]`。 _beam_search_step(time, logits, next_cell_states, beam_state) ''''''''' -计算得分并选择候选id。 +计算得分并选择候选 id。 **参数** - - **time** (Variable) - 调用者提供的形状为[1]的Tensor,表示当前解码的时间步长。其数据类型为int64。 - - **logits** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的Tensor,表示当前时间步的logits。其数据类型为float32。 - - **next_cell_states** (Variable) - 单个Tensor变量或Tensor变量组成的嵌套结构。它的结构,形状和数据类型与 :code:`initialize()` 的返回值 :code:`initial_states` 中的 :code:`cell_states` 相同。它代表该cell的下一个状态。 - - **beam_state** (Variable) - Tensor变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + - **time** (Variable) - 调用者提供的形状为[1]的 Tensor,表示当前解码的时间步长。其数据类型为 int64。 + - **logits** (Variable) - 形状为 :math:`[batch\_size,beam\_size,vocab\_size]` 的 Tensor,表示当前时间步的 logits。其数据类型为 float32。 + - **next_cell_states** (Variable) - 单个 Tensor 变量或 Tensor 变量组成的嵌套结构。它的结构,形状和数据类型与 :code:`initialize()` 的返回值 :code:`initial_states` 中的 :code:`cell_states` 相同。它代表该 cell 的下一个状态。 + - **beam_state** (Variable) - Tensor 变量的结构。在第一个解码步骤与 :code:`initialize()` 返回的 :code:`initial_states` 同,其他步骤与 :code:`step()` 返回的 :code:`beam_search_state` 相同。 **返回** -tuple,一个元组 :code:`(beam_search_output, beam_search_state)`。:code:`beam_search_output` 是Tensor变量的命名元组,字段为 :code:`scores,predicted_ids parent_ids`。其中 :code:`scores,predicted_ids,parent_ids` 都含有一个Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`beam_search_state` 具有与输入参数 :code:`beam_state` 相同的结构,形状和数据类型。 +tuple,一个元组 :code:`(beam_search_output, beam_search_state)`。:code:`beam_search_output` 是 Tensor 变量的命名元组,字段为 :code:`scores,predicted_ids parent_ids`。其中 :code:`scores,predicted_ids,parent_ids` 都含有一个 Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为 float32 ,int64,int64。:code:`beam_search_state` 具有与输入参数 :code:`beam_state` 相同的结构,形状和数据类型。 step(time, inputs, states, **kwargs) ''''''''' -执行beam 
search解码步骤,该步骤使用 :code:`cell` 来计算概率,然后执行beam search步骤以计算得分并选择候选标记ID。 +执行 beam search 解码步骤,该步骤使用 :code:`cell` 来计算概率,然后执行 beam search 步骤以计算得分并选择候选标记 ID。 **参数** - - **time** (Variable) - 调用者提供的形状为[1]的Tensor,表示当前解码的时间步长。其数据类型为int64。。 - - **inputs** (Variable) - Tensor变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 - - **states** (Variable) - Tensor变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 + - **time** (Variable) - 调用者提供的形状为[1]的 Tensor,表示当前解码的时间步长。其数据类型为 int64。。 + - **inputs** (Variable) - Tensor 变量。在第一个解码时间步时与由 :code:`initialize()` 返回的 :code:`initial_inputs` 相同,其他时间步与由 :code:`step()` 返回的 :code:`next_inputs` 相同。 + - **states** (Variable) - Tensor 变量的结构。在第一个解码时间步时与 :code:`initialize()` 返回的 :code:`initial_states` 相同,其他时间步与由 :code:`step()` 返回的 :code:`beam_search_state` 相同。 - **kwargs** - 附加的关键字参数,由调用者提供。 **返回** -tuple,一个元组 :code:`(beam_search_output,beam_search_state,next_inputs,finish)` 。:code:`beam_search_state` 和参数 :code:`states` 具有相同的结构,形状和数据类型。:code:`next_inputs` 与输入参数 :code:`inputs` 具有相同的结构,形状和数据类型。:code:`beam_search_output` 是Tensor变量的命名元组(字段包括 :code:`scores,predicted_ids,parent_ids` ),其中 :code:`scores,predicted_ids,parent_ids` 都含有一个Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为float32 ,int64,int64。:code:`finished` 是一个bool类型的Tensor,形状为 :math:`[batch\_size,beam\_size]`。 +tuple,一个元组 :code:`(beam_search_output,beam_search_state,next_inputs,finish)` 。:code:`beam_search_state` 和参数 :code:`states` 具有相同的结构,形状和数据类型。:code:`next_inputs` 与输入参数 :code:`inputs` 具有相同的结构,形状和数据类型。:code:`beam_search_output` 是 Tensor 变量的命名元组(字段包括 :code:`scores,predicted_ids,parent_ids` ),其中 :code:`scores,predicted_ids,parent_ids` 都含有一个 Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为 float32 ,int64,int64。:code:`finished` 是一个 bool 类型的 Tensor,形状为 :math:`[batch\_size,beam\_size]`。 finalize(outputs, final_states, sequence_lengths) ''''''''' -使用 
:code:`gather_tree` 沿beam search树回溯并构建完整的预测序列。 +使用 :code:`gather_tree` 沿 beam search 树回溯并构建完整的预测序列。 **参数** - - **outputs** (Variable) - Tensor变量组成的结构(命名元组),该结构和数据类型与 :code:`output_dtype` 相同。Tensor将所有时间步的输出堆叠,因此具有形状 :math:`[time\_step,batch\_size,...]`。 - - **final_states** (Variable) - Tensor变量组成的结构(命名元组)。它是 :code:`decoder.step` 在最后一个解码步骤返回的 :code:`next_states`,因此具有与任何时间步的 :code:`state` 相同的结构、形状和数据类型。 - - **sequence_lengths** (Variable) - Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为int64。它包含解码期间确定的每个beam的序列长度。 + - **outputs** (Variable) - Tensor 变量组成的结构(命名元组),该结构和数据类型与 :code:`output_dtype` 相同。Tensor 将所有时间步的输出堆叠,因此具有形状 :math:`[time\_step,batch\_size,...]`。 + - **final_states** (Variable) - Tensor 变量组成的结构(命名元组)。它是 :code:`decoder.step` 在最后一个解码步骤返回的 :code:`next_states`,因此具有与任何时间步的 :code:`state` 相同的结构、形状和数据类型。 + - **sequence_lengths** (Variable) - Tensor,形状为 :math:`[batch\_size,beam\_size]`,数据类型为 int64。它包含解码期间确定的每个 beam 的序列长度。 **返回** -tuple,一个元组 :code:`(predicted_ids, final_states)`。:code:`predicted_ids` 是一个Tensor,形状为 :math:`[time\_step,batch\_size,beam\_size]`,数据类型为int64。:code:`final_states` 与输入参数 :code:`final_states` 相同。 +tuple,一个元组 :code:`(predicted_ids, final_states)`。:code:`predicted_ids` 是一个 Tensor,形状为 :math:`[time\_step,batch\_size,beam\_size]`,数据类型为 int64。:code:`final_states` 与输入参数 :code:`final_states` 相同。 diff --git a/docs/api/paddle/nn/BiRNN_cn.rst b/docs/api/paddle/nn/BiRNN_cn.rst index 7b8ce218b85..bd78297ef80 100644 --- a/docs/api/paddle/nn/BiRNN_cn.rst +++ b/docs/api/paddle/nn/BiRNN_cn.rst @@ -9,30 +9,30 @@ BiRNN **双向循环神经网络** -该OP是双向循环神经网络(BiRNN)的封装,将输入的前向cell和后向cell封装为一个双向循环神经网络。该网络单独执行前向和后向cell的计算并将输出拼接。 +该 OP 是双向循环神经网络(BiRNN)的封装,将输入的前向 cell 和后向 cell 封装为一个双向循环神经网络。该网络单独执行前向和后向 cell 的计算并将输出拼接。 参数 :::::::::::: - - **cell_fw** (RNNCellBase) - 前向cell。RNNCellBase类的一个实例。 - - **cell_bw** (RNNCellBase) - 后向cell。RNNCellBase类的一个实例。 - - **time_major** (bool,可选) - 指定input的第一个维度是否是time steps。默认为False。 + - **cell_fw** (RNNCellBase) - 前向 cell。RNNCellBase 类的一个实例。 + 
- **cell_bw** (RNNCellBase) - 后向 cell。RNNCellBase 类的一个实例。 + - **time_major** (bool,可选) - 指定 input 的第一个维度是否是 time steps。默认为 False。 输入 :::::::::::: - - **inputs** (Tensor) - 输入。如果time_major为False,则Tensor的形状为[batch_size,time_steps,input_size],如果time_major为True,则Tensor的形状为[time_steps,batch_size,input_size],input_size为cell的input_size。 - - **initial_states** (list|tuple,可选) - 输入前向和后向cell的初始状态,如果没有给出则会调用 :code:`cell.get_initial_states` 生成初始状态。默认为None。 - - **sequence_length** (Tensor,可选) - 指定输入序列的长度,形状为[batch_size],数据类型为int64或int32。在输入序列中所有time step不小于sequence_length的元素都会被当作填充元素处理(状态不再更新)。 + - **inputs** (Tensor) - 输入。如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,input_size],如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,input_size],input_size 为 cell 的 input_size。 + - **initial_states** (list|tuple,可选) - 输入前向和后向 cell 的初始状态,如果没有给出则会调用 :code:`cell.get_initial_states` 生成初始状态。默认为 None。 + - **sequence_length** (Tensor,可选) - 指定输入序列的长度,形状为[batch_size],数据类型为 int64 或 int32。在输入序列中所有 time step 不小于 sequence_length 的元素都会被当作填充元素处理(状态不再更新)。 输出 :::::::::::: - - **outputs** (Tensor) - 输出,由前向和后向cell的输出拼接得到。如果time_major为False,则Tensor的形状为[batch_size,time_steps,cell_fw.hidden_size + cell_bw.hidden_size],如果time_major为True,则Tensor的形状为[time_steps,batch_size,cell_fw.hidden_size + cell_bw.hidden_size]。 - - **final_states** (tuple) - 前向和后向cell的最终状态。 + - **outputs** (Tensor) - 输出,由前向和后向 cell 的输出拼接得到。如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,cell_fw.hidden_size + cell_bw.hidden_size],如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,cell_fw.hidden_size + cell_bw.hidden_size]。 + - **final_states** (tuple) - 前向和后向 cell 的最终状态。 .. 
Note:: - 该类是一个封装rnn cell的低级api,用户在使用forward函数时须确保initial_states满足cell的要求。 + 该类是一个封装 rnn cell 的低级 api,用户在使用 forward 函数时须确保 initial_states 满足 cell 的要求。 代码示例 diff --git a/docs/api/paddle/nn/Bilinear_cn.rst b/docs/api/paddle/nn/Bilinear_cn.rst index 5eb6937a854..e949bc85e04 100644 --- a/docs/api/paddle/nn/Bilinear_cn.rst +++ b/docs/api/paddle/nn/Bilinear_cn.rst @@ -17,10 +17,10 @@ Bilinear 在这个公式中: - - :math:`x1`:第一个输入,包含:in1_features个元素,形状为 [batch_size, in1_features]。 - - :math:`x2`:第二个输入,包含:in2_features个元素,形状为 [batch_size, in2_features]。 - - :math:`W_{i}`:第:i个被学习的权重,形状是 [in1_features, in2_features],而完整的W(即可训练的参数W)的形状为[out_features, in1_features, in2_features]。 - - :math:`out_{i}`:输出的第:i个元素,形状是 [batch_size],而完整的out的形状为[batch_size, out_features]。 + - :math:`x1`:第一个输入,包含:in1_features 个元素,形状为 [batch_size, in1_features]。 + - :math:`x2`:第二个输入,包含:in2_features 个元素,形状为 [batch_size, in2_features]。 + - :math:`W_{i}`:第:i 个被学习的权重,形状是 [in1_features, in2_features],而完整的 W(即可训练的参数 W)的形状为[out_features, in1_features, in2_features]。 + - :math:`out_{i}`:输出的第:i 个元素,形状是 [batch_size],而完整的 out 的形状为[batch_size, out_features]。 - :math:`b`:被学习的偏置参数,形状是 [1, out_features]。 - :math:`x2^\mathrm{T}`: :math:`x2` 的转置。 @@ -30,7 +30,7 @@ Bilinear - **in2_features** (int):每个 **x2** 元素的维度。 - **out_features** (int):输出张量的维度。 - **weight_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。 - - **bias_attr** (ParamAttr,可选):指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性,此时bias的元素会被初始化成0。如果设置成False,则不会有bias加到output结果上。 + - **bias_attr** (ParamAttr,可选):指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性,此时 bias 的元素会被初始化成 0。如果设置成 False,则不会有 bias 加到 output 结果上。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 属性 diff --git a/docs/api/paddle/nn/CELU_cn.rst b/docs/api/paddle/nn/CELU_cn.rst index ea442a7590c..e2074a89aca 100644 --- a/docs/api/paddle/nn/CELU_cn.rst +++ b/docs/api/paddle/nn/CELU_cn.rst @@ -4,9 +4,9 @@ CELU ------------------------------- .. 
py:class:: paddle.nn.CELU(alpha=1.0, name=None) -CELU激活层(CELU Activation Operator) +CELU 激活层(CELU Activation Operator) -根据 `Continuously Differentiable Exponential Linear Units `_ 对输入Tensor中每个元素应用以下计算。 +根据 `Continuously Differentiable Exponential Linear Units `_ 对输入 Tensor 中每个元素应用以下计算。 .. math:: @@ -16,13 +16,13 @@ CELU激活层(CELU Activation Operator) 参数 :::::::::: - - alpha (float,可选) - CELU的alpha值,默认值为1.0。 + - alpha (float,可选) - CELU 的 alpha 值,默认值为 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/CTCLoss_cn.rst b/docs/api/paddle/nn/CTCLoss_cn.rst index b1675348163..3411c30826c 100644 --- a/docs/api/paddle/nn/CTCLoss_cn.rst +++ b/docs/api/paddle/nn/CTCLoss_cn.rst @@ -10,19 +10,19 @@ CTCLoss 参数 ::::::::: - - **blank** (int,可选): - 空格标记的 ID 值,其取值范围为 [0,num_classes+1)。数据类型支持int32。默认值为0。 + - **blank** (int,可选): - 空格标记的 ID 值,其取值范围为 [0,num_classes+1)。数据类型支持 int32。默认值为 0。 - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。设置为 ``'mean'`` 时,对 loss 值除以 label_lengths,并返回所得商的均值;设置为 ``'sum'`` 时,返回 loss 值的总和;设置为 ``'none'`` 时,则直接返回输出的 loss 值。默认值为 ``'mean'``。 形状 ::::::::: - - **logits** (Tensor): - 经过 padding 的概率序列,其 shape 必须是 [max_logit_length, batch_size, num_classes + 1]。其中 max_logit_length 是最长输入序列的长度。该输入不需要经过 softmax 操作,因为该 OP 的内部对 input 做了 softmax 操作。数据类型仅支持float32。 - - **labels** (Tensor): - 经过 padding 的标签序列,其 shape 为 [batch_size, max_label_length],其中 max_label_length 是最长的 label 序列的长度。数据类型支持int32。 - - **input_lengths** (Tensor): - 表示输入 ``logits`` 数据中每个序列的长度,shape为 [batch_size]。数据类型支持int64。 - - **label_lengths** (Tensor): - 表示 label 中每个序列的长度,shape为 [batch_size]。数据类型支持int64。 + - **logits** (Tensor): - 经过 padding 的概率序列,其 shape 必须是 [max_logit_length, batch_size, num_classes + 1]。其中 max_logit_length 是最长输入序列的长度。该输入不需要经过 softmax 操作,因为该 OP 的内部对 input 做了 
softmax 操作。数据类型仅支持 float32。 + - **labels** (Tensor): - 经过 padding 的标签序列,其 shape 为 [batch_size, max_label_length],其中 max_label_length 是最长的 label 序列的长度。数据类型支持 int32。 + - **input_lengths** (Tensor): - 表示输入 ``logits`` 数据中每个序列的长度,shape 为 [batch_size]。数据类型支持 int64。 + - **label_lengths** (Tensor): - 表示 label 中每个序列的长度,shape 为 [batch_size]。数据类型支持 int64。 返回 ::::::::: -``Tensor``,输入 ``logits`` 和标签 ``labels`` 间的 `ctc loss`。如果 :attr:`reduction` 是 ``'none'``,则输出 loss 的维度为 [batch_size]。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。数据类型与输入 ``logits`` 一致。 +``Tensor``,输入 ``logits`` 和标签 ``labels`` 间的 `ctc loss`。如果 :attr:`reduction` 是 ``'none'``,则输出 loss 的维度为 [batch_size]。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出 Loss 的维度为 [1]。数据类型与输入 ``logits`` 一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst b/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst index c27cb76a797..d15e942bd1e 100644 --- a/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst +++ b/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst @@ -8,7 +8,7 @@ ClipGradByGlobalNorm -将一个 Tensor列表 :math:`t\_list` 中所有Tensor的L2范数之和,限定在 ``clip_norm`` 范围内。 +将一个 Tensor 列表 :math:`t\_list` 中所有 Tensor 的 L2 范数之和,限定在 ``clip_norm`` 范围内。 - 如果范数之和大于 ``clip_norm``,则所有 Tensor 会乘以一个系数进行压缩 diff --git a/docs/api/paddle/nn/ClipGradByNorm_cn.rst b/docs/api/paddle/nn/ClipGradByNorm_cn.rst index 65efda938dd..963c994ca5d 100644 --- a/docs/api/paddle/nn/ClipGradByNorm_cn.rst +++ b/docs/api/paddle/nn/ClipGradByNorm_cn.rst @@ -8,11 +8,11 @@ ClipGradByNorm -将输入的多维Tensor :math:`X` 的L2范数限制在 ``clip_norm`` 范围之内。 +将输入的多维 Tensor :math:`X` 的 L2 范数限制在 ``clip_norm`` 范围之内。 -- 如果L2范数大于 ``clip_norm``,则该 Tensor 会乘以一个系数进行压缩 +- 如果 L2 范数大于 ``clip_norm``,则该 Tensor 会乘以一个系数进行压缩 -- 如果L2范数小于或等于 ``clip_norm``,则不会进行任何操作。 +- 如果 L2 范数小于或等于 ``clip_norm``,则不会进行任何操作。 输入的 Tensor 不是从该类里传入,而是默认选择优化器中输入的所有参数的梯度。如果某个参数 ``ParamAttr`` 中的 ``need_clip`` 值被设置为 ``False``,则该参数的梯度不会被裁剪。 @@ -31,7 +31,7 @@ ClipGradByNorm \right. 
-其中 :math:`norm(X)` 代表 :math:`X` 的L2范数 +其中 :math:`norm(X)` 代表 :math:`X` 的 L2 范数 .. math:: \\norm(X) = (\sum_{i=1}^{n}|x_i|^2)^{\frac{1}{2}}\\ diff --git a/docs/api/paddle/nn/ClipGradByValue_cn.rst b/docs/api/paddle/nn/ClipGradByValue_cn.rst index 62c63687a31..0c00f3a8ad4 100644 --- a/docs/api/paddle/nn/ClipGradByValue_cn.rst +++ b/docs/api/paddle/nn/ClipGradByValue_cn.rst @@ -8,7 +8,7 @@ ClipGradByValue -将输入的多维Tensor :math:`X` 的值限制在 [min, max] 范围。 +将输入的多维 Tensor :math:`X` 的值限制在 [min, max] 范围。 输入的 Tensor 不是从该类里传入,而是默认选择优化器中输入的所有参数的梯度。如果某个参数 ``ParamAttr`` 中的 ``need_clip`` 值被设置为 ``False``,则该参数的梯度不会被裁剪。 @@ -24,7 +24,7 @@ ClipGradByValue :::::::::::: - **max** (foat) - 要修剪的最大值。 - - **min** (float,optional) - 要修剪的最小值。如果用户没有设置,将被自动设置为 ``-max`` (此时 ``max`` 必须大于0)。 + - **min** (float,optional) - 要修剪的最小值。如果用户没有设置,将被自动设置为 ``-max`` (此时 ``max`` 必须大于 0)。 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/Conv1DTranspose_cn.rst b/docs/api/paddle/nn/Conv1DTranspose_cn.rst index e26532b52c7..65b821facf8 100644 --- a/docs/api/paddle/nn/Conv1DTranspose_cn.rst +++ b/docs/api/paddle/nn/Conv1DTranspose_cn.rst @@ -8,7 +8,7 @@ Conv1DTranspose 一维转置卷积层(Convlution1d transpose layer) -该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCL或NLC格式,其中N为批尺寸,C为通道数(channel),L为特征长度。卷积核是MCL格式,M是输出图像通道数,C是输入图像通道数,L是卷积核长度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_。如果参数bias_attr不为False,转置卷积计算会添加偏置项。 +该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征大小或者通过 output_size 指定输出特征层大小。输入(Input)和输出(Output)为 NCL 或 NLC 格式,其中 N 为批尺寸,C 为通道数(channel),L 为特征长度。卷积核是 MCL 格式,M 是输出图像通道数,C 是输入图像通道数,L 是卷积核长度。如果组数大于 1,C 等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_。如果参数 bias_attr 不为 False,转置卷积计算会添加偏置项。 .. 
_参考文献:https://arxiv.org/pdf/1603.07285.pdf @@ -20,12 +20,12 @@ Conv1DTranspose 其中: - - :math:`X`:输入,具有NCL或NLC格式的3-D Tensor - - :math:`W`:卷积核,具有NCL格式的3-D Tensor + - :math:`X`:输入,具有 NCL 或 NLC 格式的 3-D Tensor + - :math:`W`:卷积核,具有 NCL 格式的 3-D Tensor - :math:`*`:卷积计算(注意:转置卷积本质上的计算还是卷积) - :math:`b`:偏置(bias),1-D Tensor,形状为 ``[M]`` - :math:`σ`:激活函数 - - :math:`Out`:输出值,NCL或NLC格式的3-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCL 或 NLC 格式的 3-D Tensor,和 ``X`` 的形状可能不同 参数 @@ -35,13 +35,13 @@ Conv1DTranspose - **out_channels** (int) - 卷积核的个数,和输出特征通道数相同。 - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含一个整数的元组或列表,表示卷积核的长度。 - **stride** (int|tuple,可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含一个整型数,表示滑动步长。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。可以是以下三种格式:(1)字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。(2)整数,表示在输入特征两侧各填充 ``padding`` 大小的0。(3)包含一个整数的列表或元组,表示在输入特征两侧各填充 ``padding[0]`` 大小的0。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。可以是以下三种格式:(1)字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。(2)整数,表示在输入特征两侧各填充 ``padding`` 大小的 0。(3)包含一个整数的列表或元组,表示在输入特征两侧各填充 ``padding[0]`` 大小的 0。默认值:0。 - **output_padding** (int|list|tuple, optional):输出特征尾部一侧额外添加的大小。默认值:0。 - - **groups** (int,可选) - 一维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 + - **groups** (int,可选) - 一维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的分组卷积:当 group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 - **dilation** (int|tuple,可选) - 空洞大小。可以为单个整数或包含一个整数的元组或列表,表示卷积核中的空洞。默认值:1。 - - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N是批尺寸,C是通道数,L特征长度。默认值:"NCL"。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 
None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N 是批尺寸,C 是通道数,L 特征长度。默认值:"NCL"。 形状 diff --git a/docs/api/paddle/nn/Conv1D_cn.rst b/docs/api/paddle/nn/Conv1D_cn.rst index 1332ef3d30c..db86d4ecffa 100644 --- a/docs/api/paddle/nn/Conv1D_cn.rst +++ b/docs/api/paddle/nn/Conv1D_cn.rst @@ -9,9 +9,9 @@ Conv1D **一维卷积层** -该OP是一维卷积层(convolution1d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是NCL或NLC格式,其中N是批尺寸,C是通道数,L是特征长度。卷积核是MCL格式,M是输出特征通道数,C是输入特征通道数,L是卷积核长度度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。 +该 OP 是一维卷积层(convolution1d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是 NCL 或 NLC 格式,其中 N 是批尺寸,C 是通道数,L 是特征长度。卷积核是 MCL 格式,M 是输出特征通道数,C 是输入特征通道数,L 是卷积核长度度。如果组数(groups)大于 1,C 等于输入图像通道数除以组数的结果。详情请参考 UFLDL's : `卷积 `_ 。如果 bias_attr 不为 False,卷积计算会添加偏置项。 -对每个输入X,有等式: +对每个输入 X,有等式: .. 
math:: @@ -19,12 +19,12 @@ Conv1D 其中: - - :math:`X`:输入值,NCL或NLC格式的3-D Tensor - - :math:`W`:卷积核值,MCL格式的3-D Tensor + - :math:`X`:输入值,NCL 或 NLC 格式的 3-D Tensor + - :math:`W`:卷积核值,MCL 格式的 3-D Tensor - :math:`*`:卷积操作 - :math:`b`:偏置值,1-D Tensor,形状为 ``[M]`` - :math:`\sigma`:激活函数 - - :math:`Out`:输出值,NCL或NLC格式的3-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCL 或 NLC 格式的 3-D Tensor,和 ``X`` 的形状可能不同 参数 @@ -34,13 +34,13 @@ Conv1D - **out_channels** (int) - 由卷积操作产生的输出的通道数。 - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含一个整数的元组或列表,表示卷积核的长度。 - **stride** (int|list|tuple,可选) - 步长大小。可以为单个整数或包含一个整数的元组或列表,表示卷积的步长。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。可以是以下三种格式:(1)字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。(2)整数,表示在输入特征两侧各填充 ``padding`` 大小的0。(3)包含一个整数的列表或元组,表示在输入特征两侧各填充 ``padding[0]`` 大小的0。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。可以是以下三种格式:(1)字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。(2)整数,表示在输入特征两侧各填充 ``padding`` 大小的 0。(3)包含一个整数的列表或元组,表示在输入特征两侧各填充 ``padding[0]`` 大小的 0。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。可以为单个整数或包含一个整数的元组或列表,表示卷积核中的元素的空洞。默认值:1。 - - **groups** (int,可选) - 一维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **groups** (int,可选) - 一维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 - **padding_mode** (str,可选):填充模式。包括 ``'zeros'``, ``'reflect'``, ``'replicate'`` 或者 ``'circular'``。默认值:``'zeros'`` 。 - - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **data_format** (str,可选) - 
指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N是批尺寸,C是通道数,L是特征长度。默认值:"NCL"。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为 bool 类型,只支持为 False,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N 是批尺寸,C 是通道数,L 是特征长度。默认值:"NCL"。 属性 diff --git a/docs/api/paddle/nn/Conv2DTranspose_cn.rst b/docs/api/paddle/nn/Conv2DTranspose_cn.rst index b57dc40d7aa..8c48c787df2 100644 --- a/docs/api/paddle/nn/Conv2DTranspose_cn.rst +++ b/docs/api/paddle/nn/Conv2DTranspose_cn.rst @@ -8,7 +8,7 @@ Conv2DTranspose 二维转置卷积层(Convlution2d transpose layer) -该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_。如果参数bias_attr不为False,转置卷积计算会添加偏置项。 +该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过 output_size 指定输出特征层大小。输入(Input)和输出(Output)为 NCHW 或 NHWC 格式,其中 N 为批尺寸,C 为通道数(channel),H 为特征层高度,W 为特征层宽度。卷积核是 MCHW 格式,M 是输出图像通道数,C 是输入图像通道数,H 是卷积核高度,W 是卷积核宽度。如果组数大于 1,C 等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_。如果参数 bias_attr 不为 False,转置卷积计算会添加偏置项。 .. 
_参考文献:https://arxiv.org/pdf/1603.07285.pdf @@ -20,21 +20,21 @@ Conv2DTranspose 其中: - - :math:`X`:输入,具有NCHW或NHWC格式的4-D Tensor - - :math:`W`:卷积核,具有NCHW格式的4-D Tensor + - :math:`X`:输入,具有 NCHW 或 NHWC 格式的 4-D Tensor + - :math:`W`:卷积核,具有 NCHW 格式的 4-D Tensor - :math:`*`:卷积计算(注意:转置卷积本质上的计算还是卷积) - :math:`b`:偏置(bias),1-D Tensor,形状为 ``[M]`` - :math:`σ`:激活函数 - - :math:`Out`:输出值,NCHW或NHWC格式的4-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCHW 或 NHWC 格式的 4-D Tensor,和 ``X`` 的形状可能不同 注意: -如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ),并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 +如果 output_size 为 None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`;否则,指定的 output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ),并且指定的 output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 -如果指定了output_size, ``conv2d_transpose`` 可以自动计算卷积核的大小。 +如果指定了 output_size, ``conv2d_transpose`` 可以自动计算卷积核的大小。 参数 :::::::::::: @@ -45,11 +45,11 @@ Conv2DTranspose - **stride** (int|tuple,可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride``。默认值:1。 - **padding** (int|tuple,可选) - 填充大小。如果 ``padding`` 为元组或列表,则必须包含两个整型数,分别表示竖直和水平边界填充大小。否则,表示竖直和水平边界填充大小均为 ``padding``。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下方形状 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。默认值:0。 - **output_padding** (int|list|tuple, 
optional):输出形状上一侧额外添加的大小。默认值:0。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的分组卷积:当 group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 - **dilation** (int|tuple,可选) - 空洞大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核中的元素沿着高和宽的空洞。如果为单个整数,表示高和宽的空洞都等于该整数。默认值:1。 - - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 形状 diff --git a/docs/api/paddle/nn/Conv2D_cn.rst b/docs/api/paddle/nn/Conv2D_cn.rst index 40550c54f56..1bc7edb9af0 100644 --- a/docs/api/paddle/nn/Conv2D_cn.rst +++ b/docs/api/paddle/nn/Conv2D_cn.rst @@ -9,9 +9,9 @@ Conv2D **二维卷积层** -该OP是二维卷积层(convolution2d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。 +该 OP 是二维卷积层(convolution2d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是 NCHW 或 NHWC 格式,其中 N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。卷积核是 MCHW 格式,M 是输出图像通道数,C 是输入图像通道数,H 是卷积核高度,W 是卷积核宽度。如果组数(groups)大于 1,C 等于输入图像通道数除以组数的结果。详情请参考 UFLDL's : `卷积 `_ 。如果 bias_attr 不为 False,卷积计算会添加偏置项。 -对每个输入X,有等式: +对每个输入 X,有等式: .. 
math:: @@ -19,12 +19,12 @@ Conv2D 其中: - - :math:`X`:输入值,NCHW或NHWC格式的4-D Tensor - - :math:`W`:卷积核值,MCHW格式的4-D Tensor + - :math:`X`:输入值,NCHW 或 NHWC 格式的 4-D Tensor + - :math:`W`:卷积核值,MCHW 格式的 4-D Tensor - :math:`*`:卷积操作 - :math:`b`:偏置值,1-D Tensor,形状为 ``[M]`` - :math:`\sigma`:激活函数 - - :math:`Out`:输出值,NCHW或NHWC格式的4-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCHW 或 NHWC 格式的 4-D Tensor,和 ``X`` 的形状可能不同 参数 @@ -34,13 +34,13 @@ Conv2D - **out_channels** (int) - 由卷积操作产生的输出的通道数。 - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核的高和宽。如果为单个整数,表示卷积核的高和宽都等于该整数。 - **stride** (int|list|tuple,可选) - 步长大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积沿着高和宽的步长。如果为单个整数,表示沿着高和宽的步长都等于该整数。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 4 个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含 4 个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含 2 
个整数值:[padding_height, padding_width],此时 padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核中的元素沿着高和宽的空洞。如果为单个整数,表示高和宽的空洞都等于该整数。默认值:1。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 - **padding_mode** (str,可选):填充模式。包括 ``'zeros'``, ``'reflect'``, ``'replicate'`` 或者 ``'circular'``。默认值:``'zeros'`` 。 - - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为 bool 类型,只支持为 False,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 属性 diff --git a/docs/api/paddle/nn/Conv3DTranspose_cn.rst b/docs/api/paddle/nn/Conv3DTranspose_cn.rst index 9bcecf3db6a..91303bb7e6e 100755 --- a/docs/api/paddle/nn/Conv3DTranspose_cn.rst +++ b/docs/api/paddle/nn/Conv3DTranspose_cn.rst @@ -8,7 +8,7 @@ Conv3DTranspose 三维转置卷积层(Convlution3d transpose layer) 
-该层根据输入(input)、卷积核(kernel)和卷积核空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_。如果参数bias_attr不为False,转置卷积计算会添加偏置项。 +该层根据输入(input)、卷积核(kernel)和卷积核空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过 output_size 指定输出特征层大小。输入(Input)和输出(Output)为 NCDHW 或者 NDHWC 格式。其中 N 为批尺寸,C 为通道数(channel),D 为特征深度,H 为特征层高度,W 为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_。如果参数 bias_attr 不为 False,转置卷积计算会添加偏置项。 .. _参考文献:https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf @@ -19,36 +19,36 @@ Conv3DTranspose 其中: - - :math:`X`:输入,具有NCDHW或NDHWC格式的5-D Tensor - - :math:`W`:卷积核,具有NCDHW格式的5-D Tensor + - :math:`X`:输入,具有 NCDHW 或 NDHWC 格式的 5-D Tensor + - :math:`W`:卷积核,具有 NCDHW 格式的 5-D Tensor - :math:`*`:卷积操作(注意:转置卷积本质上的计算还是卷积) - :math:`b`:偏置(bias),1-D Tensor,形状为 ``[M]`` - :math:`σ`:激活函数 - - :math:`Out`:输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCDHW 或 NDHWC 格式的 5-D Tensor,和 ``X`` 的形状可能不同 注意: -如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ),并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 +如果 output_size 为 None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`;否则,指定的 output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ),并且指定的 output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 
由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 -如果指定了output_size,该算子可以自动计算卷积核的大小。 +如果指定了 output_size,该算子可以自动计算卷积核的大小。 参数 :::::::::::: - **in_channels** (int) - 输入图像的通道数。 - **out_channels** (int) - 卷积核的个数,和输出特征图个数相同。 - - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核的深度,高和宽。如果为单个整数,表示卷积核的深度,高和宽都等于该整数。默认:None。output_size和kernel_size不能同时为None。 + - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核的深度,高和宽。如果为单个整数,表示卷积核的深度,高和宽都等于该整数。默认:None。output_size 和 kernel_size 不能同时为 None。 - **stride** (int|tuple,可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含三个整型数,分别表示深度,垂直和水平滑动步长。否则,表示深度,垂直和水平滑动步长均为 ``stride``。默认值:1。 - **padding** (int|tuple,可选) - 填充大小。如果 ``padding`` 为元组或列表,则必须包含三个整型数,分别表示深度,竖直和水平边界填充大小。否则,表示深度,竖直和水平边界填充大小均为 ``padding``。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下方形状 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。默认值:0。 - **output_padding** (int|list|tuple, optional):输出形状上一侧额外添加的大小。默认值:0。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的分组卷积:当 group=2,卷积核的前一半仅和输入特征图的前一半连接。卷积核的后一半仅和输入特征图的后一半连接。默认值:1。 - **dilation** (int|tuple,可选) - 空洞大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核中的元素沿着深度,高和宽的空洞。如果为单个整数,表示深度,高和宽的空洞都等于该整数。默认值:1。 - - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 
指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCDHW"。 形状 :::::::::::: diff --git a/docs/api/paddle/nn/Conv3D_cn.rst b/docs/api/paddle/nn/Conv3D_cn.rst index 617ddedbde2..7ab8ea93d83 100644 --- a/docs/api/paddle/nn/Conv3D_cn.rst +++ b/docs/api/paddle/nn/Conv3D_cn.rst @@ -9,9 +9,9 @@ Conv3D **三维卷积层** -该OP是三维卷积层(convolution3D layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDHWC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。 +该 OP 是三维卷积层(convolution3D layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算得到输出特征层大小。输入和输出是 NCDHW 或 NDHWC 格式,其中 N 是批尺寸,C 是通道数,D 是特征层深度,H 是特征层高度,W 是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果 bias_attr 不为 False,卷积计算会添加偏置项。 -对每个输入X,有等式: +对每个输入 X,有等式: .. math:: @@ -19,12 +19,12 @@ Conv3D 其中: - - :math:`X`:输入值,NCDHW或NDHWC格式的5-D Tensor - - :math:`W`:卷积核值,MCDHW格式的5-D Tensor + - :math:`X`:输入值,NCDHW 或 NDHWC 格式的 5-D Tensor + - :math:`W`:卷积核值,MCDHW 格式的 5-D Tensor - :math:`*`:卷积操作 - :math:`b`:偏置值,1-D Tensor,形为 ``[M]`` - :math:`\sigma`:激活函数 - - :math:`Out`:输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCDHW 或 NDHWC 格式的 5-D Tensor,和 ``X`` 的形状可能不同 参数 :::::::::::: @@ -33,13 +33,13 @@ Conv3D - **out_channels** (int) - 由卷积操作产生的输出的通道数。 - **kernel_size** (int|list|tuple) - 卷积核大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核的深度,高和宽。如果为单个整数,表示卷积核的深度,高和宽都等于该整数。 - **stride** (int|list|tuple,可选) - 步长大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积沿着深度,高和宽的步长。如果为单个整数,表示沿着高和宽的步长都等于该整数。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, 
padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 5 个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含 6 个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含 3 个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。可以为单个整数或包含三个整数的元组或列表,分别表示卷积核中的元素沿着深度,高和宽的空洞。如果为单个整数,表示深度,高和宽的空洞都等于该整数。默认值:1。 - - **groups** (int,可选) - 三维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **groups** (int,可选) - 三维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 
组输入进行卷积计算。默认值:1。 - **padding_mode** (str,可选):填充模式。包括 ``'zeros'``, ``'reflect'``, ``'replicate'`` 或者 ``'circular'``。默认值:``'zeros'`` 。 - - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为 bool 类型,只支持为 False,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NCDHW"。 属性 diff --git a/docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst b/docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst index 8d13c3fc3a4..7b2325ec80a 100644 --- a/docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst +++ b/docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst @@ -5,35 +5,35 @@ CosineEmbeddingLoss .. py:function:: paddle.nn.CosineEmbeddingLoss(margin=0, reduction='mean', name=None) -该函数计算给定的输入input1, input2和label之间的 `CosineEmbedding` 损失,通常用于学习非线性嵌入或半监督学习 +该函数计算给定的输入 input1, input2 和 label 之间的 `CosineEmbedding` 损失,通常用于学习非线性嵌入或半监督学习 -如果label=1,则该损失函数的数学计算公式如下: +如果 label=1,则该损失函数的数学计算公式如下: .. math:: Out = 1 - cos(input1, input2) -如果label=-1,则该损失函数的数学计算公式如下: +如果 label=-1,则该损失函数的数学计算公式如下: .. math:: Out = max(0, cos(input1, input2)) - margin -其中cos计算公式如下: +其中 cos 计算公式如下: .. 
math:: cos(x1, x2) = \frac{x1 \cdot{} x2}{\Vert x1 \Vert_2 * \Vert x2 \Vert_2} 参数 ::::::::: - - **margin** (float,可选): - 可以设置的范围为[-1, 1],建议设置的范围为[0, 0.5]。其默认为 `0`。数据类型为int。 - - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `CosineEmbeddingLoss` 的均值;设置为 ``'sum'`` 时,计算 `CosineEmbeddingLoss` 的总和;设置为 ``'none'`` 时,则返回 `CosineEmbeddingLoss`。数据类型为string。 + - **margin** (float,可选): - 可以设置的范围为[-1, 1],建议设置的范围为[0, 0.5]。其默认为 `0`。数据类型为 int。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `CosineEmbeddingLoss` 的均值;设置为 ``'sum'`` 时,计算 `CosineEmbeddingLoss` 的总和;设置为 ``'none'`` 时,则返回 `CosineEmbeddingLoss`。数据类型为 string。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **input1** (Tensor): - 输入的Tensor,维度是[N, M],其中N是batch size,可为0,M是数组长度。数据类型为:float32、float64。 - - **input2** (Tensor): - 输入的Tensor,维度是[N, M],其中N是batch size,可为0,M是数组长度。数据类型为:float32、float64。 - - **label** (Tensor): - 标签,维度是[N],N是数组长度,数据类型为:float32、float64、int32、int64。 - - **output** (Tensor): - 输入 ``input1`` 、 ``input2`` 和标签 ``label`` 间的 `CosineEmbeddingLoss` 损失。如果 `reduction` 是 ``'none'``,则输出Loss的维度为 [N],与输入 ``input1`` 和 ``input2`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。 + - **input1** (Tensor): - 输入的 Tensor,维度是[N, M],其中 N 是 batch size,可为 0,M 是数组长度。数据类型为:float32、float64。 + - **input2** (Tensor): - 输入的 Tensor,维度是[N, M],其中 N 是 batch size,可为 0,M 是数组长度。数据类型为:float32、float64。 + - **label** (Tensor): - 标签,维度是[N],N 是数组长度,数据类型为:float32、float64、int32、int64。 + - **output** (Tensor): - 输入 ``input1`` 、 ``input2`` 和标签 ``label`` 间的 `CosineEmbeddingLoss` 损失。如果 `reduction` 是 ``'none'``,则输出 Loss 的维度为 [N],与输入 ``input1`` 和 ``input2`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``,则输出 Loss 的维度为 [1]。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/CosineSimilarity_cn.rst b/docs/api/paddle/nn/CosineSimilarity_cn.rst index 093e1521d06..5032ba476b0 100644 --- 
a/docs/api/paddle/nn/CosineSimilarity_cn.rst +++ b/docs/api/paddle/nn/CosineSimilarity_cn.rst @@ -6,13 +6,13 @@ CosineSimilarity **CosineSimilarity** -计算x1与x2沿axis维度的余弦相似度。 +计算 x1 与 x2 沿 axis 维度的余弦相似度。 参数 :::::::::::: - - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为1。 - - **eps** (float) - 很小的值,防止计算时分母为0,默认值为1e-8。 + - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为 1。 + - **eps** (float) - 很小的值,防止计算时分母为 0,默认值为 1e-8。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/CrossEntropyLoss_cn.rst b/docs/api/paddle/nn/CrossEntropyLoss_cn.rst index c9ee5aa4f03..af6ebf7af8e 100644 --- a/docs/api/paddle/nn/CrossEntropyLoss_cn.rst +++ b/docs/api/paddle/nn/CrossEntropyLoss_cn.rst @@ -5,9 +5,9 @@ CrossEntropyLoss .. py:function:: paddle.nn.CrossEntropyLoss(weight=None, ignore_index=-100, reduction='mean', soft_label=False, axis=-1, name=None) -该OP计算输入input和标签label间的交叉熵损失,它结合了 `LogSoftmax` 和 `NLLLoss` 的OP计算,可用于训练一个 `n` 类分类器。 +该 OP 计算输入 input 和标签 label 间的交叉熵损失,它结合了 `LogSoftmax` 和 `NLLLoss` 的 OP 计算,可用于训练一个 `n` 类分类器。 -如果提供 `weight` 参数的话,它是一个 `1-D` 的tensor,每个值对应每个类别的权重。 +如果提供 `weight` 参数的话,它是一个 `1-D` 的 tensor,每个值对应每个类别的权重。 该损失函数的数学计算公式如下: .. 
math:: @@ -23,17 +23,17 @@ CrossEntropyLoss 参数 ::::::::: - - **weight** (Tensor,可选): - 指定每个类别的权重。其默认为 `None`。如果提供该参数的话,维度必须为 `C` (类别数)。数据类型为float32或float64。 - - **ignore_index** (int64,可选): - 指定一个忽略的标签值,此标签值不参与计算,负值表示无需忽略任何标签值。仅在soft_label=False时有效。默认值为-100。数据类型为int64。 - - **reduction** (str,可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。 - - **soft_label** (bool, optional) – 指明label是否为软标签。默认为False,表示label为硬标签;若soft_label=True则表示软标签。 - - **axis** (int, optional) - 进行softmax计算的维度索引。它应该在 :math:`[-1,dim-1]` 范围内,而 ``dim`` 是输入logits的维度。默认值:-1。 - - **use_softmax** (bool, optional) - 指定是否对input进行softmax归一化。默认值:True。 + - **weight** (Tensor,可选): - 指定每个类别的权重。其默认为 `None`。如果提供该参数的话,维度必须为 `C` (类别数)。数据类型为 float32 或 float64。 + - **ignore_index** (int64,可选): - 指定一个忽略的标签值,此标签值不参与计算,负值表示无需忽略任何标签值。仅在 soft_label=False 时有效。默认值为-100。数据类型为 int64。 + - **reduction** (str,可选): - 指定应用于输出结果的计算方式,数据类型为 string,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss 均值。设置为 `sum` 时,计算 `mini-batch` loss 的总和。设置为 `none` 时,则返回 loss Tensor。 + - **soft_label** (bool, optional) – 指明 label 是否为软标签。默认为 False,表示 label 为硬标签;若 soft_label=True 则表示软标签。 + - **axis** (int, optional) - 进行 softmax 计算的维度索引。它应该在 :math:`[-1,dim-1]` 范围内,而 ``dim`` 是输入 logits 的维度。默认值:-1。 + - **use_softmax** (bool, optional) - 指定是否对 input 进行 softmax 归一化。默认值:True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **input** (Tensor): - 输入 `Tensor`,数据类型为float32或float64。其形状为 :math:`[N, C]`,其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, d_1, d_2, ..., d_k, C]` ,k >= 1。 - - **label** (Tensor): - 当soft_label=False时,输入input对应的标签值,数据类型为int64。其形状为 :math:`[N]`,每个元素符合条件:0 <= label[i] <= C-1。对于多维度的情形下,它的形状为 :math:`[N, d_1, d_2, ..., d_k]` ,k >= 1;当soft_label=True时,输入形状应与input一致,数据类型为float32或float64且每个样本的各标签概率和应为1。 + - **input** (Tensor): - 输入 `Tensor`,数据类型为 float32 或 float64。其形状为 :math:`[N, C]`,其中 `C` 
为类别数。对于多维度的情形下,它的形状为 :math:`[N, d_1, d_2, ..., d_k, C]` ,k >= 1。 + - **label** (Tensor): - 当 soft_label=False 时,输入 input 对应的标签值,数据类型为 int64。其形状为 :math:`[N]`,每个元素符合条件:0 <= label[i] <= C-1。对于多维度的情形下,它的形状为 :math:`[N, d_1, d_2, ..., d_k]` ,k >= 1;当 soft_label=True 时,输入形状应与 input 一致,数据类型为 float32 或 float64 且每个样本的各标签概率和应为 1。 - **output** (Tensor): - 计算 `CrossEntropyLoss` 交叉熵后的损失值。 diff --git a/docs/api/paddle/nn/Dropout2D_cn.rst b/docs/api/paddle/nn/Dropout2D_cn.rst index a01c4778dbb..c80d6f8e575 100644 --- a/docs/api/paddle/nn/Dropout2D_cn.rst +++ b/docs/api/paddle/nn/Dropout2D_cn.rst @@ -5,16 +5,16 @@ Dropout2D .. py:function:: paddle.nn.Dropout2D(p=0.5, data_format='NCHW', name=None) -根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCHW` 的4维张量,通道特征图指的是其中的形状为 `HW` 的2维特征图)。Dropout2D可以提高通道特征图之间的独立性。论文请参考:`Efficient Object Localization Using Convolutional Networks `_ +根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置 0(对一个形状为 `NCHW` 的 4 维张量,通道特征图指的是其中的形状为 `HW` 的 2 维特征图)。Dropout2D 可以提高通道特征图之间的独立性。论文请参考:`Efficient Object Localization Using Convolutional Networks `_ 在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 .. note:: - 对应的 `functional方法` 请参考::ref:`cn_api_nn_functional_dropout2d` 。 + 对应的 `functional 方法` 请参考::ref:`cn_api_nn_functional_dropout2d` 。 参数 ::::::::: - - **p** (float):将输入通道置0的概率,即丢弃概率。默认:0.5。 + - **p** (float):将输入通道置 0 的概率,即丢弃概率。默认:0.5。 - **data_format** (str):指定输入的数据格式,输出的数据格式将与输入保持一致,可以是 `NCHW` 和 `NHWC`。其中 `N` 是批尺寸,`C` 是通道数,`H` 是特征高度,`W` 是特征宽度。默认值:`NCHW` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/Dropout3D_cn.rst b/docs/api/paddle/nn/Dropout3D_cn.rst index 65ba119e227..4c1408137e2 100644 --- a/docs/api/paddle/nn/Dropout3D_cn.rst +++ b/docs/api/paddle/nn/Dropout3D_cn.rst @@ -5,16 +5,16 @@ Dropout3D .. 
py:function:: paddle.nn.Dropout3D(p=0.5, data_format='NCDHW', name=None) -根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCDHW` 的5维张量,通道特征图指的是其中的形状为 `DHW` 的3维特征图)。Dropout3D可以提高通道特征图之间的独立性。论文请参考:`Efficient Object Localization Using Convolutional Networks `_ +根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置 0(对一个形状为 `NCDHW` 的 5 维张量,通道特征图指的是其中的形状为 `DHW` 的 3 维特征图)。Dropout3D 可以提高通道特征图之间的独立性。论文请参考:`Efficient Object Localization Using Convolutional Networks `_ 在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 .. note:: - 对应的 `functional方法` 请参考::ref:`cn_api_nn_functional_dropout3d` 。 + 对应的 `functional 方法` 请参考::ref:`cn_api_nn_functional_dropout3d` 。 参数 ::::::::: - - **p** (float):将输入通道置0的概率,即丢弃概率。默认:0.5。 + - **p** (float):将输入通道置 0 的概率,即丢弃概率。默认:0.5。 - **data_format** (str):指定输入的数据格式,输出的数据格式将与输入保持一致,可以是 `NCDHW` 和 `NDHWC`。其中 `N` 是批尺寸,`C` 是通道数,`D` 是特征深度,`H` 是特征高度,`W` 是特征宽度。默认值:`NCDHW` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/Dropout_cn.rst b/docs/api/paddle/nn/Dropout_cn.rst index 7d12137f578..686fb932c6e 100644 --- a/docs/api/paddle/nn/Dropout_cn.rst +++ b/docs/api/paddle/nn/Dropout_cn.rst @@ -5,17 +5,17 @@ Dropout .. py:function:: paddle.nn.Dropout(p=0.5, axis=None, mode="upscale_in_train”, name=None) -Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p`,在训练过程中随机将一些神经元输出设置为0,通过阻止神经元节点间的相关性来减少过拟合。论文请参考:`Improving neural networks by preventing co-adaptation of feature detectors `_ +Dropout 是一种正则化手段,该算子根据给定的丢弃概率 `p`,在训练过程中随机将一些神经元输出设置为 0,通过阻止神经元节点间的相关性来减少过拟合。论文请参考:`Improving neural networks by preventing co-adaptation of feature detectors `_ 在动态图模式下,请使用模型的 `eval()` 方法切换至测试阶段。 .. 
note:: - 对应的 `functional方法` 请参考::ref:`cn_api_nn_functional_dropout` 。 + 对应的 `functional 方法` 请参考::ref:`cn_api_nn_functional_dropout` 。 参数 ::::::::: - - **p** (float):将输入节点置为0的概率,即丢弃概率。默认:0.5。 - - **axis** (int|list):指定对输入 `Tensor` 进行Dropout操作的轴。默认:None。 + - **p** (float):将输入节点置为 0 的概率,即丢弃概率。默认:0.5。 + - **axis** (int|list):指定对输入 `Tensor` 进行 Dropout 操作的轴。默认:None。 - **mode** (str):丢弃单元的方式,有两种'upscale_in_train'和'downscale_in_infer',默认:'upscale_in_train'。计算方法如下: 1. upscale_in_train,在训练时增大输出结果。 diff --git a/docs/api/paddle/nn/ELU_cn.rst b/docs/api/paddle/nn/ELU_cn.rst index 6da1b8512e6..627300ef57a 100644 --- a/docs/api/paddle/nn/ELU_cn.rst +++ b/docs/api/paddle/nn/ELU_cn.rst @@ -4,9 +4,9 @@ ELU ------------------------------- .. py:class:: paddle.nn.ELU(alpha=1.0, name=None) -ELU激活层(ELU Activation Operator) +ELU 激活层(ELU Activation Operator) -根据 `Exponential Linear Units `_ 对输入Tensor中每个元素应用以下计算。 +根据 `Exponential Linear Units `_ 对输入 Tensor 中每个元素应用以下计算。 .. math:: @@ -22,13 +22,13 @@ ELU激活层(ELU Activation Operator) 参数 :::::::::: - - alpha (float,可选) - ELU的alpha值,默认值为1.0。 + - alpha (float,可选) - ELU 的 alpha 值,默认值为 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/Embedding_cn.rst b/docs/api/paddle/nn/Embedding_cn.rst index 5f8863805dc..91a94245e84 100644 --- a/docs/api/paddle/nn/Embedding_cn.rst +++ b/docs/api/paddle/nn/Embedding_cn.rst @@ -9,21 +9,21 @@ Embedding 嵌入层(Embedding Layer) -该接口用于构建 ``Embedding`` 的一个可调用对象,具体用法参照 ``代码示例``。其根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (num_embeddings, embedding_dim)和weight_attr自动构造一个二维embedding矩阵。 +该接口用于构建 ``Embedding`` 的一个可调用对象,具体用法参照 ``代码示例``。其根据 input 中的 id 信息从 embedding 矩阵中查询对应 embedding 信息,并会根据输入的 size (num_embeddings, embedding_dim)和 weight_attr 自动构造一个二维 embedding 矩阵。 -输出的Tensor的shape是在输入Tensor 
shape的最后一维后面添加了embedding_dim的维度。 +输出的 Tensor 的 shape 是在输入 Tensor shape 的最后一维后面添加了 embedding_dim 的维度。 -注:input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 +注:input 中的 id 必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 .. code-block:: text - x是Tensor,且padding_idx = -1。 + x 是 Tensor,且 padding_idx = -1。 padding_idx = -1 x.data = [[1, 3], [2, 4], [4, 127]] x.shape = [3, 2] weight.shape = [128, 16] - 输出是Tensor: + 输出是 Tensor: out.shape = [3, 2, 16] out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], [0.345421456, 0.524563927, ..., 0.144534654]], @@ -32,24 +32,24 @@ Embedding [[0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]]] # padding data - 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127,对于输入id为127的词,进行padding处理。 + 输入的 padding_idx 小于 0,则自动转换为 padding_idx = -1 + 128 = 127,对于输入 id 为 127 的词,进行 padding 处理。 参数 :::::::::::: - - **num_embeddings** (int) - 嵌入字典的大小,input中的id必须满足 ``0 =< id < num_embeddings`` 。 。 + - **num_embeddings** (int) - 嵌入字典的大小,input 中的 id 必须满足 ``0 =< id < num_embeddings`` 。 。 - **embedding_dim** (int) - 每个嵌入向量的维度。 - - **padding_idx** (int|long|None) - padding_idx的配置区间为 ``[-weight.shape[0], weight.shape[0]``,如果配置了padding_idx,那么在训练过程中遇到此id时,其参数及对应的梯度将会以0进行填充。 + - **padding_idx** (int|long|None) - padding_idx 的配置区间为 ``[-weight.shape[0], weight.shape[0]``,如果配置了 padding_idx,那么在训练过程中遇到此 id 时,其参数及对应的梯度将会以 0 进行填充。 - **sparse** (bool) - 是否使用稀疏更新,在词嵌入权重较大的情况下,使用稀疏更新能够获得更快的训练速度及更小的内存/显存占用。 - - **weight_attr** (ParamAttr|None) - 指定嵌入向量的配置,包括初始化方法,具体用法请参见 :ref:`api_guide_ParamAttr`,一般无需设置,默认值为None。 + - **weight_attr** (ParamAttr|None) - 指定嵌入向量的配置,包括初始化方法,具体用法请参见 :ref:`api_guide_ParamAttr`,一般无需设置,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor, input映射后得到的Embedding Tensor,数据类型和词嵌入的定义类型一致。 +Tensor, input 映射后得到的 Embedding Tensor,数据类型和词嵌入的定义类型一致。 代码示例 diff --git a/docs/api/paddle/nn/Flatten_cn.rst b/docs/api/paddle/nn/Flatten_cn.rst index 7cbd5142e1a..79b5339a5d1 100644 --- 
a/docs/api/paddle/nn/Flatten_cn.rst +++ b/docs/api/paddle/nn/Flatten_cn.rst @@ -7,13 +7,13 @@ Flatten -该接口用于构造一个 ``Flatten`` 类的可调用对象。更多信息请参见代码示例。它实现将一个连续维度的Tensor展平成一维Tensor。 +该接口用于构造一个 ``Flatten`` 类的可调用对象。更多信息请参见代码示例。它实现将一个连续维度的 Tensor 展平成一维 Tensor。 参数 :::::::::::: - - start_axis (int,可选) - 展开的起始维度,默认值为1。 + - start_axis (int,可选) - 展开的起始维度,默认值为 1。 - stop_axis (int,可选) - 展开的结束维度,默认值为-1。 返回 diff --git a/docs/api/paddle/nn/Fold_cn.rst b/docs/api/paddle/nn/Fold_cn.rst index 78a1eabca69..32a38b9dd81 100644 --- a/docs/api/paddle/nn/Fold_cn.rst +++ b/docs/api/paddle/nn/Fold_cn.rst @@ -5,9 +5,9 @@ Fold .. py:function:: paddle.nn.Fold(output_sizes, kernel_sizes, dilations=1, paddings=0, strides=1, name=None) -该Op用于将一个滑动局部块组合成一个大的张量。通常也被称为col2im,用于批处理二维图像张量。Fold通过对所有包含块的值求和来计算结果中的每个大张量的组合值。 +该 Op 用于将一个滑动局部块组合成一个大的张量。通常也被称为 col2im,用于批处理二维图像张量。Fold 通过对所有包含块的值求和来计算结果中的每个大张量的组合值。 -对于输入x,如果形状为[N, C_in, L],其输出形状[N, C_out, H_out, W_out],计算过程如下: +对于输入 x,如果形状为[N, C_in, L],其输出形状[N, C_out, H_out, W_out],计算过程如下: .. math:: H_out &= output_size[0] @@ -15,23 +15,23 @@ Fold C_out &= C_in / kernel\_sizes[0] / kernel\_sizes[1] .. 
note:: - 对应的 `functional方法` 请参考::ref:`cn_api_nn_functional_fold` 。 + 对应的 `functional 方法` 请参考::ref:`cn_api_nn_functional_fold` 。 参数 ::::::::: - - **output_sizes** (int|list|tuple) – 输出尺寸,整数或者整型列表。如为列表类型应包含两个元素 ``[output_size_h, output_size_w]``。如果为整数o,则输出形状会被认为 ``[o, o]``。 - - **kernel_size** (int|list|tuple) - 卷积核大小,整数或者整型列表。如为列表类型应包含两个元素 ``[k_h, k_w]``。如果为整数k,则输出形状会被认为 ``[k, k]``。 - - **strides** (int|list|tuple,可选) - 步长大小,整数或者整型列表。如为列表类型应包含两个元素 ``[stride_h, stride_w]``。如果为整数stride,则输出形状会被认为 ``[sride, stride]``。默认为[1,1]。 - - **paddings** (int|list|tuple,可选) – 每个维度的扩展,整数或者整型列表。如果为整型列表,长度应该为4或者2;长度为4 对应的padding参数是:[padding_top, padding_left,padding_bottom, padding_right],长度为2对应的padding参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数padding,则会被当作[padding, padding, padding, padding]处理。默认值为0。 - - **dilations** (int|list|tuple,可选) – 卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数dilation,会被当作整型列表[dilation, dilation]处理。默认值为1。 + - **output_sizes** (int|list|tuple) – 输出尺寸,整数或者整型列表。如为列表类型应包含两个元素 ``[output_size_h, output_size_w]``。如果为整数 o,则输出形状会被认为 ``[o, o]``。 + - **kernel_size** (int|list|tuple) - 卷积核大小,整数或者整型列表。如为列表类型应包含两个元素 ``[k_h, k_w]``。如果为整数 k,则输出形状会被认为 ``[k, k]``。 + - **strides** (int|list|tuple,可选) - 步长大小,整数或者整型列表。如为列表类型应包含两个元素 ``[stride_h, stride_w]``。如果为整数 stride,则输出形状会被认为 ``[sride, stride]``。默认为[1,1]。 + - **paddings** (int|list|tuple,可选) – 每个维度的扩展,整数或者整型列表。如果为整型列表,长度应该为 4 或者 2;长度为 4 对应的 padding 参数是:[padding_top, padding_left,padding_bottom, padding_right],长度为 2 对应的 padding 参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数 padding,则会被当作[padding, padding, padding, padding]处理。默认值为 0。 + - **dilations** (int|list|tuple,可选) – 卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数 dilation,会被当作整型列表[dilation, dilation]处理。默认值为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **输入** : 4-D Tensor,形状为[N, C_in, L],数据类型为float32或者float64 + - **输入** : 4-D 
Tensor,形状为[N, C_in, L],数据类型为 float32 或者 float64 - **输出**:形状如上面所描述的[N, Cout, H, W],数据类型与 ``x`` 相同 diff --git a/docs/api/paddle/nn/GELU_cn.rst b/docs/api/paddle/nn/GELU_cn.rst index df76f0919fd..b010e192b86 100644 --- a/docs/api/paddle/nn/GELU_cn.rst +++ b/docs/api/paddle/nn/GELU_cn.rst @@ -4,9 +4,9 @@ GELU ------------------------------- .. py:class:: paddle.nn.GELU(approximate=False, name=None) -GELU激活层(GELU Activation Operator) +GELU 激活层(GELU Activation Operator) -逐元素计算 GELU激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 +逐元素计算 GELU 激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 如果使用近似计算: @@ -28,8 +28,8 @@ GELU激活层(GELU Activation Operator) 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/GRUCell_cn.rst b/docs/api/paddle/nn/GRUCell_cn.rst index cb58e6bb314..f1dbc45da13 100644 --- a/docs/api/paddle/nn/GRUCell_cn.rst +++ b/docs/api/paddle/nn/GRUCell_cn.rst @@ -9,7 +9,7 @@ GRUCell **门控循环单元** -该OP是门控循环单元(GRUCell),根据当前时刻输入x(t)和上一时刻状态h(t-1)计算当前时刻输出y(t)并更新状态h(t)。 +该 OP 是门控循环单元(GRUCell),根据当前时刻输入 x(t)和上一时刻状态 h(t-1)计算当前时刻输出 y(t)并更新状态 h(t)。 状态更新公式如下: @@ -27,7 +27,7 @@ GRUCell 其中: - - :math:`\sigma` :sigmoid激活函数。 + - :math:`\sigma` :sigmoid 激活函数。 详情请参考论文:`An Empirical Exploration of Recurrent Network Architectures `_ 。 @@ -37,32 +37,32 @@ GRUCell - **input_size** (int) - 输入的大小。 - **hidden_size** (int) - 隐藏状态大小。 - - **weight_ih_attr** (ParamAttr,可选) - weight_ih的参数。默认为None。 - - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 + - **weight_ih_attr** (ParamAttr,可选) - weight_ih 的参数。默认为 None。 + - **weight_hh_attr** (ParamAttr,可选) - weight_hh 的参数。默认为 None。 + - **bias_ih_attr** (ParamAttr,可选) - bias_ih 的参数。默认为 None。 + - **bias_hh_attr** (ParamAttr,可选) - bias_hh 的参数。默认为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 
None。 变量 :::::::::::: - - **weight_ih** (Parameter) - input到hidden的变换矩阵的权重。形状为(3 * hidden_size, input_size)。对应公式中的 :math:`W_{ir}, W_{iz}, W_{ic}`。 - - **weight_hh** (Parameter) - hidden到hidden的变换矩阵的权重。形状为(3 * hidden_size, hidden_size)。对应公式中的 :math:`W_{hr}, W_{hz}, W_{hc}`。 - - **bias_ih** (Parameter) - input到hidden的变换矩阵的偏置。形状为(3 * hidden_size, )。对应公式中的 :math:`b_{ir}, b_{iz}, b_{ic}`。 - - **bias_hh** (Parameter) - hidden到hidden的变换矩阵的偏置。形状为(3 * hidden_size, )。对应公式中的 :math:`b_{hr}, b_{hz}, b_{hc}`。 + - **weight_ih** (Parameter) - input 到 hidden 的变换矩阵的权重。形状为(3 * hidden_size, input_size)。对应公式中的 :math:`W_{ir}, W_{iz}, W_{ic}`。 + - **weight_hh** (Parameter) - hidden 到 hidden 的变换矩阵的权重。形状为(3 * hidden_size, hidden_size)。对应公式中的 :math:`W_{hr}, W_{hz}, W_{hc}`。 + - **bias_ih** (Parameter) - input 到 hidden 的变换矩阵的偏置。形状为(3 * hidden_size, )。对应公式中的 :math:`b_{ir}, b_{iz}, b_{ic}`。 + - **bias_hh** (Parameter) - hidden 到 hidden 的变换矩阵的偏置。形状为(3 * hidden_size, )。对应公式中的 :math:`b_{hr}, b_{hz}, b_{hc}`。 输入 :::::::::::: - **inputs** (Tensor) - 输入。形状为[batch_size, input_size],对应公式中的 :math:`x_t`。 - - **states** (Tensor,可选) - 上一轮的隐藏状态。对应公式中的 :math:`h_{t-1}`。当state为None的时候,初始状态为全0矩阵。默认为None。 + - **states** (Tensor,可选) - 上一轮的隐藏状态。对应公式中的 :math:`h_{t-1}`。当 state 为 None 的时候,初始状态为全 0 矩阵。默认为 None。 输出: - **outputs** (Tensor) - 输出。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 - **new_states** (Tensor) - 新一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 .. 
Note:: - 所有的变换矩阵的权重和偏置都默认初始化为Uniform(-std, std),其中std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 + 所有的变换矩阵的权重和偏置都默认初始化为 Uniform(-std, std),其中 std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 代码示例 diff --git a/docs/api/paddle/nn/GRU_cn.rst b/docs/api/paddle/nn/GRU_cn.rst index 8cff30a7716..fb519761ac0 100644 --- a/docs/api/paddle/nn/GRU_cn.rst +++ b/docs/api/paddle/nn/GRU_cn.rst @@ -9,7 +9,7 @@ GRU **门控循环单元网络** -该OP是门控循环单元网络(GRU),根据输出序列和给定的初始状态计算返回输出序列和最终状态。在该网络中的每一层对应输入的step,每个step根据当前时刻输入 :math:`x_{t}` 和上一时刻状态 :math:`h_{t-1}` 计算当前时刻输出 :math:`y_{t}` 并更新状态 :math:`h_{t}` 。 +该 OP 是门控循环单元网络(GRU),根据输出序列和给定的初始状态计算返回输出序列和最终状态。在该网络中的每一层对应输入的 step,每个 step 根据当前时刻输入 :math:`x_{t}` 和上一时刻状态 :math:`h_{t-1}` 计算当前时刻输出 :math:`y_{t}` 并更新状态 :math:`h_{t}` 。 状态更新公式如下: @@ -27,34 +27,34 @@ GRU 其中: - - :math:`\sigma` :sigmoid激活函数。 + - :math:`\sigma` :sigmoid 激活函数。 参数 :::::::::::: - **input_size** (int) - 输入 :math:`x` 的大小。 - **hidden_size** (int) - 隐藏状态 :math:`h` 大小。 - - **num_layers** (int,可选) - 循环网络的层数。例如,将层数设为2,会将两层GRU网络堆叠在一起,第二层的输入来自第一层的输出。默认为1。 - - **direction** (str,可选) - 网络迭代方向,可设置为forward或bidirect(或bidirectional)。foward指从序列开始到序列结束的单向GRU网络方向,bidirectional指从序列开始到序列结束,又从序列结束到开始的双向GRU网络方向。默认为forward。 - - **time_major** (bool,可选) - 指定input的第一个维度是否是time steps。如果time_major为True,则Tensor的形状为[time_steps,batch_size,input_size],否则为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。默认为False。 - - **dropout** (float,可选) - dropout概率,指的是出第一层外每层输入时的dropout概率。范围为[0, 1]。默认为0。 - - **weight_ih_attr** (ParamAttr,可选) - weight_ih的参数。默认为None。 - - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 + - **num_layers** (int,可选) - 循环网络的层数。例如,将层数设为 2,会将两层 GRU 网络堆叠在一起,第二层的输入来自第一层的输出。默认为 1。 + - **direction** (str,可选) - 网络迭代方向,可设置为 forward 或 bidirect(或 bidirectional)。foward 指从序列开始到序列结束的单向 GRU 
网络方向,bidirectional 指从序列开始到序列结束,又从序列结束到开始的双向 GRU 网络方向。默认为 forward。 + - **time_major** (bool,可选) - 指定 input 的第一个维度是否是 time steps。如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,input_size],否则为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。默认为 False。 + - **dropout** (float,可选) - dropout 概率,指的是出第一层外每层输入时的 dropout 概率。范围为[0, 1]。默认为 0。 + - **weight_ih_attr** (ParamAttr,可选) - weight_ih 的参数。默认为 None。 + - **weight_hh_attr** (ParamAttr,可选) - weight_hh 的参数。默认为 None。 + - **bias_ih_attr** (ParamAttr,可选) - bias_ih 的参数。默认为 None。 + - **bias_hh_attr** (ParamAttr,可选) - bias_hh 的参数。默认为 None。 输入 :::::::::::: - - **inputs** (Tensor) - 网络输入。如果time_major为True,则Tensor的形状为[time_steps,batch_size,input_size],如果time_major为False,则Tensor的形状为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。 + - **inputs** (Tensor) - 网络输入。如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,input_size],如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。 - **initial_states** (Tensor,可选) - 网络的初始状态,形状为[num_layers * num_directions, batch_size, hidden_size]。如果没有给出则会以全零初始化。 - - **sequence_length** (Tensor,可选) - 指定输入序列的实际长度,形状为[batch_size],数据类型为int64或int32。在输入序列中所有time step不小于sequence_length的元素都会被当作填充元素处理(状态不再更新)。 + - **sequence_length** (Tensor,可选) - 指定输入序列的实际长度,形状为[batch_size],数据类型为 int64 或 int32。在输入序列中所有 time step 不小于 sequence_length 的元素都会被当作填充元素处理(状态不再更新)。 输出 :::::::::::: - - **outputs** (Tensor) - 输出,由前向和后向cell的输出拼接得到。如果time_major为True,则Tensor的形状为[time_steps,batch_size,num_directions * hidden_size],如果time_major为False,则Tensor的形状为[batch_size,time_steps,num_directions * hidden_size],当direction设置为bidirectional时,num_directions等于2,否则等于1。`time_steps` 指输出序列的长度。 - - **final_states** (Tensor) - 最终状态。形状为[num_layers * num_directions, batch_size, hidden_size],当direction设置为bidirectional时,num_directions等于2,返回值的前向和后向的状态的索引是0,2,4,6..。和1,3,5,7...,否则等于1。 + - **outputs** (Tensor) - 输出,由前向和后向 cell 的输出拼接得到。如果 time_major 为 True,则 Tensor 
的形状为[time_steps,batch_size,num_directions * hidden_size],如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,num_directions * hidden_size],当 direction 设置为 bidirectional 时,num_directions 等于 2,否则等于 1。`time_steps` 指输出序列的长度。 + - **final_states** (Tensor) - 最终状态。形状为[num_layers * num_directions, batch_size, hidden_size],当 direction 设置为 bidirectional 时,num_directions 等于 2,返回值的前向和后向的状态的索引是 0,2,4,6..。和 1,3,5,7...,否则等于 1。 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/GroupNorm_cn.rst b/docs/api/paddle/nn/GroupNorm_cn.rst index 90c31af1b30..66ebdd91834 100644 --- a/docs/api/paddle/nn/GroupNorm_cn.rst +++ b/docs/api/paddle/nn/GroupNorm_cn.rst @@ -5,7 +5,7 @@ GroupNorm .. py:class:: paddle.nn.GroupNorm(num_groups, num_channels, epsilon=1e-05, weight_attr=None, bias_attr=None, data_format='NCHW', name=None) -**Group Normalization层** +**Group Normalization 层** 该接口用于构建 ``GroupNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。其中实现了组归一化层的功能。更多详情请参考:`Group Normalization `_ 。 @@ -15,15 +15,15 @@ GroupNorm - **num_groups** (int) - 从通道中分离出来的 ``group`` 的数目。 - **num_channels** (int) - 输入的通道数。 - **epsilon** (float,可选) - 为防止方差除零,增加一个很小的值。默认值:1e-05。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为False,表示参数不学习。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。如果为False,表示参数不学习。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为 False,表示参数不学习。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。如果为 False,表示参数不学习。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **data_format** (string,可选) - 只支持“NCHW”(num_batches,channels,height,width)格式。默认值:“NCHW”。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 :::::::::::: - - input:形状为(批大小,通道数,\*) 的Tensor。 + - input:形状为(批大小,通道数,\*) 的 Tensor。 - output:和输入形状一样。 代码示例 diff --git a/docs/api/paddle/nn/HSigmoidLoss_cn.rst 
b/docs/api/paddle/nn/HSigmoidLoss_cn.rst index 422e43c8a35..b771905c926 100644 --- a/docs/api/paddle/nn/HSigmoidLoss_cn.rst +++ b/docs/api/paddle/nn/HSigmoidLoss_cn.rst @@ -5,39 +5,39 @@ HSigmoidLoss .. py:class:: paddle.nn.HSigmoidLoss(feature_size, num_classes, weight_attr=None, bias_attr=None, is_custom=False, is_sparse=False, name=None) -层次sigmoid(hierarchical sigmoid),该OP通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。 +层次 sigmoid(hierarchical sigmoid),该 OP 通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。 -该OP建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid累加这条路径上每个非叶子节点的损失得到总损失。 +该 OP 建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid 累加这条路径上每个非叶子节点的损失得到总损失。 -相较于传统softmax的计算复杂度 :math:`O(N)` ,hsigmoid可以将计算复杂度降至 :math:`O(logN)`,其中 :math:`N` 表示类别总数(字典大小)。 +相较于传统 softmax 的计算复杂度 :math:`O(N)` ,hsigmoid 可以将计算复杂度降至 :math:`O(logN)`,其中 :math:`N` 表示类别总数(字典大小)。 若使用默认树结构,请参考 `Hierarchical Probabilistic Neural Network Language Model `_ 。 -若使用自定义树结构,请将参数 ``is_custom`` 设置为True,并完成以下步骤(以语言模型为例): +若使用自定义树结构,请将参数 ``is_custom`` 设置为 True,并完成以下步骤(以语言模型为例): 1. 使用自定义词典来建立二叉树,每个叶结点都应该是词典中的单词; -2. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径** 的映射,即路径表 ``path_table`` 参数; +2. 建立一个 dict 类型数据结构,用于存储 **单词 id -> 该单词叶结点至根节点路径** 的映射,即路径表 ``path_table`` 参数; -3. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径的编码** 的映射,即路径编码 ``path_code`` 参数。编码是指每次二分类的标签,1为真,0为假; +3. 建立一个 dict 类型数据结构,用于存储 **单词 id -> 该单词叶结点至根节点路径的编码** 的映射,即路径编码 ``path_code`` 参数。编码是指每次二分类的标签,1 为真,0 为假; 4. 
每个单词都已经有自己的路径和路径编码,当对于同一批输入进行操作时,可以同时传入一批路径和路径编码进行运算。 参数 :::::::::: - - **feature_size** (int) - 输入Tensor的特征大尺寸。 - - **num_classes** (int) - 类别总数(字典大小)必须大于等于2。若使用默认树结构,即当 ``is_custom=False`` 时,必须设置该参数。若使用自定义树结构,即当 ``is_custom=True`` 时,它取值应为自定义树结构的非叶节点的个数,用于指定二分类的类别总数。 - - **weight_attr** (ParamAttr,可选) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) – 指定偏置参数属性的对象,若 `bias_attr` 为bool类型,如果设置为False,表示不会为该层添加偏置;如果设置为True,表示使用默认的偏置参数属性。默认值为None,表示使用默认的偏置参数属性。默认的偏置参数属性将偏置参数的初始值设为0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **is_custom** (bool,可选) – 是否使用用户自定义二叉树取代默认二叉树结构。如果设置为True,请务必设置 ``path_table`` , ``path_code`` , ``num_classes``,否则必须设置num_classes。默认值为False。 - - **is_sparse** (bool,可选) – 是否使用稀疏更新方式。如果设置为True,W的梯度和输入梯度将会变得稀疏。默认值为False。 + - **feature_size** (int) - 输入 Tensor 的特征大尺寸。 + - **num_classes** (int) - 类别总数(字典大小)必须大于等于 2。若使用默认树结构,即当 ``is_custom=False`` 时,必须设置该参数。若使用自定义树结构,即当 ``is_custom=True`` 时,它取值应为自定义树结构的非叶节点的个数,用于指定二分类的类别总数。 + - **weight_attr** (ParamAttr,可选) – 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) – 指定偏置参数属性的对象,若 `bias_attr` 为 bool 类型,如果设置为 False,表示不会为该层添加偏置;如果设置为 True,表示使用默认的偏置参数属性。默认值为 None,表示使用默认的偏置参数属性。默认的偏置参数属性将偏置参数的初始值设为 0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **is_custom** (bool,可选) – 是否使用用户自定义二叉树取代默认二叉树结构。如果设置为 True,请务必设置 ``path_table`` , ``path_code`` , ``num_classes``,否则必须设置 num_classes。默认值为 False。 + - **is_sparse** (bool,可选) – 是否使用稀疏更新方式。如果设置为 True,W 的梯度和输入梯度将会变得稀疏。默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **input** (Tensor): - 输入的Tensor,维度是[N, D],其中N是batch size, D是特征尺寸。 + - **input** (Tensor): - 输入的 Tensor,维度是[N, D],其中 N 是 batch size, D 是特征尺寸。 - **label** (Tensor): - 标签,维度是[N, 1]。 - - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的 `hsigmoid loss` 损失。输出Loss的维度为[N, 1]。 + - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的 `hsigmoid loss` 损失。输出 
Loss 的维度为[N, 1]。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/Hardshrink_cn.rst b/docs/api/paddle/nn/Hardshrink_cn.rst index dadc7afd175..22c73462a3b 100644 --- a/docs/api/paddle/nn/Hardshrink_cn.rst +++ b/docs/api/paddle/nn/Hardshrink_cn.rst @@ -4,7 +4,7 @@ Hardshrink ------------------------------- .. py:class:: paddle.nn.Hardshrink(threshold=0.5, name=None) -Hardshrink激活层 +Hardshrink 激活层 .. math:: @@ -21,13 +21,13 @@ Hardshrink激活层 参数 :::::::::: - - threshold (float,可选) - Hardshrink激活计算公式中的threshold值。默认值为0.5。 + - threshold (float,可选) - Hardshrink 激活计算公式中的 threshold 值。默认值为 0.5。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/Hardsigmoid_cn.rst b/docs/api/paddle/nn/Hardsigmoid_cn.rst index 7b4ab4e710b..ed6820c3b51 100644 --- a/docs/api/paddle/nn/Hardsigmoid_cn.rst +++ b/docs/api/paddle/nn/Hardsigmoid_cn.rst @@ -5,7 +5,7 @@ Hardsigmoid .. py:function:: paddle.nn.Hardsigmoid(name=None) -Hardsigmoid激活层。sigmoid的分段线性逼近激活函数,速度比sigmoid快,详细解释参见 https://arxiv.org/abs/1603.00391。 +Hardsigmoid 激活层。sigmoid 的分段线性逼近激活函数,速度比 sigmoid 快,详细解释参见 https://arxiv.org/abs/1603.00391。 .. math:: @@ -27,8 +27,8 @@ Hardsigmoid激活层。sigmoid的分段线性逼近激活函数,速度比sigmo 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/Hardswish_cn.rst b/docs/api/paddle/nn/Hardswish_cn.rst index 1e769a58f46..43296aa9da3 100644 --- a/docs/api/paddle/nn/Hardswish_cn.rst +++ b/docs/api/paddle/nn/Hardswish_cn.rst @@ -5,7 +5,7 @@ Hardswish .. 
py:function:: paddle.nn.Hardswish(name=None) -Hardswish激活函数。在MobileNetV3架构中被提出,相较于swish函数,具有数值稳定性好,计算速度快等优点,具体原理请参考:https://arxiv.org/pdf/1905.02244.pdf +Hardswish 激活函数。在 MobileNetV3 架构中被提出,相较于 swish 函数,具有数值稳定性好,计算速度快等优点,具体原理请参考:https://arxiv.org/pdf/1905.02244.pdf .. math:: @@ -27,8 +27,8 @@ Hardswish激活函数。在MobileNetV3架构中被提出,相较于swish函数 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/Hardtanh_cn.rst b/docs/api/paddle/nn/Hardtanh_cn.rst index c7a1d8a80b4..ee66e3d2174 100644 --- a/docs/api/paddle/nn/Hardtanh_cn.rst +++ b/docs/api/paddle/nn/Hardtanh_cn.rst @@ -4,7 +4,7 @@ Hardtanh ------------------------------- .. py:class:: paddle.nn.Hardtanh(min=-1.0, max=1.0, name=None) -Hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: +Hardtanh 激活层(Hardtanh Activation Operator)。计算公式如下: .. math:: @@ -21,14 +21,14 @@ Hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: 参数 :::::::::: - - min (float,可选) - Hardtanh激活计算公式中的min值。默认值为-1。 - - max (float,可选) - Hardtanh激活计算公式中的max值。默认值为1。 + - min (float,可选) - Hardtanh 激活计算公式中的 min 值。默认值为-1。 + - max (float,可选) - Hardtanh 激活计算公式中的 max 值。默认值为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/Identity_cn.rst b/docs/api/paddle/nn/Identity_cn.rst index c72d5b3edd0..c5e136c76f3 100644 --- a/docs/api/paddle/nn/Identity_cn.rst +++ b/docs/api/paddle/nn/Identity_cn.rst @@ -6,7 +6,7 @@ Identity .. py:class:: paddle.nn.Identity(*args, **kwargs) -**等效层**。对于输入Tensor :math:`X`,计算公式为: +**等效层**。对于输入 Tensor :math:`X`,计算公式为: .. 
math:: @@ -22,8 +22,8 @@ Identity 形状 ::::::::: -- 输入:形状为 :math:`[batch\_size, n1, n2, ...]` 的多维Tensor。 -- 输出:形状为 :math:`[batch\_size, n1, n2, ...]` 的多维Tensor。 +- 输入:形状为 :math:`[batch\_size, n1, n2, ...]` 的多维 Tensor。 +- 输出:形状为 :math:`[batch\_size, n1, n2, ...]` 的多维 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/InstanceNorm1D_cn.rst b/docs/api/paddle/nn/InstanceNorm1D_cn.rst index 3178bff5434..8dfe5ac9739 100644 --- a/docs/api/paddle/nn/InstanceNorm1D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm1D_cn.rst @@ -6,9 +6,9 @@ InstanceNorm1D .. py:class:: paddle.nn.InstanceNorm1D(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, data_format="NCL", name=None): -该接口用于构建 ``InstanceNorm1D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理2D或者3D的Tensor,实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考:Instance Normalization: The Missing Ingredient for Fast Stylization 。 +该接口用于构建 ``InstanceNorm1D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理 2D 或者 3D 的 Tensor,实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考:Instance Normalization: The Missing Ingredient for Fast Stylization 。 -``input`` 是mini-batch的输入。 +``input`` 是 mini-batch 的输入。 .. 
math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ @@ -28,8 +28,8 @@ Note: - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为False,则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为False,则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为 False,则表示每个通道的伸缩固定为 1,不可改变。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为 False,则表示每一个通道的偏移固定为 0,不可改变。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - **data_format** (string,可选) - 指定输入数据格式,数据格式可以为“NC"或者"NCL"。默认值:“NCL”。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -37,11 +37,11 @@ Note: 形状 :::::::::::: - - input:形状为(批大小,通道数)的2-D Tensor 或(批大小,通道数,长度)的3-D Tensor。 + - input:形状为(批大小,通道数)的 2-D Tensor 或(批大小,通道数,长度)的 3-D Tensor。 - output:和输入形状一样。 .. note:: -目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 +目前设置 track_running_stats 和 momentum 是无效的。之后的版本会修复此问题。 代码示例 diff --git a/docs/api/paddle/nn/InstanceNorm2D_cn.rst b/docs/api/paddle/nn/InstanceNorm2D_cn.rst index b26c305e4c2..9651b45949c 100644 --- a/docs/api/paddle/nn/InstanceNorm2D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm2D_cn.rst @@ -6,9 +6,9 @@ InstanceNorm2D .. 
py:class:: paddle.nn.InstanceNorm2D(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, data_format="NCHW", name=None): -该接口用于构建 ``InstanceNorm2D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理2D或者3D的Tensor,实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考:Instance Normalization: The Missing Ingredient for Fast Stylization 。 +该接口用于构建 ``InstanceNorm2D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理 2D 或者 3D 的 Tensor,实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考:Instance Normalization: The Missing Ingredient for Fast Stylization 。 -``input`` 是mini-batch的输入。 +``input`` 是 mini-batch 的输入。 .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ @@ -27,8 +27,8 @@ Note: - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为False,则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为False,则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为 False,则表示每个通道的伸缩固定为 1,不可改变。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为 False,则表示每一个通道的偏移固定为 0,不可改变。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - **data_format** (string,可选) - 指定输入数据格式,数据格式可以为“NCHW"。默认值:“NCHW”。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -36,11 +36,11 @@ Note: 形状 :::::::::::: - - input:形状为(批大小,通道数,高度,宽度)的4-D Tensor。 + - input:形状为(批大小,通道数,高度,宽度)的 4-D Tensor。 - output:和输入形状一样。 .. 
note:: -目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 +目前设置 track_running_stats 和 momentum 是无效的。之后的版本会修复此问题。 代码示例 diff --git a/docs/api/paddle/nn/InstanceNorm3D_cn.rst b/docs/api/paddle/nn/InstanceNorm3D_cn.rst index a372ffdbe6d..9ad9bfc30c5 100644 --- a/docs/api/paddle/nn/InstanceNorm3D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm3D_cn.rst @@ -5,9 +5,9 @@ InstanceNorm3D .. py:class:: paddle.nn.InstanceNorm3D(num_features, epsilon=1e-05, momentum=0.9, weight_attr=None, bias_attr=None, data_format="NCDHW", name=None): -该接口用于构建 ``InstanceNorm3D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理5D的Tensor,实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考:Instance Normalization: The Missing Ingredient for Fast Stylization 。 +该接口用于构建 ``InstanceNorm3D`` 类的一个可调用对象,具体用法参照 ``代码示例``。可以处理 5D 的 Tensor,实现了实例归一化层(Instance Normalization Layer)的功能。更多详情请参考:Instance Normalization: The Missing Ingredient for Fast Stylization 。 -``input`` 是mini-batch的输入。 +``input`` 是 mini-batch 的输入。 .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean \\ @@ -26,8 +26,8 @@ Note: - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为False,则表示每个通道的伸缩固定为1,不可改变。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为False,则表示每一个通道的偏移固定为0,不可改变。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为 False,则表示每个通道的伸缩固定为 1,不可改变。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为 False,则表示每一个通道的偏移固定为 0,不可改变。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - **data_format** (string,可选) - 指定输入数据格式,数据格式可以为"NCDHW"。默认值:“NCDHW”。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -35,11 +35,11 @@ Note: 形状 :::::::::::: - - 
input:形状为5-D Tensor。 + - input:形状为 5-D Tensor。 - output:和输入形状一样。 .. note:: -目前设置track_running_stats和momentum是无效的。之后的版本会修复此问题。 +目前设置 track_running_stats 和 momentum 是无效的。之后的版本会修复此问题。 代码示例 diff --git a/docs/api/paddle/nn/KLDivLoss_cn.rst b/docs/api/paddle/nn/KLDivLoss_cn.rst index 4d59f6e5ffa..1fe114d5627 100644 --- a/docs/api/paddle/nn/KLDivLoss_cn.rst +++ b/docs/api/paddle/nn/KLDivLoss_cn.rst @@ -5,34 +5,34 @@ KLDivLoss .. py:class:: paddle.nn.KLDivLoss(reduction='mean') -该算子计算输入(Input)和输入(Label)之间的Kullback-Leibler散度损失。注意其中输入(Input)应为对数概率值,输入(Label)应为概率值。 +该算子计算输入(Input)和输入(Label)之间的 Kullback-Leibler 散度损失。注意其中输入(Input)应为对数概率值,输入(Label)应为概率值。 -kL发散损失计算如下: +kL 发散损失计算如下: .. math:: l(input, label) = label * (log(label) - input) -当 ``reduction`` 为 ``none`` 时,输出损失与输入(input)形状相同,各点的损失单独计算,不会对结果做reduction 。 +当 ``reduction`` 为 ``none`` 时,输出损失与输入(input)形状相同,各点的损失单独计算,不会对结果做 reduction 。 当 ``reduction`` 为 ``mean`` 时,输出损失为[1]的形状,输出为所有损失的平均值。 当 ``reduction`` 为 ``sum`` 时,输出损失为[1]的形状,输出为所有损失的总和。 -当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N为批大小,输出为所有损失的总和除以批量大小。 +当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N 为批大小,输出为所有损失的总和除以批量大小。 参数 :::::::::::: - - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 + - **reduction** (str,可选) - 要应用于输出的 reduction 类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无 reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 形状 :::::::::::: - - **input** (Tensor): - 输入的Tensor,维度是[N, *],其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64。 + - **input** (Tensor): - 输入的 Tensor,维度是[N, *],其中 N 是 batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64。 - **label** (Tensor): - 标签,维度是[N, *],与 ``input`` 相同。数据类型为:float32、float64。 - - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的kl散度。如果 `reduction` 是 ``'none'``,则输出Loss的维度为 [N, *],与输入 ``input`` 相同。如果 `reduction` 是 ``'batchmean'`` 、 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。 + - **output** 
(Tensor): - 输入 ``input`` 和标签 ``label`` 间的 kl 散度。如果 `reduction` 是 ``'none'``,则输出 Loss 的维度为 [N, *],与输入 ``input`` 相同。如果 `reduction` 是 ``'batchmean'`` 、 ``'mean'`` 或 ``'sum'``,则输出 Loss 的维度为 [1]。 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/L1Loss_cn.rst b/docs/api/paddle/nn/L1Loss_cn.rst index 3640f7af9ce..5c6ef069d3e 100644 --- a/docs/api/paddle/nn/L1Loss_cn.rst +++ b/docs/api/paddle/nn/L1Loss_cn.rst @@ -5,7 +5,7 @@ L1Loss .. py:class:: paddle.nn.L1Loss(reduction='mean', name=None) -该接口用于创建一个L1Loss的可调用类,L1Loss计算输入input和标签label间的 `L1 loss` 损失。 +该接口用于创建一个 L1Loss 的可调用类,L1Loss 计算输入 input 和标签 label 间的 `L1 loss` 损失。 该损失函数的数学计算公式如下: @@ -32,9 +32,9 @@ L1Loss 形状 ::::::::: - - **input** (Tensor): - 输入的Tensor,维度是[N, *],其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64、int32、int64。 + - **input** (Tensor): - 输入的 Tensor,维度是[N, *],其中 N 是 batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64、int32、int64。 - **label** (Tensor): - 标签,维度是[N, *],与 ``input`` 相同。数据类型为:float32、float64、int32、int64。 - - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。如果 `reduction` 是 ``'none'``,则输出Loss的维度为 [N, *],与输入 ``input`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。 + - **output** (Tensor): - 输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。如果 `reduction` 是 ``'none'``,则输出 Loss 的维度为 [N, *],与输入 ``input`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``,则输出 Loss 的维度为 [1]。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/LSTMCell_cn.rst b/docs/api/paddle/nn/LSTMCell_cn.rst index 70a0bdad001..b9a51cacc1a 100644 --- a/docs/api/paddle/nn/LSTMCell_cn.rst +++ b/docs/api/paddle/nn/LSTMCell_cn.rst @@ -9,7 +9,7 @@ LSTMCell **长短期记忆网络单元** -该OP是长短期记忆网络单元(LSTMCell),根据当前时刻输入x(t)和上一时刻状态h(t-1)计算当前时刻输出y(t)并更新状态h(t)。 +该 OP 是长短期记忆网络单元(LSTMCell),根据当前时刻输入 x(t)和上一时刻状态 h(t-1)计算当前时刻输出 y(t)并更新状态 h(t)。 状态更新公式如下: @@ -27,7 +27,7 @@ LSTMCell 其中: - - :math:`\sigma` :sigmoid激活函数。 + - :math:`\sigma` :sigmoid 激活函数。 详情请参考论文:`An Empirical Exploration of Recurrent Network Architectures `_ 。 @@ -37,34 +37,34 @@ LSTMCell - 
**input_size** (int) - 输入的大小。 - **hidden_size** (int) - 隐藏状态大小。 - - **weight_ih_attr** (ParamAttr,可选) - weight_ih的参数。默认为None。 - - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 + - **weight_ih_attr** (ParamAttr,可选) - weight_ih 的参数。默认为 None。 + - **weight_hh_attr** (ParamAttr,可选) - weight_hh 的参数。默认为 None。 + - **bias_ih_attr** (ParamAttr,可选) - bias_ih 的参数。默认为 None。 + - **bias_hh_attr** (ParamAttr,可选) - bias_hh 的参数。默认为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 变量 :::::::::::: - - **weight_ih** (Parameter) - input到hidden的变换矩阵的权重。形状为(4 * hidden_size, input_size)。对应公式中的 :math:`W_{ii}, W_{if}, W_{ig}, W_{io}`。 - - **weight_hh** (Parameter) - hidden到hidden的变换矩阵的权重。形状为(4 * hidden_size, hidden_size)。对应公式中的 :math:`W_{hi}, W_{hf}, W_{hg}, W_{ho}`。 - - **bias_ih** (Parameter) - input到hidden的变换矩阵的偏置。形状为(4 * hidden_size, )。对应公式中的 :math:`b_{ii}, b_{if}, b_{ig}, b_{io}`。 - - **bias_hh** (Parameter) - hidden到hidden的变换矩阵的偏置。形状为(4 * hidden_size, )。对应公式中的 :math:`b_{hi}, b_{hf}, b_{hg}, b_{ho}`。 + - **weight_ih** (Parameter) - input 到 hidden 的变换矩阵的权重。形状为(4 * hidden_size, input_size)。对应公式中的 :math:`W_{ii}, W_{if}, W_{ig}, W_{io}`。 + - **weight_hh** (Parameter) - hidden 到 hidden 的变换矩阵的权重。形状为(4 * hidden_size, hidden_size)。对应公式中的 :math:`W_{hi}, W_{hf}, W_{hg}, W_{ho}`。 + - **bias_ih** (Parameter) - input 到 hidden 的变换矩阵的偏置。形状为(4 * hidden_size, )。对应公式中的 :math:`b_{ii}, b_{if}, b_{ig}, b_{io}`。 + - **bias_hh** (Parameter) - hidden 到 hidden 的变换矩阵的偏置。形状为(4 * hidden_size, )。对应公式中的 :math:`b_{hi}, b_{hf}, b_{hg}, b_{ho}`。 输入 :::::::::::: - **inputs** (Tensor) - 输入。形状为[batch_size, input_size],对应公式中的 :math:`x_t`。 - - **states** (tuple,可选) - 一个包含两个Tensor的元组,每个Tensor的形状都为[batch_size, hidden_size],上一轮的隐藏状态。对应公式中的 :math:`h_{t-1},c_{t-1}`。当state为None的时候,初始状态为全0矩阵。默认为None。 + - **states** (tuple,可选) - 一个包含两个 Tensor 的元组,每个 Tensor 的形状都为[batch_size, 
hidden_size],上一轮的隐藏状态。对应公式中的 :math:`h_{t-1},c_{t-1}`。当 state 为 None 的时候,初始状态为全 0 矩阵。默认为 None。 输出 :::::::::::: - **outputs** (Tensor) - 输出。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 - - **new_states** (tuple) - 一个包含两个Tensor的元组,每个Tensor的形状都为[batch_size, hidden_size],新一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t},c_{t}`。 + - **new_states** (tuple) - 一个包含两个 Tensor 的元组,每个 Tensor 的形状都为[batch_size, hidden_size],新一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t},c_{t}`。 .. Note:: - 所有的变换矩阵的权重和偏置都默认初始化为Uniform(-std, std),其中std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 + 所有的变换矩阵的权重和偏置都默认初始化为 Uniform(-std, std),其中 std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 代码示例 diff --git a/docs/api/paddle/nn/LSTM_cn.rst b/docs/api/paddle/nn/LSTM_cn.rst index 15e70fb6818..ba984404815 100644 --- a/docs/api/paddle/nn/LSTM_cn.rst +++ b/docs/api/paddle/nn/LSTM_cn.rst @@ -9,7 +9,7 @@ LSTM **长短期记忆网络** -该OP是长短期记忆网络(LSTM),根据输出序列和给定的初始状态计算返回输出序列和最终状态。在该网络中的每一层对应输入的step,每个step根据当前时刻输入 :math:`x_{t}` 和上一时刻状态 :math:`h_{t-1}, c_{t-1}` 计算当前时刻输出 :math:`y_{t}` 并更新状态 :math:`h_{t}, c_{t}` 。 +该 OP 是长短期记忆网络(LSTM),根据输出序列和给定的初始状态计算返回输出序列和最终状态。在该网络中的每一层对应输入的 step,每个 step 根据当前时刻输入 :math:`x_{t}` 和上一时刻状态 :math:`h_{t-1}, c_{t-1}` 计算当前时刻输出 :math:`y_{t}` 并更新状态 :math:`h_{t}, c_{t}` 。 状态更新公式如下: @@ -31,34 +31,34 @@ LSTM 其中: - - :math:`\sigma` :sigmoid激活函数。 + - :math:`\sigma` :sigmoid 激活函数。 参数 :::::::::::: - **input_size** (int) - 输入 :math:`x` 的大小。 - **hidden_size** (int) - 隐藏状态 :math:`h` 大小。 - - **num_layers** (int,可选) - 循环网络的层数。例如,将层数设为2,会将两层GRU网络堆叠在一起,第二层的输入来自第一层的输出。默认为1。 - - **direction** (str,可选) - 网络迭代方向,可设置为forward或bidirect(或bidirectional)。foward指从序列开始到序列结束的单向GRU网络方向,bidirectional指从序列开始到序列结束,又从序列结束到开始的双向GRU网络方向。默认为forward。 - - **time_major** (bool,可选) - 指定input的第一个维度是否是time steps。如果time_major为True,则Tensor的形状为[time_steps,batch_size,input_size],否则为[batch_size,time_steps,input_size]。`time_steps` 
指输入序列的长度。默认为False。 - - **dropout** (float,可选) - dropout概率,指的是出第一层外每层输入时的dropout概率。范围为[0, 1]。默认为0。 - - **weight_ih_attr** (ParamAttr,可选) - weight_ih的参数。默认为None。 - - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 + - **num_layers** (int,可选) - 循环网络的层数。例如,将层数设为 2,会将两层 GRU 网络堆叠在一起,第二层的输入来自第一层的输出。默认为 1。 + - **direction** (str,可选) - 网络迭代方向,可设置为 forward 或 bidirect(或 bidirectional)。foward 指从序列开始到序列结束的单向 GRU 网络方向,bidirectional 指从序列开始到序列结束,又从序列结束到开始的双向 GRU 网络方向。默认为 forward。 + - **time_major** (bool,可选) - 指定 input 的第一个维度是否是 time steps。如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,input_size],否则为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。默认为 False。 + - **dropout** (float,可选) - dropout 概率,指的是出第一层外每层输入时的 dropout 概率。范围为[0, 1]。默认为 0。 + - **weight_ih_attr** (ParamAttr,可选) - weight_ih 的参数。默认为 None。 + - **weight_hh_attr** (ParamAttr,可选) - weight_hh 的参数。默认为 None。 + - **bias_ih_attr** (ParamAttr,可选) - bias_ih 的参数。默认为 None。 + - **bias_hh_attr** (ParamAttr,可选) - bias_hh 的参数。默认为 None。 输入 :::::::::::: - - **inputs** (Tensor) - 网络输入。如果time_major为True,则Tensor的形状为[time_steps,batch_size,input_size],如果time_major为False,则Tensor的形状为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。 - - **initial_states** (tuple,可选) - 网络的初始状态,一个包含h和c的元组,形状为[num_layers * num_directions, batch_size, hidden_size]。如果没有给出则会以全零初始化。 - - **sequence_length** (Tensor,可选) - 指定输入序列的实际长度,形状为[batch_size],数据类型为int64或int32。在输入序列中所有time step不小于sequence_length的元素都会被当作填充元素处理(状态不再更新)。 + - **inputs** (Tensor) - 网络输入。如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,input_size],如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。 + - **initial_states** (tuple,可选) - 网络的初始状态,一个包含 h 和 c 的元组,形状为[num_layers * num_directions, batch_size, hidden_size]。如果没有给出则会以全零初始化。 + - **sequence_length** (Tensor,可选) - 指定输入序列的实际长度,形状为[batch_size],数据类型为 
int64 或 int32。在输入序列中所有 time step 不小于 sequence_length 的元素都会被当作填充元素处理(状态不再更新)。 输出 :::::::::::: - - **outputs** (Tensor) - 输出,由前向和后向cell的输出拼接得到。如果time_major为True,则Tensor的形状为[time_steps,batch_size,num_directions * hidden_size],如果time_major为False,则Tensor的形状为[batch_size,time_steps,num_directions * hidden_size],当direction设置为bidirectional时,num_directions等于2,否则等于1。`time_steps` 指输出序列的长度。 - - **final_states** (tuple) - 最终状态,一个包含h和c的元组。形状为[num_layers * num_directions, batch_size, hidden_size],当direction设置为bidirectional时,num_directions等于2,返回值的前向和后向的状态的索引是0,2,4,6..。和1,3,5,7...,否则等于1。 + - **outputs** (Tensor) - 输出,由前向和后向 cell 的输出拼接得到。如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,num_directions * hidden_size],如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,num_directions * hidden_size],当 direction 设置为 bidirectional 时,num_directions 等于 2,否则等于 1。`time_steps` 指输出序列的长度。 + - **final_states** (tuple) - 最终状态,一个包含 h 和 c 的元组。形状为[num_layers * num_directions, batch_size, hidden_size],当 direction 设置为 bidirectional 时,num_directions 等于 2,返回值的前向和后向的状态的索引是 0,2,4,6..。和 1,3,5,7...,否则等于 1。 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/LayerDict_cn.rst b/docs/api/paddle/nn/LayerDict_cn.rst index 9127b05f669..5cbe98b02eb 100644 --- a/docs/api/paddle/nn/LayerDict_cn.rst +++ b/docs/api/paddle/nn/LayerDict_cn.rst @@ -8,7 +8,7 @@ LayerDict -LayerDict用于保存子层到有序字典中,它包含的子层将被正确地注册和添加。列表中的子层可以像常规python 有序字典一样被访问。 +LayerDict 用于保存子层到有序字典中,它包含的子层将被正确地注册和添加。列表中的子层可以像常规 python 有序字典一样被访问。 参数 :::::::::::: @@ -58,7 +58,7 @@ LayerDict用于保存子层到有序字典中,它包含的子层将被正确 clear() ''''''''' -清除LayerDict 中所有的子层。 +清除 LayerDict 中所有的子层。 **参数** @@ -88,11 +88,11 @@ clear() pop() ''''''''' -移除LayerDict 中的键 并且返回该键对应的子层。 +移除 LayerDict 中的键 并且返回该键对应的子层。 **参数** - - **key** (str) - 要移除的key。 + - **key** (str) - 要移除的 key。 **代码示例** @@ -118,7 +118,7 @@ pop() keys() ''''''''' -返回LayerDict 中键的可迭代对象。 +返回 LayerDict 中键的可迭代对象。 **参数** @@ -149,7 +149,7 @@ keys() items() ''''''''' -返回LayerDict 中键/值对的可迭代对象。 +返回 LayerDict 中键/值对的可迭代对象。 
**参数** @@ -180,7 +180,7 @@ items() values() ''''''''' -返回LayerDict 中值的可迭代对象。 +返回 LayerDict 中值的可迭代对象。 **参数** @@ -211,7 +211,7 @@ values() update() ''''''''' -更新子层中的键/值对到LayerDict中,会覆盖已经存在的键。 +更新子层中的键/值对到 LayerDict 中,会覆盖已经存在的键。 **参数** diff --git a/docs/api/paddle/nn/LayerList_cn.rst b/docs/api/paddle/nn/LayerList_cn.rst index f1250a155b5..4a297276b94 100644 --- a/docs/api/paddle/nn/LayerList_cn.rst +++ b/docs/api/paddle/nn/LayerList_cn.rst @@ -8,7 +8,7 @@ LayerList -LayerList用于保存子层列表,它包含的子层将被正确地注册和添加。列表中的子层可以像常规python列表一样被索引。 +LayerList 用于保存子层列表,它包含的子层将被正确地注册和添加。列表中的子层可以像常规 python 列表一样被索引。 参数 :::::::::::: @@ -41,7 +41,7 @@ LayerList用于保存子层列表,它包含的子层将被正确地注册和 append() ''''''''' -添加一个子层到整个list的最后。 +添加一个子层到整个 list 的最后。 **参数** @@ -62,7 +62,7 @@ append() insert() ''''''''' -向list中插入一个子层,到给定的index前面。 +向 list 中插入一个子层,到给定的 index 前面。 **参数** @@ -83,7 +83,7 @@ insert() extend() ''''''''' -添加多个子层到整个list的最后。 +添加多个子层到整个 list 的最后。 **参数** diff --git a/docs/api/paddle/nn/LayerNorm_cn.rst b/docs/api/paddle/nn/LayerNorm_cn.rst index 3e9d5394994..817fe730409 100644 --- a/docs/api/paddle/nn/LayerNorm_cn.rst +++ b/docs/api/paddle/nn/LayerNorm_cn.rst @@ -26,16 +26,16 @@ LayerNorm 参数 :::::::::::: - - **normalized_shape** (int 或 list 或 tuple) – 需规范化的shape,期望的输入shape为 ``[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]``。如果是单个整数,则此模块将在最后一个维度上规范化(此时最后一维的维度需与该参数相同)。 + - **normalized_shape** (int 或 list 或 tuple) – 需规范化的 shape,期望的输入 shape 为 ``[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]``。如果是单个整数,则此模块将在最后一个维度上规范化(此时最后一维的维度需与该参数相同)。 - **epsilon** (float,可选) - 指明在计算过程中是否添加较小的值到方差中以防止除零。默认值:1e-05。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为False固定为1,不进行学习。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为False固定为0,不进行学习。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果为 False 固定为 1,不进行学习。默认值为 
None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。如果为 False 固定为 0,不进行学习。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 :::::::::::: - - input: 2-D, 3-D, 4-D或5D 的Tensor。 + - input: 2-D, 3-D, 4-D 或 5D 的 Tensor。 - output:和输入形状一样。 代码示例 diff --git a/docs/api/paddle/nn/Layer_cn.rst b/docs/api/paddle/nn/Layer_cn.rst index 1aff35022f7..7bf906fd46a 100644 --- a/docs/api/paddle/nn/Layer_cn.rst +++ b/docs/api/paddle/nn/Layer_cn.rst @@ -8,20 +8,20 @@ Layer -基于OOD实现的动态图Layer,包含该Layer的参数、前序运行的结构等信息。 +基于 OOD 实现的动态图 Layer,包含该 Layer 的参数、前序运行的结构等信息。 参数 :::::::::::: - - **name_scope** (str,可选) - 为Layer内部参数命名而采用的名称前缀。如果前缀为“mylayer”,在一个类名为MyLayer的Layer中,参数名为“mylayer_0.w_n”,其中w是参数的名称,n为自动生成的具有唯一性的后缀。如果为None,前缀名将为小写的类名。默认值为None。 - - **dtype** (str可选) - Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 "float32"。 + - **name_scope** (str,可选) - 为 Layer 内部参数命名而采用的名称前缀。如果前缀为“mylayer”,在一个类名为 MyLayer 的 Layer 中,参数名为“mylayer_0.w_n”,其中 w 是参数的名称,n 为自动生成的具有唯一性的后缀。如果为 None,前缀名将为小写的类名。默认值为 None。 + - **dtype** (str 可选) - Layer 中参数数据类型。如果设置为 str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 "float32"。 方法 :::::::::::: train() ''''''''' -将此层及其所有子层设置为训练模式。这只会影响某些模块,如Dropout和BatchNorm。 +将此层及其所有子层设置为训练模式。这只会影响某些模块,如 Dropout 和 BatchNorm。 **返回** 无 @@ -53,7 +53,7 @@ train() eval() ''''''''' -将此层及其所有子层设置为预测模式。这只会影响某些模块,如Dropout和BatchNorm。 +将此层及其所有子层设置为预测模式。这只会影响某些模块,如 Dropout 和 BatchNorm。 **返回** 无 @@ -84,10 +84,10 @@ eval() full_name() ''''''''' -Layer的全名。组成方式为:``name_scope`` + “/” + MyLayer.__class__.__name__ 。 +Layer 的全名。组成方式为:``name_scope`` + “/” + MyLayer.__class__.__name__ 。 **返回** -str, Layer的全名 +str, Layer 的全名 **代码示例** @@ -109,7 +109,7 @@ str, Layer的全名 register_forward_pre_hook(hook) ''''''''' -为Layer注册一个 ``forward pre-hook`` 函数,该 ``hook`` 函数将会在 
``forward`` 函数调用之前被调用。 +为 Layer 注册一个 ``forward pre-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之前被调用。 ``hook`` 函数具有以下形式:它的 ``input`` 是 ``Layer`` 的 ``input``,并且可以返回一个元组或者单个修改值;如果返回单个修改值,则将值包装到一个元组中。用户可以使用该函数来查看或修改 ``Layer`` ``forward`` 函数的输入。 @@ -120,7 +120,7 @@ hook(Layer, input) -> None or modified input - **hook** (function) - 被注册为 ``forward pre-hook`` 的函数 **返回** -HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的hook函数。 +HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的 hook 函数。 **代码示例** @@ -155,7 +155,7 @@ HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注 register_forward_post_hook(hook) ''''''''' -为Layer注册一个 ``forward post-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之后被调用。 +为 Layer 注册一个 ``forward post-hook`` 函数,该 ``hook`` 函数将会在 ``forward`` 函数调用之后被调用。 ``hook`` 函数具有以下形式,它的 ``input`` 和 ``output`` 是 ``Layer`` 的 ``input`` 和 ``output``。用户可以用该函数来查看和修改 ``Layer`` ``forward`` 函数的输出。 @@ -166,7 +166,7 @@ hook(Layer, input, output) -> None or modified output - **hook** (function) - 被注册为 ``forward post-hook`` 的函数 **返回** -HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的hook函数。 +HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注册的 hook 函数。 **代码示例** @@ -198,15 +198,15 @@ HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注 create_parameter(shape, attr=None, dtype="float32", is_bias=False, default_initializer=None) ''''''''' -为Layer创建参数。 +为 Layer 创建参数。 **参数** - - **shape** (list) - 参数的形状。列表中的数据类型必须为int。 - - **attr** (ParamAttr,可选) - 指定权重参数属性的对象,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。默认值为None。 - - **dtype** (str|core.VarDesc.VarType,可选) - Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为“float32”。 + - **shape** (list) - 参数的形状。列表中的数据类型必须为 int。 + - **attr** (ParamAttr,可选) - 指定权重参数属性的对象,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。默认值为 None。 + - **dtype** (str|core.VarDesc.VarType,可选) - Layer 中参数数据类型。如果设置为 
str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为“float32”。 - **is_bias** (bool,可选) - 是否是偏置参数。默认值:False。 - - **default_initializer** (Initializer,可选) - 默认的参数初始化方法。如果设置为None,则设置非bias参数的初始化方式为 paddle.nn.initializer.Xavier,设置bias参数的初始化方式为 paddle.nn.initializer.Constant。默认值:None。 + - **default_initializer** (Initializer,可选) - 默认的参数初始化方法。如果设置为 None,则设置非 bias 参数的初始化方式为 paddle.nn.initializer.Xavier,设置 bias 参数的初始化方式为 paddle.nn.initializer.Constant。默认值:None。 **返回** Tensor,创建的参数变量 @@ -234,13 +234,13 @@ Tensor,创建的参数变量 create_variable(name=None, persistable=None, dtype=None) ''''''''' -为Layer创建变量。 +为 Layer 创建变量。 **参数** - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - **persistable** (bool,可选) - 是否为持久性变量,后续会被移出。默认值:None。 - - **dtype** (str,可选) - Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 "float32" 。 + - **dtype** (str,可选) - Layer 中参数数据类型。如果设置为 str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 "float32" 。 **返回** Tensor,返回创建的 ``Tensor`` @@ -269,13 +269,13 @@ Tensor,返回创建的 ``Tensor`` create_tensor(name=None, persistable=None, dtype=None) ''''''''' -为Layer创建变量。 +为 Layer 创建变量。 **参数** - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - **persistable** (bool,可选) - 是否为持久性变量,后续会被移出。默认值:None。 - - **dtype** (str,可选) - Layer中参数数据类型。如果设置为str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 "float32" 。 + - **dtype** (str,可选) - Layer 中参数数据类型。如果设置为 str,则可以是“bool”,“float16”,“float32”,“float64”,“int8”,“int16”,“int32”,“int64”,“uint8”或“uint16”。默认值为 "float32" 。 **返回** Tensor,返回创建的 ``Tensor`` @@ -309,10 +309,10 @@ parameters(include_sublayers=True) **参数** - - **include_sublayers** (bool,可选) - 是否返回子层的参数。如果为True,返回的列表中包含子层的参数。默认值:True。 + - **include_sublayers** (bool,可选) - 是否返回子层的参数。如果为 True,返回的列表中包含子层的参数。默认值:True。 **返回** 
-list,一个由当前层及其子层的所有参数组成的列表,列表中的元素类型为Parameter(Tensor)。 +list,一个由当前层及其子层的所有参数组成的列表,列表中的元素类型为 Parameter(Tensor)。 **代码示例** @@ -374,10 +374,10 @@ sublayers(include_self=False) **参数** - - **include_self** (bool,可选) - 是否包含本层。如果为True,则包括本层。默认值:False + - **include_self** (bool,可选) - 是否包含本层。如果为 True,则包括本层。默认值:False **返回** - list,一个由所有子层组成的列表,列表中的元素类型为Layer。 + list,一个由所有子层组成的列表,列表中的元素类型为 Layer。 **代码示例** @@ -432,7 +432,7 @@ named_parameters(prefix='', include_sublayers=True) **参数** - **prefix** (str,可选) - 在所有参数名称前加的前缀。默认值:''。 - - **include_sublayers** (bool,可选) - 是否返回子层的参数。如果为True,返回的列表中包含子层的参数。默认值:True。 + - **include_sublayers** (bool,可选) - 是否返回子层的参数。如果为 True,返回的列表中包含子层的参数。默认值:True。 **返回** iterator,产出名称和参数的元组的迭代器。 @@ -478,17 +478,17 @@ iterator,产出名称和子层的元组的迭代器。 register_buffer(name, tensor, persistable=True) ''''''''' -将一个Tensor注册为buffer。 +将一个 Tensor 注册为 buffer。 -buffer是一个不可训练的变量,不会被优化器更新,但在评估或预测阶段可能是必要的状态变量。比如 ``BatchNorm`` 中的均值和方差。 +buffer 是一个不可训练的变量,不会被优化器更新,但在评估或预测阶段可能是必要的状态变量。比如 ``BatchNorm`` 中的均值和方差。 -注册的buffer默认是可持久性的,会被保存到 ``state_dict`` 中。如果指定 ``persistable`` 参数为False,则会注册一个非持久性的buffer,即不会同步和保存到 ``state_dict`` 中。 +注册的 buffer 默认是可持久性的,会被保存到 ``state_dict`` 中。如果指定 ``persistable`` 参数为 False,则会注册一个非持久性的 buffer,即不会同步和保存到 ``state_dict`` 中。 **参数** - - **name** (str) - 注册buffer的名字。可以通过此名字来访问已注册的buffer。 - - **tensor** (Tensor) - 将被注册为buffer的变量。 - - **persistable** (bool,可选) - 注册的buffer是否需要可持久性地保存到 ``state_dict`` 中。 + - **name** (str) - 注册 buffer 的名字。可以通过此名字来访问已注册的 buffer。 + - **tensor** (Tensor) - 将被注册为 buffer 的变量。 + - **persistable** (bool,可选) - 注册的 buffer 是否需要可持久性地保存到 ``state_dict`` 中。 **返回** None @@ -510,14 +510,14 @@ None buffers(include_sublayers=True) ''''''''' -返回一个由当前层及其子层的所有buffers组成的列表。 +返回一个由当前层及其子层的所有 buffers 组成的列表。 **参数** - - **include_sublayers** (bool,可选) - 是否返回子层的buffers。如果为True,返回的列表中包含子层的buffers。默认值:True。 + - **include_sublayers** (bool,可选) - 是否返回子层的 buffers。如果为 True,返回的列表中包含子层的 buffers。默认值:True。 **返回** -list,一个由当前层及其子层的所有buffers组成的列表,列表中的元素类型为Tensor。 
+list,一个由当前层及其子层的所有 buffers 组成的列表,列表中的元素类型为 Tensor。 **代码示例** @@ -536,15 +536,15 @@ list,一个由当前层及其子层的所有buffers组成的列表,列表中 named_buffers(prefix='', include_sublayers=True) ''''''''' -返回层中所有buffers的迭代器,生成名称和buffer的元组。 +返回层中所有 buffers 的迭代器,生成名称和 buffer 的元组。 **参数** - - **prefix** (str,可选) - 在所有buffer名称前加的前缀。默认值:''。 - - **include_sublayers** (bool,可选) - 是否返回子层的buffers。如果为True,返回的列表中包含子层的buffers。默认值:True。 + - **prefix** (str,可选) - 在所有 buffer 名称前加的前缀。默认值:''。 + - **include_sublayers** (bool,可选) - 是否返回子层的 buffers。如果为 True,返回的列表中包含子层的 buffers。默认值:True。 **返回** -iterator,产出名称和buffer的元组的迭代器。 +iterator,产出名称和 buffer 的元组的迭代器。 **代码示例** @@ -577,8 +577,8 @@ forward(*inputs, **kwargs) **参数** - - **\*inputs** (tuple) - 解包后的tuple参数。 - - **\*\*kwargs** (dict) - 解包后的dict参数。 + - **\*inputs** (tuple) - 解包后的 tuple 参数。 + - **\*\*kwargs** (dict) - 解包后的 dict 参数。 **返回** 无 @@ -586,12 +586,12 @@ forward(*inputs, **kwargs) add_sublayer(name, sublayer) ''''''''' -添加子层实例。可以通过self.name访问该sublayer。 +添加子层实例。可以通过 self.name 访问该 sublayer。 **参数** - **name** (str) - 子层名。 - - **sublayer** (Layer) - Layer实例。 + - **sublayer** (Layer) - Layer 实例。 **返回** Layer,添加的子层 @@ -627,12 +627,12 @@ Layer,添加的子层 add_parameter(name, parameter) ''''''''' -添加参数实例。可以通过self.name访问该parameter。 +添加参数实例。可以通过 self.name 访问该 parameter。 **参数** - **name** (str) - 参数名。 - - **parameter** (Parameter) - Parameter实例。 + - **parameter** (Parameter) - Parameter 实例。 **返回** Parameter,传入的参数实例 @@ -661,16 +661,16 @@ Parameter,传入的参数实例 state_dict(destination=None, include_sublayers=True, use_hook=True) ''''''''' -获取当前层及其子层的所有参数和可持久性buffers。并将所有参数和buffers存放在dict结构中。 +获取当前层及其子层的所有参数和可持久性 buffers。并将所有参数和 buffers 存放在 dict 结构中。 **参数** - - **destination** (dict,可选) - 如果提供 ``destination``,则所有参数和可持久性buffers都将存放在 ``destination`` 中。默认值:None。 - - **include_sublayers** (bool,可选) - 如果设置为True,则包括子层的参数和buffers。默认值:True。 - - **use_hook** (bool,可选) - 如果设置为True,将_state_dict_hooks中注册的函数应用于destination。默认值:True。 + - **destination** (dict,可选) - 如果提供 ``destination``,则所有参数和可持久性 
buffers 都将存放在 ``destination`` 中。默认值:None。 + - **include_sublayers** (bool,可选) - 如果设置为 True,则包括子层的参数和 buffers。默认值:True。 + - **use_hook** (bool,可选) - 如果设置为 True,将_state_dict_hooks 中注册的函数应用于 destination。默认值:True。 **返回** -dict,包含所有参数和可持久行buffers的dict +dict,包含所有参数和可持久行 buffers 的 dict **代码示例** @@ -686,12 +686,12 @@ dict,包含所有参数和可持久行buffers的dict set_state_dict(state_dict, use_structured_name=True) ''''''''' -根据传入的 ``state_dict`` 设置参数和可持久性buffers。所有参数和buffers将由 ``state_dict`` 中的 ``Tensor`` 设置。 +根据传入的 ``state_dict`` 设置参数和可持久性 buffers。所有参数和 buffers 将由 ``state_dict`` 中的 ``Tensor`` 设置。 **参数** - - **state_dict** (dict) - 包含所有参数和可持久性buffers的dict。 - - **use_structured_name** (bool,可选) - 如果设置为True,将使用Layer的结构性变量名作为dict的key,否则将使用Parameter或者Buffer的变量名作为key。默认值:True。 + - **state_dict** (dict) - 包含所有参数和可持久性 buffers 的 dict。 + - **use_structured_name** (bool,可选) - 如果设置为 True,将使用 Layer 的结构性变量名作为 dict 的 key,否则将使用 Parameter 或者 Buffer 的变量名作为 key。默认值:True。 **返回** 无 @@ -712,13 +712,13 @@ set_state_dict(state_dict, use_structured_name=True) to(device=None, dtype=None, blocking=None) ''''''''' -根据给定的device、dtype和blocking 转换 Layer中的parameters 和 buffers。 +根据给定的 device、dtype 和 blocking 转换 Layer 中的 parameters 和 buffers。 **参数** - - **device** (str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None,可选) - 希望存储Layer 的设备位置。如果为None,设备位置和原始的Tensor 的设备位置一致。如果设备位置是string 类型,取值可为 ``cpu``, ``gpu:x`` and ``xpu:x``,这里的 ``x`` 是 GPUs 或者 XPUs的编号。默认值:None。 - - **dtype** (str|numpy.dtype|paddle.dtype|None,可选) - 数据的类型。如果为None,数据类型和原始的Tensor 一致。默认值:None。 - - **blocking** (bool|None,可选)- 如果为False并且当前Tensor处于固定内存上,将会发生主机到设备端的异步拷贝。否则,会发生同步拷贝。如果为None,blocking 会被设置为True。默认为False。 + - **device** (str|paddle.CPUPlace()|paddle.CUDAPlace()|paddle.CUDAPinnedPlace()|paddle.XPUPlace()|None,可选) - 希望存储 Layer 的设备位置。如果为 None,设备位置和原始的 Tensor 的设备位置一致。如果设备位置是 string 类型,取值可为 ``cpu``, ``gpu:x`` and ``xpu:x``,这里的 ``x`` 是 GPUs 或者 XPUs 的编号。默认值:None。 + - **dtype** (str|numpy.dtype|paddle.dtype|None,可选) - 
数据的类型。如果为 None,数据类型和原始的 Tensor 一致。默认值:None。 + - **blocking** (bool|None,可选)- 如果为 False 并且当前 Tensor 处于固定内存上,将会发生主机到设备端的异步拷贝。否则,会发生同步拷贝。如果为 None,blocking 会被设置为 True。默认为 False。 **代码示例** diff --git a/docs/api/paddle/nn/LeakyReLU_cn.rst b/docs/api/paddle/nn/LeakyReLU_cn.rst index f4692abf49d..e014bfc08b2 100644 --- a/docs/api/paddle/nn/LeakyReLU_cn.rst +++ b/docs/api/paddle/nn/LeakyReLU_cn.rst @@ -20,14 +20,14 @@ LeakyReLU 激活层 参数 :::::::::: - - negative_slope (float,可选) - :math:`x < 0` 时的斜率。默认值为0.01。 + - negative_slope (float,可选) - :math:`x < 0` 时的斜率。默认值为 0.01。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/Linear_cn.rst b/docs/api/paddle/nn/Linear_cn.rst index 851b5465423..669e33ab0ad 100644 --- a/docs/api/paddle/nn/Linear_cn.rst +++ b/docs/api/paddle/nn/Linear_cn.rst @@ -6,7 +6,7 @@ Linear .. py:class:: paddle.nn.Linear(in_features, out_features, weight_attr=None, bias_attr=None, name=None) -**线性变换层**。对于每个输入Tensor :math:`X`,计算公式为: +**线性变换层**。对于每个输入 Tensor :math:`X`,计算公式为: .. 
math:: @@ -14,17 +14,17 @@ Linear 其中,:math:`W` 和 :math:`b` 分别为权重和偏置。 -Linear层只接受一个Tensor作为输入,形状为 :math:`[batch\_size, *, in\_features]`,其中 :math:`*` 表示可以为任意个额外的维度。 -该层可以计算输入Tensor与权重矩阵 :math:`W` 的乘积,然后生成形状为 :math:`[batch\_size, *, out\_features]` 的输出Tensor。 -如果 :math:`bias\_attr` 不是False,则将创建一个偏置参数并将其添加到输出中。 +Linear 层只接受一个 Tensor 作为输入,形状为 :math:`[batch\_size, *, in\_features]`,其中 :math:`*` 表示可以为任意个额外的维度。 +该层可以计算输入 Tensor 与权重矩阵 :math:`W` 的乘积,然后生成形状为 :math:`[batch\_size, *, out\_features]` 的输出 Tensor。 +如果 :math:`bias\_attr` 不是 False,则将创建一个偏置参数并将其添加到输出中。 参数 ::::::::: - **in_features** (int) – 线性变换层输入单元的数目。 - **out_features** (int) – 线性变换层输出单元的数目。 -- **weight_attr** (ParamAttr,可选) – 指定权重参数的属性。默认值为None,表示使用默认的权重参数属性,将权重参数初始化为0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 -- **bias_attr** (ParamAttr|bool,可选) – 指定偏置参数的属性。:math:`bias\_attr` 为bool类型且设置为False时,表示不会为该层添加偏置。:math:`bias\_attr` 如果设置为True或者None,则表示使用默认的偏置参数属性,将偏置参数初始化为0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。默认值为None。 +- **weight_attr** (ParamAttr,可选) – 指定权重参数的属性。默认值为 None,表示使用默认的权重参数属性,将权重参数初始化为 0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 +- **bias_attr** (ParamAttr|bool,可选) – 指定偏置参数的属性。:math:`bias\_attr` 为 bool 类型且设置为 False 时,表示不会为该层添加偏置。:math:`bias\_attr` 如果设置为 True 或者 None,则表示使用默认的偏置参数属性,将偏置参数初始化为 0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 属性 @@ -43,8 +43,8 @@ bias 形状 ::::::::: -- 输入:形状为 :math:`[batch\_size, *, in\_features]` 的多维Tensor。 -- 输出:形状为 :math:`[batch\_size, *, out\_features]` 的多维Tensor。 +- 输入:形状为 :math:`[batch\_size, *, in\_features]` 的多维 Tensor。 +- 输出:形状为 :math:`[batch\_size, *, out\_features]` 的多维 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/LocalResponseNorm_cn.rst b/docs/api/paddle/nn/LocalResponseNorm_cn.rst index 6fead24fab7..ec8ef732ab0 100644 --- a/docs/api/paddle/nn/LocalResponseNorm_cn.rst +++ b/docs/api/paddle/nn/LocalResponseNorm_cn.rst @@ -8,15 +8,15 @@ LocalResponseNorm 局部响应正则化(Local Response 
Normalization)用于对局部输入区域进行正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考:`ImageNet Classification with Deep Convolutional Neural Networks `_ .. note:: - 对应的 `functional方法` 请参考::ref:`cn_api_nn_functional_local_response_norm` 。 + 对应的 `functional 方法` 请参考::ref:`cn_api_nn_functional_local_response_norm` 。 参数 ::::::::: - **size** (int) - 累加的通道数。 - - **alpha** (float,可选)- 缩放参数,正数。默认值为1e-4。 - - **beta** (float,可选)- 指数,正数。默认值为0.75。 - - **k** (float,可选)- 位移,正数。默认值为1.0。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致。如果输入是三维 `Tensor`,该参数可以是"NCL"或"NLC",其中N是批尺寸,C是通道数,L是特征长度。如果输入是四维 `Tensor`,该参数可以是"NCHW"或"NHWC",其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。如果输入是五维 `Tensor`,该参数可以是"NCDHW"或"NDHWC",其中N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **alpha** (float,可选)- 缩放参数,正数。默认值为 1e-4。 + - **beta** (float,可选)- 指数,正数。默认值为 0.75。 + - **k** (float,可选)- 位移,正数。默认值为 1.0。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致。如果输入是三维 `Tensor`,该参数可以是"NCL"或"NLC",其中 N 是批尺寸,C 是通道数,L 是特征长度。如果输入是四维 `Tensor`,该参数可以是"NCHW"或"NHWC",其中 N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。如果输入是五维 `Tensor`,该参数可以是"NCDHW"或"NDHWC",其中 N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 diff --git a/docs/api/paddle/nn/LogSigmoid_cn.rst b/docs/api/paddle/nn/LogSigmoid_cn.rst index d4e39cb2866..51abc77781e 100644 --- a/docs/api/paddle/nn/LogSigmoid_cn.rst +++ b/docs/api/paddle/nn/LogSigmoid_cn.rst @@ -4,7 +4,7 @@ LogSigmoid ------------------------------- .. py:class:: paddle.nn.LogSigmoid(name=None) -LogSigmoid激活层。计算公式如下: +LogSigmoid 激活层。计算公式如下: .. 
math:: @@ -18,8 +18,8 @@ LogSigmoid激活层。计算公式如下: 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/LogSoftmax_cn.rst b/docs/api/paddle/nn/LogSoftmax_cn.rst index 1da69b265f3..5f3a5c7530f 100644 --- a/docs/api/paddle/nn/LogSoftmax_cn.rst +++ b/docs/api/paddle/nn/LogSoftmax_cn.rst @@ -4,7 +4,7 @@ LogSoftmax ------------------------------- .. py:class:: paddle.nn.LogSoftmax(axis=-1, name=None) -LogSoftmax激活层,计算公式如下: +LogSoftmax 激活层,计算公式如下: .. math:: @@ -15,14 +15,14 @@ LogSoftmax激活层,计算公式如下: 参数 ::::::::: - - axis (int,可选) - 指定对输入Tensor进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入Tensor的维度,``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - axis (int,可选) - 指定对输入 Tensor 进行运算的轴。``axis`` 的有效范围是[-D, D),D 是输入 Tensor 的维度,``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/MSELoss_cn.rst b/docs/api/paddle/nn/MSELoss_cn.rst index 10e463c4861..95efef792b3 100644 --- a/docs/api/paddle/nn/MSELoss_cn.rst +++ b/docs/api/paddle/nn/MSELoss_cn.rst @@ -5,11 +5,11 @@ MSELoss .. py:function:: paddle.nn.MSELoss(reduction='mean') -该OP用于计算预测值和目标值的均方差误差。 +该 OP 用于计算预测值和目标值的均方差误差。 -对于预测值input和目标值label: +对于预测值 input 和目标值 label: -当reduction为'none'时: +当 reduction 为'none'时: .. 
math:: Out = (input - label)^2 @@ -27,13 +27,13 @@ MSELoss 参数 :::::::::::: - - **reduction** (str,可选) - 约简方式,可以是 'none' | 'mean' | 'sum'。设为'none'时不使用约简,设为'mean'时返回loss的均值,设为'sum'时返回loss的和。 + - **reduction** (str,可选) - 约简方式,可以是 'none' | 'mean' | 'sum'。设为'none'时不使用约简,设为'mean'时返回 loss 的均值,设为'sum'时返回 loss 的和。 形状 :::::::::::: - - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 - - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 + - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维 Tensor。数据类型为 float32 或 float64。 + - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维 Tensor。数据类型为 float32 或 float64。 返回 diff --git a/docs/api/paddle/nn/MarginRankingLoss_cn.rst b/docs/api/paddle/nn/MarginRankingLoss_cn.rst index 905bc24f0c0..385b4931deb 100644 --- a/docs/api/paddle/nn/MarginRankingLoss_cn.rst +++ b/docs/api/paddle/nn/MarginRankingLoss_cn.rst @@ -5,7 +5,7 @@ MarginRankingLoss .. py:class:: paddle.nn.MarginRankingLoss(margin=0.0, reduction='mean', name=None) -该接口用于创建一个 ``MarginRankingLoss`` 的可调用类,计算输入input,other 和 标签label间的 `margin rank loss` 损失。 +该接口用于创建一个 ``MarginRankingLoss`` 的可调用类,计算输入 input,other 和 标签 label 间的 `margin rank loss` 损失。 该损失函数的数学计算公式如下: @@ -26,20 +26,20 @@ MarginRankingLoss 参数 :::::::: - - **margin** (float,可选): - 用于加和的margin值,默认值为0。 + - **margin** (float,可选): - 用于加和的 margin 值,默认值为 0。 - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'`` 、 ``'mean'`` 、 ``'sum'``。如果设置为 ``'none'``,则直接返回 最原始的 ``margin_rank_loss``。如果设置为 ``'sum'``,则返回 ``margin_rank_loss`` 的总和。如果设置为 ``'mean'``,则返回 ``margin_rank_loss`` 的平均值。默认值为 ``'none'`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 :::::::: - - **input** - N-D Tensor,维度是[N,*] 其中N 是batch size,`*` 是任意数量的额外维度,数据类型为float32或float64。 + - **input** - N-D Tensor,维度是[N,*] 其中 N 是 batch size,`*` 是任意数量的额外维度,数据类型为 float32 或 float64。 - **other** - 与 ``input`` 的形状、数据类型相同。 - **label** - 与 ``input`` 
的形状、数据类型相同。 - - **output** - 如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'``,则形状为 :math:`[1]`,否则shape和输入 `input` 保持一致。数据类型与 ``input``、 ``other`` 相同。 + - **output** - 如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'``,则形状为 :math:`[1]`,否则 shape 和输入 `input` 保持一致。数据类型与 ``input``、 ``other`` 相同。 返回 :::::::: -返回计算MarginRankingLoss的可调用对象。 +返回计算 MarginRankingLoss 的可调用对象。 代码示例 :::::::: diff --git a/docs/api/paddle/nn/MaxPool1D_cn.rst b/docs/api/paddle/nn/MaxPool1D_cn.rst index 15f25ed6d92..250c3cc79a6 100755 --- a/docs/api/paddle/nn/MaxPool1D_cn.rst +++ b/docs/api/paddle/nn/MaxPool1D_cn.rst @@ -6,10 +6,10 @@ MaxPool1D .. py:function:: paddle.nn.MaxPool1D(kernel_size, stride=None, padding=0, return_mask=False, ceil_mode=False, name=None) -该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的最大值池化。输入和输出都是3-D Tensor, +该算子根据输入 `x` , `kernel_size` 等参数对一个输入 Tensor 计算 1D 的最大值池化。输入和输出都是 3-D Tensor, 默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。 -假设输入形状是(N, C, L),输出形状是 (N, C, L_{out}),卷积核尺寸是k, 1d最大值池化计算公式如下: +假设输入形状是(N, C, L),输出形状是 (N, C, L_{out}),卷积核尺寸是 k, 1d 最大值池化计算公式如下: .. 
math:: @@ -17,22 +17,22 @@ MaxPool1D 参数 ::::::::: - - **kernel_size** (int|list|tuple):池化核的尺寸大小。如果kernel_size为list或tuple类型,其必须包含一个整数,最终池化核的大小为该数值。 - - **stride** (int|list|tuple,可选):池化操作步长。如果stride为list或tuple类型,其必须包含一个整数,最终池化操作的步长为该数值。默认值为None,这时会使用kernel_size作为stride。 - - **padding** (str|int|list|tuple,可选):池化补零的方式。如果padding是一个字符串,则必须为 `SAME` 或者 `VALID`。如果是turple或者list类型,则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 - - **return_mask** (bool,可选):是否返回最大值的索引,默认为False。 - - **ceil_mode** (bool,可选):是否用ceil函数计算输出的height和width,如果设置为False,则使用floor函数来计算,默认为False。 + - **kernel_size** (int|list|tuple):池化核的尺寸大小。如果 kernel_size 为 list 或 tuple 类型,其必须包含一个整数,最终池化核的大小为该数值。 + - **stride** (int|list|tuple,可选):池化操作步长。如果 stride 为 list 或 tuple 类型,其必须包含一个整数,最终池化操作的步长为该数值。默认值为 None,这时会使用 kernel_size 作为 stride。 + - **padding** (str|int|list|tuple,可选):池化补零的方式。如果 padding 是一个字符串,则必须为 `SAME` 或者 `VALID`。如果是 turple 或者 list 类型,则应是 `[pad_left, pad_right]` 形式。如果 padding 是一个非 0 值,那么表示会在输入的两端都 padding 上同样长度的 0。 + - **return_mask** (bool,可选):是否返回最大值的索引,默认为 False。 + - **ceil_mode** (bool,可选):是否用 ceil 函数计算输出的 height 和 width,如果设置为 False,则使用 floor 函数来计算,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,长度),即NCL格式的3-D Tensor。其数据类型为float32或float64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。其数据类型与输入x相同。 + - **x** (Tensor):默认形状为(批大小,通道数,长度),即 NCL 格式的 3-D Tensor。其数据类型为 float32 或 float64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度),即 NCL 格式的 3-D Tensor。其数据类型与输入 x 相同。 返回 ::::::::: -计算MaxPool1D的可调用对象 +计算 MaxPool1D 的可调用对象 diff --git a/docs/api/paddle/nn/MaxPool2D_cn.rst b/docs/api/paddle/nn/MaxPool2D_cn.rst index 4450c0e240d..c10858cc811 100644 --- a/docs/api/paddle/nn/MaxPool2D_cn.rst +++ b/docs/api/paddle/nn/MaxPool2D_cn.rst @@ -24,25 +24,25 @@ MaxPool2D 参数 ::::::::: - - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_size_Height, 
pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若pool_size=2,则池化核大小为2x2。 - - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示H和W维度上stride均为该值。默认值为None,这时会使用kernel_size作为stride。 - - **padding** (str|int|list|tuple,可选) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法。如果它是一个元组或列表,它可以有3种格式:(1)包含2个整数值:[pad_height, pad_width];(2)包含4个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含4个二元组:当 data_format 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示H和W维度上均为该值。默认值:0。 - - **ceil_mode** (bool,可选):是否用ceil函数计算输出高度和宽度。如果是True,则使用 `ceil` 计算输出形状的大小。 - - **return_mask** (bool,可选):是否返回最大索引和输出。默认为False。 - - **data_format** (str,可选):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW" + - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_size_Height, pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若 pool_size=2,则池化核大小为 2x2。 + - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示 H 和 W 维度上 stride 均为该值。默认值为 None,这时会使用 kernel_size 作为 stride。 + - **padding** (str|int|list|tuple,可选) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 2 个整数值:[pad_height, pad_width];(2)包含 4 个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 4 个二元组:当 data_format 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示 H 和 W 维度上均为该值。默认值:0。 + - **ceil_mode** (bool,可选):是否用 ceil 函数计算输出高度和宽度。如果是 True,则使用 `ceil` 计算输出形状的大小。 + - **return_mask** (bool,可选):是否返回最大索引和输出。默认为 False。 + - **data_format** (str,可选):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW" - **name** (str,可选) 
- 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。其数据类型为float16, float32, float64, int32或int64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor。其数据类型与输入相同。 + - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即 NCHW 格式的 4-D Tensor。其数据类型为 float16, float32, float64, int32 或 int64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即 NCHW 格式的 4-D Tensor。其数据类型与输入相同。 返回 ::::::::: -计算MaxPool2D的可调用对象 +计算 MaxPool2D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/MaxPool3D_cn.rst b/docs/api/paddle/nn/MaxPool3D_cn.rst index e1912834c9c..a15410db595 100644 --- a/docs/api/paddle/nn/MaxPool3D_cn.rst +++ b/docs/api/paddle/nn/MaxPool3D_cn.rst @@ -23,24 +23,24 @@ MaxPool3D \text{stride[1]} \times h + m, \text{stride[2]} \times w + n) 参数 ::::::::: - - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含三个整数值,(pool_size_Depth,pool_size_Height, pool_size_Width)。若为一个整数,则表示D,H和W维度上均为该值,比如若pool_size=2,则池化核大小为[2,2,2]。 - - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它将包含三个整数,(pool_stride_Depth,pool_stride_Height, pool_stride_Width)。若为一个整数,则表示D, H和W维度上stride均为该值。默认值为None,这时会使用kernel_size作为stride。 - - **padding** (str|int|list|tuple,可选) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法。如果它是一个元组或列表,它可以有3种格式:(1)包含3个整数值:[pad_depth, pad_height, pad_width];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含5个二元组:当 data_format 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示D、H和W维度上均为该值。默认值:0 - - **ceil_mode** (bool,可选):是否用ceil函数计算输出高度和宽度。如果是True,则使用 `ceil` 计算输出形状的大小。默认为False - - **return_mask** (bool,可选):是否返回最大索引和输出。默认为False。 - - **data_format** 
(str,可选):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NDCHW"。 + - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含三个整数值,(pool_size_Depth,pool_size_Height, pool_size_Width)。若为一个整数,则表示 D,H 和 W 维度上均为该值,比如若 pool_size=2,则池化核大小为[2,2,2]。 + - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它将包含三个整数,(pool_stride_Depth,pool_stride_Height, pool_stride_Width)。若为一个整数,则表示 D, H 和 W 维度上 stride 均为该值。默认值为 None,这时会使用 kernel_size 作为 stride。 + - **padding** (str|int|list|tuple,可选) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 3 个整数值:[pad_depth, pad_height, pad_width];(2)包含 6 个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 5 个二元组:当 data_format 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示 D、H 和 W 维度上均为该值。默认值:0 + - **ceil_mode** (bool,可选):是否用 ceil 函数计算输出高度和宽度。如果是 True,则使用 `ceil` 计算输出形状的大小。默认为 False + - **return_mask** (bool,可选):是否返回最大索引和输出。默认为 False。 + - **data_format** (str,可选):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NDCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,长度,高度,宽度),即NCDHW格式的5-D Tensor。其数据类型为float16, float32, float64, int32或int64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor。其数据类型与输入相同。 + - **x** (Tensor):默认形状为(批大小,通道数,长度,高度,宽度),即 NCDHW 格式的 5-D Tensor。其数据类型为 float16, float32, float64, int32 或 int64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即 NCDHW 格式的 5-D Tensor。其数据类型与输入相同。 返回 ::::::::: -计算MaxPool3D的可调用对象 +计算 MaxPool3D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/MaxUnPool1D_cn.rst b/docs/api/paddle/nn/MaxUnPool1D_cn.rst index bdd556386f9..a43a2ccdcc0 100644 --- 
a/docs/api/paddle/nn/MaxUnPool1D_cn.rst +++ b/docs/api/paddle/nn/MaxUnPool1D_cn.rst @@ -5,7 +5,7 @@ MaxUnPool1D .. py:function:: paddle.nn.MaxUnPool1D(kernel_size, stride=None, padding=0, data_format="NCL", output_size=None, name=None) -该接口用于构建 `MaxUnPool1D` 类的一个可调用对象,根据输入的input和最大值位置计算出池化的逆结果。所有非最大值设置为零。 +该接口用于构建 `MaxUnPool1D` 类的一个可调用对象,根据输入的 input 和最大值位置计算出池化的逆结果。所有非最大值设置为零。 输入: X 形状::math:`(N, C, L_{in})` @@ -25,21 +25,21 @@ MaxUnPool1D - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它必须包含一个整数,(pool_stride_Length),默认值:None。 - **padding** (string|int|list|tuple,可选) 池化填充,默认值:0。 - **output_size** (list|tuple,可选):目标输出尺寸。如果 output_size 没有被设置,则实际输出尺寸会通过(input_shape, kernel_size, stride, padding)自动计算得出,默认值:None。 - - **data_format** (str,可选):输入和输出的数据格式,只能是"NCL"。N是批尺寸,C是通道数,L是特征长度。默认值:"NCL" + - **data_format** (str,可选):输入和输出的数据格式,只能是"NCL"。N 是批尺寸,C 是通道数,L 是特征长度。默认值:"NCL" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,长度),即NCL格式的3-D Tensor。其数据类型为float32或float64。 - - **indices** (Tensor):默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。其数据类型为int32。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度),即NCL格式的3-D Tensor。其数据类型与输入一致。 + - **x** (Tensor):默认形状为(批大小,通道数,长度),即 NCL 格式的 3-D Tensor。其数据类型为 float32 或 float64。 + - **indices** (Tensor):默认形状为(批大小,通道数,输出特征长度),即 NCL 格式的 3-D Tensor。其数据类型为 int32。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征长度),即 NCL 格式的 3-D Tensor。其数据类型与输入一致。 返回 ::::::::: -计算MaxUnPool1D的可调用对象 +计算 MaxUnPool1D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/MaxUnPool2D_cn.rst b/docs/api/paddle/nn/MaxUnPool2D_cn.rst index 64b55c9d4b3..020a6fd5cf3 100644 --- a/docs/api/paddle/nn/MaxUnPool2D_cn.rst +++ b/docs/api/paddle/nn/MaxUnPool2D_cn.rst @@ -5,7 +5,7 @@ MaxUnPool2D .. 
py:function:: paddle.nn.MaxUnPool2D(kernel_size, stride=None,padding=0,data_format="NCHW",output_size=None,name=None) -该接口用于构建 `MaxUnPool2D` 类的一个可调用对象,根据输入的input和最大值位置计算出池化的逆结果。所有非最大值设置为零。 +该接口用于构建 `MaxUnPool2D` 类的一个可调用对象,根据输入的 input 和最大值位置计算出池化的逆结果。所有非最大值设置为零。 输入: X 形状::math:`(N, C, H_{in}, W_{in})` @@ -28,21 +28,21 @@ MaxUnPool2D - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它必须是两个相等的整数,(pool_stride_Height, pool_stride_Width),默认值:None。 - **padding** (string|int|list|tuple,可选) 池化填充,默认值:0。 - **output_size** (list|tuple,可选):目标输出尺寸。如果 output_size 没有被设置,则实际输出尺寸会通过(input_shape, kernel_size, padding)自动计算得出,默认值:None。 - - **data_format** (str,可选):输入和输出的数据格式,只能是"NCHW"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW" + - **data_format** (str,可选):输入和输出的数据格式,只能是"NCHW"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。其数据类型为float32或float64。 - - **indices** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor。其数据类型为int32。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor。其数据类型与输入一致。 + - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即 NCHW 格式的 4-D Tensor。其数据类型为 float32 或 float64。 + - **indices** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即 NCHW 格式的 4-D Tensor。其数据类型为 int32。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即 NCHW 格式的 4-D Tensor。其数据类型与输入一致。 返回 ::::::::: -计算MaxUnPool2D的可调用对象 +计算 MaxUnPool2D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/MaxUnPool3D_cn.rst b/docs/api/paddle/nn/MaxUnPool3D_cn.rst index c9b0bc2d608..e1f8ee593b2 100644 --- a/docs/api/paddle/nn/MaxUnPool3D_cn.rst +++ b/docs/api/paddle/nn/MaxUnPool3D_cn.rst @@ -5,7 +5,7 @@ MaxUnPool3D .. 
py:function:: paddle.nn.MaxUnPool3D(kernel_size, stride=None, padding=0, data_format="NCDHW", output_size=None, name=None) -该接口用于构建 `MaxUnPool3D` 类的一个可调用对象,根据输入的input和最大值位置计算出池化的逆结果。所有非最大值设置为零。 +该接口用于构建 `MaxUnPool3D` 类的一个可调用对象,根据输入的 input 和最大值位置计算出池化的逆结果。所有非最大值设置为零。 输入: X 形状::math:`(N, C, D_{in}, H_{in}, W_{in})` @@ -31,21 +31,21 @@ MaxUnPool3D - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它必须是三个相等的整数,(pool_stride_Depth, pool_stride_Height, pool_stride_Width),默认值:None。 - **padding** (string|int|list|tuple,可选) 池化填充,默认值:0。 - **output_size** (list|tuple,可选):目标输出尺寸。如果 output_size 没有被设置,则实际输出尺寸会通过(input_shape, kernel_size, stride, padding)自动计算得出,默认值:None。 - - **data_format** (str,可选):输入和输出的数据格式,只能是"NCDHW"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCDHW" + - **data_format** (str,可选):输入和输出的数据格式,只能是"NCDHW"。N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NCDHW" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,深度,高度,宽度),即NCDHW格式的5-D Tensor。其数据类型为float32或float64。 - - **indices** (Tensor):默认形状为(批大小,通道数,输出特征深度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor。其数据类型为int32。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征深度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor。其数据类型与输入一致。 + - **x** (Tensor):默认形状为(批大小,通道数,深度,高度,宽度),即 NCDHW 格式的 5-D Tensor。其数据类型为 float32 或 float64。 + - **indices** (Tensor):默认形状为(批大小,通道数,输出特征深度,输出特征高度,输出特征宽度),即 NCDHW 格式的 5-D Tensor。其数据类型为 int32。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征深度,输出特征高度,输出特征宽度),即 NCDHW 格式的 5-D Tensor。其数据类型与输入一致。 返回 ::::::::: -计算MaxUnPool3D的可调用对象 +计算 MaxUnPool3D 的可调用对象 代码示例 diff --git a/docs/api/paddle/nn/Maxout_cn.rst b/docs/api/paddle/nn/Maxout_cn.rst index aca9fed6a5a..c4adfcc316a 100644 --- a/docs/api/paddle/nn/Maxout_cn.rst +++ b/docs/api/paddle/nn/Maxout_cn.rst @@ -5,7 +5,7 @@ Maxout .. 
py:function:: paddle.nn.Maxout(groups, axis=1, name=None) -Maxout激活层。 +Maxout 激活层。 假设输入形状为(N, Ci, H, W),输出形状为(N, Co, H, W),则 :math:`Co=Ci/groups` 运算公式如下: @@ -22,14 +22,14 @@ Maxout激活层。 :::::::::::: :::::::::: - - groups (int) - 指定将输入张量的channel通道维度进行分组的数目。输出的通道数量为通道数除以组数。 - - axis (int,可选) - 指定通道所在维度的索引。当数据格式为NCHW时,axis应该被设置为1,当数据格式为NHWC时,axis应该被设置为-1或者3。默认值为1。 + - groups (int) - 指定将输入张量的 channel 通道维度进行分组的数目。输出的通道数量为通道数除以组数。 + - axis (int,可选) - 指定通道所在维度的索引。当数据格式为 NCHW 时,axis 应该被设置为 1,当数据格式为 NHWC 时,axis 应该被设置为-1 或者 3。默认值为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度。 - - output:输出形状为 :math:`[N, Co, H, W]` 或 :math:`[N, H, W, Co]` 的4-D Tensor,其中 :math:`Co=C/groups` + - input:形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。 + - output:输出形状为 :math:`[N, Co, H, W]` 或 :math:`[N, H, W, Co]` 的 4-D Tensor,其中 :math:`Co=C/groups` 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/Mish_cn.rst b/docs/api/paddle/nn/Mish_cn.rst index 79516f21786..28fc019ce25 100644 --- a/docs/api/paddle/nn/Mish_cn.rst +++ b/docs/api/paddle/nn/Mish_cn.rst @@ -4,7 +4,7 @@ Mish ------------------------------- .. py:class:: paddle.nn.Mish(name=None) -Mish激活层 +Mish 激活层 .. 
math:: @@ -22,8 +22,8 @@ Mish激活层 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/MultiHeadAttention_cn.rst b/docs/api/paddle/nn/MultiHeadAttention_cn.rst index e2e7b7c7da5..e092226202a 100644 --- a/docs/api/paddle/nn/MultiHeadAttention_cn.rst +++ b/docs/api/paddle/nn/MultiHeadAttention_cn.rst @@ -18,10 +18,10 @@ MultiHeadAttention :::::::::::: - **embed_dim** (int) - 输入输出的维度。 - - **num_heads** (int) - 多头注意力机制的Head数量。 - - **dropout** (float,可选) - 注意力目标的随机失活率。0表示不加dropout。默认值:0。 - - **kdim** (int,可选) - 键值对中key的维度。如果为 ``None`` 则 ``kdim = embed_dim``。默认值:``None``。 - - **vdim** (int,可选) - 键值对中value的维度。如果为 ``None`` 则 ``kdim = embed_dim``。默认值:``None``。 + - **num_heads** (int) - 多头注意力机制的 Head 数量。 + - **dropout** (float,可选) - 注意力目标的随机失活率。0 表示不加 dropout。默认值:0。 + - **kdim** (int,可选) - 键值对中 key 的维度。如果为 ``None`` 则 ``kdim = embed_dim``。默认值:``None``。 + - **vdim** (int,可选) - 键值对中 value 的维度。如果为 ``None`` 则 ``vdim = embed_dim``。默认值:``None``。 - **need_weights** (bool,可选) - 表明是否返回注意力权重。默认值:``False``。 - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值:``None``,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **bias_attr** (ParamAttr,可选)- 指定偏置参数属性的对象。默认值:``None``,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 diff --git a/docs/api/paddle/nn/MultiLabelSoftMarginLoss_cn.rst b/docs/api/paddle/nn/MultiLabelSoftMarginLoss_cn.rst index 03983ba8a07..b30d3b3c553 100644 --- a/docs/api/paddle/nn/MultiLabelSoftMarginLoss_cn.rst +++ b/docs/api/paddle/nn/MultiLabelSoftMarginLoss_cn.rst @@ -16,14 +16,14 @@ MultiLabelSoftMarginLoss 如果添加权重则再乘以对应的权重值 -最后,会添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)`
。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 参数 ::::::::: - - **weight** (Tensor,可选) - 手动设定权重,默认为None - - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始Loss。 - - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + - **weight** (Tensor,可选) - 手动设定权重,默认为 None + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始 Loss。 + - **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name` 。 输入 ::::::::: @@ -32,9 +32,9 @@ MultiLabelSoftMarginLoss 形状 ::::::::: - - **input** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, *]` , 其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 - - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 返回 diff --git a/docs/api/paddle/nn/NLLLoss_cn.rst b/docs/api/paddle/nn/NLLLoss_cn.rst index 9eb1ea7d53c..b68b672c071 100644 --- a/docs/api/paddle/nn/NLLLoss_cn.rst +++ b/docs/api/paddle/nn/NLLLoss_cn.rst @@ -5,9 +5,9 @@ NLLLoss .. 
py:class:: paddle.nn.NLLLoss(weight=None, ignore_index=-100, reduction='mean', name=None) -该接口可创建一个NLLLoss可调用类,计算输入x和标签label间的 `negative log likelihood loss` 损失,可用于训练一个 `n` 类分类器。 +该接口可创建一个 NLLLoss 可调用类,计算输入 x 和标签 label 间的 `negative log likelihood loss` 损失,可用于训练一个 `n` 类分类器。 -如果提供 `weight` 参数的话,它是一个 `1-D` 的tensor,里面的值对应类别的权重。当你的训练集样本 +如果提供 `weight` 参数的话,它是一个 `1-D` 的 tensor,里面的值对应类别的权重。当你的训练集样本 不均衡的话,使用这个参数是非常有用的。 该损失函数的数学计算公式如下: @@ -32,16 +32,16 @@ NLLLoss 参数 ::::::::: - - **weight** (Tensor,可选): - 手动指定每个类别的权重。其默认为 `None`。如果提供该参数的话,长度必须为 `num_classes`。数据类型为float32或float64。 - - **ignore_index** (int64,可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 - - **reduction** (str,可选): - 指定应用于输出结果的计算方式,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。数据类型为string。 + - **weight** (Tensor,可选): - 手动指定每个类别的权重。其默认为 `None`。如果提供该参数的话,长度必须为 `num_classes`。数据类型为 float32 或 float64。 + - **ignore_index** (int64,可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为 int64。 + - **reduction** (str,可选): - 指定应用于输出结果的计算方式,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss 均值。设置为 `sum` 时,计算 `mini-batch` loss 的总和。设置为 `none` 时,则返回 loss Tensor。数据类型为 string。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **input** (Tensor): - 输入 `Tensor`,其形状为 :math:`[N, C]`,其中 `C` 为类别数。但是对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_K]`。数据类型为float32或float64。 - - **label** (Tensor): - 输入 `input` 对应的标签值。其形状为 :math:`[N,]` 或者 :math:`[N, d_1, d_2, ..., d_K]`,数据类型为int64。 - - **output** (Tensor): - 输入 `input` 和 `label` 间的 `negative log likelihood loss` 损失。如果 `reduction` 为 `'none'`,则输出Loss形状为 `[N, *]`。如果 `reduction` 为 `'sum'` 或者 `'mean'`,则输出Loss形状为 `'[1]'` 。 + - **input** (Tensor): - 输入 `Tensor`,其形状为 :math:`[N, C]`,其中 `C` 为类别数。但是对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_K]`。数据类型为 float32 或 float64。 + - **label** (Tensor): - 输入 `input` 对应的标签值。其形状为 :math:`[N,]` 或者 :math:`[N, d_1, d_2, ..., d_K]`,数据类型为 int64。 + - 
**output** (Tensor): - 输入 `input` 和 `label` 间的 `negative log likelihood loss` 损失。如果 `reduction` 为 `'none'`,则输出 Loss 形状为 `[N, *]`。如果 `reduction` 为 `'sum'` 或者 `'mean'`,则输出 Loss 形状为 `'[1]'` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/Overview_cn.rst b/docs/api/paddle/nn/Overview_cn.rst index f4d51771238..e781a14c2b8 100644 --- a/docs/api/paddle/nn/Overview_cn.rst +++ b/docs/api/paddle/nn/Overview_cn.rst @@ -3,31 +3,31 @@ paddle.nn --------------------- -paddle.nn 目录下包含飞桨框架支持的神经网络层和相关函数的相关API。具体如下: +paddle.nn 目录下包含飞桨框架支持的神经网络层和相关函数的相关 API。具体如下: - :ref:`容器相关 ` - :ref:`卷积层 ` -- :ref:`Pooling层 ` -- :ref:`Padding层 ` +- :ref:`Pooling 层 ` +- :ref:`Padding 层 ` - :ref:`激活层 ` -- :ref:`Normalization层 ` +- :ref:`Normalization 层 ` - :ref:`循环神经网络层 ` -- :ref:`Transformer相关 ` +- :ref:`Transformer 相关 ` - :ref:`线性层 ` -- :ref:`Dropout层 ` -- :ref:`Embedding层 ` -- :ref:`Loss层 ` -- :ref:`Vision层 ` -- :ref:`Clip相关 ` +- :ref:`Dropout 层 ` +- :ref:`Embedding 层 ` +- :ref:`Loss 层 ` +- :ref:`Vision 层 ` +- :ref:`Clip 相关 ` - :ref:`公共层 ` - :ref:`卷积相关函数 ` -- :ref:`Pooling相关函数 ` -- :ref:`Padding相关函数 ` +- :ref:`Pooling 相关函数 ` +- :ref:`Padding 相关函数 ` - :ref:`激活函数 ` -- :ref:`Normalization方法 ` +- :ref:`Normalization 方法 ` - :ref:`线性处理相关函数 ` -- :ref:`Dropout方法 ` -- :ref:`Embedding相关函数 ` +- :ref:`Dropout 方法 ` +- :ref:`Embedding 相关函数 ` - :ref:`损失函数 ` - :ref:`公用方法 ` - :ref:`初始化相关 ` @@ -41,13 +41,13 @@ paddle.nn 目录下包含飞桨框架支持的神经网络层和相关函数的 :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.Layer ` ", "基于OOD实现的动态图Layer" + " :ref:`paddle.nn.Layer ` ", "基于 OOD 实现的动态图 Layer" " :ref:`paddle.nn.LayerList ` ", "用于保存子层列表" " :ref:`paddle.nn.ParameterList ` ", "参数列表容器" - " :ref:`paddle.nn.Sequential ` ", "顺序容器;子Layer将按构造函数参数的顺序添加到此容器中" + " :ref:`paddle.nn.Sequential ` ", "顺序容器;子 Layer 将按构造函数参数的顺序添加到此容器中" " :ref:`paddle.nn.LayerDict ` ", "保存子层到有序字典中,它包含的子层将被正确地注册和添加" .. 
_convolution_layers: @@ -56,7 +56,7 @@ paddle.nn 目录下包含飞桨框架支持的神经网络层和相关函数的 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" @@ -69,11 +69,11 @@ paddle.nn 目录下包含飞桨框架支持的神经网络层和相关函数的 .. _pooling_layers: -pooling层 +pooling 层 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.AdaptiveAvgPool1D ` ", "一维自适应平均池化层" @@ -94,11 +94,11 @@ pooling层 .. _padding_layers: -Padding层 +Padding 层 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.Pad1D ` ", "一维填充层" @@ -112,58 +112,58 @@ Padding层 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.CELU ` ", "CELU激活层" - " :ref:`paddle.nn.ELU ` ", "ELU激活层" - " :ref:`paddle.nn.GELU ` ", "GELU激活层" - " :ref:`paddle.nn.Hardshrink ` ", "Hardshrink激活层" - " :ref:`paddle.nn.Hardsigmoid ` ", "Hardsigmoid激活层" - " :ref:`paddle.nn.Hardswish ` ", "Hardswish激活层" - " :ref:`paddle.nn.Hardtanh ` ", "Hardtanh激活层" + " :ref:`paddle.nn.CELU ` ", "CELU 激活层" + " :ref:`paddle.nn.ELU ` ", "ELU 激活层" + " :ref:`paddle.nn.GELU ` ", "GELU 激活层" + " :ref:`paddle.nn.Hardshrink ` ", "Hardshrink 激活层" + " :ref:`paddle.nn.Hardsigmoid ` ", "Hardsigmoid 激活层" + " :ref:`paddle.nn.Hardswish ` ", "Hardswish 激活层" + " :ref:`paddle.nn.Hardtanh ` ", "Hardtanh 激活层" " :ref:`paddle.nn.LeakyReLU ` ", "LeakyReLU 激活层" - " :ref:`paddle.nn.LogSigmoid ` ", "LogSigmoid激活层" - " :ref:`paddle.nn.LogSoftmax ` ", "LogSoftmax激活层" - " :ref:`paddle.nn.Maxout ` ", "Maxout激活层" - " :ref:`paddle.nn.PReLU ` ", "PReLU激活层" - " :ref:`paddle.nn.RReLU ` ", "RReLU激活层" - " :ref:`paddle.nn.ReLU ` ", "ReLU激活层" - " :ref:`paddle.nn.ReLU6 ` ", "ReLU6激活层" - " :ref:`paddle.nn.SELU ` ", "SELU激活层" - " :ref:`paddle.nn.Sigmoid ` ", "Sigmoid激活层" - " :ref:`paddle.nn.Silu ` ", "Silu激活层" - " :ref:`paddle.nn.Softmax ` ", "Softmax激活层" - " :ref:`paddle.nn.Softplus ` ", "Softplus激活层" - " 
:ref:`paddle.nn.Softshrink ` ", "Softshrink激活层" - " :ref:`paddle.nn.Softsign ` ", "Softsign激活层" - " :ref:`paddle.nn.Swish ` ", "Swish激活层" - " :ref:`paddle.nn.Mish ` ", "Mish激活层" - " :ref:`paddle.nn.Tanh ` ", "Tanh激活层" - " :ref:`paddle.nn.Tanhshrink ` ", "Tanhshrink激活层" - " :ref:`paddle.nn.ThresholdedReLU ` ", "Thresholded ReLU激活层" + " :ref:`paddle.nn.LogSigmoid ` ", "LogSigmoid 激活层" + " :ref:`paddle.nn.LogSoftmax ` ", "LogSoftmax 激活层" + " :ref:`paddle.nn.Maxout ` ", "Maxout 激活层" + " :ref:`paddle.nn.PReLU ` ", "PReLU 激活层" + " :ref:`paddle.nn.RReLU ` ", "RReLU 激活层" + " :ref:`paddle.nn.ReLU ` ", "ReLU 激活层" + " :ref:`paddle.nn.ReLU6 ` ", "ReLU6 激活层" + " :ref:`paddle.nn.SELU ` ", "SELU 激活层" + " :ref:`paddle.nn.Sigmoid ` ", "Sigmoid 激活层" + " :ref:`paddle.nn.Silu ` ", "Silu 激活层" + " :ref:`paddle.nn.Softmax ` ", "Softmax 激活层" + " :ref:`paddle.nn.Softplus ` ", "Softplus 激活层" + " :ref:`paddle.nn.Softshrink ` ", "Softshrink 激活层" + " :ref:`paddle.nn.Softsign ` ", "Softsign 激活层" + " :ref:`paddle.nn.Swish ` ", "Swish 激活层" + " :ref:`paddle.nn.Mish ` ", "Mish 激活层" + " :ref:`paddle.nn.Tanh ` ", "Tanh 激活层" + " :ref:`paddle.nn.Tanhshrink ` ", "Tanhshrink 激活层" + " :ref:`paddle.nn.ThresholdedReLU ` ", "Thresholded ReLU 激活层" .. _normalization_layers: -Normalization层 +Normalization 层 ::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" - - - " :ref:`paddle.nn.BatchNorm ` ", "Batch Normalization层" - " :ref:`paddle.nn.BatchNorm1D ` ", "一维Batch Normalization层" - " :ref:`paddle.nn.BatchNorm2D ` ", "二维Batch Normalization层" - " :ref:`paddle.nn.BatchNorm3D ` ", "三维Batch Normalization层" - " :ref:`paddle.nn.GroupNorm ` ", "Group Normalization层" - " :ref:`paddle.nn.InstanceNorm1D ` ", "一维Instance Normalization层" - " :ref:`paddle.nn.InstanceNorm2D ` ", "二维Instance Normalization层" - " :ref:`paddle.nn.InstanceNorm3D ` ", "三维Instance Normalization层" - " :ref:`paddle.nn.LayerNorm ` ", "用于保存Normalization层列表" - " :ref:`paddle.nn.LocalResponseNorm ` ", "Local Response Normalization层" - " :ref:`paddle.nn.SpectralNorm ` ", "Spectral Normalization层" - " :ref:`paddle.nn.SyncBatchNorm ` ", "Synchronized Batch Normalization层" + :header: "API 名称", "API 功能" + + + " :ref:`paddle.nn.BatchNorm ` ", "Batch Normalization 层" + " :ref:`paddle.nn.BatchNorm1D ` ", "一维 Batch Normalization 层" + " :ref:`paddle.nn.BatchNorm2D ` ", "二维 Batch Normalization 层" + " :ref:`paddle.nn.BatchNorm3D ` ", "三维 Batch Normalization 层" + " :ref:`paddle.nn.GroupNorm ` ", "Group Normalization 层" + " :ref:`paddle.nn.InstanceNorm1D ` ", "一维 Instance Normalization 层" + " :ref:`paddle.nn.InstanceNorm2D ` ", "二维 Instance Normalization 层" + " :ref:`paddle.nn.InstanceNorm3D ` ", "三维 Instance Normalization 层" + " :ref:`paddle.nn.LayerNorm ` ", "用于保存 Normalization 层列表" + " :ref:`paddle.nn.LocalResponseNorm ` ", "Local Response Normalization 层" + " :ref:`paddle.nn.SpectralNorm ` ", "Spectral Normalization 层" + " :ref:`paddle.nn.SyncBatchNorm ` ", "Synchronized Batch Normalization 层" .. _RNN_layers: @@ -171,7 +171,7 @@ Normalization层 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.BiRNN ` ", "双向循环神经网络" @@ -186,19 +186,19 @@ Normalization层 .. _Transformer: -Transformer相关 +Transformer 相关 ::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.MultiHeadAttention ` ", "多头注意力机制" - " :ref:`paddle.nn.Transformer ` ", "Transformer模型" - " :ref:`paddle.nn.TransformerDecoder ` ", "Transformer解码器" - " :ref:`paddle.nn.TransformerDecoderLayer ` ", "Transformer解码器层" - " :ref:`paddle.nn.TransformerEncoder ` ", "Transformer编码器" - " :ref:`paddle.nn.TransformerEncoderLayer ` ", "Transformer编码器层" + " :ref:`paddle.nn.Transformer ` ", "Transformer 模型" + " :ref:`paddle.nn.TransformerDecoder ` ", "Transformer 解码器" + " :ref:`paddle.nn.TransformerDecoderLayer ` ", "Transformer 解码器层" + " :ref:`paddle.nn.TransformerEncoder ` ", "Transformer 编码器" + " :ref:`paddle.nn.TransformerEncoderLayer ` ", "Transformer 编码器层" .. _linear_layers: @@ -206,7 +206,7 @@ Transformer相关 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.Bilinear ` ", "对两个输入执行双线性张量积" @@ -214,84 +214,84 @@ Transformer相关 .. _dropout_layers: -Dropout层 +Dropout 层 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.AlphaDropout ` ", "具有自归一化性质的dropout" + " :ref:`paddle.nn.AlphaDropout ` ", "具有自归一化性质的 dropout" " :ref:`paddle.nn.Dropout ` ", "Dropout" - " :ref:`paddle.nn.Dropout2D ` ", "一维Dropout" - " :ref:`paddle.nn.Dropout3D ` ", "二维Dropout" + " :ref:`paddle.nn.Dropout2D ` ", "二维 Dropout" + " :ref:`paddle.nn.Dropout3D ` ", "三维 Dropout" .. _embedding_layers: -Embedding层 +Embedding 层 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.Embedding ` ", "嵌入层(Embedding Layer)" .. _loss_layers: -Loss层 +Loss 层 ::::::::::::::::::::::: ..
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.BCELoss ` ", "BCELoss层" - " :ref:`paddle.nn.BCEWithLogitsLoss ` ", "BCEWithLogitsLoss层" - " :ref:`paddle.nn.CosineEmbeddingLoss ` ", "CosineEmbeddingLoss层" + " :ref:`paddle.nn.BCELoss ` ", "BCELoss 层" + " :ref:`paddle.nn.BCEWithLogitsLoss ` ", "BCEWithLogitsLoss 层" + " :ref:`paddle.nn.CosineEmbeddingLoss ` ", "CosineEmbeddingLoss 层" " :ref:`paddle.nn.CrossEntropyLoss ` ", "交叉熵损失层" - " :ref:`paddle.nn.CTCLoss ` ", "CTCLoss层" - " :ref:`paddle.nn.HSigmoidLoss ` ", "层次sigmoid损失层" - " :ref:`paddle.nn.KLDivLoss ` ", "Kullback-Leibler散度损失层" - " :ref:`paddle.nn.L1Loss ` ", "L1损失层" - " :ref:`paddle.nn.MarginRankingLoss ` ", "MarginRankingLoss层" + " :ref:`paddle.nn.CTCLoss ` ", "CTCLoss 层" + " :ref:`paddle.nn.HSigmoidLoss ` ", "层次 sigmoid 损失层" + " :ref:`paddle.nn.KLDivLoss ` ", "Kullback-Leibler 散度损失层" + " :ref:`paddle.nn.L1Loss ` ", "L1 损失层" + " :ref:`paddle.nn.MarginRankingLoss ` ", "MarginRankingLoss 层" " :ref:`paddle.nn.MSELoss ` ", "均方差误差损失层" - " :ref:`paddle.nn.NLLLoss ` ", "NLLLoss层" - " :ref:`paddle.nn.SmoothL1Loss ` ", "平滑L1损失层" - " :ref:`paddle.nn.SoftMarginLoss ` ", "SoftMarginLoss层" - " :ref:`paddle.nn.TripletMarginLoss ` ", "TripletMarginLoss层" - " :ref:`paddle.nn.TripletMarginWithDistanceLoss ` ", "TripletMarginWithDistanceLoss层" - " :ref:`paddle.nn.MultiLabelSoftMarginLoss ` ", "多标签Hinge损失层" + " :ref:`paddle.nn.NLLLoss ` ", "NLLLoss 层" + " :ref:`paddle.nn.SmoothL1Loss ` ", "平滑 L1 损失层" + " :ref:`paddle.nn.SoftMarginLoss ` ", "SoftMarginLoss 层" + " :ref:`paddle.nn.TripletMarginLoss ` ", "TripletMarginLoss 层" + " :ref:`paddle.nn.TripletMarginWithDistanceLoss ` ", "TripletMarginWithDistanceLoss 层" + " :ref:`paddle.nn.MultiLabelSoftMarginLoss ` ", "多标签 Hinge 损失层" .. _vision_layers: -Vision层 +Vision 层 ::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.ChannelShuffle ` ", "将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor按通道分成g组,得到形为[N, g, C/g, H, W]或[N, H, W, g, C/g]的Tensor,然后转置为[N, C/g, g, H, W]或[N, H, W, C/g, g]的形状,最后重新排列为原来的形状" - " :ref:`paddle.nn.PixelShuffle ` ", "将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的Tensor" - " :ref:`paddle.nn.PixelUnshuffle ` ", "PixelShuffle的逆操作,将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor重新排列成形为 [N, C*r*r, H/r, W/r] 或 [N, H/r, W/r, C*r*r] 的Tensor" - " :ref:`paddle.nn.Upsample ` ", "用于调整一个batch中图片的大小" - " :ref:`paddle.nn.UpsamplingBilinear2D ` ", "用于调整一个batch中图片的大小(使用双线性插值方法)" - " :ref:`paddle.nn.UpsamplingNearest2D ` ", "用于调整一个batch中图片的大小(使用最近邻插值方法)" + " :ref:`paddle.nn.ChannelShuffle ` ", "将一个形为[N, C, H, W]或是[N, H, W, C]的 Tensor 按通道分成 g 组,得到形为[N, g, C/g, H, W]或[N, H, W, g, C/g]的 Tensor,然后转置为[N, C/g, g, H, W]或[N, H, W, C/g, g]的形状,最后重新排列为原来的形状" + " :ref:`paddle.nn.PixelShuffle ` ", "将一个形为[N, C, H, W]或是[N, H, W, C]的 Tensor 重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的 Tensor" + " :ref:`paddle.nn.PixelUnshuffle ` ", "PixelShuffle 的逆操作,将一个形为[N, C, H, W]或是[N, H, W, C]的 Tensor 重新排列成形为 [N, C*r*r, H/r, W/r] 或 [N, H/r, W/r, C*r*r] 的 Tensor" + " :ref:`paddle.nn.Upsample ` ", "用于调整一个 batch 中图片的大小" + " :ref:`paddle.nn.UpsamplingBilinear2D ` ", "用于调整一个 batch 中图片的大小(使用双线性插值方法)" + " :ref:`paddle.nn.UpsamplingNearest2D ` ", "用于调整一个 batch 中图片的大小(使用最近邻插值方法)" .. _about_clip: -Clip相关 +Clip 相关 ::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.ClipGradByGlobalNorm ` ", "将一个 Tensor列表 t_list 中所有Tensor的L2范数之和,限定在 clip_norm 范围内" - " :ref:`paddle.nn.ClipGradByNorm ` ", "将输入的多维Tensor X 的L2范数限制在 clip_norm 范围之内" - " :ref:`paddle.nn.ClipGradByValue ` ", "将输入的多维Tensor X 的值限制在 [min, max] 范围" + " :ref:`paddle.nn.ClipGradByGlobalNorm ` ", "将一个 Tensor 列表 t_list 中所有 Tensor 的 L2 范数之和,限定在 clip_norm 范围内" + " :ref:`paddle.nn.ClipGradByNorm ` ", "将输入的多维 Tensor X 的 L2 范数限制在 clip_norm 范围之内" + " :ref:`paddle.nn.ClipGradByValue ` ", "将输入的多维 Tensor X 的值限制在 [min, max] 范围" .. _common_layers: @@ -299,17 +299,17 @@ Clip相关 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.BeamSearchDecoder ` ", "带beam search解码策略的解码器" + " :ref:`paddle.nn.BeamSearchDecoder ` ", "带 beam search 解码策略的解码器" " :ref:`paddle.nn.CosineSimilarity ` ", "余弦相似度计算" " :ref:`paddle.nn.dynamic_decode ` ", "循环解码" - " :ref:`paddle.nn.Flatten ` ", "将一个连续维度的Tensor展平成一维Tensor" - " :ref:`paddle.nn.PairwiseDistance ` ", "计算两个向量之间pairwise的距离" + " :ref:`paddle.nn.Flatten ` ", "将一个连续维度的 Tensor 展平成一维 Tensor" + " :ref:`paddle.nn.PairwiseDistance ` ", "计算两个向量之间 pairwise 的距离" " :ref:`paddle.nn.Identity ` ", "建立等效层,作为输入的 Placeholder" - " :ref:`paddle.nn.Unfold ` ", "实现的功能与卷积中用到的im2col函数一样,通常也被称作为im2col过程" - " :ref:`paddle.nn.Fold ` ", "该Op用于将一个滑动局部块组合成一个大的张量,通常也被称为col2im过程" + " :ref:`paddle.nn.Unfold ` ", "实现的功能与卷积中用到的 im2col 函数一样,通常也被称作为 im2col 过程" + " :ref:`paddle.nn.Fold ` ", "该 Op 用于将一个滑动局部块组合成一个大的张量,通常也被称为 col2im 过程" .. _convolution_functional: @@ -318,7 +318,7 @@ Clip相关 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.functional.conv1d ` ", "一维卷积函数" @@ -330,11 +330,11 @@ Clip相关 .. _pooling_functional: -Pooling相关函数 +Pooling 相关函数 ::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.functional.adaptive_avg_pool1d ` ", "一维自适应平均池化" @@ -355,15 +355,15 @@ Pooling相关函数 .. _padding_functional: -Padding相关函数 +Padding 相关函数 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.functional.pad ` ", "依照 pad 和 mode 属性对input进行填充" - " :ref:`paddle.nn.functional.zeropad2d ` ", "依照 pad 对x进行零填充" + " :ref:`paddle.nn.functional.pad ` ", "依照 pad 和 mode 属性对 input 进行填充" + " :ref:`paddle.nn.functional.zeropad2d ` ", "依照 pad 对 x 进行零填充" .. _activation_functional: @@ -371,50 +371,50 @@ Padding相关函数 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.functional.celu ` ", "celu激活函数" - " :ref:`paddle.nn.functional.elu ` ", "elu激活函数" + " :ref:`paddle.nn.functional.celu ` ", "celu 激活函数" + " :ref:`paddle.nn.functional.elu ` ", "elu 激活函数" " :ref:`paddle.nn.functional.elu_ ` ", "Inplace 版本的 elu API,对输入 x 采用 Inplace 策略" - " :ref:`paddle.nn.functional.gelu ` ", "gelu激活函数" - " :ref:`paddle.nn.functional.gumbel_softmax ` ", "gumbel_softmax采样激活函数" - " :ref:`paddle.nn.functional.hardshrink ` ", "hardshrink激活函数" - " :ref:`paddle.nn.functional.hardsigmoid ` ", "sigmoid的分段线性逼近激活函数" - " :ref:`paddle.nn.functional.hardswish ` ", "hardswish激活函数" - " :ref:`paddle.nn.functional.hardtanh ` ", "hardtanh激活函数" - " :ref:`paddle.nn.functional.leaky_relu ` ", "leaky_relu激活函数" - " :ref:`paddle.nn.functional.log_sigmoid ` ", "log_sigmoid激活函数" - " :ref:`paddle.nn.functional.log_softmax ` ", "log_softmax激活函数" - " :ref:`paddle.nn.functional.maxout ` ", "maxout激活函数" - " :ref:`paddle.nn.functional.prelu ` ", "prelu激活函数" - " :ref:`paddle.nn.functional.rrelu ` ", "rrelu激活函数" - " :ref:`paddle.nn.functional.relu ` ", "relu激活函数" + " :ref:`paddle.nn.functional.gelu ` ", "gelu 激活函数" + " :ref:`paddle.nn.functional.gumbel_softmax ` ", "gumbel_softmax 采样激活函数" + " :ref:`paddle.nn.functional.hardshrink ` ", 
"hardshrink 激活函数" + " :ref:`paddle.nn.functional.hardsigmoid ` ", "sigmoid 的分段线性逼近激活函数" + " :ref:`paddle.nn.functional.hardswish ` ", "hardswish 激活函数" + " :ref:`paddle.nn.functional.hardtanh ` ", "hardtanh 激活函数" + " :ref:`paddle.nn.functional.leaky_relu ` ", "leaky_relu 激活函数" + " :ref:`paddle.nn.functional.log_sigmoid ` ", "log_sigmoid 激活函数" + " :ref:`paddle.nn.functional.log_softmax ` ", "log_softmax 激活函数" + " :ref:`paddle.nn.functional.maxout ` ", "maxout 激活函数" + " :ref:`paddle.nn.functional.prelu ` ", "prelu 激活函数" + " :ref:`paddle.nn.functional.rrelu ` ", "rrelu 激活函数" + " :ref:`paddle.nn.functional.relu ` ", "relu 激活函数" " :ref:`paddle.nn.functional.relu_ ` ", "Inplace 版本的 :ref:`cn_api_nn_cn_relu` API,对输入 x 采用 Inplace 策略" - " :ref:`paddle.nn.functional.relu6 ` ", "relu6激活函数" - " :ref:`paddle.nn.functional.selu ` ", "selu激活函数" - " :ref:`paddle.nn.functional.sigmoid ` ", "sigmoid激活函数" - " :ref:`paddle.nn.functional.silu ` ", "silu激活函数" - " :ref:`paddle.nn.functional.softmax ` ", "softmax激活函数" + " :ref:`paddle.nn.functional.relu6 ` ", "relu6 激活函数" + " :ref:`paddle.nn.functional.selu ` ", "selu 激活函数" + " :ref:`paddle.nn.functional.sigmoid ` ", "sigmoid 激活函数" + " :ref:`paddle.nn.functional.silu ` ", "silu 激活函数" + " :ref:`paddle.nn.functional.softmax ` ", "softmax 激活函数" " :ref:`paddle.nn.functional.softmax_ ` ", "Inplace 版本的 :ref:`cn_api_nn_cn_softmax` API,对输入 x 采用 Inplace 策略" - " :ref:`paddle.nn.functional.softplus ` ", "softplus激活函数" - " :ref:`paddle.nn.functional.softshrink ` ", "softshrink激活函数" - " :ref:`paddle.nn.functional.softsign ` ", "softsign激活函数" - " :ref:`paddle.nn.functional.swish ` ", "swish激活函数" - " :ref:`paddle.nn.functional.mish ` ", "mish激活函数" - " :ref:`paddle.nn.functional.tanhshrink ` ", "tanhshrink激活函数" - " :ref:`paddle.nn.functional.thresholded_relu ` ", "thresholded_relu激活函数" + " :ref:`paddle.nn.functional.softplus ` ", "softplus 激活函数" + " :ref:`paddle.nn.functional.softshrink ` ", "softshrink 激活函数" + " :ref:`paddle.nn.functional.softsign ` ", 
"softsign 激活函数" + " :ref:`paddle.nn.functional.swish ` ", "swish 激活函数" + " :ref:`paddle.nn.functional.mish ` ", "mish 激活函数" + " :ref:`paddle.nn.functional.tanhshrink ` ", "tanhshrink 激活函数" + " :ref:`paddle.nn.functional.thresholded_relu ` ", "thresholded_relu 激活函数" .. _normalization_functional: -Normalization方法 +Normalization 方法 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.functional.local_response_norm ` ", "Local Response Normalization函数" + " :ref:`paddle.nn.functional.local_response_norm ` ", "Local Response Normalization 函数" " :ref:`paddle.nn.functional.normalize ` ", "归一化方法" " :ref:`paddle.nn.functional.remove_weight_norm ` ", "移除传入 layer 中的权重归一化" " :ref:`paddle.nn.functional.weight_norm ` ", "对传入的 layer 中的权重参数进行归一化" @@ -426,7 +426,7 @@ Normalization方法 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.functional.bilinear ` ", "对两个输入执行双线性张量积" @@ -434,28 +434,28 @@ Normalization方法 -Dropout方法 +Dropout 方法 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.functional.alpha_dropout ` ", "一种具有自归一化性质的dropout" + " :ref:`paddle.nn.functional.alpha_dropout ` ", "一种具有自归一化性质的 dropout" " :ref:`paddle.nn.functional.dropout ` ", "Dropout" - " :ref:`paddle.nn.functional.dropout2d ` ", "一维Dropout" - " :ref:`paddle.nn.functional.dropout3d ` ", "二维Dropout" + " :ref:`paddle.nn.functional.dropout2d ` ", "二维 Dropout" + " :ref:`paddle.nn.functional.dropout3d ` ", "三维 Dropout" .. _embedding_functional: -Embedding相关函数 +Embedding 相关函数 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.functional.diag_embed ` ", "对角线Embedding 方法" + " :ref:`paddle.nn.functional.diag_embed ` ", "对角线 Embedding 方法" " :ref:`paddle.nn.functional.embedding ` ", "Embedding 方法" ..
_loss_functional: @@ -464,30 +464,30 @@ Embedding相关函数 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.nn.functional.binary_cross_entropy ` ", "二值交叉熵损失值" - " :ref:`paddle.nn.functional.binary_cross_entropy_with_logits ` ", "logits二值交叉熵损失值" + " :ref:`paddle.nn.functional.binary_cross_entropy_with_logits ` ", "logits 二值交叉熵损失值" " :ref:`paddle.nn.functional.cosine_embedding_loss ` ", "用于计算余弦相似度损失" - " :ref:`paddle.nn.functional.ctc_loss ` ", "用于计算ctc损失" + " :ref:`paddle.nn.functional.ctc_loss ` ", "用于计算 ctc 损失" " :ref:`paddle.nn.functional.dice_loss ` ", "用于比较预测结果跟标签之间的相似度" - " :ref:`paddle.nn.functional.hsigmoid_loss ` ", "层次sigmoid损失函数" - " :ref:`paddle.nn.functional.l1_loss ` ", "用于计算L1损失" - " :ref:`paddle.nn.functional.kl_div ` ", "用于计算KL散度损失" + " :ref:`paddle.nn.functional.hsigmoid_loss ` ", "层次 sigmoid 损失函数" + " :ref:`paddle.nn.functional.l1_loss ` ", "用于计算 L1 损失" + " :ref:`paddle.nn.functional.kl_div ` ", "用于计算 KL 散度损失" " :ref:`paddle.nn.functional.log_loss ` ", "用于计算负对数损失" - " :ref:`paddle.nn.functional.margin_ranking_loss ` ", "用于计算margin rank loss 损失" + " :ref:`paddle.nn.functional.margin_ranking_loss ` ", "用于计算 margin rank loss 损失" " :ref:`paddle.nn.functional.mse_loss ` ", "用于计算均方差误差" - " :ref:`paddle.nn.functional.nll_loss ` ", "用于计算nll损失" + " :ref:`paddle.nn.functional.nll_loss ` ", "用于计算 nll 损失" " :ref:`paddle.nn.functional.npair_loss ` ", "成对数据损失计算" " :ref:`paddle.nn.functional.sigmoid_focal_loss ` ", "用于计算分类任务中前景类-背景类数量不均衡问题的损失" - " :ref:`paddle.nn.functional.smooth_l1_loss ` ", "用于计算平滑L1损失" - " :ref:`paddle.nn.functional.softmax_with_cross_entropy ` ", "将softmax操作、交叉熵损失函数的计算过程进行合并" + " :ref:`paddle.nn.functional.smooth_l1_loss ` ", "用于计算平滑 L1 损失" + " :ref:`paddle.nn.functional.softmax_with_cross_entropy ` ", "将 softmax 操作、交叉熵损失函数的计算过程进行合并" " :ref:`paddle.nn.functional.margin_cross_entropy ` ", "支持 ``Arcface``,``Cosface``,``Sphereface`` 的结合 Margin 损失函数" - " 
:ref:`paddle.nn.functional.soft_margin_loss ` ", "用于计算soft margin loss损失函数" - " :ref:`paddle.nn.functional.triplet_margin_loss ` ", "用于计算TripletMarginLoss" - " :ref:`paddle.nn.functional.triplet_margin_with_distance_loss ` ", "用户自定义距离函数用于计算triplet margin loss 损失" - " :ref:`paddle.nn.functional.multi_label_soft_margin_loss ` ", "用于计算多分类的hinge loss损失函数" + " :ref:`paddle.nn.functional.soft_margin_loss ` ", "用于计算 soft margin loss 损失函数" + " :ref:`paddle.nn.functional.triplet_margin_loss ` ", "用于计算 TripletMarginLoss" + " :ref:`paddle.nn.functional.triplet_margin_with_distance_loss ` ", "用户自定义距离函数用于计算 triplet margin loss 损失" + " :ref:`paddle.nn.functional.multi_label_soft_margin_loss ` ", "用于计算多分类的 hinge loss 损失函数" .. _common_functional: @@ -496,27 +496,27 @@ Embedding相关函数 ::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.functional.affine_grid ` ", "用于生成仿射变换前后的feature maps的坐标映射关系" - " :ref:`paddle.nn.functional.cosine_similarity ` ", "用于计算x1与x2沿axis维度的余弦相似度" - " :ref:`paddle.nn.functional.cross_entropy ` ", "计算输入input和标签label间的交叉熵" - " :ref:`paddle.nn.functional.grid_sample ` ", "用于调整一个batch中图片的大小" + " :ref:`paddle.nn.functional.affine_grid ` ", "用于生成仿射变换前后的 feature maps 的坐标映射关系" + " :ref:`paddle.nn.functional.cosine_similarity ` ", "用于计算 x1 与 x2 沿 axis 维度的余弦相似度" + " :ref:`paddle.nn.functional.cross_entropy ` ", "计算输入 input 和标签 label 间的交叉熵" + " :ref:`paddle.nn.functional.grid_sample ` ", "用于调整一个 batch 中图片的大小" " :ref:`paddle.nn.functional.label_smooth ` ", "标签平滑" - " :ref:`paddle.nn.functional.one_hot ` ", "将输入'x'中的每个id转换为一个one-hot向量" - " :ref:`paddle.nn.functional.pixel_shuffle ` ", "将Tensor重新排列" - " :ref:`paddle.nn.functional.pixel_unshuffle ` ", "将Tensor重新排列,是pixel_shuffle的逆操作" + " :ref:`paddle.nn.functional.one_hot ` ", "将输入'x'中的每个 id 转换为一个 one-hot 向量" + " :ref:`paddle.nn.functional.pixel_shuffle ` ", "将 Tensor 重新排列" + " :ref:`paddle.nn.functional.pixel_unshuffle ` ", "将 Tensor 重新排列,是 pixel_shuffle 
的逆操作" " :ref:`paddle.nn.functional.square_error_cost ` ", "用于计算预测值和目标值的方差估计" " :ref:`paddle.nn.functional.unfold ` ", "对每一个卷积核覆盖下的区域,将元素重新排成一列" - " :ref:`paddle.nn.functional.fold ` ", "该Op用于将一个滑动局部块组合成一个大的张量,通常也被称为col2im。" + " :ref:`paddle.nn.functional.fold ` ", "该 Op 用于将一个滑动局部块组合成一个大的张量,通常也被称为 col2im。" " :ref:`paddle.nn.functional.gather_tree ` ", "整个束搜索结束后使用,获得每个时间步选择的的候选词 id 及其对应的在搜索树中的 parent 节点" " :ref:`paddle.nn.functional.glu ` ", "门控线性单元" - " :ref:`paddle.nn.functional.interpolate ` ", "用于调整一个batch中图片的大小" + " :ref:`paddle.nn.functional.interpolate ` ", "用于调整一个 batch 中图片的大小" " :ref:`paddle.nn.functional.sequence_mask ` ", "根据输入 x 和 maxlen 输出一个掩码,数据类型为 dtype" - " :ref:`paddle.nn.functional.temporal_shift ` ", "用于对输入X做时序通道T上的位移操作,为TSM中使用的操作" - " :ref:`paddle.nn.functional.upsample ` ", "用于调整一个batch中图片的大小" - " :ref:`paddle.nn.functional.class_center_sample ` ", "用于PartialFC类别中心采样" + " :ref:`paddle.nn.functional.temporal_shift ` ", "用于对输入 X 做时序通道 T 上的位移操作,为 TSM 中使用的操作" + " :ref:`paddle.nn.functional.upsample ` ", "用于调整一个 batch 中图片的大小" + " :ref:`paddle.nn.functional.class_center_sample ` ", "用于 PartialFC 类别中心采样" .. _about_initializer: @@ -524,20 +524,20 @@ Embedding相关函数 ::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" - " :ref:`paddle.nn.initializer.Assign ` ", "使用Numpy数组、Python列表、Tensor来初始化参数" + " :ref:`paddle.nn.initializer.Assign ` ", "使用 Numpy 数组、Python 列表、Tensor 来初始化参数" " :ref:`paddle.nn.initializer.Bilinear ` ", "该接口为参数初始化函数,用于转置卷积函数中" - " :ref:`paddle.nn.initializer.Constant ` ", "用于权重初始化,通过输入的value值初始化输入变量" - " :ref:`paddle.nn.initializer.KaimingNormal ` ", "实现Kaiming正态分布方式的权重初始化" - " :ref:`paddle.nn.initializer.KaimingUniform ` ", "实现Kaiming均匀分布方式的权重初始化" + " :ref:`paddle.nn.initializer.Constant ` ", "用于权重初始化,通过输入的 value 值初始化输入变量" + " :ref:`paddle.nn.initializer.KaimingNormal ` ", "实现 Kaiming 正态分布方式的权重初始化" + " :ref:`paddle.nn.initializer.KaimingUniform ` ", "实现 Kaiming 均匀分布方式的权重初始化" " :ref:`paddle.nn.initializer.Normal ` ", "随机正态(高斯)分布初始化函数" - " :ref:`paddle.nn.initializer.set_global_initializer ` ", "用于设置Paddle框架中全局的参数初始化方法" - " :ref:`paddle.nn.initializer.calculate_gain ` ", "获取某些激活函数的推荐增益值(增益值可用于对某些初始化API进行设置,以调整初始化值)" - " :ref:`paddle.nn.initializer.Dirac ` ", "通过狄拉克delta函数来初始化3D/4D/5D Tensor,一般用于卷积层,能最大程度保留卷积层输入的特性" + " :ref:`paddle.nn.initializer.set_global_initializer ` ", "用于设置 Paddle 框架中全局的参数初始化方法" + " :ref:`paddle.nn.initializer.calculate_gain ` ", "获取某些激活函数的推荐增益值(增益值可用于对某些初始化 API 进行设置,以调整初始化值)" + " :ref:`paddle.nn.initializer.Dirac ` ", "通过狄拉克 delta 函数来初始化 3D/4D/5D Tensor,一般用于卷积层,能最大程度保留卷积层输入的特性" " :ref:`paddle.nn.initializer.Orthogonal ` ", "正交矩阵初始化方式,被初始化的参数为(半)正交的" " :ref:`paddle.nn.initializer.TruncatedNormal ` ", "随机截断正态(高斯)分布初始化函数" " :ref:`paddle.nn.initializer.Uniform ` ", "随机均匀分布初始化函数" - " :ref:`paddle.nn.initializer.XavierNormal ` ", "实现Xavier权重初始化方法( Xavier weight initializer)" - " :ref:`paddle.nn.initializer.XavierUniform ` ", "实现Xavier权重初始化方法( Xavier weight initializer)" + " :ref:`paddle.nn.initializer.XavierNormal ` ", "实现 Xavier 权重初始化方法( Xavier weight initializer)" + " :ref:`paddle.nn.initializer.XavierUniform ` ", "实现 Xavier 权重初始化方法( Xavier weight initializer)" diff --git 
a/docs/api/paddle/nn/PReLU_cn.rst b/docs/api/paddle/nn/PReLU_cn.rst index abb7b68e771..9cdc3a7096b 100644 --- a/docs/api/paddle/nn/PReLU_cn.rst +++ b/docs/api/paddle/nn/PReLU_cn.rst @@ -4,7 +4,7 @@ PReLU ------------------------------- .. py:class:: paddle.nn.PReLU(num_parameters=1, init=0.25, weight_attr=None, data_format="NCHW", name=None) -PReLU激活层(PReLU Activation Operator)。计算公式如下: +PReLU 激活层(PReLU Activation Operator)。计算公式如下: 如果使用近似计算: @@ -16,16 +16,16 @@ PReLU激活层(PReLU Activation Operator)。计算公式如下: 参数 :::::::::: - - num_parameters (int,可选) - 可训练`weight`数量,支持2种输入:1 - 输入中的所有元素使用同一个`weight`值;输入的通道数 - 在同一个通道中的元素使用同一个`weight`值。默认为1。 - - init (float,可选) - `weight`的初始值。默认为0.25。 - - weight_attr (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 + - num_parameters (int,可选) - 可训练`weight`数量,支持 2 种输入:1 - 输入中的所有元素使用同一个`weight`值;输入的通道数 - 在同一个通道中的元素使用同一个`weight`值。默认为 1。 + - init (float,可选) - `weight`的初始值。默认为 0.25。 + - weight_attr (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_ParamAttr` 。 - data_format (str,可选) – 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是 "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" 或者 "NDHWC"。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor,默认数据类型为float32。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor,默认数据类型为 float32。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/Pad1D_cn.rst b/docs/api/paddle/nn/Pad1D_cn.rst index da2b2f35bc3..fc6d0d952c3 100644 --- a/docs/api/paddle/nn/Pad1D_cn.rst +++ b/docs/api/paddle/nn/Pad1D_cn.rst @@ -11,13 +11,13 @@ Pad1D 参数 :::::::::::: - - **padding** (Tensor | List[int] | int) - 填充大小。如果是int,则在所有待填充边界使用相同的填充, + - **padding** (Tensor | List[int] | int) - 填充大小。如果是 int,则在所有待填充边界使用相同的填充, 否则填充的格式为[pad_left, pad_right]。 - - **mode** (str) - padding的四种模式,分别为 ``'constant'``, ``'reflect'``, ``'replicate'`` 和 ``'circular'``。 + - **mode** (str) - padding 的四种模式,分别为 ``'constant'``, 
``'reflect'``, ``'replicate'`` 和 ``'circular'``。 ``'constant'`` 表示填充常数 ``value`` ; ``'reflect'`` 表示填充以输入边界值为轴的映射;``'replicate'`` 表示 填充输入边界值;``'circular'`` 为循环填充输入。默认值为 ``'constant'``。 - - **value** (float32) - 以 ``'constant'`` 模式填充区域时填充的值。默认值为0.0。 - - **data_format** (str) - 指定输入的format,可为 ``'NCL'`` 或者 ``'NLC'``,默认值为 ``'NCL'``。 + - **value** (float32) - 以 ``'constant'`` 模式填充区域时填充的值。默认值为 0.0。 + - **data_format** (str) - 指定输入的 format,可为 ``'NCL'`` 或者 ``'NLC'``,默认值为 ``'NCL'``。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/Pad2D_cn.rst b/docs/api/paddle/nn/Pad2D_cn.rst index 9be03c54fc7..0aad0e3acb6 100644 --- a/docs/api/paddle/nn/Pad2D_cn.rst +++ b/docs/api/paddle/nn/Pad2D_cn.rst @@ -11,13 +11,13 @@ Pad2D 参数 :::::::::::: - - **padding** (Tensor | List[int] | int]) - 填充大小。如果是int,则在所有待填充边界使用相同的填充, + - **padding** (Tensor | List[int] | int]) - 填充大小。如果是 int,则在所有待填充边界使用相同的填充, 否则填充的格式为[pad_left, pad_right, pad_top, pad_bottom]。 - - **mode** (str) - padding的四种模式,分别为 ``'constant'``, ``'reflect'``, ``'replicate'`` 和 ``'circular'``。 + - **mode** (str) - padding 的四种模式,分别为 ``'constant'``, ``'reflect'``, ``'replicate'`` 和 ``'circular'``。 ``'constant'`` 表示填充常数 ``value``; ``'reflect'`` 表示填充以输入边界值为轴的映射;``'replicate'`` 表示 填充输入边界值;``'circular'`` 为循环填充输入。默认值为 ``'constant'`` 。 - - **value** (float32) - 以 ``'constant'`` 模式填充区域时填充的值。默认值为0.0。 - - **data_format** (str) - 指定输入的format,可为 ``'NCHW'`` 或者 ``'NHWC'``,默认值为 ``'NCHW'``。 + - **value** (float32) - 以 ``'constant'`` 模式填充区域时填充的值。默认值为 0.0。 + - **data_format** (str) - 指定输入的 format,可为 ``'NCHW'`` 或者 ``'NHWC'``,默认值为 ``'NCHW'``。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/Pad3D_cn.rst b/docs/api/paddle/nn/Pad3D_cn.rst index da121a4a607..2dc4cee8b1b 100644 --- a/docs/api/paddle/nn/Pad3D_cn.rst +++ b/docs/api/paddle/nn/Pad3D_cn.rst @@ -11,13 +11,13 @@ Pad3D 参数 :::::::::::: - - **padding** (Tensor | List[int] | int) - 
填充大小。如果是int,则在所有待填充边界使用相同的填充, + - **padding** (Tensor | List[int] | int) - 填充大小。如果是 int,则在所有待填充边界使用相同的填充, 否则填充的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 - - **mode** (str) - padding的四种模式,分别为 ``'constant'``, ``'reflect'``, ``'replicate'`` 和 ``'circular'``。 + - **mode** (str) - padding 的四种模式,分别为 ``'constant'``, ``'reflect'``, ``'replicate'`` 和 ``'circular'``。 ``'constant'`` 表示填充常数 ``value``; ``'reflect'`` 表示填充以输入边界值为轴的映射;``'replicate'`` 表示 填充输入边界值;``'circular'`` 为循环填充输入。默认值为 ``'constant'`` 。 - - **value** (float32) - 以 ``'constant'`` 模式填充区域时填充的值。默认值为0.0。 - - **data_format** (str) - 指定输入的format,可为 ``'NCDHW'`` 或者 ``'NDHWC'``,默认值为 ``'NCDHW'``。 + - **value** (float32) - 以 ``'constant'`` 模式填充区域时填充的值。默认值为 0.0。 + - **data_format** (str) - 指定输入的 format,可为 ``'NCDHW'`` 或者 ``'NDHWC'``,默认值为 ``'NCDHW'``。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/PairwiseDistance_cn.rst b/docs/api/paddle/nn/PairwiseDistance_cn.rst index 0ce19034542..55fc24fcdcb 100644 --- a/docs/api/paddle/nn/PairwiseDistance_cn.rst +++ b/docs/api/paddle/nn/PairwiseDistance_cn.rst @@ -5,7 +5,7 @@ PairwiseDistance .. py:class:: paddle.nn.PairwiseDistance(p=2., epsilon=1e-6, keepdim=False, name=None) -该OP计算两个向量(输入 ``x``、``y`` )之间pairwise的距离。该距离通过p范数计算: +该 OP 计算两个向量(输入 ``x``、``y`` )之间 pairwise 的距离。该距离通过 p 范数计算: .. 
math:: @@ -13,16 +13,16 @@ PairwiseDistance 参数 :::::::: - - **p** (float,可选)- 指定p阶的范数。默认值为2。 - - **epsilon** (float,可选)- 添加到分母的一个很小值,避免发生除零错误。默认值为1e-6。 - - **keepdim** (bool,可选)- 是否保留输出张量减少的维度。输出结果相对于 ``|x-y|`` 的结果减少一维,除非 :attr:`keepdim` 为True,默认值为False。 + - **p** (float,可选)- 指定 p 阶的范数。默认值为 2。 + - **epsilon** (float,可选)- 添加到分母的一个很小值,避免发生除零错误。默认值为 1e-6。 + - **keepdim** (bool,可选)- 是否保留输出张量减少的维度。输出结果相对于 ``|x-y|`` 的结果减少一维,除非 :attr:`keepdim` 为 True,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 :::::::: - - **x** (Tensor) - :math:`(N, D)`,其中D是向量的维度,数据类型为float32或float64。 + - **x** (Tensor) - :math:`(N, D)`,其中 D 是向量的维度,数据类型为 float32 或 float64。 - **y** (Tensor) - :math:`(N, D)`,与 ``x`` 的形状、数据类型相同。 - - **output** (Tensor) - :math:`(N)`,如果 :attr:`keepdim` 为True,则形状为 :math:`(N, 1)`。数据类型与 ``x``、 ``y`` 相同。 + - **output** (Tensor) - :math:`(N)`,如果 :attr:`keepdim` 为 True,则形状为 :math:`(N, 1)`。数据类型与 ``x``、 ``y`` 相同。 代码示例 :::::::: diff --git a/docs/api/paddle/nn/ParameterList_cn.rst b/docs/api/paddle/nn/ParameterList_cn.rst index df77abd9924..533dad3853c 100644 --- a/docs/api/paddle/nn/ParameterList_cn.rst +++ b/docs/api/paddle/nn/ParameterList_cn.rst @@ -8,12 +8,12 @@ ParameterList -参数列表容器。此容器的行为类似于Python列表,但它包含的参数将被正确地注册和添加。 +参数列表容器。此容器的行为类似于 Python 列表,但它包含的参数将被正确地注册和添加。 参数 :::::::::::: - - **parameters** (iterable,可选) - 可迭代的Parameters。 + - **parameters** (iterable,可选) - 可迭代的 Parameters。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/PixelShuffle_cn.rst b/docs/api/paddle/nn/PixelShuffle_cn.rst index 2ceea96d997..671c6ab5f23 100644 --- a/docs/api/paddle/nn/PixelShuffle_cn.rst +++ b/docs/api/paddle/nn/PixelShuffle_cn.rst @@ -4,11 +4,11 @@ PixelShuffle ------------------------------- .. 
py:function:: paddle.nn.PixelShuffle(upscale_factor, data_format="NCHW", name=None) -该算子将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的Tensor。这样做有利于实现步长(stride)为1/r的高效sub-pixel(亚像素)卷积。详见Shi等人在2016年发表的论文 `Real Time Single Image and Video Super Resolution Using an Efficient Sub Pixel Convolutional Neural Network `_ 。 +该算子将一个形为[N, C, H, W]或是[N, H, W, C]的 Tensor 重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的 Tensor。这样做有利于实现步长(stride)为 1/r 的高效 sub-pixel(亚像素)卷积。详见 Shi 等人在 2016 年发表的论文 `Real Time Single Image and Video Super Resolution Using an Efficient Sub Pixel Convolutional Neural Network `_ 。 .. code-block:: text - 给定一个形为 x.shape = [1, 9, 4, 4] 的4-D张量 + 给定一个形为 x.shape = [1, 9, 4, 4] 的 4-D 张量 设定:upscale_factor=3 那么输出张量的形为:[1, 1, 12, 12] @@ -20,12 +20,12 @@ PixelShuffle 形状 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor或NHWC格式的4-DTensor。其数据类型为float32, float64。 - - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式或NHWC的4-D Tensor。其数据类型与输入相同。 + - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即 NCHW 格式的 4-D Tensor 或 NHWC 格式的 4-DTensor。其数据类型为 float32, float64。 + - **output** (Tensor):默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即 NCHW 格式或 NHWC 的 4-D Tensor。其数据类型与输入相同。 返回 ::::::::: -计算PixelShuffle的可调用对象 +计算 PixelShuffle 的可调用对象 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/RNNCellBase_cn.rst b/docs/api/paddle/nn/RNNCellBase_cn.rst index 90c9a648b83..08041660ad6 100644 --- a/docs/api/paddle/nn/RNNCellBase_cn.rst +++ b/docs/api/paddle/nn/RNNCellBase_cn.rst @@ -9,7 +9,7 @@ RNNCellBase **循环神经网络单元基类** -该OP(RNNCellBase)是一个抽象表示根据输入和隐藏状态来计算输出和新状态的基本类,最适合也最常用于循环神经网络。 +该 OP(RNNCellBase)是一个抽象表示根据输入和隐藏状态来计算输出和新状态的基本类,最适合也最常用于循环神经网络。 .. 
py:function:: get_initial_states(batch_ref,shape=None,dtype=None,init_value=0.,batch_dim_idx=0): @@ -18,13 +18,13 @@ RNNCellBase 参数 :::::::::::: - - **batch_ref** (Tensor) - 一个Tensor,其形状决定了生成初始状态使用的batch_size。当batch_ref形状为d时,d[batch_dim_idx]为batch_size。 - - **shape** (list|tuple,可选) - 隐藏层的形状(可以是多层嵌套的),列表或元组的第一位为batch_size,默认为-1。shape为None时,使用state_shape(property)。默认为None。 - - **dtype** (str|list|tuple,可选) - 数据类型(可以是多层嵌套的,但嵌套结构要和shape相同。或者所有Tensor的数据类型相同时可以只输入一个dtype。)。当dtype为None且state_dtype(property)不可用时,则使用paddle默认的float类型。默认为None。 - - **init_value** (float,可选) -用于初始状态的浮点数值。默认为0。 - - **batch_dim_idx** (int,可选) - 用于指定batch_size在batch_ref的索引位置的整数值。默认为0。 + - **batch_ref** (Tensor) - 一个 Tensor,其形状决定了生成初始状态使用的 batch_size。当 batch_ref 形状为 d 时,d[batch_dim_idx]为 batch_size。 + - **shape** (list|tuple,可选) - 隐藏层的形状(可以是多层嵌套的),列表或元组的第一位为 batch_size,默认为-1。shape 为 None 时,使用 state_shape(property)。默认为 None。 + - **dtype** (str|list|tuple,可选) - 数据类型(可以是多层嵌套的,但嵌套结构要和 shape 相同。或者所有 Tensor 的数据类型相同时可以只输入一个 dtype。)。当 dtype 为 None 且 state_dtype(property)不可用时,则使用 paddle 默认的 float 类型。默认为 None。 + - **init_value** (float,可选) -用于初始状态的浮点数值。默认为 0。 + - **batch_dim_idx** (int,可选) - 用于指定 batch_size 在 batch_ref 的索引位置的整数值。默认为 0。 返回 :::::::::::: - - **init_state** (Tensor|tuple|list) - 根据输出的数据类型,形状和嵌套层级返回的初始状态Tensor。 + - **init_state** (Tensor|tuple|list) - 根据输出的数据类型,形状和嵌套层级返回的初始状态 Tensor。 diff --git a/docs/api/paddle/nn/RNN_cn.rst b/docs/api/paddle/nn/RNN_cn.rst index a594f816fd4..01bbd83668b 100644 --- a/docs/api/paddle/nn/RNN_cn.rst +++ b/docs/api/paddle/nn/RNN_cn.rst @@ -9,30 +9,30 @@ RNN **循环神经网络** -该OP是循环神经网络(RNN)的封装,将输入的Cell封装为一个循环神经网络。它能够重复执行 :code:`cell.forward()` 直到遍历完input中的所有Tensor。 +该 OP 是循环神经网络(RNN)的封装,将输入的 Cell 封装为一个循环神经网络。它能够重复执行 :code:`cell.forward()` 直到遍历完 input 中的所有 Tensor。 参数 :::::::::::: - - **cell** (RNNCellBase) - RNNCellBase类的一个实例。 - - **is_reverse** (bool,可选) - 指定遍历input的方向。默认为False - - **time_major** (bool,可选) - 指定input的第一个维度是否是time steps。默认为False。 + - **cell** (RNNCellBase) 
- RNNCellBase 类的一个实例。 + - **is_reverse** (bool,可选) - 指定遍历 input 的方向。默认为 False + - **time_major** (bool,可选) - 指定 input 的第一个维度是否是 time steps。默认为 False。 输入 :::::::::::: - - **inputs** (Tensor) - 输入(可以是多层嵌套的)。如果time_major为False,则Tensor的形状为[batch_size,time_steps,input_size],如果time_major为True,则Tensor的形状为[time_steps,batch_size,input_size],input_size为cell的input_size。 - - **initial_states** (Tensor|list|tuple,可选) - 输入cell的初始状态(可以是多层嵌套的),如果没有给出则会调用 :code:`cell.get_initial_states` 生成初始状态。默认为None。 - - **sequence_length** (Tensor,可选) - 指定输入序列的长度,形状为[batch_size],数据类型为int64或int32。在输入序列中所有time step不小于sequence_length的元素都会被当作填充元素处理(状态不再更新)。 + - **inputs** (Tensor) - 输入(可以是多层嵌套的)。如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,input_size],如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,input_size],input_size 为 cell 的 input_size。 + - **initial_states** (Tensor|list|tuple,可选) - 输入 cell 的初始状态(可以是多层嵌套的),如果没有给出则会调用 :code:`cell.get_initial_states` 生成初始状态。默认为 None。 + - **sequence_length** (Tensor,可选) - 指定输入序列的长度,形状为[batch_size],数据类型为 int64 或 int32。在输入序列中所有 time step 不小于 sequence_length 的元素都会被当作填充元素处理(状态不再更新)。 输出 :::::::::::: - - **outputs** (Tensor|list|tuple) - 输出。如果time_major为False,则Tensor的形状为[batch_size,time_steps,hidden_size],如果time_major为True,则Tensor的形状为[time_steps,batch_size,hidden_size]。 - - **final_states** (Tensor|list|tuple) - cell的最终状态,嵌套结构,形状和数据类型都与初始状态相同。 + - **outputs** (Tensor|list|tuple) - 输出。如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,hidden_size],如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,hidden_size]。 + - **final_states** (Tensor|list|tuple) - cell 的最终状态,嵌套结构,形状和数据类型都与初始状态相同。 .. 
Note:: - 该类是一个封装rnn cell的低级api,用户在使用forward函数时须确保initial_states满足cell的要求。 + 该类是一个封装 rnn cell 的低级 api,用户在使用 forward 函数时须确保 initial_states 满足 cell 的要求。 代码示例 diff --git a/docs/api/paddle/nn/RReLU_cn.rst b/docs/api/paddle/nn/RReLU_cn.rst index 1b5a34d812f..1c85588e72b 100644 --- a/docs/api/paddle/nn/RReLU_cn.rst +++ b/docs/api/paddle/nn/RReLU_cn.rst @@ -4,7 +4,7 @@ RReLU ------------------------------- .. py:class:: paddle.nn.RReLU(lower=1./8., upper=1./3., name=None) -RReLU激活层,应用随机纠正线性单元对神经元激活,参考论文: +RReLU 激活层,应用随机纠正线性单元对神经元激活,参考论文: `Empirical Evaluation of Rectified Activations in Convolutional Network `_ 。 训练阶段对负斜率进行均匀分布随机采样: @@ -37,14 +37,14 @@ RReLU激活层,应用随机纠正线性单元对神经元激活,参考论文 参数 :::::::::: - - **lower** (float,可选) - 负值斜率的随机值范围下限,`lower` 包含在范围中。支持的数据类型:float。默认值为0.125。 - - **upper** (float,可选) - 负值斜率的随机值范围上限,`upper` 包含在范围中。支持的数据类型:float。默认值为0.333。 + - **lower** (float,可选) - 负值斜率的随机值范围下限,`lower` 包含在范围中。支持的数据类型:float。默认值为 0.125。 + - **upper** (float,可选) - 负值斜率的随机值范围上限,`upper` 包含在范围中。支持的数据类型:float。默认值为 0.333。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 :::::::::: - - **x** (Tensor) – 任意形状的Tensor,默认数据类型为float32。 - - **out** (Tensor) – 和x具有相同形状的Tensor。 + - **x** (Tensor) – 任意形状的 Tensor,默认数据类型为 float32。 + - **out** (Tensor) – 和 x 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/ReLU6_cn.rst b/docs/api/paddle/nn/ReLU6_cn.rst index 7e5f836caf8..8a2173a49b7 100644 --- a/docs/api/paddle/nn/ReLU6_cn.rst +++ b/docs/api/paddle/nn/ReLU6_cn.rst @@ -4,7 +4,7 @@ ReLU6 ------------------------------- .. py:class:: paddle.nn.ReLU6(name=None) -ReLU6激活层 +ReLU6 激活层 .. 
math:: @@ -18,8 +18,8 @@ ReLU6激活层 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/ReLU_cn.rst b/docs/api/paddle/nn/ReLU_cn.rst index aa93abe4dec..776e6cd8be0 100644 --- a/docs/api/paddle/nn/ReLU_cn.rst +++ b/docs/api/paddle/nn/ReLU_cn.rst @@ -4,7 +4,7 @@ ReLU ------------------------------- .. py:class:: paddle.nn.ReLU(name=None) -ReLU激活层(Rectified Linear Unit)。计算公式如下: +ReLU 激活层(Rectified Linear Unit)。计算公式如下: .. math:: @@ -18,8 +18,8 @@ ReLU激活层(Rectified Linear Unit)。计算公式如下: 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/SELU_cn.rst b/docs/api/paddle/nn/SELU_cn.rst index 57857534761..e8bdb4296f9 100644 --- a/docs/api/paddle/nn/SELU_cn.rst +++ b/docs/api/paddle/nn/SELU_cn.rst @@ -4,7 +4,7 @@ SELU ------------------------------- .. py:class:: paddle.nn.SELU(scale=1.0507009873554804934193349852946, alpha=1.6732632423543772848170429916717, name=None) -SELU激活层 +SELU 激活层 .. 
math:: @@ -18,14 +18,14 @@ SELU激活层 参数 :::::::::: - - scale (float,可选) - SELU激活计算公式中的scale值,必须大于1.0。默认值为1.0507009873554804934193349852946。 - - alpha (float,可选) - SELU激活计算公式中的alpha值,必须大于等于零。默认值为1.6732632423543772848170429916717。 + - scale (float,可选) - SELU 激活计算公式中的 scale 值,必须大于 1.0。默认值为 1.0507009873554804934193349852946。 + - alpha (float,可选) - SELU 激活计算公式中的 alpha 值,必须大于等于零。默认值为 1.6732632423543772848170429916717。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/Sequential_cn.rst b/docs/api/paddle/nn/Sequential_cn.rst index 2c46cb75c2a..c58c0674006 100644 --- a/docs/api/paddle/nn/Sequential_cn.rst +++ b/docs/api/paddle/nn/Sequential_cn.rst @@ -8,12 +8,12 @@ Sequential -顺序容器。子Layer将按构造函数参数的顺序添加到此容器中。传递给构造函数的参数可以Layers或可迭代的name Layer元组。 +顺序容器。子 Layer 将按构造函数参数的顺序添加到此容器中。传递给构造函数的参数可以 Layers 或可迭代的 name Layer 元组。 参数 :::::::::::: - - **layers** (tuple) - Layers或可迭代的name Layer对。 + - **layers** (tuple) - Layers 或可迭代的 name Layer 对。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/Sigmoid_cn.rst b/docs/api/paddle/nn/Sigmoid_cn.rst index 3d502a7b35d..47a64aa17b0 100644 --- a/docs/api/paddle/nn/Sigmoid_cn.rst +++ b/docs/api/paddle/nn/Sigmoid_cn.rst @@ -17,7 +17,7 @@ Sigmoid 形状 :::::::: - - **x** (Tensor)- N-D tensor,可以支持的数据类型是float16,float32,float64。 + - **x** (Tensor)- N-D tensor,可以支持的数据类型是 float16,float32,float64。 返回 :::::::: diff --git a/docs/api/paddle/nn/Silu_cn.rst b/docs/api/paddle/nn/Silu_cn.rst index f23ba4c9ff5..9ccc0690a7d 100644 --- a/docs/api/paddle/nn/Silu_cn.rst +++ b/docs/api/paddle/nn/Silu_cn.rst @@ -4,7 +4,7 @@ Silu ------------------------------- .. py:class:: paddle.nn.Silu(name=None) -Silu激活层。计算公式如下: +Silu 激活层。计算公式如下: .. 
math:: @@ -18,8 +18,8 @@ Silu激活层。计算公式如下: 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/SimpleRNNCell_cn.rst b/docs/api/paddle/nn/SimpleRNNCell_cn.rst index 49de0da7b38..49151b37861 100644 --- a/docs/api/paddle/nn/SimpleRNNCell_cn.rst +++ b/docs/api/paddle/nn/SimpleRNNCell_cn.rst @@ -9,7 +9,7 @@ SimpleRNNCell **简单循环神经网络单元** -该OP是简单循环神经网络单元(SimpleRNNCell),根据当前时刻输入x(t)和上一时刻状态h(t-1)计算当前时刻输出y(t)并更新状态h(t)。 +该 OP 是简单循环神经网络单元(SimpleRNNCell),根据当前时刻输入 x(t)和上一时刻状态 h(t-1)计算当前时刻输出 y(t)并更新状态 h(t)。 状态更新公式如下: @@ -28,26 +28,26 @@ SimpleRNNCell - **input_size** (int) - 输入的大小。 - **hidden_size** (int) - 隐藏状态大小。 - - **activation** (str,可选) - 简单循环神经网络单元的激活函数。可以是tanh或relu。默认为tanh。 - - **weight_ih_attr** (ParamAttr,可选) - weight_ih的参数。默认为None。 - - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 + - **activation** (str,可选) - 简单循环神经网络单元的激活函数。可以是 tanh 或 relu。默认为 tanh。 + - **weight_ih_attr** (ParamAttr,可选) - weight_ih 的参数。默认为 None。 + - **weight_hh_attr** (ParamAttr,可选) - weight_hh 的参数。默认为 None。 + - **bias_ih_attr** (ParamAttr,可选) - bias_ih 的参数。默认为 None。 + - **bias_hh_attr** (ParamAttr,可选) - bias_hh 的参数。默认为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 变量 :::::::::::: - - **weight_ih** (Parameter) - input到hidden的变换矩阵的权重。形状为(hidden_size, input_size)。对应公式中的 :math:`W_{ih}`。 - - **weight_hh** (Parameter) - hidden到hidden的变换矩阵的权重。形状为(hidden_size, hidden_size)。对应公式中的 :math:`W_{hh}`。 - - **bias_ih** (Parameter) - input到hidden的变换矩阵的偏置。形状为(hidden_size, )。对应公式中的 :math:`b_{ih}`。 - - **bias_hh** (Parameter) - hidden到hidden的变换矩阵的偏置。形状为(hidden_size, )。对应公式中的 :math:`b_{hh}`。 + - **weight_ih** (Parameter) - input 到 hidden 的变换矩阵的权重。形状为(hidden_size, input_size)。对应公式中的 :math:`W_{ih}`。 + - **weight_hh** (Parameter) - hidden 到 hidden 
的变换矩阵的权重。形状为(hidden_size, hidden_size)。对应公式中的 :math:`W_{hh}`。 + - **bias_ih** (Parameter) - input 到 hidden 的变换矩阵的偏置。形状为(hidden_size, )。对应公式中的 :math:`b_{ih}`。 + - **bias_hh** (Parameter) - hidden 到 hidden 的变换矩阵的偏置。形状为(hidden_size, )。对应公式中的 :math:`b_{hh}`。 输入 :::::::::::: - **inputs** (Tensor) - 输入。形状为[batch_size, input_size],对应公式中的 :math:`x_t`。 - - **states** (Tensor,可选) - 上一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t-1}`。当state为None的时候,初始状态为全0矩阵。默认为None。 + - **states** (Tensor,可选) - 上一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t-1}`。当 state 为 None 的时候,初始状态为全 0 矩阵。默认为 None。 输出 :::::::::::: @@ -56,7 +56,7 @@ SimpleRNNCell - **new_states** (Tensor) - 新一轮的隐藏状态。形状为[batch_size, hidden_size],对应公式中的 :math:`h_{t}`。 .. Note:: - 所有的变换矩阵的权重和偏置都默认初始化为Uniform(-std, std),其中std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 + 所有的变换矩阵的权重和偏置都默认初始化为 Uniform(-std, std),其中 std = :math:`\frac{1}{\sqrt{hidden\_size}}`。对于参数初始化,详情请参考 :ref:`cn_api_fluid_ParamAttr`。 代码示例 diff --git a/docs/api/paddle/nn/SimpleRNN_cn.rst b/docs/api/paddle/nn/SimpleRNN_cn.rst index 504b8e38060..7447ae7a6ad 100644 --- a/docs/api/paddle/nn/SimpleRNN_cn.rst +++ b/docs/api/paddle/nn/SimpleRNN_cn.rst @@ -9,7 +9,7 @@ SimpleRNN **简单循环神经网络** -该OP是简单循环神经网络(SimpleRNN),根据输出序列和给定的初始状态计算返回输出序列和最终状态。在该网络中的每一层对应输入的step,每个step根据当前时刻输入x(t)和上一时刻状态h(t-1)计算当前时刻输出y(t)并更新状态h(t)。 +该 OP 是简单循环神经网络(SimpleRNN),根据输出序列和给定的初始状态计算返回输出序列和最终状态。在该网络中的每一层对应输入的 step,每个 step 根据当前时刻输入 x(t)和上一时刻状态 h(t-1)计算当前时刻输出 y(t)并更新状态 h(t)。 状态更新公式如下: @@ -24,28 +24,28 @@ SimpleRNN - **input_size** (int) - 输入 :math:`x` 的大小。 - **hidden_size** (int) - 隐藏状态 :math:`h` 大小。 - - **num_layers** (int,可选) - 循环网络的层数。例如,将层数设为2,会将两层GRU网络堆叠在一起,第二层的输入来自第一层的输出。默认为1。 - - **direction** (str,可选) - 网络迭代方向,可设置为forward或bidirect(或bidirectional)。foward指从序列开始到序列结束的单向GRU网络方向,bidirectional指从序列开始到序列结束,又从序列结束到开始的双向GRU网络方向。默认为forward。 - - **time_major** (bool,可选) - 指定input的第一个维度是否是time 
steps。如果time_major为True,则Tensor的形状为[time_steps,batch_size,input_size],否则为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。默认为False。 - - **dropout** (float,可选) - dropout概率,指的是出第一层外每层输入时的dropout概率。范围为[0, 1]。默认为0。 - - **activation** (str,可选) - 网络中每个单元的激活函数。可以是tanh或relu。默认为tanh。 - - **weight_ih_attr** (ParamAttr,可选) - weight_ih的参数。默认为None。 - - **weight_hh_attr** (ParamAttr,可选) - weight_hh的参数。默认为None。 - - **bias_ih_attr** (ParamAttr,可选) - bias_ih的参数。默认为None。 - - **bias_hh_attr** (ParamAttr,可选) - bias_hh的参数。默认为None。 + - **num_layers** (int,可选) - 循环网络的层数。例如,将层数设为 2,会将两层 GRU 网络堆叠在一起,第二层的输入来自第一层的输出。默认为 1。 + - **direction** (str,可选) - 网络迭代方向,可设置为 forward 或 bidirect(或 bidirectional)。foward 指从序列开始到序列结束的单向 GRU 网络方向,bidirectional 指从序列开始到序列结束,又从序列结束到开始的双向 GRU 网络方向。默认为 forward。 + - **time_major** (bool,可选) - 指定 input 的第一个维度是否是 time steps。如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,input_size],否则为[batch_size,time_steps,input_size]。`time_steps` 指输入序列的长度。默认为 False。 + - **dropout** (float,可选) - dropout 概率,指的是出第一层外每层输入时的 dropout 概率。范围为[0, 1]。默认为 0。 + - **activation** (str,可选) - 网络中每个单元的激活函数。可以是 tanh 或 relu。默认为 tanh。 + - **weight_ih_attr** (ParamAttr,可选) - weight_ih 的参数。默认为 None。 + - **weight_hh_attr** (ParamAttr,可选) - weight_hh 的参数。默认为 None。 + - **bias_ih_attr** (ParamAttr,可选) - bias_ih 的参数。默认为 None。 + - **bias_hh_attr** (ParamAttr,可选) - bias_hh 的参数。默认为 None。 输入 :::::::::::: - - **inputs** (Tensor) - 网络输入。如果time_major为False,则Tensor的形状为[batch_size,time_steps,input_size],如果time_major为True,则Tensor的形状为[time_steps,batch_size,input_size]。 `time_steps` 指输入序列的长度。 + - **inputs** (Tensor) - 网络输入。如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,input_size],如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,input_size]。 `time_steps` 指输入序列的长度。 - **initial_states** (Tensor,可选) - 网络的初始状态,形状为[num_layers * num_directions, batch_size, hidden_size]。如果没有给出则会以全零初始化。 - - **sequence_length** (Tensor,可选) - 指定输入序列的实际长度,形状为[batch_size],数据类型为int64或int32。在输入序列中所有time 
step不小于sequence_length的元素都会被当作填充元素处理(状态不再更新)。 + - **sequence_length** (Tensor,可选) - 指定输入序列的实际长度,形状为[batch_size],数据类型为 int64 或 int32。在输入序列中所有 time step 不小于 sequence_length 的元素都会被当作填充元素处理(状态不再更新)。 输出 :::::::::::: - - **outputs** (Tensor) - 输出,由前向和后向cell的输出拼接得到。如果time_major为False,则Tensor的形状为[batch_size,time_steps,num_directions * hidden_size],如果time_major为True,则Tensor的形状为[time_steps,batch_size,num_directions * hidden_size],当direction设置为bidirectional时,num_directions等于2,否则等于1。 `time_steps` 指输出序列的长度。 - - **final_states** (Tensor) - 最终状态。形状为[num_layers * num_directions, batch_size, hidden_size],当direction设置为bidirectional时,num_directions等于2,返回值的前向和后向的状态的索引是0,2,4,6..。和1,3,5,7...,否则等于1。 + - **outputs** (Tensor) - 输出,由前向和后向 cell 的输出拼接得到。如果 time_major 为 False,则 Tensor 的形状为[batch_size,time_steps,num_directions * hidden_size],如果 time_major 为 True,则 Tensor 的形状为[time_steps,batch_size,num_directions * hidden_size],当 direction 设置为 bidirectional 时,num_directions 等于 2,否则等于 1。 `time_steps` 指输出序列的长度。 + - **final_states** (Tensor) - 最终状态。形状为[num_layers * num_directions, batch_size, hidden_size],当 direction 设置为 bidirectional 时,num_directions 等于 2,返回值的前向和后向的状态的索引是 0,2,4,6..。和 1,3,5,7...,否则等于 1。 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/SmoothL1Loss_cn.rst b/docs/api/paddle/nn/SmoothL1Loss_cn.rst index 532faa6b87e..731b6fb0120 100644 --- a/docs/api/paddle/nn/SmoothL1Loss_cn.rst +++ b/docs/api/paddle/nn/SmoothL1Loss_cn.rst @@ -5,8 +5,8 @@ SmoothL1Loss .. py:class:: paddle.nn.SmoothL1Loss(reduction='mean', delta=1.0, name=None) -该OP计算输入input和标签label间的SmoothL1损失,如果逐个元素的绝对误差低于1,则创建使用平方项的条件 -,否则为L1损失。在某些情况下,它可以防止爆炸梯度,也称为Huber损失,该损失函数的数学计算公式如下: +该 OP 计算输入 input 和标签 label 间的 SmoothL1 损失,如果逐个元素的绝对误差低于 1,则创建使用平方项的条件 +,否则为 L1 损失。在某些情况下,它可以防止爆炸梯度,也称为 Huber 损失,该损失函数的数学计算公式如下: .. 
math:: loss(x,y) = \frac{1}{n}\sum_{i}z_i @@ -22,14 +22,14 @@ SmoothL1Loss 参数 :::::::::: - - **reduction** (string,可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。 - - **delta** (string,可选): SmoothL1Loss损失的阈值参数,用于控制Huber损失对线性误差或平方误差的侧重。数据类型为float32。默认值= 1.0。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,数据类型为 string,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss 均值。设置为 `sum` 时,计算 `mini-batch` loss 的总和。设置为 `none` 时,则返回 loss Tensor。 + - **delta** (string,可选): SmoothL1Loss 损失的阈值参数,用于控制 Huber 损失对线性误差或平方误差的侧重。数据类型为 float32。默认值= 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 输入 :::::::::: - - **input** (Tensor):输入 `Tensor`,数据类型为float32。其形状为 :math:`[N, C]`,其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。 - - **label** (Tensor):输入input对应的标签值,数据类型为float32。数据类型和input相同。 + - **input** (Tensor):输入 `Tensor`,数据类型为 float32。其形状为 :math:`[N, C]`,其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。 + - **label** (Tensor):输入 input 对应的标签值,数据类型为 float32。数据类型和 input 相同。 diff --git a/docs/api/paddle/nn/SoftMarginLoss_cn.rst b/docs/api/paddle/nn/SoftMarginLoss_cn.rst index 6a0cfbc8730..ee828a43d75 100644 --- a/docs/api/paddle/nn/SoftMarginLoss_cn.rst +++ b/docs/api/paddle/nn/SoftMarginLoss_cn.rst @@ -14,23 +14,23 @@ SoftMarginloss \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()} -最后,添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 +最后,添加 `reduce` 操作到前面的输出 Out 上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 参数 ::::::::: - - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 Loss 
的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始Loss。 - - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始 Loss。 + - **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name` 。 形状 ::::::::: - - **input** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, *]` , 其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 - - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 返回 ::::::::: - 返回计算SoftMarginLoss的可调用对象。 + 返回计算 SoftMarginLoss 的可调用对象。 代码示例 diff --git a/docs/api/paddle/nn/Softmax_cn.rst b/docs/api/paddle/nn/Softmax_cn.rst index 2efd7b57f99..ffb4b93010a 100644 --- a/docs/api/paddle/nn/Softmax_cn.rst +++ b/docs/api/paddle/nn/Softmax_cn.rst @@ -4,27 +4,27 @@ Softmax ------------------------------- .. 
py:class:: paddle.nn.Softmax(axis=-1, name=None) -Softmax激活层,OP的计算过程如下: +Softmax 激活层,OP 的计算过程如下: -步骤1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维; +步骤 1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维; -步骤2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为1; +步骤 2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax 操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为 1; -步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。 +步骤 3:softmax 操作执行完成后,执行步骤 1 和步骤 2 的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。 -上述步骤2中softmax操作计算过程如下: +上述步骤 2 中 softmax 操作计算过程如下: - - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 + - 对于二维矩阵的每一行,计算 K 维向量(K 是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 - - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。 + - 指定位置指数值与全部位置指数值之和的比值就是 softmax 操作的输出。 -对于二维矩阵中的第i行和第j列有: +对于二维矩阵中的第 i 行和第 j 列有: .. math:: Softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(exp(x[i, j])} -- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作) +- 示例 1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做 softmax 操作) .. code-block:: text @@ -52,7 +52,7 @@ Softmax激活层,OP的计算过程如下: [0.0320586 , 0.08714432, 0.23688282, 0.64391426], [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] -- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作) +- 示例 2(矩阵一共有三维。axis = 1,表示沿着第二维做 softmax 操作) .. 
code-block:: text @@ -82,13 +82,13 @@ Softmax激活层,OP的计算过程如下: 参数 :::::::::: - - axis (int,可选) - 指定对输入Tensor进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入Tensor的维度,``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - axis (int,可选) - 指定对输入 Tensor 进行运算的轴。``axis`` 的有效范围是[-D, D),D 是输入 Tensor 的维度,``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/Softplus_cn.rst b/docs/api/paddle/nn/Softplus_cn.rst index ec206917d46..b9a6f6fd626 100644 --- a/docs/api/paddle/nn/Softplus_cn.rst +++ b/docs/api/paddle/nn/Softplus_cn.rst @@ -4,25 +4,25 @@ Softplus ------------------------------- .. py:class:: paddle.nn.Softplus(beta=1, threshold=20, name=None) -Softplus激活层 +Softplus 激活层 .. math:: Softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\ - \text{为了保证数值稳定性,当}\,beta * x > threshold\,\text{时,函数转变为线性函数x}。 + \text{为了保证数值稳定性,当}\,beta * x > threshold\,\text{时,函数转变为线性函数 x}。 其中,:math:`x` 为输入的 Tensor 参数 :::::::::: - - beta (float,可选) - Softplus激活计算公式中的beta值。默认值为1。 - - threshold (float,可选) - Softplus激活计算公式中的threshold值。默认值为20。 + - beta (float,可选) - Softplus 激活计算公式中的 beta 值。默认值为 1。 + - threshold (float,可选) - Softplus 激活计算公式中的 threshold 值。默认值为 20。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/Softshrink_cn.rst b/docs/api/paddle/nn/Softshrink_cn.rst index bf055bb352b..507b3968148 100644 --- a/docs/api/paddle/nn/Softshrink_cn.rst +++ b/docs/api/paddle/nn/Softshrink_cn.rst @@ -4,7 +4,7 @@ Softshrink ------------------------------- .. py:class:: paddle.nn.Softshrink(threshold=0.5, name=None) -Softshrink激活层 +Softshrink 激活层 .. 
math:: @@ -18,13 +18,13 @@ Softshrink激活层 参数 :::::::::: - - threshold (float,可选) - Softshrink激活计算公式中的threshold值,必须大于等于零。默认值为0.5。 + - threshold (float,可选) - Softshrink 激活计算公式中的 threshold 值,必须大于等于零。默认值为 0.5。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/Softsign_cn.rst b/docs/api/paddle/nn/Softsign_cn.rst index 1fbdbfe545a..a09e973f8b0 100644 --- a/docs/api/paddle/nn/Softsign_cn.rst +++ b/docs/api/paddle/nn/Softsign_cn.rst @@ -4,7 +4,7 @@ Softsign ------------------------------- .. py:class:: paddle.nn.Softsign(name=None) -Softsign激活层 +Softsign 激活层 .. math:: @@ -18,8 +18,8 @@ Softsign激活层 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/SpectralNorm_cn.rst b/docs/api/paddle/nn/SpectralNorm_cn.rst index 7eb0f46aa0e..9405e38b429 100644 --- a/docs/api/paddle/nn/SpectralNorm_cn.rst +++ b/docs/api/paddle/nn/SpectralNorm_cn.rst @@ -6,18 +6,18 @@ SpectralNorm .. py:class:: paddle.nn.SpectralNorm(weight_shape, dim=0, power_iters=1, eps=1e-12, name=None, dtype="float32") -该接口用于构建 ``SpectralNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。其中实现了谱归一化层的功能,用于计算fc、conv1d、conv2d、conv3d层的权重参数的谱正则值,输入权重参数应分别为2-D, 3-D, 4-D, 5-D张量,输出张量与输入张量维度相同。谱特征值计算方式如下: +该接口用于构建 ``SpectralNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。其中实现了谱归一化层的功能,用于计算 fc、conv1d、conv2d、conv3d 层的权重参数的谱正则值,输入权重参数应分别为 2-D, 3-D, 4-D, 5-D 张量,输出张量与输入张量维度相同。谱特征值计算方式如下: -步骤1:生成形状为[H]的向量U,以及形状为[W]的向量V,其中H是输入权重张量的第 ``dim`` 个维度,W是剩余维度的乘积。 +步骤 1:生成形状为[H]的向量 U,以及形状为[W]的向量 V,其中 H 是输入权重张量的第 ``dim`` 个维度,W 是剩余维度的乘积。 -步骤2: ``power_iters`` 应该是一个正整数,用U和V迭代计算 ``power_iters`` 轮,迭代步骤如下。 +步骤 2: ``power_iters`` 应该是一个正整数,用 U 和 V 迭代计算 ``power_iters`` 轮,迭代步骤如下。 .. 
math:: \mathbf{v} &:= \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}\\ \mathbf{u} &:= \frac{\mathbf{W}^{T} \mathbf{v}}{\|\mathbf{W}^{T} \mathbf{v}\|_2} -步骤3:计算 :math:`\sigma(\mathbf{W})` 并特征值值归一化。 +步骤 3:计算 :math:`\sigma(\mathbf{W})` 并特征值值归一化。 .. math:: \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ @@ -28,8 +28,8 @@ SpectralNorm 参数 ::::::::: - - **weight_shape** (list 或 tuple) - 权重参数的shape。 - - **dim** (int,可选) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果input(weight)是fc层的权重,则应设置为0;如果input(weight)是conv层的权重,则应设置为1。默认值:0。 + - **weight_shape** (list 或 tuple) - 权重参数的 shape。 + - **dim** (int,可选) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果 input(weight)是 fc 层的权重,则应设置为 0;如果 input(weight)是 conv 层的权重,则应设置为 1。默认值:0。 - **power_iters** (int,可选) - 将用于计算的 ``SpectralNorm`` 功率迭代次数,默认值:1。 - **eps** (float,可选) - ``eps`` 用于保证计算规范中的数值稳定性,分母会加上 ``eps`` 防止除零。默认值:1e-12。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -38,7 +38,7 @@ SpectralNorm 形状 ::::::::: -- input:任意形状的Tensor。 +- input:任意形状的 Tensor。 - output:和输入形状一样。 代码示例 diff --git a/docs/api/paddle/nn/Swish_cn.rst b/docs/api/paddle/nn/Swish_cn.rst index 7c3b74cbbae..d62628b909f 100644 --- a/docs/api/paddle/nn/Swish_cn.rst +++ b/docs/api/paddle/nn/Swish_cn.rst @@ -4,7 +4,7 @@ Swish ------------------------------- .. py:class:: paddle.nn.Swish(name=None) -Swish激活层 +Swish 激活层 .. math:: @@ -18,8 +18,8 @@ Swish激活层 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/SyncBatchNorm_cn.rst b/docs/api/paddle/nn/SyncBatchNorm_cn.rst index fcad169fed3..a50f7170732 100644 --- a/docs/api/paddle/nn/SyncBatchNorm_cn.rst +++ b/docs/api/paddle/nn/SyncBatchNorm_cn.rst @@ -5,18 +5,18 @@ SyncBatchNorm .. 
py:class:: paddle.nn.SyncBatchNorm(num_features, epsilon=1e-5, momentum=0.9, weight_attr=None, bias_attr=None, data_format='NCHW', name=None) -该接口用于构建 ``SyncBatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。实现了跨卡GPU同步的批归一化(Cross-GPU Synchronized Batch Normalization Layer)的功能,可用在其他层(类似卷积层和全连接层)之后进行归一化操作。根据所有GPU同一批次的数据按照通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ +该接口用于构建 ``SyncBatchNorm`` 类的一个可调用对象,具体用法参照 ``代码示例``。实现了跨卡 GPU 同步的批归一化(Cross-GPU Synchronized Batch Normalization Layer)的功能,可用在其他层(类似卷积层和全连接层)之后进行归一化操作。根据所有 GPU 同一批次的数据按照通道计算的均值和方差进行归一化。更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ -当模型处于训练模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是所有GPU上同一minibatch的统计数据。计算公式如下: +当模型处于训练模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是所有 GPU 上同一 minibatch 的统计数据。计算公式如下: .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mini-batch-mean \\ \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// mini-batch-variance \\ -- :math:`x`:所有GPU上同一批输入数据 -- :math:`m`:所有GPU上同一批次数据的大小 +- :math:`x`:所有 GPU 上同一批输入数据 +- :math:`m`:所有 GPU 上同一批次数据的大小 -当模型处于评估模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean和moving_variance,这两个统计量通常来自预先训练好的模型)。计算公式如下: +当模型处于评估模式时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 是全局(或运行)统计数据(moving_mean 和 moving_variance,这两个统计量通常来自预先训练好的模型)。计算公式如下: .. 
math:: @@ -44,14 +44,14 @@ SyncBatchNorm - **num_features** (int) - 指明输入 ``Tensor`` 的通道数量。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果设置为 ``False``,则表示本层没有可训练的权重参数。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。如果设置为 ``False``,则表示本层没有可训练的偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **weight_attr** (ParamAttr|bool,可选) - 指定权重参数属性的对象。如果设置为 ``False``,则表示本层没有可训练的权重参数。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选) - 指定偏置参数属性的对象。如果设置为 ``False``,则表示本层没有可训练的偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 形状 :::::::::::: - input:一个二维到五维的 ``Tensor`` 。 - - output:和input 相同形状的 ``Tensor`` 。 + - output:和 input 相同形状的 ``Tensor`` 。 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/Tanh_cn.rst b/docs/api/paddle/nn/Tanh_cn.rst index 27f82d5a902..3249e715aac 100644 --- a/docs/api/paddle/nn/Tanh_cn.rst +++ b/docs/api/paddle/nn/Tanh_cn.rst @@ -4,7 +4,7 @@ Tanh ------------------------------- .. py:class:: paddle.nn.Tanh(name=None) -Tanh激活层 +Tanh 激活层 .. math:: Tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} @@ -17,8 +17,8 @@ Tanh激活层 形状 :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/Tanhshrink_cn.rst b/docs/api/paddle/nn/Tanhshrink_cn.rst index 2db0da03418..4631cad8b17 100644 --- a/docs/api/paddle/nn/Tanhshrink_cn.rst +++ b/docs/api/paddle/nn/Tanhshrink_cn.rst @@ -4,7 +4,7 @@ Tanhshrink ------------------------------- .. py:class:: paddle.nn.Tanhshrink(name=None) -Tanhshrink激活层 +Tanhshrink 激活层 .. 
math:: @@ -18,8 +18,8 @@ Tanhshrink激活层 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/ThresholdedReLU_cn.rst b/docs/api/paddle/nn/ThresholdedReLU_cn.rst index 565a284c104..61f0a987839 100644 --- a/docs/api/paddle/nn/ThresholdedReLU_cn.rst +++ b/docs/api/paddle/nn/ThresholdedReLU_cn.rst @@ -4,7 +4,7 @@ ThresholdedReLU ------------------------------- .. py:class:: paddle.nn.ThresholdedReLU(threshold=1.0, name=None) -Thresholded ReLU激活层 +Thresholded ReLU 激活层 .. math:: @@ -17,13 +17,13 @@ Thresholded ReLU激活层 参数 :::::::::: - - threshold (float,可选) - ThresholdedReLU激活计算公式中的threshold值。默认值为1.0。 + - threshold (float,可选) - ThresholdedReLU 激活计算公式中的 threshold 值。默认值为 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状: :::::::::: - - input:任意形状的Tensor。 - - output:和input具有相同形状的Tensor。 + - input:任意形状的 Tensor。 + - output:和 input 具有相同形状的 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/TransformerDecoderLayer_cn.rst b/docs/api/paddle/nn/TransformerDecoderLayer_cn.rst index 8e94c99e96f..6f80d5b73bc 100644 --- a/docs/api/paddle/nn/TransformerDecoderLayer_cn.rst +++ b/docs/api/paddle/nn/TransformerDecoderLayer_cn.rst @@ -7,22 +7,22 @@ TransformerDecoderLayer -**Transformer解码器层** +**Transformer 解码器层** -Transformer解码器层由三个子层组成:多头自注意力机制、编码-解码交叉注意力机制(encoder-decoder cross attention)和前馈神经网络。如果 ``normalize_before`` 为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行dropout和残差连接(residual connection)。否则(即 ``normalize_before`` 为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行dropout、残差连接(residual connection)和层标准化(Layer Normalization)。 +Transformer 解码器层由三个子层组成:多头自注意力机制、编码-解码交叉注意力机制(encoder-decoder cross attention)和前馈神经网络。如果 ``normalize_before`` 为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行 dropout 和残差连接(residual connection)。否则(即 ``normalize_before`` 为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行 dropout、残差连接(residual 
connection)和层标准化(Layer Normalization)。 参数 :::::::::::: - **d_model** (int) - 输入输出的维度。 - - **nhead** (int) - 多头注意力机制的Head数量。 + - **nhead** (int) - 多头注意力机制的 Head 数量。 - **dim_feedforward** (int) - 前馈神经网络中隐藏层的大小。 - - **dropout** (float,可选) - 对三个子层的输出进行处理的dropout值。默认值:0.1。 + - **dropout** (float,可选) - 对三个子层的输出进行处理的 dropout 值。默认值:0.1。 - **activation** (str,可选) - 前馈神经网络的激活函数。默认值:``relu``。 - **attn_dropout** (float,可选) - 多头自注意力机制中对注意力目标的随机失活率。如果为 ``None`` 则 ``attn_dropout = dropout``。默认值:``None``。 - - **act_dropout** (float,可选) - 前馈神经网络的激活函数后的dropout。如果为 ``None`` 则 ``act_dropout = dropout``。默认值:``None``。 - - **normalize_before** (bool,可选) - 设置对每个子层的输入输出的处理。如果为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行dropout和残差连接(residual connection)。否则(即为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行dropout、残差连接(residual connection)和层标准化(Layer Normalization)。默认值:``False``。 + - **act_dropout** (float,可选) - 前馈神经网络的激活函数后的 dropout。如果为 ``None`` 则 ``act_dropout = dropout``。默认值:``None``。 + - **normalize_before** (bool,可选) - 设置对每个子层的输入输出的处理。如果为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行 dropout 和残差连接(residual connection)。否则(即为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行 dropout、残差连接(residual connection)和层标准化(Layer Normalization)。默认值:``False``。 - **weight_attr** (ParamAttr|tuple,可选) - 指定权重参数属性的对象。如果是 ``tuple``,多头自注意力机制的权重参数属性使用 ``weight_attr[0]``,编码-解码交叉注意力机制的权重参数属性使用 ``weight_attr[1]``,前馈神经网络的权重参数属性使用 ``weight_attr[2]``。如果该值是 ``ParamAttr``,则多头自注意力机制、编码-解码交叉注意力机制和前馈神经网络的权重参数属性都使用 ``ParamAttr``。默认值:``None``,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **bias_attr** (ParamAttr|tuple|bool,可选)- 指定偏置参数属性的对象。如果是 ``tuple``,多头自注意力机制的偏置参数属性使用 ``bias_attr[0]``,编码-解码交叉注意力机制的偏置参数属性使用 ``bias_attr[1]``,前馈神经网络的偏置参数属性使用 ``bias_attr[2]``。如果该值是 ``ParamAttr``,则多头自注意力机制、编码-解码交叉注意力机制和前馈神经网络的偏置参数属性都使用 ``ParamAttr``。如果该参数为 ``bool`` 类型,只支持为 ``False``,表示没有偏置参数。默认值:``None``,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 diff --git a/docs/api/paddle/nn/TransformerDecoder_cn.rst 
b/docs/api/paddle/nn/TransformerDecoder_cn.rst index 9e482ea5eb0..6dc29b3deca 100644 --- a/docs/api/paddle/nn/TransformerDecoder_cn.rst +++ b/docs/api/paddle/nn/TransformerDecoder_cn.rst @@ -7,15 +7,15 @@ TransformerDecoder -**Transformer解码器** +**Transformer 解码器** -Transformer解码器由多个Transformer解码器层(``TransformerDecoderLayer``)叠加组成的。 +Transformer 解码器由多个 Transformer 解码器层(``TransformerDecoderLayer``)叠加组成的。 参数 :::::::::::: - - **decoder_layer** (Layer) - ``TransformerDecoderLayer`` 的一个实例,作为Transformer解码器的第一层,其他层将根据它的配置进行构建。 + - **decoder_layer** (Layer) - ``TransformerDecoderLayer`` 的一个实例,作为 Transformer 解码器的第一层,其他层将根据它的配置进行构建。 - **num_layers** (int) - ``TransformerDecoderLayer`` 层的叠加数量。 - **norm** (LayerNorm,可选) - 层标准化(Layer Normalization)。如果提供该参数,将对解码器的最后一层的输出进行层标准化。 diff --git a/docs/api/paddle/nn/TransformerEncoderLayer_cn.rst b/docs/api/paddle/nn/TransformerEncoderLayer_cn.rst index eb4abbcc80b..afb909eddc4 100644 --- a/docs/api/paddle/nn/TransformerEncoderLayer_cn.rst +++ b/docs/api/paddle/nn/TransformerEncoderLayer_cn.rst @@ -7,24 +7,24 @@ TransformerEncoderLayer -**Transformer编码器层** +**Transformer 编码器层** -Transformer编码器层由两个子层组成:多头自注意力机制和前馈神经网络。如果 ``normalize_before`` 为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行dropout和残差连接(residual connection)。否则(即 ``normalize_before`` 为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行dropout、残差连接(residual connection)和层标准化(Layer Normalization)。 +Transformer 编码器层由两个子层组成:多头自注意力机制和前馈神经网络。如果 ``normalize_before`` 为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行 dropout 和残差连接(residual connection)。否则(即 ``normalize_before`` 为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行 dropout、残差连接(residual connection)和层标准化(Layer Normalization)。 参数 :::::::::::: - **d_model** (int) - 输入输出的维度。 - - **nhead** (int) - 多头注意力机制的Head数量。 + - **nhead** (int) - 多头注意力机制的 Head 数量。 - **dim_feedforward** (int) - 前馈神经网络中隐藏层的大小。 - - **dropout** (float,可选) - 对两个子层的输出进行处理的dropout值。默认值:0.1。 + - **dropout** (float,可选) - 对两个子层的输出进行处理的 dropout 值。默认值:0.1。 - 
**activation** (str,可选) - 前馈神经网络的激活函数。默认值:``relu``。 - **attn_dropout** (float,可选) - 多头自注意力机制中对注意力目标的随机失活率。如果为 ``None`` 则 ``attn_dropout = dropout``。默认值:``None``。 - - **act_dropout** (float,可选) - 前馈神经网络的激活函数后的dropout。如果为 ``None`` 则 ``act_dropout = dropout``。默认值:``None``。 - - **normalize_before** (bool,可选) - 设置对每个子层的输入输出的处理。如果为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行dropout和残差连接(residual connection)。否则(即为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行dropout、残差连接(residual connection)和层标准化(Layer Normalization)。默认值:``False``。 + - **act_dropout** (float,可选) - 前馈神经网络的激活函数后的 dropout。如果为 ``None`` 则 ``act_dropout = dropout``。默认值:``None``。 + - **normalize_before** (bool,可选) - 设置对每个子层的输入输出的处理。如果为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行 dropout 和残差连接(residual connection)。否则(即为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行 dropout、残差连接(residual connection)和层标准化(Layer Normalization)。默认值:``False``。 - **weight_attr** (ParamAttr|tuple,可选) - 指定权重参数属性的对象。如果是 ``tuple``,多头自注意力机制的权重参数属性使用 ``weight_attr[0]``,前馈神经网络的权重参数属性使用 ``weight_attr[1]``。如果参数值是 ``ParamAttr``,则多头自注意力机制和前馈神经网络的权重参数属性都使用 ``ParamAttr``。默认值:``None``,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|tuple|bool,可选)- 指定偏置参数属性的对象。如果是 ``tuple``,多头自注意力机制的偏置参数属性使用 ``bias_attr[0]``,前馈神经网络的偏置参数属性使用 ``bias_attr[1]``。如果该参数值是 ``ParamAttr``,则多头自注意力机制和前馈神经网络的偏置参数属性都使用 ``ParamAttr``。如果该参数为 ``bool`` 类型,只支持为 ``False``,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|tuple|bool,可选)- 指定偏置参数属性的对象。如果是 ``tuple``,多头自注意力机制的偏置参数属性使用 ``bias_attr[0]``,前馈神经网络的偏置参数属性使用 ``bias_attr[1]``。如果该参数值是 ``ParamAttr``,则多头自注意力机制和前馈神经网络的偏置参数属性都使用 ``ParamAttr``。如果该参数为 ``bool`` 类型,只支持为 ``False``,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 代码示例 diff --git a/docs/api/paddle/nn/TransformerEncoder_cn.rst b/docs/api/paddle/nn/TransformerEncoder_cn.rst index 490cefa8874..3dacaeb8d92 100644 --- a/docs/api/paddle/nn/TransformerEncoder_cn.rst 
+++ b/docs/api/paddle/nn/TransformerEncoder_cn.rst @@ -7,15 +7,15 @@ TransformerEncoder -**Transformer编码器** +**Transformer 编码器** -Transformer编码器由多个Transformer编码器层(``TransformerEncoderLayer``)叠加组成的。 +Transformer 编码器由多个 Transformer 编码器层(``TransformerEncoderLayer``)叠加组成的。 参数 :::::::::::: - - **encoder_layer** (Layer) - ``TransformerEncoderLayer`` 的一个实例,作为Transformer编码器的第一层,其他层将根据它的配置进行构建。 + - **encoder_layer** (Layer) - ``TransformerEncoderLayer`` 的一个实例,作为 Transformer 编码器的第一层,其他层将根据它的配置进行构建。 - **num_layers** (int) - ``TransformerEncoderLayer`` 层的叠加数量。 - **norm** (LayerNorm,可选) - 层标准化(Layer Normalization)。如果提供该参数,将对编码器的最后一层的输出进行层标准化。 diff --git a/docs/api/paddle/nn/Transformer_cn.rst b/docs/api/paddle/nn/Transformer_cn.rst index 1da2ab00a77..140760e9c38 100644 --- a/docs/api/paddle/nn/Transformer_cn.rst +++ b/docs/api/paddle/nn/Transformer_cn.rst @@ -7,30 +7,30 @@ Transformer -**Transformer模型** +**Transformer 模型** -Transformer模型由一个 ``TransformerEncoder`` 实例和一个 ``TransformerDecoder`` 实例组成,不包含embedding层和输出层。 +Transformer 模型由一个 ``TransformerEncoder`` 实例和一个 ``TransformerDecoder`` 实例组成,不包含 embedding 层和输出层。 细节可参考论文 `Attention is all you need `_ 。 -用户可以使用相应的参数配置模型结构。请注意 ``normalize_before`` 的用法与某些类似Transformer的模型例如BERT和GPT2的用法不同,它表示在哪里(多头注意力机制或前馈神经网络的输入还是输出)进行层标准化(Layer Normalization)。该模型默认的结构是对每个子层的output进行层归一化,并在最后一个编码器/解码器的输出上进行另一个层归一化操作。 +用户可以使用相应的参数配置模型结构。请注意 ``normalize_before`` 的用法与某些类似 Transformer 的模型例如 BERT 和 GPT2 的用法不同,它表示在哪里(多头注意力机制或前馈神经网络的输入还是输出)进行层标准化(Layer Normalization)。该模型默认的结构是对每个子层的 output 进行层归一化,并在最后一个编码器/解码器的输出上进行另一个层归一化操作。 参数 :::::::::::: - **d_model** (int,可选) - 编码器和解码器的输入输出的维度。默认值:512。 - - **nhead** (int,可选) - 多头注意力机制的Head数量。默认值:8。 + - **nhead** (int,可选) - 多头注意力机制的 Head 数量。默认值:8。 - **num_encoder_layers** (int,可选) - 编码器中 ``TransformerEncoderLayer`` 的层数。默认值:6。 - **num_decoder_layers** (int,可选) - 解码器中 ``TransformerDecoderLayer`` 的层数。默认值:6。 - **dim_feedforward** (int,可选) - 前馈神经网络中隐藏层的大小。默认值:2048。 - - **dropout** (float,可选) - 
对编码器和解码器中每个子层的输出进行处理的dropout值。默认值:0.1。 + - **dropout** (float,可选) - 对编码器和解码器中每个子层的输出进行处理的 dropout 值。默认值:0.1。 - **activation** (str,可选) - 前馈神经网络的激活函数。默认值:``relu``。 - **attn_dropout** (float,可选) - 多头自注意力机制中对注意力目标的随机失活率。如果为 ``None`` 则 ``attn_dropout = dropout``。默认值:``None``。 - - **act_dropout** (float,可选) - 前馈神经网络的激活函数后的dropout。如果为 ``None`` 则 ``act_dropout = dropout``。默认值:``None``。 - - **normalize_before** (bool,可选) - 设置对编码器解码器的每个子层的输入输出的处理。如果为 ``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行dropout和残差连接(residual connection)。否则(即为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行dropout、残差连接(residual connection)和层标准化(Layer Normalization)。默认值:``False``。 - - **weight_attr** (ParamAttr|tuple,可选) - 指定权重参数属性的对象。如果是 ``tuple``,则只支持 ``tuple`` 长度为1、2或3的情况。如果 ``tuple`` 长度为3,多头自注意力机制的权重参数属性使用 ``weight_attr[0]``,解码器的编码-解码交叉注意力机制的权重参数属性使用 ``weight_attr[1]``,前馈神经网络的权重参数属性使用 ``weight_attr[2]``;如果 ``tuple`` 的长度是2,多头自注意力机制和解码器的编码-解码交叉注意力机制的权重参数属性使用 ``weight_attr[0]``,前馈神经网络的权重参数属性使用 ``weight_attr[1]``;如果 ``tuple`` 的长度是1,多头自注意力机制、解码器的编码-解码交叉注意力机制和前馈神经网络的权重参数属性都使用 ``weight_attr[0]``。如果该参数值是 ``ParamAttr``,则多头自注意力机制、解码器的编码-解码交叉注意力机制和前馈神经网络的权重参数属性都使用 ``ParamAttr``。默认值:``None``,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|tuple|bool,可选)- 指定偏置参数属性的对象。如果是 ``tuple``,则只支持 ``tuple`` 长度为1、2或3的情况。如果 ``tuple`` 长度为3,多头自注意力机制的偏置参数属性使用 ``bias_attr[0]``,解码器的编码-解码交叉注意力机制的偏置参数属性使用 ``bias_attr[1]``,前馈神经网络的偏置参数属性使用 ``bias_attr[2]``;如果 ``tuple`` 的长度是2,多头自注意力机制和解码器的编码-解码交叉注意力机制的偏置参数属性使用 ``bias_attr[0]``,前馈神经网络的偏置参数属性使用 ``bias_attr[1]``;如果 ``tuple`` 的长度是1,多头自注意力机制、解码器的编码-解码交叉注意力机制和前馈神经网络的偏置参数属性都使用 ``bias_attr[0]``。如果该参数值是 ``ParamAttr``,则多头自注意力机制、解码器的编码-解码交叉注意力机制和前馈神经网络的偏置参数属性都使用 ``ParamAttr``。如果该参数为 ``bool`` 类型,只支持为 ``False``,表示没有偏置参数。默认值:``None``,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act_dropout** (float,可选) - 前馈神经网络的激活函数后的 dropout。如果为 ``None`` 则 ``act_dropout = dropout``。默认值:``None``。 + - **normalize_before** (bool,可选) - 设置对编码器解码器的每个子层的输入输出的处理。如果为 
``True``,则对每个子层的输入进行层标准化(Layer Normalization),对每个子层的输出进行 dropout 和残差连接(residual connection)。否则(即为 ``False``),则对每个子层的输入不进行处理,只对每个子层的输出进行 dropout、残差连接(residual connection)和层标准化(Layer Normalization)。默认值:``False``。 + - **weight_attr** (ParamAttr|tuple,可选) - 指定权重参数属性的对象。如果是 ``tuple``,则只支持 ``tuple`` 长度为 1、2 或 3 的情况。如果 ``tuple`` 长度为 3,多头自注意力机制的权重参数属性使用 ``weight_attr[0]``,解码器的编码-解码交叉注意力机制的权重参数属性使用 ``weight_attr[1]``,前馈神经网络的权重参数属性使用 ``weight_attr[2]``;如果 ``tuple`` 的长度是 2,多头自注意力机制和解码器的编码-解码交叉注意力机制的权重参数属性使用 ``weight_attr[0]``,前馈神经网络的权重参数属性使用 ``weight_attr[1]``;如果 ``tuple`` 的长度是 1,多头自注意力机制、解码器的编码-解码交叉注意力机制和前馈神经网络的权重参数属性都使用 ``weight_attr[0]``。如果该参数值是 ``ParamAttr``,则多头自注意力机制、解码器的编码-解码交叉注意力机制和前馈神经网络的权重参数属性都使用 ``ParamAttr``。默认值:``None``,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|tuple|bool,可选)- 指定偏置参数属性的对象。如果是 ``tuple``,则只支持 ``tuple`` 长度为 1、2 或 3 的情况。如果 ``tuple`` 长度为 3,多头自注意力机制的偏置参数属性使用 ``bias_attr[0]``,解码器的编码-解码交叉注意力机制的偏置参数属性使用 ``bias_attr[1]``,前馈神经网络的偏置参数属性使用 ``bias_attr[2]``;如果 ``tuple`` 的长度是 2,多头自注意力机制和解码器的编码-解码交叉注意力机制的偏置参数属性使用 ``bias_attr[0]``,前馈神经网络的偏置参数属性使用 ``bias_attr[1]``;如果 ``tuple`` 的长度是 1,多头自注意力机制、解码器的编码-解码交叉注意力机制和前馈神经网络的偏置参数属性都使用 ``bias_attr[0]``。如果该参数值是 ``ParamAttr``,则多头自注意力机制、解码器的编码-解码交叉注意力机制和前馈神经网络的偏置参数属性都使用 ``ParamAttr``。如果该参数为 ``bool`` 类型,只支持为 ``False``,表示没有偏置参数。默认值:``None``,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **custom_encoder** (Layer,可选) - 若提供该参数,则将 ``custom_encoder`` 作为编码器。默认值:``None``。 - **custom_decoder** (Layer,可选) - 若提供该参数,则将 ``custom_decoder`` 作为解码器。默认值:``None``。 diff --git a/docs/api/paddle/nn/TripletMarginLoss_cn.rst b/docs/api/paddle/nn/TripletMarginLoss_cn.rst index 41386737e7a..01450f3450c 100644 --- a/docs/api/paddle/nn/TripletMarginLoss_cn.rst +++ b/docs/api/paddle/nn/TripletMarginLoss_cn.rst @@ -5,7 +5,7 @@ TripletMarginLoss .. 
py:class:: paddle.nn.TripletMarginLoss(margin: float = 1.0, p: float = 2., epsilon: float = 1e-6, swap: bool = False,reduction: str = 'mean', name:str=None) -创建一个TripletMarginLoss的可调用类。通过计算输入 `input` 和 `positive` 和 `negative` 间的 `triplet margin loss` 损失,测量样本之间,即 `input` 与 `positive examples` 和 `negative examples` 的相对相似性。 +创建一个 TripletMarginLoss 的可调用类。通过计算输入 `input` 和 `positive` 和 `negative` 间的 `triplet margin loss` 损失,测量样本之间,即 `input` 与 `positive examples` 和 `negative examples` 的相对相似性。 损失函数按照下列公式计算 @@ -19,27 +19,27 @@ TripletMarginLoss d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p -``p`` 为距离函数的范数。``margin`` 为(input,positive)与(input,negative)的距离间隔,``swap`` 为True时,会比较(input,negative)和(positive,negative)的大小,并将(input,negative)换为其中较小的值,内容详见论文 `Learning shallow convolutional feature descriptors with triplet losses `_ 。 +``p`` 为距离函数的范数。``margin`` 为(input,positive)与(input,negative)的距离间隔,``swap`` 为 True 时,会比较(input,negative)和(positive,negative)的大小,并将(input,negative)换为其中较小的值,内容详见论文 `Learning shallow convolutional feature descriptors with triplet losses `_ 。 参数 ::::::::: - - **margin** (float,可选) - 手动指定间距,默认为1。 - - **p** (float,可选) - 手动指定范数,默认为2。 - - **epsilon** (float,可选) - 防止除数为0,默认为1e-6。 - - **swap** (bool,可选) - 默认为False。 - - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``、``'mean'``、``'sum'``。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始Loss。 - - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + - **margin** (float,可选) - 手动指定间距,默认为 1。 + - **p** (float,可选) - 手动指定范数,默认为 2。 + - **epsilon** (float,可选) - 防止除数为 0,默认为 1e-6。 + - **swap** (bool,可选) - 默认为 False。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``、``'mean'``、``'sum'``。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始 Loss。 + - **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name` 。 形状 ::::::::: - **input** (Tensor) - :math:`[N, *]`,其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - 
**positive** (Tensor) - :math:`[N, *]`,标签 ``positive`` 的维度、数据类型与输入 ``input`` 相同。 - **negative** (Tensor) - :math:`[N, *]`,标签 ``negative`` 的维度、数据类型与输入 ``input`` 相同。 - - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 返回 ::::::::: - 返回计算TripletMarginLoss的可调用对象。 + 返回计算 TripletMarginLoss 的可调用对象。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/TripletMarginWithDistanceLoss_cn.rst b/docs/api/paddle/nn/TripletMarginWithDistanceLoss_cn.rst index f78b7bea8a1..8444679e4b3 100644 --- a/docs/api/paddle/nn/TripletMarginWithDistanceLoss_cn.rst +++ b/docs/api/paddle/nn/TripletMarginWithDistanceLoss_cn.rst @@ -5,7 +5,7 @@ TripletMarginWithDistanceLoss .. py:class:: paddle.nn.TripletMarginWithDistanceLoss(distance_function=None, margin: float = 1.0, swap: bool = False, reduction: str = 'mean', name:str=None) -创建一个TripletMarginWithDistanceLoss的可调用类,通过计算输入 `input` 和 `positive` 和 `negative` 间的 `triplet margin loss` 损失,测量样本之间,即 `input` 与 `positive examples` 和 `negative examples` 的相对相似性。 +创建一个 TripletMarginWithDistanceLoss 的可调用类,通过计算输入 `input` 和 `positive` 和 `negative` 间的 `triplet margin loss` 损失,测量样本之间,即 `input` 与 `positive examples` 和 `negative examples` 的相对相似性。 损失函数按照下列公式计算 @@ -14,30 +14,30 @@ TripletMarginWithDistanceLoss L(input, pos, neg) = \max \{d(input_i, pos_i) - d(input_i, neg_i) + {\rm margin}, 0\} -其中的距离函数 ``distance_function`` 可以由用户自定义,使用 lambda 或是 def 都可以。如果未定义则调用2范数计算距离 +其中的距离函数 ``distance_function`` 可以由用户自定义,使用 lambda 或是 def 都可以。如果未定义则调用 2 范数计算距离 .. 
math:: d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_2 -``margin`` 为(input,positive)与(input,negative)的距离间隔,``swap`` 为True时,会比较(input,negative)和(positive,negative)的大小,并将(input,negative)的值换为其中较小的值,内容详见论文 `Learning shallow convolutional feature descriptors with triplet losses `_ 。 +``margin`` 为(input,positive)与(input,negative)的距离间隔,``swap`` 为 True 时,会比较(input,negative)和(positive,negative)的大小,并将(input,negative)的值换为其中较小的值,内容详见论文 `Learning shallow convolutional feature descriptors with triplet losses `_ 。 参数 ::::::::: - - **distance_function** (可选) - 手动指定范数,默认为None,使用欧式距离。 - - **margin** (float,可选) - 手动指定间距,默认为1。 - - **swap** (bool,可选) - 默认为False。 + - **distance_function** (可选) - 手动指定范数,默认为 None,使用欧式距离。 + - **margin** (float,可选) - 手动指定间距,默认为 1。 + - **swap** (bool,可选) - 默认为 False。 - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始 Loss。 - - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + - **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name` 。 形状 ::::::::: - - **input** (Tensor) - :math:`[N, *]`,其中N是batch_size, `*` 是任意其他维度。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, *]`,其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **positive** (Tensor) - :math:`[N, *]`,标签 ``positive`` 的维度、数据类型与输入 ``input`` 相同。 - **negative** (Tensor) - :math:`[N, *]`,标签 ``negative`` 的维度、数据类型与输入 ``input`` 相同。 - - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 返回 ::::::::: diff --git a/docs/api/paddle/nn/Unfold_cn.rst b/docs/api/paddle/nn/Unfold_cn.rst index 29f948248b8..21634723ebe 100644 --- 
a/docs/api/paddle/nn/Unfold_cn.rst +++ b/docs/api/paddle/nn/Unfold_cn.rst @@ -8,10 +8,10 @@ unfold -该OP实现的功能与卷积中用到的im2col函数一样,通常也被称作为im2col过程。对于每一个卷积核覆盖下的区域,元素会被重新排成一列。当卷积核在整个图片上滑动时,将会形成一系列的列向量。对于每一个输入形状为[N, C, H, W]的 ``x``,都将会按照下面公式计算出一个形状为[N, Cout, Lout]的输出。 +该 OP 实现的功能与卷积中用到的 im2col 函数一样,通常也被称作为 im2col 过程。对于每一个卷积核覆盖下的区域,元素会被重新排成一列。当卷积核在整个图片上滑动时,将会形成一系列的列向量。对于每一个输入形状为[N, C, H, W]的 ``x``,都将会按照下面公式计算出一个形状为[N, Cout, Lout]的输出。 .. note:: - 对应的 `functional方法` 请参考::ref:`cn_api_nn_functional_unfold` 。 + 对应的 `functional 方法` 请参考::ref:`cn_api_nn_functional_unfold` 。 **样例**: @@ -31,16 +31,16 @@ unfold 参数 :::::::::::: - - **kernel_size** (int|list of int) – 卷积核的尺寸,整数或者整型列表。如果为整型列表,应包含两个元素 ``[k_h, k_w]``,卷积核大小为 ``k_h * k_w``;如果为整数k,会被当作整型列表 ``[k, k]`` 处理 - - **strides** (int|list of int,可选) – 卷积步长,整数或者整型列表。如果为整型列表,应该包含两个元素 ``[stride_h, stride_w]``。如果为整数,则 ``stride_h = stride_w = strides``。默认值为1 - - **paddings** (int|list of int,可选) – 每个维度的扩展,整数或者整型列表。如果为整型列表,长度应该为4或者2;长度为4 对应的padding参数是:[padding_top, padding_left,padding_bottom, padding_right],长度为2对应的padding参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数padding,则会被当作[padding, padding, padding, padding]处理。默认值为0 - - **dilations** (int|list of int,可选) – 卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数dilation,会被当作整型列表[dilation, dilation]处理。默认值为1 + - **kernel_size** (int|list of int) – 卷积核的尺寸,整数或者整型列表。如果为整型列表,应包含两个元素 ``[k_h, k_w]``,卷积核大小为 ``k_h * k_w``;如果为整数 k,会被当作整型列表 ``[k, k]`` 处理 + - **strides** (int|list of int,可选) – 卷积步长,整数或者整型列表。如果为整型列表,应该包含两个元素 ``[stride_h, stride_w]``。如果为整数,则 ``stride_h = stride_w = strides``。默认值为 1 + - **paddings** (int|list of int,可选) – 每个维度的扩展,整数或者整型列表。如果为整型列表,长度应该为 4 或者 2;长度为 4 对应的 padding 参数是:[padding_top, padding_left,padding_bottom, padding_right],长度为 2 对应的 padding 参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数 padding,则会被当作[padding, padding, padding, padding]处理。默认值为 0 + - **dilations** (int|list of int,可选) – 
卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数 dilation,会被当作整型列表[dilation, dilation]处理。默认值为 1 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **输入** : 4-D Tensor,形状为[N, C, H, W],数据类型为float32或者float64 - - **输出**:形状如上面所描述的[N, Cout, Lout],Cout每一个滑动block里面覆盖的元素个数,Lout是滑动block的个数,数据类型与 ``x`` 相同 + - **输入** : 4-D Tensor,形状为[N, C, H, W],数据类型为 float32 或者 float64 + - **输出**:形状如上面所描述的[N, Cout, Lout],Cout 每一个滑动 block 里面覆盖的元素个数,Lout 是滑动 block 的个数,数据类型与 ``x`` 相同 代码示例 diff --git a/docs/api/paddle/nn/Upsample_cn.rst b/docs/api/paddle/nn/Upsample_cn.rst index 57792737257..7a52c483292 100644 --- a/docs/api/paddle/nn/Upsample_cn.rst +++ b/docs/api/paddle/nn/Upsample_cn.rst @@ -6,9 +6,9 @@ Upsample .. py:class:: paddle.nn.Upsample(size=None, scale_factor=None, mode='nearest', align_corners=False, align_mode=0, data_format='NCHW', name=None) -该OP用于调整一个batch中图片的大小。 +该 OP 用于调整一个 batch 中图片的大小。 -输入为3-D Tensor时形状为(num_batches, channels, in_w),输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为5-D Tensor时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 +输入为 3-D Tensor 时形状为(num_batches, channels, in_w),输入为 4-D Tensor 时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为 5-D Tensor 时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 支持的插值方法: @@ -27,13 +27,13 @@ Upsample 线性插值是用一条线连接两个已知量来确定两个已知量之间的一个未知量的值的方法。 -双线性插值是线性插值的扩展,用于在直线2D网格上插值两个变量(例如,该操作中的H方向和W方向)的函数。关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 +双线性插值是线性插值的扩展,用于在直线 2D 网格上插值两个变量(例如,该操作中的 H 方向和 W 方向)的函数。关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 -三线插值是线性插值的一种扩展,是3参数的插值方程(比如op里的D,H,W方向),在三个方向上进行线性插值。 +三线插值是线性插值的一种扩展,是 3 参数的插值方程(比如 op 里的 D,H,W 方向),在三个方向上进行线性插值。 双三次插值是在二维网格上对数据点进行插值的三次插值的扩展,它能创造出比双线性和最近临插值更为光滑的图像边缘。 -Align_corners和align_mode是可选参数,插值的计算方法可以由它们选择。 +Align_corners 和 align_mode 
是可选参数,插值的计算方法可以由它们选择。 示例: @@ -150,17 +150,17 @@ https://en.wikipedia.org/wiki/Bicubic_interpolation 参数 ::::::::: - - **size** (list|tuple|Variable|None) - 输出Tensor,输入为3D张量时,形状为为(out_w)的1-D Tensor。输入为4D张量时,形状为为(out_h, out_w)的2-D Tensor。输入为5-D Tensor时,形状为(out_d, out_h, out_w)的3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。 - - **scale_factor** (float|Tensor|list|tuple|None)-输入的高度或宽度的乘数因子。out_shape和scale至少要设置一个。out_shape的优先级高于scale。默认值为None。如果scale_factor是一个list或tuple,它必须与输入的shape匹配。 + - **size** (list|tuple|Variable|None) - 输出 Tensor,输入为 3D 张量时,形状为为(out_w)的 1-D Tensor。输入为 4D 张量时,形状为为(out_h, out_w)的 2-D Tensor。输入为 5-D Tensor 时,形状为(out_d, out_h, out_w)的 3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为 1。默认值为 None。 + - **scale_factor** (float|Tensor|list|tuple|None)-输入的高度或宽度的乘数因子。out_shape 和 scale 至少要设置一个。out_shape 的优先级高于 scale。默认值为 None。如果 scale_factor 是一个 list 或 tuple,它必须与输入的 shape 匹配。 - **mode** (str,可选) - 插值方法。支持"bilinear"或"trilinear"或"nearest"或"bicubic"或"linear"或"area"。默认值为"nearest"。 - - **align_corners** (bool,可选)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。默认值为True - - **align_mode** (int,可选)- 双线性插值的可选项。可以是 '0' 代表src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1',代表src_idx = scale * dst_index。 - - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于3-D Tensor,支持 NCHW(num_batches, channels, width),对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 + - **align_corners** (bool,可选)- 一个可选的 bool 型参数,如果为 True,则将输入和输出张量的 4 个角落像素的中心对齐,并保留角点像素的值。默认值为 True + - **align_mode** (int,可选)- 双线性插值的可选项。可以是 '0' 代表 src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1',代表 src_idx = scale * dst_index。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于 3-D Tensor,支持 NCHW(num_batches, channels, width),对于 
4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于 5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -3-D Tensor,形状为 (num_batches, channels, out_w) ;4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 +3-D Tensor,形状为 (num_batches, channels, out_w) ;4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者 5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 代码示例 diff --git a/docs/api/paddle/nn/UpsamplingBilinear2D_cn.rst b/docs/api/paddle/nn/UpsamplingBilinear2D_cn.rst index 343557bde28..6c1878a82ca 100644 --- a/docs/api/paddle/nn/UpsamplingBilinear2D_cn.rst +++ b/docs/api/paddle/nn/UpsamplingBilinear2D_cn.rst @@ -7,16 +7,16 @@ UpsamplingBilinear2D -该OP用于调整一个batch中图片的大小。 +该 OP 用于调整一个 batch 中图片的大小。 -输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),调整大小只适用于高度和宽度对应的维度。 +输入为 4-D Tensor 时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),调整大小只适用于高度和宽度对应的维度。 支持的插值方法: BILINEAR:双线性插值 -双线性插值是线性插值的扩展,用于在直线2D网格上插值两个变量(例如,该操作中的H方向和W方向)的函数。关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 +双线性插值是线性插值的扩展,用于在直线 2D 网格上插值两个变量(例如,该操作中的 H 方向和 W 方向)的函数。关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 有关双线性插值的详细信息,请参阅维基百科: https://en.wikipedia.org/wiki/Bilinear_interpolation @@ -25,9 +25,9 @@ https://en.wikipedia.org/wiki/Bilinear_interpolation 参数 :::::::::::: - - **size** (list|tuple|Tensor|None) - 输出Tensor,输入为4D张量,形状为为(out_h, out_w)的2-D Tensor。如果 :code:`size` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`size` 是变量,则其维度大小为1。默认值为None。 - - **scale_factor** 
(float|Tensor|list|tuple|None)-输入的高度或宽度的乘数因子。``size`` 和 ``scale_factor`` 至少要设置一个。``size`` 的优先级高于 ``scale_factor``。默认值为None。如果 ``scale_factor`` 是一个list或tuple,它必须与输入的shape匹配。 - - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),默认值:'NCHW'。 + - **size** (list|tuple|Tensor|None) - 输出 Tensor,输入为 4D 张量,形状为为(out_h, out_w)的 2-D Tensor。如果 :code:`size` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`size` 是变量,则其维度大小为 1。默认值为 None。 + - **scale_factor** (float|Tensor|list|tuple|None)-输入的高度或宽度的乘数因子。``size`` 和 ``scale_factor`` 至少要设置一个。``size`` 的优先级高于 ``scale_factor``。默认值为 None。如果 ``scale_factor`` 是一个 list 或 tuple,它必须与输入的 shape 匹配。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于 4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),默认值:'NCHW'。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst b/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst index 17804a8be8b..7be8ffdf3f0 100644 --- a/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst +++ b/docs/api/paddle/nn/UpsamplingNearest2D_cn.rst @@ -7,9 +7,9 @@ UpsamplingNearest2D -该OP用于调整一个batch中图片的大小。 +该 OP 用于调整一个 batch 中图片的大小。 -输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),调整大小只适用于高度和宽度对应的维度。 +输入为 4-D Tensor 时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),调整大小只适用于高度和宽度对应的维度。 支持的插值方法: @@ -53,9 +53,9 @@ https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation 参数 :::::::::::: - - **size** (list|tuple|Tensor|None) - 输出Tensor,输入为4D张量,形状为(out_h, out_w)的2-D Tensor。如果 :code:`size` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 ``size`` 是变量,则其维度大小为1。默认值为None。 - - **scale_factor** (float|Tensor|list|None)-输入的高度或宽度的乘数因子。``size`` 和 ``scale_factor`` 至少要设置一个。``size`` 的优先级高于 ``scale_factor``。默认值为None。如果 ``scale_factor`` 是一个list或tuple,它必须与输入的shape匹配。 - - 
**data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),默认值:'NCHW'。 + - **size** (list|tuple|Tensor|None) - 输出 Tensor,输入为 4D 张量,形状为(out_h, out_w)的 2-D Tensor。如果 :code:`size` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 ``size`` 是变量,则其维度大小为 1。默认值为 None。 + - **scale_factor** (float|Tensor|list|None)-输入的高度或宽度的乘数因子。``size`` 和 ``scale_factor`` 至少要设置一个。``size`` 的优先级高于 ``scale_factor``。默认值为 None。如果 ``scale_factor`` 是一个 list 或 tuple,它必须与输入的 shape 匹配。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于 4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),默认值:'NCHW'。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/ZeroPad2D_cn.rst b/docs/api/paddle/nn/ZeroPad2D_cn.rst index 76739674521..700c803c8c8 100644 --- a/docs/api/paddle/nn/ZeroPad2D_cn.rst +++ b/docs/api/paddle/nn/ZeroPad2D_cn.rst @@ -11,9 +11,9 @@ ZeroPad2D 参数 ::::::::: - - **padding** (Tensor | List[int] | int]) - 填充大小。如果是int,则在所有待填充边界使用相同的填充, + - **padding** (Tensor | List[int] | int]) - 填充大小。如果是 int,则在所有待填充边界使用相同的填充, 否则填充的格式为[pad_left, pad_right, pad_top, pad_bottom]。 - - **data_format** (str) - 指定输入的format,可为 ``'NCHW'`` 或者 ``'NHWC'``,默认值为 ``'NCHW'``。 + - **data_format** (str) - 指定输入的 format,可为 ``'NCHW'`` 或者 ``'NHWC'``,默认值为 ``'NCHW'``。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 @@ -23,8 +23,8 @@ ZeroPad2D 形状 ::::::::: - - x(Tensor): ZeroPadD层的输入,要求形状为4-D,dtype为 ``'float32'`` 或 ``'float64'`` - - output(Tensor):输出,形状为4-D,dtype与 ``'input'`` 相同 + - x(Tensor): ZeroPadD 层的输入,要求形状为 4-D,dtype 为 ``'float32'`` 或 ``'float64'`` + - output(Tensor):输出,形状为 4-D,dtype 与 ``'input'`` 相同 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/dynamic_decode_cn.rst b/docs/api/paddle/nn/dynamic_decode_cn.rst index decd6eaf272..a00f480e807 100644 --- a/docs/api/paddle/nn/dynamic_decode_cn.rst +++ 
b/docs/api/paddle/nn/dynamic_decode_cn.rst @@ -9,7 +9,7 @@ dynamic_decode -该接口重复执行 :code:`decoder.step()` 直到 其返回的表示完成状态的Tensor中的值全部为True或解码步骤达到 :code:`max_step_num`。 +该接口重复执行 :code:`decoder.step()` 直到 其返回的表示完成状态的 Tensor 中的值全部为 True 或解码步骤达到 :code:`max_step_num`。 :code:`decode.initialize()` 会在解码循环之前被调用一次。如果 :code:`decoder` 实现了 :code:`finalize` 方法,则 :code:`decoder.finalize()` 在解码循环后将被调用一次。 @@ -17,18 +17,18 @@ dynamic_decode ::::::::: - **decoder** (Decoder) - 解码器的实例。 - - **inits** (object,可选) - 传递给 :code:`decoder.initialize` 的参数。默认为None。 - - **max_step_num** (int,可选) - 最大步数。如果未提供,解码直到解码过程完成( :code:`decode.step()` 返回的表示完成状态的Tensor中的值全部为True)。默认为None。 - - **output_time_major** (bool,可选) - 指明最终输出(此方法的第一个返回值)中包含的Tensor的数据布局。如果为False,其将使用batch优先的数据布局,此时的形状为 :math:`[batch\_size,seq\_len,...]`。如果为True,其将使用time优先的数据布局,此时的形状为 :math:`[seq\_len,batch\_size,...]`。默认值为False。 - - **impute_finished** (bool,可选) - 若为True并且 :code:`decoder.tracks_own_finished` 为False,对于当前批次中完成状态为结束的样本,将会拷贝其上一步的状态,而非像未结束的实例那样使用 :code:`decode.step()` 返回的 :code:`next_states` 作为新的状态,这保证了返回的最终状态 :code:`final_states` 是正确的;否则,不会区分是否结束,也没有这个拷贝操作。若 :code:`final_states` 会被使用,则这里应该设置为True,这会一定程度上影响速度。默认为False。 - - **is_test** (bool,可选) - 标识是否是预测模式,预测模式下内存占用会更少。默认为False。 - - **return_length** (bool,可选) - 标识是否在返回的元组中额外包含一个存放了所有解码序列实际长度的Tensor。默认为False。 + - **inits** (object,可选) - 传递给 :code:`decoder.initialize` 的参数。默认为 None。 + - **max_step_num** (int,可选) - 最大步数。如果未提供,解码直到解码过程完成( :code:`decode.step()` 返回的表示完成状态的 Tensor 中的值全部为 True)。默认为 None。 + - **output_time_major** (bool,可选) - 指明最终输出(此方法的第一个返回值)中包含的 Tensor 的数据布局。如果为 False,其将使用 batch 优先的数据布局,此时的形状为 :math:`[batch\_size,seq\_len,...]`。如果为 True,其将使用 time 优先的数据布局,此时的形状为 :math:`[seq\_len,batch\_size,...]`。默认值为 False。 + - **impute_finished** (bool,可选) - 若为 True 并且 :code:`decoder.tracks_own_finished` 为 False,对于当前批次中完成状态为结束的样本,将会拷贝其上一步的状态,而非像未结束的实例那样使用 :code:`decode.step()` 返回的 :code:`next_states` 作为新的状态,这保证了返回的最终状态 :code:`final_states` 是正确的;否则,不会区分是否结束,也没有这个拷贝操作。若 
:code:`final_states` 会被使用,则这里应该设置为 True,这会一定程度上影响速度。默认为 False。 + - **is_test** (bool,可选) - 标识是否是预测模式,预测模式下内存占用会更少。默认为 False。 + - **return_length** (bool,可选) - 标识是否在返回的元组中额外包含一个存放了所有解码序列实际长度的 Tensor。默认为 False。 - **kwargs** - 其他命名关键字参数。这些参数将传递给 :code:`decoder.step`。 返回 ::::::::: -tuple,若 :code:`return_length` 为True,则返回三元组 :code:`(final_outputs, final_states, sequence_lengths)`,否则返回二元组 :code:`(final_outputs, final_states)` 。 :code:`final_outputs, final_states` 包含了最终的输出和状态,这两者都是Tensor或Tensor的嵌套结构。:code:`final_outputs` 具有与 :code:`decoder.step()` 返回的 :code:`outputs` 相同的结构和数据类型,且其中的每个tensor都是将所有解码步中与其对应的的输出进行堆叠的结果;如果 :code:`decoder` 实现了 :code:`finalize` 方法,这些tensor也可能会通过 :code:`decoder.finalize()` 进行修改。:code:`final_states` 是最后时间步的状态,和 :code:`decoder.initialize()` 返回的初始状态具有相同的结构,形状和数据类型。:code:`sequence_lengths` 是int64类型的tensor,和 :code:`decoder.initialize()` 返回的 :code:`finished` 具有相同的形状,其保存了所有解码序列实际长度。 +tuple,若 :code:`return_length` 为 True,则返回三元组 :code:`(final_outputs, final_states, sequence_lengths)`,否则返回二元组 :code:`(final_outputs, final_states)` 。 :code:`final_outputs, final_states` 包含了最终的输出和状态,这两者都是 Tensor 或 Tensor 的嵌套结构。:code:`final_outputs` 具有与 :code:`decoder.step()` 返回的 :code:`outputs` 相同的结构和数据类型,且其中的每个 tensor 都是将所有解码步中与其对应的的输出进行堆叠的结果;如果 :code:`decoder` 实现了 :code:`finalize` 方法,这些 tensor 也可能会通过 :code:`decoder.finalize()` 进行修改。:code:`final_states` 是最后时间步的状态,和 :code:`decoder.initialize()` 返回的初始状态具有相同的结构,形状和数据类型。:code:`sequence_lengths` 是 int64 类型的 tensor,和 :code:`decoder.initialize()` 返回的 :code:`finished` 具有相同的形状,其保存了所有解码序列实际长度。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/adaptive_avg_pool2d_cn.rst b/docs/api/paddle/nn/functional/adaptive_avg_pool2d_cn.rst index c6a16622876..3515177b9f6 100755 --- a/docs/api/paddle/nn/functional/adaptive_avg_pool2d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_avg_pool2d_cn.rst @@ -5,7 +5,7 @@ adaptive_avg_pool2d .. 
py:function:: paddle.nn.functional.adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算2D的自适应平均池化。输入和输出都是4-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 2D 的自适应平均池化。输入和输出都是 4-D Tensor, 默认是以 `NCHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`H` 是输入特征的高度,`H` 是输入特征的宽度。 计算公式如下: @@ -25,14 +25,14 @@ adaptive_avg_pool2d 参数 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即NCHW格式的4-D Tensor。其数据类型为float16, float32, float64, int32或int64。 - - **output_size** (int|list|turple):算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含两个元素,H和W。H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 - - **data_format** (str):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **x** (Tensor):默认形状为(批大小,通道数,高度,宽度),即 NCHW 格式的 4-D Tensor。其数据类型为 float16, float32, float64, int32 或 int64。 + - **output_size** (int|list|tuple):算子输出特征图的尺寸,如果其是 list 或 tuple 类型的数值,必须包含两个元素,H 和 W。H 和 W 既可以是 int 类型值也可以是 None,None 表示与输入特征尺寸相同。 + - **data_format** (str):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即NCHW格式的4-D Tensor,其数据类型与输入相同。 +``Tensor``,默认形状为(批大小,通道数,输出特征高度,输出特征宽度),即 NCHW 格式的 4-D Tensor,其数据类型与输入相同。 代码示例 diff --git a/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst b/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst index f1e773b32fa..8497dcc74ce 100755 --- a/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_avg_pool3d_cn.rst @@ -5,7 +5,7 @@ adaptive_avg_pool3d ..
py:function:: paddle.nn.functional.adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算3D的自适应平均池化。输入和输出都是5-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 3D 的自适应平均池化。输入和输出都是 5-D Tensor, 默认是以 `NCDHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`D` 是特征图长度,`H` 是输入特征的高度,`H` 是输入特征的宽度。 计算公式如下: @@ -30,14 +30,14 @@ adaptive_avg_pool3d 参数 ::::::::: - - **x** (Tensor):默认形状为(批大小,通道数,长度,高度,宽度),即NCDHW格式的5-D Tensor。其数据类型为float16, float32, float64, int32或int64。 - - **output_size** (int|list|turple):算子输出特征图的尺寸,如果其是list或turple类型的数值,必须包含三个元素,D,H和W。D,H和W既可以是int类型值也可以是None,None表示与输入特征尺寸相同。 - - **data_format** (str):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征长度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **x** (Tensor):默认形状为(批大小,通道数,长度,高度,宽度),即 NCDHW 格式的 5-D Tensor。其数据类型为 float16, float32, float64, int32 或 int64。 + - **output_size** (int|list|tuple):算子输出特征图的尺寸,如果其是 list 或 tuple 类型的数值,必须包含三个元素,D,H 和 W。D,H 和 W 既可以是 int 类型值也可以是 None,None 表示与输入特征尺寸相同。 + - **data_format** (str):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征长度,H 是特征高度,W 是特征宽度。默认值:"NCDHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即NCDHW格式的5-D Tensor,其数据类型与输入相同。 +``Tensor``,默认形状为(批大小,通道数,输出特征长度,输出特征高度,输出特征宽度),即 NCDHW 格式的 5-D Tensor,其数据类型与输入相同。 代码示例 diff --git a/docs/api/paddle/nn/functional/adaptive_max_pool1d_cn.rst b/docs/api/paddle/nn/functional/adaptive_max_pool1d_cn.rst index 425445315ca..3f81372514f 100755 --- a/docs/api/paddle/nn/functional/adaptive_max_pool1d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_max_pool1d_cn.rst @@ -6,7 +6,7 @@ adaptive_max_pool1d ..
py:function:: paddle.nn.functional.adaptive_max_pool1d(x, output_size, return_mask=False, name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算1D的自适应最大值池化。输入和输出都是3-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 1D 的自适应最大值池化。输入和输出都是 3-D Tensor, 默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。 .. note:: @@ -15,14 +15,14 @@ adaptive_max_pool1d 参数 ::::::::: - - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数,`L` 是输入特征的长度。其数据类型为float32或者float64。 - - **output_size** (int|list|tuple):算子输出特征图的长度,其数据类型为int或list,tuple。 - - **return_mask** (bool):如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, L]` 的 3-D Tensor。其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。其数据类型为 float32 或者 float64。 + - **output_size** (int|list|tuple):算子输出特征图的长度,其数据类型为 int 或 list,tuple。 + - **return_mask** (bool):如果设置为 True,则会与输出一起返回最大值的索引,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,输入 `x` 经过自适应池化计算得到的目标3-D Tensor,其数据类型与输入相同。 +``Tensor``,输入 `x` 经过自适应池化计算得到的目标 3-D Tensor,其数据类型与输入相同。 代码示例 diff --git a/docs/api/paddle/nn/functional/adaptive_max_pool2d_cn.rst b/docs/api/paddle/nn/functional/adaptive_max_pool2d_cn.rst index 59a5163c33e..5a4f8d721e2 100644 --- a/docs/api/paddle/nn/functional/adaptive_max_pool2d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_max_pool2d_cn.rst @@ -4,7 +4,7 @@ adaptive_max_pool2d ------------------------------- .. py:function:: paddle.nn.functional.adaptive_max_pool2d(x, output_size, return_mask=False, name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算2D的自适应最大值池化。输入和输出都是4-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 2D 的自适应最大值池化。输入和输出都是 4-D Tensor, 默认是以 `NCHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`H` 是输入特征的高度,`W` 是输入特征的宽度。 .. 
note:: @@ -13,14 +13,14 @@ adaptive_max_pool2d 参数 ::::::::: - - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, H, W]` 的4-D Tensor。其中 `N` 是batch size, `C` 是通道数,`H` 是输入特征的高度,`W` 是输入特征的宽度。其数据类型为float32或者float64。 - - **output_size** (int|list|tuple):算子输出特征图的长度,其数据类型为int或list,tuple。 - - **return_mask** (bool,可选):如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, H, W]` 的 4-D Tensor。其中 `N` 是 batch size, `C` 是通道数,`H` 是输入特征的高度,`W` 是输入特征的宽度。其数据类型为 float32 或者 float64。 + - **output_size** (int|list|tuple):算子输出特征图的长度,其数据类型为 int 或 list,tuple。 + - **return_mask** (bool,可选):如果设置为 True,则会与输出一起返回最大值的索引,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,输入 `x` 经过自适应池化计算得到的目标4-D Tensor,其数据类型与输入相同。 +``Tensor``,输入 `x` 经过自适应池化计算得到的目标 4-D Tensor,其数据类型与输入相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/adaptive_max_pool3d_cn.rst b/docs/api/paddle/nn/functional/adaptive_max_pool3d_cn.rst index b972e8723de..43c5b2dd7af 100644 --- a/docs/api/paddle/nn/functional/adaptive_max_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_max_pool3d_cn.rst @@ -4,7 +4,7 @@ adaptive_max_pool3d ------------------------------- .. py:function:: paddle.nn.functional.adaptive_max_pool3d(x, output_size, return_mask=False, name=None) -该算子根据输入 `x` , `output_size` 等参数对一个输入Tensor计算3D的自适应最大值池化。输入和输出都是5-D Tensor, +该算子根据输入 `x` , `output_size` 等参数对一个输入 Tensor 计算 3D 的自适应最大值池化。输入和输出都是 5-D Tensor, 默认是以 `NCDHW` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`D` , `H` , `W` 是输入特征的深度,高度,宽度。 .. 
note:: @@ -13,14 +13,14 @@ adaptive_max_pool3d 参数 ::::::::: - - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, D, H, W]` 的5-D Tensor。其中 `N` 是batch size, `C` 是通道数,`D` , `H` , `W` 是输入特征的深度,高度,宽度。其数据类型为float32或者float64。 - - **output_size** (int|list|tuple):算子输出特征图的长度,其数据类型为int或list,tuple。 - - **return_mask** (bool,可选):如果设置为True,则会与输出一起返回最大值的索引,默认为False。 + - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, D, H, W]` 的 5-D Tensor。其中 `N` 是 batch size, `C` 是通道数,`D` , `H` , `W` 是输入特征的深度,高度,宽度。其数据类型为 float32 或者 float64。 + - **output_size** (int|list|tuple):算子输出特征图的长度,其数据类型为 int 或 list,tuple。 + - **return_mask** (bool,可选):如果设置为 True,则会与输出一起返回最大值的索引,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,输入 `x` 经过自适应池化计算得到的目标5-D Tensor,其数据类型与输入相同。 +``Tensor``,输入 `x` 经过自适应池化计算得到的目标 5-D Tensor,其数据类型与输入相同。 代码示例 diff --git a/docs/api/paddle/nn/functional/affine_grid_cn.rst b/docs/api/paddle/nn/functional/affine_grid_cn.rst index b2600cf6b38..49f249f95ee 100644 --- a/docs/api/paddle/nn/functional/affine_grid_cn.rst +++ b/docs/api/paddle/nn/functional/affine_grid_cn.rst @@ -6,19 +6,19 @@ affine_grid .. 
py:function:: paddle.nn.functional.affine_grid(theta, out_shape, align_corners=True, name=None) -该OP用于生成仿射变换前后的feature maps的坐标映射关系。在视觉应用中,根据该OP得到的映射关系,将输入feature map的像素点变换到对应的坐标,就得到了经过仿射变换的feature map。 +该 OP 用于生成仿射变换前后的 feature maps 的坐标映射关系。在视觉应用中,根据该 OP 得到的映射关系,将输入 feature map 的像素点变换到对应的坐标,就得到了经过仿射变换的 feature map。 参数 :::::::::::: - - **theta** (Tensor) - Shape为 ``[batch_size, 2, 3]`` 的Tensor,表示batch_size个 ``2X3`` 的变换矩阵。数据类型支持float32,float64。 - - **out_shape** (Tensor | list | tuple) - 类型可以是1-D Tensor、list或tuple。用于表示在仿射变换中的输出的shape,其格式 ``[N, C, H, W]``,分别为输出feature map的batch size、channel数量、高和宽。数据类型支持int32。 - - **align_corners** (bool, optional):一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。默认值:True。 + - **theta** (Tensor) - Shape 为 ``[batch_size, 2, 3]`` 的 Tensor,表示 batch_size 个 ``2X3`` 的变换矩阵。数据类型支持 float32,float64。 + - **out_shape** (Tensor | list | tuple) - 类型可以是 1-D Tensor、list 或 tuple。用于表示在仿射变换中的输出的 shape,其格式 ``[N, C, H, W]``,分别为输出 feature map 的 batch size、channel 数量、高和宽。数据类型支持 int32。 + - **align_corners** (bool, optional):一个可选的 bool 型参数,如果为 True,则将输入和输出张量的 4 个角落像素的中心对齐,并保留角点像素的值。默认值:True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - Tensor。Shape为 ``[N, H, W, 2]`` 的4-D Tensor,表示仿射变换前后的坐标的映射关系。其中,N、H、W分别为仿射变换中输出feature map的batch size、高和宽。数据类型与 ``theta`` 一致。 + Tensor。Shape 为 ``[N, H, W, 2]`` 的 4-D Tensor,表示仿射变换前后的坐标的映射关系。其中,N、H、W 分别为仿射变换中输出 feature map 的 batch size、高和宽。数据类型与 ``theta`` 一致。 代码示例 diff --git a/docs/api/paddle/nn/functional/alpha_dropout_cn.rst b/docs/api/paddle/nn/functional/alpha_dropout_cn.rst index de09f0b0da7..3777c678481 100644 --- a/docs/api/paddle/nn/functional/alpha_dropout_cn.rst +++ b/docs/api/paddle/nn/functional/alpha_dropout_cn.rst @@ -5,18 +5,18 @@ alpha_dropout .. 
py:function:: paddle.nn.functional.alpha_dropout(x, p=0.5, training=True, name=None) -alpha_dropout是一种具有自归一化性质的dropout。均值为0,方差为1的输入,经过alpha_dropout计算之后,输出的均值和方差与输入保持一致。alpha_dropout通常与SELU激活函数组合使用。 +alpha_dropout 是一种具有自归一化性质的 dropout。均值为 0,方差为 1 的输入,经过 alpha_dropout 计算之后,输出的均值和方差与输入保持一致。alpha_dropout 通常与 SELU 激活函数组合使用。 参数 ::::::::: - **x** (Tensor):输入的多维 `Tensor`,数据类型为:float32、float64。 - - **p** (float):将输入节点置0的概率,即丢弃概率。默认:0.5。 + - **p** (float):将输入节点置 0 的概率,即丢弃概率。默认:0.5。 - **training** (bool):标记是否为训练阶段。默认:True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -经过alpha_dropout之后的结果,与输入x形状相同的 `Tensor` 。 +经过 alpha_dropout 之后的结果,与输入 x 形状相同的 `Tensor` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/avg_pool1d_cn.rst b/docs/api/paddle/nn/functional/avg_pool1d_cn.rst index 4b84be57351..06583b4ab5f 100755 --- a/docs/api/paddle/nn/functional/avg_pool1d_cn.rst +++ b/docs/api/paddle/nn/functional/avg_pool1d_cn.rst @@ -6,7 +6,7 @@ avg_pool1d .. py:function:: paddle.nn.functional.avg_pool1d(x, kernel_size, stride=None, padding=0, exclusive=True, ceil_mode=False, name=None) -该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的平均池化。输入和输出都是3-D Tensor, +该算子根据输入 `x` , `kernel_size` 等参数对一个输入 Tensor 计算 1D 的平均池化。输入和输出都是 3-D Tensor, 默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。 .. 
note:: @@ -15,19 +15,19 @@ avg_pool1d 参数 ::::::::: - - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数,`L` 是输入特征的长度。其数据类型为float32或者float64。 - - **kernel_size** (int|list|tuple):池化核的尺寸大小。如果kernel_size为list或tuple类型,其必须包含一个整数。 - - **stride** (int|list|tuple):池化操作步长。如果stride为list或tuple类型,其必须包含一个整数。 - - **padding** (string|int|list|tuple):池化补零的方式。如果padding是一个字符串,则必须为 `SAME` 或者 `VALID`。如果是turple或者list类型,则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 - - **exclusive** (bool):是否用额外padding的值计算平均池化结果,默认为True。 - - **ceil_mode** (bool):是否用ceil函数计算输出的height和width,如果设置为False,则使用floor函数来计算,默认为False。 + - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, L]` 的 3-D Tensor。其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。其数据类型为 float32 或者 float64。 + - **kernel_size** (int|list|tuple):池化核的尺寸大小。如果 kernel_size 为 list 或 tuple 类型,其必须包含一个整数。 + - **stride** (int|list|tuple):池化操作步长。如果 stride 为 list 或 tuple 类型,其必须包含一个整数。 + - **padding** (string|int|list|tuple):池化补零的方式。如果 padding 是一个字符串,则必须为 `SAME` 或者 `VALID`。如果是 tuple 或者 list 类型,则应是 `[pad_left, pad_right]` 形式。如果 padding 是一个非 0 值,那么表示会在输入的两端都 padding 上同样长度的 0。 + - **exclusive** (bool):是否用额外 padding 的值计算平均池化结果,默认为 True。 + - **ceil_mode** (bool):是否用 ceil 函数计算输出的 height 和 width,如果设置为 False,则使用 floor 函数来计算,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,输入 `x` 经过平均池化计算得到的目标3-D Tensor,其数据类型与输入相同。 +``Tensor``,输入 `x` 经过平均池化计算得到的目标 3-D Tensor,其数据类型与输入相同。 diff --git a/docs/api/paddle/nn/functional/avg_pool2d_cn.rst b/docs/api/paddle/nn/functional/avg_pool2d_cn.rst index 5574d0b97f1..5d058afc086 100644 --- a/docs/api/paddle/nn/functional/avg_pool2d_cn.rst +++ b/docs/api/paddle/nn/functional/avg_pool2d_cn.rst @@ -24,14 +24,14 @@ avg_pool2d 参数 ::::::::: - - **x** (Tensor):形状为 `[N,C,H,W]` 或 `[N,H,W,C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float32或float64。 - - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_size_Height,
pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若pool_size=2,则池化核大小为2x2。 - - **stride** (int|list|tuple):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示H和W维度上stride均为该值。默认值为kernel_size。 - - **padding** (string|int|list|tuple) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 pool_padding = "SAME"或 pool_padding = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含2个整数值:[pad_height, pad_width];(2)包含4个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含4个二元组:当 data_format 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示H和W维度上均为该值。默认值:0。 - - **ceil_mode** (bool):是否用ceil函数计算输出高度和宽度。如果是True,则使用 `ceil` 计算输出形状的大小。默认为None + - **x** (Tensor):形状为 `[N,C,H,W]` 或 `[N,H,W,C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 + - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_size_Height, pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若 pool_size=2,则池化核大小为 2x2。 + - **stride** (int|list|tuple):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示 H 和 W 维度上 stride 均为该值。默认值为 kernel_size。 + - **padding** (string|int|list|tuple) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 pool_padding = "SAME"或 pool_padding = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 2 个整数值:[pad_height, pad_width];(2)包含 4 个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 4 个二元组:当 data_format 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示 H 和 W 维度上均为该值。默认值:0。 + - **ceil_mode** (bool):是否用 ceil 函数计算输出高度和宽度。如果是 True,则使用 `ceil` 计算输出形状的大小。默认为 None - **exclusive** (bool):是否在平均池化模式忽略填充值,默认是 `True`。 - **divisor_override** 
(int|float):如果指定,它将用作除数,否则根据`kernel_size`计算除数。默认`None`。 - - **data_format** (str):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW" + - **data_format** (str):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/avg_pool3d_cn.rst b/docs/api/paddle/nn/functional/avg_pool3d_cn.rst index 6e5c224dbea..f495fbb90a7 100644 --- a/docs/api/paddle/nn/functional/avg_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/avg_pool3d_cn.rst @@ -27,14 +27,14 @@ avg_pool3d 参数 ::::::::: - - **x** (Tensor):形状为 [N,C,D,H,W] 或 [N,D,H,W,C] 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型为float32或float64。 - - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含三个整数值,(pool_size_Depth, pool_size_Height, pool_size_Width)。若为一个整数,则表示D,H和W维度上均为该值,比如若pool_size=2,则池化核大小为[2,2,2]。 - - **stride** (int|list|tuple):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示H和W维度上stride均为该值。默认值为kernel_size。 - - **padding** (string|int|list|tuple) 池化填充。如果它是一个元组或列表,它可以有3种格式:(1)包含3个整数值:[pad_depth, pad_height, pad_width];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含5个二元组:当 data_format 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示D、H和W维度上均为该值。默认值:0。 - - **ceil_mode** (bool):是否用ceil函数计算输出高度和宽度。如果是True,则使用 `ceil` 计算输出形状的大小。默认为False + - **x** (Tensor):形状为 [N,C,D,H,W] 或 [N,D,H,W,C] 的 5-D Tensor,N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 + - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含三个整数值,(pool_size_Depth, pool_size_Height, pool_size_Width)。若为一个整数,则表示 D,H 和 W 维度上均为该值,比如若 pool_size=2,则池化核大小为[2,2,2]。 + - 
**stride** (int|list|tuple):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示 H 和 W 维度上 stride 均为该值。默认值为 kernel_size。 + - **padding** (string|int|list|tuple) 池化填充。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 3 个整数值:[pad_depth, pad_height, pad_width];(2)包含 6 个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 5 个二元组:当 data_format 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示 D、H 和 W 维度上均为该值。默认值:0。 + - **ceil_mode** (bool):是否用 ceil 函数计算输出高度和宽度。如果是 True,则使用 `ceil` 计算输出形状的大小。默认为 False - **exclusive** (bool):是否在平均池化模式忽略填充值,默认是 `True`。 - **divisor_override** (int|float):如果指定,它将用作除数,否则根据`kernel_size`计算除数。默认`None`。 - - **data_format** (str):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NDCHW"。 + - **data_format** (str):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NDCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/batch_norm_cn.rst b/docs/api/paddle/nn/functional/batch_norm_cn.rst index 697a3cf109f..0d33a4619ee 100644 --- a/docs/api/paddle/nn/functional/batch_norm_cn.rst +++ b/docs/api/paddle/nn/functional/batch_norm_cn.rst @@ -5,18 +5,18 @@ batch_norm .. 
py:class:: paddle.nn.functional.batch_norm(x, running_mean, running_var, weight, bias, training=False, momentum=0.9, epsilon=1e-05, data_format='NCHW', name=None): -推荐使用nn.BatchNorm1D,nn.BatchNorm2D, nn.BatchNorm3D,由内部调用此方法。 +推荐使用 nn.BatchNorm1D,nn.BatchNorm2D, nn.BatchNorm3D,由内部调用此方法。 详情见 :ref:`cn_api_nn_BatchNorm1D` 。 参数 :::::::::::: - - **x** (int) - 输入,数据类型为float32, float64。 - - **running_mean** (Tensor) - 均值的Tensor。 - - **running_var** (Tensor) - 方差的Tensor。 - - **weight** (Tensor) - 权重的Tensor。 - - **bias** (Tensor) - 偏置的Tensor。 + - **x** (int) - 输入,数据类型为 float32, float64。 + - **running_mean** (Tensor) - 均值的 Tensor。 + - **running_var** (Tensor) - 方差的 Tensor。 + - **weight** (Tensor) - 权重的 Tensor。 + - **bias** (Tensor) - 偏置的 Tensor。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **data_format** (string,可选) - 指定输入数据格式,数据格式可以为“NC", "NCL", "NCHW" 或者"NCDHW"。默认值:"NCHW"。 diff --git a/docs/api/paddle/nn/functional/bilinear_cn.rst b/docs/api/paddle/nn/functional/bilinear_cn.rst index 53956b42e99..bb273896982 100644 --- a/docs/api/paddle/nn/functional/bilinear_cn.rst +++ b/docs/api/paddle/nn/functional/bilinear_cn.rst @@ -14,7 +14,7 @@ bilinear - **x1** (int):第一个输入的 `Tensor`,数据类型为:float32、float64。 - **x2** (int):第二个输入的 `Tensor`,数据类型为:float32、float64。 - **weight** (Parameter):本层的可学习参数。形状是 [out_features, in1_features, in2_features]。 - - **bias** (Parameter,可选):本层的可学习偏置。形状是 [1, out_features]。默认值为None,如果被设置成None,则不会有bias加到output结果上。 + - **bias** (Parameter,可选):本层的可学习偏置。形状是 [1, out_features]。默认值为 None,如果被设置成 None,则不会有 bias 加到 output 结果上。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/binary_cross_entropy_cn.rst b/docs/api/paddle/nn/functional/binary_cross_entropy_cn.rst index 1aa613c080d..35ac58bb53c 100644 --- a/docs/api/paddle/nn/functional/binary_cross_entropy_cn.rst +++ 
b/docs/api/paddle/nn/functional/binary_cross_entropy_cn.rst @@ -31,19 +31,19 @@ binary_cross_entropy .. note:: - 输入数据 ``input`` 一般是 ``sigmoid`` 的输出。因为是二分类,所以标签值 ``label`` 应该是0或者1。 + 输入数据 ``input`` 一般是 ``sigmoid`` 的输出。因为是二分类,所以标签值 ``label`` 应该是 0 或者 1。 参数 ::::::::: - - **input** (Tensor) - :math:`[N, *]`,其中N是batch_size, `*` 是任意其他维度。输入数据 ``input`` 一般是 ``sigmoid`` 的输出。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, *]`,其中 N 是 batch_size, `*` 是任意其他维度。输入数据 ``input`` 一般是 ``sigmoid`` 的输出。数据类型是 float32、float64。 - **label** (Tensor) - :math:`[N, *]`,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 - - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 - - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回bce_loss。 + - **weight** (Tensor,可选) - 手动指定每个 batch 二值交叉熵的权重,如果指定的话,维度必须是一个 batch 的数据的维度。数据类型是 float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回 bce_loss。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - - 输出的结果Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - 输出的结果 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/binary_cross_entropy_with_logits_cn.rst b/docs/api/paddle/nn/functional/binary_cross_entropy_with_logits_cn.rst index 864fd2d4846..1d572aaa75a 100644 --- a/docs/api/paddle/nn/functional/binary_cross_entropy_with_logits_cn.rst +++ b/docs/api/paddle/nn/functional/binary_cross_entropy_with_logits_cn.rst @@ -5,13 +5,13 @@ binary_cross_entropy_with_logits .. 
py:function:: paddle.nn.functional.binary_cross_entropy_with_logits(logit, label, weight=None, reduction='mean', pos_weight=None, name=None) -该OP用于计算输入 `logit` 和标签 `label` 间的 `binary cross entropy with logits loss` 损失。 +该 OP 用于计算输入 `logit` 和标签 `label` 间的 `binary cross entropy with logits loss` 损失。 -该OP结合了 `sigmoid` 操作和 :ref:`api_nn_loss_BCELoss` 操作。同时,我们也可以认为该OP是 ``sigmoid_cross_entrop_with_logits`` 和一些 `reduce` 操作的组合。 +该 OP 结合了 `sigmoid` 操作和 :ref:`api_nn_loss_BCELoss` 操作。同时,我们也可以认为该 OP 是 ``sigmoid_cross_entropy_with_logits`` 和一些 `reduce` 操作的组合。 -在每个类别独立的分类任务中,该OP可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 +在每个类别独立的分类任务中,该 OP 可以计算按元素的概率误差。可以将其视为预测数据点的标签,其中标签不是互斥的。例如,一篇新闻文章可以同时关于政治,科技,体育或者同时不包含这些内容。 -首先,该OP可通过下式计算损失函数: +首先,该 OP 可通过下式计算损失函数: .. math:: Out = -Labels * \log(\sigma(Logit)) - (1 - Labels) * \log(1 - \sigma(Logit)) @@ -21,29 +21,29 @@ binary_cross_entropy_with_logits .. math:: Out = Logit - Logit * Labels + \log(1 + e^{-Logit}) -为了计算稳定性,防止当 :math:`Logit<0` 时,:math:`e^{-Logit}` 溢出,loss将采用以下公式计算: +为了计算稳定性,防止当 :math:`Logit<0` 时,:math:`e^{-Logit}` 溢出,loss 将采用以下公式计算: ..
math:: Out = \max(Logit, 0) - Logit * Labels + \log(1 + e^{-\|Logit\|}) -然后,当 ``weight`` or ``pos_weight`` 不为None的时候,该算子会在输出Out上乘以相应的权重。张量 ``weight`` 给Batch中的每一条数据赋予不同权重,张量 ``pos_weight`` 给每一类的正例添加相应的权重。 +然后,当 ``weight`` or ``pos_weight`` 不为 None 的时候,该算子会在输出 Out 上乘以相应的权重。张量 ``weight`` 给 Batch 中的每一条数据赋予不同权重,张量 ``pos_weight`` 给每一类的正例添加相应的权重。 -最后,该算子会添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)`。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 +最后,该算子会添加 `reduce` 操作到前面的输出 Out 上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)`。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 -**注意:因为是二分类任务,所以标签值应该是0或者1。 +**注意:因为是二分类任务,所以标签值应该是 0 或者 1。** 参数 ::::::::: - - **logit** (Tensor) - :math:`[N, *]`,其中N是batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是线性层的输出,不需要经过 ``sigmoid`` 层。数据类型是float32、float64。 + - **logit** (Tensor) - :math:`[N, *]`,其中 N 是 batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是线性层的输出,不需要经过 ``sigmoid`` 层。数据类型是 float32、float64。 - **label** (Tensor) - :math:`[N, *]`,标签 ``label`` 的维度、数据类型与输入 ``logit`` 相同。 - - **weight** (Tensor,可选) - 手动指定每个batch二值交叉熵的权重,如果指定的话,维度必须是一个batch的数据的维度。数据类型是float32, float64。默认值是:None。 - - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回原始loss。 - - **pos_weight** (Tensor,可选) - 手动指定正类的权重,必须是与类别数相等长度的向量。数据类型是float32, float64。默认值是:None。 + - **weight** (Tensor,可选) - 手动指定每个 batch 二值交叉熵的权重,如果指定的话,维度必须是一个 batch 的数据的维度。数据类型是 float32, float64。默认值是:None。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `BCELoss` 的均值;设置为 ``'sum'`` 时,计算 `BCELoss` 的总和;设置为 ``'none'`` 时,则返回原始 loss。 + - **pos_weight** (Tensor,可选) - 手动指定正类的权重,必须是与类别数相等长度的向量。数据类型是 float32, float64。默认值是:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -
Tensor,输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - Tensor,输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/celu_cn.rst b/docs/api/paddle/nn/functional/celu_cn.rst index 79418dda6c2..ea43ef08e3f 100644 --- a/docs/api/paddle/nn/functional/celu_cn.rst +++ b/docs/api/paddle/nn/functional/celu_cn.rst @@ -5,9 +5,9 @@ celu .. py:function:: paddle.nn.functional.celu(x, alpha=1.0, name=None) -celu激活层(CELU Activation Operator) +celu 激活层(CELU Activation Operator) -根据 `Continuously Differentiable Exponential Linear Units `_ 对输入Tensor中每个元素应用以下计算。 +根据 `Continuously Differentiable Exponential Linear Units `_ 对输入 Tensor 中每个元素应用以下计算。 .. math:: @@ -20,7 +20,7 @@ celu激活层(CELU Activation Operator) :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float16、float32、float64。 - - alpha (float,可选) - celu的alpha值,默认值为1.0。 + - alpha (float,可选) - celu 的 alpha 值,默认值为 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/conv1d_cn.rst b/docs/api/paddle/nn/functional/conv1d_cn.rst index b563861dc8e..058aae14fd7 100755 --- a/docs/api/paddle/nn/functional/conv1d_cn.rst +++ b/docs/api/paddle/nn/functional/conv1d_cn.rst @@ -5,9 +5,9 @@ conv1d .. 
py:function:: paddle.nn.functional.conv1d(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, data_format="NCL", name=None) -该OP是一维卷积层(convolution1d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilation)一组参数计算输出特征层大小。输入和输出是NCL或NLC格式,其中N是批尺寸,C是通道数,L是长度。卷积核是MCL格式,M是输出图像通道数,C是输入图像通道数,L是卷积核长度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 +该 OP 是一维卷积层(convolution1d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilation)一组参数计算输出特征层大小。输入和输出是 NCL 或 NLC 格式,其中 N 是批尺寸,C 是通道数,L 是长度。卷积核是 MCL 格式,M 是输出图像通道数,C 是输入图像通道数,L 是卷积核长度。如果组数(groups)大于 1,C 等于输入图像通道数除以组数的结果。详情请参考 UFLDL's : `卷积 `_ 。如果 bias_attr 不为 False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 -对每个输入X,有等式: +对每个输入 X,有等式: .. math:: @@ -15,12 +15,12 @@ conv1d 其中: - - :math:`X`:输入值,NCL或NLC格式的3-D Tensor - - :math:`W`:卷积核值,MCL格式的3-D Tensor + - :math:`X`:输入值,NCL 或 NLC 格式的 3-D Tensor + - :math:`W`:卷积核值,MCL 格式的 3-D Tensor - :math:`*`:卷积操作 - :math:`b`:偏置值,2-D Tensor,形状为 ``[M,1]`` - :math:`\sigma`:激活函数 - - :math:`Out`:输出值,NCL或NLC格式的3-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCL 或 NLC 格式的 3-D Tensor,和 ``X`` 的形状可能不同 **示例** @@ -53,14 +53,14 @@ conv1d 参数 :::::::::::: - - **x** (Tensor) - 输入是形状为 :math:`[N, C, L]` 或 :math:`[N, L, C]` 的4-D Tensor,N是批尺寸,C是通道数,L是特征长度,数据类型为float16, float32或float64。 - - **weight** (Tensor)) - 形状为 :math:`[M, C/g, kL]` 的卷积核。M是输出通道数,g是分组的个数,kL是卷积核的长度度。 + - **x** (Tensor) - 输入是形状为 :math:`[N, C, L]` 或 :math:`[N, L, C]` 的 3-D Tensor,N 是批尺寸,C 是通道数,L 是特征长度,数据类型为 float16, float32 或 float64。 + - **weight** (Tensor) - 形状为 :math:`[M, C/g, kL]` 的卷积核。M 是输出通道数,g 是分组的个数,kL 是卷积核的长度。 - **bias** (int|list|tuple,可选) - 偏置项,形状为::math:`[M,]` 。 - **stride** (int|list|tuple,可选) - 步长大小。卷积核和输入进行卷积计算时滑动的步长。整数或包含一个整数的列表或元组。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。可以是以下三种格式:(1)字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。(2)整数,表示在输入特征两侧各填充 ``padding`` 大小的0。(3)包含一个整数的列表或元组,表示在输入特征两侧各填充 ``padding[0]`` 
大小的0。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。可以是以下三种格式:(1)字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。(2)整数,表示在输入特征两侧各填充 ``padding`` 大小的 0。(3)包含一个整数的列表或元组,表示在输入特征两侧各填充 ``padding[0]`` 大小的 0。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。整数或包含一个整型数的列表或元组。默认值:1。 - - **groups** (int,可选) - 一维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N是批尺寸,C是通道数,L是特征长度。默认值:"NCL"。 + - **groups** (int,可选) - 一维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N 是批尺寸,C 是通道数,L 是特征长度。默认值:"NCL"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/conv1d_transpose_cn.rst b/docs/api/paddle/nn/functional/conv1d_transpose_cn.rst index f28fa3ea8c4..48a7dfea822 100644 --- a/docs/api/paddle/nn/functional/conv1d_transpose_cn.rst +++ b/docs/api/paddle/nn/functional/conv1d_transpose_cn.rst @@ -10,7 +10,7 @@ conv1d_transpose 一维转置卷积层(Convlution1D transpose layer) -该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCL或NLC格式,其中N为批尺寸,C为通道数(channel),L为特征层长度。卷积核是MCL格式,M是输出图像通道数,C是输入图像通道数,L是卷积核长度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_。如果参数bias_attr不为False,转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 +该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过 output_size 指定输出特征层大小。输入(Input)和输出(Output)为 NCL 或 NLC 格式,其中 N 为批尺寸,C 为通道数(channel),L 为特征层长度。卷积核是 MCL 格式,M 是输出图像通道数,C 是输入图像通道数,L 是卷积核长度。如果组数大于 1,C 
等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_。如果参数 bias_attr 不为 False,转置卷积计算会添加偏置项。如果 act 不为 None,则转置卷积计算之后添加相应的激活函数。 .. _参考文献:https://arxiv.org/pdf/1603.07285.pdf @@ -22,24 +22,24 @@ conv1d_transpose 其中: - - :math:`X`:输入,具有NCL或NLC格式的3-D Tensor - - :math:`W`:卷积核,具有NCL格式的3-D Tensor + - :math:`X`:输入,具有 NCL 或 NLC 格式的 3-D Tensor + - :math:`W`:卷积核,具有 NCL 格式的 3-D Tensor - :math:`*`:卷积计算(注意:转置卷积本质上的计算还是卷积) - :math:`b`:偏置(bias),2-D Tensor,形状为 ``[M,1]`` - :math:`σ`:激活函数 - - :math:`Out`:输出值,NCL或NLC格式的3-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCL 或 NLC 格式的 3-D Tensor,和 ``X`` 的形状可能不同 **示例** - 输入: - 输入Tensor的形状::math:`(N,C_{in}, L_{in})` + 输入 Tensor 的形状::math:`(N,C_{in}, L_{in})` 卷积核的形状::math:`(C_{in}, C_{out}, L_f)` - 输出: - 输出Tensor的形状::math:`(N,C_{out}, L_{out})` + 输出 Tensor 的形状::math:`(N,C_{out}, L_{out})` 其中 @@ -62,23 +62,23 @@ conv1d_transpose 注意: -如果output_size为None,则 :math:`L_{out}` = :math:`L^\prime_{out}`;否则,指定的output_size(输出特征层的长度) :math:`L_{out}` 应当介于 :math:`L^\prime_{out}` 和 :math:`L^\prime_{out} + stride` 之间(不包含 :math:`L^\prime_{out} + stride` )。 +如果 output_size 为 None,则 :math:`L_{out}` = :math:`L^\prime_{out}`;否则,指定的 output_size(输出特征层的长度) :math:`L_{out}` 应当介于 :math:`L^\prime_{out}` 和 :math:`L^\prime_{out} + stride` 之间(不包含 :math:`L^\prime_{out} + stride` )。 由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 参数 :::::::::::: - - **x** (Tensor) - 输入是形状为 :math:`[N, C, L]` 或 :math:`[N, L, C]` 的3-D Tensor,N是批尺寸,C是通道数,L是特征长度,数据类型为float16, float32或float64。 - - **weight** (Tensor) - 形状为 :math:`[C, M/g, kL]` 的卷积核(卷积核)。 M是输出通道数,g是分组的个数,kL是卷积核的长度。 + - **x** (Tensor) - 输入是形状为 :math:`[N, C, L]` 或 :math:`[N, L, C]` 的 3-D Tensor,N 是批尺寸,C 是通道数,L 是特征长度,数据类型为 float16, float32 或 float64。 + - **weight** (Tensor) - 形状为 :math:`[C, M/g, kL]` 的卷积核(卷积核)。 M 是输出通道数,g 是分组的个数,kL 是卷积核的长度。 - **bias** (int|list|tuple,可选) - 偏置项,形状为::math:`[M,]` 。 - **stride** (int|list|tuple,可选) - 
步长大小。整数或包含一个整数的列表或元组。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。可以是以下三种格式:(1)字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。(2)整数,表示在输入特征两侧各填充 ``padding`` 大小的0。(3)包含一个整数的列表或元组,表示在输入特征两侧各填充 ``padding[0]`` 大小的0。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。可以是以下三种格式:(1)字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考下述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。(2)整数,表示在输入特征两侧各填充 ``padding`` 大小的 0。(3)包含一个整数的列表或元组,表示在输入特征两侧各填充 ``padding[0]`` 大小的 0。默认值:0。 - **output_padding** (int|list|tuple, optional):输出形状上尾部一侧额外添加的大小。默认值:0。 - - **groups** (int,可选) - 一维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 + - **groups** (int,可选) - 一维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。整数或包含一个整数的列表或元组。默认值:1。 - - **output_size** (int|list|tuple,可选) - 输出尺寸,整数或包含一个整数的列表或元组。如果为 ``None``,则会用 filter_size(``weight``的shape), ``padding`` 和 ``stride`` 计算出输出特征图的尺寸。默认值:None。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N是批尺寸,C是通道数,L是特征长度。默认值:"NCL"。 + - **output_size** (int|list|tuple,可选) - 输出尺寸,整数或包含一个整数的列表或元组。如果为 ``None``,则会用 filter_size(``weight``的 shape), ``padding`` 和 ``stride`` 计算出输出特征图的尺寸。默认值:None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCL"和"NLC"。N 是批尺寸,C 是通道数,L 是特征长度。默认值:"NCL"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/conv2d_cn.rst b/docs/api/paddle/nn/functional/conv2d_cn.rst index f91cccf68fd..8af3f691ca6 100755 --- a/docs/api/paddle/nn/functional/conv2d_cn.rst +++ b/docs/api/paddle/nn/functional/conv2d_cn.rst @@ -5,9 +5,9 @@ conv2d .. 
py:function:: paddle.nn.functional.conv2d(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, data_format="NCHW", name=None) -该OP是二维卷积层(convolution2d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 +该 OP 是二维卷积层(convolution2d layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算输出特征层大小。输入和输出是 NCHW 或 NHWC 格式,其中 N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。卷积核是 MCHW 格式,M 是输出图像通道数,C 是输入图像通道数,H 是卷积核高度,W 是卷积核宽度。如果组数(groups)大于 1,C 等于输入图像通道数除以组数的结果。详情请参考 UFLDL's : `卷积 `_ 。如果 bias_attr 不为 False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 -对每个输入X,有等式: +对每个输入 X,有等式: .. math:: @@ -15,12 +15,12 @@ conv2d 其中: - - :math:`X`:输入值,NCHW或NHWC格式的4-D Tensor - - :math:`W`:卷积核值,MCHW格式的4-D Tensor + - :math:`X`:输入值,NCHW 或 NHWC 格式的 4-D Tensor + - :math:`W`:卷积核值,MCHW 格式的 4-D Tensor - :math:`*`:卷积操作 - :math:`b`:偏置值,2-D Tensor,形状为 ``[M,1]`` - :math:`\sigma`:激活函数 - - :math:`Out`:输出值,NCHW或NHWC格式的4-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCHW 或 NHWC 格式的 4-D Tensor,和 ``X`` 的形状可能不同 **示例** @@ -61,14 +61,14 @@ conv2d 参数 :::::::::::: - - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 - - **weight** (Tensor)) - 形状为 :math:`[M, C/g, kH, kW]` 的卷积核。M是输出通道数,g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 float16, float32 或 float64。 + - **weight** (Tensor)) - 形状为 :math:`[M, C/g, kH, kW]` 的卷积核。M 是输出通道数,g 是分组的个数,kH 是卷积核的高度,kW 是卷积核的宽度。 - **bias** (int|list|tuple) - 偏置项,形状为::math:`[M,]` 。 - **stride** (int|list|tuple,可选) - 步长大小。卷积核和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 
填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 4 个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含 4 个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含 2 个整数值:[padding_height, padding_width],此时 padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **groups** (int,可选) - 二维卷积层的组数。根据 Alex 
Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/conv2d_transpose_cn.rst b/docs/api/paddle/nn/functional/conv2d_transpose_cn.rst index f8b1906c526..aa51db86780 100644 --- a/docs/api/paddle/nn/functional/conv2d_transpose_cn.rst +++ b/docs/api/paddle/nn/functional/conv2d_transpose_cn.rst @@ -10,7 +10,7 @@ conv2d_transpose 二维转置卷积层(Convlution2D transpose layer) -该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。卷积核是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是卷积核高度,W是卷积核宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_。如果参数bias_attr不为False,转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 +该层根据输入(input)、卷积核(kernel)和空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过 output_size 指定输出特征层大小。输入(Input)和输出(Output)为 NCHW 或 NHWC 格式,其中 N 为批尺寸,C 为通道数(channel),H 为特征层高度,W 为特征层宽度。卷积核是 MCHW 格式,M 是输出图像通道数,C 是输入图像通道数,H 是卷积核高度,W 是卷积核宽度。如果组数大于 1,C 等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_。如果参数 bias_attr 不为 False,转置卷积计算会添加偏置项。如果 act 不为 None,则转置卷积计算之后添加相应的激活函数。 .. 
_参考文献:https://arxiv.org/pdf/1603.07285.pdf @@ -22,24 +22,24 @@ conv2d_transpose 其中: - - :math:`X`:输入,具有NCHW或NHWC格式的4-D Tensor - - :math:`W`:卷积核,具有NCHW格式的4-D Tensor + - :math:`X`:输入,具有 NCHW 或 NHWC 格式的 4-D Tensor + - :math:`W`:卷积核,具有 NCHW 格式的 4-D Tensor - :math:`*`:卷积计算(注意:转置卷积本质上的计算还是卷积) - :math:`b`:偏置(bias),2-D Tensor,形状为 ``[M,1]`` - :math:`σ`:激活函数 - - :math:`Out`:输出值,NCHW或NHWC格式的4-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCHW 或 NHWC 格式的 4-D Tensor,和 ``X`` 的形状可能不同 **示例** - 输入: - 输入Tensor的形状::math:`(N,C_{in}, H_{in}, W_{in})` + 输入 Tensor 的形状::math:`(N,C_{in}, H_{in}, W_{in})` 卷积核的形状::math:`(C_{in}, C_{out}, H_f, W_f)` - 输出: - 输出Tensor的形状::math:`(N,C_{out}, H_{out}, W_{out})` + 输出 Tensor 的形状::math:`(N,C_{out}, H_{out}, W_{out})` 其中 @@ -64,23 +64,23 @@ conv2d_transpose 注意: -如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ),并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 +如果 output_size 为 None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`;否则,指定的 output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ),并且指定的 output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 参数 :::::::::::: - - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 - - **weight** (Tensor) - 形状为 :math:`[C, M/g, kH, kW]` 的卷积核(卷积核)。 
M是输出通道数,g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **x** (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 float16, float32 或 float64。 + - **weight** (Tensor) - 形状为 :math:`[C, M/g, kH, kW]` 的卷积核(卷积核)。 M 是输出通道数,g 是分组的个数,kH 是卷积核的高度,kW 是卷积核的宽度。 - **bias** (int|list|tuple) - 偏置项,形状为::math:`[M,]` 。 - **stride** (int|list|tuple,可选) - 步长大小。如果 ``stride`` 为元组,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride``。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 4 个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含 4 个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含 2 个整数值:[padding_height, padding_width],此时 padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 - **output_padding** 
(int|list|tuple, optional):输出形状上一侧额外添加的大小。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 - - **output_size** (int|list|tuple,可选) - 输出尺寸,整数或包含一个整数的列表或元组。如果为 ``None``,则会用 filter_size(``weight``的shape), ``padding`` 和 ``stride`` 计算出输出特征图的尺寸。默认值:None。 + - **groups** (int,可选) - 二维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 + - **output_size** (int|list|tuple,可选) - 输出尺寸,整数或包含一个整数的列表或元组。如果为 ``None``,则会用 filter_size(``weight``的 shape), ``padding`` 和 ``stride`` 计算出输出特征图的尺寸。默认值:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/conv3d_cn.rst b/docs/api/paddle/nn/functional/conv3d_cn.rst index c6f1951ac6d..caecd9ebaf4 100755 --- a/docs/api/paddle/nn/functional/conv3d_cn.rst +++ b/docs/api/paddle/nn/functional/conv3d_cn.rst @@ -5,9 +5,9 @@ conv3d .. 
py:function:: paddle.nn.functional.conv3d(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, data_format="NCDHW", name=None) -三维卷积层(convolution3D layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDHWC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。 +三维卷积层(convolution3D layer),根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)一组参数计算得到输出特征层大小。输入和输出是 NCDHW 或 NDHWC 格式,其中 N 是批尺寸,C 是通道数,D 是特征层深度,H 是特征层高度,W 是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果 bias_attr 不为 False,卷积计算会添加偏置项。 -对每个输入X,有等式: +对每个输入 X,有等式: .. math:: @@ -15,12 +15,12 @@ conv3d 其中: - - :math:`X`:输入值,NCDHW或NDHWC格式的5-D Tensor - - :math:`W`:卷积核值,MCDHW格式的5-D Tensor + - :math:`X`:输入值,NCDHW 或 NDHWC 格式的 5-D Tensor + - :math:`W`:卷积核值,MCDHW 格式的 5-D Tensor - :math:`*`:卷积操作 - :math:`b`:偏置值,2-D Tensor,形为 ``[M,1]`` - :math:`\sigma`:激活函数 - - :math:`Out`:输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCDHW 或 NDHWC 格式的 5-D Tensor,和 ``X`` 的形状可能不同 **示例** @@ -37,16 +37,16 @@ conv3d 参数 :::::::::::: - - **x** (Tensor) - 输入是形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征层深度,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 - - **weight** (Tensor) - 形状为 :math:`[M, C/g, kH, kW]` 的卷积核(卷积核)。 M是输出通道数,g是分组的个数,kH是卷积核的高度,kW是卷积核的宽度。 + - **x** (Tensor) - 输入是形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的 5-D Tensor,N 是批尺寸,C 是通道数,D 是特征层深度,H 是特征高度,W 是特征宽度,数据类型为 float16, float32 或 float64。 + - **weight** (Tensor) - 形状为 :math:`[M, C/g, kH, kW]` 的卷积核(卷积核)。 M 是输出通道数,g 是分组的个数,kH 是卷积核的高度,kW 是卷积核的宽度。 - **bias** (int|list|tuple,可选) - 偏置项,形状为::math:`[M,]` 。 - **stride** (int|list|tuple,可选) - 步长大小。卷积核和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = 
"VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 5 个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含 6 个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含 3 个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = 
dilation_width = dilation。默认值:1。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 - - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征层深度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **groups** (int,可选) - 三维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为 bool 类型,只支持为 False,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征层深度,H 是特征高度,W 是特征宽度。默认值:"NCDHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 @@ -60,14 +60,14 @@ Tensor。 抛出异常 :::::::::::: - - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``use_cudnn`` 不是 bool 值。 - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 - - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 - - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ValueError`` - 如果 ``padding`` 含有 5 个二元组,与批尺寸对应维度的值不为 0 或者与通道对应维度的值不为 0。 + - ``ShapeError`` - 如果输入不是 5-D Tensor。 - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 - - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是 2。 - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 diff --git 
a/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst b/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst index 663bf3c516c..069faccf319 100755 --- a/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst +++ b/docs/api/paddle/nn/functional/conv3d_transpose_cn.rst @@ -11,7 +11,7 @@ conv3d_transpose 三维转置卷积层(Convlution3d transpose layer) -该层根据输入(input)、卷积核(kernel)和卷积核空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_。如果参数bias_attr不为False,转置卷积计算会添加偏置项。 +该层根据输入(input)、卷积核(kernel)和卷积核空洞大小(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过 output_size 指定输出特征层大小。输入(Input)和输出(Output)为 NCDHW 或者 NDHWC 格式。其中 N 为批尺寸,C 为通道数(channel),D 为特征深度,H 为特征层高度,W 为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_。如果参数 bias_attr 不为 False,转置卷积计算会添加偏置项。 .. _参考文献:https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf @@ -22,24 +22,24 @@ conv3d_transpose 其中: - - :math:`X`:输入,具有NCDHW或NDHWC格式的5-D Tensor - - :math:`W`:卷积核,具有NCDHW格式的5-D Tensor + - :math:`X`:输入,具有 NCDHW 或 NDHWC 格式的 5-D Tensor + - :math:`W`:卷积核,具有 NCDHW 格式的 5-D Tensor - :math:`*`:卷积操作(注意:转置卷积本质上的计算还是卷积) - :math:`b`:偏置(bias),2-D Tensor,形状为 ``[M,1]`` - :math:`σ`:激活函数 - - :math:`Out`:输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + - :math:`Out`:输出值,NCDHW 或 NDHWC 格式的 5-D Tensor,和 ``X`` 的形状可能不同 **示例** 输入: - 输入的shape::math:`(N,C_{in}, D_{in}, H_{in}, W_{in})` + 输入的 shape::math:`(N,C_{in}, D_{in}, H_{in}, W_{in})` - 卷积核的shape::math:`(C_{in}, C_{out}, D_f, H_f, W_f)` + 卷积核的 shape::math:`(C_{in}, C_{out}, D_f, H_f, W_f)` 输出: - 输出的shape::math:`(N,C_{out}, D_{out}, H_{out}, W_{out})` + 输出的 shape::math:`(N,C_{out}, D_{out}, H_{out}, W_{out})` 其中: @@ -69,23 +69,23 @@ conv3d_transpose 注意: -如果output_size为None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = 
:math:`W^\prime_{out}`;否则,指定的output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ),并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 +如果 output_size 为 None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`;否则,指定的 output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的 output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ),并且指定的 output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 参数 :::::::::::: - - **x** (Tensor) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型:float32或float64。 - - **weight** (Tensor) - 形状为 :math:`[C, M/g, kD, kH, kW]` 的卷积核。M是输出通道数,g是分组的个数,kD是卷积核的深度,kH是卷积核的高度,kW是卷积核的宽度。 + - **x** (Tensor) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的 5-D Tensor,N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度,数据类型:float32 或 float64。 + - **weight** (Tensor) - 形状为 :math:`[C, M/g, kD, kH, kW]` 的卷积核。M 是输出通道数,g 是分组的个数,kD 是卷积核的深度,kH 是卷积核的高度,kW 是卷积核的宽度。 - **bias** (int|list|tuple) - 偏置项,形状为::math:`[M,]` 。 - **stride** (int|list|tuple,可选) - 步长大小。如果 ``stride`` 为元组或列表,则必须包含三个整型数,分别表示深度,垂直和水平滑动步长。否则,表示深度,垂直和水平滑动步长均为 ``stride``。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 
填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含3个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 + - **padding** (int|list|tuple|str,可选) - 填充 padding 大小。padding 参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个 0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 5 个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]];(2)包含 6 个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 3 个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 - **output_padding** (int|list|tuple, optional):输出形状上一侧额外添加的大小。默认值:0。 - **dilation** (int|list|tuple,可选) - 
空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 - - **groups** (int,可选) - 三维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和卷积核分别根据通道数量平均分为两组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算。默认:group = 1。 - - **output_size** (int|list|tuple,可选) - 输出尺寸,整数或包含一个整数的列表或元组。如果为 ``None``,则会用 filter_size(``weight``的shape), ``padding`` 和 ``stride`` 计算出输出特征图的尺寸。默认值:None。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **groups** (int,可选) - 三维转置卷积层的组数。从 Alex Krizhevsky 的 CNN Deep 论文中的群卷积中受到启发,当 group=2 时,输入和卷积核分别根据通道数量平均分为两组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算。默认:group = 1。 + - **output_size** (int|list|tuple,可选) - 输出尺寸,整数或包含一个整数的列表或元组。如果为 ``None``,则会用 filter_size(``weight``的 shape), ``padding`` 和 ``stride`` 计算出输出特征图的尺寸。默认值:None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -100,14 +100,14 @@ Tensor 抛出异常 :::::::::::: - - ``ValueError`` - 如果输入的shape、kernel_size、stride、padding和groups不匹配。 + - ``ValueError`` - 如果输入的 shape、kernel_size、stride、padding 和 groups 不匹配。 - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 - - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 - - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 - - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ValueError`` - 如果 ``padding`` 含有 5 个二元组,与批尺寸对应维度的值不为 0 或者与通道对应维度的值不为 0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为 None。 + - ``ShapeError`` - 如果输入不是 5-D Tensor。 - ``ShapeError`` - 如果输入和卷积核的维度大小不相同。 - - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是 2。 代码示例 :::::::::::: diff --git 
a/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst b/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst index 3bc66432ab8..944536ddb7e 100644 --- a/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst +++ b/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst @@ -5,35 +5,35 @@ cosine_embedding_loss .. py:function:: paddle.nn.functional.cosine_embedding_loss(input1, input2, label, margin=0, reduction='mean', name=None) -该函数计算输入input1, input2和label之间的 `CosineEmbedding` 损失 +该函数计算输入 input1, input2 和 label 之间的 `CosineEmbedding` 损失 -如果label=1,则该损失函数的数学计算公式如下: +如果 label=1,则该损失函数的数学计算公式如下: .. math:: Out = 1 - cos(input1, input2) -如果label=-1,则该损失函数的数学计算公式如下: +如果 label=-1,则该损失函数的数学计算公式如下: .. math:: Out = max(0, cos(input1, input2)) - margin -其中cos计算公式如下: +其中 cos 计算公式如下: .. math:: cos(x1, x2) = \frac{x1 \cdot{} x2}{\Vert x1 \Vert_2 * \Vert x2 \Vert_2} 参数 ::::::::: - - **input1** (Tensor): - 输入的Tensor,维度是[N, M],其中N是batch size,可为0,M是数组长度。数据类型为:float32、float64。 - - **input2** (Tensor): - 输入的Tensor,维度是[N, M],其中N是batch size,可为0,M是数组长度。数据类型为:float32、float64。 - - **label** (Tensor): - 标签,维度是[N],N是数组长度,数据类型为:float32、float64、int32、int64。 - - **margin** (float,可选): - 可以设置的范围为[-1, 1],建议设置的范围为[0, 0.5]。其默认为 `0`。数据类型为float。 - - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `CosineEmbeddingLoss` 的均值;设置为 ``'sum'`` 时,计算 `CosineEmbeddingLoss` 的总和;设置为 ``'none'`` 时,则返回 `CosineEmbeddingLoss`。数据类型为string。 + - **input1** (Tensor): - 输入的 Tensor,维度是[N, M],其中 N 是 batch size,可为 0,M 是数组长度。数据类型为:float32、float64。 + - **input2** (Tensor): - 输入的 Tensor,维度是[N, M],其中 N 是 batch size,可为 0,M 是数组长度。数据类型为:float32、float64。 + - **label** (Tensor): - 标签,维度是[N],N 是数组长度,数据类型为:float32、float64、int32、int64。 + - **margin** (float,可选): - 可以设置的范围为[-1, 1],建议设置的范围为[0, 0.5]。其默认为 `0`。数据类型为 float。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `CosineEmbeddingLoss` 的均值;设置为 ``'sum'`` 时,计算 
`CosineEmbeddingLoss` 的总和;设置为 ``'none'`` 时,则返回 `CosineEmbeddingLoss`。数据类型为 string。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,输入 ``input1`` 、 ``input2`` 和标签 ``label`` 间的 `CosineEmbeddingLoss` 损失。如果 `reduction` 是 ``'none'``,则输出Loss的维度为 [N],与输入 ``input1`` 和 ``input2`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。 +``Tensor``,输入 ``input1`` 、 ``input2`` 和标签 ``label`` 间的 `CosineEmbeddingLoss` 损失。如果 `reduction` 是 ``'none'``,则输出 Loss 的维度为 [N],与输入 ``input1`` 和 ``input2`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``,则输出 Loss 的维度为 [1]。 代码示例 diff --git a/docs/api/paddle/nn/functional/cosine_similarity_cn.rst b/docs/api/paddle/nn/functional/cosine_similarity_cn.rst index 475fbcbf715..5b64b913f54 100644 --- a/docs/api/paddle/nn/functional/cosine_similarity_cn.rst +++ b/docs/api/paddle/nn/functional/cosine_similarity_cn.rst @@ -5,20 +5,20 @@ cosine_similarity .. py:function:: paddle.nn.functional.cosine_similarity(x1, x2, axis=1, eps=1e-8) -该OP用于计算x1与x2沿axis维度的余弦相似度。 +该 OP 用于计算 x1 与 x2 沿 axis 维度的余弦相似度。 参数 :::::::::::: - - **x1** (Tensor) - Tensor,数据类型支持float32, float64。 - - **x2** (Tensor) - Tensor,数据类型支持float32, float64。 - - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为1。 - - **eps** (float) - 很小的值,防止计算时分母为0,默认值为1e-8。 + - **x1** (Tensor) - Tensor,数据类型支持 float32, float64。 + - **x2** (Tensor) - Tensor,数据类型支持 float32, float64。 + - **axis** (int) - 指定计算的维度,会在该维度上计算余弦相似度,默认值为 1。 + - **eps** (float) - 很小的值,防止计算时分母为 0,默认值为 1e-8。 返回 :::::::::::: -Tensor,余弦相似度的计算结果,数据类型与x1, x2相同。 +Tensor,余弦相似度的计算结果,数据类型与 x1, x2 相同。 diff --git a/docs/api/paddle/nn/functional/cross_entropy_cn.rst b/docs/api/paddle/nn/functional/cross_entropy_cn.rst index d506c5b6ce1..9e17cb9a9b9 100644 --- a/docs/api/paddle/nn/functional/cross_entropy_cn.rst +++ b/docs/api/paddle/nn/functional/cross_entropy_cn.rst @@ -5,35 +5,35 @@ cross_entropy .. 
py:function:: paddle.nn.functional.cross_entropy(input, label, weight=None, ignore_index=-100, reduction="mean", soft_label=False, axis=-1, name=None) -该OP实现了softmax交叉熵损失函数。该函数会将softmax操作、交叉熵损失函数的计算过程进行合并,从而提供了数值上更稳定的计算。 +该 OP 实现了 softmax 交叉熵损失函数。该函数会将 softmax 操作、交叉熵损失函数的计算过程进行合并,从而提供了数值上更稳定的计算。 -该OP默认会对结果进行求mean计算,您也可以影响该默认行为,具体参考reduction参数说明。 +该 OP 默认会对结果进行求 mean 计算,您也可以影响该默认行为,具体参考 reduction 参数说明。 -该OP可用于计算硬标签或软标签的交叉熵。其中,硬标签是指实际label值,例如:0, 1, 2...,软标签是指实际label的概率,例如:0.6, 0,8, 0,2..。 +该 OP 可用于计算硬标签或软标签的交叉熵。其中,硬标签是指实际 label 值,例如:0, 1, 2...,软标签是指实际 label 的概率,例如:0.6, 0,8, 0,2..。 -该OP的计算包括以下两个步骤: +该 OP 的计算包括以下两个步骤: -- **一。softmax交叉熵** +- **一。softmax 交叉熵** 1. 硬标签(每个样本仅可分到一个类别) .. math:: \\loss_j=-\text{logits}_{label_j}+\log\left(\sum_{i=0}^{C}\exp(\text{logits}_i)\right) - , j = 1,...,N, N为样本数,C为类别数 + , j = 1,...,N, N 为样本数,C 为类别数 -2. 软标签(每个样本以一定的概率被分配至多个类别中,概率和为1) +2. 软标签(每个样本以一定的概率被分配至多个类别中,概率和为 1) .. math:: \\loss_j=-\sum_{i=0}^{C}\text{label}_i\left(\text{logits}_i-\log\left(\sum_{i=0}^{C}\exp(\text{logits}_i)\right)\right) - , j = 1,...,N, N为样本数,C为类别数 + , j = 1,...,N, N 为样本数,C 为类别数 -- **二。weight及reduction处理** +- **二。weight 及 reduction 处理** -1. weight情况 +1. weight 情况 如果 ``weight`` 参数为 ``None``,则直接进入下一步。 -如果 ``weight`` 参数不为 ``None``,则对每个样本的交叉熵进行weight加权(区分soft_label = False or True): +如果 ``weight`` 参数不为 ``None``,则对每个样本的交叉熵进行 weight 加权(区分 soft_label = False or True): 1.1。硬标签情况(soft_label = False) @@ -45,7 +45,7 @@ cross_entropy .. math:: \\loss_j=loss_j*\sum_{i}\left(weight[label_i]*logits_i\right) -2. reduction情况 +2. reduction 情况 2.1 如果 ``reduction`` 参数为 ``none`` @@ -65,7 +65,7 @@ cross_entropy 则返回上一步结果的平均值 .. 
math:: - \\loss=\sum_{j}loss_j/N, N为样本数 + \\loss=\sum_{j}loss_j/N, N 为样本数 2.3.2。如果 ``weight`` 参数不为 ``None``,则返回上一步结果的加权平均值 @@ -81,19 +81,19 @@ cross_entropy 参数 ::::::::: - - **input** (Tensor) – 维度为 :math:`[N_1, N_2, ..., N_k, C]` 的多维Tensor,其中最后一维C是类别数目。数据类型为float32或float64。它需要未缩放的 ``input``。该OP不应该对softmax运算的输出进行操作,否则会产生错误的结果。 - - **label** (Tensor) – 输入input对应的标签值。若soft_label=False,要求label维度为 :math:`[N_1, N_2, ..., N_k]` 或 :math:`[N_1, N_2, ..., N_k, 1]`,数据类型为'int32', 'int64', 'float32', 'float64',且值必须大于等于0且小于C;若soft_label=True,要求label的维度、数据类型与input相同,且每个样本各软标签的总和为1。 - - **weight** (Tensor, optional) – 权重张量,需要手动给每个类调整权重,形状是(C)。它的维度与类别相同,数据类型为float32,float64。默认值为None。 - - **ignore_index** (int) – 指定一个忽略的标签值,此标签值不参与计算,负值表示无需忽略任何标签值。仅在soft_label=False时有效。默认值为-100。 - - **reduction** (str, optional) – 指示如何按批次大小平均损失,可选值为"none","mean","sum",如果选择是"mean",则返回reduce后的平均损失;如果选择是"sum",则返回reduce后的总损失。如果选择是"none",则返回没有reduce的损失。默认值是“mean”。 - - **soft_label** (bool, optional) – 指明label是否为软标签。默认为False,表示label为硬标签;若soft_label=True则表示软标签。 - - **axis** (int, optional) - 进行softmax计算的维度索引。它应该在 :math:`[-1,dim-1]` 范围内,而 ``dim`` 是输入logits的维度。默认值:-1。 - - **use_softmax** (bool, optional) - 指定是否对input进行softmax归一化。默认值:True。 + - **input** (Tensor) – 维度为 :math:`[N_1, N_2, ..., N_k, C]` 的多维 Tensor,其中最后一维 C 是类别数目。数据类型为 float32 或 float64。它需要未缩放的 ``input``。该 OP 不应该对 softmax 运算的输出进行操作,否则会产生错误的结果。 + - **label** (Tensor) – 输入 input 对应的标签值。若 soft_label=False,要求 label 维度为 :math:`[N_1, N_2, ..., N_k]` 或 :math:`[N_1, N_2, ..., N_k, 1]`,数据类型为'int32', 'int64', 'float32', 'float64',且值必须大于等于 0 且小于 C;若 soft_label=True,要求 label 的维度、数据类型与 input 相同,且每个样本各软标签的总和为 1。 + - **weight** (Tensor, optional) – 权重张量,需要手动给每个类调整权重,形状是(C)。它的维度与类别相同,数据类型为 float32,float64。默认值为 None。 + - **ignore_index** (int) – 指定一个忽略的标签值,此标签值不参与计算,负值表示无需忽略任何标签值。仅在 soft_label=False 时有效。默认值为-100。 + - **reduction** (str, optional) – 指示如何按批次大小平均损失,可选值为"none","mean","sum",如果选择是"mean",则返回 reduce 后的平均损失;如果选择是"sum",则返回 reduce 后的总损失。如果选择是"none",则返回没有 
reduce 的损失。默认值是“mean”。 + - **soft_label** (bool, optional) – 指明 label 是否为软标签。默认为 False,表示 label 为硬标签;若 soft_label=True 则表示软标签。 + - **axis** (int, optional) - 进行 softmax 计算的维度索引。它应该在 :math:`[-1,dim-1]` 范围内,而 ``dim`` 是输入 logits 的维度。默认值:-1。 + - **use_softmax** (bool, optional) - 指定是否对 input 进行 softmax 归一化。默认值:True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -表示交叉熵结果的Tensor,数据类型与input相同。若soft_label=False,则返回值维度与label维度相同;若soft_label=True,则返回值维度为 :math:`[N_1, N_2, ..., N_k, 1]` 。 +表示交叉熵结果的 Tensor,数据类型与 input 相同。若 soft_label=False,则返回值维度与 label 维度相同;若 soft_label=True,则返回值维度为 :math:`[N_1, N_2, ..., N_k, 1]` 。 代码示例 diff --git a/docs/api/paddle/nn/functional/ctc_loss_cn.rst b/docs/api/paddle/nn/functional/ctc_loss_cn.rst index fbd67f034ef..771e180cb2d 100644 --- a/docs/api/paddle/nn/functional/ctc_loss_cn.rst +++ b/docs/api/paddle/nn/functional/ctc_loss_cn.rst @@ -10,16 +10,16 @@ ctc_loss 参数 ::::::::: - - **log_probs** (Tensor): - 经过 padding 的概率序列,其 shape 必须是 [max_logit_length, batch_size, num_classes + 1]。其中 max_logit_length 是最长输入序列的长度。该输入不需要经过 softmax 操作,因为该 OP 的内部对 input 做了 softmax 操作。数据类型仅支持float32。 - - **labels** (Tensor): - 经过 padding 的标签序列,其 shape 为 [batch_size, max_label_length],其中 max_label_length 是最长的 label 序列的长度。数据类型支持int32。 - - **input_lengths** (Tensor): - 表示输入 ``log_probs`` 数据中每个序列的长度,shape为 [batch_size]。数据类型支持int64。 - - **label_lengths** (Tensor): - 表示 label 中每个序列的长度,shape为 [batch_size]。数据类型支持int64。 - - **blank** (int,可选): - 空格标记的 ID 值,其取值范围为 [0,num_classes+1)。数据类型支持int32。默认值为0。 + - **log_probs** (Tensor): - 经过 padding 的概率序列,其 shape 必须是 [max_logit_length, batch_size, num_classes + 1]。其中 max_logit_length 是最长输入序列的长度。该输入不需要经过 softmax 操作,因为该 OP 的内部对 input 做了 softmax 操作。数据类型仅支持 float32。 + - **labels** (Tensor): - 经过 padding 的标签序列,其 shape 为 [batch_size, max_label_length],其中 max_label_length 是最长的 label 序列的长度。数据类型支持 int32。 + - **input_lengths** (Tensor): - 表示输入 ``log_probs`` 数据中每个序列的长度,shape 为 [batch_size]。数据类型支持 int64。 + - 
**label_lengths** (Tensor): - 表示 label 中每个序列的长度,shape 为 [batch_size]。数据类型支持 int64。 + - **blank** (int,可选): - 空格标记的 ID 值,其取值范围为 [0,num_classes+1)。数据类型支持 int32。默认值为 0。 - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。设置为 ``'mean'`` 时,对 loss 值除以 label_lengths,并返回所得商的均值;设置为 ``'sum'`` 时,返回 loss 值的总和;设置为 ``'none'`` 时,则直接返回输出的 loss 值。默认值为 ``'mean'``。 返回 ::::::::: -``Tensor``,输入 ``log_probs`` 和标签 ``labels`` 间的 `ctc loss`。如果 :attr:`reduction` 是 ``'none'``,则输出 loss 的维度为 [batch_size]。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。数据类型与输入 ``log_probs`` 一致。 +``Tensor``,输入 ``log_probs`` 和标签 ``labels`` 间的 `ctc loss`。如果 :attr:`reduction` 是 ``'none'``,则输出 loss 的维度为 [batch_size]。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出 Loss 的维度为 [1]。数据类型与输入 ``log_probs`` 一致。 代码示例 diff --git a/docs/api/paddle/nn/functional/dice_loss_cn.rst b/docs/api/paddle/nn/functional/dice_loss_cn.rst index f3a524d7f3d..c316977d70f 100644 --- a/docs/api/paddle/nn/functional/dice_loss_cn.rst +++ b/docs/api/paddle/nn/functional/dice_loss_cn.rst @@ -8,9 +8,9 @@ dice_loss -该OP用来比较预测结果跟标签之间的相似度,通常用于二值图像分割,即标签为二值,也可以做多标签的分割。 +该 OP 用来比较预测结果跟标签之间的相似度,通常用于二值图像分割,即标签为二值,也可以做多标签的分割。 -dice_loss定义为: +dice_loss 定义为: .. 
math:: dice\_loss &= 1- \frac{2 * intersection\_area}{total\_rea}\\ @@ -20,13 +20,13 @@ dice_loss定义为: 参数 :::::::::::: - - **input** (Tensor) - 分类的预测概率,秩大于等于2的多维Tensor,维度为 :math:`[N_1, N_2, ..., N_k, D]`。第一个维度的大小是batch_size,最后一维的大小D是类别数目。数据类型是float32或者float64 - - **label** (Tensor)- 正确的标注数据(groud truth),与输入 ``input`` 的秩相同的Tensor,维度为 :math:`[N_1, N_2, ..., N_k, 1]`。第一个维度的大小是batch_size,最后一个维度的大小是1。数据类型为int32或者int64 - - **epsilon** (float,可选) - 将会加到分子和分母上的数,浮点型的数值。如果输入和标签都为空,则确保dice为1。默认值:0.00001 + - **input** (Tensor) - 分类的预测概率,秩大于等于 2 的多维 Tensor,维度为 :math:`[N_1, N_2, ..., N_k, D]`。第一个维度的大小是 batch_size,最后一维的大小 D 是类别数目。数据类型是 float32 或者 float64 + - **label** (Tensor)- 正确的标注数据(ground truth),与输入 ``input`` 的秩相同的 Tensor,维度为 :math:`[N_1, N_2, ..., N_k, 1]`。第一个维度的大小是 batch_size,最后一个维度的大小是 1。数据类型为 int32 或者 int64 + - **epsilon** (float,可选) - 将会加到分子和分母上的数,浮点型的数值。如果输入和标签都为空,则确保 dice 为 1。默认值:0.00001 返回 :::::::::::: - Tensor,shape为[batch_size, 1],数据类型与 ``input`` 相同 + Tensor,shape 为[batch_size, 1],数据类型与 ``input`` 相同 代码示例 diff --git a/docs/api/paddle/nn/functional/dropout2d_cn.rst b/docs/api/paddle/nn/functional/dropout2d_cn.rst index f57827431ad..c026c4d5071 100644 --- a/docs/api/paddle/nn/functional/dropout2d_cn.rst +++ b/docs/api/paddle/nn/functional/dropout2d_cn.rst @@ -5,21 +5,21 @@ dropout2d .. py:function:: paddle.nn.functional.dropout2d(x, p=0.5, training=True, name=None) -该算子根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCHW` 的4维张量,通道特征图指的是其中的形状为 `HW` 的2维特征图)。 +该算子根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置 0(对一个形状为 `NCHW` 的 4 维张量,通道特征图指的是其中的形状为 `HW` 的 2 维特征图)。 ..
note:: - 该op基于 ``paddle.nn.functional.dropout`` 实现,如您想了解更多,请参见 :ref:`cn_api_nn_functional_dropout` 。 + 该 op 基于 ``paddle.nn.functional.dropout`` 实现,如您想了解更多,请参见 :ref:`cn_api_nn_functional_dropout` 。 参数 ::::::::: - - **x** (Tensor):形状为[N, C, H, W]或[N, H, W, C]的4D `Tensor`,数据类型为float32或float64。 - - **p** (float):将输入通道置0的概率,即丢弃概率。默认:0.5。 + - **x** (Tensor):形状为[N, C, H, W]或[N, H, W, C]的 4D `Tensor`,数据类型为 float32 或 float64。 + - **p** (float):将输入通道置 0 的概率,即丢弃概率。默认:0.5。 - **training** (bool):标记是否为训练阶段。默认:True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -经过dropout2d之后的结果,与输入x形状相同的 `Tensor` 。 +经过 dropout2d 之后的结果,与输入 x 形状相同的 `Tensor` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/dropout3d_cn.rst b/docs/api/paddle/nn/functional/dropout3d_cn.rst index 087345d3af1..f1d98c9d8f6 100644 --- a/docs/api/paddle/nn/functional/dropout3d_cn.rst +++ b/docs/api/paddle/nn/functional/dropout3d_cn.rst @@ -5,21 +5,21 @@ dropout3d .. py:function:: paddle.nn.functional.dropout3d(x, p=0.5, training=True, name=None) -该算子根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置0(对一个形状为 `NCDHW` 的5维张量,通道指的是其中的形状为 `DHW` 的3维特征图)。 +该算子根据丢弃概率 `p`,在训练过程中随机将某些通道特征图置 0(对一个形状为 `NCDHW` 的 5 维张量,通道指的是其中的形状为 `DHW` 的 3 维特征图)。 .. 
note:: - 该op基于 ``paddle.nn.functional.dropout`` 实现,如您想了解更多,请参见 :ref:`cn_api_nn_functional_dropout` 。 + 该 op 基于 ``paddle.nn.functional.dropout`` 实现,如您想了解更多,请参见 :ref:`cn_api_nn_functional_dropout` 。 参数 ::::::::: - - **x** (Tensor):形状为[N, C, D, H, W]或[N, D, H, W, C]的5D `Tensor`,数据类型为float32或float64。 - - **p** (float):将输入通道置0的概率,即丢弃概率。默认:0.5。 + - **x** (Tensor):形状为[N, C, D, H, W]或[N, D, H, W, C]的 5D `Tensor`,数据类型为 float32 或 float64。 + - **p** (float):将输入通道置 0 的概率,即丢弃概率。默认:0.5。 - **training** (bool):标记是否为训练阶段。默认:True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -经过dropout3d之后的结果,与输入x形状相同的 `Tensor` 。 +经过 dropout3d 之后的结果,与输入 x 形状相同的 `Tensor` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/dropout_cn.rst b/docs/api/paddle/nn/functional/dropout_cn.rst index 8b6d6d4a459..4c04bb1b372 100644 --- a/docs/api/paddle/nn/functional/dropout_cn.rst +++ b/docs/api/paddle/nn/functional/dropout_cn.rst @@ -5,13 +5,13 @@ dropout .. py:function:: paddle.nn.functional.dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train”, name=None) -Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p`,在训练过程中随机将一些神经元输出设置为0,通过阻止神经元节点间的相关性来减少过拟合。 +Dropout 是一种正则化手段,该算子根据给定的丢弃概率 `p`,在训练过程中随机将一些神经元输出设置为 0,通过阻止神经元节点间的相关性来减少过拟合。 参数 ::::::::: - **x** (Tensor):输入的多维 `Tensor`,数据类型为:float32、float64。 - - **p** (float):将输入节点置0的概率,即丢弃概率。默认:0.5。 - - **axis** (int|list):指定对输入 `Tensor` 进行dropout操作的轴。默认:None。 + - **p** (float):将输入节点置 0 的概率,即丢弃概率。默认:0.5。 + - **axis** (int|list):指定对输入 `Tensor` 进行 dropout 操作的轴。默认:None。 - **training** (bool):标记是否为训练阶段。默认:True。 - **mode** (str):丢弃单元的方式,有两种'upscale_in_train'和'downscale_in_infer',默认:'upscale_in_train'。计算方法如下: @@ -29,62 +29,62 @@ Dropout是一种正则化手段,该算子根据给定的丢弃概率 `p`,在 返回 ::::::::: -经过dropout之后的结果,与输入x形状相同的 `Tensor` 。 +经过 dropout 之后的结果,与输入 x 形状相同的 `Tensor` 。 -使用示例1 +使用示例 1 ::::::::: -axis参数的默认值为None。当 ``axis=None`` 时,dropout的功能为:对输入张量x中的任意元素,以丢弃概率p随机将一些元素输出置0。这是我们最常见的dropout用法。 +axis 参数的默认值为 None。当 ``axis=None`` 时,dropout 的功能为:对输入张量 x 
中的任意元素,以丢弃概率 p 随机将一些元素输出置 0。这是我们最常见的 dropout 用法。 - 下面以一个示例来解释它的实现逻辑,同时展示其它参数的含义。 .. code-block:: text - 假定x是形状为2*3的2维张量: + 假定 x 是形状为 2*3 的 2 维张量: [[1 2 3] [4 5 6]] - 在对x做dropout时,程序会先生成一个和x相同形状的mask张量,mask中每个元素的值为0或1。 + 在对 x 做 dropout 时,程序会先生成一个和 x 相同形状的 mask 张量,mask 中每个元素的值为 0 或 1。 每个元素的具体值,则是依据丢弃概率从伯努利分布中随机采样得到。 - 比如,我们可能得到下面这样一个2*3的mask: + 比如,我们可能得到下面这样一个 2*3 的 mask: [[0 1 0] [1 0 1]] - 将输入x和生成的mask点积,就得到了随机丢弃部分元素之后的结果: + 将输入 x 和生成的 mask 点积,就得到了随机丢弃部分元素之后的结果: [[0 2 0] [4 0 6]] - 假定dropout的概率使用默认值,即 ``p=0.5``,若mode参数使用默认值,即 ``mode='upscale_in_train'`` , + 假定 dropout 的概率使用默认值,即 ``p=0.5``,若 mode 参数使用默认值,即 ``mode='upscale_in_train'`` , 则在训练阶段,最终增大后的结果为: [[0 4 0 ] [8 0 12]] 在测试阶段,输出跟输入一致: [[1 2 3] [4 5 6]] - 若参数mode设置为'downscale_in_infer',则训练阶段的输出为: + 若参数 mode 设置为'downscale_in_infer',则训练阶段的输出为: [[0 2 0] [4 0 6]] 在测试阶段,缩小后的输出为: [[0.5 1. 1.5] [2. 2.5 3. ]] -使用示例2 +使用示例 2 ::::::::: -若参数axis不为None,dropout的功能为:以一定的概率从图像特征或语音序列中丢弃掉整个通道。 +若参数 axis 不为 None,dropout 的功能为:以一定的概率从图像特征或语音序列中丢弃掉整个通道。 - - axis应设置为:``[0,1,...,ndim(x)-1]`` 的子集(ndim(x)为输入x的维度),例如: + - axis 应设置为:``[0,1,...,ndim(x)-1]`` 的子集(ndim(x)为输入 x 的维度),例如: - - 若x的维度为2,参数axis可能的取值有4种:``None``, ``[0]``, ``[1]``, ``[0,1]`` - - 若x的维度为3,参数axis可能的取值有8种:``None``, ``[0]``, ``[1]``, ``[2]``, ``[0,1]``, ``[0,2]``, ``[1,2]``, ``[0,1,2]`` + - 若 x 的维度为 2,参数 axis 可能的取值有 4 种:``None``, ``[0]``, ``[1]``, ``[0,1]`` + - 若 x 的维度为 3,参数 axis 可能的取值有 8 种:``None``, ``[0]``, ``[1]``, ``[2]``, ``[0,1]``, ``[0,2]``, ``[1,2]``, ``[0,1,2]`` - - 下面以维度为2的输入张量展示axis参数的用法: + - 下面以维度为 2 的输入张量展示 axis 参数的用法: .. 
code-block:: text - 假定x是形状为2*3的2维Tensor: + 假定 x 是形状为 2*3 的 2 维 Tensor: [[1 2 3] [4 5 6]] - (1) 若 ``axis=[0]``,则表示只在第0个维度做dropout。这时生成mask的形状为2*1。 - 例如,我们可能会得到这样的mask: + (1) 若 ``axis=[0]``,则表示只在第 0 个维度做 dropout。这时生成 mask 的形状为 2*1。 + 例如,我们可能会得到这样的 mask: [[1] [0]] - 这个2*1的mask在和x做点积的时候,会首先广播成一个2*3的矩阵: + 这个 2*1 的 mask 在和 x 做点积的时候,会首先广播成一个 2*3 的矩阵: [[1 1 1] [0 0 0]] 点积所得的结果为: @@ -92,20 +92,20 @@ axis参数的默认值为None。当 ``axis=None`` 时,dropout的功能为: [0 0 0]] 之后依据其它参数的设置,得到最终的输出结果。 - (2) 若 ``axis=[1]``,则表示只在第1个维度做dropout。这时生成的mask形状为1*3。 - 例如,我们可能会得到这样的mask: + (2) 若 ``axis=[1]``,则表示只在第 1 个维度做 dropout。这时生成的 mask 形状为 1*3。 + 例如,我们可能会得到这样的 mask: [[1 0 1]] - 这个1*3的mask在和x做点积的时候,会首先广播成一个2*3的矩阵: + 这个 1*3 的 mask 在和 x 做点积的时候,会首先广播成一个 2*3 的矩阵: [[1 0 1] [1 0 1]] 点积所得结果为: [[1 0 3] [4 0 6]] - (3) 若 ``axis=[0, 1]``,则表示在第0维和第1维上做dropout。此时与默认设置 ``axis=None`` 的作用一致。 + (3) 若 ``axis=[0, 1]``,则表示在第 0 维和第 1 维上做 dropout。此时与默认设置 ``axis=None`` 的作用一致。 -若输入x为4维张量,形状为 `NCHW`,当设置 ``axis=[0,1]`` 时,则只会在通道 `N` 和 `C` 上做dropout,通道 `H` 和 `W` 的元素是绑定在一起的,即:``paddle.nn.functional.dropout(x, p, axis=[0,1])``,此时对4维张量中的某个2维特征图(形状 `HW` ),或者全部置0,或者全部保留,这便是dropout2d的实现。详情参考 :ref:`cn_api_nn_functional_dropout2d` 。 +若输入 x 为 4 维张量,形状为 `NCHW`,当设置 ``axis=[0,1]`` 时,则只会在通道 `N` 和 `C` 上做 dropout,通道 `H` 和 `W` 的元素是绑定在一起的,即:``paddle.nn.functional.dropout(x, p, axis=[0,1])``,此时对 4 维张量中的某个 2 维特征图(形状 `HW` ),或者全部置 0,或者全部保留,这便是 dropout2d 的实现。详情参考 :ref:`cn_api_nn_functional_dropout2d` 。 -类似的,若输入x为5维张量,形状为 `NCDHW`,当设置 ``axis=[0,1]`` 时,便可实现dropout3d。详情参考 :ref:`cn_api_nn_functional_dropout3d` 。 +类似的,若输入 x 为 5 维张量,形状为 `NCDHW`,当设置 ``axis=[0,1]`` 时,便可实现 dropout3d。详情参考 :ref:`cn_api_nn_functional_dropout3d` 。 .. note:: 关于广播(broadcasting)机制,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 diff --git a/docs/api/paddle/nn/functional/elu_cn.rst b/docs/api/paddle/nn/functional/elu_cn.rst index 4b2b7a67112..68f71515eb9 100644 --- a/docs/api/paddle/nn/functional/elu_cn.rst +++ b/docs/api/paddle/nn/functional/elu_cn.rst @@ -5,9 +5,9 @@ elu .. 
py:function:: paddle.nn.functional.elu(x, alpha=1.0, name=None) -elu激活层(ELU Activation Operator) +elu 激活层(ELU Activation Operator) -根据 `Exponential Linear Units `_ 对输入Tensor中每个元素应用以下计算。 +根据 `Exponential Linear Units `_ 对输入 Tensor 中每个元素应用以下计算。 .. math:: @@ -26,7 +26,7 @@ elu激活层(ELU Activation Operator) :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - alpha (float,可选) - elu的alpha值,默认值为1.0。 + - alpha (float,可选) - elu 的 alpha 值,默认值为 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/embedding_cn.rst b/docs/api/paddle/nn/functional/embedding_cn.rst index 5a1f3d4a64d..c82fb53c8e3 100644 --- a/docs/api/paddle/nn/functional/embedding_cn.rst +++ b/docs/api/paddle/nn/functional/embedding_cn.rst @@ -10,21 +10,21 @@ embedding 嵌入层(Embedding Layer) -该OP根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 +该 OP 根据 input 中的 id 信息从 embedding 矩阵中查询对应 embedding 信息,并会根据输入的 size (vocab_size, emb_size)和 dtype 自动构造一个二维 embedding 矩阵。 -输出的Tensor的shape是将输入Tensor shape后追加一维emb_size。 +输出的 Tensor 的 shape 是将输入 Tensor shape 后追加一维 emb_size。 -注:input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 +注:input 中的 id 必须满足 ``0 <= id < size[0]``,否则程序会抛异常退出。 ..
code-block:: text - x是Tensor,且padding_idx = -1。 + x 是 Tensor,且 padding_idx = -1。 padding_idx = -1 x.data = [[1, 3], [2, 4], [4, 127]] x.shape = [3, 2] weight.shape = [128, 16] - 输出是Tensor: + 输出是 Tensor: out.shape = [3, 2, 16] out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], [0.345421456, 0.524563927, ..., 0.144534654]], [[0.345249859, 0.124939536, ..., 0.194353745], [0.945345345, 0.435394634, ..., 0.435345365], [[0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]]] # padding data - 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127,对于输入id为127的词,进行padding处理。 + 输入的 padding_idx 小于 0,则自动转换为 padding_idx = -1 + 128 = 127,对于输入 id 为 127 的词,进行 padding 处理。 参数 :::::::::::: - - **input** (Tensor) - 存储id信息的Tensor,数据类型必须为:int32/int64。input中的id必须满足 ``0 =< id < size[0]`` 。 - - **weight** (Tensor) - 存储词嵌入权重参数的Tensor,形状为(num_embeddings, embedding_dim)。 + - **input** (Tensor) - 存储 id 信息的 Tensor,数据类型必须为:int32/int64。input 中的 id 必须满足 ``0 <= id < size[0]`` 。 + - **weight** (Tensor) - 存储词嵌入权重参数的 Tensor,形状为(num_embeddings, embedding_dim)。 - **sparse** (bool) - 是否使用稀疏更新,在词嵌入权重较大的情况下,使用稀疏更新能够获得更快的训练速度及更小的内存/显存占用。 - - **padding_idx** (int|long|None) - padding_idx的配置区间为 ``[-weight.shape[0], weight.shape[0]``,如果配置了padding_idx,那么在训练过程中遇到此id时,其参数及对应的梯度将会以0进行填充。 + - **padding_idx** (int|long|None) - padding_idx 的配置区间为 ``[-weight.shape[0], weight.shape[0])``,如果配置了 padding_idx,那么在训练过程中遇到此 id 时,其参数及对应的梯度将会以 0 进行填充。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor, input映射后得到的Embedding Tensor,数据类型和权重定义的类型一致。 +Tensor, input 映射后得到的 Embedding Tensor,数据类型和权重定义的类型一致。 代码示例 diff --git a/docs/api/paddle/nn/functional/fold_cn.rst b/docs/api/paddle/nn/functional/fold_cn.rst index 12779759866..bb63695e522 100644 --- a/docs/api/paddle/nn/functional/fold_cn.rst +++ b/docs/api/paddle/nn/functional/fold_cn.rst @@ -6,10 +6,10 @@ fold ..
py:function:: paddle.nn.functional.fold(x, output_sizes, kernel_sizes, strides=1, paddings=0, dilations=1, name=None) -该Op用于将一个滑动局部块组合成一个大的张量。通常也被称为col2im,用于批处理二维图像张量。Fold通过对所有包含块的值求和来计算结果中的每个大张量的组合值。 +该 Op 用于将一个滑动局部块组合成一个大的张量。通常也被称为 col2im,用于批处理二维图像张量。Fold 通过对所有包含块的值求和来计算结果中的每个大张量的组合值。 -对于输入x,如果形状为[N, C_in, L],其输出形状[N, C_out, H_out, W_out],计算过程如下: +对于输入 x,如果形状为[N, C_in, L],其输出形状[N, C_out, H_out, W_out],计算过程如下: .. math:: H_out &= output_size[0] @@ -19,19 +19,19 @@ fold 参数 ::::::::: - - **x** (Tensor) – 输入3-D Tensor,形状为[N, C, L],数据类型为float32或者float64 - - **output_sizes** (int|list|tuple) – 输出尺寸,整数或者整型列表。如为列表类型应包含两个元素 ``[output_size_h, output_size_w]``。如果为整数o,则输出形状会被认为 ``[o, o]``。 - - **kernel_size** (int|list|tuple) - 卷积核大小,整数或者整型列表。如为列表类型应包含两个元素 ``[k_h, k_w]``。如果为整数k,则输出形状会被认为 ``[k, k]``。 - - **strides** (int|list|tuple,可选) - 步长大小,整数或者整型列表。如为列表类型应包含两个元素 ``[stride_h, stride_w]``。如果为整数stride,则输出形状会被认为 ``[sride, stride]``。默认为[1,1]。 - - **paddings** (int|list|tuple,可选) – 每个维度的扩展,整数或者整型列表。如果为整型列表,长度应该为4或者2;长度为4 对应的padding参数是:[padding_top, padding_left,padding_bottom, padding_right],长度为2对应的padding参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数padding,则会被当作[padding, padding, padding, padding]处理。默认值为0。 - - **dilations** (int|list|tuple,可选) – 卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数dilation,会被当作整型列表[dilation, dilation]处理。默认值为1。 + - **x** (Tensor) – 输入 3-D Tensor,形状为[N, C, L],数据类型为 float32 或者 float64 + - **output_sizes** (int|list|tuple) – 输出尺寸,整数或者整型列表。如为列表类型应包含两个元素 ``[output_size_h, output_size_w]``。如果为整数 o,则输出形状会被认为 ``[o, o]``。 + - **kernel_size** (int|list|tuple) - 卷积核大小,整数或者整型列表。如为列表类型应包含两个元素 ``[k_h, k_w]``。如果为整数 k,则输出形状会被认为 ``[k, k]``。 + - **strides** (int|list|tuple,可选) - 步长大小,整数或者整型列表。如为列表类型应包含两个元素 ``[stride_h, stride_w]``。如果为整数 stride,则输出形状会被认为 ``[stride, stride]``。默认为[1,1]。 + - **paddings** (int|list|tuple,可选) – 每个维度的扩展,整数或者整型列表。如果为整型列表,长度应该为 4 或者 2;长度为 4 对应的 padding 参数是:[padding_top, 
padding_left,padding_bottom, padding_right],长度为 2 对应的 padding 参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数 padding,则会被当作[padding, padding, padding, padding]处理。默认值为 0。 + - **dilations** (int|list|tuple,可选) – 卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数 dilation,会被当作整型列表[dilation, dilation]处理。默认值为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **输出** : Tensor, fold操作之后的结果,形状如上面所描述的[N, Cout, H_out, W_out],数据类型与 ``x`` 相同 + - **输出** : Tensor, fold 操作之后的结果,形状如上面所描述的[N, Cout, H_out, W_out],数据类型与 ``x`` 相同 代码示例 diff --git a/docs/api/paddle/nn/functional/gather_tree_cn.rst b/docs/api/paddle/nn/functional/gather_tree_cn.rst index 16ea356a3fa..f1c145f9f9e 100644 --- a/docs/api/paddle/nn/functional/gather_tree_cn.rst +++ b/docs/api/paddle/nn/functional/gather_tree_cn.rst @@ -8,7 +8,7 @@ gather_tree -该OP在整个束搜索(Beam Search)结束后使用。在搜索结束后,可以获得每个时间步选择的的候选词 id 及其对应的在搜索树中的 parent 节点,``ids`` 和 ``parents`` 的形状布局均为 :math:`[max\_time, batch\_size, beam\_size]`,该OP从最后一个时间步回溯产生完整的 id 序列。 +该 OP 在整个束搜索(Beam Search)结束后使用。在搜索结束后,可以获得每个时间步选择的的候选词 id 及其对应的在搜索树中的 parent 节点,``ids`` 和 ``parents`` 的形状布局均为 :math:`[max\_time, batch\_size, beam\_size]`,该 OP 从最后一个时间步回溯产生完整的 id 序列。 示例: diff --git a/docs/api/paddle/nn/functional/gelu_cn.rst b/docs/api/paddle/nn/functional/gelu_cn.rst index 55d1053afde..81cfbc1322b 100644 --- a/docs/api/paddle/nn/functional/gelu_cn.rst +++ b/docs/api/paddle/nn/functional/gelu_cn.rst @@ -5,9 +5,9 @@ gelu .. 
py:function:: paddle.nn.functional.gelu(x, approximate=False, name=None) -gelu激活层(GELU Activation Operator) +gelu 激活层(GELU Activation Operator) -逐元素计算 gelu激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 +逐元素计算 gelu 激活函数。更多细节请参考 `Gaussian Error Linear Units `_ 。 如果使用近似计算: diff --git a/docs/api/paddle/nn/functional/grid_sample_cn.rst b/docs/api/paddle/nn/functional/grid_sample_cn.rst index d1958431c88..e417b02d4cc 100644 --- a/docs/api/paddle/nn/functional/grid_sample_cn.rst +++ b/docs/api/paddle/nn/functional/grid_sample_cn.rst @@ -8,8 +8,8 @@ grid_sample -该OP基于flow field网格的对输入X进行双线性插值采样。网格通常由affine_grid生成,shape为[N, H, W, 2],是shape为[N, H, W]的采样点张量的(x, y)坐标。 -其中,x坐标是对输入数据X的第四个维度(宽度维度)的索引,y坐标是第三维度(高维度)的索引,最终输出采样值为采样点的4个最接近的角点的双线性插值结果,输出张量的shape为[N, C, H, W]。 +该 OP 基于 flow field 网格的对输入 X 进行双线性插值采样。网格通常由 affine_grid 生成,shape 为[N, H, W, 2],是 shape 为[N, H, W]的采样点张量的(x, y)坐标。 +其中,x 坐标是对输入数据 X 的第四个维度(宽度维度)的索引,y 坐标是第三维度(高维度)的索引,最终输出采样值为采样点的 4 个最接近的角点的双线性插值结果,输出张量的 shape 为[N, C, H, W]。 step 1: @@ -21,7 +21,7 @@ step 1: step 2: - 在每个[H, W]区域用网格(X, y)作为输入数据X的索引,并将双线性插值点值由4个最近的点表示。 + 在每个[H, W]区域用网格(X, y)作为输入数据 X 的索引,并将双线性插值点值由 4 个最近的点表示。 .. 
code-block:: text @@ -55,16 +55,16 @@ step 2: 参数 :::::::::::: - - **x** (Tensor):输入张量,维度为 :math:`[N, C, H, W]` 的4-D Tensor,N为批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float32或float64。 - - **grid** (Tensor):输入网格数据张量,维度为 :math:`[N, H, W, 2]` 的4-D Tensor,N为批尺寸,H是特征高度,W是特征宽度,数据类型为float32或float64。 + - **x** (Tensor):输入张量,维度为 :math:`[N, C, H, W]` 的 4-D Tensor,N 为批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 + - **grid** (Tensor):输入网格数据张量,维度为 :math:`[N, H, W, 2]` 的 4-D Tensor,N 为批尺寸,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 - **mode** (str, optional):插值方式,可以为 'bilinear' 或者 'nearest'。默认值:'bilinear'。 - **padding_mode** (str, optional) 当原来的索引超过输入的图像大小时的填充方式。可以为 'zeros', 'reflection' 和 'border'。默认值:'zeros'。 - - **align_corners** (bool, optional):一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。默认值:True。 + - **align_corners** (bool, optional):一个可选的 bool 型参数,如果为 True,则将输入和输出张量的 4 个角落像素的中心对齐,并保留角点像素的值。默认值:True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,输入X基于输入网格的双线性插值计算结果,维度为 :math:`[N, C, H, W]` 的4-D Tensor,数据类型与 ``x`` 一致。 +Tensor,输入 X 基于输入网格的双线性插值计算结果,维度为 :math:`[N, C, H, W]` 的 4-D Tensor,数据类型与 ``x`` 一致。 代码示例 diff --git a/docs/api/paddle/nn/functional/gumbel_softmax_cn.rst b/docs/api/paddle/nn/functional/gumbel_softmax_cn.rst index 10c4ae63cd8..4eaa00943d4 100644 --- a/docs/api/paddle/nn/functional/gumbel_softmax_cn.rst +++ b/docs/api/paddle/nn/functional/gumbel_softmax_cn.rst @@ -5,22 +5,22 @@ gumbel_softmax .. py:function:: paddle.nn.functional.gumbel_softmax(x, temperature = 1.0, hard = False, axis = -1, name = None) -该OP实现了按Gumbel-Softmax分布进行采样的功能,通过hard可选择是否离散化。 -记temperature为t,涉及到的等式如下: +该 OP 实现了按 Gumbel-Softmax 分布进行采样的功能,通过 hard 可选择是否离散化。 +记 temperature 为 t,涉及到的等式如下: -1. 产生gumbel噪声 +1. 产生 gumbel 噪声 .. math:: G_i = -log(-log(U_i)),\ U_i \sim U(0,1) -2. 对输入x添加噪声 +2. 对输入 x 添加噪声 .. math:: v = [x_1 + G_1,...,x_n + G_n] -3. 计算gumbel_softmax +3. 计算 gumbel_softmax .. 
math:: @@ -29,15 +29,15 @@ gumbel_softmax 参数 :::::::::: - - x (Tensor) - 一个N-D Tensor,前N-1维用于独立分布batch的索引,最后一维表示每个类别的概率,dtype类型为float,double。 - - temperature (float,可选) - 大于0的标量。默认值:1.0。 - - hard (bool,可选) - 如果是True,返回离散的one-hot向量。如果是False,返回软样本。默认值:False。 - - axis (int,可选) - 按照维度axis计算softmax。默认值:-1。 + - x (Tensor) - 一个 N-D Tensor,前 N-1 维用于独立分布 batch 的索引,最后一维表示每个类别的概率,dtype 类型为 float,double。 + - temperature (float,可选) - 大于 0 的标量。默认值:1.0。 + - hard (bool,可选) - 如果是 True,返回离散的 one-hot 向量。如果是 False,返回软样本。默认值:False。 + - axis (int,可选) - 按照维度 axis 计算 softmax。默认值:-1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - 与 ``x`` 形状相同的符合gumbel-softmax分布的 ``Tensor``。如果 ``hard=True``,则返回的样本将是one-hot。如果 ``hard=False``,则返回的向量将是各维度加起来等于1的概率。 + 与 ``x`` 形状相同的符合 gumbel-softmax 分布的 ``Tensor``。如果 ``hard=True``,则返回的样本将是 one-hot。如果 ``hard=False``,则返回的向量将是各维度加起来等于 1 的概率。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/functional/hardshrink_cn.rst b/docs/api/paddle/nn/functional/hardshrink_cn.rst index 8ffb23cb886..cf396aa9642 100644 --- a/docs/api/paddle/nn/functional/hardshrink_cn.rst +++ b/docs/api/paddle/nn/functional/hardshrink_cn.rst @@ -4,7 +4,7 @@ hardshrink ------------------------------- .. py:function:: paddle.nn.functional.hardshrink(x, threshold=0.5, name=None) -hardshrink激活层。计算公式如下: +hardshrink 激活层。计算公式如下: .. math:: @@ -22,7 +22,7 @@ hardshrink激活层。计算公式如下: 参数 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - threshold (float,可选) - hard_shrink激活计算公式中的threshold值。默认值为0.5。 + - threshold (float,可选) - hard_shrink 激活计算公式中的 threshold 值。默认值为 0.5。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/hardsigmoid_cn.rst b/docs/api/paddle/nn/functional/hardsigmoid_cn.rst index c1afe461bd9..c73053cfa96 100644 --- a/docs/api/paddle/nn/functional/hardsigmoid_cn.rst +++ b/docs/api/paddle/nn/functional/hardsigmoid_cn.rst @@ -5,7 +5,7 @@ hardsigmoid .. 
py:function:: paddle.nn.functional.hardsigmoid(x, slope=0.1666667, offset=0.5, name=None) -hardsigmoid激活层。sigmoid的分段线性逼近激活函数,速度比sigmoid快,详细解释参见 https://arxiv.org/abs/1603.00391。 +hardsigmoid 激活层。sigmoid 的分段线性逼近激活函数,速度比 sigmoid 快,详细解释参见 https://arxiv.org/abs/1603.00391。 .. math:: @@ -25,8 +25,8 @@ hardsigmoid激活层。sigmoid的分段线性逼近激活函数,速度比sigmo :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - slope (float,可选) - hardsigmoid的斜率。默认值为0.1666667。 - - offset (float,可选) - hardsigmoid的截距。默认值为0.5。 + - slope (float,可选) - hardsigmoid 的斜率。默认值为 0.1666667。 + - offset (float,可选) - hardsigmoid 的截距。默认值为 0.5。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/hardswish_cn.rst b/docs/api/paddle/nn/functional/hardswish_cn.rst index 8381cdc40d0..b96d9948cb5 100644 --- a/docs/api/paddle/nn/functional/hardswish_cn.rst +++ b/docs/api/paddle/nn/functional/hardswish_cn.rst @@ -5,7 +5,7 @@ hardswish .. py:function:: paddle.nn.functional.hardswish(x, name=None) -hardswish激活函数。在MobileNetV3架构中被提出,相较于swish函数,具有数值稳定性好,计算速度快等优点,具体原理请参考:https://arxiv.org/pdf/1905.02244.pdf +hardswish 激活函数。在 MobileNetV3 架构中被提出,相较于 swish 函数,具有数值稳定性好,计算速度快等优点,具体原理请参考:https://arxiv.org/pdf/1905.02244.pdf .. math:: diff --git a/docs/api/paddle/nn/functional/hardtanh_cn.rst b/docs/api/paddle/nn/functional/hardtanh_cn.rst index e7cf10de9c8..902ee7ed8aa 100644 --- a/docs/api/paddle/nn/functional/hardtanh_cn.rst +++ b/docs/api/paddle/nn/functional/hardtanh_cn.rst @@ -4,7 +4,7 @@ hardtanh ------------------------------- .. py:function:: paddle.nn.functional.hardtanh(x, min=-1.0, max=1.0, name=None): -hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: +hardtanh 激活层(Hardtanh Activation Operator)。计算公式如下: .. 
math:: @@ -22,8 +22,8 @@ hardtanh激活层(Hardtanh Activation Operator)。计算公式如下: 参数 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - min (float,可选) - hardtanh激活计算公式中的min值。默认值为-1。 - - max (float,可选) - hardtanh激活计算公式中的max值。默认值为1。 + - min (float,可选) - hardtanh 激活计算公式中的 min 值。默认值为-1。 + - max (float,可选) - hardtanh 激活计算公式中的 max 值。默认值为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/hsigmoid_loss_cn.rst b/docs/api/paddle/nn/functional/hsigmoid_loss_cn.rst index 1dc6d4099ff..6c778924854 100644 --- a/docs/api/paddle/nn/functional/hsigmoid_loss_cn.rst +++ b/docs/api/paddle/nn/functional/hsigmoid_loss_cn.rst @@ -5,39 +5,39 @@ hsigmoid_loss .. py:function:: paddle.nn.functional.hsigmoid_loss(input, label, num_classes, weight, bias=None, path_table=None, path_code=None, is_sparse=False, name=None) -层次sigmoid(hierarchical sigmoid),该OP通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。 +层次 sigmoid(hierarchical sigmoid),该 OP 通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。 -该OP建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid累加这条路径上每个非叶子节点的损失得到总损失。 +该 OP 建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid 累加这条路径上每个非叶子节点的损失得到总损失。 -相较于传统softmax的计算复杂度 :math:`O(N)` ,hsigmoid可以将计算复杂度降至 :math:`O(logN)`,其中 :math:`N` 表示类别总数(字典大小)。 +相较于传统 softmax 的计算复杂度 :math:`O(N)` ,hsigmoid 可以将计算复杂度降至 :math:`O(logN)`,其中 :math:`N` 表示类别总数(字典大小)。 若使用默认树结构,请参考 `Hierarchical Probabilistic Neural Network Language Model `_ 。 -若使用自定义树结构,请将参数 ``is_custom`` 设置为True,并完成以下步骤(以语言模型为例): +若使用自定义树结构,请将参数 ``is_custom`` 设置为 True,并完成以下步骤(以语言模型为例): 1. 使用自定义词典来建立二叉树,每个叶结点都应该是词典中的单词; -2. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径** 的映射,即路径表 ``path_table`` 参数; +2. 建立一个 dict 类型数据结构,用于存储 **单词 id -> 该单词叶结点至根节点路径** 的映射,即路径表 ``path_table`` 参数; -3. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径的编码** 的映射,即路径编码 ``path_code`` 参数。编码是指每次二分类的标签,1为真,0为假; +3. 
建立一个 dict 类型数据结构,用于存储 **单词 id -> 该单词叶结点至根节点路径的编码** 的映射,即路径编码 ``path_code`` 参数。编码是指每次二分类的标签,1 为真,0 为假; 4. 每个单词都已经有自己的路径和路径编码,当对于同一批输入进行操作时,可以同时传入一批路径和路径编码进行运算。 参数 :::::::::: - - **input** (Tensor) - 输入Tensor。数据类型为float32或float64,形状为 ``[N, D]``,其中 ``N`` 为minibatch的大小,``D`` 为特征大小。 - - **label** (Tensor) - 训练数据的标签。数据类型为int64,形状为 ``[N, 1]`` 。 - - **num_classes** (int) - 类别总数(字典大小)必须大于等于2。若使用默认树结构,即当 ``path_table`` 和 ``path_code`` 都为None时,必须设置该参数。若使用自定义树结构,即当 ``path_table`` 和 ``path_code`` 都不为None时,它取值应为自定义树结构的非叶节点的个数,用于指定二分类的类别总数。 - - **weight** (Tensor) - 该OP的权重参数。形状为 ``[numclasses-1, D]``,数据类型和 ``input`` 相同。 - - **bias** (Tensor,可选) - 该OP的偏置参数。形状为 ``[numclasses-1, 1]``,数据类型和 ``input`` 相同。如果设置为None,将没有偏置参数。默认值为None。 - - **path_table** (Tensor,可选) – 存储每一批样本从类别(单词)到根节点的路径,按照从叶至根方向存储。数据类型为int64,形状为 ``[N, L]``,其中L为路径长度。``path_table`` 和 ``path_code`` 应具有相同的形状,对于每个样本i,path_table[i]为一个类似np.ndarray的结构,该数组内的每个元素都是其双亲结点权重矩阵的索引。默认值为None。 - - **path_code** (Tensor,可选) – 存储每一批样本从类别(单词)到根节点的路径编码,按从叶至根方向存储。数据类型为int64,形状为 ``[N, L]``。默认值为None。 - - **is_sparse** (bool,可选) – 是否使用稀疏更新方式。如果设置为True,W的梯度和输入梯度将会变得稀疏。默认值为False。 + - **input** (Tensor) - 输入 Tensor。数据类型为 float32 或 float64,形状为 ``[N, D]``,其中 ``N`` 为 minibatch 的大小,``D`` 为特征大小。 + - **label** (Tensor) - 训练数据的标签。数据类型为 int64,形状为 ``[N, 1]`` 。 + - **num_classes** (int) - 类别总数(字典大小)必须大于等于 2。若使用默认树结构,即当 ``path_table`` 和 ``path_code`` 都为 None 时,必须设置该参数。若使用自定义树结构,即当 ``path_table`` 和 ``path_code`` 都不为 None 时,它取值应为自定义树结构的非叶节点的个数,用于指定二分类的类别总数。 + - **weight** (Tensor) - 该 OP 的权重参数。形状为 ``[numclasses-1, D]``,数据类型和 ``input`` 相同。 + - **bias** (Tensor,可选) - 该 OP 的偏置参数。形状为 ``[numclasses-1, 1]``,数据类型和 ``input`` 相同。如果设置为 None,将没有偏置参数。默认值为 None。 + - **path_table** (Tensor,可选) – 存储每一批样本从类别(单词)到根节点的路径,按照从叶至根方向存储。数据类型为 int64,形状为 ``[N, L]``,其中 L 为路径长度。``path_table`` 和 ``path_code`` 应具有相同的形状,对于每个样本 i,path_table[i]为一个类似 np.ndarray 的结构,该数组内的每个元素都是其双亲结点权重矩阵的索引。默认值为 None。 + - **path_code** (Tensor,可选) – 存储每一批样本从类别(单词)到根节点的路径编码,按从叶至根方向存储。数据类型为 int64,形状为 ``[N, 
L]``。默认值为 None。 + - **is_sparse** (bool,可选) – 是否使用稀疏更新方式。如果设置为 True,W 的梯度和输入梯度将会变得稀疏。默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - - Tensor,层次sigmoid计算后的结果,形状为[N, 1],数据类型和 ``input`` 一致。 + - Tensor,层次 sigmoid 计算后的结果,形状为[N, 1],数据类型和 ``input`` 一致。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/functional/instance_norm_cn.rst b/docs/api/paddle/nn/functional/instance_norm_cn.rst index 6b72fc38728..c82f167c6c8 100644 --- a/docs/api/paddle/nn/functional/instance_norm_cn.rst +++ b/docs/api/paddle/nn/functional/instance_norm_cn.rst @@ -5,21 +5,21 @@ instance_norm .. py:class:: paddle.nn.functional.instance_norm(x, running_mean, running_var, weight, bias, training=False, epsilon=1e-05, momentum=0.9, use_input_stats=True, data_format='NCHW', name=None): -推荐使用nn.InstanceNorm1D,nn.InstanceNorm2D, nn.InstanceNorm3D,由内部调用此方法。 +推荐使用 nn.InstanceNorm1D,nn.InstanceNorm2D, nn.InstanceNorm3D,由内部调用此方法。 详情见 :ref:`cn_api_nn_InstanceNorm1D` 。 参数 :::::::::::: - - **x** (int) - 输入,数据类型为float32, float64。 - - **running_mean** (Tensor) - 均值的Tensor。 - - **running_var** (Tensor) - 方差的Tensor。 - - **weight** (Tensor) - 权重的Tensor。 - - **bias** (Tensor) - 偏置的Tensor。 + - **x** (int) - 输入,数据类型为 float32, float64。 + - **running_mean** (Tensor) - 均值的 Tensor。 + - **running_var** (Tensor) - 方差的 Tensor。 + - **weight** (Tensor) - 权重的 Tensor。 + - **bias** (Tensor) - 偏置的 Tensor。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **momentum** (float,可选) - 此值用于计算 ``moving_mean`` 和 ``moving_var``。默认值:0.9。更新公式如上所示。 - - **use_input_stats** (bool,可选) - 默认是True。 + - **use_input_stats** (bool,可选) - 默认是 True。 - **data_format** (string,可选) - 指定输入数据格式,数据格式可以为“NC", "NCL", "NCHW" 或者"NCDHW"。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/interpolate_cn.rst b/docs/api/paddle/nn/functional/interpolate_cn.rst index 1d9638268e1..e62eab6a4dc 100644 --- 
a/docs/api/paddle/nn/functional/interpolate_cn.rst +++ b/docs/api/paddle/nn/functional/interpolate_cn.rst @@ -7,9 +7,9 @@ interpolate -该OP用于调整一个batch中图片的大小。 +该 OP 用于调整一个 batch 中图片的大小。 -输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为5-D Tensor时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 +输入为 4-D Tensor 时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为 5-D Tensor 时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 支持的插值方法: @@ -28,13 +28,13 @@ interpolate 最近邻插值是在输入张量的高度和宽度上进行最近邻插值。 -双线性插值是线性插值的扩展,用于在直线2D网格上插值两个变量(例如,该操作中的H方向和W方向)的函数。关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 +双线性插值是线性插值的扩展,用于在直线 2D 网格上插值两个变量(例如,该操作中的 H 方向和 W 方向)的函数。关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 -三线插值是线性插值的一种扩展,是3参数的插值方程(比如op里的D,H,W方向),在三个方向上进行线性插值。 +三线插值是线性插值的一种扩展,是 3 参数的插值方程(比如 op 里的 D,H,W 方向),在三个方向上进行线性插值。 双三次插值是在二维网格上对数据点进行插值的三次插值的扩展,它能创造出比双线性和最近临插值更为光滑的图像边缘。 -Align_corners和align_mode是可选参数,插值的计算方法可以由它们选择。 +Align_corners 和 align_mode 是可选参数,插值的计算方法可以由它们选择。 示例: @@ -141,18 +141,18 @@ https://en.wikipedia.org/wiki/Bicubic_interpolation 参数 :::::::::::: - - **x** (Tensor) - 4-D或5-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。 - - **size** (list|tuple|Tensor|None) - 输出Tensor,输入为4D张量时,形状为为(out_h, out_w)的2-D Tensor。输入为5-D Tensor时,形状为(out_d, out_h, out_w)的3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。 - - **scale_factor** (float|Tensor|list|tuple|None)-输入的高度或宽度的乘数因子。out_shape和scale至少要设置一个。out_shape的优先级高于scale。默认值为None。如果scale_factor是一个list或tuple,它必须与输入的shape匹配。 + - **x** (Tensor) - 4-D 或 5-D Tensor,数据类型为 float32、float64 或 uint8,其数据格式由参数 ``data_format`` 指定。 + - **size** (list|tuple|Tensor|None) - 输出 Tensor,输入为 4D 张量时,形状为为(out_h, out_w)的 2-D Tensor。输入为 5-D Tensor 时,形状为(out_d, out_h, out_w)的 3-D Tensor。如果 :code:`out_shape` 
是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为 1。默认值为 None。 + - **scale_factor** (float|Tensor|list|tuple|None)-输入的高度或宽度的乘数因子。out_shape 和 scale 至少要设置一个。out_shape 的优先级高于 scale。默认值为 None。如果 scale_factor 是一个 list 或 tuple,它必须与输入的 shape 匹配。 - **mode** (str,可选) - 插值方法。支持"bilinear"或"trilinear"或"nearest"或"bicubic"或"linear"或"area"。默认值为"nearest"。 - - **align_corners** (bool,可选)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。默认值为False。 - - **align_mode** (int,可选)- 双线性插值的可选项。可以是 '0' 代表src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1',代表src_idx = scale * dst_index。默认值:0。 - - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 + - **align_corners** (bool,可选)- 一个可选的 bool 型参数,如果为 True,则将输入和输出张量的 4 个角落像素的中心对齐,并保留角点像素的值。默认值为 False。 + - **align_mode** (int,可选)- 双线性插值的可选项。可以是 '0' 代表 src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1',代表 src_idx = scale * dst_index。默认值:0。 + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于 4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于 5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 +4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者 5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 代码示例 diff --git a/docs/api/paddle/nn/functional/kl_div_cn.rst 
b/docs/api/paddle/nn/functional/kl_div_cn.rst index ce23ca168b9..8f38b7a6d94 100644 --- a/docs/api/paddle/nn/functional/kl_div_cn.rst +++ b/docs/api/paddle/nn/functional/kl_div_cn.rst @@ -5,33 +5,33 @@ kl_div .. py:function:: paddle.nn.functional.kl_div(input, label, reduction='mean', name=None) -该算子计算输入(Input)和输入(Label)之间的Kullback-Leibler散度损失。注意其中输入(Input)应为对数概率值,输入(Label)应为概率值。 +该算子计算输入(Input)和输入(Label)之间的 Kullback-Leibler 散度损失。注意其中输入(Input)应为对数概率值,输入(Label)应为概率值。 -kL发散损失计算如下: +kL 发散损失计算如下: .. math:: l(input, label) = label * (log(label) - input) -当 ``reduction`` 为 ``none`` 时,输出损失与输入(x)形状相同,各点的损失单独计算,不会对结果做reduction 。 +当 ``reduction`` 为 ``none`` 时,输出损失与输入(x)形状相同,各点的损失单独计算,不会对结果做 reduction 。 当 ``reduction`` 为 ``mean`` 时,输出损失为[1]的形状,输出为所有损失的平均值。 当 ``reduction`` 为 ``sum`` 时,输出损失为[1]的形状,输出为所有损失的总和。 -当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N为批大小,输出为所有损失的总和除以批量大小。 +当 ``reduction`` 为 ``batchmean`` 时,输出损失为[N]的形状,N 为批大小,输出为所有损失的总和除以批量大小。 参数 ::::::::: - - **input** (Tensor) - KL散度损失算子的输入张量。维度为[N, \*]的多维Tensor,其中N是批大小,\*表示任何数量的附加维度,数据类型为float32或float64。 - - **label** (Tensor) - KL散度损失算子的张量。与输入 ``input`` 的维度和数据类型一致的多维Tensor。 - - **reduction** (str,可选) - 要应用于输出的reduction类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 + - **input** (Tensor) - KL 散度损失算子的输入张量。维度为[N, \*]的多维 Tensor,其中 N 是批大小,\*表示任何数量的附加维度,数据类型为 float32 或 float64。 + - **label** (Tensor) - KL 散度损失算子的张量。与输入 ``input`` 的维度和数据类型一致的多维 Tensor。 + - **reduction** (str,可选) - 要应用于输出的 reduction 类型,可用类型为‘none’ | ‘batchmean’ | ‘mean’ | ‘sum’,‘none’表示无 reduction,‘batchmean’ 表示输出的总和除以批大小,‘mean’ 表示所有输出的平均值,‘sum’表示输出的总和。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor KL散度损失。 +Tensor KL 散度损失。 代码示例 diff --git a/docs/api/paddle/nn/functional/l1_loss_cn.rst b/docs/api/paddle/nn/functional/l1_loss_cn.rst index 933c27d883c..b0bd48ad0c5 100644 --- a/docs/api/paddle/nn/functional/l1_loss_cn.rst +++ 
b/docs/api/paddle/nn/functional/l1_loss_cn.rst @@ -27,14 +27,14 @@ l1_loss 参数 ::::::::: - - **input** (Tensor): - 输入的Tensor,维度是[N, *],其中N是batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64、int32、int64。 + - **input** (Tensor): - 输入的 Tensor,维度是[N, *],其中 N 是 batch size, `*` 是任意数量的额外维度。数据类型为:float32、float64、int32、int64。 - **label** (Tensor): - 标签,维度是[N, *],与 ``input`` 相同。数据类型为:float32、float64、int32、int64。 - **reduction** (str,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `L1Loss` 的均值;设置为 ``'sum'`` 时,计算 `L1Loss` 的总和;设置为 ``'none'`` 时,则返回 `L1Loss`。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。如果 `reduction` 是 ``'none'``,则输出Loss的维度为 [N, *],与输入 ``input`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``,则输出Loss的维度为 [1]。 +``Tensor``,输入 ``input`` 和标签 ``label`` 间的 `L1 loss` 损失。如果 `reduction` 是 ``'none'``,则输出 Loss 的维度为 [N, *],与输入 ``input`` 相同。如果 `reduction` 是 ``'mean'`` 或 ``'sum'``,则输出 Loss 的维度为 [1]。 代码示例 diff --git a/docs/api/paddle/nn/functional/label_smooth_cn.rst b/docs/api/paddle/nn/functional/label_smooth_cn.rst index fffc08d3774..865ee47e837 100644 --- a/docs/api/paddle/nn/functional/label_smooth_cn.rst +++ b/docs/api/paddle/nn/functional/label_smooth_cn.rst @@ -8,7 +8,7 @@ label_smooth -该OP实现了标签平滑的功能。标签平滑是一种对分类器层进行正则化的机制,称为标签平滑正则化(LSR)。由于直接优化正确标签的对数似然可能会导致过拟合,降低模型的适应能力,因此提出了标签平滑的方法来降低模型置信度。 +该 OP 实现了标签平滑的功能。标签平滑是一种对分类器层进行正则化的机制,称为标签平滑正则化(LSR)。由于直接优化正确标签的对数似然可能会导致过拟合,降低模型的适应能力,因此提出了标签平滑的方法来降低模型置信度。 标签平滑使用标签 :math:`y` 和一些固定模式随机分布变量 :math:`\mu`。对 :math:`k` 标签,标签平滑的计算方式如下。 @@ -25,9 +25,9 @@ label_smooth 参数 :::::::::::: - - **label** (Tensor) - 包含标签数据的输入变量。标签数据应使用 one-hot 表示,是维度为 :math:`[N_1, ..., Depth]` 的多维Tensor,其中Depth为字典大小。 - - **prior_dist** (Tensor,可选) - 用于平滑标签的先验分布,是维度为 :math:`[1,class\_num]` 的2D Tensor。如果未设置,则使用均匀分布。默认值为None。 - - **epsilon** (float,可选) - 用于混合原始真实分布和固定分布的权重。默认值为0.1。 + - **label** (Tensor) - 包含标签数据的输入变量。标签数据应使用 one-hot 表示,是维度为 
:math:`[N_1, ..., Depth]` 的多维 Tensor,其中 Depth 为字典大小。 + - **prior_dist** (Tensor,可选) - 用于平滑标签的先验分布,是维度为 :math:`[1,class\_num]` 的 2D Tensor。如果未设置,则使用均匀分布。默认值为 None。 + - **epsilon** (float,可选) - 用于混合原始真实分布和固定分布的权重。默认值为 0.1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/layer_norm_cn.rst b/docs/api/paddle/nn/functional/layer_norm_cn.rst index e2a41d4f6f0..acf8122b746 100644 --- a/docs/api/paddle/nn/functional/layer_norm_cn.rst +++ b/docs/api/paddle/nn/functional/layer_norm_cn.rst @@ -5,17 +5,17 @@ layer_norm .. py:class:: paddle.nn.functional.layer_norm(x, normalized_shape, weight=None, bias=None, epsilon=1e-05, name=None): -推荐使用nn.LayerNorm。 +推荐使用 nn.LayerNorm。 详情见 :ref:`cn_api_nn_LayerNorm` 。 参数 :::::::::::: - - **x** (int) - 输入,数据类型为float32, float64。 + - **x** (int) - 输入,数据类型为 float32, float64。 - **normalized_shape** (int|list|tuple) - 期望的输入是 :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`,如果是一个整数,会作用在最后一个维度。 - - **weight** (Tensor) - 权重的Tensor,默认为None。 - - **bias** (Tensor) - 偏置的Tensor,默认为None。 + - **weight** (Tensor) - 权重的 Tensor,默认为 None。 + - **bias** (Tensor) - 偏置的 Tensor,默认为 None。 - **epsilon** (float,可选) - 为了数值稳定加在分母上的值。默认值:1e-05。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/leaky_relu_cn.rst b/docs/api/paddle/nn/functional/leaky_relu_cn.rst index 28c47d54c4f..dd2faf75cc6 100644 --- a/docs/api/paddle/nn/functional/leaky_relu_cn.rst +++ b/docs/api/paddle/nn/functional/leaky_relu_cn.rst @@ -4,7 +4,7 @@ leaky_relu ------------------------------- .. py:function:: paddle.nn.functional.leaky_relu(x, negative_slope=0.01, name=None) -leaky_relu激活层。计算公式如下: +leaky_relu 激活层。计算公式如下: .. 
math:: @@ -20,8 +20,8 @@ leaky_relu激活层。计算公式如下: 参数 :::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 - - negative_slope (float,可选) - :math:`x < 0` 时的斜率。默认值为0.01。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 + - negative_slope (float,可选) - :math:`x < 0` 时的斜率。默认值为 0.01。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/linear_cn.rst b/docs/api/paddle/nn/functional/linear_cn.rst index 7c0e0484845..b502a9c8259 100644 --- a/docs/api/paddle/nn/functional/linear_cn.rst +++ b/docs/api/paddle/nn/functional/linear_cn.rst @@ -7,7 +7,7 @@ linear .. py:function:: paddle.nn.functional.linear(x, weight, bias=None, name=None) -**线性变换OP**。对于每个输入Tensor :math:`X`,计算公式为: +**线性变换 OP**。对于每个输入 Tensor :math:`X`,计算公式为: .. math:: @@ -15,24 +15,24 @@ linear 其中,:math:`W` 和 :math:`b` 分别为权重和偏置。 -如果权重 :math:`W` 是一个形状为 :math:`[in\_features, out\_features]` 的2-D Tensor,输入则可以是一个多维Tensor形状为 :math:`[batch\_size, *, in\_features]`,其中 :math:`*` 表示可以为任意个额外的维度。 -linear接口可以计算输入Tensor与权重矩阵 :math:`W` 的乘积,生成形状为 :math:`[batch\_size, *, out\_features]` 的输出Tensor。 -如果偏置 :math:`bias` 不是None,它必须是一个形状为 :math:`[out\_features]` 的1-D Tensor,且将会被其加到输出中。 +如果权重 :math:`W` 是一个形状为 :math:`[in\_features, out\_features]` 的 2-D Tensor,输入则可以是一个多维 Tensor 形状为 :math:`[batch\_size, *, in\_features]`,其中 :math:`*` 表示可以为任意个额外的维度。 +linear 接口可以计算输入 Tensor 与权重矩阵 :math:`W` 的乘积,生成形状为 :math:`[batch\_size, *, out\_features]` 的输出 Tensor。 +如果偏置 :math:`bias` 不是 None,它必须是一个形状为 :math:`[out\_features]` 的 1-D Tensor,且将会被其加到输出中。 参数 ::::::::: -- **x** (Tensor) – 输入Tensor。它的数据类型可以为float16,float32或float64。 -- **weight** (Tensor) – 权重Tensor。它的数据类型可以为float16,float32或float64。 -- **bias** (Tensor,可选) – 偏置Tensor。它的数据类型可以为float16,float32或float64。如果不为None,则将会被加到输出中。默认值为None。 +- **x** (Tensor) – 输入 Tensor。它的数据类型可以为 float16,float32 或 float64。 +- **weight** (Tensor) – 权重 Tensor。它的数据类型可以为 float16,float32 或 float64。 +- **bias** (Tensor,可选) – 偏置 Tensor。它的数据类型可以为 float16,float32 或 
float64。如果不为 None,则将会被加到输出中。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor,形状为 :math:`[batch\_size, *, out\_features]`,数据类型与输入Tensor相同。 +Tensor,形状为 :math:`[batch\_size, *, out\_features]`,数据类型与输入 Tensor 相同。 代码示例 diff --git a/docs/api/paddle/nn/functional/local_response_norm_cn.rst b/docs/api/paddle/nn/functional/local_response_norm_cn.rst index b1418a8d00d..838cea39e60 100644 --- a/docs/api/paddle/nn/functional/local_response_norm_cn.rst +++ b/docs/api/paddle/nn/functional/local_response_norm_cn.rst @@ -7,7 +7,7 @@ local_response_norm 局部响应正则化(Local Response Normalization)用于对局部输入区域进行正则化,执行一种侧向抑制(lateral inhibition)。更多详情参考:`ImageNet Classification with Deep Convolutional Neural Networks `_ -其中 ``input`` 是mini-batch的输入特征。计算过程如下: +其中 ``input`` 是 mini-batch 的输入特征。计算过程如下: .. math:: @@ -24,15 +24,15 @@ local_response_norm ::::::::: - **x** (Tensor)- 输入的三维/四维/五维 `Tensor`,数据类型为:float32。 - **size** (int) - 累加的通道数。 - - **alpha** (float,可选)- 缩放参数,正数。默认值为1e-4。 - - **beta** (float,可选)- 指数,正数。默认值为0.75。 - - **k** (float,可选)- 位移,正数。默认值为1.0。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致。如果输入是三维 `Tensor`,该参数可以是"NCL"或"NLC",其中N是批尺寸,C是通道数,L是特征长度。如果输入是四维 `Tensor`,该参数可以是"NCHW"或"NHWC",其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。如果输入是五维 `Tensor`,该参数可以是"NCDHW"或"NDHWC",其中N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **alpha** (float,可选)- 缩放参数,正数。默认值为 1e-4。 + - **beta** (float,可选)- 指数,正数。默认值为 0.75。 + - **k** (float,可选)- 位移,正数。默认值为 1.0。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致。如果输入是三维 `Tensor`,该参数可以是"NCL"或"NLC",其中 N 是批尺寸,C 是通道数,L 是特征长度。如果输入是四维 `Tensor`,该参数可以是"NCHW"或"NHWC",其中 N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。如果输入是五维 `Tensor`,该参数可以是"NCDHW"或"NDHWC",其中 N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -局部响应正则化得到的输出特征,数据类型及维度和input相同的 `Tensor` 。 +局部响应正则化得到的输出特征,数据类型及维度和 input 相同的 `Tensor` 。 代码示例 ::::::::: diff --git 
a/docs/api/paddle/nn/functional/log_loss_cn.rst b/docs/api/paddle/nn/functional/log_loss_cn.rst index 07b348a4d1e..464e268bcc9 100644 --- a/docs/api/paddle/nn/functional/log_loss_cn.rst +++ b/docs/api/paddle/nn/functional/log_loss_cn.rst @@ -8,7 +8,7 @@ log_loss -**负log loss层** +**负 log loss 层** 该 OP 对输入的预测结果和目标标签进行计算,返回负对数损失值。 diff --git a/docs/api/paddle/nn/functional/log_sigmoid_cn.rst b/docs/api/paddle/nn/functional/log_sigmoid_cn.rst index 24ac76664fc..684a0f26814 100644 --- a/docs/api/paddle/nn/functional/log_sigmoid_cn.rst +++ b/docs/api/paddle/nn/functional/log_sigmoid_cn.rst @@ -5,7 +5,7 @@ log_sigmoid .. py:function:: paddle.nn.functional.log_sigmoid(x, name=None) -log_sigmoid激活层。计算公式如下: +log_sigmoid 激活层。计算公式如下: .. math:: diff --git a/docs/api/paddle/nn/functional/log_softmax_cn.rst b/docs/api/paddle/nn/functional/log_softmax_cn.rst index 24428dcb7de..659cb145fb6 100644 --- a/docs/api/paddle/nn/functional/log_softmax_cn.rst +++ b/docs/api/paddle/nn/functional/log_softmax_cn.rst @@ -4,7 +4,7 @@ log_softmax ------------------------------- .. py:function:: paddle.nn.functional.log_softmax(x, axis=-1, dtype=None, name=None) -该OP实现了log_softmax层。OP的计算公式如下: +该 OP 实现了 log_softmax 层。OP 的计算公式如下: .. 
math:: @@ -16,8 +16,8 @@ log_softmax 参数 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - axis (int,可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入 ``x`` 的维度,``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 - - dtype (str|np.dtype|core.VarDesc.VarType,可选) - 输入Tensor的数据类型。如果指定了 ``dtype``,则输入Tensor的数据类型会在计算前转换到 ``dtype`` 。``dtype``可以用来避免数据溢出。如果 ``dtype`` 为None,则输出Tensor的数据类型和 ``x`` 相同。默认值为None。 + - axis (int,可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D 是输入 ``x`` 的维度,``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - dtype (str|np.dtype|core.VarDesc.VarType,可选) - 输入 Tensor 的数据类型。如果指定了 ``dtype``,则输入 Tensor 的数据类型会在计算前转换到 ``dtype`` 。``dtype``可以用来避免数据溢出。如果 ``dtype`` 为 None,则输出 Tensor 的数据类型和 ``x`` 相同。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst b/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst index e5670203b2f..3f8e1cff177 100644 --- a/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst +++ b/docs/api/paddle/nn/functional/margin_ranking_loss_cn.rst @@ -5,7 +5,7 @@ margin_ranking_loss .. py:function:: paddle.nn.functional.margin_ranking_loss(input, other, label, margin=0.0, reduction='mean', name=None) -该算子计算输入input,other 和 标签label间的 `margin rank loss` 损失。该损失函数的数学计算公式如下: +该算子计算输入 input,other 和 标签 label 间的 `margin rank loss` 损失。该损失函数的数学计算公式如下: .. 
math:: margin\_rank\_loss = max(0, -label * (input - other) + margin) @@ -27,13 +27,13 @@ margin_ranking_loss - **input** (Tensor):第一个输入的 `Tensor`,数据类型为:float32、float64。 - **other** (Tensor):第二个输入的 `Tensor`,数据类型为:float32、float64。 - **label** (Tensor):训练数据的标签,数据类型为:float32, float64。 - - **margin** (float,可选): - 用于加和的margin值,默认值为0。 + - **margin** (float,可选): - 用于加和的 margin 值,默认值为 0。 - **reduction** (string,可选): - 指定应用于输出结果的计算方式,可选值有:``'none'`` 、 ``'mean'`` 、 ``'sum'``。如果设置为 ``'none'``,则直接返回 最原始的 ``margin_rank_loss``。如果设置为 ``'sum'``,则返回 ``margin_rank_loss`` 的总和。如果设置为 ``'mean'``,则返回 ``margin_rank_loss`` 的平均值。默认值为 ``'none'`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::: -Tensor,如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'``,则形状为 :math:`[1]`,否则shape和输入 `input` 保持一致。数据类型与 ``input``、 ``other`` 相同。 +Tensor,如果 :attr:`reduction` 为 ``'sum'`` 或者是 ``'mean'``,则形状为 :math:`[1]`,否则 shape 和输入 `input` 保持一致。数据类型与 ``input``、 ``other`` 相同。 代码示例 :::::::: diff --git a/docs/api/paddle/nn/functional/max_pool1d_cn.rst b/docs/api/paddle/nn/functional/max_pool1d_cn.rst index fb231fbf084..73763dcad8d 100755 --- a/docs/api/paddle/nn/functional/max_pool1d_cn.rst +++ b/docs/api/paddle/nn/functional/max_pool1d_cn.rst @@ -6,7 +6,7 @@ max_pool1d .. py:function:: paddle.nn.functional.max_pool1d(x, kernel_size, stride=None, padding=0, return_mask=False, ceil_mode=False, name=None) -该算子根据输入 `x` , `kernel_size` 等参数对一个输入Tensor计算1D的最大值池化。输入和输出都是3-D Tensor, +该算子根据输入 `x` , `kernel_size` 等参数对一个输入 Tensor 计算 1D 的最大值池化。输入和输出都是 3-D Tensor, 默认是以 `NCL` 格式表示的,其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。 .. 
note:: @@ -15,12 +15,12 @@ max_pool1d 参数 ::::::::: - - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, L]` 的3-D Tensor。其中 `N` 是batch size, `C` 是通道数,`L` 是输入特征的长度。其数据类型为float32或者float64。 - - **kernel_size** (int|list|tuple):池化核的尺寸大小。如果kernel_size为list或tuple类型,其必须包含一个整数。 - - **stride** (int|list|tuple):池化操作步长。如果stride为list或tuple类型,其必须包含一个整数。 - - **padding** (string|int|list|tuple):池化补零的方式。如果padding是一个字符串,则必须为 `SAME` 或者 `VALID`。如果是turple或者list类型,则应是 `[pad_left, pad_right]` 形式。如果padding是一个非0值,那么表示会在输入的两端都padding上同样长度的0。 - - **return_mask** (bool):是否返回最大值的索引,默认为False。 - - **ceil_mode** (bool):是否用ceil函数计算输出的height和width,如果设置为False,则使用floor函数来计算,默认为False。 + - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, L]` 的 3-D Tensor。其中 `N` 是 batch size, `C` 是通道数,`L` 是输入特征的长度。其数据类型为 float32 或者 float64。 + - **kernel_size** (int|list|tuple):池化核的尺寸大小。如果 kernel_size 为 list 或 tuple 类型,其必须包含一个整数。 + - **stride** (int|list|tuple):池化操作步长。如果 stride 为 list 或 tuple 类型,其必须包含一个整数。 + - **padding** (string|int|list|tuple):池化补零的方式。如果 padding 是一个字符串,则必须为 `SAME` 或者 `VALID`。如果是 turple 或者 list 类型,则应是 `[pad_left, pad_right]` 形式。如果 padding 是一个非 0 值,那么表示会在输入的两端都 padding 上同样长度的 0。 + - **return_mask** (bool):是否返回最大值的索引,默认为 False。 + - **ceil_mode** (bool):是否用 ceil 函数计算输出的 height 和 width,如果设置为 False,则使用 floor 函数来计算,默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -28,7 +28,7 @@ max_pool1d 返回 ::::::::: -``Tensor``,输入 `x` 经过最大值池化计算得到的目标3-D Tensor,其数据类型与输入相同。 +``Tensor``,输入 `x` 经过最大值池化计算得到的目标 3-D Tensor,其数据类型与输入相同。 代码示例 diff --git a/docs/api/paddle/nn/functional/max_pool2d_cn.rst b/docs/api/paddle/nn/functional/max_pool2d_cn.rst index bad909ede7b..f6c4dbaabff 100644 --- a/docs/api/paddle/nn/functional/max_pool2d_cn.rst +++ b/docs/api/paddle/nn/functional/max_pool2d_cn.rst @@ -25,13 +25,13 @@ max_pool2d 参数 ::::::::: - - **x** (Tensor):形状为 `[N,C,H,W]` 或 `[N,H,W,C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float32或float64。 - - **kernel_size** 
(int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_size_Height, pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若pool_size=2,则池化核大小为2x2。 - - **stride** (int|list|tuple):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示H和W维度上stride均为该值。默认值为kernel_size。 - - **padding** (string|int|list|tuple) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法。如果它是一个元组或列表,它可以有3种格式:(1)包含2个整数值:[pad_height, pad_width];(2)包含4个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含4个二元组:当 data_format 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示H和W维度上均为该值。默认值:0。 - - **ceil_mode** (bool):是否用ceil函数计算输出高度和宽度。如果是True,则使用 `ceil` 计算输出形状的大小。默认为None - - **return_mask** (bool):是否返回最大索引和输出。默认为False。 - - **data_format** (str):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW" + - **x** (Tensor):形状为 `[N,C,H,W]` 或 `[N,H,W,C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 + - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含两个整数值,(pool_size_Height, pool_size_Width)。若为一个整数,则它的平方值将作为池化核大小,比如若 pool_size=2,则池化核大小为 2x2。 + - **stride** (int|list|tuple):池化层的步长。如果它是一个元组或列表,它将包含两个整数,(pool_stride_Height, pool_stride_Width)。若为一个整数,则表示 H 和 W 维度上 stride 均为该值。默认值为 kernel_size。 + - **padding** (string|int|list|tuple) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 2 个整数值:[pad_height, pad_width];(2)包含 4 个整数值:[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 4 个二元组:当 data_format 为"NCHW"时为 [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NHWC"时为[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示 H 和 W 维度上均为该值。默认值:0。 + - **ceil_mode** (bool):是否用 ceil 函数计算输出高度和宽度。如果是 True,则使用 `ceil` 计算输出形状的大小。默认为 None + - **return_mask** 
(bool):是否返回最大索引和输出。默认为 False。 + - **data_format** (str):输入和输出的数据格式,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/max_pool3d_cn.rst b/docs/api/paddle/nn/functional/max_pool3d_cn.rst index 284a36d82cf..0d087a03589 100644 --- a/docs/api/paddle/nn/functional/max_pool3d_cn.rst +++ b/docs/api/paddle/nn/functional/max_pool3d_cn.rst @@ -26,13 +26,13 @@ max_pool3d 参数 ::::::::: - - **x** (Tensor):形状为 [N,C,D,H,W] 或 [N,D,H,W,C] 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型为float32或float64。 - - **kernel_size** (int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含三个整数值,(pool_size_Depth,pool_size_Height, pool_size_Width)。若为一个整数,则表示D,H和W维度上均为该值,比如若pool_size=2,则池化核大小为[2,2,2]。 - - **stride** (int|list|tuple):池化层的步长。如果它是一个元组或列表,它将包含三个整数,(pool_stride_Depth,pool_stride_Height, pool_stride_Width)。若为一个整数,则表示D, H和W维度上stride均为该值。默认值为kernel_size。 - - **padding** (string|int|list|tuple) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法。如果它是一个元组或列表,它可以有3种格式:(1)包含3个整数值:[pad_depth, pad_height, pad_width];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含5个二元组:当 data_format 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示D、H和W维度上均为该值。默认值:0 - - **ceil_mode** (bool):是否用ceil函数计算输出高度和宽度。如果是True,则使用 `ceil` 计算输出形状的大小。默认为False - - **return_mask** (bool):是否返回最大索引和输出。默认为False。 - - **data_format** (str):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NDCHW"。 + - **x** (Tensor):形状为 [N,C,D,H,W] 或 [N,D,H,W,C] 的 5-D Tensor,N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 + - **kernel_size** 
(int|list|tuple):池化核大小。如果它是一个元组或列表,它必须包含三个整数值,(pool_size_Depth,pool_size_Height, pool_size_Width)。若为一个整数,则表示 D,H 和 W 维度上均为该值,比如若 pool_size=2,则池化核大小为[2,2,2]。 + - **stride** (int|list|tuple):池化层的步长。如果它是一个元组或列表,它将包含三个整数,(pool_stride_Depth,pool_stride_Height, pool_stride_Width)。若为一个整数,则表示 D, H 和 W 维度上 stride 均为该值。默认值为 kernel_size。 + - **padding** (string|int|list|tuple) 池化填充。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法。如果它是一个元组或列表,它可以有 3 种格式:(1)包含 3 个整数值:[pad_depth, pad_height, pad_width];(2)包含 6 个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含 5 个二元组:当 data_format 为"NCDHW"时为[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 data_format 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]。若为一个整数,则表示 D、H 和 W 维度上均为该值。默认值:0 + - **ceil_mode** (bool):是否用 ceil 函数计算输出高度和宽度。如果是 True,则使用 `ceil` 计算输出形状的大小。默认为 False + - **return_mask** (bool):是否返回最大索引和输出。默认为 False。 + - **data_format** (str):输入和输出的数据格式,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NDCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/max_unpool1d_cn.rst b/docs/api/paddle/nn/functional/max_unpool1d_cn.rst index 490c5c198f9..457f4828ad8 100644 --- a/docs/api/paddle/nn/functional/max_unpool1d_cn.rst +++ b/docs/api/paddle/nn/functional/max_unpool1d_cn.rst @@ -6,7 +6,7 @@ max_unpool1d .. py:function:: paddle.nn.functional.max_unpool1d(x, indices, kernel_size, stride=None, padding=0, data_format="NCL", output_size=None, name=None) -这个API实现了 `1D最大反池化` 操作 +这个 API 实现了 `1D 最大反池化` 操作 .. 
note:: 更多细节请参考对应的 `Class` 请参考 :ref:`cn_api_nn_MaxUnPool1D` 。 @@ -25,13 +25,13 @@ max_unpool1d 参数 ::::::::: - - **x** (Tensor):形状为 `[N,C,L]` 的3-D Tensor,N是批尺寸,C是通道数,L是特征长度,数据类型为float32或float64。 - - **indices** (Tensor):形状为 `[N,C,L]` 的3-D Tensor,N是批尺寸,C是通道数,L是特征长度,数据类型为int32。 + - **x** (Tensor):形状为 `[N,C,L]` 的 3-D Tensor,N 是批尺寸,C 是通道数,L 是特征长度,数据类型为 float32 或 float64。 + - **indices** (Tensor):形状为 `[N,C,L]` 的 3-D Tensor,N 是批尺寸,C 是通道数,L 是特征长度,数据类型为 int32。 - **kernel_size** (int|list|tuple):反池化的滑动窗口大小。 - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它必须包含一个整数,(pool_stride_Length),默认值:None。 - **padding** (string|int|list|tuple,可选) 池化填充,默认值:0。 - **output_size** (list|tuple,可选):目标输出尺寸。如果 output_size 没有被设置,则实际输出尺寸会通过(input_shape, kernel_size, stride, padding)自动计算得出,默认值:None。 - - **data_format** (str,可选):输入和输出的数据格式,只能是"NCL"。N是批尺寸,C是通道数,L是特征长度。默认值:"NCL" + - **data_format** (str,可选):输入和输出的数据格式,只能是"NCL"。N 是批尺寸,C 是通道数,L 是特征长度。默认值:"NCL" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/max_unpool2d_cn.rst b/docs/api/paddle/nn/functional/max_unpool2d_cn.rst index a4aef5f774c..372d360d7a9 100644 --- a/docs/api/paddle/nn/functional/max_unpool2d_cn.rst +++ b/docs/api/paddle/nn/functional/max_unpool2d_cn.rst @@ -6,7 +6,7 @@ max_unpool2d .. py:function:: paddle.nn.functional.max_unpool2d(x, indices, kernel_size, stride=None,padding=0,data_format="NCHW",output_size=None,name=None) -这个API实现了 `2D最大反池化` 操作 +这个 API 实现了 `2D 最大反池化` 操作 .. 
note:: 更多细节请参考对应的 `Class` 请参考 :ref:`cn_api_nn_MaxUnPool2D` 。 @@ -28,13 +28,13 @@ max_unpool2d 参数 ::::::::: - - **x** (Tensor):形状为 `[N,C,H,W]` 或 `[N,H,W,C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float32或float64。 - - **indices** (Tensor):形状为 `[N,C,H,W]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为int32。 + - **x** (Tensor):形状为 `[N,C,H,W]` 或 `[N,H,W,C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 + - **indices** (Tensor):形状为 `[N,C,H,W]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 int32。 - **kernel_size** (int|list|tuple):反池化的滑动窗口大小。 - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它必须是两个相等的整数,(pool_stride_Height, pool_stride_Width),默认值:None。 - **padding** (string|int|list|tuple,可选) 池化填充,默认值:0。 - **output_size** (list|tuple,可选):目标输出尺寸。如果 output_size 没有被设置,则实际输出尺寸会通过(input_shape, kernel_size, padding)自动计算得出,默认值:None。 - - **data_format** (str,可选):输入和输出的数据格式,只能是"NCHW"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW" + - **data_format** (str,可选):输入和输出的数据格式,只能是"NCHW"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/max_unpool3d_cn.rst b/docs/api/paddle/nn/functional/max_unpool3d_cn.rst index 1f2c9c32f3c..e53b79ce794 100644 --- a/docs/api/paddle/nn/functional/max_unpool3d_cn.rst +++ b/docs/api/paddle/nn/functional/max_unpool3d_cn.rst @@ -6,7 +6,7 @@ max_unpool3d .. py:function:: paddle.nn.functional.max_unpool3d(x, indices, kernel_size, stride=None, padding=0, data_format="NCDHW", output_size=None, name=None) -这个API实现了 `3D最大反池化` 操作 +这个 API 实现了 `3D 最大反池化` 操作 .. 
note:: 更多细节请参考对应的 `Class` 请参考 :ref:`cn_api_nn_MaxUnPool3D` 。 @@ -31,13 +31,13 @@ max_unpool3d 参数 ::::::::: - - **x** (Tensor):形状为 `[N,C,D,H,W]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型为float32或float64。 - - **indices** (Tensor):形状为 `[N,C,D,H,W]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型为int32。 + - **x** (Tensor):形状为 `[N,C,D,H,W]` 的 5-D Tensor,N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 + - **indices** (Tensor):形状为 `[N,C,D,H,W]` 的 5-D Tensor,N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度,数据类型为 int32。 - **kernel_size** (int|list|tuple):反池化的滑动窗口大小。 - **stride** (int|list|tuple,可选):池化层的步长。如果它是一个元组或列表,它必须是三个相等的整数,(pool_stride_Depth, pool_stride_Height, pool_stride_Width),默认值:None。 - **padding** (string|int|list|tuple,可选) 池化填充,默认值:0。 - **output_size** (list|tuple,可选):目标输出尺寸。如果 output_size 没有被设置,则实际输出尺寸会通过(input_shape, kernel_size, stride, padding)自动计算得出,默认值:None。 - - **data_format** (str,可选):输入和输出的数据格式,只能是"NCDHW"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCDHW" + - **data_format** (str,可选):输入和输出的数据格式,只能是"NCDHW"。N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NCDHW" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/maxout_cn.rst b/docs/api/paddle/nn/functional/maxout_cn.rst index 2e29a9cbded..0594a8cffaa 100644 --- a/docs/api/paddle/nn/functional/maxout_cn.rst +++ b/docs/api/paddle/nn/functional/maxout_cn.rst @@ -5,7 +5,7 @@ maxout .. 
py:function:: paddle.nn.functional.maxout(x, groups, axis=1, name=None) -maxout激活层。 +maxout 激活层。 假设输入形状为(N, Ci, H, W),输出形状为(N, Co, H, W),则 :math:`Co=Ci/groups` 运算公式如下: @@ -22,9 +22,9 @@ maxout激活层。 :::::::::::: :::::::::: - - x (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float32或float64。 - - groups (int) - 指定将输入张量的channel通道维度进行分组的数目。输出的通道数量为通道数除以组数。 - - axis (int,可选) - 指定通道所在维度的索引。当数据格式为NCHW时,axis应该被设置为1,当数据格式为NHWC时,axis应该被设置为-1或者3。默认值为1。 + - x (Tensor) - 输入是形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 float32 或 float64。 + - groups (int) - 指定将输入张量的 channel 通道维度进行分组的数目。输出的通道数量为通道数除以组数。 + - axis (int,可选) - 指定通道所在维度的索引。当数据格式为 NCHW 时,axis 应该被设置为 1,当数据格式为 NHWC 时,axis 应该被设置为-1 或者 3。默认值为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/mish_cn.rst b/docs/api/paddle/nn/functional/mish_cn.rst index 36fc6ba2b02..7e1809c6d6e 100644 --- a/docs/api/paddle/nn/functional/mish_cn.rst +++ b/docs/api/paddle/nn/functional/mish_cn.rst @@ -5,7 +5,7 @@ mish .. py:function:: paddle.nn.functional.mish(x, name=None) -mish激活层。计算公式如下: +mish 激活层。计算公式如下: .. math:: diff --git a/docs/api/paddle/nn/functional/mse_loss_cn.rst b/docs/api/paddle/nn/functional/mse_loss_cn.rst index c098fed27c8..77c5619c53f 100644 --- a/docs/api/paddle/nn/functional/mse_loss_cn.rst +++ b/docs/api/paddle/nn/functional/mse_loss_cn.rst @@ -5,9 +5,9 @@ mse_loss .. 
py:function:: paddle.nn.functional.mse_loss(input, label, reduction='mean', name=None) -该OP用于计算预测值和目标值的均方差误差。 +该 OP 用于计算预测值和目标值的均方差误差。 -对于预测值input和目标值label,公式为: +对于预测值 input 和目标值 label,公式为: 当 `reduction` 设置为 ``'none'`` 时, @@ -27,8 +27,8 @@ mse_loss 参数 ::::::::: - - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 - - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor。数据类型为float32或float64。 + - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维 Tensor。数据类型为 float32 或 float64。 + - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k]` 的多维 Tensor。数据类型为 float32 或 float64。 返回 ::::::::: diff --git a/docs/api/paddle/nn/functional/multi_label_soft_margin_loss_cn.rst b/docs/api/paddle/nn/functional/multi_label_soft_margin_loss_cn.rst index cd0d8746a9e..92f7fcd69e3 100644 --- a/docs/api/paddle/nn/functional/multi_label_soft_margin_loss_cn.rst +++ b/docs/api/paddle/nn/functional/multi_label_soft_margin_loss_cn.rst @@ -16,26 +16,26 @@ multi_label_soft_margin_loss 如果添加权重则再乘以对应的权重值 -最后,添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 +最后,添加 `reduce` 操作到前面的输出 Out 上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 参数 ::::::::: - - **input** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, *]` , 其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **label** (Tensor) - :math:`[N, *]` , 标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 - - **weight** (Tensor,可选) - 手动设定权重,默认为None + - **weight** (Tensor,可选) - 手动设定权重,默认为 None - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``, ``'mean'``, ``'sum'`` 。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始 Loss。 - - **name** (str,可选) - 
操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + - **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name` 。 形状 ::::::::: - - **input** (Tensor) - :math:`[N, *]` , 其中N是batch_size, `*` 是任意其他维度。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, *]` , 其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 - - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 返回 ::::::::: - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 + 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` , 与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``, 则输出的维度为 :math:`[1]` 。 代码示例 diff --git a/docs/api/paddle/nn/functional/nll_loss_cn.rst b/docs/api/paddle/nn/functional/nll_loss_cn.rst index 904032822b3..14d5cd1c1fc 100644 --- a/docs/api/paddle/nn/functional/nll_loss_cn.rst +++ b/docs/api/paddle/nn/functional/nll_loss_cn.rst @@ -8,11 +8,11 @@ nll_loss 参数 ::::::::: - - **input** (Tensor): - 输入 `Tensor`,其形状为 :math:`[N, C]`,其中 `C` 为类别数。但是对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_K]`。数据类型为float32或float64。 - - **label** (Tensor): - 输入x对应的标签值。其形状为 :math:`[N,]` 或者 :math:`[N, d_1, d_2, ..., d_K]`,数据类型为int64。 - - **weight** (Tensor,可选): - 手动指定每个类别的权重。其默认为 `None`。如果提供该参数的话,长度必须为 `num_classes`。数据类型为float32或float64。 - - **ignore_index** (int64,可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为int64。 - - **reduction** (str,可选): - 指定应用于输出结果的计算方式,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss 
Tensor。数据类型为string。 + - **input** (Tensor): - 输入 `Tensor`,其形状为 :math:`[N, C]`,其中 `C` 为类别数。但是对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_K]`。数据类型为 float32 或 float64。 + - **label** (Tensor): - 输入 x 对应的标签值。其形状为 :math:`[N,]` 或者 :math:`[N, d_1, d_2, ..., d_K]`,数据类型为 int64。 + - **weight** (Tensor,可选): - 手动指定每个类别的权重。其默认为 `None`。如果提供该参数的话,长度必须为 `num_classes`。数据类型为 float32 或 float64。 + - **ignore_index** (int64,可选): - 指定一个忽略的标签值,此标签值不参与计算。默认值为-100。数据类型为 int64。 + - **reduction** (str,可选): - 指定应用于输出结果的计算方式,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算 `mini-batch` loss 均值。设置为 `sum` 时,计算 `mini-batch` loss 的总和。设置为 `none` 时,则返回 loss Tensor。数据类型为 string。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/normalize_cn.rst b/docs/api/paddle/nn/functional/normalize_cn.rst index c31655dfd60..7e4bbc8c2f5 100644 --- a/docs/api/paddle/nn/functional/normalize_cn.rst +++ b/docs/api/paddle/nn/functional/normalize_cn.rst @@ -19,10 +19,10 @@ normalize 参数 ::::::::: - - **x** (Tensor) - 输入可以是N-D Tensor。数据类型为:float32、float64。 + - **x** (Tensor) - 输入可以是 N-D Tensor。数据类型为:float32、float64。 - **p** (float|int,可选) - 范数公式中的指数值。默认值:2 - - **axis** (int,可选)- 要进行归一化的轴。如果 ``x`` 是1-D Tensor,轴固定为0。如果 `axis < 0`,轴为 `x.ndim + axis`。-1表示最后一维。 - - **epsilon** (float,可选) - 添加到分母上的值以防止分母除0。默认值为1e-12。 + - **axis** (int,可选)- 要进行归一化的轴。如果 ``x`` 是 1-D Tensor,轴固定为 0。如果 `axis < 0`,轴为 `x.ndim + axis`。-1 表示最后一维。 + - **epsilon** (float,可选) - 添加到分母上的值以防止分母除 0。默认值为 1e-12。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/npair_loss_cn.rst b/docs/api/paddle/nn/functional/npair_loss_cn.rst index 7c8d43a4d51..0e94c7e8190 100644 --- a/docs/api/paddle/nn/functional/npair_loss_cn.rst +++ b/docs/api/paddle/nn/functional/npair_loss_cn.rst @@ -7,22 +7,22 @@ npair_loss 参考阅读 `Improved Deep Metric Learning with Multi class N pair Loss Objective `_ 
-NPair损失需要成对的数据。NPair损失分为两部分:第一部分是对嵌入向量进行L2正则化;第二部分是每一对数据的相似性矩阵的每一行和映射到ont-hot之后的标签的交叉熵损失的和。 +NPair 损失需要成对的数据。NPair 损失分为两部分:第一部分是对嵌入向量进行 L2 正则化;第二部分是每一对数据的相似性矩阵的每一行和映射到 ont-hot 之后的标签的交叉熵损失的和。 参数 :::::::::::: ::::::::: -- **anchor** (Tensor) - 锚点图像的嵌入特征,形状为[batch_size, embedding_dims]的2-D `Tensor`。数据类型:float32和float64。 -- **positive** (Tensor) - 正例图像的嵌入特征,形状为[batch_size, embedding_dims]的2-D `Tensor`。数据类型:float32和float64。 -- **labels** (Tensor) - 标签向量,形状为[batch_size]的1-D `Tensor`。数据类型:float32、float64和int64。 -- **l2_reg** (float) - 嵌入向量的L2正则化系数,默认:0.002。 +- **anchor** (Tensor) - 锚点图像的嵌入特征,形状为[batch_size, embedding_dims]的 2-D `Tensor`。数据类型:float32 和 float64。 +- **positive** (Tensor) - 正例图像的嵌入特征,形状为[batch_size, embedding_dims]的 2-D `Tensor`。数据类型:float32 和 float64。 +- **labels** (Tensor) - 标签向量,形状为[batch_size]的 1-D `Tensor`。数据类型:float32、float64 和 int64。 +- **l2_reg** (float) - 嵌入向量的 L2 正则化系数,默认:0.002。 返回 :::::::::::: -经过npair loss计算之后的结果 `Tensor` 。 +经过 npair loss 计算之后的结果 `Tensor` 。 代码示例 diff --git a/docs/api/paddle/nn/functional/one_hot_cn.rst b/docs/api/paddle/nn/functional/one_hot_cn.rst index e6e845502a1..675a05596af 100644 --- a/docs/api/paddle/nn/functional/one_hot_cn.rst +++ b/docs/api/paddle/nn/functional/one_hot_cn.rst @@ -4,11 +4,11 @@ one_hot ------------------------------- .. py:function:: paddle.nn.functional.one_hot(x, num_classes, name=None) -该OP将输入'x'中的每个id转换为一个one-hot向量,其长度为 ``num_classes``,该id对应的向量维度上的值为1,其余维度的值为0。 +该 OP 将输入'x'中的每个 id 转换为一个 one-hot 向量,其长度为 ``num_classes``,该 id 对应的向量维度上的值为 1,其余维度的值为 0。 -输出的Tensor的shape是在输入shape的最后一维后面添加了num_classes的维度。 +输出的 Tensor 的 shape 是在输入 shape 的最后一维后面添加了 num_classes 的维度。 -- 示例1: +- 示例 1: .. code-block:: text @@ -24,7 +24,7 @@ one_hot [0., 0., 0., 1.], [1., 0., 0., 0.]] -- 示例2: +- 示例 2: .. 
code-block:: text @@ -34,19 +34,19 @@ one_hot num_classes = 4 输出:抛出 Illegal value 的异常 - X中第2维的值是5,超过了num_classes,因此抛异常。 + X 中第 2 维的值是 5,超过了 num_classes,因此抛异常。 参数 :::::::::::: - - **x** (Tensor) - 维度为 :math:`[N_1, ..., N_n]` 的多维Tensor,维度至少1维。数据类型为int32或int64。 - - **num_classes** (int) - 用于定义一个one-hot向量的长度。若输入为词id,则 ``num_classes`` 通常取值为词典大小。 + - **x** (Tensor) - 维度为 :math:`[N_1, ..., N_n]` 的多维 Tensor,维度至少 1 维。数据类型为 int32 或 int64。 + - **num_classes** (int) - 用于定义一个 one-hot 向量的长度。若输入为词 id,则 ``num_classes`` 通常取值为词典大小。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,转换后的one_hot Tensor,数据类型为float32。 +Tensor,转换后的 one_hot Tensor,数据类型为 float32。 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/functional/pad_cn.rst b/docs/api/paddle/nn/functional/pad_cn.rst index 707e53774a6..2d91dcbecce 100644 --- a/docs/api/paddle/nn/functional/pad_cn.rst +++ b/docs/api/paddle/nn/functional/pad_cn.rst @@ -5,24 +5,24 @@ pad .. py:function:: paddle.nn.functional.pad(x, pad, mode="constant", value=0.0, data_format="NCHW", name=None) -该OP依照 ``pad`` 和 ``mode`` 属性对 ``x`` 进行 ``pad``。如果 ``mode`` 为 ``'constant'``,并且 ``pad`` 的长度为 ``x`` 维度的2倍时,则会根据 ``pad`` 和 ``value`` 对 ``x`` 从前面的维度向后依次补齐;否则只会对 ``x`` 在除 ``batch size`` 和 ``channel`` 之外的所有维度进行补齐。如果 ``mode`` 为 ``reflect``,则 ``x`` 对应维度上的长度必须大于对应的 ``pad`` 值。 +该 OP 依照 ``pad`` 和 ``mode`` 属性对 ``x`` 进行 ``pad``。如果 ``mode`` 为 ``'constant'``,并且 ``pad`` 的长度为 ``x`` 维度的 2 倍时,则会根据 ``pad`` 和 ``value`` 对 ``x`` 从前面的维度向后依次补齐;否则只会对 ``x`` 在除 ``batch size`` 和 ``channel`` 之外的所有维度进行补齐。如果 ``mode`` 为 ``reflect``,则 ``x`` 对应维度上的长度必须大于对应的 ``pad`` 值。 参数 :::::::::::: - - **x** (Tensor) - Tensor,format可以为 ``'NCL'``, ``'NLC'``, ``'NCHW'``, ``'NHWC'``, ``'NCDHW'`` - 或 ``'NDHWC'``,默认值为 ``'NCHW'``,数据类型支持float16, float32, float64, int32, int64。 - - **pad** (Tensor | List[int] | Tuple[int]) - 填充大小。如果 ``mode`` 为 ``'constant'``,并且 ``pad`` 的长度为 ``x`` 维度的2倍时, - 则会根据 ``pad`` 和 ``value`` 对 ``x`` 从前面的维度向后依次补齐;否则:1。当输入维度为3时,pad的格式为[pad_left, 
pad_right]; - 2. 当输入维度为4时,pad的格式为[pad_left, pad_right, pad_top, pad_bottom]; - 3. 当输入维度为5时,pad的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 - - **mode** (str) - padding的四种模式,分别为 ``'constant'``, ``'reflect'``, ``'replicate'`` 和 ``'circular'``。 + - **x** (Tensor) - Tensor,format 可以为 ``'NCL'``, ``'NLC'``, ``'NCHW'``, ``'NHWC'``, ``'NCDHW'`` + 或 ``'NDHWC'``,默认值为 ``'NCHW'``,数据类型支持 float16, float32, float64, int32, int64。 + - **pad** (Tensor | List[int] | Tuple[int]) - 填充大小。如果 ``mode`` 为 ``'constant'``,并且 ``pad`` 的长度为 ``x`` 维度的 2 倍时, + 则会根据 ``pad`` 和 ``value`` 对 ``x`` 从前面的维度向后依次补齐;否则:1。当输入维度为 3 时,pad 的格式为[pad_left, pad_right]; + 2. 当输入维度为 4 时,pad 的格式为[pad_left, pad_right, pad_top, pad_bottom]; + 3. 当输入维度为 5 时,pad 的格式为[pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]。 + - **mode** (str) - padding 的四种模式,分别为 ``'constant'``, ``'reflect'``, ``'replicate'`` 和 ``'circular'``。 ``'constant'`` 表示填充常数 ``value``; ``'reflect'`` 表示填充以 ``x`` 边界值为轴的映射;``'replicate'`` 表示 填充 ``x`` 边界值;``'circular'`` 为循环填充 ``x``。具体结果可见以下示例。默认值为 ``'constant'``。 - - **value** (float32) - 以 ``'constant'`` 模式填充区域时填充的值。默认值为0.0。 - - **data_format** (str) - 指定 ``x`` 的format,可为 ``'NCL'``, ``'NLC'``, ``'NCHW'``, ``'NHWC'``, ``'NCDHW'`` + - **value** (float32) - 以 ``'constant'`` 模式填充区域时填充的值。默认值为 0.0。 + - **data_format** (str) - 指定 ``x`` 的 format,可为 ``'NCL'``, ``'NLC'``, ``'NCHW'``, ``'NHWC'``, ``'NCDHW'`` 或 ``'NDHWC'``,默认值为 ``'NCHW'``。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/pixel_shuffle_cn.rst b/docs/api/paddle/nn/functional/pixel_shuffle_cn.rst index a8983cc6e65..3e6b21dca96 100644 --- a/docs/api/paddle/nn/functional/pixel_shuffle_cn.rst +++ b/docs/api/paddle/nn/functional/pixel_shuffle_cn.rst @@ -5,21 +5,21 @@ pixel_shuffle ------------------------------- .. 
py:function:: paddle.nn.functional.pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None) -该算子将一个形为[N, C, H, W]或是[N, H, W, C]的Tensor重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的Tensor。这样做有利于实现步长(stride)为1/r的高效sub-pixel(亚像素)卷积。详见Shi等人在2016年发表的论文 `Real Time Single Image and Video Super Resolution Using an Efficient Sub Pixel Convolutional Neural Network `_ 。 +该算子将一个形为[N, C, H, W]或是[N, H, W, C]的 Tensor 重新排列成形为 [N, C/r**2, H*r, W*r]或 [N, H*r, W*r, C/r**2] 的 Tensor。这样做有利于实现步长(stride)为 1/r 的高效 sub-pixel(亚像素)卷积。详见 Shi 等人在 2016 年发表的论文 `Real Time Single Image and Video Super Resolution Using an Efficient Sub Pixel Convolutional Neural Network `_ 。 .. note:: 详细请参考对应的 `Class` 请参考::ref:`cn_api_nn_PixelShuffle` 。 参数 ::::::::: - - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, H, W]` 的4-D Tensor。其中 `N` 是batch size, `C` 是通道数,`H` 是输入特征的高度,`W` 是输入特征的宽度。其数据类型为float32或者float64。 + - **x** (Tensor):当前算子的输入,其是一个形状为 `[N, C, H, W]` 的 4-D Tensor。其中 `N` 是 batch size, `C` 是通道数,`H` 是输入特征的高度,`W` 是输入特征的宽度。其数据类型为 float32 或者 float64。 - **upscale_factor** (int):增大空间分辨率的增大因子 - **data_format** (str,可选):数据格式,可选:"NCHW"或"NHWC",默认:"NCHW" - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,输出Tensor,其数据类型与输入相同。 +``Tensor``,输出 Tensor,其数据类型与输入相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/prelu_cn.rst b/docs/api/paddle/nn/functional/prelu_cn.rst index 5fa250b30f6..c9a63174b91 100644 --- a/docs/api/paddle/nn/functional/prelu_cn.rst +++ b/docs/api/paddle/nn/functional/prelu_cn.rst @@ -5,7 +5,7 @@ prelu .. py:function:: paddle.nn.functional.prelu(x, weight, data_format="NCHW", name=None) -prelu激活层(PRelu Activation Operator)。计算公式如下: +prelu 激活层(PRelu Activation Operator)。计算公式如下: .. 
math:: @@ -16,7 +16,7 @@ prelu激活层(PRelu Activation Operator)。计算公式如下: 参数 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - weight (Tensor) - 可训练参数,数据类型同``x`` 一致,形状支持2种:[1] 或者 [in],其中`in`为输入的通道数。 + - weight (Tensor) - 可训练参数,数据类型同``x`` 一致,形状支持 2 种:[1] 或者 [in],其中`in`为输入的通道数。 - data_format (str,可选) – 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是 "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" 或者 "NDHWC"。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/relu6_cn.rst b/docs/api/paddle/nn/functional/relu6_cn.rst index d8e0c43fa28..9440906580b 100644 --- a/docs/api/paddle/nn/functional/relu6_cn.rst +++ b/docs/api/paddle/nn/functional/relu6_cn.rst @@ -5,7 +5,7 @@ relu6 .. py:function:: paddle.nn.functional.relu6(x, name=None) -relu6激活层 +relu6 激活层 .. math:: diff --git a/docs/api/paddle/nn/functional/relu_cn.rst b/docs/api/paddle/nn/functional/relu_cn.rst index 6eaae69fe6b..480f4d7bc1e 100644 --- a/docs/api/paddle/nn/functional/relu_cn.rst +++ b/docs/api/paddle/nn/functional/relu_cn.rst @@ -5,7 +5,7 @@ relu .. py:function:: paddle.nn.functional.relu(x, name=None) -relu激活层(Rectified Linear Unit)。计算公式如下: +relu 激活层(Rectified Linear Unit)。计算公式如下: .. math:: diff --git a/docs/api/paddle/nn/functional/rrelu_cn.rst b/docs/api/paddle/nn/functional/rrelu_cn.rst index 32d2703e388..4f5af367775 100644 --- a/docs/api/paddle/nn/functional/rrelu_cn.rst +++ b/docs/api/paddle/nn/functional/rrelu_cn.rst @@ -5,7 +5,7 @@ rrelu .. py:function:: paddle.nn.functional.rrelu(x, lower=1. / 8., upper=1. 
/ 3., training=True, name=None) -rrelu激活函数,应用随机纠正线性单元对神经元激活,参考论文: +rrelu 激活函数,应用随机纠正线性单元对神经元激活,参考论文: `Empirical Evaluation of Rectified Activations in Convolutional Network `_ 。 训练阶段对负斜率进行均匀分布随机采样: @@ -39,8 +39,8 @@ rrelu激活函数,应用随机纠正线性单元对神经元激活,参考论 参数 :::::::::: - **x** (Tensor) - 输入的 `Tensor`,数据类型为:float16、float32、float64。 - - **lower** (float,可选) - 负值斜率的随机值范围下限,`lower` 包含在范围中。支持的数据类型:float。默认值为0.125。 - - **upper** (float,可选) - 负值斜率的随机值范围上限,`upper` 包含在范围中。支持的数据类型:float。默认值为0.333。 + - **lower** (float,可选) - 负值斜率的随机值范围下限,`lower` 包含在范围中。支持的数据类型:float。默认值为 0.125。 + - **upper** (float,可选) - 负值斜率的随机值范围上限,`upper` 包含在范围中。支持的数据类型:float。默认值为 0.333。 - **training** (bool,可选) - 标记是否为训练阶段。默认:True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/selu_cn.rst b/docs/api/paddle/nn/functional/selu_cn.rst index c772212b3c3..66c7e83e234 100644 --- a/docs/api/paddle/nn/functional/selu_cn.rst +++ b/docs/api/paddle/nn/functional/selu_cn.rst @@ -5,7 +5,7 @@ selu .. py:function:: paddle.nn.functional.selu(x, scale=1.0507009873554804934193349852946, alpha=1.6732632423543772848170429916717, name=None) -selu激活层 +selu 激活层 .. 
math:: @@ -22,8 +22,8 @@ selu激活层 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - scale (float,可选) - selu激活计算公式中的scale值,必须大于1.0。默认值为1.0507009873554804934193349852946。 - - alpha (float,可选) - selu激活计算公式中的alpha值,必须大于等于零。默认值为1.6732632423543772848170429916717。 + - scale (float,可选) - selu 激活计算公式中的 scale 值,必须大于 1.0。默认值为 1.0507009873554804934193349852946。 + - alpha (float,可选) - selu 激活计算公式中的 alpha 值,必须大于等于零。默认值为 1.6732632423543772848170429916717。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/sequence_mask_cn.rst b/docs/api/paddle/nn/functional/sequence_mask_cn.rst index cc07d7b3378..e2a60cd30ae 100644 --- a/docs/api/paddle/nn/functional/sequence_mask_cn.rst +++ b/docs/api/paddle/nn/functional/sequence_mask_cn.rst @@ -35,14 +35,14 @@ sequence_mask 参数 ::::::::: - - **x** (Tensor) - 输入张量,其元素是小于等于 ``maxlen`` 的整数,形状为 ``[d_1, d_2,…, d_n]`` 的Tensor。 + - **x** (Tensor) - 输入张量,其元素是小于等于 ``maxlen`` 的整数,形状为 ``[d_1, d_2,…, d_n]`` 的 Tensor。 - **maxlen** (int,可选) - 序列的最大长度。默认为空,此时 ``maxlen`` 取 ``x`` 中所有元素的最大值。 - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) - 输出的数据类型,默认为 ``int64`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -mask张量,Tensor,形状为 ``[d_1, d_2,… ,d_n, maxlen]``,数据类型由 ``dtype`` 指定,支持float32、float64、int32和int64,默认为int64。 +mask 张量,Tensor,形状为 ``[d_1, d_2,… ,d_n, maxlen]``,数据类型由 ``dtype`` 指定,支持 float32、float64、int32 和 int64,默认为 int64。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/sigmoid_cn.rst b/docs/api/paddle/nn/functional/sigmoid_cn.rst index 278a8e157ed..dbd46260e7d 100755 --- a/docs/api/paddle/nn/functional/sigmoid_cn.rst +++ b/docs/api/paddle/nn/functional/sigmoid_cn.rst @@ -8,7 +8,7 @@ sigmoid -sigmoid激活函数 +sigmoid 激活函数 .. 
math:: out = \frac{1}{1 + e^{-x}} @@ -17,12 +17,12 @@ sigmoid激活函数 参数 ::::::::: - - **x** Tensor - 数据类型为float32,float64。激活函数的输入值。 + - **x** Tensor - 数据类型为 float32,float64。激活函数的输入值。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。默认:None 返回 ::::::::: -Tensor,激活函数的输出值,数据类型为float32。 +Tensor,激活函数的输出值,数据类型为 float32。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst b/docs/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst index c519de86bb5..6aae53111ce 100644 --- a/docs/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst +++ b/docs/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst @@ -7,35 +7,35 @@ sigmoid_focal_loss `Focal Loss `_ 用于解决分类任务中的前景类-背景类数量不均衡的问题。在这种损失函数,易分样本的占比被减少,而难分样本的比重被增加。例如在一阶段的目标检测任务中,前景-背景不均衡表现得非常严重。 -该算子通过下式计算focal loss: +该算子通过下式计算 focal loss: .. math:: Out = -Labels * alpha * {(1 - \sigma(Logit))}^{gamma}\log(\sigma(Logit)) - (1 - Labels) * (1 - alpha) * {\sigma(Logit)}^{gamma}\log(1 - \sigma(Logit)) 其中 :math:`\sigma(Logit) = \frac{1}{1 + \exp(-Logit)}` -当 `normalizer` 不为None时,该算子会将输出损失Out除以张量 `normalizer` : +当 `normalizer` 不为 None 时,该算子会将输出损失 Out 除以张量 `normalizer` : .. 
math:: Out = \frac{Out}{normalizer} -最后,该算子会添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 ``'none'`` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 ``'mean'`` 时,返回输出的均值 :math:`Out = MEAN(Out)`。当 `reduction` 为 ``'sum'`` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 +最后,该算子会添加 `reduce` 操作到前面的输出 Out 上。当 `reduction` 为 ``'none'`` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 ``'mean'`` 时,返回输出的均值 :math:`Out = MEAN(Out)`。当 `reduction` 为 ``'sum'`` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 -**注意**:标签值0表示背景类(即负样本),1表示前景类(即正样本)。 +**注意**:标签值 0 表示背景类(即负样本),1 表示前景类(即正样本)。 参数 ::::::::: - - **logit** (Tensor) - 维度为 :math:`[N, *]`,其中N是batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是卷积层的输出,不需要经过 ``sigmoid`` 层。数据类型是float32、float64。 + - **logit** (Tensor) - 维度为 :math:`[N, *]`,其中 N 是 batch_size, `*` 是任意其他维度。输入数据 ``logit`` 一般是卷积层的输出,不需要经过 ``sigmoid`` 层。数据类型是 float32、float64。 - **label** (Tensor) - 维度为 :math:`[N, *]`,标签 ``label`` 的维度、数据类型与输入 ``logit`` 相同。 - - **normalizer** (Tensor,可选) - 维度为 :math:`[1]` ,focal loss的归一化系数,数据类型与输入 ``logit`` 相同。若设置为None,则不会将focal loss做归一化操作(即不会将focal loss除以normalizer)。在目标检测任务中,设置为正样本的数量。默认值为None。 - - **alpha** (int|float,可选) - 用于平衡正样本和负样本的超参数,取值范围 :math:`[0,1]`。默认值设置为0.25。 - - **gamma** (int|float,可选) - 用于平衡易分样本和难分样本的超参数,默认值设置为2.0。 - - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `focal loss` 的均值;设置为 ``'sum'`` 时,计算 `focal loss` 的总和;设置为 ``'none'`` 时,则返回原始loss。 + - **normalizer** (Tensor,可选) - 维度为 :math:`[1]` ,focal loss 的归一化系数,数据类型与输入 ``logit`` 相同。若设置为 None,则不会将 focal loss 做归一化操作(即不会将 focal loss 除以 normalizer)。在目标检测任务中,设置为正样本的数量。默认值为 None。 + - **alpha** (int|float,可选) - 用于平衡正样本和负样本的超参数,取值范围 :math:`[0,1]`。默认值设置为 0.25。 + - **gamma** (int|float,可选) - 用于平衡易分样本和难分样本的超参数,默认值设置为 2.0。 + - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 `focal loss` 的均值;设置为 ``'sum'`` 时,计算 `focal loss` 的总和;设置为 ``'none'`` 时,则返回原始 loss。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - - 
Tensor,输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``logit`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - Tensor,输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``logit`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/silu_cn.rst b/docs/api/paddle/nn/functional/silu_cn.rst index ece21e7b147..a5bb3d2f641 100644 --- a/docs/api/paddle/nn/functional/silu_cn.rst +++ b/docs/api/paddle/nn/functional/silu_cn.rst @@ -5,7 +5,7 @@ silu .. py:function:: paddle.nn.functional.silu(x, name=None) -silu激活层。计算公式如下: +silu 激活层。计算公式如下: .. math:: diff --git a/docs/api/paddle/nn/functional/smooth_l1_loss_cn.rst b/docs/api/paddle/nn/functional/smooth_l1_loss_cn.rst index 1440fafce44..7473c85c0c0 100644 --- a/docs/api/paddle/nn/functional/smooth_l1_loss_cn.rst +++ b/docs/api/paddle/nn/functional/smooth_l1_loss_cn.rst @@ -5,8 +5,8 @@ smooth_l1_loss .. py:function:: paddle.nn.functional.smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None) -该OP计算输入input和标签label间的SmoothL1损失,如果逐个元素的绝对误差低于1,则创建使用平方项的条件 -,否则为L1损失。在某些情况下,它可以防止爆炸梯度,也称为Huber损失,该损失函数的数学计算公式如下: +该 OP 计算输入 input 和标签 label 间的 SmoothL1 损失,如果逐个元素的绝对误差低于 1,则创建使用平方项的条件 +,否则为 L1 损失。在某些情况下,它可以防止爆炸梯度,也称为 Huber 损失,该损失函数的数学计算公式如下: .. 
math:: loss(x,y) = \frac{1}{n}\sum_{i}z_i @@ -22,10 +22,10 @@ smooth_l1_loss 参数 :::::::::: - - **input** (Tensor):输入 `Tensor`,数据类型为float32。其形状为 :math:`[N, C]`,其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。 - - **label** (Tensor):输入input对应的标签值,数据类型为float32。数据类型和input相同。 - - **reduction** (string,可选): - 指定应用于输出结果的计算方式,数据类型为string,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算`mini-batch` loss均值。设置为 `sum` 时,计算 `mini-batch` loss的总和。设置为 `none` 时,则返回loss Tensor。 - - **delta** (string,可选): SmoothL1Loss损失的阈值参数,用于控制Huber损失对线性误差或平方误差的侧重。数据类型为float32。默认值= 1.0。 + - **input** (Tensor):输入 `Tensor`,数据类型为 float32。其形状为 :math:`[N, C]`,其中 `C` 为类别数。对于多维度的情形下,它的形状为 :math:`[N, C, d_1, d_2, ..., d_k]`,k >= 1。 + - **label** (Tensor):输入 input 对应的标签值,数据类型为 float32。数据类型和 input 相同。 + - **reduction** (string,可选): - 指定应用于输出结果的计算方式,数据类型为 string,可选值有:`none`, `mean`, `sum`。默认为 `mean`,计算`mini-batch` loss 均值。设置为 `sum` 时,计算 `mini-batch` loss 的总和。设置为 `none` 时,则返回 loss Tensor。 + - **delta** (string,可选): SmoothL1Loss 损失的阈值参数,用于控制 Huber 损失对线性误差或平方误差的侧重。数据类型为 float32。默认值= 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/nn/functional/soft_margin_loss_cn.rst b/docs/api/paddle/nn/functional/soft_margin_loss_cn.rst index 4fa9fb2bc92..882638aec6c 100644 --- a/docs/api/paddle/nn/functional/soft_margin_loss_cn.rst +++ b/docs/api/paddle/nn/functional/soft_margin_loss_cn.rst @@ -14,20 +14,20 @@ soft_margin_loss \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()} -最后,添加 `reduce` 操作到前面的输出Out上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 +最后,添加 `reduce` 操作到前面的输出 Out 上。当 `reduction` 为 `none` 时,直接返回最原始的 `Out` 结果。当 `reduction` 为 `mean` 时,返回输出的均值 :math:`Out = MEAN(Out)` 。当 `reduction` 为 `sum` 时,返回输出的求和 :math:`Out = SUM(Out)` 。 参数 ::::::::: - - **input** (Tensor) - :math:`[N, *]` ,其中N是batch_size, `*` 
是任意其他维度。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, *]` ,其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **label** (Tensor) - :math:`[N, *]` ,标签 ``label`` 的维度、数据类型与输入 ``input`` 相同。 - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有: ``'none'``、 ``'mean'``、 ``'sum'`` 。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始 Loss。 - - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + - **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name` 。 返回 ::::::::: - - 输出的结果Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` ,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - 输出的结果 Tensor。如果 :attr:`reduction` 是 ``'none'``, 则输出的维度为 :math:`[N, *]` ,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 代码示例 diff --git a/docs/api/paddle/nn/functional/softmax_cn.rst b/docs/api/paddle/nn/functional/softmax_cn.rst index b38ed66756f..45b14069849 100644 --- a/docs/api/paddle/nn/functional/softmax_cn.rst +++ b/docs/api/paddle/nn/functional/softmax_cn.rst @@ -5,27 +5,27 @@ softmax .. 
py:function:: paddle.nn.functional.softmax(x, axis=-1, dtype=None, name=None) -该OP实现了softmax层。OP的计算过程如下: +该 OP 实现了 softmax 层。OP 的计算过程如下: -步骤1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维; +步骤 1:输入 ``x`` 的 ``axis`` 维会被置换到最后一维; -步骤2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为1; +步骤 2:将输入 ``x`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax 操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为 1; -步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。 +步骤 3:softmax 操作执行完成后,执行步骤 1 和步骤 2 的逆运算,将二维矩阵恢复至和输入 ``x`` 相同的维度。 -上述步骤2中softmax操作计算过程如下: +上述步骤 2 中 softmax 操作计算过程如下: - - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 + - 对于二维矩阵的每一行,计算 K 维向量(K 是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 - - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。 + - 指定位置指数值与全部位置指数值之和的比值就是 softmax 操作的输出。 -对于二维矩阵中的第i行和第j列有: +对于二维矩阵中的第 i 行和第 j 列有: .. math:: softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j(exp(x[i, j])} -- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作) +- 示例 1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做 softmax 操作) .. code-block:: text @@ -53,7 +53,7 @@ softmax [0.0320586 , 0.08714432, 0.23688282, 0.64391426], [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] -- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作) +- 示例 2(矩阵一共有三维。axis = 1,表示沿着第二维做 softmax 操作) .. 
code-block:: text @@ -85,8 +85,8 @@ softmax 参数 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - axis (int,可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D是输入 ``x`` 的维度,``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 - - dtype (str,可选) - 输出 `Tensor` 的数据类型,支持float32、float64。 + - axis (int,可选) - 指定对输入 ``x`` 进行运算的轴。``axis`` 的有效范围是[-D, D),D 是输入 ``x`` 的维度,``axis`` 为负值时与 :math:`axis + D` 等价。默认值为-1。 + - dtype (str,可选) - 输出 `Tensor` 的数据类型,支持 float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst b/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst index 0d29957930e..a7c4088d5bb 100644 --- a/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst +++ b/docs/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst @@ -6,11 +6,11 @@ softmax_with_cross_entropy .. py:function:: paddle.nn.functional.softmax_with_cross_entropy(logits, label, soft_label=False, ignore_index=-100, numeric_stable_mode=True, return_softmax=False, axis=-1) -该OP实现了softmax交叉熵损失函数。该函数会将softmax操作、交叉熵损失函数的计算过程进行合并,从而提供了数值上更稳定的梯度值。 +该 OP 实现了 softmax 交叉熵损失函数。该函数会将 softmax 操作、交叉熵损失函数的计算过程进行合并,从而提供了数值上更稳定的梯度值。 -因为该运算对 ``logits`` 的 ``axis`` 维执行softmax运算,所以它需要未缩放的 ``logits``。该运算不应该对softmax运算的输出进行操作,否则会产生错误的结果。 +因为该运算对 ``logits`` 的 ``axis`` 维执行 softmax 运算,所以它需要未缩放的 ``logits``。该运算不应该对 softmax 运算的输出进行操作,否则会产生错误的结果。 -当 ``soft_label`` 为 ``False`` 时,``label`` 除了 ``axis`` 维度上的形状为1,其余维度和 ``logits`` 一致,表示一批数据中的每一个样本仅可分类到一个类别。 +当 ``soft_label`` 为 ``False`` 时,``label`` 除了 ``axis`` 维度上的形状为 1,其余维度和 ``logits`` 一致,表示一批数据中的每一个样本仅可分类到一个类别。 涉及到的等式如下: @@ -19,12 +19,12 @@ softmax_with_cross_entropy .. math:: loss_j = -\text{logits}_{label_j} +\log\left(\sum_{i=0}^{K}\exp(\text{logits}_i)\right), j = 1,..., K -2. 软标签(每个样本以一定的概率被分配至多个类别中,概率和为1) +2. 软标签(每个样本以一定的概率被分配至多个类别中,概率和为 1) .. 
math:: loss_j = -\sum_{i=0}^{K}\text{label}_i\left(\text{logits}_i - \log\left(\sum_{i=0}^{K}\exp(\text{logits}_i)\right)\right), j = 1,...,K -3. 如果 ``numeric_stable_mode`` 为 ``True`` ,softmax结果首先经由下式计算得出,然后使用softmax结果和 ``label`` 计算交叉熵损失。 +3. 如果 ``numeric_stable_mode`` 为 ``True`` ,softmax 结果首先经由下式计算得出,然后使用 softmax 结果和 ``label`` 计算交叉熵损失。 .. math:: max_j &= \max_{i=0}^{K}{\text{logits}_i} \\ @@ -34,19 +34,19 @@ softmax_with_cross_entropy 参数 :::::::::::: - - **logits** (Tensor) - 维度为任意维的多维 ``Tensor``,数据类型为float32或float64。表示未缩放的输入。 - - **label** (Tensor) - 如果 ``soft_label`` 为True, ``label`` 是一个和 ``logits`` 维度相同的的 ``Tensor``。如果 ``soft_label`` 为False, ``label`` 是一个在axis维度上大小为1,其它维度上与 ``logits`` 维度相同的 ``Tensor`` 。 + - **logits** (Tensor) - 维度为任意维的多维 ``Tensor``,数据类型为 float32 或 float64。表示未缩放的输入。 + - **label** (Tensor) - 如果 ``soft_label`` 为 True, ``label`` 是一个和 ``logits`` 维度相同的的 ``Tensor``。如果 ``soft_label`` 为 False, ``label`` 是一个在 axis 维度上大小为 1,其它维度上与 ``logits`` 维度相同的 ``Tensor`` 。 - **soft_label** (bool,可选) - 指明是否将输入标签当作软标签。默认值:False。 - - **ignore_index** (int,可选) - 指明要无视的目标值,使其不对输入梯度有贡献。仅在 ``soft_label`` 为False时有效,默认值:kIgnoreIndex(-100)。 - - **numeric_stable_mode** (bool,可选) – 指明是否使用一个具有更佳数学稳定性的算法。仅在 ``soft_label`` 为 False的GPU模式下生效。若 ``soft_label`` 为 True或者执行设备为CPU,算法一直具有数学稳定性。注意使用稳定算法时速度可能会变慢。默认值:True。 - - **return_softmax** (bool,可选) – 指明是否在返回交叉熵计算结果的同时返回softmax结果。默认值:False。 - - **axis** (int,可选) – 执行softmax计算的维度索引。其范围为 :math:`[-1,rank-1]`,其中 ``rank`` 是输入 ``logits`` 的秩。默认值:-1。 + - **ignore_index** (int,可选) - 指明要无视的目标值,使其不对输入梯度有贡献。仅在 ``soft_label`` 为 False 时有效,默认值:kIgnoreIndex(-100)。 + - **numeric_stable_mode** (bool,可选) – 指明是否使用一个具有更佳数学稳定性的算法。仅在 ``soft_label`` 为 False 的 GPU 模式下生效。若 ``soft_label`` 为 True 或者执行设备为 CPU,算法一直具有数学稳定性。注意使用稳定算法时速度可能会变慢。默认值:True。 + - **return_softmax** (bool,可选) – 指明是否在返回交叉熵计算结果的同时返回 softmax 结果。默认值:False。 + - **axis** (int,可选) – 执行 softmax 计算的维度索引。其范围为 :math:`[-1,rank-1]`,其中 ``rank`` 是输入 ``logits`` 的秩。默认值:-1。 返回 :::::::::::: - - 如果 
``return_softmax`` 为 False,则返回交叉熵损失结果的 ``Tensor``,数据类型和 ``logits`` 一致,除了 ``axis`` 维度上的形状为1,其余维度和 ``logits`` 一致。 - - 如果 ``return_softmax`` 为 True,则返回交叉熵损失结果的 ``Tensor`` 和softmax结果的 ``Tensor`` 组成的元组。其中交叉熵损失结果的数据类型和 ``logits`` 一致,除了 ``axis`` 维度上的形状为1,其余维度上交叉熵损失结果和 ``logits`` 一致;softmax结果的数据类型和 ``logits`` 一致,维度和 ``logits`` 一致。 + - 如果 ``return_softmax`` 为 False,则返回交叉熵损失结果的 ``Tensor``,数据类型和 ``logits`` 一致,除了 ``axis`` 维度上的形状为 1,其余维度和 ``logits`` 一致。 + - 如果 ``return_softmax`` 为 True,则返回交叉熵损失结果的 ``Tensor`` 和 softmax 结果的 ``Tensor`` 组成的元组。其中交叉熵损失结果的数据类型和 ``logits`` 一致,除了 ``axis`` 维度上的形状为 1,其余维度上交叉熵损失结果和 ``logits`` 一致;softmax 结果的数据类型和 ``logits`` 一致,维度和 ``logits`` 一致。 代码示例 diff --git a/docs/api/paddle/nn/functional/softplus_cn.rst b/docs/api/paddle/nn/functional/softplus_cn.rst index 3fef83d3903..bd5f9192343 100644 --- a/docs/api/paddle/nn/functional/softplus_cn.rst +++ b/docs/api/paddle/nn/functional/softplus_cn.rst @@ -5,12 +5,12 @@ softplus .. py:function:: paddle.nn.functional.softplus(x, beta=1, threshold=20, name=None) -softplus激活层 +softplus 激活层 .. math:: softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\ - \text{为了保证数值稳定性,当}\,beta * x > threshold\,\text{时,函数转变为线性函数x}。 + \text{为了保证数值稳定性,当}\,beta * x > threshold\,\text{时,函数转变为线性函数 x}。 其中,:math:`x` 为输入的 Tensor @@ -19,8 +19,8 @@ softplus激活层 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - beta (float,可选) - Softplus激活计算公式中的beta值。默认值为1。 - - threshold (float,可选) - Softplus激活计算公式中的threshold值。默认值为20。 + - beta (float,可选) - Softplus 激活计算公式中的 beta 值。默认值为 1。 + - threshold (float,可选) - Softplus 激活计算公式中的 threshold 值。默认值为 20。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/softshrink_cn.rst b/docs/api/paddle/nn/functional/softshrink_cn.rst index 67afbd8a1a5..c70092345f5 100644 --- a/docs/api/paddle/nn/functional/softshrink_cn.rst +++ b/docs/api/paddle/nn/functional/softshrink_cn.rst @@ -5,7 +5,7 @@ softshrink .. 
py:function:: paddle.nn.functional.softshrink(x, threshold=0.5, name=None) -softshrink激活层 +softshrink 激活层 .. math:: @@ -22,7 +22,7 @@ softshrink激活层 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - threshold (float,可选) - softshrink激活计算公式中的threshold值,必须大于等于零。默认值为0.5。 + - threshold (float,可选) - softshrink 激活计算公式中的 threshold 值,必须大于等于零。默认值为 0.5。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/softsign_cn.rst b/docs/api/paddle/nn/functional/softsign_cn.rst index f379ecdcfe7..d376ec6382b 100644 --- a/docs/api/paddle/nn/functional/softsign_cn.rst +++ b/docs/api/paddle/nn/functional/softsign_cn.rst @@ -5,7 +5,7 @@ softsign .. py:function:: paddle.nn.functional.softsign(x, name=None) -softsign激活层 +softsign 激活层 .. math:: diff --git a/docs/api/paddle/nn/functional/sparse_attention_cn.rst b/docs/api/paddle/nn/functional/sparse_attention_cn.rst index a052aaeff8a..84a78483340 100755 --- a/docs/api/paddle/nn/functional/sparse_attention_cn.rst +++ b/docs/api/paddle/nn/functional/sparse_attention_cn.rst @@ -5,9 +5,9 @@ sparse_attention .. py:function:: paddle.nn.functional.sparse_attention(query, key, value, sparse_csr_offset, sparse_csr_columns, name=None) -该OP对Transformer模块中的Attention矩阵进行了稀疏化,从而减少内存消耗和计算量。 +该 OP 对 Transformer 模块中的 Attention 矩阵进行了稀疏化,从而减少内存消耗和计算量。 -其稀疏数据排布通过CSR格式表示,CSR格式包含两个参数,``offset`` 和 ``colunms``。计算公式为: +其稀疏数据排布通过 CSR 格式表示,CSR 格式包含两个参数,``offset`` 和 ``colunms``。计算公式为: .. math:: result=softmax(\frac{ Q * K^T }{\sqrt{d}}) * V @@ -15,19 +15,19 @@ sparse_attention 其中,``Q``,``K``,``V`` 表示注意力模块的三个输入参数。这三个参数的维度是一样的。``d`` 代表这三个参数的最后一个维度的大小。 .. 
warning:: - 目前该API只在CUDA11.3及以上版本中使用。 + 目前该 API 只在 CUDA11.3 及以上版本中使用。 参数 ::::::::: - - query (Tensor) - 输入的Tensor,代表注意力模块中的 ``query``,这是一个4维Tensor,形状为:[batch_size, num_heads, seq_len, head_dim],数据类型为float32或float64。 - - key (Tensor) - 输入的Tensor,代表注意力模块中的 ``key``,这是一个4维Tensor,形状为:[batch_size, num_heads, seq_len, head_dim],数据类型为float32或float64。 - - value (Tensor) - 输入的Tensor,代表注意力模块中的 ``value``,这是一个4维Tensor,形状为:[batch_size, num_heads, seq_len, head_dim],数据类型为float32或float64。 - - sparse_csr_offset (Tensor) - 输入的Tensor,注意力模块中的稀疏特性,稀疏特性使用CSR格式表示,``offset`` 代表矩阵中每一行非零元的数量。这是一个3维Tensor,形状为:[batch_size, num_heads, seq_len + 1],数据类型为int32。 - - sparse_csr_columns (Tensor) - 输入的Tensor,注意力模块中的稀疏特性,稀疏特性使用CSR格式表示,``colunms`` 代表矩阵中每一行非零元的列索引值。这是一个3维Tensor,形状为:[batch_size, num_heads, sparse_nnz],数据类型为int32。 + - query (Tensor) - 输入的 Tensor,代表注意力模块中的 ``query``,这是一个 4 维 Tensor,形状为:[batch_size, num_heads, seq_len, head_dim],数据类型为 float32 或 float64。 + - key (Tensor) - 输入的 Tensor,代表注意力模块中的 ``key``,这是一个 4 维 Tensor,形状为:[batch_size, num_heads, seq_len, head_dim],数据类型为 float32 或 float64。 + - value (Tensor) - 输入的 Tensor,代表注意力模块中的 ``value``,这是一个 4 维 Tensor,形状为:[batch_size, num_heads, seq_len, head_dim],数据类型为 float32 或 float64。 + - sparse_csr_offset (Tensor) - 输入的 Tensor,注意力模块中的稀疏特性,稀疏特性使用 CSR 格式表示,``offset`` 代表矩阵中每一行非零元的数量。这是一个 3 维 Tensor,形状为:[batch_size, num_heads, seq_len + 1],数据类型为 int32。 + - sparse_csr_columns (Tensor) - 输入的 Tensor,注意力模块中的稀疏特性,稀疏特性使用 CSR 格式表示,``colunms`` 代表矩阵中每一行非零元的列索引值。这是一个 3 维 Tensor,形状为:[batch_size, num_heads, sparse_nnz],数据类型为 int32。 返回 ::::::::: - ``Tensor``,代表注意力模块的结果。这是一个4维Tensor,形状为:[batch_size, num_heads, seq_len, head_dim],数据类型为float32或float64。 + ``Tensor``,代表注意力模块的结果。这是一个 4 维 Tensor,形状为:[batch_size, num_heads, seq_len, head_dim],数据类型为 float32 或 float64。 代码示例 :::::::::: diff --git a/docs/api/paddle/nn/functional/square_error_cost_cn.rst b/docs/api/paddle/nn/functional/square_error_cost_cn.rst index 0cc886f2986..7b8d9257d9f 100644 --- 
a/docs/api/paddle/nn/functional/square_error_cost_cn.rst +++ b/docs/api/paddle/nn/functional/square_error_cost_cn.rst @@ -6,9 +6,9 @@ square_error_cost .. py:function:: paddle.nn.functional.square_error_cost(input,label) -该OP用于计算预测值和目标值的方差估计。 +该 OP 用于计算预测值和目标值的方差估计。 -对于预测值input和目标值label,公式为: +对于预测值 input 和目标值 label,公式为: .. math:: @@ -17,8 +17,8 @@ square_error_cost 参数 :::::::::::: - - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 - - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **input** (Tensor) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维 Tensor,其中最后一维 D 是类别数目。数据类型为 float32 或 float64。 + - **label** (Tensor) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维 Tensor,其中最后一维 D 是类别数目。数据类型为 float32 或 float64。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/functional/swish_cn.rst b/docs/api/paddle/nn/functional/swish_cn.rst index c8ae0158d1f..9cc89a61e7a 100644 --- a/docs/api/paddle/nn/functional/swish_cn.rst +++ b/docs/api/paddle/nn/functional/swish_cn.rst @@ -5,7 +5,7 @@ swish .. py:function:: paddle.nn.functional.swish(x, name=None) -swish激活层。计算公式如下: +swish 激活层。计算公式如下: .. math:: diff --git a/docs/api/paddle/nn/functional/tanhshrink_cn.rst b/docs/api/paddle/nn/functional/tanhshrink_cn.rst index c50ca10af56..3c63b76b0c9 100644 --- a/docs/api/paddle/nn/functional/tanhshrink_cn.rst +++ b/docs/api/paddle/nn/functional/tanhshrink_cn.rst @@ -5,7 +5,7 @@ tanhshrink .. py:function:: paddle.nn.functional.tanhshrink(x, name=None) -tanhshrink激活层 +tanhshrink 激活层 .. math:: diff --git a/docs/api/paddle/nn/functional/temporal_shift_cn.rst b/docs/api/paddle/nn/functional/temporal_shift_cn.rst index 14df752e6c9..4b67485eea0 100644 --- a/docs/api/paddle/nn/functional/temporal_shift_cn.rst +++ b/docs/api/paddle/nn/functional/temporal_shift_cn.rst @@ -5,17 +5,17 @@ temporal_shift .. 
py:function:: paddle.nn.functional.temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW") -该OP用于对输入X做时序通道T上的位移操作,为TSM(Temporal Shift Module)中使用的操作。 +该 OP 用于对输入 X 做时序通道 T 上的位移操作,为 TSM(Temporal Shift Module)中使用的操作。 -输入(X)的形状应为[N*T, C, H, W]或[N*T, H, W, C],N是批大小,T是 ``seg_num`` 指定的时间段号,C是通道号,H和W是特征的高度和宽度。 +输入(X)的形状应为[N*T, C, H, W]或[N*T, H, W, C],N 是批大小,T 是 ``seg_num`` 指定的时间段号,C 是通道号,H 和 W 是特征的高度和宽度。 以 data_format="NCHW" 为例,时间偏移计算如下: -步骤1:将输入(X)reshape为[N, T, C, H, W]。 +步骤 1:将输入(X)reshape 为[N, T, C, H, W]。 -步骤2:填充0到第二个(T)尺寸的变形结果,填充宽度每边为1,填充结果的形状为[N,T+2,C,H,W]。 +步骤 2:填充 0 到第二个(T)尺寸的变形结果,填充宽度每边为 1,填充结果的形状为[N,T+2,C,H,W]。 -步骤3:假设 ``shift_ratio`` 为1/4,切片填充结果如下: +步骤 3:假设 ``shift_ratio`` 为 1/4,切片填充结果如下: .. math:: @@ -25,17 +25,17 @@ temporal_shift slice3 &= x[:, 1:T+1, C/2:, :, :] -步骤4:沿第3(C)维连接三个切片,并将结果重塑为[N*T, C, H, W]。 +步骤 4:沿第 3(C)维连接三个切片,并将结果重塑为[N*T, C, H, W]。 有关时序移动的详细信息,请参阅文件:`Temporal Shift Module `_ 参数 ::::::::: - - **x** (Tensor) – 时移算符的输入张量。维度为 :math:`[N*T,C,H,W]` 的4-D Tensor。N为批量大小,T为时间段数,C为信道数,H为特征高度,W为特征宽度,数据类型为float32或float64。 + - **x** (Tensor) – 时移算符的输入张量。维度为 :math:`[N*T,C,H,W]` 的 4-D Tensor。N 为批量大小,T 为时间段数,C 为信道数,H 为特征高度,W 为特征宽度,数据类型为 float32 或 float64。 - **seg_num** (int) – 时间段编号,这应该是一个正整数。 - - **shift_ratio** (float) – 通道的移位比、通道的第一个 ``shift_ratio`` 部分沿时间维度移动-1,通道的第二个 ``shift_ratio`` 部分沿时间维度移动1,范围须在[0, 0.5]内。默认值0.25 + - **shift_ratio** (float) – 通道的移位比、通道的第一个 ``shift_ratio`` 部分沿时间维度移动-1,通道的第二个 ``shift_ratio`` 部分沿时间维度移动 1,范围须在[0, 0.5]内。默认值 0.25 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"或"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"或"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 返回 ::::::::: diff --git a/docs/api/paddle/nn/functional/thresholded_relu_cn.rst b/docs/api/paddle/nn/functional/thresholded_relu_cn.rst index ef7448d6ed4..ee60b67fb5a 100644 --- 
a/docs/api/paddle/nn/functional/thresholded_relu_cn.rst +++ b/docs/api/paddle/nn/functional/thresholded_relu_cn.rst @@ -5,7 +5,7 @@ thresholded_relu .. py:function:: paddle.nn.functional.thresholded_relu(x, threshold=1.0, name=None) -thresholded relu激活层。计算公式如下: +thresholded relu 激活层。计算公式如下: .. math:: @@ -20,7 +20,7 @@ thresholded relu激活层。计算公式如下: 参数 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - threshold (float,可选) - thresholded_relu激活计算公式中的threshold值。默认值为1.0。 + - threshold (float,可选) - thresholded_relu 激活计算公式中的 threshold 值。默认值为 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/functional/triplet_margin_loss_cn.rst b/docs/api/paddle/nn/functional/triplet_margin_loss_cn.rst index f8482f1a6f2..d75d7006f49 100644 --- a/docs/api/paddle/nn/functional/triplet_margin_loss_cn.rst +++ b/docs/api/paddle/nn/functional/triplet_margin_loss_cn.rst @@ -20,30 +20,30 @@ triplet_margin_loss -``p`` 为距离函数的范数。``margin`` 为(input,positive)与(input,negative)的距离间隔,``swap`` 为True时,会比较(input,negative)和(positive,negative)的大小,并将(input,negative)换为其中较小的值,内容详见论文 `Learning shallow convolutional feature descriptors with triplet losses `_ 。 +``p`` 为距离函数的范数。``margin`` 为(input,positive)与(input,negative)的距离间隔,``swap`` 为 True 时,会比较(input,negative)和(positive,negative)的大小,并将(input,negative)换为其中较小的值,内容详见论文 `Learning shallow convolutional feature descriptors with triplet losses `_ 。 参数 ::::::::: - **input** (Tensor) - :math:`[N, * ]`,其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **positive** (Tensor) - :math:`[N, *]`,正样本。 - **negative** (Tensor) - :math:`[N, *]`,负样本。 - - **margin** (float,可选) - 手动指定间距,默认为1。 - - **p** (float,可选) - 手动指定范数,默认为2。 - - **epsilon** (float,可选) - 防止除数为零,默认为1e-6。 + - **margin** (float,可选) - 手动指定间距,默认为 1。 + - **p** (float,可选) - 手动指定范数,默认为 2。 + - **epsilon** (float,可选) - 防止除数为零,默认为 1e-6。 - **swap** (bool,可选) - 默认为 False。 - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, 
``'sum'``。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始 Loss。 - - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + - **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name` 。 形状 ::::::::: - - **input** (Tensor) - :math:`[N, * ]`,其中N是batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 + - **input** (Tensor) - :math:`[N, * ]`,其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **positive** (Tensor) - :math:`[N, *]`,标签 ``positive`` 的维度、数据类型与输入 ``input`` 相同。 - **negative** (Tensor) - :math:`[N, *]`,标签 ``negative`` 的维度、数据类型与输入 ``input`` 相同。 - - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 返回 ::::::::: - 返回计算的Loss。 + 返回计算的 Loss。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/triplet_margin_with_distance_loss_cn.rst b/docs/api/paddle/nn/functional/triplet_margin_with_distance_loss_cn.rst index 62a367b6c3e..eef185ddc19 100644 --- a/docs/api/paddle/nn/functional/triplet_margin_with_distance_loss_cn.rst +++ b/docs/api/paddle/nn/functional/triplet_margin_with_distance_loss_cn.rst @@ -14,36 +14,36 @@ triplet_margin_with_distance_loss L(input, pos, neg) = \max \{d(input_i, pos_i) - d(input_i, neg_i) + {\rm margin}, 0\} -其中的距离函数 ``distance_function`` 可以由用户自定义,使用 lambda 或是 def 都可以。如果未定义则调用2范数计算距离 +其中的距离函数 ``distance_function`` 可以由用户自定义,使用 lambda 或是 def 都可以。如果未定义则调用 2 范数计算距离 .. 
math:: d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_2 -``margin`` 为(input,positive)与(input,negative)的距离间隔,``swap`` 为True时,会比较(input,negative)和(positive,negative)的大小,并将(input,negative)的值换为其中较小的值,内容详见论文 `Learning shallow convolutional feature descriptors with triplet losses `_ 。 +``margin`` 为(input,positive)与(input,negative)的距离间隔,``swap`` 为 True 时,会比较(input,negative)和(positive,negative)的大小,并将(input,negative)的值换为其中较小的值,内容详见论文 `Learning shallow convolutional feature descriptors with triplet losses `_ 。 参数 ::::::::: - - **input** (Tensor) - :math:`[N, * ]`,其中N是batch_size, `*` 是任意其他维度。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, * ]`,其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **positive** (Tensor) - :math:`[N, *]`,正样本,维度、数据类型与输入 ``input`` 相同。 - **negative** (Tensor) - :math:`[N, *]`,负样本,维度、数据类型与输入 ``input`` 相同。 - **distance_function** (Callable,可选) - 手动指定范数,默认为 None,计算欧式距离。 - - **margin** (float,可选) - 手动指定间距,默认为1。 + - **margin** (float,可选) - 手动指定间距,默认为 1。 - **swap** (bool,可选) - 默认为 False。 - **reduction** (str,可选) - 指定应用于输出结果的计算方式,可选值有:``'none'``, ``'mean'``, ``'sum'``。默认为 ``'mean'``,计算 Loss 的均值;设置为 ``'sum'`` 时,计算 Loss 的总和;设置为 ``'none'`` 时,则返回原始 Loss。 - - **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name` 。 + - **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name` 。 形状 ::::::::: - - **input** (Tensor) - :math:`[N, *]`,其中N是batch_size, `*` 是任意其他维度。数据类型是float32、float64。 + - **input** (Tensor) - :math:`[N, *]`,其中 N 是 batch_size, `*` 是任意其他维度。数据类型是 float32、float64。 - **positive** (Tensor) - :math:`[N, *]`,标签 ``positive`` 的维度、数据类型与输入 ``input`` 相同。 - **negative** (Tensor) - :math:`[N, *]`,标签 ``negative`` 的维度、数据类型与输入 ``input`` 相同。 - - **output** (Tensor) - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + - **output** (Tensor) - 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 
``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 返回 ::::::::: - 输出的Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 + 输出的 Tensor。如果 :attr:`reduction` 是 ``'none'``,则输出的维度为 :math:`[N, *]`,与输入 ``input`` 的形状相同。如果 :attr:`reduction` 是 ``'mean'`` 或 ``'sum'``,则输出的维度为 :math:`[1]` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/functional/unfold_cn.rst b/docs/api/paddle/nn/functional/unfold_cn.rst index fd3e4de1cd2..59eac650810 100644 --- a/docs/api/paddle/nn/functional/unfold_cn.rst +++ b/docs/api/paddle/nn/functional/unfold_cn.rst @@ -8,7 +8,7 @@ unfold -该OP实现的功能与卷积中用到的im2col函数一样,通常也被称作为im2col过程。对于每一个卷积核覆盖下的区域,元素会被重新排成一列。当卷积核在整个图片上滑动时,将会形成一系列的列向量。对于每一个输入形状为[N, C, H, W]的 ``x``,都将会按照下面公式计算出一个形状为[N, Cout, Lout]的输出。 +该 OP 实现的功能与卷积中用到的 im2col 函数一样,通常也被称作为 im2col 过程。对于每一个卷积核覆盖下的区域,元素会被重新排成一列。当卷积核在整个图片上滑动时,将会形成一系列的列向量。对于每一个输入形状为[N, C, H, W]的 ``x``,都将会按照下面公式计算出一个形状为[N, Cout, Lout]的输出。 .. 
math:: @@ -42,17 +42,17 @@ unfold 参数 :::::::::::: - - **x** (Tensor) – 输入4-D Tensor,形状为[N, C, H, W],数据类型为float32或者float64 - - **kernel_size** (int|list of int) – 卷积核的尺寸,整数或者整型列表。如果为整型列表,应包含两个元素 ``[k_h, k_w]``,卷积核大小为 ``k_h * k_w``;如果为整数k,会被当作整型列表 ``[k, k]`` 处理 - - **strides** (int|list of int,可选) – 卷积步长,整数或者整型列表。如果为整型列表,应该包含两个元素 ``[stride_h, stride_w]``。如果为整数,则 ``stride_h = stride_w = strides``。默认值为1 - - **paddings** (int|list of int,可选) – 每个维度的扩展,整数或者整型列表。如果为整型列表,长度应该为4或者2;长度为4 对应的padding参数是:[padding_top, padding_left,padding_bottom, padding_right],长度为2对应的padding参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数padding,则会被当作[padding, padding, padding, padding]处理。默认值为0 - - **dilations** (int|list of int,可选) – 卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数dilation,会被当作整型列表[dilation, dilation]处理。默认值为1 + - **x** (Tensor) – 输入 4-D Tensor,形状为[N, C, H, W],数据类型为 float32 或者 float64 + - **kernel_size** (int|list of int) – 卷积核的尺寸,整数或者整型列表。如果为整型列表,应包含两个元素 ``[k_h, k_w]``,卷积核大小为 ``k_h * k_w``;如果为整数 k,会被当作整型列表 ``[k, k]`` 处理 + - **strides** (int|list of int,可选) – 卷积步长,整数或者整型列表。如果为整型列表,应该包含两个元素 ``[stride_h, stride_w]``。如果为整数,则 ``stride_h = stride_w = strides``。默认值为 1 + - **paddings** (int|list of int,可选) – 每个维度的扩展,整数或者整型列表。如果为整型列表,长度应该为 4 或者 2;长度为 4 对应的 padding 参数是:[padding_top, padding_left,padding_bottom, padding_right],长度为 2 对应的 padding 参数是[padding_h, padding_w],会被当作[padding_h, padding_w, padding_h, padding_w]处理。如果为整数 padding,则会被当作[padding, padding, padding, padding]处理。默认值为 0 + - **dilations** (int|list of int,可选) – 卷积膨胀,整型列表或者整数。如果为整型列表,应该包含两个元素[dilation_h, dilation_w]。如果是整数 dilation,会被当作整型列表[dilation, dilation]处理。默认值为 1 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor, unfold操作之后的结果,形状如上面所描述的[N, Cout, Lout],Cout每一个滑动block里面覆盖的元素个数,Lout是滑动block的个数,数据类型与 ``x`` 相同 +Tensor, unfold 操作之后的结果,形状如上面所描述的[N, Cout, Lout],Cout 每一个滑动 block 里面覆盖的元素个数,Lout 是滑动 block 的个数,数据类型与 ``x`` 相同 代码示例 diff --git 
a/docs/api/paddle/nn/functional/upsample_cn.rst b/docs/api/paddle/nn/functional/upsample_cn.rst index f297d58f600..1483aefd111 100644 --- a/docs/api/paddle/nn/functional/upsample_cn.rst +++ b/docs/api/paddle/nn/functional/upsample_cn.rst @@ -6,9 +6,9 @@ upsample .. py:function:: paddle.nn.functional.upsample(x, size=None, scale_factor=None, mode='nearest', align_corners=False, align_mode=0, data_format='NCHW', name=None) -该OP用于调整一个batch中图片的大小。 +该 OP 用于调整一个 batch 中图片的大小。 -输入为4-D Tensor时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为5-D Tensor时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 +输入为 4-D Tensor 时形状为(num_batches, channels, in_h, in_w)或者(num_batches, in_h, in_w, channels),输入为 5-D Tensor 时形状为(num_batches, channels, in_d, in_h, in_w)或者(num_batches, in_d, in_h, in_w, channels),并且调整大小只适用于深度,高度和宽度对应的维度。 支持的插值方法: @@ -27,13 +27,13 @@ upsample 最近邻插值是在输入张量的高度和宽度上进行最近邻插值。 -双线性插值是线性插值的扩展,用于在直线2D网格上插值两个变量(例如,该操作中的H方向和W方向)的函数。关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 +双线性插值是线性插值的扩展,用于在直线 2D 网格上插值两个变量(例如,该操作中的 H 方向和 W 方向)的函数。关键思想是首先在一个方向上执行线性插值,然后在另一个方向上再次执行线性插值。 -三线插值是线性插值的一种扩展,是3参数的插值方程(比如op里的D,H,W方向),在三个方向上进行线性插值。 +三线插值是线性插值的一种扩展,是 3 参数的插值方程(比如 op 里的 D,H,W 方向),在三个方向上进行线性插值。 双三次插值是在二维网格上对数据点进行插值的三次插值的扩展,它能创造出比双线性和最近临插值更为光滑的图像边缘。 -Align_corners和align_mode是可选参数,插值的计算方法可以由它们选择。 +Align_corners 和 align_mode 是可选参数,插值的计算方法可以由它们选择。 示例: @@ -140,18 +140,18 @@ https://en.wikipedia.org/wiki/Bicubic_interpolation 参数 :::::::::::: - - **x** (Tensor) - 4-D或5-D Tensor,数据类型为float32、float64或uint8,其数据格式由参数 ``data_format`` 指定。 - - **size** (list|tuple|Tensor|None) - 输出Tensor,输入为4D张量时,形状为为(out_h, out_w)的2-D Tensor。输入为5-D Tensor时,形状为(out_d, out_h, out_w)的3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为1。默认值为None。 - - **scale_factor** 
(float|Tensor|list|tuple|None)-输入的高度或宽度的乘数因子。out_shape和scale至少要设置一个。out_shape的优先级高于scale。默认值为None。如果scale_factor是一个list或tuple,它必须与输入的shape匹配。 + - **x** (Tensor) - 4-D 或 5-D Tensor,数据类型为 float32、float64 或 uint8,其数据格式由参数 ``data_format`` 指定。 + - **size** (list|tuple|Tensor|None) - 输出 Tensor,输入为 4D 张量时,形状为为(out_h, out_w)的 2-D Tensor。输入为 5-D Tensor 时,形状为(out_d, out_h, out_w)的 3-D Tensor。如果 :code:`out_shape` 是列表,每一个元素可以是整数或者形状为[1]的变量。如果 :code:`out_shape` 是变量,则其维度大小为 1。默认值为 None。 + - **scale_factor** (float|Tensor|list|tuple|None)-输入的高度或宽度的乘数因子。out_shape 和 scale 至少要设置一个。out_shape 的优先级高于 scale。默认值为 None。如果 scale_factor 是一个 list 或 tuple,它必须与输入的 shape 匹配。 - **mode** (str,可选) - 插值方法。支持"bilinear"或"trilinear"或"nearest"或"bicubic"或"linear"或"area"。默认值为"nearest"。 - - **align_mode** (int,可选)- 双线性插值的可选项。可以是 '0' 代表src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1',代表src_idx = scale * dst_index。 - - **align_corners** (bool,可选)- 一个可选的bool型参数,如果为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。默认值为True - - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 + - **align_mode** (int,可选)- 双线性插值的可选项。可以是 '0' 代表 src_idx = scale *(dst_indx + 0.5)-0.5;如果为'1',代表 src_idx = scale * dst_index。 + - **align_corners** (bool,可选)- 一个可选的 bool 型参数,如果为 True,则将输入和输出张量的 4 个角落像素的中心对齐,并保留角点像素的值。默认值为 True + - **data_format** (str,可选)- 指定输入的数据格式,输出的数据格式将与输入保持一致。对于 4-D Tensor,支持 NCHW(num_batches, channels, height, width) 或者 NHWC(num_batches, height, width, channels),对于 5-D Tensor,支持 NCDHW(num_batches, channels, depth, height, width)或者 NDHWC(num_batches, depth, height, width, channels),默认值:'NCHW'。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者5-D Tensor,形状为 
(num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 +4-D Tensor,形状为 (num_batches, channels, out_h, out_w) 或 (num_batches, out_h, out_w, channels);或者 5-D Tensor,形状为 (num_batches, channels, out_d, out_h, out_w) 或 (num_batches, out_d, out_h, out_w, channels)。 代码示例 diff --git a/docs/api/paddle/nn/functional/zeropad2d_cn.rst b/docs/api/paddle/nn/functional/zeropad2d_cn.rst index 7abaf5d91b2..e8aa343bc15 100644 --- a/docs/api/paddle/nn/functional/zeropad2d_cn.rst +++ b/docs/api/paddle/nn/functional/zeropad2d_cn.rst @@ -4,13 +4,13 @@ zeropad2d ------------------------------- .. py:function:: paddle.nn.functional.zeropad2d(x, padding, data_format="NCHW", name=None) -该OP返回一个按照 ``padding`` 属性对 ``x`` 进行零填充的Tensor,数据类型与 ``x`` 相同。 +该 OP 返回一个按照 ``padding`` 属性对 ``x`` 进行零填充的 Tensor,数据类型与 ``x`` 相同。 参数 :::::::::: - - **x** (Tensor) - Tensor,format可以为 ``'NCHW'``, ``'NHWC'``,默认值为 ``'NCHW'``,数据类型支持float16, float32, float64, int32, int64。 - - **padding** (Tensor | List[int] | Tuple[int]) - 填充大小。pad的格式为[pad_left, pad_right, pad_top, pad_bottom]; - - **data_format** (str) - 指定 ``x`` 的format,可为 ``'NCHW'``, ``'NHWC'``,默认值为 ``'NCHW'``。 + - **x** (Tensor) - Tensor,format 可以为 ``'NCHW'``, ``'NHWC'``,默认值为 ``'NCHW'``,数据类型支持 float16, float32, float64, int32, int64。 + - **padding** (Tensor | List[int] | Tuple[int]) - 填充大小。pad 的格式为[pad_left, pad_right, pad_top, pad_bottom]; + - **data_format** (str) - 指定 ``x`` 的 format,可为 ``'NCHW'``, ``'NHWC'``,默认值为 ``'NCHW'``。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/initializer/Assign_cn.rst b/docs/api/paddle/nn/initializer/Assign_cn.rst index 40513cc7697..93ef42d7447 100644 --- a/docs/api/paddle/nn/initializer/Assign_cn.rst +++ b/docs/api/paddle/nn/initializer/Assign_cn.rst @@ -6,18 +6,18 @@ Assign .. 
py:class:: paddle.nn.initializer.Assign(value, name=None) -该OP使用Numpy数组、Python列表、Tensor来初始化参数。 +该 OP 使用 Numpy 数组、Python 列表、Tensor 来初始化参数。 参数 :::::::::::: - - **value** (Tensor|numpy.ndarray|list) - 用于初始化参数的一个Numpy数组、Python列表、Tensor。 + - **value** (Tensor|numpy.ndarray|list) - 用于初始化参数的一个 Numpy 数组、Python 列表、Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - 由Numpy数组、Python列表、Tensor初始化的参数。 + 由 Numpy 数组、Python 列表、Tensor 初始化的参数。 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/initializer/Bilinear_cn.rst b/docs/api/paddle/nn/initializer/Bilinear_cn.rst index d2f842668e6..b3cabcdf19b 100644 --- a/docs/api/paddle/nn/initializer/Bilinear_cn.rst +++ b/docs/api/paddle/nn/initializer/Bilinear_cn.rst @@ -8,7 +8,7 @@ Bilinear -该接口为参数初始化函数,用于转置卷积函数中,对输入进行上采样。用户通过任意整型因子放大shape为(B,C,H,W)的特征图。 +该接口为参数初始化函数,用于转置卷积函数中,对输入进行上采样。用户通过任意整型因子放大 shape 为(B,C,H,W)的特征图。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/initializer/Constant_cn.rst b/docs/api/paddle/nn/initializer/Constant_cn.rst index 50d85e8f64c..994d3c1e39b 100644 --- a/docs/api/paddle/nn/initializer/Constant_cn.rst +++ b/docs/api/paddle/nn/initializer/Constant_cn.rst @@ -13,7 +13,7 @@ Constant 参数 :::::::::::: - - **value** (float16|float32,可选) - 用于初始化输入变量的值,默认值为0。 + - **value** (float16|float32,可选) - 用于初始化输入变量的值,默认值为 0。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/initializer/Dirac_cn.rst b/docs/api/paddle/nn/initializer/Dirac_cn.rst index d92a85c4b2a..bf557d7e6ce 100644 --- a/docs/api/paddle/nn/initializer/Dirac_cn.rst +++ b/docs/api/paddle/nn/initializer/Dirac_cn.rst @@ -6,11 +6,11 @@ Dirac .. py:class:: paddle.nn.initializer.Dirac(groups=1, name=None) -通过 ``狄拉克delta函数`` 来初始化3D/4D/5D Tensor。 +通过 ``狄拉克 delta 函数`` 来初始化 3D/4D/5D Tensor。 该初始化方式一般用于 Conv1D/Conv2D/Conv3D 卷积层,能尽可能多的保留卷积层输入的特性。(如果 `out_channels` > `in_channels`,则可保留全部的输入 `channel` 特性) -被初始化的参数,每个卷积核中间的元素会被置为1,其余元素为0。公式可以描述为: +被初始化的参数,每个卷积核中间的元素会被置为 1,其余元素为 0。公式可以描述为: .. 
math:: @@ -21,7 +21,7 @@ Dirac 参数 ::::::::: - - groups (int,可选) - 将参数在0维上进行等分为 `groups` 份,每一份执行相同的初始化。默认:1。 + - groups (int,可选) - 将参数在 0 维上进行等分为 `groups` 份,每一份执行相同的初始化。默认:1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/initializer/KaimingNormal_cn.rst b/docs/api/paddle/nn/initializer/KaimingNormal_cn.rst index d96b964eb0e..f713170a3b8 100644 --- a/docs/api/paddle/nn/initializer/KaimingNormal_cn.rst +++ b/docs/api/paddle/nn/initializer/KaimingNormal_cn.rst @@ -8,11 +8,11 @@ KaimingNormal -该接口实现Kaiming正态分布方式的权重初始化。 +该接口实现 Kaiming 正态分布方式的权重初始化。 -该接口为权重初始化函数,方法来自Kaiming He,Xiangyu Zhang,Shaoqing Ren 和 Jian Sun所写的论文:`Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `_ 。这是一个鲁棒性特别强的初始化方法,并且适应了非线性激活函数(rectifier nonlinearities)。 +该接口为权重初始化函数,方法来自 Kaiming He,Xiangyu Zhang,Shaoqing Ren 和 Jian Sun 所写的论文:`Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `_ 。这是一个鲁棒性特别强的初始化方法,并且适应了非线性激活函数(rectifier nonlinearities)。 -在正态分布中,均值为0,标准差为: +在正态分布中,均值为 0,标准差为: .. math:: @@ -23,7 +23,7 @@ KaimingNormal - **fan_in** (float16|float32,可选) - 可训练的 Tensor 的 in_features 值。如果设置为 None,程序会自动计算该值。如果你不想使用 in_features,你可以自己设置这个值。默认值为 None。 - **negative_slope** (float,可选) - 只适用于使用 leaky_relu 作为激活函数时的 negative_slope 参数。默认值为 :math:`0.0`。 - - **nonlinearity** (str,可选) - 非线性激活函数。默认值为relu。 + - **nonlinearity** (str,可选) - 非线性激活函数。默认值为 relu。 .. 
note:: diff --git a/docs/api/paddle/nn/initializer/KaimingUniform_cn.rst b/docs/api/paddle/nn/initializer/KaimingUniform_cn.rst index 22ae851c818..b5cb60bad61 100644 --- a/docs/api/paddle/nn/initializer/KaimingUniform_cn.rst +++ b/docs/api/paddle/nn/initializer/KaimingUniform_cn.rst @@ -8,9 +8,9 @@ KaimingUniform -该接口实现Kaiming均匀分布方式的权重初始化。 +该接口实现 Kaiming 均匀分布方式的权重初始化。 -该接口为权重初始化函数,方法来自Kaiming He,Xiangyu Zhang,Shaoqing Ren 和 Jian Sun所写的论文:`Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `_ 。这是一个鲁棒性特别强的初始化方法,并且适应了非线性激活函数(rectifier nonlinearities)。 +该接口为权重初始化函数,方法来自 Kaiming He,Xiangyu Zhang,Shaoqing Ren 和 Jian Sun 所写的论文:`Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `_ 。这是一个鲁棒性特别强的初始化方法,并且适应了非线性激活函数(rectifier nonlinearities)。 在均匀分布中,范围为[-x,x],其中: @@ -21,7 +21,7 @@ KaimingUniform 参数 :::::::::::: - - **fan_in** (float16|float32,可选) - 可训练的 Tensor 的 in_features值。如果设置为 None,程序会自动计算该值。如果你不想使用 in_features,你可以自己设置这个值。默认值为None。 + - **fan_in** (float16|float32,可选) - 可训练的 Tensor 的 in_features 值。如果设置为 None,程序会自动计算该值。如果你不想使用 in_features,你可以自己设置这个值。默认值为 None。 - **negative_slope** (float,可选) - 只适用于使用 leaky_relu 作为激活函数时的 negative_slope 参数。默认值为 :math:`0.0`。 - **nonlinearity** (str,可选) - 非线性激活函数。默认值为 relu。 diff --git a/docs/api/paddle/nn/initializer/Orthogonal_cn.rst b/docs/api/paddle/nn/initializer/Orthogonal_cn.rst index 1f072db6113..9bee111c7bf 100644 --- a/docs/api/paddle/nn/initializer/Orthogonal_cn.rst +++ b/docs/api/paddle/nn/initializer/Orthogonal_cn.rst @@ -7,7 +7,7 @@ Orthogonal 正交矩阵初始化,被初始化的参数为 (半)正交的。 -该初始化策略仅适用于 2-D及以上的参数。对于维度超过2的参数,将0维作为行数,将1维及之后的维度展平为列数。 +该初始化策略仅适用于 2-D 及以上的参数。对于维度超过 2 的参数,将 0 维作为行数,将 1 维及之后的维度展平为列数。 具体可以描述为: diff --git a/docs/api/paddle/nn/initializer/calculate_gain_cn.rst b/docs/api/paddle/nn/initializer/calculate_gain_cn.rst index 585dbb75ca2..3c643b24815 100644 --- a/docs/api/paddle/nn/initializer/calculate_gain_cn.rst +++ 
b/docs/api/paddle/nn/initializer/calculate_gain_cn.rst @@ -5,16 +5,16 @@ calculate_gain .. py:function:: paddle.nn.initializer.calculate_gain(nonlinearity, param=None) -部分激活函数的推荐增益值(增益值可用于设置某些初始化API,以调整初始化值)。 +部分激活函数的推荐增益值(增益值可用于设置某些初始化 API,以调整初始化值)。 参数 ::::::::: - - nonlinearity (str) - 非线性激活函数的名称。如果输入一个线性的函数,例如:`linear/conv1d/conv2d/conv3d/conv1d_transpose/conv2d_transpose/conv3d_transpose`,则返回1.0。 - - param (bool|int|float,可选) - 某些激活函数的参数,目前仅用于 ``leaky_relu`` 中的计算。默认为 ``None``,此时以0.01来参与 ``leaky_relu`` 的增益值计算。 + - nonlinearity (str) - 非线性激活函数的名称。如果输入一个线性的函数,例如:`linear/conv1d/conv2d/conv3d/conv1d_transpose/conv2d_transpose/conv3d_transpose`,则返回 1.0。 + - param (bool|int|float,可选) - 某些激活函数的参数,目前仅用于 ``leaky_relu`` 中的计算。默认为 ``None``,此时以 0.01 来参与 ``leaky_relu`` 的增益值计算。 返回 ::::::::: -Python float数,推荐的增益值。 +Python float 数,推荐的增益值。 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/initializer/set_global_initializer_cn.rst b/docs/api/paddle/nn/initializer/set_global_initializer_cn.rst index 513e58d2278..145939287ff 100644 --- a/docs/api/paddle/nn/initializer/set_global_initializer_cn.rst +++ b/docs/api/paddle/nn/initializer/set_global_initializer_cn.rst @@ -5,18 +5,18 @@ set_global_initializer .. 
py:function:: paddle.nn.initializer.set_global_initializer(weight_init, bias_init=None) -该API用于设置Paddle框架中全局的参数初始化方法。该API只对位于其后的代码生效。 +该 API 用于设置 Paddle 框架中全局的参数初始化方法。该 API 只对位于其后的代码生效。 -模型参数为模型中的weight和bias统称,在fluid中对应fluid.Parameter类,继承自fluid.Variable,是一种可持久化的variable。 -该API的设置仅对模型参数生效,对通过 :ref:`cn_api_fluid_layers_create_global_var` 、 :ref:`cn_api_fluid_layers_create_tensor` 等API创建的变量不会生效。 +模型参数为模型中的 weight 和 bias 统称,在 fluid 中对应 fluid.Parameter 类,继承自 fluid.Variable,是一种可持久化的 variable。 +该 API 的设置仅对模型参数生效,对通过 :ref:`cn_api_fluid_layers_create_global_var` 、 :ref:`cn_api_fluid_layers_create_tensor` 等 API 创建的变量不会生效。 如果创建网络层时还通过 ``param_attr`` 、 ``bias_attr`` 设置了初始化方式,这里的全局设置将不会生效,因为其优先级更低。 参数 :::::::::::: - - **weight_init** (Initializer) - 设置框架的全局的weight参数初始化方法。 - - **bias_init** (Initializer,可选) - 设置框架的全局的bias参数初始化方法。默认:None。 + - **weight_init** (Initializer) - 设置框架的全局的 weight 参数初始化方法。 + - **bias_init** (Initializer,可选) - 设置框架的全局的 bias 参数初始化方法。默认:None。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/utils/parameters_to_vector_cn.rst b/docs/api/paddle/nn/utils/parameters_to_vector_cn.rst index 42abc4d4e8d..7682090a289 100644 --- a/docs/api/paddle/nn/utils/parameters_to_vector_cn.rst +++ b/docs/api/paddle/nn/utils/parameters_to_vector_cn.rst @@ -5,16 +5,16 @@ parameters_to_vector .. 
py:function:: paddle.nn.utils.parameters_to_vector(parameters, name=None) -将输入的多个parameter展平并连接为1个1-D Tensor。 +将输入的多个 parameter 展平并连接为 1 个 1-D Tensor。 参数 ::::::::: - - parameters (Iterable[Tensor]) - 可迭代的多个parameter。parameter为Layer中可训练的Tensor。 + - parameters (Iterable[Tensor]) - 可迭代的多个 parameter。parameter 为 Layer 中可训练的 Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,多个parameter展平并连接的1-D Tensor +``Tensor``,多个 parameter 展平并连接的 1-D Tensor 代码示例 ::::::::: diff --git a/docs/api/paddle/nn/utils/remove_weight_norm_cn.rst b/docs/api/paddle/nn/utils/remove_weight_norm_cn.rst index 043ac52d3ba..4a248fdb639 100644 --- a/docs/api/paddle/nn/utils/remove_weight_norm_cn.rst +++ b/docs/api/paddle/nn/utils/remove_weight_norm_cn.rst @@ -16,7 +16,7 @@ remove_weight_norm 返回 :::::::::::: - ``Layer``,移除权重归一化hook之后的层 + ``Layer``,移除权重归一化 hook 之后的层 代码示例 :::::::::::: diff --git a/docs/api/paddle/nn/utils/spectral_norm_cn.rst b/docs/api/paddle/nn/utils/spectral_norm_cn.rst index 60543a5879d..aee5ca5376c 100644 --- a/docs/api/paddle/nn/utils/spectral_norm_cn.rst +++ b/docs/api/paddle/nn/utils/spectral_norm_cn.rst @@ -8,16 +8,16 @@ spectral_norm 该接口根据以下步骤对传入的 ``layer`` 中的权重参数进行谱归一化: -步骤1:生成形状为[H]的向量U,以及形状为[W]的向量V,其中H是输入权重张量的第 ``dim`` 个维度,W是剩余维度的乘积。 +步骤 1:生成形状为[H]的向量 U,以及形状为[W]的向量 V,其中 H 是输入权重张量的第 ``dim`` 个维度,W 是剩余维度的乘积。 -步骤2: ``n_power_iterations`` 是一个正整数,用U和V迭代计算 ``n_power_iterations`` 轮,迭代步骤如下。 +步骤 2: ``n_power_iterations`` 是一个正整数,用 U 和 V 迭代计算 ``n_power_iterations`` 轮,迭代步骤如下。 .. math:: \mathbf{v} &:= \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}\\ \mathbf{u} &:= \frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2} -步骤3:计算 :math:`\sigma(\mathbf{W})` 并将特征值归一化。 +步骤 3:计算 :math:`\sigma(\mathbf{W})` 并将特征值归一化。 .. 
math:: \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ @@ -32,7 +32,7 @@ spectral_norm - **name** (str,可选) - 权重参数的名字。默认值为 ``weight``。 - **n_power_iterations** (int,可选) - 将用于计算的 ``SpectralNorm`` 幂迭代次数,默认值:1。 - **eps** (float,可选) - ``eps`` 用于保证计算中的数值稳定性,分母会加上 ``eps`` 防止除零。默认值:1e-12。 - - **dim** (int,可选) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果input(weight)是fc层的权重,则应设置为0;如果input(weight)是conv层的权重,则应设置为1。默认值:None。 + - **dim** (int,可选) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果 input(weight)是 fc 层的权重,则应设置为 0;如果 input(weight)是 conv 层的权重,则应设置为 1。默认值:None。 返回 :::::::::::: diff --git a/docs/api/paddle/nn/utils/vector_to_parameters_cn.rst b/docs/api/paddle/nn/utils/vector_to_parameters_cn.rst index fd0275406df..887ad14a425 100644 --- a/docs/api/paddle/nn/utils/vector_to_parameters_cn.rst +++ b/docs/api/paddle/nn/utils/vector_to_parameters_cn.rst @@ -5,12 +5,12 @@ vector_to_parameters .. py:function:: paddle.nn.utils.vector_to_parameters(vec, parameters, name=None) -将1个1-D Tensor按顺序切分给输入的多个parameter。 +将 1 个 1-D Tensor 按顺序切分给输入的多个 parameter。 参数 ::::::::: - - vec (Tensor) - 一个1-D Tensor。 - - parameters (Iterable[Tensor]) - 可迭代的多个parameter。parameter为Layer中可训练的Tensor。 + - vec (Tensor) - 一个 1-D Tensor。 + - parameters (Iterable[Tensor]) - 可迭代的多个 parameter。parameter 为 Layer 中可训练的 Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/nn/utils/weight_norm_cn.rst b/docs/api/paddle/nn/utils/weight_norm_cn.rst index 9d966a5414b..04b366c7b4a 100644 --- a/docs/api/paddle/nn/utils/weight_norm_cn.rst +++ b/docs/api/paddle/nn/utils/weight_norm_cn.rst @@ -17,12 +17,12 @@ weight_norm - **layer** (paddle.nn.Layer) - 要添加权重归一化的层。 - **name** (str,可选) - 权重参数的名字。默认值为 ``weight``。 - - **dim** (int|None,可选) - 进行归一化操作的切片所在维度,是小于权重Tensor rank的非负数。比如卷积的权重shape是 [cout,cin,kh,kw] , rank是4,则dim可以选0,1,2,3;fc的权重shape是 [cout,cin] ,rank是2,dim可以选0,1。如果为None就对所有维度上的元素做归一化。默认:0。 + - **dim** (int|None,可选) - 进行归一化操作的切片所在维度,是小于权重 Tensor rank 的非负数。比如卷积的权重 shape 是 
[cout,cin,kh,kw] , rank 是 4,则 dim 可以选 0,1,2,3;fc 的权重 shape 是 [cout,cin] ,rank 是 2,dim 可以选 0,1。如果为 None 就对所有维度上的元素做归一化。默认:0。 返回 :::::::::::: - ``Layer``,添加了权重归一化hook的层 + ``Layer``,添加了权重归一化 hook 的层 代码示例 :::::::::::: diff --git a/docs/api/paddle/no_grad_cn.rst b/docs/api/paddle/no_grad_cn.rst index 1b255268082..9c6da63972a 100644 --- a/docs/api/paddle/no_grad_cn.rst +++ b/docs/api/paddle/no_grad_cn.rst @@ -7,7 +7,7 @@ no_grad -创建一个上下文来禁用动态图梯度计算。在此模式下,每次计算的结果都将具有stop_gradient=True。 +创建一个上下文来禁用动态图梯度计算。在此模式下,每次计算的结果都将具有 stop_gradient=True。 也可以用作一个装饰器(需要创建实例对象作为装饰器)。 diff --git a/docs/api/paddle/normal_cn.rst b/docs/api/paddle/normal_cn.rst index 2c98a150987..25eb5e49781 100644 --- a/docs/api/paddle/normal_cn.rst +++ b/docs/api/paddle/normal_cn.rst @@ -16,8 +16,8 @@ normal 参数 :::::::::: - - mean (float|Tensor,可选) - 输出 Tensor 的正态分布的平均值。如果 ``mean`` 是 float,则表示输出 Tensor 中所有元素的正态分布的平均值。如果 ``mean`` 是 Tensor (支持的数据类型为 float32、float64),则表示输出Tensor中每个元素的正态分布的平均值。默认值为0.0。 - - std (float|Tensor,可选) - 输出 Tensor 的正态分布的标准差。如果 ``std`` 是 float,则表示输出 Tensor 中所有元素的正态分布的标准差。如果 ``std`` 是 Tensor (支持的数据类型为 float32、float64),则表示输出Tensor中每个元素的正态分布的标准差。默认值为0.0。 + - mean (float|Tensor,可选) - 输出 Tensor 的正态分布的平均值。如果 ``mean`` 是 float,则表示输出 Tensor 中所有元素的正态分布的平均值。如果 ``mean`` 是 Tensor (支持的数据类型为 float32、float64),则表示输出 Tensor 中每个元素的正态分布的平均值。默认值为 0.0。 + - std (float|Tensor,可选) - 输出 Tensor 的正态分布的标准差。如果 ``std`` 是 float,则表示输出 Tensor 中所有元素的正态分布的标准差。如果 ``std`` 是 Tensor (支持的数据类型为 float32、float64),则表示输出 Tensor 中每个元素的正态分布的标准差。默认值为 0.0。 - shape (list|tuple|Tensor,可选) - 生成的随机 Tensor 的形状。如果 ``shape`` 是 list、tuple,则其中的元素可以是 int,或者是形状为[1]且数据类型为 int32、int64 的 Tensor。如果 ``shape`` 是 Tensor,则是数据类型为 int32、int64 的一维 Tensor。如果 ``mean`` 或者 ``std`` 是 Tensor,输出 Tensor 的形状和 ``mean`` 或者 ``std`` 相同(此时 ``shape`` 无效)。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/not_equal_cn.rst b/docs/api/paddle/not_equal_cn.rst index 2935d5cbf0d..d0b66ea20fc 100644 --- 
a/docs/api/paddle/not_equal_cn.rst +++ b/docs/api/paddle/not_equal_cn.rst @@ -13,8 +13,8 @@ not_equal 参数 :::::::::::: - - **x** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - - **y** (Tensor) - 输入Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **x** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 + - **y** (Tensor) - 输入 Tensor,支持的数据类型包括 bool、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/numel_cn.rst b/docs/api/paddle/numel_cn.rst index acddcbe21cc..1c42d14086b 100644 --- a/docs/api/paddle/numel_cn.rst +++ b/docs/api/paddle/numel_cn.rst @@ -6,16 +6,16 @@ numel .. py:function:: paddle.numel(x) -返回一个长度为1并且元素值为输入 ``x`` 元素个数的 Tensor。 +返回一个长度为 1 并且元素值为输入 ``x`` 元素个数的 Tensor。 参数 :::::::::::: - - **x** (Tensor) - 输入 Tensor,数据类型为int32、int64、float16、float32、float64、int32、int64。 + - **x** (Tensor) - 输入 Tensor,数据类型为 int32、int64、float16、float32、float64、int32、int64。 返回 :::::::::::: - 返回长度为1并且元素值为 ``x`` 元素个数的 Tensor。 + 返回长度为 1 并且元素值为 ``x`` 元素个数的 Tensor。 代码示例 diff --git a/docs/api/paddle/ones_like_cn.rst b/docs/api/paddle/ones_like_cn.rst index 31ce37fe18d..5e3039f70f9 100644 --- a/docs/api/paddle/ones_like_cn.rst +++ b/docs/api/paddle/ones_like_cn.rst @@ -6,18 +6,18 @@ ones_like .. 
py:function:: paddle.ones_like(x, dtype=None, name=None) -返回一个和输入参数 ``x`` 具有相同形状的数值都为1的 Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同,如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 +返回一个和输入参数 ``x`` 具有相同形状的数值都为 1 的 Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同,如果 ``dtype`` 为 None,则输出 Tensor 的数据类型与 ``x`` 相同。 参数 :::::::::: - - **x** (Tensor) – 输入的Tensor,数据类型可以是 bool,float16,float32,float64,int32,int64。 + - **x** (Tensor) – 输入的 Tensor,数据类型可以是 bool,float16,float32,float64,int32,int64。 - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 bool,float16, float32,float64,int32,int64。当该参数值为 None 时,输出 Tensor 的数据类型与 ``x`` 相同。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: -Tensor:和 ``x`` 具有相同形状的数值都为1的 Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 +Tensor:和 ``x`` 具有相同形状的数值都为 1 的 Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 代码示例 diff --git a/docs/api/paddle/onnx/export_cn.rst b/docs/api/paddle/onnx/export_cn.rst index 700e6ece309..410f1f1e23a 100644 --- a/docs/api/paddle/onnx/export_cn.rst +++ b/docs/api/paddle/onnx/export_cn.rst @@ -5,7 +5,7 @@ export .. py:function:: paddle.onnx.export(layer, path, input_spec=None, opset_version=9, **configs) -将输入的 ``Layer`` 存储为 ``ONNX`` 格式的模型,可使用onnxruntime或其他框架进行推理。 +将输入的 ``Layer`` 存储为 ``ONNX`` 格式的模型,可使用 onnxruntime 或其他框架进行推理。 .. 
note:: @@ -15,9 +15,9 @@ export ::::::::: - layer (Layer) - 导出的 ``Layer`` 对象。 - path (str) - 存储模型的路径前缀。格式为 ``dirname/file_prefix`` 或者 ``file_prefix``,导出后``ONNX``模型自动添加后缀 ``.onnx`` 。 - - input_spec (list[InputSpec|Tensor],可选) - 描述存储模型forward方法的输入,可以通过InputSpec或者示例Tensor进行描述。如果为 ``None``,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 - - opset_version(int,可选) - 导出 ``ONNX`` 模型的Opset版本,目前稳定支持导出的版本为9、10和11。默认为 ``9``。 - - **configs (dict,可选) - 其他用于兼容的存储配置选项。这些选项将来可能被移除,如果不是必须使用,不推荐使用这些配置选项。默认为 ``None``。目前支持以下配置选项:(1) output_spec (list[Tensor]) - 选择存储模型的输出目标。默认情况下,所有原 ``Layer`` forward方法的返回值均会作为存储模型的输出。如果传入的 ``output_spec`` 列表不是所有的输出变量,存储的模型将会根据 ``output_spec`` 所包含的结果被裁剪。 + - input_spec (list[InputSpec|Tensor],可选) - 描述存储模型 forward 方法的输入,可以通过 InputSpec 或者示例 Tensor 进行描述。如果为 ``None``,所有原 ``Layer`` forward 方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 + - opset_version(int,可选) - 导出 ``ONNX`` 模型的 Opset 版本,目前稳定支持导出的版本为 9、10 和 11。默认为 ``9``。 + - **configs (dict,可选) - 其他用于兼容的存储配置选项。这些选项将来可能被移除,如果不是必须使用,不推荐使用这些配置选项。默认为 ``None``。目前支持以下配置选项:(1) output_spec (list[Tensor]) - 选择存储模型的输出目标。默认情况下,所有原 ``Layer`` forward 方法的返回值均会作为存储模型的输出。如果传入的 ``output_spec`` 列表不是所有的输出变量,存储的模型将会根据 ``output_spec`` 所包含的结果被裁剪。 返回 ::::::::: diff --git a/docs/api/paddle/optimizer/Adadelta_cn.rst b/docs/api/paddle/optimizer/Adadelta_cn.rst index 843440520bf..03c9aad5b9e 100644 --- a/docs/api/paddle/optimizer/Adadelta_cn.rst +++ b/docs/api/paddle/optimizer/Adadelta_cn.rst @@ -9,7 +9,7 @@ Adadelta .. 
note:: 此接口不支持稀疏参数更新。 -Adadelta优化器,是对 :ref:`Adagrad ` 的改进。 +Adadelta 优化器,是对 :ref:`Adagrad ` 的改进。 相关论文:`ADADELTA: AN ADAPTIVE LEARNING RATE METHOD `_ 。 @@ -25,16 +25,16 @@ Adadelta优化器,是对 :ref:`Adagrad ` 的 参数 :::::::::::: - - **learning_rate** (float|_LRScheduleri,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 - - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为1e-06。 - - **rho** (float,可选) - 算法中的衰减率,默认值为0.95。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|Tensor,可选) - 权重衰减系数,是一个float类型或者shape为[1],数据类型为float32的Tensor类型。默认值为0.01。 + - **learning_rate** (float|_LRScheduleri,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler 类,默认值为 0.001。 + - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为 1e-06。 + - **rho** (float,可选) - 算法中的衰减率,默认值为 0.95。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **weight_decay** (float|Tensor,可选) - 权重衰减系数,是一个 float 类型或者 shape 为[1],数据类型为 float32 的 Tensor 类型。默认值为 0.01。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 + 默认值为 None,此时将不进行梯度裁剪。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 -Adadelta优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `,用来解决Adam优化器中L2正则化失效的问题。 +Adadelta 优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION 论文 `,用来解决 Adam 优化器中 L2 正则化失效的问题。 @@ -63,7 +63,7 @@ step() .. 
note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -91,18 +91,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` 。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program` 。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 组成的集合,默认值为 None。 **返回** - tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -130,7 +130,7 @@ clear_grad() .. note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -156,9 +156,9 @@ set_lr(value) .. 
note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler 时,无法使用该 API 手动设置学习率,因为这将导致冲突。 **参数** @@ -196,9 +196,9 @@ get_lr() .. note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler 时,每次调用的返回值都相同,否则返回当前步骤的学习率。 **返回** diff --git a/docs/api/paddle/optimizer/Adagrad_cn.rst b/docs/api/paddle/optimizer/Adagrad_cn.rst index c4bfae53c9b..e243d2661d9 100644 --- a/docs/api/paddle/optimizer/Adagrad_cn.rst +++ b/docs/api/paddle/optimizer/Adagrad_cn.rst @@ -6,7 +6,7 @@ Adagrad .. py:class:: paddle.optimizer.Adagrad(learning_rate, epsilon=1e-06, parameters=None, weight_decay=None, grad_clip=None, name=None, initial_accumulator_value=0.0) -Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以针对不同参数样本数不平均的问题,自适应地为各个参数分配不同的学习率。 +Adaptive Gradient 优化器(自适应梯度优化器,简称 Adagrad)可以针对不同参数样本数不平均的问题,自适应地为各个参数分配不同的学习率。 其参数更新的计算过程如下: @@ -18,23 +18,23 @@ Adaptive Gradient 优化器(自适应梯度优化器,简称Adagrad)可以 相关论文:`Adaptive Subgradient Methods for Online Learning and Stochastic Optimization `_ 。 -原始论文的算法中没有引入上述公式中的 ``epsilon`` 属性,此处引入该属性用于维持数值稳定性,避免除0错误发生。 +原始论文的算法中没有引入上述公式中的 ``epsilon`` 属性,此处引入该属性用于维持数值稳定性,避免除 0 错误发生。 -引入epsilon参数依据:`Per-parameter adaptive learning rate methods `_ 。 +引入 epsilon 参数依据:`Per-parameter adaptive learning rate methods `_ 。 参数 :::::::::::: - - **learning_rate** (float|Tensor) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的Tensor。 - - **epsilon** (float,可选) - 维持数值稳定性的浮点型值,默认值为1e-06。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - 
**learning_rate** (float|Tensor) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个值为浮点型的 Tensor。 + - **epsilon** (float,可选) - 维持数值稳定性的浮点型值,默认值为 1e-06。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是 float 类型的 L2 正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; - 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为 None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 + 默认值为 None,此时将不进行梯度裁剪。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **initial_accumulator_value** (float,可选) - moment累加器的初始值,默认值为0.0。 + - **initial_accumulator_value** (float,可选) - moment 累加器的初始值,默认值为 0.0。 代码示例 :::::::::::: diff --git a/docs/api/paddle/optimizer/AdamW_cn.rst b/docs/api/paddle/optimizer/AdamW_cn.rst index bd4f96d4916..6cd79791d19 100755 --- a/docs/api/paddle/optimizer/AdamW_cn.rst +++ b/docs/api/paddle/optimizer/AdamW_cn.rst @@ -8,7 +8,7 @@ AdamW -AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION `_ ,用来解决 :ref:`Adam ` 优化器中L2正则化失效的问题。 +AdamW 优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION `_ ,用来解决 :ref:`Adam ` 优化器中 L2 正则化失效的问题。 其参数更新的计算公式如下: @@ -26,23 +26,23 @@ AdamW优化器出自 `DECOUPLED WEIGHT DECAY REGULARIZATION `_ .. 
note:: - 当前,AdamW不支持稀疏参数优化。 + 当前,AdamW 不支持稀疏参数优化。 参数 :::::::::::: - - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 - - **beta1** (float|Tensor,可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9。 - - **beta2** (float|Tensor,可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999。 - - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|Tensor,可选) - 权重衰减系数,是一个float类型或者shape为[1],数据类型为float32的Tensor类型。默认值为0.01。 - - **lr_ratio** (function|None,可选) – 传入函数时,会为每个参数计算一个权重衰减系数,并使用该系数与学习率的乘积作为新的学习率。否则,使用原学习率。仅支持GPU设备,默认值为None。 - - **apply_decay_param_fun** (function|None,可选):传入函数时,只有可以使 apply_decay_param_fun(Tensor.name)==True的Tensor会进行weight decay更新。只有在想要指定特定需要进行weight decay更新的参数时使用。默认值为None。 + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler 类,默认值为 0.001。 + - **beta1** (float|Tensor,可选) - 一阶矩估计的指数衰减率,是一个 float 类型或者一个 shape 为[1],数据类型为 float32 的 Tensor 类型。默认值为 0.9。 + - **beta2** (float|Tensor,可选) - 二阶矩估计的指数衰减率,是一个 float 类型或者一个 shape 为[1],数据类型为 float32 的 Tensor 类型。默认值为 0.999。 + - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为 1e-08。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **weight_decay** (float|Tensor,可选) - 权重衰减系数,是一个 float 类型或者 shape 为[1],数据类型为 float32 的 Tensor 类型。默认值为 0.01。 + - **lr_ratio** (function|None,可选) – 传入函数时,会为每个参数计算一个权重衰减系数,并使用该系数与学习率的乘积作为新的学习率。否则,使用原学习率。仅支持 GPU 设备,默认值为 None。 + - **apply_decay_param_fun** (function|None,可选):传入函数时,只有可以使 apply_decay_param_fun(Tensor.name)==True 的 Tensor 会进行 weight decay 更新。只有在想要指定特定需要进行 weight decay 更新的参数时使用。默认值为 None。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 - - **lazy_mode** (bool,可选) - 
设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False。 - - **multi_precision** (bool,可选) – 在基于GPU设备的混合精度训练场景中,该参数主要用于保证梯度更新的数值稳定性。设置为True时,优化器会针对FP16类型参数保存一份与其值相等的FP32类型参数备份。梯度更新时,首先将梯度类型提升到FP32,然后将其更新到FP32类型参数备份中。最后,更新后的FP32类型值会先转换为FP16类型,再赋值给实际参与计算的FP16类型参数。默认为False。 + 默认值为 None,此时将不进行梯度裁剪。 + - **lazy_mode** (bool,可选) - 设为 True 时,仅更新当前具有梯度的元素。官方 Adam 算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。lazy mode 仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为 False。 + - **multi_precision** (bool,可选) – 在基于 GPU 设备的混合精度训练场景中,该参数主要用于保证梯度更新的数值稳定性。设置为 True 时,优化器会针对 FP16 类型参数保存一份与其值相等的 FP32 类型参数备份。梯度更新时,首先将梯度类型提升到 FP32,然后将其更新到 FP32 类型参数备份中。最后,更新后的 FP32 类型值会先转换为 FP16 类型,再赋值给实际参与计算的 FP16 类型参数。默认为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -99,7 +99,7 @@ step() ''''''''' .. 
note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -126,18 +126,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program` 。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program` 。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 组成的集合,默认值为 None。 **返回** -tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 +tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -165,7 +165,7 @@ clear_grad() ''''''''' .. note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -190,9 +190,9 @@ set_lr(value) ''''''''' .. 
note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler 时,无法使用该 API 手动设置学习率,因为这将导致冲突。 **参数** @@ -229,9 +229,9 @@ get_lr() ''''''''' .. note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler 时,每次调用的返回值都相同,否则返回当前步骤的学习率。 **返回** diff --git a/docs/api/paddle/optimizer/Adam_cn.rst b/docs/api/paddle/optimizer/Adam_cn.rst index afa37f99bf5..d50dce4d920 100755 --- a/docs/api/paddle/optimizer/Adam_cn.rst +++ b/docs/api/paddle/optimizer/Adam_cn.rst @@ -8,7 +8,7 @@ Adam -Adam优化器出自 `Adam论文 `_ 的第二节,能够利用梯度的一阶矩估计和二阶矩估计动态调整每个参数的学习率。 +Adam 优化器出自 `Adam 论文 `_ 的第二节,能够利用梯度的一阶矩估计和二阶矩估计动态调整每个参数的学习率。 其参数更新的计算公式如下: @@ -28,18 +28,18 @@ Adam优化器出自 `Adam论文 `_ 的第二节 参数 :::::::::::: - - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 - - **beta1** (float|Tensor,可选) - 一阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.9。 - - **beta2** (float|Tensor,可选) - 二阶矩估计的指数衰减率,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。默认值为0.999。 - - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler 类,默认值为 0.001。 + - **beta1** (float|Tensor,可选) - 一阶矩估计的指数衰减率,是一个 float 类型或者一个 shape 为[1],数据类型为 float32 的 Tensor 类型。默认值为 0.9。 + - **beta2** (float|Tensor,可选) - 二阶矩估计的指数衰减率,是一个 float 类型或者一个 shape 为[1],数据类型为 float32 的 Tensor 类型。默认值为 0.999。 + - **epsilon** (float,可选) - 
保持数值稳定性的短浮点类型值,默认值为 1e-08。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是 float 类型的 L2 正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; - 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为 None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 + 默认值为 None,此时将不进行梯度裁剪。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **lazy_mode** (bool,可选) - 设为True时,仅更新当前具有梯度的元素。官方Adam算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。lazy mode仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为False。 + - **lazy_mode** (bool,可选) - 设为 True 时,仅更新当前具有梯度的元素。官方 Adam 算法有两个移动平均累加器(moving-average accumulators)。累加器在每一步都会更新。在密集模式和稀疏模式下,两条移动平均线的每个元素都会更新。如果参数非常大,那么更新可能很慢。lazy mode 仅更新当前具有梯度的元素,所以它会更快。但是这种模式与原始的算法有不同的描述,可能会导致不同的结果,默认为 False。 代码示例 @@ -91,7 +91,7 @@ step() ''''''''' .. 
note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -120,18 +120,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 组成的集合,默认值为 None。 **返回** - tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -161,7 +161,7 @@ clear_grad() ''''''''' .. note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -188,9 +188,9 @@ set_lr(value) .. 
note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler 时,无法使用该 API 手动设置学习率,因为这将导致冲突。 **参数** @@ -227,9 +227,9 @@ get_lr() ''''''''' .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler 时,每次调用的返回值都相同,否则返回当前步骤的学习率。 **返回** diff --git a/docs/api/paddle/optimizer/Adamax_cn.rst b/docs/api/paddle/optimizer/Adamax_cn.rst index 53539923ab6..7125659ec43 100755 --- a/docs/api/paddle/optimizer/Adamax_cn.rst +++ b/docs/api/paddle/optimizer/Adamax_cn.rst @@ -8,7 +8,7 @@ Adamax -Adamax优化器是参考 `Adam论文 `_ 第7节Adamax优化相关内容所实现的。Adamax算法是基于无穷大范数的 :ref:`Adam <_cn_api_paddle_optimizer_Adam>` 算法的一个变种,使学习率更新的算法更加稳定和简单。 +Adamax 优化器是参考 `Adam 论文 `_ 第 7 节 Adamax 优化相关内容所实现的。Adamax 算法是基于无穷大范数的 :ref:`Adam <_cn_api_paddle_optimizer_Adam>` 算法的一个变种,使学习率更新的算法更加稳定和简单。 其参数更新的计算公式如下: @@ -25,21 +25,21 @@ Adamax优化器是参考 `Adam论文 `_ 第7节 相关论文:`Adam: A Method for Stochastic Optimization `_ -论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性,避免除0错误,此处增加了这个参数。 +论文中没有 ``epsilon`` 参数。但是,为了保持数值稳定性,避免除 0 错误,此处增加了这个参数。 参数 :::::::::::: - - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 - - **beta1** (float,可选) - 一阶矩估计的指数衰减率,默认值为0.9。 - - **beta2** (float,可选) - 二阶矩估计的指数衰减率,默认值为0.999。 - - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为1e-08。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler 类,默认值为 0.001。 + - **beta1** (float,可选) - 一阶矩估计的指数衰减率,默认值为 0.9。 
+ - **beta2** (float,可选) - 二阶矩估计的指数衰减率,默认值为 0.999。 + - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为 1e-08。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是 float 类型的 L2 正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; - 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为 None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 + 默认值为 None,此时将不进行梯度裁剪。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 .. note:: @@ -72,7 +72,7 @@ step() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -101,18 +101,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 
组成集合,默认值为 None。 **返回** - tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -143,7 +143,7 @@ clear_grad() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -170,9 +170,9 @@ set_lr(value) .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler 时,无法使用该 API 手动设置学习率,因为这将导致冲突。 **参数** @@ -210,9 +210,9 @@ get_lr() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler 时,每次调用的返回值都相同,否则返回当前步骤的学习率。 **返回** diff --git a/docs/api/paddle/optimizer/Lamb_cn.rst b/docs/api/paddle/optimizer/Lamb_cn.rst index c6b47bf5437..10b7c8aabf9 100755 --- a/docs/api/paddle/optimizer/Lamb_cn.rst +++ b/docs/api/paddle/optimizer/Lamb_cn.rst @@ -22,20 +22,20 @@ LAMB(Layer-wise Adaptive Moments optimizer for Batching training)优化器 .. 
math:: w_t=w_{t_1}-\eta_t*\frac{\left \| w_{t-1}\right \|}{\left \| r_t+\lambda*w_{t-1}\right \|}*(r_t+\lambda*w_{t-1}) \\ -其中 :math:`m` 表示第一个动量,:math:`v` 代表第二个动量,:math:`\eta` 代表学习率,:math:`\lambda` 代表LAMB的权重学习率。 +其中 :math:`m` 表示第一个动量,:math:`v` 代表第二个动量,:math:`\eta` 代表学习率,:math:`\lambda` 代表 LAMB 的权重学习率。 相关论文:`Large Batch Optimization for Deep Learning: Training BERT in 76 minutes `_ 参数 :::::::::::: - - **learning_rate** (float|Tensor,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个Tensor,默认值为0.001。 - - **lamb_weight_decay** (float,可选) – LAMB权重衰减率。默认值为0.01。 - - **beta1** (float,可选) - 第一个动量估计的指数衰减率。默认值为0.9。 - - **beta2** (float,可选) - 第二个动量估计的指数衰减率。默认值为0.999。 - - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为1e-06。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue `。默认值为None,此时将不进行梯度裁剪。 + - **learning_rate** (float|Tensor,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个 Tensor,默认值为 0.001。 + - **lamb_weight_decay** (float,可选) – LAMB 权重衰减率。默认值为 0.01。 + - **beta1** (float,可选) - 第一个动量估计的指数衰减率。默认值为 0.9。 + - **beta2** (float,可选) - 第二个动量估计的指数衰减率。默认值为 0.999。 + - **epsilon** (float,可选) - 保持数值稳定性的短浮点类型值,默认值为 1e-06。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue `。默认值为 None,此时将不进行梯度裁剪。 - **exclude_from_weight_decay_fn** (function) - 当某个参数作为输入该函数返回值为 True 时,为该参数跳过权重衰减。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -67,7 +67,7 @@ step() .. 
note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -95,18 +95,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 组成集合,默认值为 None。 **返回** - tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -135,7 +135,7 @@ clear_grad() ''''''''' .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -162,9 +162,9 @@ set_lr(value) .. 
note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler 时,无法使用该 API 手动设置学习率,因为这将导致冲突。 **参数** @@ -202,9 +202,9 @@ get_lr() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler 时,每次调用的返回值都相同,否则返回当前步骤的学习率。 **返回** diff --git a/docs/api/paddle/optimizer/Momentum_cn.rst b/docs/api/paddle/optimizer/Momentum_cn.rst index e87ced5126c..0119c754b52 100644 --- a/docs/api/paddle/optimizer/Momentum_cn.rst +++ b/docs/api/paddle/optimizer/Momentum_cn.rst @@ -6,7 +6,7 @@ Momentum .. py:class:: paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9, parameters=None, use_nesterov=False, weight_decay=None, grad_clip=None, name=None) -该接口实现含有速度状态的Simple Momentum 优化器。 +该接口实现含有速度状态的 Simple Momentum 优化器。 该优化器含有牛顿动量标志,公式更新如下: @@ -23,13 +23,13 @@ Momentum 参数 :::::::::::: - - **learning_rate** (float|_LRScheduler,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 + - **learning_rate** (float|_LRScheduler,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler 类,默认值为 0.001。 - **momentum** (float,可选) - 动量因子。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **use_nesterov** (bool,可选) - 赋能牛顿动量,默认值False。 - - **weight_decay** (float|Tensor,可选) - 权重衰减系数,是一个float类型或者shape为[1],数据类型为float32的Tensor类型。默认值为0.01。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **use_nesterov** (bool,可选) - 赋能牛顿动量,默认值 False。 + - **weight_decay** (float|Tensor,可选) - 权重衰减系数,是一个 float 类型或者 shape 为[1],数据类型为 float32 的 Tensor 类型。默认值为 0.01。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 
:ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 + 默认值为 None,此时将不进行梯度裁剪。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -58,7 +58,7 @@ step() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -84,18 +84,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 组成的集合,默认值为 None。 **返回** - tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -122,7 +122,7 @@ clear_grad() .. 
note:: - 该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 + 该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -147,4 +147,4 @@ set_lr(value) .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 diff --git a/docs/api/paddle/optimizer/Optimizer_cn.rst b/docs/api/paddle/optimizer/Optimizer_cn.rst index 93e8a27dc03..c30c5a5a475 100755 --- a/docs/api/paddle/optimizer/Optimizer_cn.rst +++ b/docs/api/paddle/optimizer/Optimizer_cn.rst @@ -12,13 +12,13 @@ Optimizer 参数 :::::::::::: - - **learning_rate** (float|_LRSeduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **learning_rate** (float|_LRScheduler) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler 类,默认值为 0.001。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是 float 类型的 L2 正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; - 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为 None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 + 默认值为 None,此时将不进行梯度裁剪。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -27,7 +27,7 @@ Optimizer .. code-block:: python - #以子类Adam为例 + #以子类 Adam 为例 import paddle import numpy as np @@ -49,7 +49,7 @@ step() .. 
note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -79,18 +79,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 组成的集合,默认值为 None。 **返回** - tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -121,7 +121,7 @@ clear_grad() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -148,9 +148,9 @@ set_lr(value) .. 
note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler 时,无法使用该 API 手动设置学习率,因为这将导致冲突。 **参数** @@ -188,9 +188,9 @@ get_lr() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler 时,每次调用的返回值都相同,否则返回当前步骤的学习率。 **返回** diff --git a/docs/api/paddle/optimizer/Overview_cn.rst b/docs/api/paddle/optimizer/Overview_cn.rst index bef0516d854..aa57de0ba91 100644 --- a/docs/api/paddle/optimizer/Overview_cn.rst +++ b/docs/api/paddle/optimizer/Overview_cn.rst @@ -3,43 +3,43 @@ paddle.optimizer --------------------- -paddle.optimizer 目录下包含飞桨框架支持的优化器算法相关的API与学习率衰减相关的API。具体如下: +paddle.optimizer 目录下包含飞桨框架支持的优化器算法相关的 API 与学习率衰减相关的 API。具体如下: -- :ref:`优化器算法相关API ` -- :ref:`学习率下降相关API ` +- :ref:`优化器算法相关 API ` +- :ref:`学习率下降相关 API ` .. _about_optimizer: -优化器算法相关API +优化器算法相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`Adadelta ` ", "Adadelta优化器" - " :ref:`Adagrad ` ", "Adagrad优化器" - " :ref:`Adam ` ", "Adam优化器" - " :ref:`Adamax ` ", "Adamax优化器" - " :ref:`AdamW ` ", "AdamW优化器" - " :ref:`Lamb ` ", "Lamb优化器" - " :ref:`Momentum ` ", "Momentum优化器" + " :ref:`Adadelta ` ", "Adadelta 优化器" + " :ref:`Adagrad ` ", "Adagrad 优化器" + " :ref:`Adam ` ", "Adam 优化器" + " :ref:`Adamax ` ", "Adamax 优化器" + " :ref:`AdamW ` ", "AdamW 优化器" + " :ref:`Lamb ` ", "Lamb 优化器" + " :ref:`Momentum ` ", "Momentum 优化器" " :ref:`Optimizer ` ", "飞桨框架优化器基类" - " :ref:`RMSProp ` ", "RMSProp优化器" - " :ref:`SGD ` ", "SGD优化器" + " :ref:`RMSProp ` ", "RMSProp 优化器" + " :ref:`SGD ` ", "SGD 优化器" .. _about_lr: -学习率衰减相关API +学习率衰减相关 API ::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`CosineAnnealingDecay ` ", "Cosine Annealing学习率衰减" + " :ref:`CosineAnnealingDecay ` ", "Cosine Annealing 学习率衰减" " :ref:`ExponentialDecay ` ", "Exponential 学习率衰减" " :ref:`InverseTimeDecay ` ", "Inverse Time 学习率衰减" " :ref:`LRScheduler ` ", "学习率衰减的基类" @@ -47,11 +47,11 @@ paddle.optimizer 目录下包含飞桨框架支持的优化器算法相关的API " :ref:`LinearWarmup ` ", "Linear Warmup 学习率衰减" " :ref:`MultiStepDecay ` ", "MultiStep 学习率衰减" " :ref:`NaturalExpDecay ` ", "NatualExp 学习率衰减" - " :ref:`NoamDecay ` ", "Norm学习率衰减" + " :ref:`NoamDecay ` ", "Norm 学习率衰减" " :ref:`PiecewiseDecay ` ", "分段设置学习率" " :ref:`PolynomialDecay ` ", "多项式学习率衰减" " :ref:`ReduceOnPlateau ` ", "loss 自适应学习率衰减" " :ref:`StepDecay ` ", "按指定间隔轮数学习率衰减" - " :ref:`MultiplicativeDecay ` ", "根据lambda函数进行学习率衰减" - " :ref:`OneCycleLR ` ", "One Cycle学习率衰减" - " :ref:`CyclicLR ` ", "Cyclic学习率衰减" + " :ref:`MultiplicativeDecay ` ", "根据 lambda 函数进行学习率衰减" + " :ref:`OneCycleLR ` ", "One Cycle 学习率衰减" + " :ref:`CyclicLR ` ", "Cyclic 学习率衰减" diff --git a/docs/api/paddle/optimizer/RMSProp_cn.rst b/docs/api/paddle/optimizer/RMSProp_cn.rst index e913a986f84..a391f0eb333 100755 --- a/docs/api/paddle/optimizer/RMSProp_cn.rst +++ b/docs/api/paddle/optimizer/RMSProp_cn.rst @@ -8,7 +8,7 @@ RMSProp -该接口实现均方根传播(RMSProp)法,是一种未发表的,自适应学习率的方法。原演示幻灯片中的第29张提出了 `RMSProp `_ 。等式如下所示: +该接口实现均方根传播(RMSProp)法,是一种未发表的,自适应学习率的方法。原演示幻灯片中的第 29 张提出了 `RMSProp `_ 。等式如下所示: .. 
math:: r(w, t) & = \rho r(w, t-1) + (1 - \rho)(\nabla Q_{i}(w))^2\\ @@ -29,22 +29,22 @@ RMSProp v(w, t) & = \beta v(w, t-1) + \frac{\eta} {\sqrt{r(w,t) - (g(w, t))^2 +\epsilon}} \nabla Q_{i}(w)\\ w & = w - v(w, t) -其中,:math:`ρ` 是超参数,典型值为0.9,0.95等。:math:`beta` 是动量术语。:math:`epsilon` 是一个平滑项,用于避免除零,通常设置在1e-4到1e-8的范围内。 +其中,:math:`ρ` 是超参数,典型值为 0.9,0.95 等。:math:`beta` 是动量术语。:math:`epsilon` 是一个平滑项,用于避免除零,通常设置在 1e-4 到 1e-8 的范围内。 参数 :::::::::::: - **learning_rate** (float) - 全局学习率。 - - **rho** (float,可选) - rho是等式中的 :math:`rho`,默认值0.95。 - - **epsilon** (float,可选) - 等式中的epsilon是平滑项,避免被零除,默认值1e-6。 - - **momentum** (float,可选) - 方程中的β是动量项,默认值0.0。 - - **centered** (bool,可选) - 如果为True,则通过梯度的估计方差,对梯度进行归一化;如果False,则由未centered的第二个moment归一化。将此设置为True有助于模型训练,但会消耗额外计算和内存资源。默认为False。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是float类型的L2正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 + - **rho** (float,可选) - rho 是等式中的 :math:`rho`,默认值 0.95。 + - **epsilon** (float,可选) - 等式中的 epsilon 是平滑项,避免被零除,默认值 1e-6。 + - **momentum** (float,可选) - 方程中的β是动量项,默认值 0.0。 + - **centered** (bool,可选) - 如果为 True,则通过梯度的估计方差,对梯度进行归一化;如果 False,则由未 centered 的第二个 moment 归一化。将此设置为 True 有助于模型训练,但会消耗额外计算和内存资源。默认为 False。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **weight_decay** (float|WeightDecayRegularizer,可选) - 正则化方法。可以是 float 类型的 L2 正则化系数或者正则化策略::ref:`cn_api_fluid_regularizer_L1Decay` 、 :ref:`cn_api_fluid_regularizer_L2Decay`。如果一个参数已经在 :ref:`cn_api_fluid_ParamAttr` 中设置了正则化,这里的正则化设置将被忽略; - 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为None,表示没有正则化。 + 如果没有在 :ref:`cn_api_fluid_ParamAttr` 中设置正则化,这里的设置才会生效。默认值为 None,表示没有正则化。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 :ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 + 默认值为 None,此时将不进行梯度裁剪。 - 
**name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -74,7 +74,7 @@ step() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -100,18 +100,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 组成的集合,默认值为 None。 **返回** - tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -137,7 +137,7 @@ clear_gradients() .. 
note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -162,9 +162,9 @@ set_lr(value) .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler时,无法使用该API手动设置学习率,因为这将导致冲突。 +手动设置当前 ``optimizer`` 的学习率。当使用_LRScheduler 时,无法使用该 API 手动设置学习率,因为这将导致冲突。 **参数** @@ -202,9 +202,9 @@ get_lr() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 -获取当前步骤的学习率。当不使用_LRScheduler时,每次调用的返回值都相同,否则返回当前步骤的学习率。 +获取当前步骤的学习率。当不使用_LRScheduler 时,每次调用的返回值都相同,否则返回当前步骤的学习率。 **返回** diff --git a/docs/api/paddle/optimizer/SGD_cn.rst b/docs/api/paddle/optimizer/SGD_cn.rst index 451706e2275..a4ebd1e9831 100644 --- a/docs/api/paddle/optimizer/SGD_cn.rst +++ b/docs/api/paddle/optimizer/SGD_cn.rst @@ -11,16 +11,16 @@ SGD \\param\_out=param-learning\_rate*grad\\ -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 参数 :::::::::::: - - **learning_rate** (float|_LRScheduler,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler类,默认值为0.001。 - - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为None,这时所有的参数都将被优化。 - - **weight_decay** (float|Tensor,可选) - 权重衰减系数,是一个float类型或者shape为[1],数据类型为float32的Tensor类型。默认值为0.01。 + - **learning_rate** (float|_LRScheduler,可选) - 学习率,用于参数更新的计算。可以是一个浮点型值或者一个_LRScheduler 类,默认值为 0.001。 + - **parameters** (list,可选) - 指定优化器需要优化的参数。在动态图模式下必须提供该参数;在静态图模式下默认值为 None,这时所有的参数都将被优化。 + - **weight_decay** (float|Tensor,可选) - 权重衰减系数,是一个 float 类型或者 shape 为[1],数据类型为 float32 的 Tensor 类型。默认值为 0.01。 - **grad_clip** (GradientClipBase,可选) – 梯度裁剪的策略,支持三种裁剪策略::ref:`paddle.nn.ClipGradByGlobalNorm ` 、 :ref:`paddle.nn.ClipGradByNorm ` 、 
:ref:`paddle.nn.ClipGradByValue ` 。 - 默认值为None,此时将不进行梯度裁剪。 + 默认值为 None,此时将不进行梯度裁剪。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -50,7 +50,7 @@ step() .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 执行一次优化器并进行参数更新。 @@ -75,18 +75,18 @@ step() minimize(loss, startup_program=None, parameters=None, no_grad_set=None) ''''''''' -为网络添加反向计算过程,并根据反向计算所得的梯度,更新parameters中的Parameters,最小化网络损失值loss。 +为网络添加反向计算过程,并根据反向计算所得的梯度,更新 parameters 中的 Parameters,最小化网络损失值 loss。 **参数** - **loss** (Tensor) – 需要最小化的损失值变量。 - - **startup_program** (Program,可选) – 用于初始化parameters中参数的 :ref:`cn_api_fluid_Program`,默认值为None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 - - **parameters** (list,可选) – 待更新的Parameter或者Parameter.name组成的列表,默认值为None,此时将更新所有的Parameter。 - - **no_grad_set** (set,可选) – 不需要更新的Parameter或者Parameter.name组成的集合,默认值为None。 + - **startup_program** (Program,可选) – 用于初始化 parameters 中参数的 :ref:`cn_api_fluid_Program`,默认值为 None,此时将使用 :ref:`cn_api_fluid_default_startup_program`。 + - **parameters** (list,可选) – 待更新的 Parameter 或者 Parameter.name 组成的列表,默认值为 None,此时将更新所有的 Parameter。 + - **no_grad_set** (set,可选) – 不需要更新的 Parameter 或者 Parameter.name 组成的集合,默认值为 None。 **返回** - tuple(optimize_ops, params_grads),其中optimize_ops为参数优化OP列表;param_grads为由(param, param_grad)组成的列表,其中param和param_grad分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 + tuple(optimize_ops, params_grads),其中 optimize_ops 为参数优化 OP 列表;param_grads 为由(param, param_grad)组成的列表,其中 param 和 param_grad 分别为参数和参数的梯度。在静态图模式下,该返回值可以加入到 ``Executor.run()`` 接口的 ``fetch_list`` 参数中,若加入,则会重写 ``use_prune`` 参数为 True,并根据 ``feed`` 和 ``fetch_list`` 进行剪枝,详见 ``Executor`` 的文档。 **代码示例** @@ -113,7 +113,7 @@ clear_grad() .. 
note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 清除需要优化的参数的梯度。 @@ -139,4 +139,4 @@ set_lr(value) .. note:: -该API只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 +该 API 只在 `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式下生效。 diff --git a/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst b/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst index 2a48352deef..2acd1601dab 100644 --- a/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst @@ -18,7 +18,7 @@ CosineAnnealingDecay \end{aligned} -:math:`\eta_{max}` 的初始值为 ``learning_rate``, :math:`T_{cur}` 是SGDR(重启训练SGD)训练过程中的当前训练轮数。SGDR的训练方法可以参考论文, +:math:`\eta_{max}` 的初始值为 ``learning_rate``, :math:`T_{cur}` 是 SGDR(重启训练 SGD)训练过程中的当前训练轮数。SGDR 的训练方法可以参考论文, 这里只是实现了 ``cosine annealing`` 动态学习率,热启训练部分没有实现。 相关论文:`SGDR: Stochastic Gradient Descent with Warm Restarts `_ @@ -26,10 +26,10 @@ CosineAnnealingDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,也就是公式中的 :math:`\eta_{max}`,数据类型为Python float。 + - **learning_rate** (float) - 初始学习率,也就是公式中的 :math:`\eta_{max}`,数据类型为 Python float。 - **T_max** (float|int) - 训练的上限轮数,是余弦衰减周期的一半。 - - **eta_min** (float|int,可选) - 学习率的最小值,即公式中的 :math:`\eta_{min}`。默认值为0。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **eta_min** (float|int,可选) - 学习率的最小值,即公式中的 :math:`\eta_{min}`。默认值为 0。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -92,11 +92,11 @@ CosineAnnealingDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 
``epoch`` 数。 + - **epoch** (int,可选)- 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst b/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst index 9567dbdbbc6..2ee51df8394 100644 --- a/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst +++ b/docs/api/paddle/optimizer/lr/CyclicLR_cn.rst @@ -13,20 +13,20 @@ CyclicLR 内置了三种学习率缩放策略:**triangular**:没有任何缩放的三角循环。**triangular2**:每个三角循环里将初始幅度缩放一半。**exp_range**:每个循环中将初始幅度按照指数函数进行缩放,公式为 :math:`gamma^{iterations}`。 -初始幅度由 `max_learning_rate - base_learning_rate` 定义。同时需要注意CyclicLR应在每次迭代后调用 ``step`` 方法。 +初始幅度由 `max_learning_rate - base_learning_rate` 定义。同时需要注意 CyclicLR 应在每次迭代后调用 ``step`` 方法。 参数 :::::::::::: - **base_learning_rate** (float) - 初始学习率,也是学习率变化的下边界。论文中建议将其设置为最大学习率的三分之一或四分之一。 - **max_learning_rate** (float) - 最大学习率,需要注意的是,实际的学习率由 ``base_learning_rate`` 与初始幅度的缩放求和而来,因此实际学习率可能达不到 ``max_learning_rate`` 。 - - **step_size_up** (int) - 学习率从初始学习率增长到最大学习率所需步数。每个循环总的步长 ``step_size`` 由 ``step_size_up + step_size_down`` 定义,论文中建议将 ``step_size`` 设置为单个epoch中步长的3或4倍。 + - **step_size_up** (int) - 学习率从初始学习率增长到最大学习率所需步数。每个循环总的步长 ``step_size`` 由 ``step_size_up + step_size_down`` 定义,论文中建议将 ``step_size`` 设置为单个 epoch 中步长的 3 或 4 倍。 - **step_size_down** (int,可选) - 学习率从最大学习率下降到初始学习率所需步数。若未指定,则其值默认等于 ``step_size_up`` 。 - - **mode** (str,可选) - 可以是triangular、triangular2或者exp_range,对应策略已在上文描述,当scale_fn被指定时时,该参数将被忽略。默认值为triangular。 - - **exp_gamma** (float,可选) - exp_range缩放函数中的常量。默认值为1.0。 - - **sacle_fn** (function,可选) - 一个有且仅有单个参数的函数,且对于任意的输入x,都必须满足0 ≤ scale_fn(x) ≤ 1;如果该参数被指定,则会忽略mode参数。默认值为 ``False`` 。 - - **scale_mode** (str,可选) - cycle或者iterations,表示缩放函数使用cycle数或iterations数作为输入。默认值为cycle。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **mode** (str,可选) - 可以是 triangular、triangular2 或者 exp_range,对应策略已在上文描述,当 scale_fn 被指定时,该参数将被忽略。默认值为 triangular。 + - **exp_gamma** (float,可选) - exp_range 缩放函数中的常量。默认值为 1.0。 + - **scale_fn** (function,可选) - 一个有且仅有单个参数的函数,且对于任意的输入
x,都必须满足 0 ≤ scale_fn(x) ≤ 1;如果该参数被指定,则会忽略 mode 参数。默认值为 ``False`` 。 + - **scale_mode** (str,可选) - cycle 或者 iterations,表示缩放函数使用 cycle 数或 iterations 数作为输入。默认值为 cycle。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回: @@ -43,11 +43,11 @@ COPY-FROM: paddle.optimizer.lr.CyclicLR step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst b/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst index a0818576404..0f655cd9531 100644 --- a/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst @@ -16,9 +16,9 @@ ExponentialDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 - - **gamma** (float) - 衰减率,``new_lr = origin_lr * gamma`` 。gamma应该在区间 (0.0, 1.0) 内。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 + - **gamma** (float) - 衰减率,``new_lr = origin_lr * gamma`` 。gamma 应该在区间 (0.0, 1.0) 内。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -81,11 +81,11 @@ ExponentialDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选)- 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 
**返回** diff --git a/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst b/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst index d97d2dc643d..2a28e133abf 100644 --- a/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst @@ -18,9 +18,9 @@ InverseTimeDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 - **gamma** (float) - 衰减率,``new_lr = origin_lr * gamma`` 。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -82,11 +82,11 @@ InverseTimeDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选)- 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/LRScheduler_cn.rst b/docs/api/paddle/optimizer/lr/LRScheduler_cn.rst index cc58ec927b7..43c50132df5 100644 --- a/docs/api/paddle/optimizer/lr/LRScheduler_cn.rst +++ b/docs/api/paddle/optimizer/lr/LRScheduler_cn.rst @@ -7,7 +7,7 @@ LRScheduler 学习率策略的基类。定义了所有学习率调整策略的公共接口。 -目前在paddle中基于该基类,已经实现了14种策略,分别为: +目前在 paddle 中基于该基类,已经实现了 14 种策略,分别为: * :code:`NoamDecay`:诺姆衰减,相关算法请参考 `《Attention Is All You Need》 `_ 。请参考 :ref:`cn_api_paddle_optimizer_lr_NoamDecay`。 @@ -19,25 +19,25 @@ LRScheduler * :code:`PolynomialDecay`:多项式衰减,即得到的学习率为初始学习率和给定最终学习之间由多项式计算权重定比分点的插值。请参考 :ref:`cn_api_paddle_optimizer_lr_PolynomialDecay`。 -* :code:`PiecewiseDecay`:分段衰减,即由给定step数分段呈阶梯状衰减,每段内学习率相同。请参考 :ref:`cn_api_paddle_optimizer_lr_PiecewiseDecay`。 +* :code:`PiecewiseDecay`:分段衰减,即由给定 step 数分段呈阶梯状衰减,每段内学习率相同。请参考 
:ref:`cn_api_paddle_optimizer_lr_PiecewiseDecay`。 -* :code:`CosineAnnealingDecay`:余弦式衰减,即学习率随step数变化呈余弦函数周期变化。请参考 :ref:`cn_api_paddle_optimizer_lr_CosineAnnealingDecay`。 +* :code:`CosineAnnealingDecay`:余弦式衰减,即学习率随 step 数变化呈余弦函数周期变化。请参考 :ref:`cn_api_paddle_optimizer_lr_CosineAnnealingDecay`。 -* :code:`LinearWarmup`:学习率随step数线性增加到指定学习率。请参考 :ref:`cn_api_paddle_optimizer_lr_LinearWarmup`。 +* :code:`LinearWarmup`:学习率随 step 数线性增加到指定学习率。请参考 :ref:`cn_api_paddle_optimizer_lr_LinearWarmup`。 -* :code:`StepDecay`:学习率每隔固定间隔的step数进行衰减,需要指定step的间隔数。请参考 :ref:`cn_api_paddle_optimizer_lr_StepDecay`。 +* :code:`StepDecay`:学习率每隔固定间隔的 step 数进行衰减,需要指定 step 的间隔数。请参考 :ref:`cn_api_paddle_optimizer_lr_StepDecay`。 -* :code:`MultiStepDecay`:学习率在特定的step数时进行衰减,需要指定衰减时的节点位置。请参考 :ref:`cn_api_paddle_optimizer_lr_MultiStepDecay`。 +* :code:`MultiStepDecay`:学习率在特定的 step 数时进行衰减,需要指定衰减时的节点位置。请参考 :ref:`cn_api_paddle_optimizer_lr_MultiStepDecay`。 -* :code:`LambdaDecay`:学习率根据自定义的lambda函数进行衰减。请参考 :ref:`cn_api_paddle_optimizer_lr_LambdaDecay`。 +* :code:`LambdaDecay`:学习率根据自定义的 lambda 函数进行衰减。请参考 :ref:`cn_api_paddle_optimizer_lr_LambdaDecay`。 -* :code:`ReduceOnPlateau`:学习率根据当前监控指标(一般为loss)来进行自适应调整,当loss趋于稳定时衰减学习率。请参考 :ref:`cn_api_paddle_optimizer_lr_ReduceOnPlateau`。 +* :code:`ReduceOnPlateau`:学习率根据当前监控指标(一般为 loss)来进行自适应调整,当 loss 趋于稳定时衰减学习率。请参考 :ref:`cn_api_paddle_optimizer_lr_ReduceOnPlateau`。 -* :code:`MultiplicativeDecay`:每次将当前学习率乘以lambda函数得到下一个学习率。请参考 :ref:`cn_api_paddle_optimizer_lr_MultiplicativeDecay`。 +* :code:`MultiplicativeDecay`:每次将当前学习率乘以 lambda 函数得到下一个学习率。请参考 :ref:`cn_api_paddle_optimizer_lr_MultiplicativeDecay`。 -* :code:`OneCycleLR`: One Cycle衰减,学习率上升至最大,再下降至最小。请参考 :ref:`cn_api_paddle_optimizer_lr_OneCycleLR`。 +* :code:`OneCycleLR`: One Cycle 衰减,学习率上升至最大,再下降至最小。请参考 :ref:`cn_api_paddle_optimizer_lr_OneCycleLR`。 -* :code:`CyclicLR`: Cyclic学习率衰减,其将学习率变化的过程视为一个又一个循环,学习率根据固定的频率在最小和最大学习率之间不停变化。请参考 :ref:`cn_api_paddle_optimizer_lr_CyclicLR`。 +* :code:`CyclicLR`: Cyclic 
学习率衰减,其将学习率变化的过程视为一个又一个循环,学习率根据固定的频率在最小和最大学习率之间不停变化。请参考 :ref:`cn_api_paddle_optimizer_lr_CyclicLR`。 你可以继承该基类实现任意的学习率策略,导出基类的方法为 ``form paddle.optimizer.lr import LRScheduler`` , 必须要重写该基类的 ``get_lr()`` 函数,否则会抛出 ``NotImplementedError`` 异常。 @@ -45,8 +45,8 @@ LRScheduler 参数 :::::::::::: - - **learning_rate** (float,可选) - 初始学习率,数据类型为Python float。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **learning_rate** (float,可选) - 初始学习率,数据类型为 Python float。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -90,11 +90,11 @@ LRScheduler step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选)- 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** @@ -102,7 +102,7 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 **代码示例** -请参考 ``基类LRScheduler`` 的任意子类实现,这里以 ``StepLR`` 为例进行了示例: +请参考 ``基类 LRScheduler`` 的任意子类实现,这里以 ``StepLR`` 为例进行了示例: .. 
code-block:: python @@ -127,7 +127,7 @@ step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用 get_lr() ''''''''' -如果一个子类继承了 ``基类LRScheduler``,则用户必须重写方法 ``get_lr()``,否则,将会抛出 ``NotImplementedError`` 异常, +如果一个子类继承了 ``基类 LRScheduler``,则用户必须重写方法 ``get_lr()``,否则,将会抛出 ``NotImplementedError`` 异常, 上述给出了实现 ``StepLR`` 的一个简单示例。 @@ -135,6 +135,6 @@ _state_keys() ''''''''' 该函数通过定义字典 ``self.keys`` 来设置 ``optimizer.state_dict()`` 时的存储对象,默认情况下:``self.keys=['last_epoch', 'last_lr']``,其中 ``last_epoch`` -是当前的epoch数,``last_lr`` 是当前的学习率值。 +是当前的 epoch 数,``last_lr`` 是当前的学习率值。 如果需要改变默认的行为,用户需要重写该方法,来重新定义字典 ``self.keys``,一般无需重新设置。 diff --git a/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst b/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst index bdf0986a928..a2aba4c4476 100644 --- a/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst @@ -23,9 +23,9 @@ LambdaDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 - - **lr_lambda** (function):lr_lambda 为一个lambda函数,其通过 epoch 计算出一个因子,该因子会乘以初始学习率。 - - **last_epoch** (int,可选):上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 + - **lr_lambda** (function):lr_lambda 为一个 lambda 函数,其通过 epoch 计算出一个因子,该因子会乘以初始学习率。 + - **last_epoch** (int,可选):上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选):如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -87,11 +87,11 @@ LambdaDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选)- 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst b/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst index f862a1d8385..ecf4609b038 100644 --- 
a/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst +++ b/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst @@ -7,28 +7,28 @@ LinearWarmup 该接口提供一种学习率优化策略-线性学习率热身(warm up)对学习率进行初步调整。在正常调整学习率之前,先逐步增大学习率。 -当训练步数小于热身步数(warmup_steps)时,学习率lr按如下方式更新: +当训练步数小于热身步数(warmup_steps)时,学习率 lr 按如下方式更新: .. math:: lr = start\_lr + (end\_lr - start\_lr) * \frac{epoch}{warmup\_steps} -当训练步数大于等于热身步数(warmup_steps)时,学习率lr为: +当训练步数大于等于热身步数(warmup_steps)时,学习率 lr 为: .. math:: lr = learning\_rate -其中learning_rate为热身之后的学习率,可以是python的float类型或者 ``_LRScheduler`` 的任意子类。 +其中 learning_rate 为热身之后的学习率,可以是 python 的 float 类型或者 ``_LRScheduler`` 的任意子类。 参数 :::::::::::: - - **learning rate** (float|_LRScheduler) - 热启训练之后的学习率,可以是python的float类型或者 ``_LRScheduler`` 的任意子类。 - - **warmup_steps** (int) - 进行warm up过程的步数。 - - **start_lr** (float) - warm up的起始学习率。 - - **end_lr** (float) - warm up的最终学习率。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **learning_rate** (float|_LRScheduler) - 热启训练之后的学习率,可以是 python 的 float 类型或者 ``_LRScheduler`` 的任意子类。 + - **warmup_steps** (int) - 进行 warm up 过程的步数。 + - **start_lr** (float) - warm up 的起始学习率。 + - **end_lr** (float) - warm up 的最终学习率。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 @@ -93,11 +93,11 @@ LinearWarmup step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst b/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst index 7299f2b3b33..ba969d3adba 100644 --- a/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst @@
-23,10 +23,10 @@ MultiStepDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 - **milestones** (list) - 轮数下标列表。必须递增。 - - **gamma** (float,可选) - 衰减率,``new_lr = origin_lr * gamma``,衰减率必须小于等于1.0,默认值为0.1。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **gamma** (float,可选) - 衰减率,``new_lr = origin_lr * gamma``,衰减率必须小于等于 1.0,默认值为 0.1。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 @@ -89,11 +89,11 @@ MultiStepDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst b/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst index f4a04972418..78d69338bc7 100644 --- a/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst @@ -23,9 +23,9 @@ MultiplicativeDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 - - **lr_lambda** (function)- lr_lambda 为一个lambda函数,其通过 epoch 计算出一个因子,该因子会乘以当前学习率。 - - **last_epoch** (int,可选)- 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 + - **lr_lambda** (function)- lr_lambda 为一个 lambda 函数,其通过 epoch 计算出一个因子,该因子会乘以当前学习率。 + - **last_epoch** (int,可选)- 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选)- 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -60,11 +60,11 @@ MultiplicativeDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 
函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选)- 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选)- 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst b/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst index 47aedfe5825..62049530e27 100644 --- a/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst @@ -16,9 +16,9 @@ NaturalExpDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 - - **gamma** (float) - 衰减率,gamma应该大于0.0,才能使学习率衰减。默认值为0.1。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 + - **gamma** (float) - 衰减率,gamma 应该大于 0.0,才能使学习率衰减。默认值为 0.1。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -80,11 +80,11 @@ NaturalExpDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst b/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst index e26bfa16c18..24d1826186d 100644 --- a/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst @@ -6,9 +6,9 @@ NoamDecay .. py:class:: paddle.optimizer.lr.NoamDecay(d_model, warmup_steps, learning_rate=1.0, last_epoch=-1, verbose=False) -该接口提供Noam衰减学习率的策略。 +该接口提供 Noam 衰减学习率的策略。 -Noam衰减的计算方式如下: +Noam 衰减的计算方式如下: .. 
math:: @@ -19,10 +19,10 @@ Noam衰减的计算方式如下: 参数 :::::::::::: - - **d$_{model}$** (int) - 模型的输入、输出向量特征维度,为超参数。数据类型为Python int。 - - **warmup_steps** (int) - 预热步数,为超参数。数据类型为Python int。 - - **learning_rate** (float) - 初始学习率,数据类型为Python float。默认值为1.0。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **d$_{model}$** (int) - 模型的输入、输出向量特征维度,为超参数。数据类型为 Python int。 + - **warmup_steps** (int) - 预热步数,为超参数。数据类型为 Python int。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。默认值为 1.0。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 `True`,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -86,11 +86,11 @@ Noam衰减的计算方式如下: step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/OneCycleLR_cn.rst b/docs/api/paddle/optimizer/lr/OneCycleLR_cn.rst index 3d94fb94c38..974a41f75ac 100644 --- a/docs/api/paddle/optimizer/lr/OneCycleLR_cn.rst +++ b/docs/api/paddle/optimizer/lr/OneCycleLR_cn.rst @@ -11,21 +11,21 @@ OneCycleLR 相关论文:`Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates `_ -注意,本调度器默认行为参考fastai仓库,其声称二阶段拥有比三阶段更好的效果。设置 ``three_phase=True`` 可以与论文中所描述的行为保持一致。 +注意,本调度器默认行为参考 fastai 仓库,其声称二阶段拥有比三阶段更好的效果。设置 ``three_phase=True`` 可以与论文中所描述的行为保持一致。 同时也请注意本调度器需要在每次迭代后调用 ``step`` 方法。 参数 :::::::::::: - - **max_learning_rate** (float) - 最大学习率,学习率变化的上边界,数据类型为Python float。功能上其通过 ``divide_factor`` 定义了初始学习率。 + - **max_learning_rate** (float) - 最大学习率,学习率变化的上边界,数据类型为 Python float。功能上其通过 ``divide_factor`` 定义了初始学习率。 - **total_steps** (int,可选) - 训练过程中总的迭代数。 - - **divide_factor** (float,可选) - 该参数用于推断初始学习率,公式为initial_learning_rate = 
max_learning_rate / divide_factor。默认值为25。 + - **divide_factor** (float,可选) - 该参数用于推断初始学习率,公式为 initial_learning_rate = max_learning_rate / divide_factor。默认值为 25。 - **end_learning_rate** (float,可选) - 最小学习率,学习率变化的下边界。它应该是一个远小于初始学习率的数。 - - **phase_pct** (float) - 学习率从初始学习率增长到最大学习率所需迭代数占总迭代数的比例。默认值为0.3。 + - **phase_pct** (float) - 学习率从初始学习率增长到最大学习率所需迭代数占总迭代数的比例。默认值为 0.3。 - **anneal_strategy** (str,可选) - 调整学习率的策略。必须是 ( ``cos`` , ``linear`` )其中之一,``cos`` 表示使用余弦退火,``linear`` 表示线性退火。默认值为 ``cos`` 。 - **three_phase** (bool,可选) - 是否使用三阶段调度策略。如果是 ``True``,学习率将先从初始学习率上升到最大学习率,再从最大学习率下降到初始学习率(这两阶段所需要的迭代数是一致的),最后学习率会下降至最小学习率。如果是 ``False``,学习率在上升至最大学习率之后,直接下降至最小学习率。默认值为 ``False`` 。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -42,11 +42,11 @@ COPY-FROM: paddle.optimizer.lr.OneCycleLR step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/PiecewiseDecay_cn.rst b/docs/api/paddle/optimizer/lr/PiecewiseDecay_cn.rst index 9e6fa065a65..fe1e0642981 100644 --- a/docs/api/paddle/optimizer/lr/PiecewiseDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/PiecewiseDecay_cn.rst @@ -6,14 +6,14 @@ PiecewiseDecay .. py:class:: paddle.optimizer.lr.PiecewiseDecay(boundaries, values, last_epoch=-1, verbose=False) -该接口提供分段设置学习率的策略。`boundaries` 表示学习率变化的边界步数,对应epoch的值,`values` 表示学习率变化的值。 +该接口提供分段设置学习率的策略。`boundaries` 表示学习率变化的边界步数,对应 epoch 的值,`values` 表示学习率变化的值。 过程可以描述如下: .. 
code-block:: text - boundaries = [100, 200] # epoch仅代表当前步数,无实义 - values = [1.0, 0.5, 0.1] # 在第[0,100), [100,200), [200,+∞)分别对应value中学习率的值 + boundaries = [100, 200] # epoch 仅代表当前步数,无实义 + values = [1.0, 0.5, 0.1] # 在第[0,100), [100,200), [200,+∞)分别对应 value 中学习率的值 learning_rate = 1.0 if epoch < 100 learning_rate = 0.5 if 100 <= epoch < 200 @@ -24,9 +24,9 @@ PiecewiseDecay 参数 :::::::::::: - - **boundaries** (list) - 指定学习率的边界值列表。列表的数据元素为Python int类型。 - - **values** (list) - 学习率列表。数据元素类型为Python float的列表。与边界值列表有对应的关系。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **boundaries** (list) - 指定学习率的边界值列表。列表的数据元素为 Python int 类型。 + - **values** (list) - 学习率列表。数据元素类型为 Python float 的列表。与边界值列表有对应的关系。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 ``True``,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -88,11 +88,11 @@ PiecewiseDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst b/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst index 9e7ba71fdd4..2be30a55785 100644 --- a/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst @@ -8,7 +8,7 @@ PolynomialDecay 该接口提供学习率按多项式衰减的策略。通过多项式衰减函数,使得学习率值逐步从初始的 ``learning_rate``,衰减到 ``end_lr`` 。 -若cycle为True,则计算公式为: +若 cycle 为 True,则计算公式为: .. math:: @@ -16,7 +16,7 @@ PolynomialDecay new\_learning\_rate & = (learning\_rate-end\_lr)*(1-\frac{epoch}{decay\_steps})^{power}+end\_lr -若cycle为False,则计算公式为: +若 cycle 为 False,则计算公式为: .. 
math:: @@ -28,12 +28,12 @@ PolynomialDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 - **decay_steps** (int) - 进行衰减的步长,这个决定了衰减周期。 - - **end_lr** (float,可选)- 最小的最终学习率。默认值为0.0001。 - - **power** (float,可选) - 多项式的幂,power应该大于0.0,才能使学习率衰减。默认值为1.0。 - - **cycle** (bool,可选) - 学习率下降后是否重新上升。若为True,则学习率衰减到最低学习率值时,会重新上升。若为False,则学习率单调递减。默认值为False。 - - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **end_lr** (float,可选)- 最小的最终学习率。默认值为 0.0001。 + - **power** (float,可选) - 多项式的幂,power 应该大于 0.0,才能使学习率衰减。默认值为 1.0。 + - **cycle** (bool,可选) - 学习率下降后是否重新上升。若为 True,则学习率衰减到最低学习率值时,会重新上升。若为 False,则学习率单调递减。默认值为 False。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 `True`,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -96,11 +96,11 @@ PolynomialDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/ReduceOnPlateau_cn.rst b/docs/api/paddle/optimizer/lr/ReduceOnPlateau_cn.rst index 59adabab7bd..198732cd134 100644 --- a/docs/api/paddle/optimizer/lr/ReduceOnPlateau_cn.rst +++ b/docs/api/paddle/optimizer/lr/ReduceOnPlateau_cn.rst @@ -5,25 +5,25 @@ ReduceOnPlateau .. 
py:class:: paddle.optimizer.lr.ReduceOnPlateau(learning_rate, mode='min', factor=0.1, patience=10, threshold=1e-4, threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-8, verbose=False) -`loss` 自适应的学习率衰减策略。默认情况下,当 ``loss`` 停止下降时,降低学习率。其思想是:一旦模型表现不再提升,将学习率降低2-10倍对模型的训练往往有益。 +`loss` 自适应的学习率衰减策略。默认情况下,当 ``loss`` 停止下降时,降低学习率。其思想是:一旦模型表现不再提升,将学习率降低 2-10 倍对模型的训练往往有益。 -`loss` 是传入到该类方法 ``step`` 中的 ``metrics`` 参数,其可以是float或者shape为[1]的Tensor或numpy\.ndarray。如果 loss 停止下降超过 ``patience`` 个epoch,学习率将会衰减为 ``learning_rate * factor`` (特殊地,``mode`` 也可以被设置为 ``'max'``,此时逻辑相反)。 +`loss` 是传入到该类方法 ``step`` 中的 ``metrics`` 参数,其可以是 float 或者 shape 为[1]的 Tensor 或 numpy\.ndarray。如果 loss 停止下降超过 ``patience`` 个 epoch,学习率将会衰减为 ``learning_rate * factor`` (特殊地,``mode`` 也可以被设置为 ``'max'``,此时逻辑相反)。 -此外,每降低一次学习率后,将会进入一个时长为 ``cooldown`` 个epoch的冷静期,在冷静期内,将不会监控 ``loss`` 的变化情况,也不会衰减。在冷静期之后,会继续监控 ``loss`` 的上升或下降。 +此外,每降低一次学习率后,将会进入一个时长为 ``cooldown`` 个 epoch 的冷静期,在冷静期内,将不会监控 ``loss`` 的变化情况,也不会衰减。在冷静期之后,会继续监控 ``loss`` 的上升或下降。 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 - **mode** (str,可选) - ``'min'`` 和 ``'max'`` 之一。通常情况下,为 ``'min'``,此时当 ``loss`` 停止下降时学习率将衰减。默认:``'min'`` 。 (注意:仅在特殊用法时,可以将其设置为 ``'max'``,此时判断逻辑相反,``loss`` 停止上升学习率才衰减)。 - - **factor** (float,可选) - 学习率衰减的比例。``new_lr = origin_lr * factor``,它是值小于1.0的float型数字,默认:0.1。 - - **patience** (int,可选) - 当 ``loss`` 连续 ``patience`` 个epoch没有下降(对应mode: 'min')或上升(对应mode: 'max')时,学习率才会衰减。默认:10。 + - **factor** (float,可选) - 学习率衰减的比例。``new_lr = origin_lr * factor``,它是值小于 1.0 的 float 型数字,默认:0.1。 + - **patience** (int,可选) - 当 ``loss`` 连续 ``patience`` 个 epoch 没有下降(对应 mode: 'min')或上升(对应 mode: 'max')时,学习率才会衰减。默认:10。 - **threshold** (float,可选) - ``threshold`` 和 ``threshold_mode`` 两个参数将会决定 ``loss`` 最小变化的阈值。小于该阈值的变化将会被忽视。默认:1e-4。 - - **threshold_mode** (str,可选) - ``'rel'`` 和 ``'abs'`` 之一。在 ``'rel'`` 模式下,``loss`` 最小变化的阈值是 ``last_loss * threshold``,其中 ``last_loss`` 是 ``loss`` 在上个epoch的值。在 
``'abs'`` 模式下,``loss`` 最小变化的阈值是 ``threshold``。默认:``'rel'`` 。 + - **threshold_mode** (str,可选) - ``'rel'`` 和 ``'abs'`` 之一。在 ``'rel'`` 模式下,``loss`` 最小变化的阈值是 ``last_loss * threshold``,其中 ``last_loss`` 是 ``loss`` 在上个 epoch 的值。在 ``'abs'`` 模式下,``loss`` 最小变化的阈值是 ``threshold``。默认:``'rel'`` 。 - **cooldown** (int,可选) - 在学习率每次衰减之后,会进入时长为 ``cooldown`` 个 step 的冷静期。默认:0。 - **min_lr** (float,可选) - 最小的学习率。衰减后的学习率最低下界限。默认:0。 - - **epsilon** (float,可选) - 如果新旧学习率间的差异小于epsilon,则不会更新。默认值:1e-8。 + - **epsilon** (float,可选) - 如果新旧学习率间的差异小于 epsilon,则不会更新。默认值:1e-8。 - **verbose** (bool,可选) - 如果是 `True`,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 返回 @@ -86,12 +86,12 @@ ReduceOnPlateau step(metrics, epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,其根据传入的 metrics 调整optimizer中的学习率,调整后的学习率将会在下一个 ``step`` 时生效。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,其根据传入的 metrics 调整 optimizer 中的学习率,调整后的学习率将会在下一个 ``step`` 时生效。 **参数** - - **metrics** (Tensor|numpy.ndarray|float)- 用来判断是否需要降低学习率。如果 ``loss`` 连续 ``patience`` 个 ``step`` 没有下降,将会降低学习率。可以是Tensor或者numpy.array,但是shape必须为[1],也可以是Python的float类型。 - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **metrics** (Tensor|numpy.ndarray|float)- 用来判断是否需要降低学习率。如果 ``loss`` 连续 ``patience`` 个 ``step`` 没有下降,将会降低学习率。可以是 Tensor 或者 numpy.array,但是 shape 必须为[1],也可以是 Python 的 float 类型。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/optimizer/lr/StepDecay_cn.rst b/docs/api/paddle/optimizer/lr/StepDecay_cn.rst index aa8e66eef28..bdb1dc70c59 100644 --- a/docs/api/paddle/optimizer/lr/StepDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/StepDecay_cn.rst @@ -23,10 +23,10 @@ StepDecay 参数 :::::::::::: - - **learning_rate** (float) - 初始学习率,数据类型为Python float。 + - **learning_rate** (float) - 初始学习率,数据类型为 Python float。 - **step_size** (int) - 学习率衰减轮数间隔。 - - **gamma** (float,可选) - 衰减率,``new_lr = origin_lr * gamma``,衰减率必须小于等于1.0,默认值为0.1。 - - **last_epoch** (int,可选) - 
上一轮的轮数,重启训练时设置为上一轮的epoch数。默认值为 -1,则为初始学习率。 + - **gamma** (float,可选) - 衰减率,``new_lr = origin_lr * gamma``,衰减率必须小于等于 1.0,默认值为 0.1。 + - **last_epoch** (int,可选) - 上一轮的轮数,重启训练时设置为上一轮的 epoch 数。默认值为 -1,则为初始学习率。 - **verbose** (bool,可选) - 如果是 `True`,则在每一轮更新时在标准输出 `stdout` 输出一条信息。默认值为 ``False`` 。 @@ -89,11 +89,11 @@ StepDecay step(epoch=None) ''''''''' -step函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据epoch数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 +step 函数需要在优化器的 `optimizer.step()` 函数之后调用,调用之后将会根据 epoch 数来更新学习率,更新之后的学习率将会在优化器下一轮更新参数时使用。 **参数** - - **epoch** (int,可选) - 指定具体的epoch数。默认值None,此时将会从-1自动累加 ``epoch`` 数。 + - **epoch** (int,可选) - 指定具体的 epoch 数。默认值 None,此时将会从-1 自动累加 ``epoch`` 数。 **返回** diff --git a/docs/api/paddle/outer_cn.rst b/docs/api/paddle/outer_cn.rst index df27ba3032d..e6c05a01ee9 100644 --- a/docs/api/paddle/outer_cn.rst +++ b/docs/api/paddle/outer_cn.rst @@ -6,21 +6,21 @@ outer .. py:function:: paddle.outer(x, y, name=None) -计算两个Tensor的外积。 +计算两个 Tensor 的外积。 -对于1维Tensor正常计算外积,对于大于1维的Tensor先展平为1维再计算外积。 +对于 1 维 Tensor 正常计算外积,对于大于 1 维的 Tensor 先展平为 1 维再计算外积。 参数 ::::::::: - - **x** (Tensor) - 一个N维Tensor或者标量Tensor。 - - **y** (Tensor) - 一个N维Tensor或者标量Tensor。 + - **x** (Tensor) - 一个 N 维 Tensor 或者标量 Tensor。 + - **y** (Tensor) - 一个 N 维 Tensor 或者标量 Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor, x、y的外积结果,Tensor shape为 [x.size, y.size]。 +Tensor, x、y 的外积结果,Tensor shape 为 [x.size, y.size]。 代码示例: :::::::::: diff --git a/docs/api/paddle/poisson_cn.rst b/docs/api/paddle/poisson_cn.rst index 20d33d3717f..9deba0c27cd 100644 --- a/docs/api/paddle/poisson_cn.rst +++ b/docs/api/paddle/poisson_cn.rst @@ -5,19 +5,19 @@ poisson .. py:function:: paddle.poisson(x, name=None) -以输入参数 ``x`` 为泊松分布的 `lambda` 参数,生成一个泊松分布的随机数Tensor,输出Tensor的shape和dtype与输入Tensor相同。 +以输入参数 ``x`` 为泊松分布的 `lambda` 参数,生成一个泊松分布的随机数 Tensor,输出 Tensor 的 shape 和 dtype 与输入 Tensor 相同。 .. 
math:: out_i \sim Poisson(lambda = x_i) 参数 ::::::::: - - **x** (Tensor) - Tensor的每个元素,对应泊松分布的 ``lambda`` 参数。数据类型为:float32、float64。 + - **x** (Tensor) - Tensor 的每个元素,对应泊松分布的 ``lambda`` 参数。数据类型为:float32、float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -`Tensor`:泊松分布的随机数Tensor,shape和dtype与输入 ``x`` 相同。 +`Tensor`:泊松分布的随机数 Tensor,shape 和 dtype 与输入 ``x`` 相同。 代码示例 diff --git a/docs/api/paddle/profiler/Overview_cn.rst b/docs/api/paddle/profiler/Overview_cn.rst index cbc6541139a..61f6d9c4659 100644 --- a/docs/api/paddle/profiler/Overview_cn.rst +++ b/docs/api/paddle/profiler/Overview_cn.rst @@ -4,22 +4,22 @@ paddle.profiler --------------------- paddle.profiler 目录下包含飞桨框架的性能分析器,提供对模型训练和推理过程的 -性能数据进行展示和统计分析的功能,帮助用户定位模型的性能瓶颈点。所提供的API具体如下: +性能数据进行展示和统计分析的功能,帮助用户定位模型的性能瓶颈点。所提供的 API 具体如下: -- :ref:`Profiler功能使用相关的枚举类 API ` -- :ref:`Profiler周期控制和性能数据IO API ` -- :ref:`Profiler性能分析器 API ` -- :ref:`Profiler性能数据自定义记录 API ` +- :ref:`Profiler 功能使用相关的枚举类 API ` +- :ref:`Profiler 周期控制和性能数据 IO API ` +- :ref:`Profiler 性能分析器 API ` +- :ref:`Profiler 性能数据自定义记录 API ` .. _about_profiler_enum: -Profiler功能使用相关的枚举类 API +Profiler 功能使用相关的枚举类 API :::::::::::::::::::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`ProfilerTarget ` ", "用来指定性能分析的设备" @@ -28,36 +28,36 @@ Profiler功能使用相关的枚举类 API .. _about_profiler_control: -Profiler周期控制和性能数据IO API +Profiler 周期控制和性能数据 IO API ::::::::::::::::::::::::::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`make_scheduler ` ", "用于生成性能分析器状态的调度器" - " :ref:`export_chrome_tracing ` ", "用于生成将性能数据保存到google chrome tracing文件的回调函数" - " :ref:`export_protobuf ` ", "用于生成将性能数据保存到protobuf文件的回调函数" - " :ref:`load_profiler_result ` ", "用于载入所保存到protobuf文件的性能数据" + " :ref:`export_chrome_tracing ` ", "用于生成将性能数据保存到 google chrome tracing 文件的回调函数" + " :ref:`export_protobuf ` ", "用于生成将性能数据保存到 protobuf 文件的回调函数" + " :ref:`load_profiler_result ` ", "用于载入所保存到 protobuf 文件的性能数据" .. _about_profiler_profiler: -Profiler性能分析器 API +Profiler 性能分析器 API ::::::::::::::::::::::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`Profiler ` ", "性能分析器" .. _about_profiler_record: -Profiler性能数据自定义记录 API +Profiler 性能数据自定义记录 API ::::::::::::::::::::::::::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`RecordEvent ` ", "用于用户自定义打点记录时间" diff --git a/docs/api/paddle/profiler/ProfilerState_cn.rst b/docs/api/paddle/profiler/ProfilerState_cn.rst index dc1e4252d1c..194095723a5 100644 --- a/docs/api/paddle/profiler/ProfilerState_cn.rst +++ b/docs/api/paddle/profiler/ProfilerState_cn.rst @@ -6,7 +6,7 @@ ProfilerState .. py:class:: paddle.profiler.ProfilerState -ProfilerState枚举类用来表示 :ref:`性能分析器 ` 的状态。 +ProfilerState 枚举类用来表示 :ref:`性能分析器 ` 的状态。 状态说明 :::::::::::: diff --git a/docs/api/paddle/profiler/ProfilerTarget_cn.rst b/docs/api/paddle/profiler/ProfilerTarget_cn.rst index ec418c6af1a..909a9feddbd 100644 --- a/docs/api/paddle/profiler/ProfilerTarget_cn.rst +++ b/docs/api/paddle/profiler/ProfilerTarget_cn.rst @@ -6,11 +6,11 @@ ProfilerTarget .. 
py:class:: paddle.profiler.ProfilerTarget -ProfilerTarget枚举类用来指定 :ref:`性能分析 ` 的设备。目前仅支持CPU,GPU和MLU。 +ProfilerTarget 枚举类用来指定 :ref:`性能分析 ` 的设备。目前仅支持 CPU,GPU 和 MLU。 设备说明 :::::::::::: - - **ProfilerTarget.CPU** - 性能分析对象为CPU上的活动。 - - **ProfilerTarget.GPU** - 性能分析对象为GPU上的活动。 - - **ProfilerTarget.MLU** - 性能分析对象为MLU上的活动。 + - **ProfilerTarget.CPU** - 性能分析对象为 CPU 上的活动。 + - **ProfilerTarget.GPU** - 性能分析对象为 GPU 上的活动。 + - **ProfilerTarget.MLU** - 性能分析对象为 MLU 上的活动。 diff --git a/docs/api/paddle/profiler/Profiler_cn.rst b/docs/api/paddle/profiler/Profiler_cn.rst index d2d908c0ce5..549bcc04eda 100644 --- a/docs/api/paddle/profiler/Profiler_cn.rst +++ b/docs/api/paddle/profiler/Profiler_cn.rst @@ -10,10 +10,10 @@ Profiler 参数 ::::::::: - - **targets** (list,可选) - 指定性能分析所要分析的设备,默认会自动分析所有存在且支持的设备,当前支持CPU,GPU和MLU(可选值见 :ref:`ProfilerState ` )。 - - **scheduler** (Callable|tuple,可选) - 如果是Callable对象,代表是性能分析器状态的调度器,该调度器会接受一个step_num参数并返回相应的状态(详情见 :ref:`状态说明 ` ),可以通过 :ref:`make_scheduler ` 接口生成调度器。如果没有设置这个参数(None),默认的调度器会一直让性能分析器保持RECORD状态到结束。如果是tuple类型,有两个值start_batch和end_batch,则会在[start_batch, end_batch)(前闭后开区间)内处于RECORD状态进行性能分析。 - - **on_trace_ready** (Callable,可选) - 处理性能分析器的回调函数,该回调函数接受Profiler对象作为参数,提供了一种自定义后处理的方式。当性能分析器处于RECORD_AND_RETURN状态或者结束时返回性能数据,将会调用该回调函数进行处理,默认为 :ref:`export_chrome_tracing ` (./profiler_log/)。 - - **timer_only** (bool,可选) - 如果设置为True,将只统计模型的数据读取和每一个迭代所消耗的时间,而不进行性能分析。否则,模型将被计时,同时进行性能分析。默认值:False。 + - **targets** (list,可选) - 指定性能分析所要分析的设备,默认会自动分析所有存在且支持的设备,当前支持 CPU,GPU 和 MLU(可选值见 :ref:`ProfilerState ` )。 + - **scheduler** (Callable|tuple,可选) - 如果是 Callable 对象,代表是性能分析器状态的调度器,该调度器会接受一个 step_num 参数并返回相应的状态(详情见 :ref:`状态说明 ` ),可以通过 :ref:`make_scheduler ` 接口生成调度器。如果没有设置这个参数(None),默认的调度器会一直让性能分析器保持 RECORD 状态到结束。如果是 tuple 类型,有两个值 start_batch 和 end_batch,则会在[start_batch, end_batch)(前闭后开区间)内处于 RECORD 状态进行性能分析。 + - **on_trace_ready** (Callable,可选) - 处理性能分析器的回调函数,该回调函数接受 Profiler 对象作为参数,提供了一种自定义后处理的方式。当性能分析器处于 RECORD_AND_RETURN 状态或者结束时返回性能数据,将会调用该回调函数进行处理,默认为 
:ref:`export_chrome_tracing ` (./profiler_log/)。 + - **timer_only** (bool,可选) - 如果设置为 True,将只统计模型的数据读取和每一个迭代所消耗的时间,而不进行性能分析。否则,模型将被计时,同时进行性能分析。默认值:False。 代码示例 1 :::::::::: @@ -49,8 +49,8 @@ COPY-FROM: paddle.profiler.Profiler:code-example-timer1 start() ''''''''' -开启性能分析器,进入状态scheduler(0)。即 -性能分析器状态从CLOSED -> scheduler(0),并根据新的状态触发相应行为。 +开启性能分析器,进入状态 scheduler(0)。即 +性能分析器状态从 CLOSED -> scheduler(0),并根据新的状态触发相应行为。 **代码示例** @@ -60,8 +60,8 @@ COPY-FROM: paddle.profiler.Profiler.start:code-example4 stop() ''''''''' -停止性能分析器,并且进入状态CLOSED。即 -性能分析器状态从当前状态 -> CLOSED,性能分析器关闭,如果有性能数据返回,调用on_trace_ready回调函数进行处理。 +停止性能分析器,并且进入状态 CLOSED。即 +性能分析器状态从当前状态 -> CLOSED,性能分析器关闭,如果有性能数据返回,调用 on_trace_ready 回调函数进行处理。 **代码示例** @@ -71,11 +71,11 @@ COPY-FROM: paddle.profiler.Profiler.stop:code-example5 step(num_samples: Optional[int]=None) ''''''''' -指示性能分析器进入下一个step,根据scheduler计算新的性能分析器状态,并根据新的状态触发相应行为。如果有性能数据返回,调用on_trace_ready回调函数进行处理。 +指示性能分析器进入下一个 step,根据 scheduler 计算新的性能分析器状态,并根据新的状态触发相应行为。如果有性能数据返回,调用 on_trace_ready 回调函数进行处理。 **参数** - - **num_samples** (int|None,可选) - 模型运行中每一步的样本数量batch size,当timer_only为True时该参数被用于计算吞吐量。默认值:None。 + - **num_samples** (int|None,可选) - 模型运行中每一步的样本数量 batch size,当 timer_only 为 True 时该参数被用于计算吞吐量。默认值:None。 **代码示例** @@ -89,13 +89,13 @@ step_info(unit: Optional[int]=None) 1. reader_cost:加载数据的开销,单位为秒。 -2. batch_cost:1次迭代的开销,单位为秒。 +2. batch_cost:1 次迭代的开销,单位为秒。 -3. ips(Instance Per Second):模型吞吐量,单位为samples/s或其他,取决于参数unit的设置。当step()的num_samples为None时,单位为steps/s。 +3. 
ips(Instance Per Second):模型吞吐量,单位为 samples/s 或其他,取决于参数 unit 的设置。当 step()的 num_samples 为 None 时,单位为 steps/s。 **参数** - - **unit** (string,可选) - 输入数据的单位,仅在step()的num_samples指定为实数时有效。例如,当unit为images时,吞吐量的单位为images/s。默认值:None,吞吐量的单位是samples/s。 + - **unit** (string,可选) - 输入数据的单位,仅在 step()的 num_samples 指定为实数时有效。例如,当 unit 为 images 时,吞吐量的单位为 images/s。默认值:None,吞吐量的单位是 samples/s。 **返回** @@ -114,7 +114,7 @@ export(path, format="json") **参数** - **path** (str) – 性能数据导出的文件名。 - - **format** (str,可选) – 性能数据导出的格式,目前支持"json"和"pb"两种。即"json"为导出chrome tracing文件,"pb"为导出protobuf文件,默认值为"json"。 + - **format** (str,可选) – 性能数据导出的格式,目前支持"json"和"pb"两种。即"json"为导出 chrome tracing 文件,"pb"为导出 protobuf 文件,默认值为"json"。 **代码示例** @@ -130,9 +130,9 @@ summary(sorted_by=SortedKeys.CPUTotal, op_detail=True, thread_sep=False, time_un **参数** - - **sorted_by** ( :ref:`SortedKeys `,可选) – 表单的数据项排列方式,默认值SortedKeys.CPUTotal。 - - **op_detail** (bool,可选) – 是否打印算子内各过程的详细信息,默认值True。 - - **thread_sep** (bool,可选) - 是否分线程打印,默认值False。 + - **sorted_by** ( :ref:`SortedKeys `,可选) – 表单的数据项排列方式,默认值 SortedKeys.CPUTotal。 + - **op_detail** (bool,可选) – 是否打印算子内各过程的详细信息,默认值 True。 + - **thread_sep** (bool,可选) - 是否分线程打印,默认值 False。 - **time_unit** (str,可选) - 表单数据的时间单位,默认为'ms',可选's'、'us'、'ns'。 diff --git a/docs/api/paddle/profiler/RecordEvent_cn.rst b/docs/api/paddle/profiler/RecordEvent_cn.rst index 442cefbd6ef..32c8c977a3b 100644 --- a/docs/api/paddle/profiler/RecordEvent_cn.rst +++ b/docs/api/paddle/profiler/RecordEvent_cn.rst @@ -12,7 +12,7 @@ RecordEvent ::::::::: - **name** (str) - 记录打点的名字。 - - **event_type** (TracerEventType,可选) - 可选参数,默认值为TracerEventType.UserDefined。该参数预留为内部使用,最好不要指定该参数。 + - **event_type** (TracerEventType,可选) - 可选参数,默认值为 TracerEventType.UserDefined。该参数预留为内部使用,最好不要指定该参数。 代码示例 :::::::::: @@ -20,7 +20,7 @@ RecordEvent COPY-FROM: paddle.profiler.RecordEvent:code-example1 .. 
note:: - RecordEvent只有在 :ref:`性能分析器 ` 处于RECORD状态才会生效。 + RecordEvent 只有在 :ref:`性能分析器 ` 处于 RECORD 状态才会生效。 方法 :::::::::::: diff --git a/docs/api/paddle/profiler/SortedKeys_cn.rst b/docs/api/paddle/profiler/SortedKeys_cn.rst index 10f0775c5bd..849eb91d53e 100644 --- a/docs/api/paddle/profiler/SortedKeys_cn.rst +++ b/docs/api/paddle/profiler/SortedKeys_cn.rst @@ -6,16 +6,16 @@ SortedKeys .. py:class:: paddle.profiler.SortedKeys -SortedKeys枚举类用来指定打印的统计 :ref:`表单 ` 内数据的排序方式。 +SortedKeys 枚举类用来指定打印的统计 :ref:`表单 ` 内数据的排序方式。 排序方式说明 :::::::::::: - - **SortedKeys.CPUTotal** - 按活动的CPU总时间排序。 - - **SortedKeys.CPUAvg** - 按活动的CPU平均时间排序。 - - **SortedKeys.CPUMax** - 按活动的CPU上最大时间排序。 - - **SortedKeys.CPUMin** - 按活动的CPU上最小时间排序。 - - **SortedKeys.GPUTotal** - 按活动的GPU总时间排序。 - - **SortedKeys.GPUAvg** - 按活动的GPU平均时间排序。 - - **SortedKeys.GPUMax** - 按活动的GPU上最大时间排序。 - - **SortedKeys.GPUMin** - 按活动的GPU上最小时间排序。 + - **SortedKeys.CPUTotal** - 按活动的 CPU 总时间排序。 + - **SortedKeys.CPUAvg** - 按活动的 CPU 平均时间排序。 + - **SortedKeys.CPUMax** - 按活动的 CPU 上最大时间排序。 + - **SortedKeys.CPUMin** - 按活动的 CPU 上最小时间排序。 + - **SortedKeys.GPUTotal** - 按活动的 GPU 总时间排序。 + - **SortedKeys.GPUAvg** - 按活动的 GPU 平均时间排序。 + - **SortedKeys.GPUMax** - 按活动的 GPU 上最大时间排序。 + - **SortedKeys.GPUMin** - 按活动的 GPU 上最小时间排序。 diff --git a/docs/api/paddle/profiler/export_chrome_tracing_cn.rst b/docs/api/paddle/profiler/export_chrome_tracing_cn.rst index 7d8813aa53c..d3c639579a9 100644 --- a/docs/api/paddle/profiler/export_chrome_tracing_cn.rst +++ b/docs/api/paddle/profiler/export_chrome_tracing_cn.rst @@ -5,7 +5,7 @@ export_chrome_tracing .. 
py:function:: paddle.profiler.export_chrome_tracing(dir_name: str, worker_name: Optional[str]=None) -该接口返回一个回调函数,用于将采集的性能数据保存到google chrome tracing格式的文件。 +该接口返回一个回调函数,用于将采集的性能数据保存到 google chrome tracing 格式的文件。 输出的文件将会保存在目录 ``dir_name`` 中,文件名的前缀将会被设置成 ``worker_name`` 。 如果 ``worker_name`` 没有被设置,默认名字为 [hostname]_[pid]。 @@ -18,11 +18,11 @@ export_chrome_tracing 返回 ::::::::: -回调函数(callable),该函数会接收一个参数prof(Profiler对象),调用prof的export方法保存采集到的性能数据到chrome tracing文件。 +回调函数(callable),该函数会接收一个参数 prof(Profiler 对象),调用 prof 的 export 方法保存采集到的性能数据到 chrome tracing 文件。 代码示例 :::::::::: -用于 :ref:`性能分析器 ` 的on_trace_ready参数。 +用于 :ref:`性能分析器 ` 的 on_trace_ready 参数。 COPY-FROM: paddle.profiler.export_chrome_tracing:code-example1 diff --git a/docs/api/paddle/profiler/export_protobuf_cn.rst b/docs/api/paddle/profiler/export_protobuf_cn.rst index 332f188396b..c94cf0b4ad9 100644 --- a/docs/api/paddle/profiler/export_protobuf_cn.rst +++ b/docs/api/paddle/profiler/export_protobuf_cn.rst @@ -5,7 +5,7 @@ export_protobuf .. py:function:: paddle.profiler.export_protobuf(dir_name: str, worker_name: Optional[str]=None) -该接口返回一个回调函数,用于将采集的性能数据保存到protobuf格式的文件。 +该接口返回一个回调函数,用于将采集的性能数据保存到 protobuf 格式的文件。 输出的文件将会保存在目录 ``dir_name`` 中,文件名的前缀将会被设置成 ``worker_name`` 。 如果 ``worker_name`` 没有被设置,默认名字为 [hostname]_[pid]。 @@ -18,11 +18,11 @@ export_protobuf 返回 ::::::::: -回调函数(callable),该函数会接收一个参数prof(Profiler对象),调用prof的export方法保存采集到的性能数据到protobuf文件。 +回调函数(callable),该函数会接收一个参数 prof(Profiler 对象),调用 prof 的 export 方法保存采集到的性能数据到 protobuf 文件。 代码示例 :::::::::: -用于 :ref:`性能分析器 ` 的on_trace_ready参数。 +用于 :ref:`性能分析器 ` 的 on_trace_ready 参数。 COPY-FROM: paddle.profiler.export_protobuf:code-example1 diff --git a/docs/api/paddle/profiler/load_profiler_result_cn.rst b/docs/api/paddle/profiler/load_profiler_result_cn.rst index ede7935d5de..d98761af04e 100644 --- a/docs/api/paddle/profiler/load_profiler_result_cn.rst +++ b/docs/api/paddle/profiler/load_profiler_result_cn.rst @@ -5,17 +5,17 @@ load_profiler_result .. 
py:function:: paddle.profiler.load_profiler_result(file_name: str) -该接口用于载入所保存到protobuf文件的性能数据到内存。 +该接口用于载入所保存到 protobuf 文件的性能数据到内存。 参数 ::::::::: - - **file_name** (str) - protobuf格式的性能数据文件路径。 + - **file_name** (str) - protobuf 格式的性能数据文件路径。 返回 ::::::::: -ProfilerResult对象,底层存储性能数据的结构。 +ProfilerResult 对象,底层存储性能数据的结构。 代码示例 :::::::::: diff --git a/docs/api/paddle/profiler/make_scheduler_cn.rst b/docs/api/paddle/profiler/make_scheduler_cn.rst index 77b97d3ce82..d5b0ea1eb66 100644 --- a/docs/api/paddle/profiler/make_scheduler_cn.rst +++ b/docs/api/paddle/profiler/make_scheduler_cn.rst @@ -21,16 +21,16 @@ make_scheduler 参数 ::::::::: - - **closed** (int) - 处于ProfilerState.CLOSED状态的step数量。 - - **ready** (int) - 处于ProfilerState.CLOSED状态的step数量。 - - **record** (int) - 处于ProfilerState.RECORD状态的step数量,record的最后一个step会处于ProfilerState.RECORD_AND_RETURN状态。 - - **repeat** (int,可选) - 调度器重复该状态调度过程的次数,默认值为0,意味着一直重复该调度过程直到性能分析器结束。 - - **skip_first** (int,可选) - 跳过前skip_first个step,不参与状态调度,并处于ProfilerState.CLOSED状态,默认值为0。 + - **closed** (int) - 处于 ProfilerState.CLOSED 状态的 step 数量。 + - **ready** (int) - 处于 ProfilerState.CLOSED 状态的 step 数量。 + - **record** (int) - 处于 ProfilerState.RECORD 状态的 step 数量,record 的最后一个 step 会处于 ProfilerState.RECORD_AND_RETURN 状态。 + - **repeat** (int,可选) - 调度器重复该状态调度过程的次数,默认值为 0,意味着一直重复该调度过程直到性能分析器结束。 + - **skip_first** (int,可选) - 跳过前 skip_first 个 step,不参与状态调度,并处于 ProfilerState.CLOSED 状态,默认值为 0。 返回 ::::::::: -调度函数(callable),该函数会接收一个参数step_num,并计算返回相应的ProfilerState。调度函数会根据上述状态转换过程进行调度。 +调度函数(callable),该函数会接收一个参数 step_num,并计算返回相应的 ProfilerState。调度函数会根据上述状态转换过程进行调度。 代码示例 1 @@ -38,7 +38,7 @@ make_scheduler 性能分析 batch [2, 5]。 -设定第0个batch处于CLOSED,第1个batch处于READY,第[2 - 5]个batch处于RECORD,在第5个batch返回收集的性能数据。 +设定第 0 个 batch 处于 CLOSED,第 1 个 batch 处于 READY,第[2 - 5]个 batch 处于 RECORD,在第 5 个 batch 返回收集的性能数据。 COPY-FROM: paddle.profiler.make_scheduler:code-example1 @@ -47,7 +47,7 @@ COPY-FROM: paddle.profiler.make_scheduler:code-example1 性能分析 batch [3,6], [9,12], [15, 18]..。 
-设定第0个batch跳过,第1个batch处于CLOSED,第2个batch处于READ,第[3 - 6]个batch处于RECORD,在第6个batch返回收集的性能数据。即第7个batch处于CLOSED,第8个batch处于READY, -第[9-12]个batch处于RECORD,并在第12个batch返回第二轮所收集到的性能数据。以此类推,直到性能分析器结束。 +设定第 0 个 batch 跳过,第 1 个 batch 处于 CLOSED,第 2 个 batch 处于 READ,第[3 - 6]个 batch 处于 RECORD,在第 6 个 batch 返回收集的性能数据。即第 7 个 batch 处于 CLOSED,第 8 个 batch 处于 READY, +第[9-12]个 batch 处于 RECORD,并在第 12 个 batch 返回第二轮所收集到的性能数据。以此类推,直到性能分析器结束。 COPY-FROM: paddle.profiler.make_scheduler:code-example2 diff --git a/docs/api/paddle/put_along_axis_cn.rst b/docs/api/paddle/put_along_axis_cn.rst index 15688b2f331..4127ddc4384 100644 --- a/docs/api/paddle/put_along_axis_cn.rst +++ b/docs/api/paddle/put_along_axis_cn.rst @@ -4,21 +4,21 @@ put_along_axis ------------------------------- .. py:function:: paddle.put_along_axis(arr, indices, values, axis, reduce='assign') -基于输入index矩阵,将输入value沿着指定axis放置入arr矩阵。索引矩阵和value必须和arr矩阵有相同的维度,需要能够broadcast与arr矩阵对齐。 +基于输入 index 矩阵,将输入 value 沿着指定 axis 放置入 arr 矩阵。索引矩阵和 value 必须和 arr 矩阵有相同的维度,需要能够 broadcast 与 arr 矩阵对齐。 参数 ::::::::: -- **arr** (Tensor) - 输入的Tensor 作为目标矩阵,数据类型为:float32、float64。 -- **indices** (Tensor) - 索引矩阵,包含沿轴提取1d切片的下标,必须和arr矩阵有相同的维度,需要能够broadcast与arr矩阵对齐,数据类型为:int、int64。 -- **value** (float)- 需要插入的值,形状和维度需要能够被broadcast与indices矩阵匹配,数据类型为:float32、float64。 +- **arr** (Tensor) - 输入的 Tensor 作为目标矩阵,数据类型为:float32、float64。 +- **indices** (Tensor) - 索引矩阵,包含沿轴提取 1d 切片的下标,必须和 arr 矩阵有相同的维度,需要能够 broadcast 与 arr 矩阵对齐,数据类型为:int、int64。 +- **value** (float)- 需要插入的值,形状和维度需要能够被 broadcast 与 indices 矩阵匹配,数据类型为:float32、float64。 - **axis** (int) - 指定沿着哪个维度获取对应的值,数据类型为:int。 -- **reduce** (str,可选) - 归约操作类型,默认为 ``assign``,可选为 ``add`` 或 ``multiple``。不同的规约操作插入值value对于输入矩阵arr会有不同的行为,如为 ``assgin`` 则覆盖输入矩阵,``add`` 则累加至输入矩阵,``multiple`` 则累乘至输入矩阵。 +- **reduce** (str,可选) - 归约操作类型,默认为 ``assign``,可选为 ``add`` 或 ``multiple``。不同的规约操作插入值 value 对于输入矩阵 arr 会有不同的行为,如为 ``assgin`` 则覆盖输入矩阵,``add`` 则累加至输入矩阵,``multiple`` 则累乘至输入矩阵。 返回 ::::::::: -- **out** (Tensor) - 输出Tensor,indeces矩阵选定的下标会被插入value,与 
``arr`` 数据类型相同。 +- **out** (Tensor) - 输出 Tensor,indeces 矩阵选定的下标会被插入 value,与 ``arr`` 数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/quantile_cn.rst b/docs/api/paddle/quantile_cn.rst index 20e3b2efb98..26d47cbf282 100644 --- a/docs/api/paddle/quantile_cn.rst +++ b/docs/api/paddle/quantile_cn.rst @@ -9,15 +9,15 @@ quantile 参数 :::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 - - q (int|float|list) - 待计算的分位数,需要在符合取值范围[0, 1]。如果 ``q`` 是List,其中的每一个q分位数都会被计算,并且输出的首维大小与列表中元素的数量相同。 - - axis (int|list,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int或内部元素为int类型的list。``axis`` 值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D`。如果 ``axis`` 是list,对给定的轴上的所有元素计算分位数。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算分位数。默认值为None。 - - keepdim (bool,可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 + - q (int|float|list) - 待计算的分位数,需要在符合取值范围[0, 1]。如果 ``q`` 是 List,其中的每一个 q 分位数都会被计算,并且输出的首维大小与列表中元素的数量相同。 + - axis (int|list,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int 或内部元素为 int 类型的 list。``axis`` 值应该在范围[-D, D)内,D 是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D`。如果 ``axis`` 是 list,对给定的轴上的所有元素计算分位数。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算分位数。默认值为 None。 + - keepdim (bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 ``keepdim`` 为 True,则输出 Tensor 和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为 1)。否则,输出 Tensor 的形状会在 ``axis`` 上进行 squeeze 操作。默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - ``Tensor``,沿着 ``axis`` 进行分位数计算的结果。如果 ``x`` 的数据类型为float64,则返回值的数据类型为float64,反之返回值数据类型为float32。 + ``Tensor``,沿着 ``axis`` 进行分位数计算的结果。如果 ``x`` 的数据类型为 float64,则返回值的数据类型为 float64,反之返回值数据类型为 float32。 代码示例 :::::::::: diff --git a/docs/api/paddle/rad2deg_cn.rst b/docs/api/paddle/rad2deg_cn.rst index f286cbd4bfc..7a79f52c6ff 100644 --- a/docs/api/paddle/rad2deg_cn.rst +++ b/docs/api/paddle/rad2deg_cn.rst @@ -14,13 +14,13 @@ rad2deg 参数 ::::::::: -- 
**x** (Tensor) - 输入的Tensor,数据类型为:int32、int64、float32、float64。 -- **name** (str,可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 +- **x** (Tensor) - 输入的 Tensor,数据类型为:int32、int64、float32、float64。 +- **name** (str,可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 返回 ::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同(输入为int时,输出数据类型为float32)。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同(输入为 int 时,输出数据类型为 float32)。 代码示例 ::::::::: diff --git a/docs/api/paddle/rand_cn.rst b/docs/api/paddle/rand_cn.rst index cfd70e380d5..08a069d0cfc 100644 --- a/docs/api/paddle/rand_cn.rst +++ b/docs/api/paddle/rand_cn.rst @@ -5,17 +5,17 @@ rand .. py:function:: paddle.rand(shape, dtype=None, name=None) -返回符合均匀分布的、范围在[0, 1)的Tensor,形状为 ``shape``,数据类型为 ``dtype``。 +返回符合均匀分布的、范围在[0, 1)的 Tensor,形状为 ``shape``,数据类型为 ``dtype``。 参数 :::::::::: - - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 - - **dtype** (str|np.dtype,可选) - 输出Tensor的数据类型,支持float32、float64。当该参数值为None时,输出Tensor的数据类型为float32。默认值为None。 + - **shape** (list|tuple|Tensor) - 生成的随机 Tensor 的形状。如果 ``shape`` 是 list、tuple,则其中的元素可以是 int,或者是形状为[1]且数据类型为 int32、int64 的 Tensor。如果 ``shape`` 是 Tensor,则是数据类型为 int32、int64 的 1-D Tensor。 + - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 float32、float64。当该参数值为 None 时,输出 Tensor 的数据类型为 float32。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - Tensor:符合均匀分布的范围为[0, 1)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + Tensor:符合均匀分布的范围为[0, 1)的随机 Tensor,形状为 ``shape``,数据类型为 ``dtype``。 示例代码 :::::::::: diff --git a/docs/api/paddle/randint_cn.rst b/docs/api/paddle/randint_cn.rst index 8f9a2c470c1..72c07399f6a 100644 --- a/docs/api/paddle/randint_cn.rst +++ b/docs/api/paddle/randint_cn.rst @@ -5,13 +5,13 @@ randint .. 
py:function:: paddle.randint(low=0, high=None, shape=[1], dtype=None, name=None) -返回服从均匀分布的、范围在[``low``, ``high``)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。当 ``high`` 为None时(默认),均匀采样的区间为[0, ``low``)。 +返回服从均匀分布的、范围在[``low``, ``high``)的随机 Tensor,形状为 ``shape``,数据类型为 ``dtype``。当 ``high`` 为 None 时(默认),均匀采样的区间为[0, ``low``)。 参数 :::::::::: - - **low** (int,可选) - 要生成的随机值范围的下限,``low`` 包含在范围中。当 ``high`` 为 None 时,均匀采样的区间为[0, ``low``)。默认值为0。 + - **low** (int,可选) - 要生成的随机值范围的下限,``low`` 包含在范围中。当 ``high`` 为 None 时,均匀采样的区间为[0, ``low``)。默认值为 0。 - **high** (int,可选) - 要生成的随机值范围的上限,``high`` 不包含在范围中。默认值为 None,此时范围是[0, ``low``)。 - - **shape** (list|tuple|Tensor,可选) - 生成的随机 Tensor 的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是 int,或者是形状为[1]且数据类型为 int32、int64 的Tensor。如果 ``shape`` 是 Tensor,则是数据类型为 int32、int64 的1-D Tensor。默认值为[1]。 + - **shape** (list|tuple|Tensor,可选) - 生成的随机 Tensor 的形状。如果 ``shape`` 是 list、tuple,则其中的元素可以是 int,或者是形状为[1]且数据类型为 int32、int64 的 Tensor。如果 ``shape`` 是 Tensor,则是数据类型为 int32、int64 的 1-D Tensor。默认值为[1]。 - **dtype** (str|np.dtype|core.VarDesc.VarType,可选) - 输出 Tensor 的数据类型,支持 int32、int64。当该参数值为 None 时, 输出 Tensor 的数据类型为 int64。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/randint_like_cn.rst b/docs/api/paddle/randint_like_cn.rst index a2b8db3044e..b5b7290fd40 100644 --- a/docs/api/paddle/randint_like_cn.rst +++ b/docs/api/paddle/randint_like_cn.rst @@ -5,19 +5,19 @@ randint_like .. 
py:function:: paddle.randint_like(x, low=0, high=None, dtype=None, name=None) -返回服从均匀分布的、范围在[``low``, ``high``)的随机Tensor,输出的形状与x的形状一致,当数据类型 ``dtype`` 为None时(默认),输出的数据类型与x的数据类型一致,当数据类型 ``dtype`` 不为None时,将输出用户指定的数据类型。当 ``high`` 为None时(默认),均匀采样的区间为[0, ``low``)。 +返回服从均匀分布的、范围在[``low``, ``high``)的随机 Tensor,输出的形状与 x 的形状一致,当数据类型 ``dtype`` 为 None 时(默认),输出的数据类型与 x 的数据类型一致,当数据类型 ``dtype`` 不为 None 时,将输出用户指定的数据类型。当 ``high`` 为 None 时(默认),均匀采样的区间为[0, ``low``)。 参数 :::::::::: - - **x** (Tensor) – 输入的多维Tensor,数据类型可以是bool,int32,int64,float16,float32,float64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 - - **low** (int) - 要生成的随机值范围的下限,``low`` 包含在范围中。当 ``high`` 为None时,均匀采样的区间为[0, ``low``)。默认值为0。 - - **high** (int,可选) - 要生成的随机值范围的上限,``high`` 不包含在范围中。默认值为None,此时范围是[0, ``low``)。 - - **dtype** (str|np.dtype,可选) - 输出Tensor的数据类型,支持bool,int32,int64,float16,float32,float64。当该参数值为None时,输出Tensor的数据类型与输入Tensor的数据类型一致。默认值为None。 + - **x** (Tensor) – 输入的多维 Tensor,数据类型可以是 bool,int32,int64,float16,float32,float64。输出 Tensor 的形状和 ``x`` 相同。如果 ``dtype`` 为 None,则输出 Tensor 的数据类型与 ``x`` 相同。 + - **low** (int) - 要生成的随机值范围的下限,``low`` 包含在范围中。当 ``high`` 为 None 时,均匀采样的区间为[0, ``low``)。默认值为 0。 + - **high** (int,可选) - 要生成的随机值范围的上限,``high`` 不包含在范围中。默认值为 None,此时范围是[0, ``low``)。 + - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 bool,int32,int64,float16,float32,float64。当该参数值为 None 时,输出 Tensor 的数据类型与输入 Tensor 的数据类型一致。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - Tensor:从区间[``low``,``high``)内均匀分布采样的随机Tensor,形状为 ``x.shape``,数据类型为 ``dtype``。 + Tensor:从区间[``low``,``high``)内均匀分布采样的随机 Tensor,形状为 ``x.shape``,数据类型为 ``dtype``。 代码示例 ::::::::::: diff --git a/docs/api/paddle/randn_cn.rst b/docs/api/paddle/randn_cn.rst index cd8daba71da..7353b074e74 100644 --- a/docs/api/paddle/randn_cn.rst +++ b/docs/api/paddle/randn_cn.rst @@ -5,11 +5,11 @@ randn .. 
py:function:: paddle.randn(shape, dtype=None, name=None) -返回符合标准正态分布(均值为0,标准差为1的正态随机分布)的随机 Tensor,形状为 ``shape``,数据类型为 ``dtype``。 +返回符合标准正态分布(均值为 0,标准差为 1 的正态随机分布)的随机 Tensor,形状为 ``shape``,数据类型为 ``dtype``。 参数 :::::::::: - - **shape** (list|tuple|Tensor) - 生成的随机 Tensor 的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是 int,或者是形状为[1]且数据类型为 int32、int64 的 Tensor。如果 ``shape`` 是 Tensor,则是数据类型为 int32、int64 的1-D Tensor。 + - **shape** (list|tuple|Tensor) - 生成的随机 Tensor 的形状。如果 ``shape`` 是 list、tuple,则其中的元素可以是 int,或者是形状为[1]且数据类型为 int32、int64 的 Tensor。如果 ``shape`` 是 Tensor,则是数据类型为 int32、int64 的 1-D Tensor。 - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 float32、float64。当该参数值为 None 时,输出 Tensor 的数据类型为 float32。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/randperm_cn.rst b/docs/api/paddle/randperm_cn.rst index 60a21841795..c97eaaabf15 100644 --- a/docs/api/paddle/randperm_cn.rst +++ b/docs/api/paddle/randperm_cn.rst @@ -5,17 +5,17 @@ randperm .. py:function:: paddle.randperm(n, dtype="int64", name=None) -返回一个数值在0到n-1、随机排列的1-D Tensor,数据类型为 ``dtype``。 +返回一个数值在 0 到 n-1、随机排列的 1-D Tensor,数据类型为 ``dtype``。 参数 :::::::::::: - - **n** (int) - 随机序列的上限(不包括在序列中),应该大于0。 + - **n** (int) - 随机序列的上限(不包括在序列中),应该大于 0。 - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 int32、int64、float32、float64。默认值为 int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - Tensor:一个数值在0到n-1、随机排列的1-D Tensor,数据类型为 ``dtype`` 。 + Tensor:一个数值在 0 到 n-1、随机排列的 1-D Tensor,数据类型为 ``dtype`` 。 代码示例 :::::::::: diff --git a/docs/api/paddle/rank_cn.rst b/docs/api/paddle/rank_cn.rst index 1e618a32943..b8bd6e7b0bb 100644 --- a/docs/api/paddle/rank_cn.rst +++ b/docs/api/paddle/rank_cn.rst @@ -8,16 +8,16 @@ rank -计算输入Tensor的维度(秩)。 +计算输入 Tensor 的维度(秩)。 参数 :::::::::::: - - **input** (Tensor) — 输入input是shape为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor,数据类型可以任意类型。 + - **input** (Tensor) — 输入 input 是 shape 为 :math:`[N_1, N_2, ..., N_k]` 的多维 
Tensor,数据类型可以任意类型。 返回 :::::::::::: -输出Tensor的秩,是一个0-D Tensor。 +输出 Tensor 的秩,是一个 0-D Tensor。 代码示例 diff --git a/docs/api/paddle/reciprocal_cn.rst b/docs/api/paddle/reciprocal_cn.rst index 63e98483c38..183e9f47d4a 100644 --- a/docs/api/paddle/reciprocal_cn.rst +++ b/docs/api/paddle/reciprocal_cn.rst @@ -8,7 +8,7 @@ reciprocal -reciprocal 对输入Tensor取倒数 +reciprocal 对输入 Tensor 取倒数 .. math:: @@ -18,13 +18,13 @@ reciprocal 对输入Tensor取倒数 :::::::::::: - - **x** - 输入的多维Tensor,支持的数据类型为float32,float64。 + - **x** - 输入的多维 Tensor,支持的数据类型为 float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - 对输入取倒数得到的Tensor,输出Tensor数据类型和维度与输入相同。 + 对输入取倒数得到的 Tensor,输出 Tensor 数据类型和维度与输入相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/regularizer/L1Decay_cn.rst b/docs/api/paddle/regularizer/L1Decay_cn.rst index d81a9c68969..50ecc1a82d6 100644 --- a/docs/api/paddle/regularizer/L1Decay_cn.rst +++ b/docs/api/paddle/regularizer/L1Decay_cn.rst @@ -6,7 +6,7 @@ L1Decay .. py:attribute:: paddle.regularizer.L1Decay(coeff=0.0) -L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵稀疏。 +L1Decay 实现 L1 权重衰减正则化,用于模型训练,使得权重矩阵稀疏。 该类生成的实例对象,需要设置在 :ref:`cn_api_paddle_ParamAttr` 或者 ``optimizer`` (例如 :ref:`cn_api_paddle_optimizer_Momentum` )中,在 ``ParamAttr`` 中设置时,只对该 @@ -15,7 +15,7 @@ L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵 ``ParamAttr`` 中定义了正则化,那么会忽略 ``optimizer`` 中的正则化;否则会使用 ``optimizer``中的 正则化。 -具体实现中,L1权重衰减正则化的损失函数计算如下: +具体实现中,L1 权重衰减正则化的损失函数计算如下: .. math:: \\loss = coeff * reduce\_sum(abs(x))\\ @@ -23,7 +23,7 @@ L1Decay实现L1权重衰减正则化,用于模型训练,使得权重矩阵 参数 :::::::::::: - - **coeff** (float) – L1正则化系数,默认值为0.0。 + - **coeff** (float) – L1 正则化系数,默认值为 0.0。 代码示例 1 :::::::::::: diff --git a/docs/api/paddle/regularizer/L2Decay_cn.rst b/docs/api/paddle/regularizer/L2Decay_cn.rst index 8aed5423ff3..b4582e7e133 100644 --- a/docs/api/paddle/regularizer/L2Decay_cn.rst +++ b/docs/api/paddle/regularizer/L2Decay_cn.rst @@ -6,7 +6,7 @@ L2Decay .. 
py:attribute:: paddle.regularizer.L2Decay(coeff=0.0) -L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模型对训练数据过拟合。 +L2Decay 实现 L2 权重衰减正则化,用于模型训练,有助于防止模型对训练数据过拟合。 该类生成的实例对象,需要设置在 :ref:`cn_api_paddle_ParamAttr` 或者 ``optimizer`` (例如 :ref:`cn_api_paddle_optimizer_Momentum` )中,在 ``ParamAttr`` 中设置时, @@ -15,7 +15,7 @@ L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模 ``ParamAttr`` 中定义了正则化,那么会忽略 ``optimizer`` 中的正则化;否则会使用 ``optimizer``中的 正则化。 -具体实现中,L2权重衰减正则化的损失函数计算如下: +具体实现中,L2 权重衰减正则化的损失函数计算如下: .. math:: \\loss = 0.5 * coeff * reduce\_sum(square(x))\\ @@ -23,7 +23,7 @@ L2Decay实现L2权重衰减正则化,用于模型训练,有助于防止模 参数 :::::::::::: - - **coeff** (float) – 正则化系数,默认值为0.0。 + - **coeff** (float) – 正则化系数,默认值为 0.0。 代码示例 1 :::::::::::: diff --git a/docs/api/paddle/repeat_interleave_cn.rst b/docs/api/paddle/repeat_interleave_cn.rst index 08d4486719c..77cd33d3e47 100644 --- a/docs/api/paddle/repeat_interleave_cn.rst +++ b/docs/api/paddle/repeat_interleave_cn.rst @@ -6,11 +6,11 @@ repeat_interleave .. py:function:: paddle.repeat_interleave(x, repeats, axis=None, name=None) -沿着指定轴 ``axis`` 对输入 ``x`` 进行复制,创建并返回到一个新的Tensor。当 ``repeats`` 为 ``1-D`` Tensor 时,``repeats`` 长度必须和指定轴 ``axis`` 维度一致,``repeats`` 对应位置的值表示 ``x`` 对应位置元素需要复制的次数。当 ``repeats`` 为 int 时,``x`` 沿指定轴 ``axis`` 上所有元素复制 ``repeats`` 次。 +沿着指定轴 ``axis`` 对输入 ``x`` 进行复制,创建并返回到一个新的 Tensor。当 ``repeats`` 为 ``1-D`` Tensor 时,``repeats`` 长度必须和指定轴 ``axis`` 维度一致,``repeats`` 对应位置的值表示 ``x`` 对应位置元素需要复制的次数。当 ``repeats`` 为 int 时,``x`` 沿指定轴 ``axis`` 上所有元素复制 ``repeats`` 次。 参数 ::::::::: - - **x** (Tensor)– 输入Tensor。 ``x`` 的数据类型可以是float32,float64,int32,int64。 + - **x** (Tensor)– 输入 Tensor。 ``x`` 的数据类型可以是 float32,float64,int32,int64。 - **repeats** (Tensor, int)– 包含复制次数的 1-D Tensor 或指定的复制次数。 - **axis** (int,可选) – 指定对输入 ``x`` 进行运算的轴,若未指定,默认值为 None,使用输入 Tensor 的 flatten 形式。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -18,7 +18,7 @@ repeat_interleave 返回 ::::::::: - - **Tensor**:返回一个数据类型同输入的Tensor。 + - **Tensor**:返回一个数据类型同输入的 Tensor。 代码示例 ::::::::: diff --git 
a/docs/api/paddle/reshape_cn.rst b/docs/api/paddle/reshape_cn.rst index 403f51dca70..5dcabb8d19c 100644 --- a/docs/api/paddle/reshape_cn.rst +++ b/docs/api/paddle/reshape_cn.rst @@ -11,23 +11,23 @@ reshape 请注意,在动态图模式下,输出 Tensor 将与输入 Tensor 共享数据,并且没有 Tensor 数据拷贝的过程。 如果不希望输入与输出共享数据,请使用 `Tensor.clone`,例如 `reshape_clone_x = x.reshape([-1]).clone()` 。 -在指定目标shape时存在一些技巧: +在指定目标 shape 时存在一些技巧: - 1. -1 表示这个维度的值是从 x 的元素总数和剩余维度推断出来的。因此,有且只有一个维度可以被设置为-1。 - - 2. 0 表示实际的维数是从 x 的对应维数中复制出来的,因此 shape 中0的索引值不能超过 x 的维度。 + - 2. 0 表示实际的维数是从 x 的对应维数中复制出来的,因此 shape 中 0 的索引值不能超过 x 的维度。 这里有一些例子来解释它们: - - 1。给定一个形状为[2,4,6]的三维 Tensor x,目标形状为[6,8],则将x变换为形状为[6,8]的2-D张量,且 x 的数据保持不变。 - - 2。给定一个形状为[2,4,6]的三维 Tensor x,目标形状为[2,3,-1,2],则将x变换为形状为[2,3,4,2]的4-D张量,且 x 的数据保持不变。在这种情况下,目标形状的一个维度被设置为-1,这个维度的值是从 x 的元素总数和剩余维度推断出来的。 - - 3。给定一个形状为[2,4,6]的三维 Tensor x,目标形状为[-1,0,3,2],则将x变换为形状为[2,4,3,2]的4-D张量,且 x 的数据保持不变。在这种情况下,0对应位置的维度值将从 x 的对应维数中复制,-1对应位置的维度值由 x 的元素总数和剩余维度推断出来。 + - 1。给定一个形状为[2,4,6]的三维 Tensor x,目标形状为[6,8],则将 x 变换为形状为[6,8]的 2-D 张量,且 x 的数据保持不变。 + - 2。给定一个形状为[2,4,6]的三维 Tensor x,目标形状为[2,3,-1,2],则将 x 变换为形状为[2,3,4,2]的 4-D 张量,且 x 的数据保持不变。在这种情况下,目标形状的一个维度被设置为-1,这个维度的值是从 x 的元素总数和剩余维度推断出来的。 + - 3。给定一个形状为[2,4,6]的三维 Tensor x,目标形状为[-1,0,3,2],则将 x 变换为形状为[2,4,3,2]的 4-D 张量,且 x 的数据保持不变。在这种情况下,0 对应位置的维度值将从 x 的对应维数中复制,-1 对应位置的维度值由 x 的元素总数和剩余维度推断出来。 参数 :::::::::::: - **x** (Tensor) - N-D ``Tensor``,数据类型为 ``float32``、``float64``、``int32``、``int64`` 或者 ``bool``。 - - **shape** (list|tuple|Tensor) - 数据类型是 ``int32``。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``shape`` 的类型是 ``Tensor``,则是1-D的 ``Tensor``。 + - **shape** (list|tuple|Tensor) - 数据类型是 ``int32``。定义目标形状。目标形状最多只能有一个维度为-1。如果 ``shape`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``shape`` 的类型是 ``Tensor``,则是 1-D 的 ``Tensor``。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/rot90_cn.rst b/docs/api/paddle/rot90_cn.rst index e05dd88bb10..6706431966a 100644 --- 
a/docs/api/paddle/rot90_cn.rst +++ b/docs/api/paddle/rot90_cn.rst @@ -7,20 +7,20 @@ rot90 -沿axes指定的平面将n维tensor旋转90度。当k为正数,旋转方向为从axes[0]到axes[1],当k为负数,旋转方向为从axes[1]到axes[0],k的绝对值表示旋转次数。 +沿 axes 指定的平面将 n 维 tensor 旋转 90 度。当 k 为正数,旋转方向为从 axes[0]到 axes[1],当 k 为负数,旋转方向为从 axes[1]到 axes[0],k 的绝对值表示旋转次数。 参数 :::::::::: - - **x** (Tensor) - 输入张量。维度为多维,数据类型为bool, int32, int64, float16, float32或float64。float16只在gpu上支持。 + - **x** (Tensor) - 输入张量。维度为多维,数据类型为 bool, int32, int64, float16, float32 或 float64。float16 只在 gpu 上支持。 - **k** (int,可选) - 旋转方向和次数,默认值:1。 - - **axes** (list|tuple,可选) - axes指定旋转的平面,维度必须为2。默认值为[0, 1]。 + - **axes** (list|tuple,可选) - axes 指定旋转的平面,维度必须为 2。默认值为[0, 1]。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - - 在指定平面axes上翻转指定次数后的张量,与输入x数据类型相同。 + - 在指定平面 axes 上翻转指定次数后的张量,与输入 x 数据类型相同。 代码示例 diff --git a/docs/api/paddle/round_cn.rst b/docs/api/paddle/round_cn.rst index e19cd6a62d9..e899bb0747c 100644 --- a/docs/api/paddle/round_cn.rst +++ b/docs/api/paddle/round_cn.rst @@ -24,12 +24,12 @@ round :::::::::::: - - **x** (Tensor) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 + - **x** (Tensor) - 支持任意维度的 Tensor。数据类型为 float32,float64 或 float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -返回类型为Tensor,数据类型同输入一致。 +返回类型为 Tensor,数据类型同输入一致。 代码示例 :::::::::::: diff --git a/docs/api/paddle/rsqrt_cn.rst b/docs/api/paddle/rsqrt_cn.rst index 79ea4b2c371..8fa341cec30 100644 --- a/docs/api/paddle/rsqrt_cn.rst +++ b/docs/api/paddle/rsqrt_cn.rst @@ -8,9 +8,9 @@ rsqrt -rsqrt激活函数。 +rsqrt 激活函数。 -注:输入x应确保为非 **0** 值,否则程序会抛异常退出。 +注:输入 x 应确保为非 **0** 值,否则程序会抛异常退出。 其运算公式如下: @@ -21,12 +21,12 @@ rsqrt激活函数。 参数 :::::::::::: - - **x** (Tensor) – 输入是多维Tensor,数据类型可以是float32和float64。 + - **x** (Tensor) – 输入是多维 Tensor,数据类型可以是 float32 和 float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,对输入x进行rsqrt激活函数计算结果,数据shape、类型和输入x的shape、类型一致。 +Tensor,对输入 x 进行 rsqrt 激活函数计算结果,数据 
shape、类型和输入 x 的 shape、类型一致。 代码示例 :::::::::::: diff --git a/docs/api/paddle/save_cn.rst b/docs/api/paddle/save_cn.rst index 394ed3b7ca4..8a049899ceb 100644 --- a/docs/api/paddle/save_cn.rst +++ b/docs/api/paddle/save_cn.rst @@ -5,16 +5,16 @@ save .. py:function:: paddle.save(obj, path, protocol=4) -将对象实例obj保存到指定的路径中。 +将对象实例 obj 保存到指定的路径中。 .. note:: - 目前支持保存:Layer 或者 Optimizer 的 ``state_dict``,Tensor以及包含Tensor的嵌套list、tuple、dict,Program。对于Tensor对象,只保存了它的名字和数值,没有保存stop_gradient等属性,如果您需要这些没有保存的属性,请调用set_value接口将数值设置到带有这些属性的Tensor中。 + 目前支持保存:Layer 或者 Optimizer 的 ``state_dict``,Tensor 以及包含 Tensor 的嵌套 list、tuple、dict,Program。对于 Tensor 对象,只保存了它的名字和数值,没有保存 stop_gradient 等属性,如果您需要这些没有保存的属性,请调用 set_value 接口将数值设置到带有这些属性的 Tensor 中。 .. note:: - 不同于 ``paddle.jit.save``,由于 ``paddle.save`` 的存储结果是单个文件,所以不需要通过添加后缀的方式区分多个存储文件,``paddle.save`` 的输入参数 ``path`` 将直接作为存储结果的文件名而非前缀。为了统一存储文件名的格式,我们推荐使用paddle标椎文件后缀: + 不同于 ``paddle.jit.save``,由于 ``paddle.save`` 的存储结果是单个文件,所以不需要通过添加后缀的方式区分多个存储文件,``paddle.save`` 的输入参数 ``path`` 将直接作为存储结果的文件名而非前缀。为了统一存储文件名的格式,我们推荐使用 paddle 标椎文件后缀: 1. 对于 ``Layer.state_dict``,推荐使用后缀 ``.pdparams`` ; 2. 
对于 ``Optimizer.state_dict``,推荐使用后缀 ``.pdopt`` 。 - 具体示例请参考API的代码示例。 + 具体示例请参考 API 的代码示例。 遇到使用问题,请参考: @@ -27,9 +27,9 @@ save 参数 ::::::::: - **obj** (Object) – 要保存的对象实例。 - - **path** (str|BytesIO) – 保存对象实例的路径/内存对象。如果存储到当前路径,输入的path字符串将会作为保存的文件名。 - - **protocol** (int,可选) – pickle模块的协议版本,默认值为4,取值范围是[2,4]。 - - **configs** (dict,可选) – 其他配置选项,目前支持以下选项:(1)use_binary_format(bool)- 如果被保存的对象是静态图的Tensor,你可以指定这个参数。如果被指定为 ``True``,这个Tensor会被保存为由paddle定义的二进制格式的文件;否则这个Tensor被保存为pickle格式。默认为 ``False`` 。 + - **path** (str|BytesIO) – 保存对象实例的路径/内存对象。如果存储到当前路径,输入的 path 字符串将会作为保存的文件名。 + - **protocol** (int,可选) – pickle 模块的协议版本,默认值为 4,取值范围是[2,4]。 + - **configs** (dict,可选) – 其他配置选项,目前支持以下选项:(1)use_binary_format(bool)- 如果被保存的对象是静态图的 Tensor,你可以指定这个参数。如果被指定为 ``True``,这个 Tensor 会被保存为由 paddle 定义的二进制格式的文件;否则这个 Tensor 被保存为 pickle 格式。默认为 ``False`` 。 返回 ::::::::: diff --git a/docs/api/paddle/scale_cn.rst b/docs/api/paddle/scale_cn.rst index d799dc1dedd..eeb0606b562 100644 --- a/docs/api/paddle/scale_cn.rst +++ b/docs/api/paddle/scale_cn.rst @@ -5,14 +5,14 @@ scale .. py:function:: paddle.scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None) -对输入Tensor进行缩放和偏置,其公式如下: +对输入 Tensor 进行缩放和偏置,其公式如下: -``bias_after_scale`` 为True: +``bias_after_scale`` 为 True: .. math:: Out=scale*X+bias -``bias_after_scale`` 为False: +``bias_after_scale`` 为 False: .. 
math:: Out=scale*(X+bias) @@ -20,11 +20,11 @@ scale 参数 :::::::::::: - - **x** (Tensor) - 要进行缩放的多维Tensor,数据类型可以为float32,float64,int8,int16,int32,int64,uint8。 - - **scale** (float|Tensor) - 缩放的比例,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。 + - **x** (Tensor) - 要进行缩放的多维 Tensor,数据类型可以为 float32,float64,int8,int16,int32,int64,uint8。 + - **scale** (float|Tensor) - 缩放的比例,是一个 float 类型或者一个 shape 为[1],数据类型为 float32 的 Tensor 类型。 - **bias** (float) - 缩放的偏置。 - - **bias_after_scale** (bool) - 判断在缩放之前或之后添加偏置。为True时,先缩放再偏置;为False时,先偏置再缩放。该参数在某些情况下,对数值稳定性很有用。 - - **act** (str,可选) - 应用于输出的激活函数,如tanh、softmax、sigmoid、relu等。 + - **bias_after_scale** (bool) - 判断在缩放之前或之后添加偏置。为 True 时,先缩放再偏置;为 False 时,先偏置再缩放。该参数在某些情况下,对数值稳定性很有用。 + - **act** (str,可选) - 应用于输出的激活函数,如 tanh、softmax、sigmoid、relu 等。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/scatter_cn.rst b/docs/api/paddle/scatter_cn.rst index f4cd3a63bbc..f397f6ca699 100644 --- a/docs/api/paddle/scatter_cn.rst +++ b/docs/api/paddle/scatter_cn.rst @@ -36,15 +36,15 @@ scatter 参数 ::::::::: - - **x** (Tensor) - ndim> = 1的输入N-D张量。数据类型可以是float32,float64。 - - **index** (Tensor)- 一维Tensor。数据类型可以是int32,int64。 ``index`` 的长度不能超过 ``updates`` 的长度,并且 ``index`` 中的值不能超过输入的长度。 - - **updates** (Tensor)- 根据 ``index`` 使用 ``update`` 参数更新输入 ``x``。形状应与输入 ``x`` 相同,并且dim>1的dim值应与输入 ``x`` 相同。 - - **overwrite** (bool,可选)- 指定索引 ``index`` 相同时,更新输出的方式。如果为True,则使用覆盖模式更新相同索引的输出,如果为False,则使用累加模式更新相同索引的输出。默认值为True。 + - **x** (Tensor) - ndim> = 1 的输入 N-D 张量。数据类型可以是 float32,float64。 + - **index** (Tensor)- 一维 Tensor。数据类型可以是 int32,int64。 ``index`` 的长度不能超过 ``updates`` 的长度,并且 ``index`` 中的值不能超过输入的长度。 + - **updates** (Tensor)- 根据 ``index`` 使用 ``update`` 参数更新输入 ``x``。形状应与输入 ``x`` 相同,并且 dim>1 的 dim 值应与输入 ``x`` 相同。 + - **overwrite** (bool,可选)- 指定索引 ``index`` 相同时,更新输出的方式。如果为 True,则使用覆盖模式更新相同索引的输出,如果为 False,则使用累加模式更新相同索引的输出。默认值为 True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: 
-Tensor,与x有相同形状和数据类型。 +Tensor,与 x 有相同形状和数据类型。 代码示例 diff --git a/docs/api/paddle/scatter_nd_add_cn.rst b/docs/api/paddle/scatter_nd_add_cn.rst index 96530eaac49..a3626a92d48 100644 --- a/docs/api/paddle/scatter_nd_add_cn.rst +++ b/docs/api/paddle/scatter_nd_add_cn.rst @@ -8,7 +8,7 @@ scatter_nd_add -通过对Tensor中的单个值或切片应用稀疏加法,从而得到输出的Tensor。 +通过对 Tensor 中的单个值或切片应用稀疏加法,从而得到输出的 Tensor。 :code:`x` 是维度为 :code:`R` 的张量。:code:`index` 是维度为 :code:`K` 的张量。因此,:code:`index` 的形状是 :math:`[i_0, i_1, ..., i_{K-2}, Q]`,其中 :math:`Q \leq R` 。:code:`updates` 是一个维度为 :math:`K - 1 + R - Q` 的张量,它的形状是 :math:`index.shape[:-1] + x.shape[index.shape[-1]:]` 。 @@ -45,8 +45,8 @@ scatter_nd_add 参数 :::::::::::: - - **x** (Tensor) - 输入张量,数据类型可以是int32,int64,float32,float64。 - - **index** (Tensor) - 输入的索引张量,数据类型为非负int32或非负int64。它的维度 :code:`index.ndim` 必须大于1,并且 :code:`index.shape[-1] <= x.ndim` + - **x** (Tensor) - 输入张量,数据类型可以是 int32,int64,float32,float64。 + - **index** (Tensor) - 输入的索引张量,数据类型为非负 int32 或非负 int64。它的维度 :code:`index.ndim` 必须大于 1,并且 :code:`index.shape[-1] <= x.ndim` - **updates** (Tensor) - 输入的更新张量,它必须和 :code:`x` 有相同的数据类型。形状必须是 :code:`index.shape[:-1] + x.shape[index.shape[-1]:]` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/scatter_nd_cn.rst b/docs/api/paddle/scatter_nd_cn.rst index 071e5f7f3c9..4744c27c5bd 100644 --- a/docs/api/paddle/scatter_nd_cn.rst +++ b/docs/api/paddle/scatter_nd_cn.rst @@ -8,14 +8,14 @@ scatter_nd -根据 :code:`index`,将 :code:`updates` 添加到一个新的张量中,从而得到输出的Tensor。这个操作与 :code:`scatter_nd_add` 类似,除了形状为 :code:`shape` 的张量是通过零初始化的。相应地,:code:`scatter_nd(index, updates, shape)` 等价于 :code:`scatter_nd_add(fluid.layers.zeros(shape, updates.dtype), index, updates)`。如果 :code:`index` 有重复元素,则将累积相应的更新,因此,由于数值近似问题,索引中重复元素的顺序不同可能会导致不同的输出结果。具体的计算方法可以参见 :code:`scatter_nd_add`。该OP是 :code:`gather_nd` 的反函数。 +根据 :code:`index`,将 :code:`updates` 添加到一个新的张量中,从而得到输出的 Tensor。这个操作与 :code:`scatter_nd_add` 类似,除了形状为 :code:`shape` 
的张量是通过零初始化的。相应地,:code:`scatter_nd(index, updates, shape)` 等价于 :code:`scatter_nd_add(fluid.layers.zeros(shape, updates.dtype), index, updates)`。如果 :code:`index` 有重复元素,则将累积相应的更新,因此,由于数值近似问题,索引中重复元素的顺序不同可能会导致不同的输出结果。具体的计算方法可以参见 :code:`scatter_nd_add`。该 OP 是 :code:`gather_nd` 的反函数。 参数 :::::::::::: - - **index** (Tensor) - 输入的索引张量,数据类型为非负int32或非负int64。它的维度 :code:`index.ndim` 必须大于1,并且 :code:`index.shape[-1] <= len(shape)` - - **updates** (Tensor) - 输入的更新张量。形状必须是 :code:`index.shape[:-1] + shape[index.shape[-1]:]`。数据类型可以是float32,float64。 - - **shape** (tuple|list) - 要求输出张量的形状。类型是tuple或者list。 + - **index** (Tensor) - 输入的索引张量,数据类型为非负 int32 或非负 int64。它的维度 :code:`index.ndim` 必须大于 1,并且 :code:`index.shape[-1] <= len(shape)` + - **updates** (Tensor) - 输入的更新张量。形状必须是 :code:`index.shape[:-1] + shape[index.shape[-1]:]`。数据类型可以是 float32,float64。 + - **shape** (tuple|list) - 要求输出张量的形状。类型是 tuple 或者 list。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/searchsorted_cn.rst b/docs/api/paddle/searchsorted_cn.rst index 62cb0a26f25..42dfd7394b7 100644 --- a/docs/api/paddle/searchsorted_cn.rst +++ b/docs/api/paddle/searchsorted_cn.rst @@ -9,15 +9,15 @@ searchsorted 参数 :::::::: - - **sorted_sequence** (Tensor) - 输入的N维或一维Tensor,支持的数据类型:float32、float64、int32、int64。该Tensor的数值在其最后一个维度递增。 - - **values** (Tensor) - 输入的N维Tensor,支持的数据类型:float32、float64、int32、int64。 - - **out_int32** (bool,可选) - 输出的数据类型支持int32、int64。默认值为False,表示默认的输出数据类型为int64。 - - **right** (bool,可选) - 根据给定 ``values`` 在 ``sorted_sequence`` 查找对应的上边界或下边界。默认值为False,表示在 ``sorted_sequence`` 的查找给定 ``values`` 的下边界。 + - **sorted_sequence** (Tensor) - 输入的 N 维或一维 Tensor,支持的数据类型:float32、float64、int32、int64。该 Tensor 的数值在其最后一个维度递增。 + - **values** (Tensor) - 输入的 N 维 Tensor,支持的数据类型:float32、float64、int32、int64。 + - **out_int32** (bool,可选) - 输出的数据类型支持 int32、int64。默认值为 False,表示默认的输出数据类型为 int64。 + - **right** (bool,可选) - 根据给定 ``values`` 在 ``sorted_sequence`` 查找对应的上边界或下边界。默认值为 False,表示在 
``sorted_sequence`` 的查找给定 ``values`` 的下边界。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::: -Tensor(与 ``values`` 维度相同),如果参数 ``out_int32`` 为False,则返回数据类型为int32的Tensor,否则将返回int64的Tensor。 +Tensor(与 ``values`` 维度相同),如果参数 ``out_int32`` 为 False,则返回数据类型为 int32 的 Tensor,否则将返回 int64 的 Tensor。 diff --git a/docs/api/paddle/seed_cn.rst b/docs/api/paddle/seed_cn.rst index 3cf9ee3b069..6ff5a6837f0 100644 --- a/docs/api/paddle/seed_cn.rst +++ b/docs/api/paddle/seed_cn.rst @@ -6,7 +6,7 @@ seed .. py:function:: paddle.seed(seed) -设置全局默认generator的随机种子。 +设置全局默认 generator 的随机种子。 参数 @@ -18,7 +18,7 @@ seed 返回 :::::::::::: - Generator:全局默认generator对象。 + Generator:全局默认 generator 对象。 代码示例 :::::::::::: diff --git a/docs/api/paddle/set_default_dtype_cn.rst b/docs/api/paddle/set_default_dtype_cn.rst index ca3c65aa4c9..e272f8d200f 100644 --- a/docs/api/paddle/set_default_dtype_cn.rst +++ b/docs/api/paddle/set_default_dtype_cn.rst @@ -13,7 +13,7 @@ set_default_dtype :::::::::::: - - **d** (string|np.dtype) - 设为默认值的dtype。它仅支持float16、float32和float64。 + - **d** (string|np.dtype) - 设为默认值的 dtype。它仅支持 float16、float32 和 float64。 返回 :::::::::::: diff --git a/docs/api/paddle/set_flags_cn.rst b/docs/api/paddle/set_flags_cn.rst index 6fc6f64d94f..c349d25cb77 100644 --- a/docs/api/paddle/set_flags_cn.rst +++ b/docs/api/paddle/set_flags_cn.rst @@ -6,14 +6,14 @@ set_flags .. 
py:function:: paddle.set_flags(flags) -设置Paddle 环境变量FLAGS,详情请查看 :ref:`cn_guides_flags_flags` +设置 Paddle 环境变量 FLAGS,详情请查看 :ref:`cn_guides_flags_flags` 参数 :::::::::::: - - **flags** (dict {flags: value}) - 设置FLAGS标志 + - **flags** (dict {flags: value}) - 设置 FLAGS 标志 返回 :::::::::::: diff --git a/docs/api/paddle/set_printoptions_cn.rst b/docs/api/paddle/set_printoptions_cn.rst index 43f2f992713..9811fdf1d0b 100644 --- a/docs/api/paddle/set_printoptions_cn.rst +++ b/docs/api/paddle/set_printoptions_cn.rst @@ -11,11 +11,11 @@ set_printoptions 参数 ::::::::: - - **precision** (int,可选) - 浮点数的小数位数,默认值为8。 - - **threshold** (int,可选) - 打印的元素个数上限,默认值为1000。 - - **edgeitems** (int,可选) - 以缩略形式打印时左右两边的元素个数,默认值为3。 + - **precision** (int,可选) - 浮点数的小数位数,默认值为 8。 + - **threshold** (int,可选) - 打印的元素个数上限,默认值为 1000。 + - **edgeitems** (int,可选) - 以缩略形式打印时左右两边的元素个数,默认值为 3。 - **sci_mode** (bool,可选) - 是否以科学计数法打印,默认值为 False。 - - **linewidth** (int,可选) – 每行的字符数,默认值为80。 + - **linewidth** (int,可选) – 每行的字符数,默认值为 80。 返回 diff --git a/docs/api/paddle/shape_cn.rst b/docs/api/paddle/shape_cn.rst index 28b38846934..65b47ac758d 100755 --- a/docs/api/paddle/shape_cn.rst +++ b/docs/api/paddle/shape_cn.rst @@ -8,36 +8,36 @@ shape -shape层。 +shape 层。 -获得输入Tensor或SelectedRows的shape。 +获得输入 Tensor 或 SelectedRows 的 shape。 :: - 示例1: - 输入是 N-D Tensor类型: + 示例 1: + 输入是 N-D Tensor 类型: input = [ [1, 2, 3, 4], [5, 6, 7, 8] ] - 输出shape: + 输出 shape: input.shape = [2, 4] - 示例2: - 输入是 SelectedRows类型: + 示例 2: + 输入是 SelectedRows 类型: input.rows = [0, 4, 19] input.height = 20 input.value = [ [1, 2], [3, 4], [5, 6] ] # inner tensor - 输出shape: + 输出 shape: input.shape = [3, 2] 参数 :::::::::::: - - **input** (Tensor)- 输入的多维Tensor或SelectedRows,数据类型为float16,float32,float64,int32,int64。如果输入是SelectedRows类型,则返回其内部持有Tensor的shape。 + - **input** (Tensor)- 输入的多维 Tensor 或 SelectedRows,数据类型为 float16,float32,float64,int32,int64。如果输入是 SelectedRows 类型,则返回其内部持有 Tensor 的 shape。 返回 :::::::::::: - Tensor,表示输入Tensor或SelectedRows的shape。 + Tensor,表示输入 Tensor 
或 SelectedRows 的 shape。 代码示例 diff --git a/docs/api/paddle/shard_index_cn.rst b/docs/api/paddle/shard_index_cn.rst index 4b7485c71bc..12deefe76e5 100644 --- a/docs/api/paddle/shard_index_cn.rst +++ b/docs/api/paddle/shard_index_cn.rst @@ -5,7 +5,7 @@ shard_index .. py:function:: paddle.shard_index(input, index_num, nshards, shard_id, ignore_value=-1) -根据当前shard重新设置输入参数\ `input`\ 的值。输入\ `input`\ 中的值需要为非负整型;参数\ `index_num`\ 为用户设置的大于\ `input`\ 最大值的整型值。因此,\ `input`\ 中的值属于区间[0, index_num),且每个值可以被看作到区间起始的偏移量。区间可以被进一步划分为多个切片。具体地讲,我们首先根据下面的公式计算每个切片的大小:\ `shard_size`\,表示每个切片可以表示的整数的数量。因此,对于第\ `i`\ 个切片,其表示的区间为[i*shard_size, (i+1)*shard_size)。 +根据当前 shard 重新设置输入参数\ `input`\ 的值。输入\ `input`\ 中的值需要为非负整型;参数\ `index_num`\ 为用户设置的大于\ `input`\ 最大值的整型值。因此,\ `input`\ 中的值属于区间[0, index_num),且每个值可以被看作到区间起始的偏移量。区间可以被进一步划分为多个切片。具体地讲,我们首先根据下面的公式计算每个切片的大小:\ `shard_size`\,表示每个切片可以表示的整数的数量。因此,对于第\ `i`\ 个切片,其表示的区间为[i*shard_size, (i+1)*shard_size)。 :: @@ -20,10 +20,10 @@ shard_index 参数 :::::::::::: - - input (Tensor)- 输入tensor,最后一维的维度值为1,数据类型为int64或int32。 + - input (Tensor)- 输入 tensor,最后一维的维度值为 1,数据类型为 int64 或 int32。 - index_num (int) - 用户设置的大于\ `input`\ 最大值的整型值。 - nshards (int) - 分片数量。 - - shard_id (int) - 当前分片ID。 + - shard_id (int) - 当前分片 ID。 - ignore_value (int) - 超出分片范围的默认值。 返回 diff --git a/docs/api/paddle/sign_cn.rst b/docs/api/paddle/sign_cn.rst index 4755aed9bf1..2bf12a5cb5a 100644 --- a/docs/api/paddle/sign_cn.rst +++ b/docs/api/paddle/sign_cn.rst @@ -5,7 +5,7 @@ sign .. 
py:function:: paddle.sign(x, name=None) -对输入参数 ``x`` 中每个元素进行正负判断,并且输出正负判断值:1代表正,-1代表负,0代表零。 +对输入参数 ``x`` 中每个元素进行正负判断,并且输出正负判断值:1 代表正,-1 代表负,0 代表零。 参数 :::::::::::: diff --git a/docs/api/paddle/signal/Overview_cn.rst b/docs/api/paddle/signal/Overview_cn.rst index d717f9806ae..9844bc7e2b2 100644 --- a/docs/api/paddle/signal/Overview_cn.rst +++ b/docs/api/paddle/signal/Overview_cn.rst @@ -3,11 +3,11 @@ paddle.signal --------------------- -paddle.signal 目录下包含飞桨框架支持的数字信号处理的相关API。具体如下: +paddle.signal 目录下包含飞桨框架支持的数字信号处理的相关 API。具体如下: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" " :ref:`paddle.signal.stft ` ", "短时傅里叶变换" " :ref:`paddle.signal.istft ` ", "逆短时傅里叶变换" diff --git a/docs/api/paddle/signal/istft_cn.rst b/docs/api/paddle/signal/istft_cn.rst index bee5cd27a9a..88b723d4f2a 100644 --- a/docs/api/paddle/signal/istft_cn.rst +++ b/docs/api/paddle/signal/istft_cn.rst @@ -8,7 +8,7 @@ istft 逆短时傅里叶变换。 -当输入的窗函数满足NOLA条件时,可以通过逆短时傅里叶变换构建原始信号,NOLA条件: +当输入的窗函数满足 NOLA 条件时,可以通过逆短时傅里叶变换构建原始信号,NOLA 条件: .. 
math:: @@ -29,21 +29,21 @@ istft 参数 ::::::::: -- **x** (Tensor) - 输入数据,是维度为2D或者3D的Tensor,数据类型必须为复数(复信号),其 +- **x** (Tensor) - 输入数据,是维度为 2D 或者 3D 的 Tensor,数据类型必须为复数(复信号),其 形状为 ``[..., fft_size, num_frames]``; - **n_fft** (int) - 离散傅里叶变换的样本点个数; - **hop_length** (int,可选) - 对输入分帧时,相邻两帧偏移的样本点个数,默认为 ``None`` (为 ``n_fft//4``); - **win_length** (int,可选) - 信号窗的长度,默认为 ``None`` (为 ``n_fft``); -- **window** (int,可选) - 维度为1D长度为 ``win_length`` 的Tensor,数据类型可为复数。 - 如果 ``win_length < n_fft``,该Tensor将被补长至 ``n_fft``。默认为 ``None`` (长 - 度为 ``win_length`` 幅值为1的矩形窗); +- **window** (int,可选) - 维度为 1D 长度为 ``win_length`` 的 Tensor,数据类型可为复数。 + 如果 ``win_length < n_fft``,该 Tensor 将被补长至 ``n_fft``。默认为 ``None`` (长 + 度为 ``win_length`` 幅值为 1 的矩形窗); - **center** (bool,可选) - 选择是否将输入信号进行补长,使得第 :math:`t \times hop\_length` 个样本点在第 :math:`t` 帧的中心,默认为 ``True``; - **normalized** (bool,可选) - 是否将傅里叶变换的结果乘以值为 ``1/sqrt(n)`` 的缩放 系数; - **onesided** (bool,可选) - 该参数与 ``paddle.signal.stft`` 中的有区别,此处表示 - 告知接口输入的 ``x`` 是否为满足共轭对称性的短时傅里叶变换Tensor的一半。若满足上述条件, + 告知接口输入的 ``x`` 是否为满足共轭对称性的短时傅里叶变换 Tensor 的一半。若满足上述条件, 且设为 ``True``,则 ``paddle.signal.istft`` 将返回一个实信号,默认为 ``True``; - **length** (int,可选) - 指定输出信号的长度,该信号将从逆短时傅里叶变换的结果中截取。 默认为 ``None`` (返回不截取的信号); @@ -54,7 +54,7 @@ istft 返回 ::::::::: -逆短时傅里叶变换的结果,是重构信号的最小二乘估计Tensor,其形状为 ``[..., seq_length]``。 +逆短时傅里叶变换的结果,是重构信号的最小二乘估计 Tensor,其形状为 ``[..., seq_length]``。 代码示例 ::::::::: diff --git a/docs/api/paddle/signal/stft_cn.rst b/docs/api/paddle/signal/stft_cn.rst index 93c790f8ab5..829a025be29 100644 --- a/docs/api/paddle/signal/stft_cn.rst +++ b/docs/api/paddle/signal/stft_cn.rst @@ -30,18 +30,18 @@ stft 参数 ::::::::: -- **x** (Tensor) - 输入数据,是维度为1D或者2D的Tensor,数据类型可为复数(复信号),其形状 +- **x** (Tensor) - 输入数据,是维度为 1D 或者 2D 的 Tensor,数据类型可为复数(复信号),其形状 为 ``[..., seq_length]``; - **n_fft** (int) - 离散傅里叶变换的样本点个数; - **hop_length** (int,可选) - 对输入分帧时,相邻两帧偏移的样本点个数,默认为 ``None`` (为 ``n_fft//4``); - **win_length** (int,可选) - 信号窗的长度,默认为 ``None`` (为 ``n_fft``); -- **window** (int,可选) - 维度为1D长度为 
``win_length`` 的Tensor,数据类型可为复数。 - 如果 ``win_length < n_fft``,该Tensor将被补长至 ``n_fft``。默认为 ``None`` (长度 - 为 ``win_length`` 幅值为1的矩形窗); +- **window** (int,可选) - 维度为 1D 长度为 ``win_length`` 的 Tensor,数据类型可为复数。 + 如果 ``win_length < n_fft``,该 Tensor 将被补长至 ``n_fft``。默认为 ``None`` (长度 + 为 ``win_length`` 幅值为 1 的矩形窗); - **center** (bool,可选) - 选择是否将输入信号进行补长,使得第 :math:`t \times hop\_length` 个样本点在第 ``t`` 帧的中心,默认为 ``True``; -- **pad_mode** (str,可选) - 当 ``center`` 为 ``True`` 时,确定padding的模式,模式 +- **pad_mode** (str,可选) - 当 ``center`` 为 ``True`` 时,确定 padding 的模式,模式 的选项可以参考 ``paddle.nn.functional.pad``,默认为 "reflect"; - **normalized** (bool,可选) - 是否将傅里叶变换的结果乘以值为 ``1/sqrt(n)`` 的缩放系 数; @@ -52,7 +52,7 @@ stft 返回 ::::::::: -短时傅里叶变换的结果,复数Tensor。当输入实信号和实窗函数,如果 ``onesided`` 为 ``True``, +短时傅里叶变换的结果,复数 Tensor。当输入实信号和实窗函数,如果 ``onesided`` 为 ``True``, 其形状为 ``[..., n_fft//2 + 1, num_frames]``;否则为 ``[..., n_fft, num_frames]``。 代码示例 diff --git a/docs/api/paddle/sin_cn.rst b/docs/api/paddle/sin_cn.rst index a69a2abf9f2..62bfde83b21 100644 --- a/docs/api/paddle/sin_cn.rst +++ b/docs/api/paddle/sin_cn.rst @@ -13,12 +13,12 @@ sin 参数 :::::::::::: - - **x** (Tensor) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 + - **x** (Tensor) - 支持任意维度的 Tensor。数据类型为 float32,float64 或 float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -返回类型为Tensor,数据类型同输入一致。 +返回类型为 Tensor,数据类型同输入一致。 代码示例 :::::::::::: diff --git a/docs/api/paddle/sinh_cn.rst b/docs/api/paddle/sinh_cn.rst index 6f7c88a4535..3d26020341d 100644 --- a/docs/api/paddle/sinh_cn.rst +++ b/docs/api/paddle/sinh_cn.rst @@ -19,12 +19,12 @@ sinh 参数 :::::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64 、float16。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64 、float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -输出Tensor,与 ``x`` 维度相同、数据类型相同。 +输出 Tensor,与 ``x`` 维度相同、数据类型相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/slice_cn.rst b/docs/api/paddle/slice_cn.rst index 
54c0ef0ddce..0fd6bb14da1 100755 --- a/docs/api/paddle/slice_cn.rst +++ b/docs/api/paddle/slice_cn.rst @@ -8,11 +8,11 @@ slice -沿多个轴生成 ``input`` 的切片。与numpy类似:https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html 该OP使用 ``axes`` 、 ``starts`` 和 ``ends`` 属性来指定轴列表中每个轴的起点和终点位置,并使用此信息来对 ``input`` 切片。如果向 ``starts`` 或 ``ends`` 传递负值如 :math:`-i`,则表示该轴的反向第 :math:`i-1` 个位置(这里以0为初始位置)。如果传递给 ``starts`` 或 ``end`` 的值大于n(维度中的元素数目),则表示n。当切片一个未知数量的维度时,建议传入 ``INT_MAX``。 ``axes`` 、 ``starts`` 和 ``ends`` 三个参数的元素数目必须相等。以下示例将解释切片如何工作: +沿多个轴生成 ``input`` 的切片。与 numpy 类似:https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html 该 OP 使用 ``axes`` 、 ``starts`` 和 ``ends`` 属性来指定轴列表中每个轴的起点和终点位置,并使用此信息来对 ``input`` 切片。如果向 ``starts`` 或 ``ends`` 传递负值如 :math:`-i`,则表示该轴的反向第 :math:`i-1` 个位置(这里以 0 为初始位置)。如果传递给 ``starts`` 或 ``end`` 的值大于 n(维度中的元素数目),则表示 n。当切片一个未知数量的维度时,建议传入 ``INT_MAX``。 ``axes`` 、 ``starts`` 和 ``ends`` 三个参数的元素数目必须相等。以下示例将解释切片如何工作: :: - 示例1: + 示例 1: 给定: data=[[1,2,3,4],[5,6,7,8],] axes=[0,1] @@ -21,11 +21,11 @@ slice 则: result=[[5,6,7],] - 示例2: + 示例 2: 给定: data=[[1,2,3,4],[5,6,7,8],] starts=[0,1] - ends=[-1,1000] # 此处-1表示第0维的反向第0个位置,索引值是1。 + ends=[-1,1000] # 此处-1 表示第 0 维的反向第 0 个位置,索引值是 1。 则: result=[[2,3,4],] # 即 data[0:1, 1:4] @@ -34,8 +34,8 @@ slice - **input** (Tensor)- 多维 ``Tensor``,数据类型为 ``float16``, ``float32``,``float64``,``int32``,或 ``int64``。 - **axes** (list|tuple)- 数据类型是 ``int32``。表示进行切片的轴。 - - **starts** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``starts`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``starts`` 的类型是 ``Tensor``,则是1-D ``Tensor``。表示在各个轴上切片的起始索引值。 - - **ends** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``ends`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``ends`` 的类型是 ``Tensor``,则是1-D ``Tensor``。表示在各个轴上切片的结束索引值。 + - **starts** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``starts`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``starts`` 的类型是 ``Tensor``,则是 1-D ``Tensor``。表示在各个轴上切片的起始索引值。 + - **ends** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 
``ends`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``ends`` 的类型是 ``Tensor``,则是 1-D ``Tensor``。表示在各个轴上切片的结束索引值。 返回 :::::::::::: diff --git a/docs/api/paddle/sort_cn.rst b/docs/api/paddle/sort_cn.rst index 0b0d00ce8a2..851915263e7 100644 --- a/docs/api/paddle/sort_cn.rst +++ b/docs/api/paddle/sort_cn.rst @@ -14,8 +14,8 @@ sort :::::::::::: - **x** (Tensor) - 输入的多维 ``Tensor``,支持的数据类型:float32、float64、int16、int32、int64、uint8。 - - **axis** (int,可选) - 指定对输入Tensor进行运算的轴,``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为-1。 - - **descending** (bool,可选) - 指定算法排序的方向。如果设置为True,算法按照降序排序。如果设置为False或者不设置,按照升序排序。默认值为False。 + - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴,``axis`` 的有效范围是[-R, R),R 是输入 ``x`` 的 Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为-1。 + - **descending** (bool,可选) - 指定算法排序的方向。如果设置为 True,算法按照降序排序。如果设置为 False 或者不设置,按照升序排序。默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/sparse/sparse_coo_tensor_cn.rst b/docs/api/paddle/sparse/sparse_coo_tensor_cn.rst index 44e2c62486a..0904930d4de 100644 --- a/docs/api/paddle/sparse/sparse_coo_tensor_cn.rst +++ b/docs/api/paddle/sparse/sparse_coo_tensor_cn.rst @@ -6,36 +6,36 @@ sparse_coo_tensor .. 
py:function:: paddle.sparse.sparse_coo_tensor(indices, values, shape=None, dtype=None, place=None, stop_gradient=True) -该API通过已知的非零元素的 ``indices`` 和 ``values`` 来创建一个coordinate格式的稀疏tensor,tensor类型为 ``paddle.Tensor`` 。 +该 API 通过已知的非零元素的 ``indices`` 和 ``values`` 来创建一个 coordinate 格式的稀疏 tensor,tensor 类型为 ``paddle.Tensor`` 。 -其中 ``indices`` 是存放坐标信息,是一个二维数组,每一列是对应非零元素的坐标,shape是 ``[sparse_dim, nnz]`` , ``sparse_dim`` 是坐标的维度,``nnz`` 是非零元素的个数。 +其中 ``indices`` 是存放坐标信息,是一个二维数组,每一列是对应非零元素的坐标,shape 是 ``[sparse_dim, nnz]`` , ``sparse_dim`` 是坐标的维度,``nnz`` 是非零元素的个数。 -其中 ``values`` 是存放非零元素,是一个多维数组,shape是 ``[nnz, {dense_dim}]`` , nnz是非零元素个数,``dense_dim`` 是非零元素的维度。 +其中 ``values`` 是存放非零元素,是一个多维数组,shape 是 ``[nnz, {dense_dim}]`` , nnz 是非零元素个数,``dense_dim`` 是非零元素的维度。 -如果 ``values`` 已经是一个tensor,且 ``dtype`` 、 ``place`` 没有发生变化,将不会发生 tensor 的拷贝并返回原来的 tensor。 -否则会创建一个新的tensor,且不保留原来计算图。 +如果 ``values`` 已经是一个 tensor,且 ``dtype`` 、 ``place`` 没有发生变化,将不会发生 tensor 的拷贝并返回原来的 tensor。 +否则会创建一个新的 tensor,且不保留原来计算图。 参数 ::::::::: - - **indices** (list|tuple|ndarray|Tensor) - 初始化tensor的数据,可以是 - list,tuple,numpy\.ndarray,paddle\.Tensor类型。 - - **values** (list|tuple|ndarray|Tensor) - 初始化tensor的数据,可以是 - list,tuple,numpy\.ndarray,paddle\.Tensor类型。 - - **shape** (list|tuple, optional) - 稀疏Tensor的形状,也是Tensor的形状,如果没有提供,将自动推测出最小的形状。 - - **dtype** (str|np.dtype, optional) - 创建tensor的数据类型,可以是 'bool' ,'float16','float32', + - **indices** (list|tuple|ndarray|Tensor) - 初始化 tensor 的数据,可以是 + list,tuple,numpy\.ndarray,paddle\.Tensor 类型。 + - **values** (list|tuple|ndarray|Tensor) - 初始化 tensor 的数据,可以是 + list,tuple,numpy\.ndarray,paddle\.Tensor 类型。 + - **shape** (list|tuple, optional) - 稀疏 Tensor 的形状,也是 Tensor 的形状,如果没有提供,将自动推测出最小的形状。 + - **dtype** (str|np.dtype, optional) - 创建 tensor 的数据类型,可以是 'bool' ,'float16','float32', 'float64' ,'int8','int16','int32','int64','uint8','complex64','complex128'。 - 默认值为None,如果 ``values`` 为python浮点类型,则从 + 默认值为 None,如果 ``values`` 为 python 浮点类型,则从 :ref:`cn_api_paddle_framework_get_default_dtype` 
获取类型,如果 ``values`` 为其他类型, 则会自动推导类型。 - - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional) - 创建tensor的设备位置,可以是 - CPUPlace, CUDAPinnedPlace, CUDAPlace。默认值为None,使用全局的place。 - - **stop_gradient** (bool, optional) - 是否阻断Autograd的梯度传导。默认值为True,此时不进行梯度传传导。 + - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional) - 创建 tensor 的设备位置,可以是 - CPUPlace, CUDAPinnedPlace, CUDAPlace。默认值为 None,使用全局的 place。 + - **stop_gradient** (bool, optional) - 是否阻断 Autograd 的梯度传导。默认值为 True,此时不进行梯度传导。 返回 ::::::::: -通过 ``indices`` 和 ``values`` 创建的稀疏Tensor。 +通过 ``indices`` 和 ``values`` 创建的稀疏 Tensor。 代码示例 ::::::::: diff --git a/docs/api/paddle/sparse/sparse_csr_tensor_cn.rst b/docs/api/paddle/sparse/sparse_csr_tensor_cn.rst index c702dac3feb..355e8b8675e 100644 --- a/docs/api/paddle/sparse/sparse_csr_tensor_cn.rst +++ b/docs/api/paddle/sparse/sparse_csr_tensor_cn.rst @@ -6,40 +6,40 @@ sparse_csr_tensor .. py:function:: paddle.sparse.sparse_csr_tensor(crows, cols, values, shape, dtype=None, place=None, stop_gradient=True) -该API通过已知的非零元素的 ``crows`` , ``cols`` 和 ``values`` 来创建一个CSR(Compressed Sparse Row) 格式的稀疏tensor,tensor类型为 ``paddle.Tensor`` 。 +该 API 通过已知的非零元素的 ``crows`` , ``cols`` 和 ``values`` 来创建一个 CSR(Compressed Sparse Row) 格式的稀疏 tensor,tensor 类型为 ``paddle.Tensor`` 。 -当前 ``sparse_csr_tensor`` 要求输入的 ``crows`` 中每个batch的数据是递增的,``cols`` 也是递增的。 +当前 ``sparse_csr_tensor`` 要求输入的 ``crows`` 中每个 batch 的数据是递增的,``cols`` 也是递增的。 ``crows`` 可以是 scalar,tuple,list,numpy\.ndarray,paddle\.Tensor。 ``cols`` 可以是 scalar,tuple,list,numpy\.ndarray,paddle\.Tensor。 ``values`` 可以是 scalar,tuple,list,numpy\.ndarray,paddle\.Tensor。 -如果 ``values`` 已经是一个tensor,且 ``dtype`` 、 ``place`` 没有发生变化,将不会发生 tensor 的拷贝并返回原来的 tensor。 -否则会创建一个新的tensor,且不保留原来计算图。 +如果 ``values`` 已经是一个 tensor,且 ``dtype`` 、 ``place`` 没有发生变化,将不会发生 tensor 的拷贝并返回原来的 tensor。 +否则会创建一个新的 tensor,且不保留原来计算图。 参数 ::::::::: - **crows** (list|tuple|ndarray|Tensor) - 每行第一个非零元素在 ``values`` 的起始位置。可以是 - list,tuple,numpy\.ndarray,paddle\.Tensor类型。 + 
list,tuple,numpy\.ndarray,paddle\.Tensor 类型。 - **cols** (list|tuple|ndarray|Tensor) - 一维数组,存储每个非零元素的列信息。可以是 - list,tuple,numpy\.ndarray,paddle\.Tensor类型。 + list,tuple,numpy\.ndarray,paddle\.Tensor 类型。 - **values** (list|tuple|ndarray|Tensor) - 一维数组,存储非零元素,可以是 - list,tuple,numpy\.ndarray,paddle\.Tensor类型。 - - **shape** (list|tuple) - 稀疏Tensor的形状,也是Tensor的形状,如果没有提供,将自动推测出最小的形状。 - - **dtype** (str|np.dtype, optional) - 创建tensor的数据类型,可以是 'bool' ,'float16','float32', + list,tuple,numpy\.ndarray,paddle\.Tensor 类型。 + - **shape** (list|tuple) - 稀疏 Tensor 的形状,也是 Tensor 的形状,如果没有提供,将自动推测出最小的形状。 + - **dtype** (str|np.dtype, optional) - 创建 tensor 的数据类型,可以是 'bool' ,'float16','float32', 'float64' ,'int8','int16','int32','int64','uint8','complex64','complex128'。 - 默认值为None,如果 ``values`` 为python浮点类型,则从 + 默认值为 None,如果 ``values`` 为 python 浮点类型,则从 :ref:`cn_api_paddle_framework_get_default_dtype` 获取类型,如果 ``values`` 为其他类型, 则会自动推导类型。 - - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional) - 创建tensor的设备位置,可以是 - CPUPlace, CUDAPinnedPlace, CUDAPlace。默认值为None,使用全局的place。 - - **stop_gradient** (bool, optional) - 是否阻断Autograd的梯度传导。默认值为True,此时不进行梯度传传导。 + - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional) - 创建 tensor 的设备位置,可以是 - CPUPlace, CUDAPinnedPlace, CUDAPlace。默认值为 None,使用全局的 place。 + - **stop_gradient** (bool, optional) - 是否阻断 Autograd 的梯度传导。默认值为 True,此时不进行梯度传导。 返回 ::::::::: -通过 ``crows``, ``cols`` 和 ``values`` 创建的稀疏Tensor。 +通过 ``crows``, ``cols`` 和 ``values`` 创建的稀疏 Tensor。 **代码示例** diff --git a/docs/api/paddle/split_cn.rst b/docs/api/paddle/split_cn.rst index 9482686a01d..52c6792c3ef 100644 --- a/docs/api/paddle/split_cn.rst +++ b/docs/api/paddle/split_cn.rst @@ -11,9 +11,9 @@ split 参数 ::::::::: - - **x** (Tensor) - 输入变量,数据类型为bool、float16、float32、float64、int32、int64的多维Tensor。 + - **x** (Tensor) - 输入变量,数据类型为 bool、float16、float32、float64、int32、int64 的多维 Tensor。 - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示 Tensor 平均划分为相同大小子 Tensor 的数量。如果
``num_or_sections`` 是一个 list 或 tuple,那么它的长度代表子 Tensor 的数量,它的元素可以是整数或者形状为[1]的 Tensor,依次代表子 Tensor 需要分割成的维度的大小。list 或 tuple 的长度不能超过输入 Tensor 待分割的维度的大小。在 list 或 tuple 中,至多有一个元素值为-1,表示该值是由 ``x`` 的维度和其他 ``num_or_sections`` 中元素推断出来的。例如对一个维度为[4, 6, 6] Tensor 的第三维进行分割时,指定 ``num_or_sections=[2,-1,1]``,输出的三个 Tensor 维度分别为:[4, 6, 2],[4, 6, 3],[4, 6, 1]。 - - **axis** (int|Tensor,可选) - 整数或者形状为[1]的 Tensor,数据类型为 int32 或 int64。表示需要分割的维度。如果 ``axis < 0``,则划分的维度为 ``rank(x) + axis``。默认值为0。 + - **axis** (int|Tensor,可选) - 整数或者形状为[1]的 Tensor,数据类型为 int32 或 int64。表示需要分割的维度。如果 ``axis < 0``,则划分的维度为 ``rank(x) + axis``。默认值为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/sqrt_cn.rst b/docs/api/paddle/sqrt_cn.rst index f0b4b09ef54..c159235ae8f 100644 --- a/docs/api/paddle/sqrt_cn.rst +++ b/docs/api/paddle/sqrt_cn.rst @@ -19,12 +19,12 @@ sqrt :::::::::::: - - **x** (Tensor) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 + - **x** (Tensor) - 支持任意维度的 Tensor。数据类型为 float32,float64 或 float16。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -返回类型为Tensor,数据类型同输入一致。 +返回类型为 Tensor,数据类型同输入一致。 代码示例 :::::::::::: diff --git a/docs/api/paddle/square_cn.rst b/docs/api/paddle/square_cn.rst index c364540201b..ce150e735b5 100644 --- a/docs/api/paddle/square_cn.rst +++ b/docs/api/paddle/square_cn.rst @@ -16,12 +16,12 @@ square 参数 :::::::::::: - - **x** (Tensor) - 任意维度的Tensor,支持的数据类型:float32,float64。 + - **x** (Tensor) - 任意维度的 Tensor,支持的数据类型:float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -取平方后的Tensor,维度和数据类型同输入一致。 +取平方后的 Tensor,维度和数据类型同输入一致。 代码示例 :::::::::::: diff --git a/docs/api/paddle/squeeze_cn.rst b/docs/api/paddle/squeeze_cn.rst index c698f577fc4..c8f9362fac8 100644 --- a/docs/api/paddle/squeeze_cn.rst +++ b/docs/api/paddle/squeeze_cn.rst @@ -5,7 +5,7 @@ squeeze .. 
py:function:: paddle.squeeze(x, axis=None, name=None) -删除输入 Tensor 的 Shape 中尺寸为1的维度。如果指定了 axis,则会删除 axis 中指定的尺寸为1的维度。如果没有指定 axis,那么所有等于1的维度都会被删除。 +删除输入 Tensor 的 Shape 中尺寸为 1 的维度。如果指定了 axis,则会删除 axis 中指定的尺寸为 1 的维度。如果没有指定 axis,那么所有等于 1 的维度都会被删除。 请注意,在动态图模式下,输出 Tensor 将与输入 Tensor 共享数据,并且没有 Tensor 数据拷贝的过程。 如果不希望输入与输出共享数据,请使用 `Tensor.clone` ,例如 `squeeze_clone_x = x.squeeze().clone()` 。 @@ -47,7 +47,7 @@ squeeze 参数 ::::::::: - **x** (Tensor) - 输入的 `Tensor` ,数据类型为:float32、float64、bool、int8、int32、int64。 - - **axis** (int|list|tuple,可选) - 输入一个或一列整数,代表要压缩的轴。axis 的范围: [−ndim(x), ndim(x))] 。 如果 axis 为负数, 则 axis=axis+ndim(x) 。默认为None,表示对所有尺寸为1的维度进行压缩。 + - **axis** (int|list|tuple,可选) - 输入一个或一列整数,代表要压缩的轴。axis 的范围: [−ndim(x), ndim(x))] 。 如果 axis 为负数, 则 axis=axis+ndim(x) 。默认为 None,表示对所有尺寸为 1 的维度进行压缩。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/stack_cn.rst b/docs/api/paddle/stack_cn.rst index d31769b92ea..0fa122a8adf 100644 --- a/docs/api/paddle/stack_cn.rst +++ b/docs/api/paddle/stack_cn.rst @@ -55,9 +55,9 @@ stack 参数 ::::::::: - - **x** (list[Tensor]|tuple[Tensor]) – 输入 x 是多个 Tensor,且这些Tensor的维度和数据类型必须相同。支持的数据类型:float32、float64、int32、int64。 + - **x** (list[Tensor]|tuple[Tensor]) – 输入 x 是多个 Tensor,且这些 Tensor 的维度和数据类型必须相同。支持的数据类型:float32、float64、int32、int64。 - - **axis** (int,可选) – 指定对输入 Tensor 进行堆叠运算的轴,有效 axis 的范围是:[−(R+1),R+1],R是输入中第一个 Tensor 的维数。如果 axis < 0,则 axis=axis+R+1。默认值为0。 + - **axis** (int,可选) – 指定对输入 Tensor 进行堆叠运算的轴,有效 axis 的范围是:[−(R+1),R+1],R 是输入中第一个 Tensor 的维数。如果 axis < 0,则 axis=axis+R+1。默认值为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/standard_normal_cn.rst b/docs/api/paddle/standard_normal_cn.rst index 11569ea156d..bb2166b0873 100644 --- a/docs/api/paddle/standard_normal_cn.rst +++ b/docs/api/paddle/standard_normal_cn.rst @@ -5,17 +5,17 @@ standard_normal .. 
py:function:: paddle.standard_normal(shape, dtype=None, name=None) -返回符合标准正态分布(均值为0,标准差为1的正态随机分布)的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 +返回符合标准正态分布(均值为 0,标准差为 1 的正态随机分布)的随机 Tensor,形状为 ``shape``,数据类型为 ``dtype``。 参数 :::::::::: - - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 - - **dtype** (str|np.dtype,可选) - 输出Tensor的数据类型,支持float32、float64。当该参数值为None时,输出Tensor的数据类型为float32。默认值为None。 + - **shape** (list|tuple|Tensor) - 生成的随机 Tensor 的形状。如果 ``shape`` 是 list、tuple,则其中的元素可以是 int,或者是形状为[1]且数据类型为 int32、int64 的 Tensor。如果 ``shape`` 是 Tensor,则是数据类型为 int32、int64 的 1-D Tensor。 + - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 float32、float64。当该参数值为 None 时,输出 Tensor 的数据类型为 float32。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - Tensor:符合标准正态分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 + Tensor:符合标准正态分布的随机 Tensor,形状为 ``shape``,数据类型为 ``dtype``。 示例代码 :::::::::: diff --git a/docs/api/paddle/stanh_cn.rst b/docs/api/paddle/stanh_cn.rst index 1886ac16ae5..29b273577d1 100644 --- a/docs/api/paddle/stanh_cn.rst +++ b/docs/api/paddle/stanh_cn.rst @@ -16,8 +16,8 @@ stanh 激活函数 :::::::::: - x (Tensor) - 输入的 ``Tensor``,数据类型为:float32、float64。 - - scale_a (float,可选) - stanh激活计算公式中的输入缩放参数a。默认值为0.67。 - - scale_b (float,可选) - stanh激活计算公式中的输出缩放参数b。默认值为1.7159。 + - scale_a (float,可选) - stanh 激活计算公式中的输入缩放参数 a。默认值为 0.67。 + - scale_b (float,可选) - stanh 激活计算公式中的输出缩放参数 b。默认值为 1.7159。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/static/BuildStrategy_cn.rst b/docs/api/paddle/static/BuildStrategy_cn.rst index a30b5d273cc..b9fa0328927 100644 --- a/docs/api/paddle/static/BuildStrategy_cn.rst +++ b/docs/api/paddle/static/BuildStrategy_cn.rst @@ -9,7 +9,7 @@ BuildStrategy 返回 ::::::::: -BuildStrategy,一个BuildStrategy的实例。 +BuildStrategy,一个 BuildStrategy 的实例。 代码示例 ::::::::: @@ -45,7 
+45,7 @@ BuildStrategy,一个BuildStrategy的实例。 debug_graphviz_path ''''''''' -str类型。表示以graphviz格式向文件中写入计算图的路径,有利于调试。默认值为空字符串。 +str 类型。表示以 graphviz 格式向文件中写入计算图的路径,有利于调试。默认值为空字符串。 **代码示例** @@ -63,7 +63,7 @@ str类型。表示以graphviz格式向文件中写入计算图的路径,有利 enable_sequential_execution ''''''''' -bool类型。如果设置为True,则算子的执行顺序将与算子定义的执行顺序相同。默认为False。 +bool 类型。如果设置为 True,则算子的执行顺序将与算子定义的执行顺序相同。默认为 False。 **代码示例** @@ -81,7 +81,7 @@ bool类型。如果设置为True,则算子的执行顺序将与算子定义的 fuse_broadcast_ops ''''''''' -bool类型。表明是否融合(fuse) broadcast ops。该选项指在Reduce模式下有效,使程序运行更快。默认为False。 +bool 类型。表明是否融合(fuse) broadcast ops。该选项指在 Reduce 模式下有效,使程序运行更快。默认为 False。 **代码示例** @@ -99,7 +99,7 @@ bool类型。表明是否融合(fuse) broadcast ops。该选项指在Reduce模 fuse_elewise_add_act_ops ''''''''' -bool类型。表明是否融合(fuse) elementwise_add_op和activation_op。这会使整体执行过程更快。默认为False。 +bool 类型。表明是否融合(fuse) elementwise_add_op 和 activation_op。这会使整体执行过程更快。默认为 False。 **代码示例** @@ -117,7 +117,7 @@ bool类型。表明是否融合(fuse) elementwise_add_op和activation_op。这 fuse_relu_depthwise_conv ''''''''' -bool类型。表明是否融合(fuse) relu和depthwise_conv2d,节省GPU内存并可能加速执行过程。此选项仅适用于GPU设备。默认为False。 +bool 类型。表明是否融合(fuse) relu 和 depthwise_conv2d,节省 GPU 内存并可能加速执行过程。此选项仅适用于 GPU 设备。默认为 False。 **代码示例** @@ -134,7 +134,7 @@ bool类型。表明是否融合(fuse) relu和depthwise_conv2d,节省GPU内存 gradient_scale_strategy ''''''''' -``paddle.static.BuildStrategy.GradientScaleStrategy`` 类型。在 ``ParallelExecutor`` 中,存在三种定义loss对应梯度( *loss@grad* )的方式,分别为 ``CoeffNumDevice``, ``One`` 与 ``Customized``。默认情况下,``ParallelExecutor`` 根据设备数目来设置 *loss@grad*。如果用户需要自定义 *loss@grad*,可以选择 ``Customized`` 方法。默认为 ``CoeffNumDevice`` 。 +``paddle.static.BuildStrategy.GradientScaleStrategy`` 类型。在 ``ParallelExecutor`` 中,存在三种定义 loss 对应梯度( *loss@grad* )的方式,分别为 ``CoeffNumDevice``, ``One`` 与 ``Customized``。默认情况下,``ParallelExecutor`` 根据设备数目来设置 *loss@grad*。如果用户需要自定义 *loss@grad*,可以选择 ``Customized`` 方法。默认为 ``CoeffNumDevice`` 。 **代码示例** @@ -189,7 +189,7 @@ gradient_scale_strategy memory_optimize ''''''''' 
-bool类型或None。设为True时可用于减少总内存消耗,False表示不使用,None表示框架会自动选择使用或者不使用优化策略。当前,None意味着当GC不能使用时,优化策略将被使用。默认为None。 +bool 类型或 None。设为 True 时可用于减少总内存消耗,False 表示不使用,None 表示框架会自动选择使用或者不使用优化策略。当前,None 意味着当 GC 不能使用时,优化策略将被使用。默认为 None。 reduce_strategy ''''''''' @@ -212,7 +212,7 @@ reduce_strategy remove_unnecessary_lock ''''''''' -bool类型。设置True会去除GPU操作中的一些锁操作,``ParallelExecutor`` 将运行得更快,默认为True。 +bool 类型。设置 True 会去除 GPU 操作中的一些锁操作,``ParallelExecutor`` 将运行得更快,默认为 True。 **代码示例** @@ -230,7 +230,7 @@ bool类型。设置True会去除GPU操作中的一些锁操作,``ParallelExecu sync_batch_norm ''''''''' -bool类型。表示是否使用同步的批正则化,即在训练阶段通过多个设备同步均值和方差。当前的实现不支持FP16训练和CPU。并且目前**仅支持**仅在一台机器上进行同步式批正则。默认为 False。 +bool 类型。表示是否使用同步的批正则化,即在训练阶段通过多个设备同步均值和方差。当前的实现不支持 FP16 训练和 CPU。并且目前**仅支持**仅在一台机器上进行同步式批正则。默认为 False。 **代码示例** diff --git a/docs/api/paddle/static/CompiledProgram_cn.rst b/docs/api/paddle/static/CompiledProgram_cn.rst index 2f66e6e2296..9576dbd9713 100644 --- a/docs/api/paddle/static/CompiledProgram_cn.rst +++ b/docs/api/paddle/static/CompiledProgram_cn.rst @@ -7,12 +7,12 @@ CompiledProgram .. 
py:class:: paddle.static.CompiledProgram(program_or_graph, build_strategy=None) -CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等,关于build_strategy更多信息。请参阅 ``fluid.BuildStrategy`` 。 +CompiledProgram 根据 `build_strategy` 的配置将输入的 Program 或 Graph 进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等,关于 build_strategy 更多信息。请参阅 ``fluid.BuildStrategy`` 。 参数 ::::::::: - - **program_or_graph** (Graph|Program):该参数为被执行的Program或Graph。 - - **build_strategy** (BuildStrategy):通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多信息,请参阅 ``fluid.BuildStrategy``。默认为None。 + - **program_or_graph** (Graph|Program):该参数为被执行的 Program 或 Graph。 + - **build_strategy** (BuildStrategy):通过配置 build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于 build_strategy 更多信息,请参阅 ``fluid.BuildStrategy``。默认为 None。 返回 ::::::::: @@ -28,26 +28,26 @@ COPY-FROM: paddle.static.CompiledProgram with_data_parallel(loss_name=None, build_strategy=None, exec_strategy=None, share_vars_from=None, places=None) ''''''''' -该接口用于将输入的Program或Graph进行转换,以便通过数据并行模式运行该模型。用户可以通过 `build_strategy` 和 `exec_strategy` 设置计算图构建和计算图执行过程中可以进行的一些优化,例如:将梯度聚合的AllReduce操作进行融合、指定计算图运行过程中使用的线程池大小等。 +该接口用于将输入的 Program 或 Graph 进行转换,以便通过数据并行模式运行该模型。用户可以通过 `build_strategy` 和 `exec_strategy` 设置计算图构建和计算图执行过程中可以进行的一些优化,例如:将梯度聚合的 AllReduce 操作进行融合、指定计算图运行过程中使用的线程池大小等。 .. 
note:: - 如果在构建CompiledProgram和调用with_data_parallel时都指定了build_strategy,在CompiledProgram中的build_strategy会被复写,因此,如果是数据并行训练,建议在调用with_data_parallel接口时设置build_strategy。 + 如果在构建 CompiledProgram 和调用 with_data_parallel 时都指定了 build_strategy,在 CompiledProgram 中的 build_strategy 会被复写,因此,如果是数据并行训练,建议在调用 with_data_parallel 接口时设置 build_strategy。 **参数** - - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字,**注意:如果是模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。 - - **build_strategy** (BuildStrategy):通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多的信息,请参阅 ``fluid.BuildStrategy``。默认为:None。 - - **exec_strategy** (ExecutionStrategy) - 通过exec_strategy指定执行计算图过程可以调整的选项,例如线程池大小等。关于exec_strategy更多信息,请参阅 ``fluid.ExecutionStrategy``。默认为:None。 - - **share_vars_from** (CompiledProgram) - 如果设置了share_vars_from,当前的CompiledProgram将与share_vars_from指定的CompiledProgram共享参数值。需要设置该参数的情况:模型训练过程中需要进行模型测试,并且训练和测试都是采用数据并行模式,那么测试对应的CompiledProgram在调用with_data_parallel时,需要将share_vars_from设置为训练对应的CompiledProgram。由于CompiledProgram只有在第一次执行时才会将变量分发到其他设备上,因此share_vars_from指定的CompiledProgram必须在当前CompiledProgram之前运行。默认为:None。 - - **places** (list(CUDAPlace)|list(CPUPlace)) - 该参数指定模型运行所在的设备。如果希望在GPU0和GPU1上运行,places为[fluid.CUDAPlace(0), fluid.CUDAPlace(1)];如果希望使用2个CPU运行,places为[fluid.CPUPlace()] * 2。如果没有设置该参数,即该参数为None,模型执行时,将从环境变量中获取可用的设备:如果使用GPU,模型执行时,从环境变量FLAGS_selected_gpus或CUDA_VISIBLE_DEVICES中获取当前可用的设备ID;如果使用CPU,模型执行时,从环境变量CPU_NUM中获取当前可利用的CPU个数。例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为1。默认为:None。 + - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字,**注意:如果是模型训练,必须设置 loss_name,否则计算结果可能会有问题。** 默认为:None。 + - **build_strategy** (BuildStrategy):通过配置 build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于 build_strategy 更多的信息,请参阅 ``fluid.BuildStrategy``。默认为:None。 + - **exec_strategy** (ExecutionStrategy) - 通过 exec_strategy 指定执行计算图过程可以调整的选项,例如线程池大小等。关于 exec_strategy 更多信息,请参阅 ``fluid.ExecutionStrategy``。默认为:None。 + - **share_vars_from** (CompiledProgram) - 如果设置了 share_vars_from,当前的 
CompiledProgram 将与 share_vars_from 指定的 CompiledProgram 共享参数值。需要设置该参数的情况:模型训练过程中需要进行模型测试,并且训练和测试都是采用数据并行模式,那么测试对应的 CompiledProgram 在调用 with_data_parallel 时,需要将 share_vars_from 设置为训练对应的 CompiledProgram。由于 CompiledProgram 只有在第一次执行时才会将变量分发到其他设备上,因此 share_vars_from 指定的 CompiledProgram 必须在当前 CompiledProgram 之前运行。默认为:None。 + - **places** (list(CUDAPlace)|list(CPUPlace)) - 该参数指定模型运行所在的设备。如果希望在 GPU0 和 GPU1 上运行,places 为[fluid.CUDAPlace(0), fluid.CUDAPlace(1)];如果希望使用 2 个 CPU 运行,places 为[fluid.CPUPlace()] * 2。如果没有设置该参数,即该参数为 None,模型执行时,将从环境变量中获取可用的设备:如果使用 GPU,模型执行时,从环境变量 FLAGS_selected_gpus 或 CUDA_VISIBLE_DEVICES 中获取当前可用的设备 ID;如果使用 CPU,模型执行时,从环境变量 CPU_NUM 中获取当前可利用的 CPU 个数。例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为 1。默认为:None。 **返回** CompiledProgram,配置之后的 ``CompiledProgram`` 对象。 .. note:: - 1. 如果只是进行多卡测试,不需要设置loss_name以及share_vars_from。 - 2. 如果程序中既有模型训练又有模型测试,则构建模型测试所对应的CompiledProgram时必须设置share_vars_from,否则模型测试和模型训练所使用的参数是不一致。 + 1. 如果只是进行多卡测试,不需要设置 loss_name 以及 share_vars_from。 + 2. 如果程序中既有模型训练又有模型测试,则构建模型测试所对应的 CompiledProgram 时必须设置 share_vars_from,否则模型测试和模型训练所使用的参数是不一致。 **代码示例** diff --git a/docs/api/paddle/static/ExecutionStrategy_cn.rst b/docs/api/paddle/static/ExecutionStrategy_cn.rst index 451575e80cd..69c0f57ff4e 100644 --- a/docs/api/paddle/static/ExecutionStrategy_cn.rst +++ b/docs/api/paddle/static/ExecutionStrategy_cn.rst @@ -10,7 +10,7 @@ ExecutionStrategy 返回 ::::::::: -ExecutionStrategy,一个ExecutionStrategy的实例。 +ExecutionStrategy,一个 ExecutionStrategy 的实例。 代码示例 ::::::::: @@ -46,7 +46,7 @@ ExecutionStrategy,一个ExecutionStrategy的实例。 num_threads ''''''''' -int型成员。该选项表示当前 ``Executor`` 的线程池(thread pool)的大小,此线程池可用来并发执行program中的operator(算子,运算)。如果 :math:`num\_threads=1`,则所有的operator将一个接一个地执行,但在不同的program重复周期(iterations)中执行顺序可能不同。如果该选项没有被设置,则在 ``Executor`` 中,它会依据设备类型(device type)、设备数目(device count)而设置为相应值。对GPU,:math:`num\_threads=device\_count∗4`;对CPU, :math:`num\_threads=CPU\_NUM∗4`。在 ``Executor`` 中有关于 :math:`CPU\_NUM` 的详细解释。如果没有设置 
:math:`CPU\_NUM`,则设置默认值为1,并提示用户进行 :math:`CPU\_NUM` 的设置。 +int 型成员。该选项表示当前 ``Executor`` 的线程池(thread pool)的大小,此线程池可用来并发执行 program 中的 operator(算子,运算)。如果 :math:`num\_threads=1`,则所有的 operator 将一个接一个地执行,但在不同的 program 重复周期(iterations)中执行顺序可能不同。如果该选项没有被设置,则在 ``Executor`` 中,它会依据设备类型(device type)、设备数目(device count)而设置为相应值。对 GPU,:math:`num\_threads=device\_count∗4`;对 CPU, :math:`num\_threads=CPU\_NUM∗4`。在 ``Executor`` 中有关于 :math:`CPU\_NUM` 的详细解释。如果没有设置 :math:`CPU\_NUM`,则设置默认值为 1,并提示用户进行 :math:`CPU\_NUM` 的设置。 **代码示例** @@ -63,11 +63,11 @@ int型成员。该选项表示当前 ``Executor`` 的线程池(thread pool)的 num_iteration_per_drop_scope ''''''''' -int型成员。该选项表示间隔多少次迭代之后清理一次临时变量。模型运行过程中,生成的中间临时变量将被放到local execution scope中,为了避免对临时变量频繁的申请与释放,通常将其设为较大的值(比如10或者100)。默认值为100。 +int 型成员。该选项表示间隔多少次迭代之后清理一次临时变量。模型运行过程中,生成的中间临时变量将被放到 local execution scope 中,为了避免对临时变量频繁的申请与释放,通常将其设为较大的值(比如 10 或者 100)。默认值为 100。 .. note:: - 1. 如果你调用run的时候fetch了数据,ParallelExecutor将会在本轮迭代结束时清理临时变量。 - 2. 在一些NLP模型中,这个策略可能会造成的GPU显存不足,此时需要减少num_iteration_per_drop_scope的值。 + 1. 如果你调用 run 的时候 fetch 了数据,ParallelExecutor 将会在本轮迭代结束时清理临时变量。 + 2. 
在一些 NLP 模型中,这个策略可能会造成的 GPU 显存不足,此时需要减少 num_iteration_per_drop_scope 的值。 **代码示例** @@ -85,7 +85,7 @@ int型成员。该选项表示间隔多少次迭代之后清理一次临时变 num_iteration_per_run ''''''''' -int型成员。它配置了当用户在python脚本中调用pe.run()时执行器会执行的迭代次数。Executor每次调用,会进行num_iteration_per_run次训练,它会使整体执行过程更快。默认值为1。 +int 型成员。它配置了当用户在 python 脚本中调用 pe.run()时执行器会执行的迭代次数。Executor 每次调用,会进行 num_iteration_per_run 次训练,它会使整体执行过程更快。默认值为 1。 **代码示例** diff --git a/docs/api/paddle/static/Executor_cn.rst b/docs/api/paddle/static/Executor_cn.rst index 55192305453..48ed619b6c0 100644 --- a/docs/api/paddle/static/Executor_cn.rst +++ b/docs/api/paddle/static/Executor_cn.rst @@ -10,16 +10,16 @@ Executor -Executor支持单GPU、多GPU以及CPU运行。 +Executor 支持单 GPU、多 GPU 以及 CPU 运行。 参数 :::::::::::: - - **place** (paddle.CPUPlace()|paddle.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为 `None` 时,PaddlePaddle会根据其安装版本设置默认的运行设备。当安装的Paddle为CPU版时,默认运行设置会设置成 `CPUPlace()`,而当Paddle为GPU版时,默认运行设备会设置成 `CUDAPlace(0)`。默认值为None。 + - **place** (paddle.CPUPlace()|paddle.CUDAPlace(N)|None) – 该参数表示 Executor 执行所在的设备,这里的 N 为 GPU 对应的 ID。当该参数为 `None` 时,PaddlePaddle 会根据其安装版本设置默认的运行设备。当安装的 Paddle 为 CPU 版时,默认运行设置会设置成 `CPUPlace()`,而当 Paddle 为 GPU 版时,默认运行设备会设置成 `CUDAPlace(0)`。默认值为 None。 .. 
note:: -多卡训练初始化Executor时也只用传入一个Place或None,其他API会处理使用的多卡,见 `多卡使用方式 `_ +多卡训练初始化 Executor 时也只用传入一个 Place 或 None,其他 API 会处理使用的多卡,见 `多卡使用方式 `_ 返回 :::::::::::: @@ -42,7 +42,7 @@ Executor支持单GPU、多GPU以及CPU运行。 # place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() # exe = paddle.static.Executor(place) - # 如果不显示设置运行设备,PaddlePaddle会设置默认运行设备 + # 如果不显示设置运行设备,PaddlePaddle 会设置默认运行设备 exe = paddle.static.Executor() train_program = paddle.static.Program() @@ -53,27 +53,27 @@ Executor支持单GPU、多GPU以及CPU运行。 loss = paddle.mean(hidden) paddle.optimizer.SGD(learning_rate=0.01).minimize(loss) - # 仅运行一次startup program - # 不需要优化/编译这个startup program + # 仅运行一次 startup program + # 不需要优化/编译这个 startup program startup_program.random_seed=1 exe.run(startup_program) - # 无需编译,直接运行main program + # 无需编译,直接运行 main program x = numpy.random.random(size=(10, 1)).astype('float32') loss_data, = exe.run(train_program, feed={"X": x}, fetch_list=[loss.name]) - # 另一种方法是,编译这个main program然后运行。 - # 参考CompiledProgram以获取更多信息。 - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则PaddlePaddle会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, + # 另一种方法是,编译这个 main program 然后运行。 + # 参考 CompiledProgram 以获取更多信息。 + # 注意:如果你使用 CPU 运行程序,需要具体设置 CPU_NUM, + # 否则 PaddlePaddle 会把逻辑核的所有数目设为 CPU_NUM, + # 在这种情况下,输入的 batch size 应大于 CPU_NUM, # 否则程序会异常中断。 # 显式设置运行设备 # if not use_cuda: # os.environ['CPU_NUM'] = str(2) - # 未显示设置运行设备且安装的Paddle为CPU版本 + # 未显示设置运行设备且安装的 Paddle 为 CPU 版本 os.environ['CPU_NUM'] = str(2) compiled_prog = paddle.static.CompiledProgram( @@ -86,7 +86,7 @@ close() ''''''''' -关闭执行器。该接口主要用于对于分布式训练,调用该接口后不可以再使用该执行器。该接口会释放在PServers上和目前Trainer有关联的资源。 +关闭执行器。该接口主要用于对于分布式训练,调用该接口后不可以再使用该执行器。该接口会释放在 PServers 上和目前 Trainer 有关联的资源。 **返回** @@ -107,28 +107,28 @@ close() run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False, return_merged=True, use_prune=False) ''''''''' 
-执行指定的Program或者CompiledProgram。需要注意的是,执行器会执行Program或CompiledProgram中的所有算子,而不会根据fetch_list对Program或CompiledProgram中的算子进行裁剪。同时,需要传入运行该模型用到的scope,如果没有指定scope,执行器将使用全局scope,即paddle.static.global_scope()。 +执行指定的 Program 或者 CompiledProgram。需要注意的是,执行器会执行 Program 或 CompiledProgram 中的所有算子,而不会根据 fetch_list 对 Program 或 CompiledProgram 中的算子进行裁剪。同时,需要传入运行该模型用到的 scope,如果没有指定 scope,执行器将使用全局 scope,即 paddle.static.global_scope()。 **参数** - - **program** (Program|CompiledProgram) – 该参数为被执行的Program或CompiledProgram,如果未提供该参数,即该参数为None,在该接口内,main_program将被设置为paddle.static.default_main_program()。默认为:None。 - - **feed** (list|dict) – 该参数表示模型的输入变量。如果是单卡训练,``feed`` 为 ``dict`` 类型,如果是多卡训练,参数 ``feed`` 可以是 ``dict`` 或者 ``list`` 类型变量,如果该参数类型为 ``dict`` ,feed中的数据将会被分割(split)并分送给多个设备(CPU/GPU),即输入数据被均匀分配到不同设备上;如果该参数类型为 ``list``,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 + - **program** (Program|CompiledProgram) – 该参数为被执行的 Program 或 CompiledProgram,如果未提供该参数,即该参数为 None,在该接口内,main_program 将被设置为 paddle.static.default_main_program()。默认为:None。 + - **feed** (list|dict) – 该参数表示模型的输入变量。如果是单卡训练,``feed`` 为 ``dict`` 类型,如果是多卡训练,参数 ``feed`` 可以是 ``dict`` 或者 ``list`` 类型变量,如果该参数类型为 ``dict`` ,feed 中的数据将会被分割(split)并分送给多个设备(CPU/GPU),即输入数据被均匀分配到不同设备上;如果该参数类型为 ``list``,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 - **fetch_list** (list) – 该参数表示模型运行之后需要返回的变量。默认为:None。 - **feed_var_name** (str) – 该参数表示数据输入算子(feed operator)的输入变量名称。默认为:"feed"。 - **fetch_var_name** (str) – 该参数表示结果获取算子(fetch operator)的输出变量名称。默认为:"fetch"。 - - **scope** (Scope) – 该参数表示执行当前program所使用的作用域,用户可以为不同的program指定不同的作用域。默认值:paddle.static.global_scope()。 - - **return_numpy** (bool) – 该参数表示是否将返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。 - - **use_program_cache** (bool) – 该参数表示是否对输入的Program进行缓存。如果该参数为True,在以下情况时,模型运行速度可能会更快:输入的program为 ``paddle.static.Program``,并且模型运行过程中,调用该接口的参数(program、 feed变量名和fetch_list变量)名始终不变。默认为:False。 - - **return_merged** (bool) – 该参数表示是否按照执行设备维度将返回的计算结果(fetch list中指定的变量)进行合并。如果 ``return_merged`` 
设为False,返回值类型是一个Tensor的二维列表( ``return_numpy`` 设为Fasle时)或者一个numpy.ndarray的二维列表( ``return_numpy`` 设为True时)。如果 ``return_merged`` 设为True,返回值类型是一个Tensor的一维列表( ``return_numpy`` 设为Fasle时)或者一个numpy.ndarray的一维列表( ``return_numpy`` 设为True时)。更多细节请参考示例代码2。如果返回的计算结果是变长的,请设置 ``return_merged`` 为False,即不按照执行设备维度合并返回的计算结果。该参数的默认值为True,但这仅是为了兼容性考虑,在未来的版本中默认值可能会更改为False。 - - **use_prune** (bool) - 该参数表示输入Program是否会被裁剪。如果该参数为True,会根据feed和fetch_list裁剪Program,这意味着对生成fetch_list没有必要的算子和变量会被裁剪掉。默认为False,即算子和变量在运行过程不会被裁剪。注意如果Optimizer.minimize()返回的tuple被作为fetch_list参数,那么use_prune会被重载为True并且Program会被裁剪。 + - **scope** (Scope) – 该参数表示执行当前 program 所使用的作用域,用户可以为不同的 program 指定不同的作用域。默认值:paddle.static.global_scope()。 + - **return_numpy** (bool) – 该参数表示是否将返回的计算结果(fetch list 中指定的变量)转化为 numpy;如果为 False,则每个变量返回的类型为 LoDTensor,否则返回变量的类型为 numpy.ndarray。默认为:True。 + - **use_program_cache** (bool) – 该参数表示是否对输入的 Program 进行缓存。如果该参数为 True,在以下情况时,模型运行速度可能会更快:输入的 program 为 ``paddle.static.Program``,并且模型运行过程中,调用该接口的参数(program、 feed 变量名和 fetch_list 变量)名始终不变。默认为:False。 + - **return_merged** (bool) – 该参数表示是否按照执行设备维度将返回的计算结果(fetch list 中指定的变量)进行合并。如果 ``return_merged`` 设为 False,返回值类型是一个 Tensor 的二维列表( ``return_numpy`` 设为 False 时)或者一个 numpy.ndarray 的二维列表( ``return_numpy`` 设为 True 时)。如果 ``return_merged`` 设为 True,返回值类型是一个 Tensor 的一维列表( ``return_numpy`` 设为 False 时)或者一个 numpy.ndarray 的一维列表( ``return_numpy`` 设为 True 时)。更多细节请参考示例代码 2。如果返回的计算结果是变长的,请设置 ``return_merged`` 为 False,即不按照执行设备维度合并返回的计算结果。该参数的默认值为 True,但这仅是为了兼容性考虑,在未来的版本中默认值可能会更改为 False。 + - **use_prune** (bool) - 该参数表示输入 Program 是否会被裁剪。如果该参数为 True,会根据 feed 和 fetch_list 裁剪 Program,这意味着对生成 fetch_list 没有必要的算子和变量会被裁剪掉。默认为 False,即算子和变量在运行过程不会被裁剪。注意如果 Optimizer.minimize()返回的 tuple 被作为 fetch_list 参数,那么 use_prune 会被重载为 True 并且 Program 会被裁剪。 **返回** -返回fetch_list中指定的变量值。 +返回 fetch_list 中指定的变量值。 .. note:: - 1. 
如果是多卡训练,并且feed参数为dict类型,输入数据将被均匀分配到不同的卡上,例如:使用2块GPU训练,输入样本数为3,即[0, 1, 2],经过拆分之后,GPU0上的样本数为1,即[0],GPU1上的样本数为2,即[1, 2]。如果样本数少于设备数,程序会报错,因此运行模型时,应额外注意数据集的最后一个batch的样本数是否少于当前可用的CPU核数或GPU卡数,如果是少于,建议丢弃该batch。 - 2. 如果可用的CPU核数或GPU卡数大于1,则fetch出来的结果为不同设备上的相同变量值(fetch_list中的变量)在第0维拼接在一起。 + 1. 如果是多卡训练,并且 feed 参数为 dict 类型,输入数据将被均匀分配到不同的卡上,例如:使用 2 块 GPU 训练,输入样本数为 3,即[0, 1, 2],经过拆分之后,GPU0 上的样本数为 1,即[0],GPU1 上的样本数为 2,即[1, 2]。如果样本数少于设备数,程序会报错,因此运行模型时,应额外注意数据集的最后一个 batch 的样本数是否少于当前可用的 CPU 核数或 GPU 卡数,如果是少于,建议丢弃该 batch。 + 2. 如果可用的 CPU 核数或 GPU 卡数大于 1,则 fetch 出来的结果为不同设备上的相同变量值(fetch_list 中的变量)在第 0 维拼接在一起。 **代码示例 1** @@ -151,7 +151,7 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na i = paddle.zeros(shape=[1], dtype='int64') array = paddle.fluid.layers.array_write(x=loss, i=i) - #仅运行startup程序一次 + #仅运行 startup 程序一次 exe.run(paddle.static.default_startup_program()) x = numpy.random.random(size=(10, 1)).astype('float32') @@ -167,7 +167,7 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na import paddle import numpy as np - # 创建Executor对象 + # 创建 Executor 对象 paddle.enable_static() place = paddle.CUDAPlace(0) exe = paddle.static.Executor(place) @@ -179,7 +179,7 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na adam = paddle.optimizer.Adam() adam.minimize(loss) - # 运行且仅运行一次startup program + # 运行且仅运行一次 startup program exe.run(paddle.static.default_startup_program()) build_strategy = paddle.static.BuildStrategy() binary = paddle.static.CompiledProgram( @@ -188,26 +188,26 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na batch_size = 6 x = np.random.random(size=(batch_size, 1)).astype('float32') - # 1) 设置 return_merged 参数为False以获取不合并的计算结果: + # 1) 设置 return_merged 参数为 False 以获取不合并的计算结果: unmerged_prediction, = exe.run(binary, feed={'X': x}, fetch_list=[prediction.name], return_merged=False) - # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(2, 3, class_dim)。 - # 
输出结果中第一个维度值代表所使用的GPU卡数,而第二个维度值代表batch_size和所使用 - # 的GPU卡数之商。 + # 如果用户使用两个 GPU 卡来运行此 python 代码示例,输出结果将为(2, 3, class_dim)。 + # 输出结果中第一个维度值代表所使用的 GPU 卡数,而第二个维度值代表 batch_size 和所使用 + # 的 GPU 卡数之商。 print("The unmerged prediction shape: {}".format( np.array(unmerged_prediction).shape)) print(unmerged_prediction) - # 2) 设置 return_merged 参数为True以获取合并的计算结果: + # 2) 设置 return_merged 参数为 True 以获取合并的计算结果: merged_prediction, = exe.run(binary, feed={'X': x}, fetch_list=[prediction.name], return_merged=True) - # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(6, class_dim)。输出结果 - # 中第一个维度值代表batch_size值。 + # 如果用户使用两个 GPU 卡来运行此 python 代码示例,输出结果将为(6, class_dim)。输出结果 + # 中第一个维度值代表 batch_size 值。 print("The merged prediction shape: {}".format( np.array(merged_prediction).shape)) print(merged_prediction) @@ -231,18 +231,18 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na infer_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) ''''''''' -infer_from_dataset的文档与train_from_dataset几乎完全相同,只是在分布式训练中,推进梯度将在infer_from_dataset中禁用。infer_from_dataset()可以非常容易地用于多线程中的评估。 +infer_from_dataset 的文档与 train_from_dataset 几乎完全相同,只是在分布式训练中,推进梯度将在 infer_from_dataset 中禁用。infer_from_dataset()可以非常容易地用于多线程中的评估。 **参数** - - **program** (Program|CompiledProgram) – 需要执行的program,如果没有给定那么默认使用default_main_program (未编译的)。 - - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请检查Dataset文件。默认为None。 - - **scope** (Scope) – 执行这个program的域,用户可以指定不同的域。默认为全局域。 - - **thread** (int) – 用户想要在这个函数中运行的线程数量。线程的实际数量为min(Dataset.thread_num, thread),如果thread > 0,默认为0。 - - **debug** (bool) – 是否开启debug模式,默认为False。 - - **fetch_list** (Tensor List) – 返回变量列表,每个变量都会在预测过程中被打印出来,默认为None。 - - **fetch_info** (String List) – 每个变量的打印信息,默认为None。 - - **print_period** (int) – 每两次打印之间间隔的mini-batches的数量,默认为100。 + - **program** (Program|CompiledProgram) – 需要执行的 program,如果没有给定那么默认使用 default_main_program (未编译的)。 + - **dataset** 
(paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请检查 Dataset 文件。默认为 None。 + - **scope** (Scope) – 执行这个 program 的域,用户可以指定不同的域。默认为全局域。 + - **thread** (int) – 用户想要在这个函数中运行的线程数量。线程的实际数量为 min(Dataset.thread_num, thread),如果 thread > 0,默认为 0。 + - **debug** (bool) – 是否开启 debug 模式,默认为 False。 + - **fetch_list** (Tensor List) – 返回变量列表,每个变量都会在预测过程中被打印出来,默认为 None。 + - **fetch_info** (String List) – 每个变量的打印信息,默认为 None。 + - **print_period** (int) – 每两次打印之间间隔的 mini-batches 的数量,默认为 100。 **返回** @@ -255,14 +255,14 @@ infer_from_dataset的文档与train_from_dataset几乎完全相同,只是在 import paddle paddle.enable_static() - place = paddle.CPUPlace() # 使用GPU时可设置place = paddle.CUDAPlace(0) + place = paddle.CPUPlace() # 使用 GPU 时可设置 place = paddle.CUDAPlace(0) exe = paddle.static.Executor(place) x = paddle.static.data(name="x", shape=[None, 10, 10], dtype="int64") y = paddle.static.data(name="y", shape=[None, 1], dtype="int64", lod_level=1) dataset = paddle.fluid.DatasetFactory().create_dataset() dataset.set_use_var([x, y]) dataset.set_thread(1) - # 您可以设置您自己的filelist,如filelist = ["dataA.txt"] + # 您可以设置您自己的 filelist,如 filelist = ["dataA.txt"] filelist = [] dataset.set_filelist(filelist) exe.run(paddle.static.default_startup_program()) @@ -273,21 +273,21 @@ infer_from_dataset的文档与train_from_dataset几乎完全相同,只是在 train_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) ''''''''' -从预定义的数据集中训练。数据集在paddle.fluid.dataset中定义。给定程序(或编译程序),train_from_dataset将使用数据集中的所有数据样本。输入范围可由用户给出。默认情况下,范围是global_scope()。训练中的线程总数是thread。训练中使用的线程数将是数据集中threadnum的最小值,同时也是此接口中线程的值。可以设置debug,以便执行器显示所有算子的运行时间和当前训练任务的吞吐量。 +从预定义的数据集中训练。数据集在 paddle.fluid.dataset 中定义。给定程序(或编译程序),train_from_dataset 将使用数据集中的所有数据样本。输入范围可由用户给出。默认情况下,范围是 global_scope()。训练中的线程总数是 thread。训练中使用的线程数将是数据集中 threadnum 的最小值,同时也是此接口中线程的值。可以设置 debug,以便执行器显示所有算子的运行时间和当前训练任务的吞吐量。 .. 
note:: -train_from_dataset将销毁每次运行在executor中创建的所有资源。 +train_from_dataset 将销毁每次运行在 executor 中创建的所有资源。 **参数** - - **program** (Program|CompiledProgram) – 需要执行的program,如果没有给定那么默认使用default_main_program (未编译的)。 - - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请检查Dataset文件。默认为None。 - - **scope** (Scope) – 执行这个program的域,用户可以指定不同的域。默认为全局域。 - - **thread** (int) – 用户想要在这个函数中运行的线程数量。线程的实际数量为min(Dataset.thread_num, thread),如果thread > 0,默认为0。 - - **debug** (bool) – 是否开启debug模式,默认为False。 - - **fetch_list** (Tensor List) – 返回变量列表,每个变量都会在训练过程中被打印出来,默认为None。 - - **fetch_info** (String List) – 每个变量的打印信息,默认为None。 - - **print_period** (int) – 每两次打印之间间隔的mini-batches的数量,默认为100。 + - **program** (Program|CompiledProgram) – 需要执行的 program,如果没有给定那么默认使用 default_main_program (未编译的)。 + - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请检查 Dataset 文件。默认为 None。 + - **scope** (Scope) – 执行这个 program 的域,用户可以指定不同的域。默认为全局域。 + - **thread** (int) – 用户想要在这个函数中运行的线程数量。线程的实际数量为 min(Dataset.thread_num, thread),如果 thread > 0,默认为 0。 + - **debug** (bool) – 是否开启 debug 模式,默认为 False。 + - **fetch_list** (Tensor List) – 返回变量列表,每个变量都会在训练过程中被打印出来,默认为 None。 + - **fetch_info** (String List) – 每个变量的打印信息,默认为 None。 + - **print_period** (int) – 每两次打印之间间隔的 mini-batches 的数量,默认为 100。 **返回** @@ -300,14 +300,14 @@ train_from_dataset将销毁每次运行在executor中创建的所有资源。 import paddle paddle.enable_static() - place = paddle.CPUPlace() # 使用GPU时可设置place = paddle.CUDAPlace(0) + place = paddle.CPUPlace() # 使用 GPU 时可设置 place = paddle.CUDAPlace(0) exe = paddle.static.Executor(place) x = paddle.static.data(name="x", shape=[None, 10, 10], dtype="int64") y = paddle.static.data(name="y", shape=[None, 1], dtype="int64", lod_level=1) dataset = paddle.fluid.DatasetFactory().create_dataset() dataset.set_use_var([x, y]) dataset.set_thread(1) - # 您可以设置您自己的filelist,如filelist = ["dataA.txt"] + # 您可以设置您自己的 filelist,如 filelist = ["dataA.txt"] filelist = [] dataset.set_filelist(filelist) 
exe.run(paddle.static.default_startup_program()) diff --git a/docs/api/paddle/static/ExponentialMovingAverage_cn.rst b/docs/api/paddle/static/ExponentialMovingAverage_cn.rst index fb7201d196d..f174ed33d6d 100644 --- a/docs/api/paddle/static/ExponentialMovingAverage_cn.rst +++ b/docs/api/paddle/static/ExponentialMovingAverage_cn.rst @@ -21,7 +21,7 @@ ExponentialMovingAverage **衰减率调节** -一个非常接近于1的很大的衰减率将会导致平均值滑动得很慢。更优的策略是,开始时设置一个相对较小的衰减率。参数 ``thres_steps`` 允许用户传递一个变量以设置衰减率,在这种情况下, +一个非常接近于 1 的很大的衰减率将会导致平均值滑动得很慢。更优的策略是,开始时设置一个相对较小的衰减率。参数 ``thres_steps`` 允许用户传递一个变量以设置衰减率,在这种情况下, 真实的衰减率变为: .. math:: diff --git a/docs/api/paddle/static/InputSpec_cn.rst b/docs/api/paddle/static/InputSpec_cn.rst index f82ce8e1389..4048be919a6 100644 --- a/docs/api/paddle/static/InputSpec_cn.rst +++ b/docs/api/paddle/static/InputSpec_cn.rst @@ -5,15 +5,15 @@ InputSpec .. py:class:: paddle.static.InputSpec(shape=None, dtype='float32', name=None) -用于描述模型输入的签名信息,包括shape、dtype和name。 +用于描述模型输入的签名信息,包括 shape、dtype 和 name。 -此接口常用于指定高层API中模型的输入张量信息,或动态图转静态图时,指定被 ``paddle.jit.to_static`` 装饰的forward函数每个输入参数的张量信息。 +此接口常用于指定高层 API 中模型的输入张量信息,或动态图转静态图时,指定被 ``paddle.jit.to_static`` 装饰的 forward 函数每个输入参数的张量信息。 参数 :::::::::::: - - **shape** (list|tuple)- 声明维度信息的list或tuple,默认值为None。 - - **dtype** (np.dtype|str,可选)- 数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为float32。 + - **shape** (list|tuple)- 声明维度信息的 list 或 tuple,默认值为 None。 + - **dtype** (np.dtype|str,可选)- 数据类型,支持 bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为 float32。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 @@ -38,17 +38,17 @@ InputSpec :::::::::::: from_tensor(tensor, name=None) ''''''''' -该接口将根据输入Tensor的shape、dtype等信息构建InputSpec对象。 +该接口将根据输入 Tensor 的 shape、dtype 等信息构建 InputSpec 对象。 **参数** - - **tensor** (Tensor) - 用于构建InputSpec的源Tensor + - **tensor** (Tensor) - 用于构建 InputSpec 的源 Tensor - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 **返回** -根据Tensor信息构造的 
``InputSpec`` 对象。 +根据 Tensor 信息构造的 ``InputSpec`` 对象。 **代码示例** @@ -66,17 +66,17 @@ from_tensor(tensor, name=None) from_numpy(ndarray, name=None) ''''''''' -该接口将根据输入numpy ndarray的shape、dtype等信息构建InputSpec对象。 +该接口将根据输入 numpy ndarray 的 shape、dtype 等信息构建 InputSpec 对象。 **参数** - - **ndarray** (Tensor) - 用于构建InputSpec的numpy ndarray + - **ndarray** (Tensor) - 用于构建 InputSpec 的 numpy ndarray - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 **返回** -根据ndarray信息构造的 ``InputSpec`` 对象。 +根据 ndarray 信息构造的 ``InputSpec`` 对象。 **代码示例** @@ -93,15 +93,15 @@ from_numpy(ndarray, name=None) batch(batch_size) ''''''''' -该接口将batch_size插入到当前InputSpec对象的shape元组最前面。 +该接口将 batch_size 插入到当前 InputSpec 对象的 shape 元组最前面。 **参数** - - **batch_size** (int) - 被插入的batch size整型数值 + - **batch_size** (int) - 被插入的 batch size 整型数值 **返回** - 更新shape信息后的 ``InputSpec`` 对象。 + 更新 shape 信息后的 ``InputSpec`` 对象。 **代码示例** @@ -117,12 +117,12 @@ batch(batch_size) unbatch() ''''''''' -该接口将当前InputSpec对象shape[0]值移除。 +该接口将当前 InputSpec 对象 shape[0]值移除。 **返回** - 更新shape信息后的 ``InputSpec`` 对象。 + 更新 shape 信息后的 ``InputSpec`` 对象。 **代码示例** diff --git a/docs/api/paddle/static/IpuCompiledProgram_cn.rst b/docs/api/paddle/static/IpuCompiledProgram_cn.rst index 916b35c124f..95d9812e010 100644 --- a/docs/api/paddle/static/IpuCompiledProgram_cn.rst +++ b/docs/api/paddle/static/IpuCompiledProgram_cn.rst @@ -7,13 +7,13 @@ IpuCompiledProgram .. 
py:class:: paddle.static.IpuCompiledProgram(program, scope=None, ipu_strategy=None) -IpuCompiledProgram将输入的Program转换和优化成IPU所需要的形式,例如:前向图提取、计算图转化、无用的scale算子删除等。 +IpuCompiledProgram 将输入的 Program 转换和优化成 IPU 所需要的形式,例如:前向图提取、计算图转化、无用的 scale 算子删除等。 参数 ::::::::: - - **program** (Program,可选):该参数为被执行的Program。默认值为None,表示将使用默认的program,即paddle.static.default_main_program()。 - - **scope** (Scope,可选):该参数表示执行当前program所使用的作用域。默认值为None,将使用全局scope,即paddle.static.global_scope()。 - - **ipu_strategy** (IpuStrategy,可选):根据传入的ipu_strategy实例,对Program进行转换和优化,例如:计算图的float16模式、是否是训练模式、计算图需要用几个IPU等。默认为None,表示将使用默认的ipu_strategy转换Program。 + - **program** (Program,可选):该参数为被执行的 Program。默认值为 None,表示将使用默认的 program,即 paddle.static.default_main_program()。 + - **scope** (Scope,可选):该参数表示执行当前 program 所使用的作用域。默认值为 None,将使用全局 scope,即 paddle.static.global_scope()。 + - **ipu_strategy** (IpuStrategy,可选):根据传入的 ipu_strategy 实例,对 Program 进行转换和优化,例如:计算图的 float16 模式、是否是训练模式、计算图需要用几个 IPU 等。默认为 None,表示将使用默认的 ipu_strategy 转换 Program。 返回 ::::::::: @@ -29,7 +29,7 @@ COPY-FROM: paddle.static.IpuCompiledProgram compile(self, feed_list, fetch_list) ''''''''' -该接口用于将Program进行编译,以便在ipu上运行。用户可以通过 `feed_list` 、`fetch_list` 传入计算图输入和输出的名字。 +该接口用于将 Program 进行编译,以便在 ipu 上运行。用户可以通过 `feed_list` 、`fetch_list` 传入计算图输入和输出的名字。 **参数** diff --git a/docs/api/paddle/static/IpuStrategy_cn.rst b/docs/api/paddle/static/IpuStrategy_cn.rst index 0c1671ec404..6bfa8cfb2e2 100644 --- a/docs/api/paddle/static/IpuStrategy_cn.rst +++ b/docs/api/paddle/static/IpuStrategy_cn.rst @@ -12,7 +12,7 @@ IpuStrategy 返回 ::::::::: -IpuStrategy实例。 +IpuStrategy 实例。 代码示例 :::::::::: @@ -24,14 +24,14 @@ COPY-FROM: paddle.static.IpuStrategy set_graph_config(self, num_ipus, is_training, micro_batch_size, enable_manual_shard) ''''''''' -该接口用于向IpuStrategy实例传递IPU构图的Graph配置。 +该接口用于向 IpuStrategy 实例传递 IPU 构图的 Graph 配置。 **参数** - - **num_ipus** (int,可选)- 指定IPU devices的个数,默认值为1,表示仅用一个IPU。 - - **is_training** (bool,可选)- 声明是训练还是推理,默认值为True,表示使用训练模式。 - - **micro_batch_size** 
(int,可选)- 当计算图输入的micro_batch_size可变时,指定计算图中输入micro_batch_size,默认值为1,表示如果micro_batch_size可变,将默认置1。 - - **enable_manual_shard** (bool,可选)- 是否使能分割计算图到不同IPU进行运算。仅支持当num_ipus > 1时,enable_manual_shard可以置为True。默认值为False,表示不使能该功能。 + - **num_ipus** (int,可选)- 指定 IPU devices 的个数,默认值为 1,表示仅用一个 IPU。 + - **is_training** (bool,可选)- 声明是训练还是推理,默认值为 True,表示使用训练模式。 + - **micro_batch_size** (int,可选)- 当计算图输入的 micro_batch_size 可变时,指定计算图中输入 micro_batch_size,默认值为 1,表示如果 micro_batch_size 可变,将默认置 1。 + - **enable_manual_shard** (bool,可选)- 是否使能分割计算图到不同 IPU 进行运算。仅支持当 num_ipus > 1 时,enable_manual_shard 可以置为 True。默认值为 False,表示不使能该功能。 **代码示例** @@ -40,14 +40,14 @@ COPY-FROM: paddle.static.IpuStrategy.set_graph_config set_pipelining_config(self, enable_pipelining, batches_per_step, enable_gradient_accumulation, accumulation_factor) ''''''''' -该接口用于向IpuStrategy实例传递IPU构图的子图数据流水线配置。 +该接口用于向 IpuStrategy 实例传递 IPU 构图的子图数据流水线配置。 **参数** - - **enable_pipelining** (bool,可选)- 是否使能子图之间的数据流水线。仅支持当enable_manual_shard=True时,enable_pipelining可以置为True。默认值为False,表示不使能该功能。 - - **batches_per_step** (int,可选)- 指定数据流水线每次运算多少个batch的数据。默认值为1,表示不使能数据流水线功能。 - - **enable_gradient_accumulation** (bool,可选)- 是否使能梯度累积,只用于训练模式。默认值为Flase,表示不使能梯度累积功能。 - - **accumulation_factor** (int,可选)- 指定累积运算多少个batch更新一次权重。默认值为1,表示不使能权重累积更新功能。 + - **enable_pipelining** (bool,可选)- 是否使能子图之间的数据流水线。仅支持当 enable_manual_shard=True 时,enable_pipelining 可以置为 True。默认值为 False,表示不使能该功能。 + - **batches_per_step** (int,可选)- 指定数据流水线每次运算多少个 batch 的数据。默认值为 1,表示不使能数据流水线功能。 + - **enable_gradient_accumulation** (bool,可选)- 是否使能梯度累积,只用于训练模式。默认值为 False,表示不使能梯度累积功能。 + - **accumulation_factor** (int,可选)- 指定累积运算多少个 batch 更新一次权重。默认值为 1,表示不使能权重累积更新功能。 **代码示例** @@ -56,11 +56,11 @@ COPY-FROM: paddle.static.IpuStrategy.set_pipelining_config set_precision_config(self, enable_fp16) ''''''''' -该接口用于向IpuStrategy实例传递IPU构图的精度配置。 +该接口用于向 IpuStrategy 实例传递 IPU 构图的精度配置。 **参数** - - **enable_fp16** (bool)- 是否使能fp16运算模式并将fp32转换为fp16。默认值为False,表示不使能fp16运算模式。 + - **enable_fp16** (bool)- 是否使能
fp16 运算模式并将 fp32 转换为 fp16。默认值为 False,表示不使能 fp16 运算模式。 **代码示例** @@ -69,14 +69,14 @@ COPY-FROM: paddle.static.IpuStrategy.set_precision_config add_custom_op(self, paddle_op, popart_op, domain, version) ''''''''' -该接口用于向IpuStrategy实例传递PopART自定义算子的信息。 +该接口用于向 IpuStrategy 实例传递 PopART 自定义算子的信息。 **参数** - - **paddle_op** (str)- 待添加的Paddle自定义算子在的名称,根据Paddle自定义算子的定义设置此参数。 - - **popart_op** (str,可选)- 待添加的PopART自定义算子的名称,默认值为None,表示和paddle_op相同,根据PopART自定算子的定义设置此参数。 - - **domain** (str,可选)- 待添加的PopART自定义算子的domain属性,默认值为"custom.ops",根据PopART自定算子的定义设置此参数。 - - **version** (int,可选)- 待添加的PopART自定义算子的version属性,默认值为1,根据PopART自定算子的定义设置此参数。 + - **paddle_op** (str)- 待添加的 Paddle 自定义算子在的名称,根据 Paddle 自定义算子的定义设置此参数。 + - **popart_op** (str,可选)- 待添加的 PopART 自定义算子的名称,默认值为 None,表示和 paddle_op 相同,根据 PopART 自定算子的定义设置此参数。 + - **domain** (str,可选)- 待添加的 PopART 自定义算子的 domain 属性,默认值为"custom.ops",根据 PopART 自定算子的定义设置此参数。 + - **version** (int,可选)- 待添加的 PopART 自定义算子的 version 属性,默认值为 1,根据 PopART 自定算子的定义设置此参数。 **代码示例** @@ -85,7 +85,7 @@ COPY-FROM: paddle.static.IpuStrategy.add_custom_op set_options(self, options) ''''''''' -批量向IpuStrategy实例传递参数。 +批量向 IpuStrategy 实例传递参数。 **参数** @@ -98,7 +98,7 @@ COPY-FROM: paddle.static.IpuStrategy.set_options get_option(self, option) ''''''''' -获取IpuStrategy实例的某一参数。 +获取 IpuStrategy 实例的某一参数。 **参数** @@ -111,11 +111,11 @@ COPY-FROM: paddle.static.IpuStrategy.get_option enable_pattern(self, pattern) ''''''''' -启用某一PopART Pattern。 +启用某一 PopART Pattern。 **参数** - - **pattern** (str)- 需要开启的Pattern名称。 + - **pattern** (str)- 需要开启的 Pattern 名称。 **代码示例** @@ -124,11 +124,11 @@ COPY-FROM: paddle.static.IpuStrategy.enable_pattern disable_pattern(self, pattern) ''''''''' -禁用某一PopART Pattern。 +禁用某一 PopART Pattern。 **参数** - - **pattern** (str)- 需要禁用的Pattern名称。 + - **pattern** (str)- 需要禁用的 Pattern 名称。 **代码示例** @@ -137,7 +137,7 @@ COPY-FROM: paddle.static.IpuStrategy.disable_pattern register_patch(self) ''''''''' -注册patch 
function以支持IPU上的动转静功能。该函数仅应在IPU动转静时使用,注册的函数会影响原动转静的逻辑,可通过``release_patch``释放注册的函数。 +注册 patch function 以支持 IPU 上的动转静功能。该函数仅应在 IPU 动转静时使用,注册的函数会影响原动转静的逻辑,可通过``release_patch``释放注册的函数。 **代码示例** @@ -146,7 +146,7 @@ COPY-FROM: paddle.static.IpuStrategy.register_patch release_patch(self) ''''''''' -释放IPU动转静所注册的函数。 +释放 IPU 动转静所注册的函数。 **代码示例** @@ -155,11 +155,11 @@ COPY-FROM: paddle.static.IpuStrategy.release_patch set_optimizer(self, optimizer) ''''''''' -该接口用于在IPU动转静时向IpuStrategy实例设置optimizer。 +该接口用于在 IPU 动转静时向 IpuStrategy 实例设置 optimizer。 **参数** - - **optimizer** (Optimizer)- 需要设置的Optimizer实例。 + - **optimizer** (Optimizer)- 需要设置的 Optimizer 实例。 **代码示例** @@ -168,11 +168,11 @@ COPY-FROM: paddle.static.IpuStrategy.set_optimizer parse_optimizer(self, optimizer) ''''''''' -该接口用于解析IPU动转静所需要的优化器参数,接收优化器实例并返回动转静所需要的优化器属性,当前仅支持解析学习率。 +该接口用于解析 IPU 动转静所需要的优化器参数,接收优化器实例并返回动转静所需要的优化器属性,当前仅支持解析学习率。 **参数** - - **optimizer** (Optimizer)- 需要解析的Optimizer实例。 + - **optimizer** (Optimizer)- 需要解析的 Optimizer 实例。 **代码示例** @@ -183,19 +183,19 @@ COPY-FROM: paddle.static.IpuStrategy.parse_optimizer num_ipus ''''''''' -返回IpuStrategy实例中的IPU设备个数,类型为 ``Int``。 +返回 IpuStrategy 实例中的 IPU 设备个数,类型为 ``Int``。 is_training ''''''''' -返回IpuStrategy实例中的计算模式是训练模式或推理模式,类型为 ``Bool``。 +返回 IpuStrategy 实例中的计算模式是训练模式或推理模式,类型为 ``Bool``。 enable_pipelining ''''''''' -返回IpuStrategy实例中是否使能数据流水线功能,类型为 ``Bool``。 +返回 IpuStrategy 实例中是否使能数据流水线功能,类型为 ``Bool``。 enable_fp16 ''''''''' -返回IpuStrategy实例中是否使能float16计算图,类型为 ``Bool``。 +返回 IpuStrategy 实例中是否使能 float16 计算图,类型为 ``Bool``。 diff --git a/docs/api/paddle/static/Overview_cn.rst b/docs/api/paddle/static/Overview_cn.rst index 05520c040f7..2f3890f5068 100644 --- a/docs/api/paddle/static/Overview_cn.rst +++ b/docs/api/paddle/static/Overview_cn.rst @@ -3,170 +3,170 @@ paddle.static --------------------- -paddle.static 下的API为飞桨静态图专用API。具体如下: +paddle.static 下的 API 为飞桨静态图专用 API。具体如下: -- :ref:`Program相关API ` -- :ref:`Executor相关API ` -- :ref:`组网相关API ` -- :ref:`io相关API ` -- :ref:`变量相关API ` -- 
:ref:`运行设备相关API ` -- :ref:`评估指标相关API ` -- :ref:`其他API ` +- :ref:`Program 相关 API ` +- :ref:`Executor 相关 API ` +- :ref:`组网相关 API ` +- :ref:`io 相关 API ` +- :ref:`变量相关 API ` +- :ref:`运行设备相关 API ` +- :ref:`评估指标相关 API ` +- :ref:`其他 API ` .. _about_program: -Program相关API +Program 相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`append_backward ` ", "向main_program添加反向" - " :ref:`default_main_program ` ", "获取当前用于存储OP和Tensor描述信息的 `default main program` " + " :ref:`append_backward ` ", "向 main_program 添加反向" + " :ref:`default_main_program ` ", "获取当前用于存储 OP 和 Tensor 描述信息的 `default main program` " " :ref:`default_startup_program ` ", "获取默认/全局的 `startup program` " - " :ref:`Program ` ", "飞桨用Program动态描述整个计算图" - " :ref:`program_guard ` ", "配合with语句将算子和变量添加进指定的 `main program` 和 `startup program` " + " :ref:`Program ` ", "飞桨用 Program 动态描述整个计算图" + " :ref:`program_guard ` ", "配合 with 语句将算子和变量添加进指定的 `main program` 和 `startup program` " " :ref:`set_program_state ` ", "设置 Program 的参数和优化器信息" " :ref:`normalize_program ` ", "根据指定的 feed_vars 和 fetch_vars,优化 program" .. _about_executor: -Executor相关API +Executor 相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`BuildStrategy ` ", "控制 ParallelExecutor 中计算图的建造方法" - " :ref:`CompiledProgram ` ", "转化和优化Program或Graph" - " :ref:`ExecutionStrategy ` ", "调整Executor执行配置" + " :ref:`CompiledProgram ` ", "转化和优化 Program 或 Graph" + " :ref:`ExecutionStrategy ` ", "调整 Executor 执行配置" " :ref:`Executor ` ", "执行器" " :ref:`ParallelExecutor ` ", "支持基于数据并行的多节点模型训练和测试的执行器" .. _about_nn: -组网相关API +组网相关 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`batch_norm ` ", "Batch Normalization方法" + " :ref:`batch_norm ` ", "Batch Normalization 方法" " :ref:`bilinear_tensor_product ` ", "对两个输入执行双线性张量积" - " :ref:`case ` ", "以OP的运行方式类似于python的if-elif-elif-else" + " :ref:`case ` ", "以 OP 的运行方式类似于 python 的 if-elif-elif-else" " :ref:`conv2d ` ", "二维卷积层" " :ref:`conv2d_transpose ` ", "二维转置卷积层" " :ref:`conv3d ` ", "三维卷积层" " :ref:`conv3d_transpose ` ", "三维转置卷积层" - " :ref:`crf_decoding ` ", "CRF Decode层" + " :ref:`crf_decoding ` ", "CRF Decode 层" " :ref:`data_norm ` ", "数据正则化层" " :ref:`deform_conv2d ` ", "可变形卷积层" " :ref:`embedding ` ", "嵌入层" " :ref:`sparse_embedding ` ", "稀疏嵌入层" " :ref:`fc ` ", "全连接层" - " :ref:`group_norm ` ", "Group Normalization方法" - " :ref:`instance_norm ` ", "Instance Normalization方法" - " :ref:`layer_norm ` ", "Layer Normalization方法" - " :ref:`multi_box_head ` ", "SSD检测头 " + " :ref:`group_norm ` ", "Group Normalization 方法" + " :ref:`instance_norm ` ", "Instance Normalization 方法" + " :ref:`layer_norm ` ", "Layer Normalization 方法" + " :ref:`multi_box_head ` ", "SSD 检测头 " " :ref:`nce ` ", "计算并返回噪音对比估计损失" - " :ref:`prelu ` ", "prelu激活函数" + " :ref:`prelu ` ", "prelu 激活函数" " :ref:`row_conv ` ", "行卷积" - " :ref:`spectral_norm ` ", "Spectral Normalization方法" - " :ref:`switch_case ` ", "类似于c++的switch/case" - " :ref:`sequence_concat ` ", "仅支持LoDTensor ,通过LoDTensor的LoD信息将输入的多个LoDTensor进行连接,输出连接后的LoDTensor" - " :ref:`sequence_conv ` ", "仅支持LoDTensor ,在给定的卷积参数下,对输入的变长序列LoDTensor进行卷积操作" - " :ref:`sequence_enumerate ` ", "仅支持LoDTensor ,枚举形状为 [d_1, 1] 的输入序列所有长度为 win_size 的子序列,生成一个形状为 [d_1, win_size] 的新序列,需要时以 pad_value 填充" - " :ref:`sequence_expand ` ", "仅支持LoDTensor ,根据输入 y 的第 ref_level 层lod对输入 x 进行扩展" - " :ref:`sequence_expand_as ` ", "仅支持LoDTensor ,根据输入 y 的第0级lod对输入 x 进行扩展" - " :ref:`sequence_first_step ` ", "仅支持LoDTensor ,对输入的LoDTensor,在最后一层lod_level上,选取其每个序列的第一个时间步的特征向量作为池化后的输出向量" - " :ref:`sequence_last_step ` ", "仅支持LoDTensor 
,对输入的LoDTensor,在最后一层lod_level上,选取其每个序列的最后一个时间步的特征向量作为池化后的输出向量" - " :ref:`sequence_pad ` ", "仅支持LoDTensor ,将同一batch中的序列填充到一个一致的长度(由 maxlen 指定)" - " :ref:`sequence_pool ` ", "仅支持LoDTensor ,对输入的LoDTensor进行指定方式的池化操作" - " :ref:`sequence_reshape ` ", "仅支持LoDTensor ,对输入的LoDTensor进行指定方式的变形操作" - " :ref:`sequence_reverse ` ", "仅支持LoDTensor ,对输入的LoDTensor,在每个序列上进行反转" - " :ref:`sequence_slice ` ", "仅支持LoDTensor ,对输入的LoDTensor,实现序列切片运算" - " :ref:`sequence_softmax ` ", "仅支持LoDTensor ,根据LoDTensor信息将输入的第0维度进行划分,在划分的每一个区间内部进行运算" + " :ref:`spectral_norm ` ", "Spectral Normalization 方法" + " :ref:`switch_case ` ", "类似于 c++的 switch/case" + " :ref:`sequence_concat ` ", "仅支持 LoDTensor ,通过 LoDTensor 的 LoD 信息将输入的多个 LoDTensor 进行连接,输出连接后的 LoDTensor" + " :ref:`sequence_conv ` ", "仅支持 LoDTensor ,在给定的卷积参数下,对输入的变长序列 LoDTensor 进行卷积操作" + " :ref:`sequence_enumerate ` ", "仅支持 LoDTensor ,枚举形状为 [d_1, 1] 的输入序列所有长度为 win_size 的子序列,生成一个形状为 [d_1, win_size] 的新序列,需要时以 pad_value 填充" + " :ref:`sequence_expand ` ", "仅支持 LoDTensor ,根据输入 y 的第 ref_level 层 lod 对输入 x 进行扩展" + " :ref:`sequence_expand_as ` ", "仅支持 LoDTensor ,根据输入 y 的第 0 级 lod 对输入 x 进行扩展" + " :ref:`sequence_first_step ` ", "仅支持 LoDTensor ,对输入的 LoDTensor,在最后一层 lod_level 上,选取其每个序列的第一个时间步的特征向量作为池化后的输出向量" + " :ref:`sequence_last_step ` ", "仅支持 LoDTensor ,对输入的 LoDTensor,在最后一层 lod_level 上,选取其每个序列的最后一个时间步的特征向量作为池化后的输出向量" + " :ref:`sequence_pad ` ", "仅支持 LoDTensor ,将同一 batch 中的序列填充到一个一致的长度(由 maxlen 指定)" + " :ref:`sequence_pool ` ", "仅支持 LoDTensor ,对输入的 LoDTensor 进行指定方式的池化操作" + " :ref:`sequence_reshape ` ", "仅支持 LoDTensor ,对输入的 LoDTensor 进行指定方式的变形操作" + " :ref:`sequence_reverse ` ", "仅支持 LoDTensor ,对输入的 LoDTensor,在每个序列上进行反转" + " :ref:`sequence_slice ` ", "仅支持 LoDTensor ,对输入的 LoDTensor,实现序列切片运算" + " :ref:`sequence_softmax ` ", "仅支持 LoDTensor ,根据 LoDTensor 信息将输入的第 0 维度进行划分,在划分的每一个区间内部进行运算" .. _about_io: -io相关API +io 相关 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`deserialize_persistables ` ", "反序列化模型参数" - " :ref:`deserialize_program ` ", "反序列化program" + " :ref:`deserialize_program ` ", "反序列化 program" " :ref:`load ` ", "加载模型" " :ref:`load_from_file ` ", "从指定的文件中加载内容" " :ref:`load_inference_model ` ", "加载预测模型" - " :ref:`load_program_state ` ", "加载Program的参数与优化器信息" + " :ref:`load_program_state ` ", "加载 Program 的参数与优化器信息" " :ref:`save ` ", "保存模型" " :ref:`save_inference_model ` ", "保存预测模型" " :ref:`save_to_file ` ", "将内容写入指定的文件" " :ref:`serialize_persistables ` ", "序列化模型参数" - " :ref:`serialize_program ` ", "序列化program" + " :ref:`serialize_program ` ", "序列化 program" .. _about_variable: -变量相关API +变量相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`create_global_var ` ", "创建全局变量" - " :ref:`data ` ", "在全局block中创建变量" + " :ref:`data ` ", "在全局 block 中创建变量" " :ref:`gradients ` ", "将目标变量的梯度反向传播到输入变量" " :ref:`Print ` ", "打印正在访问的变量内容" " :ref:`Variable ` ", "创建参数" " :ref:`WeightNormParamAttr ` ", "权重归一化类" - " :ref:`sequence_scatter ` ", "仅支持LoDTensor,根据index提供的位置将updates中的信息更新到输出中" - " :ref:`sequence_unpad ` ", "仅支持LoDTensor ,根据length的信息,将input中padding元素移除,并且返回一个LoDTensor" + " :ref:`sequence_scatter ` ", "仅支持 LoDTensor,根据 index 提供的位置将 updates 中的信息更新到输出中" + " :ref:`sequence_unpad ` ", "仅支持 LoDTensor ,根据 length 的信息,将 input 中 padding 元素移除,并且返回一个 LoDTensor" .. _about_device: -运行设备相关API +运行设备相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`cpu_places ` ", "创建 `paddle.CPUPlace` 对象" " :ref:`cuda_places ` ", "创建 `paddle.CUDAPlace` 对象" - " :ref:`device_guard ` ", "用于指定OP运行设备的上下文管理器" + " :ref:`device_guard ` ", "用于指定 OP 运行设备的上下文管理器" .. _about_metrics: -评估指标相关API +评估指标相关 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`accuracy ` ", "计算精确率" - " :ref:`auc ` ", "计算AUC" + " :ref:`auc ` ", "计算 AUC" .. _about_others: -其他API +其他 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`global_scope ` ", "获取全局/默认作用域实例" " :ref:`InputSpec ` ", "描述模型输入的签名信息" - " :ref:`name_scope ` ", "为OP生成命名空间" + " :ref:`name_scope ` ", "为 OP 生成命名空间" " :ref:`py_func ` ", "自定义算子" " :ref:`scope_guard ` ", "切换作用域" - " :ref:`while_loop ` ", "while循环控制" + " :ref:`while_loop ` ", "while 循环控制" diff --git a/docs/api/paddle/static/ParallelExecutor_cn.rst b/docs/api/paddle/static/ParallelExecutor_cn.rst index 152b3eece49..f12807286bf 100644 --- a/docs/api/paddle/static/ParallelExecutor_cn.rst +++ b/docs/api/paddle/static/ParallelExecutor_cn.rst @@ -11,31 +11,31 @@ ParallelExecutor ``ParallelExecutor`` 是 ``Executor`` 的一个升级版本,可以支持基于数据并行的多节点模型训练和测试。如果采用数据并行模式,``ParallelExecutor`` 在构造时会将参数分发到不同的节点上,并将输入的 ``Program`` 拷贝到不同的节点,在执行过程中,各个节点独立运行模型,将模型反向计算得到的参数梯度在多个节点之间进行聚合,之后各个节点独立的进行参数的更新。 -- 如果使用GPU运行模型,即 ``use_cuda=True``,节点指代GPU, ``ParallelExecutor`` 将自动获取在当前机器上可用的GPU资源,用户也可以通过在环境变量设置可用的GPU资源,例如:希望使用GPU0、GPU1计算,export CUDA_VISIBLEDEVICES=0,1; -- 如果在CPU上进行操作,即 ``use_cuda=False``,节点指代CPU,**注意:此时需要用户在环境变量中手动添加 CPU_NUM,并将该值设置为CPU设备的个数,例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为1**。 +- 如果使用 GPU 运行模型,即 ``use_cuda=True``,节点指代 GPU, ``ParallelExecutor`` 将自动获取在当前机器上可用的 GPU 资源,用户也可以通过在环境变量设置可用的 GPU 资源,例如:希望使用 GPU0、GPU1 计算,export CUDA_VISIBLE_DEVICES=0,1; +- 如果在 CPU 上进行操作,即 ``use_cuda=False``,节点指代 CPU,**注意:此时需要用户在环境变量中手动添加 CPU_NUM,并将该值设置为 CPU 设备的个数,例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为 1**。 参数 :::::::::::: - - **use_cuda** (bool) – 该参数表示是否使用GPU执行。 - - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字。**注意:如果是数据并行模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。 - - **main_program** (Program) – 
需要被执行的Program。如果未提供该参数,即该参数为None,在该接口内,main_program将被设置为paddle.static.default_main_program()。默认为:None。 - - **share_vars_from** (ParallelExecutor) - 如果设置了share_vars_from,当前的ParallelExecutor将与share_vars_from指定的ParallelExecutor共享参数值。 - 需要设置该参数的情况:模型训练过程中需要进行模型测试,并且训练和测试都是采用数据并行模式,那么测试对应的ParallelExecutor在调用with_data_parallel时,需要将share_vars_from设置为训练所对应的ParallelExecutor。 - 由于ParallelExecutor只有在第一次执行时才会将参数变量分发到其他设备上,因此share_vars_from指定的ParallelExecutor必须在当前ParallelExecutor之前运行。默认为:None。 - - **exec_strategy** (ExecutionStrategy) - 通过exec_strategy指定执行计算图过程可以调整的选项,例如线程池大小等。关于exec_strategy更多信息,请参阅 ``paddle.static.ExecutionStrategy``。默认为:None。 - - **build_strategy** (BuildStrategy):通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多的信息,请参阅 ``paddle.static.BuildStrategy``。默认为:None。 - - **num_trainers** (int) – 进行GPU分布式训练时需要设置该参数。如果该参数值大于1,NCCL将会通过多层级节点的方式来初始化。每个节点应有相同的GPU数目。默认为:1。 - - **trainer_id** (int) – 进行GPU分布式训练时需要设置该参数。该参数必须与num_trainers参数同时使用。trainer_id指明是当前所在节点的 “rank”(层级)。trainer_id从0开始计数。默认为:0。 - - **scope** (Scope) – 指定执行Program所在的作用域。默认为:paddle.static.global_scope()。 + - **use_cuda** (bool) – 该参数表示是否使用 GPU 执行。 + - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字。**注意:如果是数据并行模型训练,必须设置 loss_name,否则计算结果可能会有问题。** 默认为:None。 + - **main_program** (Program) – 需要被执行的 Program。如果未提供该参数,即该参数为 None,在该接口内,main_program 将被设置为 paddle.static.default_main_program()。默认为:None。 + - **share_vars_from** (ParallelExecutor) - 如果设置了 share_vars_from,当前的 ParallelExecutor 将与 share_vars_from 指定的 ParallelExecutor 共享参数值。 + 需要设置该参数的情况:模型训练过程中需要进行模型测试,并且训练和测试都是采用数据并行模式,那么测试对应的 ParallelExecutor 在调用 with_data_parallel 时,需要将 share_vars_from 设置为训练所对应的 ParallelExecutor。 + 由于 ParallelExecutor 只有在第一次执行时才会将参数变量分发到其他设备上,因此 share_vars_from 指定的 ParallelExecutor 必须在当前 ParallelExecutor 之前运行。默认为:None。 + - **exec_strategy** (ExecutionStrategy) - 通过 exec_strategy 指定执行计算图过程可以调整的选项,例如线程池大小等。关于 exec_strategy 更多信息,请参阅 ``paddle.static.ExecutionStrategy``。默认为:None。 + - **build_strategy** 
(BuildStrategy):通过配置 build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于 build_strategy 更多的信息,请参阅 ``paddle.static.BuildStrategy``。默认为:None。 + - **num_trainers** (int) – 进行 GPU 分布式训练时需要设置该参数。如果该参数值大于 1,NCCL 将会通过多层级节点的方式来初始化。每个节点应有相同的 GPU 数目。默认为:1。 + - **trainer_id** (int) – 进行 GPU 分布式训练时需要设置该参数。该参数必须与 num_trainers 参数同时使用。trainer_id 指明是当前所在节点的 “rank”(层级)。trainer_id 从 0 开始计数。默认为:0。 + - **scope** (Scope) – 指定执行 Program 所在的作用域。默认为:paddle.static.global_scope()。 返回 :::::::::::: 初始化后的 ``ParallelExecutor`` 对象。 .. note:: - 1. 如果只是进行多卡测试,不需要设置loss_name以及share_vars_from。 - 2. 如果程序中既有模型训练又有模型测试,则构建模型测试所对应的ParallelExecutor时必须设置share_vars_from,否则模型测试和模型训练所使用的参数是不一致。 + 1. 如果只是进行多卡测试,不需要设置 loss_name 以及 share_vars_from。 + 2. 如果程序中既有模型训练又有模型测试,则构建模型测试所对应的 ParallelExecutor 时必须设置 share_vars_from,否则模型测试和模型训练所使用的参数是不一致。 代码示例 :::::::::::: @@ -50,9 +50,9 @@ ParallelExecutor paddle.enable_static() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则PaddlePaddle会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, + # 注意:如果你使用 CPU 运行程序,需要具体设置 CPU_NUM, + # 否则 PaddlePaddle 会把逻辑核的所有数目设为 CPU_NUM, + # 在这种情况下,输入的 batch size 应大于 CPU_NUM, # 否则程序会异常中断。 if not use_cuda: os.environ['CPU_NUM'] = str(2) @@ -73,7 +73,7 @@ ParallelExecutor train_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, main_program=train_program, loss_name=loss.name) - # 注意:如果此处不设置share_vars_from=train_exe,测试过程中用的参数与训练使用的参数是不一致 + # 注意:如果此处不设置 share_vars_from=train_exe,测试过程中用的参数与训练使用的参数是不一致 test_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, main_program=test_program, share_vars_from=train_exe) @@ -90,22 +90,22 @@ ParallelExecutor run(fetch_list, feed=None, feed_dict=None, return_numpy=True) ''''''''' -该接口用于运行当前模型,需要注意的是,执行器会执行Program中的所有算子,而不会根据fetch_list对Program中的算子进行裁剪。 +该接口用于运行当前模型,需要注意的是,执行器会执行 Program 中的所有算子,而不会根据 fetch_list 对 Program 中的算子进行裁剪。 **参数** - **fetch_list** (list) – 该变量表示模型运行之后需要返回的变量。 - - **feed** (list|dict) – 该变量表示模型的输入变量。如果该参数类型为 
``dict`` ,feed中的数据将会被分割(split)并分送给多个设备(CPU/GPU);如果该参数类型为 ``list``,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 + - **feed** (list|dict) – 该变量表示模型的输入变量。如果该参数类型为 ``dict`` ,feed 中的数据将会被分割(split)并分送给多个设备(CPU/GPU);如果该参数类型为 ``list``,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 - **feed_dict** – 该参数已经停止使用。默认为:None。 - - **return_numpy** (bool) – 该变量表示是否将fetched tensor转换为numpy。默认为:True。 + - **return_numpy** (bool) – 该变量表示是否将 fetched tensor 转换为 numpy。默认为:True。 **返回** -返回fetch_list中指定的变量值。 +返回 fetch_list 中指定的变量值。 .. note:: - 1. 如果feed参数为dict类型,输入数据将被均匀分配到不同的卡上,例如:使用2块GPU训练,输入样本数为3,即[0, 1, 2],经过拆分之后,GPU0上的样本数为1,即[0],GPU1上的样本数为2,即[1, 2]。如果样本数少于设备数,程序会报错,因此运行模型时,应额外注意数据集的最后一个batch的样本数是否少于当前可用的CPU核数或GPU卡数,如果是少于,建议丢弃该batch。 - 2. 如果可用的CPU核数或GPU卡数大于1,则fetch出来的结果为不同设备上的相同变量值(fetch_list中的变量)在第0维拼接在一起。 + 1. 如果 feed 参数为 dict 类型,输入数据将被均匀分配到不同的卡上,例如:使用 2 块 GPU 训练,输入样本数为 3,即[0, 1, 2],经过拆分之后,GPU0 上的样本数为 1,即[0],GPU1 上的样本数为 2,即[1, 2]。如果样本数少于设备数,程序会报错,因此运行模型时,应额外注意数据集的最后一个 batch 的样本数是否少于当前可用的 CPU 核数或 GPU 卡数,如果是少于,建议丢弃该 batch。 + 2. 如果可用的 CPU 核数或 GPU 卡数大于 1,则 fetch 出来的结果为不同设备上的相同变量值(fetch_list 中的变量)在第 0 维拼接在一起。 **代码示例** @@ -118,9 +118,9 @@ run(fetch_list, feed=None, feed_dict=None, return_numpy=True) paddle.enable_static() place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则PaddlePaddle会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, + # 注意:如果你使用 CPU 运行程序,需要具体设置 CPU_NUM, + # 否则 PaddlePaddle 会把逻辑核的所有数目设为 CPU_NUM, + # 在这种情况下,输入的 batch size 应大于 CPU_NUM, # 否则程序会异常中断。 if not use_cuda: os.environ['CPU_NUM'] = str(2) @@ -140,13 +140,13 @@ run(fetch_list, feed=None, feed_dict=None, return_numpy=True) train_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, main_program=train_program, loss_name=loss.name) - # 如果feed参数是dict类型: - # 图像会被split到设备中。假设有两个设备,那么每个设备将会处理形为 (5, 1)的图像 + # 如果 feed 参数是 dict 类型: + # 图像会被 split 到设备中。假设有两个设备,那么每个设备将会处理形为 (5, 1)的图像 x = numpy.random.random(size=(10, 1)).astype('float32') loss_data, = train_exe.run(feed={"X": x}, 
fetch_list=[loss.name]) - # 如果feed参数是list类型: + # 如果 feed 参数是 list 类型: # 各设备挨个处理列表中的每个元素 # 第一个设备处理形为 (10, 1) 的图像 # 第二个设备处理形为 (9, 1) 的图像 @@ -159,7 +159,7 @@ run(fetch_list, feed=None, feed_dict=None, return_numpy=True) drop_local_exe_scopes() ''''''''' -立即清除scope中的临时变量。模型运行过程中,生成的中间临时变量将被放到local execution scope中,为了避免对临时变量频繁的申请与释放,ParallelExecutor中采取的策略是间隔若干次迭代之后清理一次临时变量。ParallelExecutor在ExecutionStrategy中提供了num_iteration_per_drop_scope选项,该选项表示间隔多少次迭代之后清理一次临时变量。如果num_iteration_per_drop_scope值为100,但是希望在迭代50次之后清理一次临时变量,可以通过手动调用该接口。 +立即清除 scope 中的临时变量。模型运行过程中,生成的中间临时变量将被放到 local execution scope 中,为了避免对临时变量频繁的申请与释放,ParallelExecutor 中采取的策略是间隔若干次迭代之后清理一次临时变量。ParallelExecutor 在 ExecutionStrategy 中提供了 num_iteration_per_drop_scope 选项,该选项表示间隔多少次迭代之后清理一次临时变量。如果 num_iteration_per_drop_scope 值为 100,但是希望在迭代 50 次之后清理一次临时变量,可以通过手动调用该接口。 **返回** @@ -174,9 +174,9 @@ drop_local_exe_scopes() import os use_cuda = True - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则PaddlePaddle会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, + # 注意:如果你使用 CPU 运行程序,需要具体设置 CPU_NUM, + # 否则 PaddlePaddle 会把逻辑核的所有数目设为 CPU_NUM, + # 在这种情况下,输入的 batch size 应大于 CPU_NUM, # 否则程序会异常中断。 if not use_cuda: os.environ['CPU_NUM'] = str(2) diff --git a/docs/api/paddle/static/Print_cn.rst b/docs/api/paddle/static/Print_cn.rst index e41fd3e0e3f..964fdbdb3ff 100644 --- a/docs/api/paddle/static/Print_cn.rst +++ b/docs/api/paddle/static/Print_cn.rst @@ -9,29 +9,29 @@ Print -创建一个打印操作,打印正在访问的Tensor内容。 +创建一个打印操作,打印正在访问的 Tensor 内容。 -封装传入的Tensor,以便无论何时访问Tensor,都会打印信息message和Tensor的当前值。 +封装传入的 Tensor,以便无论何时访问 Tensor,都会打印信息 message 和 Tensor 的当前值。 参数 :::::::::::: - - **input** (Variable)-将要打印的Tensor。 - - **summarize** (int)-打印Tensor中的元素数目,如果值为-1则打印所有元素。默认值为20。 - - **message** (str)-打印Tensor信息前自定义的字符串类型消息,作为前缀打印。 - - **first_n** (int)-打印Tensor的次数。 - - **print_tensor_name** (bool,可选)-指明是否打印Tensor名称,默认为True。 - - **print_tensor_type** (bool,可选)-指明是否打印Tensor类型,默认为True。 - - **print_tensor_shape** (bool,可选)-指明是否打印Tensor维度信息,默认为True。 - - 
**print_tensor_lod** (bool,可选)-指明是否打印Tensor的LoD信息,默认为True。 - - **print_phase** (str,可选)-指明打印的阶段,包括 ``forward`` , ``backward`` 和 ``both``,默认为 ``both``。设置为 ``forward`` 时,只打印Tensor的前向信息;设置为 ``backward`` 时,只打印Tensor的梯度信息;设置为 ``both`` 时,则同时打印Tensor的前向信息以及梯度信息。 + - **input** (Variable)-将要打印的 Tensor。 + - **summarize** (int)-打印 Tensor 中的元素数目,如果值为-1 则打印所有元素。默认值为 20。 + - **message** (str)-打印 Tensor 信息前自定义的字符串类型消息,作为前缀打印。 + - **first_n** (int)-打印 Tensor 的次数。 + - **print_tensor_name** (bool,可选)-指明是否打印 Tensor 名称,默认为 True。 + - **print_tensor_type** (bool,可选)-指明是否打印 Tensor 类型,默认为 True。 + - **print_tensor_shape** (bool,可选)-指明是否打印 Tensor 维度信息,默认为 True。 + - **print_tensor_lod** (bool,可选)-指明是否打印 Tensor 的 LoD 信息,默认为 True。 + - **print_phase** (str,可选)-指明打印的阶段,包括 ``forward`` , ``backward`` 和 ``both``,默认为 ``both``。设置为 ``forward`` 时,只打印 Tensor 的前向信息;设置为 ``backward`` 时,只打印 Tensor 的梯度信息;设置为 ``both`` 时,则同时打印 Tensor 的前向信息以及梯度信息。 返回 :::::::::::: -输出Tensor。 +输出 Tensor。 .. note:: - 输入和输出是两个不同的Variable,在接下来的过程中,应该使用输出Variable而非输入Variable,否则打印层将失去backward的信息。 + 输入和输出是两个不同的 Variable,在接下来的过程中,应该使用输出 Variable 而非输入 Variable,否则打印层将失去 backward 的信息。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/Program_cn.rst b/docs/api/paddle/static/Program_cn.rst index 94e7c0d63e8..57691a5f1f6 100644 --- a/docs/api/paddle/static/Program_cn.rst +++ b/docs/api/paddle/static/Program_cn.rst @@ -7,17 +7,17 @@ Program .. 
note:: -默认情况下,Paddle内部默认含有 :ref:`cn_api_fluid_default_startup_program` 和 :ref:`cn_api_fluid_default_main_program`,它们共享参数。:ref:`cn_api_fluid_default_startup_program` 只运行一次来初始化参数,:ref:`cn_api_fluid_default_main_program` 在每个mini batch中运行并更新权重。 +默认情况下,Paddle 内部默认含有 :ref:`cn_api_fluid_default_startup_program` 和 :ref:`cn_api_fluid_default_main_program`,它们共享参数。:ref:`cn_api_fluid_default_startup_program` 只运行一次来初始化参数,:ref:`cn_api_fluid_default_main_program` 在每个 mini batch 中运行并更新权重。 -Program是Paddle对于计算图的一种静态描述,使用Program的构造函数可以创建一个Program。Program中包括至少一个 :ref:`api_guide_Block`,当 :ref:`api_guide_Block` 中存在条件选择的控制流OP(例如 :ref:`cn_api_fluid_layers_While` 等)时,该Program将会含有嵌套着的 :ref:`api_guide_Block` 即控制流外部的 :ref:`api_guide_Block` 将包含着控制流内部的 :ref:`api_guide_Block`,而嵌套的 :ref:`api_guide_Block` 的元素访问控制将由具体的控制流OP来决定。关于Program具体的结构和包含的类型请参阅 `framework.proto `_ +Program 是 Paddle 对于计算图的一种静态描述,使用 Program 的构造函数可以创建一个 Program。Program 中包括至少一个 :ref:`api_guide_Block`,当 :ref:`api_guide_Block` 中存在条件选择的控制流 OP(例如 :ref:`cn_api_fluid_layers_While` 等)时,该 Program 将会含有嵌套着的 :ref:`api_guide_Block` 即控制流外部的 :ref:`api_guide_Block` 将包含着控制流内部的 :ref:`api_guide_Block`,而嵌套的 :ref:`api_guide_Block` 的元素访问控制将由具体的控制流 OP 来决定。关于 Program 具体的结构和包含的类型请参阅 `framework.proto `_ 。 -一个Program的集合通常包含初始化程序(startup_program)与主程序(main_program),初始化程序是一个包含一些初始化工作的Program,主程序将会包含用来训练的网络结构和变量,在使用同一个 :ref:`api_guide_executor` 执行时他们会共享初始化工作的结果,例如初始化的参数。一个Program的集合可以被用来测试或者训练,被用来训练时,``Paddle`` 将会利用所有用户使用的OP和变量来搭建一个训练网络,被用来测试时,可以通过调用Program相关的接口例如:`clone` 剪去一些与测试无关的OP和变量,比如反向传播的OP和变量。 +一个 Program 的集合通常包含初始化程序(startup_program)与主程序(main_program),初始化程序是一个包含一些初始化工作的 Program,主程序将会包含用来训练的网络结构和变量,在使用同一个 :ref:`api_guide_executor` 执行时他们会共享初始化工作的结果,例如初始化的参数。一个 Program 的集合可以被用来测试或者训练,被用来训练时,``Paddle`` 将会利用所有用户使用的 OP 和变量来搭建一个训练网络,被用来测试时,可以通过调用 Program 相关的接口例如:`clone` 剪去一些与测试无关的 OP 和变量,比如反向传播的 OP 和变量。 返回 ::::::::: -Program,创建的空的Program。 +Program,创建的空的 Program。 代码示例 :::::::::: @@ -45,16 +45,16 @@ Program,创建的空的Program。 to_string(throw_on_error, 
with_details=False) ''''''''' -将Program转换为字符串。 +将 Program 转换为字符串。 **参数** - **throw_on_error** (bool) - 是否在没有设置必需字段时抛出异常。 - - **with_details** (bool) - 值为true时,打印更多关于变量和参数的信息,如trainable, optimize_attr等。 + - **with_details** (bool) - 值为 true 时,打印更多关于变量和参数的信息,如 trainable, optimize_attr 等。 **返回** -str,由Program转换得到的字符串。 +str,由 Program 转换得到的字符串。 **代码示例** @@ -79,15 +79,15 @@ clone(for_test=False) .. note:: 1. ``Program.clone()`` 方法不会克隆例如 :ref:`cn_api_fluid_io_DataLoader` 这样的数据读取相关的部分,这可能会造成的数据读取部分在克隆后丢失; - 2. 此API当 ``for_test=True`` 时将会裁剪部分OP和变量。为防止错误的裁剪,推荐在 :ref:`cn_api_fluid_backward_append_backward` 和执行优化器之前使用;``clone(for_test=True)`` 。 + 2. 此 API 当 ``for_test=True`` 时将会裁剪部分 OP 和变量。为防止错误的裁剪,推荐在 :ref:`cn_api_fluid_backward_append_backward` 和执行优化器之前使用;``clone(for_test=True)`` 。 -当 ``for_test=True`` 时创建一个新的、仅包含当前Program前向内容的Program。否则创建一个新的,和当前Program完全相同的Program +当 ``for_test=True`` 时创建一个新的、仅包含当前 Program 前向内容的 Program。否则创建一个新的,和当前 Program 完全相同的 Program -有些OP,在训练和测试之间的行为是不同的,比如 :ref:`cn_api_fluid_layers_batch_norm`。它们有一个属性 ``is_test`` 来控制行为。当 ``for_test=True`` 时,此方法将把它们的 ``is_test`` 属性更改为True。 +有些 OP,在训练和测试之间的行为是不同的,比如 :ref:`cn_api_fluid_layers_batch_norm`。它们有一个属性 ``is_test`` 来控制行为。当 ``for_test=True`` 时,此方法将把它们的 ``is_test`` 属性更改为 True。 -- 克隆Program用于训练时,将 ``for_test`` 设置为False。 -- 克隆Program用于测试时,将 ``for_test`` 设置为True。虽然在这种情况下,如果在使用了优化器之后调用 ``clone`` 我们依旧会对Program当中反向执行以及优化器相关的内容进行自动裁剪,但是,我们强烈建议在使用优化器之前使用 ``clone`` 例如如果使用的是 :ref:`cn_api_fluid_optimizer_Momentum` 可以这样去使用: +- 克隆 Program 用于训练时,将 ``for_test`` 设置为 False。 +- 克隆 Program 用于测试时,将 ``for_test`` 设置为 True。虽然在这种情况下,如果在使用了优化器之后调用 ``clone`` 我们依旧会对 Program 当中反向执行以及优化器相关的内容进行自动裁剪,但是,我们强烈建议在使用优化器之前使用 ``clone`` 例如如果使用的是 :ref:`cn_api_fluid_optimizer_Momentum` 可以这样去使用: **代码示例** @@ -108,17 +108,17 @@ clone(for_test=False) **参数** - - **for_test** (bool) – 取值为True时,clone方法内部会把operator的属性 ``is_test`` 设置为 True,并裁剪反向OP和参数优化OP,默认值为False。 + - **for_test** (bool) – 取值为 True 时,clone 方法内部会把 operator 的属性 ``is_test`` 设置为 True,并裁剪反向 OP 
和参数优化 OP,默认值为 False。 **返回** -Program,当 ``for_test=True`` 时返回一个新的、仅包含当前Program前向内容的Program。否则返回一个新的,和当前Program完全相同的Program。 +Program,当 ``for_test=True`` 时返回一个新的、仅包含当前 Program 前向内容的 Program。否则返回一个新的,和当前 Program 完全相同的 Program。 **代码示例** .. note:: - Program在clone后的顺序可能不同,这不会影响的训练或测试进程。在下面的示例中,我们提供了一个简单的方法print_prog(Program)来打印程序描述,以确保clone后仍能得到同样的打印结果: + Program 在 clone 后的顺序可能不同,这不会影响的训练或测试进程。在下面的示例中,我们提供了一个简单的方法 print_prog(Program)来打印程序描述,以确保 clone 后仍能得到同样的打印结果: .. code-block:: python @@ -135,7 +135,7 @@ Program,当 ``for_test=True`` 时返回一个新的、仅包含当前Program if key not in ['op_callstack', 'op_role_var']: print(" [ attrs: {}: {} ]".format(key, value)) -1. 克隆一个Program,示例代码如下。 +1. 克隆一个 Program,示例代码如下。 .. code-block:: python @@ -188,7 +188,7 @@ Program,当 ``for_test=True`` 时返回一个新的、仅包含当前Program sgd = paddle.optimizer.SGD(learning_rate=1e-3) sgd.minimize(avg_loss) -2. 如果分别运行 train Program 和 test Program,则可以不使用clone。 +2. 如果分别运行 train Program 和 test Program,则可以不使用 clone。 .. code-block:: python @@ -235,12 +235,12 @@ Program,当 ``for_test=True`` 时返回一个新的、仅包含当前Program avg_loss = network() print_prog(test_program_2) -上边两个代码片段生成和打印的Program是一样的。 +上边两个代码片段生成和打印的 Program 是一样的。 **static** parse_from_string(binary_str) ''''''''' -通过对 `protobuf `_ 的反序列化,转换成Program。 +通过对 `protobuf `_ 的反序列化,转换成 Program。 **参数** @@ -280,11 +280,11 @@ Program,反序列化后的 Program。 num_blocks ''''''''' -该Program中的 :ref:`api_guide_Block` 的个数。 +该 Program 中的 :ref:`api_guide_Block` 的个数。 **返回** -int,该Program中的 :ref:`api_guide_Block` 的个数。 +int,该 Program 中的 :ref:`api_guide_Block` 的个数。 **代码示例** @@ -306,13 +306,13 @@ random_seed ''''''''' .. 
note:: - 必须在相关OP被添加之前设置。 + 必须在相关 OP 被添加之前设置。 -程序中随机运算符的默认随机种子。0意味着随机生成随机种子。 +程序中随机运算符的默认随机种子。0 意味着随机生成随机种子。 **返回** -int64,该Program中当前正在使用的random seed。 +int64,该 Program 中当前正在使用的 random seed。 **代码示例** @@ -342,11 +342,11 @@ int64,该Program中当前正在使用的random seed。 global_block() ''''''''' -获取该Program的第一个 :ref:`api_guide_Block` 。 +获取该 Program 的第一个 :ref:`api_guide_Block` 。 **返回** -:ref:`api_guide_Block`,该Program的第一个 :ref:`api_guide_Block`。 +:ref:`api_guide_Block`,该 Program 的第一个 :ref:`api_guide_Block`。 **代码示例** @@ -365,15 +365,15 @@ global_block() block(index) ''''''''' -返回该Program中,``index`` 指定的 :ref:`api_guide_Block` 。 ``index`` 类型为int。 +返回该 Program 中,``index`` 指定的 :ref:`api_guide_Block` 。 ``index`` 类型为 int。 **参数** - - **index** (int) - 需要获取的 :ref:`api_guide_Block` 的index。 + - **index** (int) - 需要获取的 :ref:`api_guide_Block` 的 index。 **返回** -:ref:`api_guide_Block`,该Program中index对应的那个 :ref:`api_guide_Block`。 +:ref:`api_guide_Block`,该 Program 中 index 对应的那个 :ref:`api_guide_Block`。 **代码示例** @@ -391,11 +391,11 @@ block(index) current_block() ''''''''' -获取当前 :ref:`api_guide_Block`。当前 :ref:`api_guide_Block` 是用来添加OP的。 +获取当前 :ref:`api_guide_Block`。当前 :ref:`api_guide_Block` 是用来添加 OP 的。 **返回** -:ref:`api_guide_Block`,该Program中用户当前所在的 :ref:`api_guide_Block`。 +:ref:`api_guide_Block`,该 Program 中用户当前所在的 :ref:`api_guide_Block`。 **代码示例** @@ -413,11 +413,11 @@ current_block() list_vars() ''''''''' -获取当前Program中所有变量。返回值是一个可迭代对象(iterable object)。 +获取当前 Program 中所有变量。返回值是一个可迭代对象(iterable object)。 **返回** -Generator,会yield每个Program中的变量。 +Generator,会 yield 每个 Program 中的变量。 **代码示例** @@ -440,11 +440,11 @@ Generator,会yield每个Program中的变量。 all_parameters() ''''''''' -获取当前Program中所有的 :ref:`api_guide_parameter`。返回值是一个列表。 +获取当前 Program 中所有的 :ref:`api_guide_parameter`。返回值是一个列表。 **返回** -list[ :ref:`api_guide_parameter` ],一个包含当前Program中所有参数的列表。 +list[ :ref:`api_guide_parameter` ],一个包含当前 Program 中所有参数的列表。 **代码示例** @@ -477,16 +477,16 @@ list[ :ref:`api_guide_parameter` ],一个包含当前Program中所有参数的 state_dict(mode='all', 
scope=None) ''''''''' -获取当前 ``Program`` 持久性变量。并将所有持久性变量存放在dict结构中。 +获取当前 ``Program`` 持久性变量。并将所有持久性变量存放在 dict 结构中。 **参数** - - mode (str,可选) - 获取何种持久性变量。目前支持以下选项:(1) 'opt':获得优化器的持久性变量放在dict结构中;(2) 'param':获得组网中的持久性变量放在dict结构中,不包含优化器中的持久性变量;(3) 'all':获得组网和优化器中的持久性变量放在dict结构中;默认值为'all'。 - - scope (Scope,可选) - 如果scope为 ``None``,通过 `paddle.static.global_scope()` 获取全局/默认作用域实例,并从中获取 ``state_dict``;否则从指定的 ``scope`` 获取 ``state_dict``。默认值为 ``None`` 。 + - mode (str,可选) - 获取何种持久性变量。目前支持以下选项:(1) 'opt':获得优化器的持久性变量放在 dict 结构中;(2) 'param':获得组网中的持久性变量放在 dict 结构中,不包含优化器中的持久性变量;(3) 'all':获得组网和优化器中的持久性变量放在 dict 结构中;默认值为'all'。 + - scope (Scope,可选) - 如果 scope 为 ``None``,通过 `paddle.static.global_scope()` 获取全局/默认作用域实例,并从中获取 ``state_dict``;否则从指定的 ``scope`` 获取 ``state_dict``。默认值为 ``None`` 。 **返回** -dict,包含持久性变量的dict,键值是持久性变量的名字,值为持久性变量。 +dict,包含持久性变量的 dict,键值是持久性变量的名字,值为持久性变量。 **代码示例** @@ -517,7 +517,7 @@ set_state_dict(state_dict, scope=None) **参数** - state_dict (dict) - 包含持久性变量的字典。键值是持久性变量的名字,值为持久性变量。 - - scope (Scope,可选) - 如果scope为 ``None``,通过 `paddle.static.global_scope()` 获取全局/默认作用域实例,并将 ``state_dict`` 中久性变量设置到这个作用域中;否则将 ``state_dict`` 设置到指定的 ``scope`` 中。默认值为 ``None`` 。 + - scope (Scope,可选) - 如果 scope 为 ``None``,通过 `paddle.static.global_scope()` 获取全局/默认作用域实例,并将 ``state_dict`` 中的持久性变量设置到这个作用域中;否则将 ``state_dict`` 设置到指定的 ``scope`` 中。默认值为 ``None`` 。 **返回** diff --git a/docs/api/paddle/static/Variable_cn.rst b/docs/api/paddle/static/Variable_cn.rst index 99ddaedd19e..30fffe58caa 100644 --- a/docs/api/paddle/static/Variable_cn.rst +++ b/docs/api/paddle/static/Variable_cn.rst @@ -11,7 +11,7 @@ Variable 2. 
请使用 `Block.create_var` 创建一个静态的 `Variable`,该静态的 `Variable` 在使用 :ref:`cn_api_fluid_executor` 执行前是没有实际数据的。 -在Paddle静态图模式中,OP的每个输入和输出都是 :ref:`api_guide_Variable`。多数情况下,:ref:`api_guide_Variable` 用于保存不同种类的数据或训练标签。 +在 Paddle 静态图模式中,OP 的每个输入和输出都是 :ref:`api_guide_Variable`。多数情况下,:ref:`api_guide_Variable` 用于保存不同种类的数据或训练标签。 :ref:`api_guide_Variable` 总是属于某一个 :ref:`api_guide_Block`。所有 :ref:`api_guide_Variable` 都有其自己的 ``name``,不同 :ref:`api_guide_Block` 中的两个 :ref:`api_guide_Variable` 可以具有相同的名称。如果使用的 **不是** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ 模式,那么同一个 :ref:`api_guide_Block` 中的两个或更多 :ref:`api_guide_Variable` 拥有相同 ``name`` 将意味着他们会共享相同的内容。通常我们使用这种方式来实现 **参数共享**。 @@ -43,7 +43,7 @@ to_string(throw_on_error, with_details=True) **参数:** - **throw_on_error** (bool) - 是否在没有设置必需字段时抛出异常。 - - **with_details** (bool) - 值为true时,打印更多关于 :ref:`api_guide_Variable` 的信息,如 ``error_clip`` , ``stop_gradient`` 等。 + - **with_details** (bool) - 值为 true 时,打印更多关于 :ref:`api_guide_Variable` 的信息,如 ``error_clip`` , ``stop_gradient`` 等。 **返回** @@ -227,7 +227,7 @@ set_value(value, scope=None) size(self) ''''''''' -返回该 :ref:`api_guide_Variable` 中的数据元素数量,结果是一个shape为[1]的int64的 ``Variable`` 。 +返回该 :ref:`api_guide_Variable` 中的数据元素数量,结果是一个 shape 为[1]的 int64 的 ``Variable`` 。 **返回** @@ -241,7 +241,7 @@ COPY-FROM: paddle.static.Variable.size ndimension(self) ''''''''' -返回该 :ref:`api_guide_Variable` 的维度,也称作rank。 +返回该 :ref:`api_guide_Variable` 的维度,也称作 rank。 **返回** @@ -262,7 +262,7 @@ ndimension(self) dim(self) ''''''''' -返回该 :ref:`api_guide_Variable` 的维度,也称作rank。 +返回该 :ref:`api_guide_Variable` 的维度,也称作 rank。 **返回** ``Variable`` 的维度。 @@ -285,7 +285,7 @@ persistable ''''''''' .. note:: -1. 该属性我们即将废弃,此介绍仅为了帮助用户理解概念,1.6版本后用户可以不再关心该属性 +1. 该属性我们即将废弃,此介绍仅为了帮助用户理解概念,1.6 版本后用户可以不再关心该属性 2. 该属性除参数以外默认值为 ``False``,而参数的该属性默认值为 ``True`` 。 此 :ref:`api_guide_Variable` 是否是长期存活的 :ref:`api_guide_Variable`。 @@ -329,7 +329,7 @@ type .. 
note:: 该属性是只读属性。 -此 :ref:`api_guide_Variable` 的内存模型,例如是::ref:`api_fluid_LoDTensor`,或者SelectedRows。 +此 :ref:`api_guide_Variable` 的内存模型,例如是::ref:`api_fluid_LoDTensor`,或者 SelectedRows。 ndim ''''''''' @@ -337,4 +337,4 @@ ndim .. note:: 该属性是只读属性。 -此 :ref:`api_guide_Variable` 的维度,也称作rank。 +此 :ref:`api_guide_Variable` 的维度,也称作 rank。 diff --git a/docs/api/paddle/static/WeightNormParamAttr_cn.rst b/docs/api/paddle/static/WeightNormParamAttr_cn.rst index 93ad53d2cd7..ba694d1628f 100644 --- a/docs/api/paddle/static/WeightNormParamAttr_cn.rst +++ b/docs/api/paddle/static/WeightNormParamAttr_cn.rst @@ -11,7 +11,7 @@ WeightNormParamAttr 动态图模式下请使用 ``paddle.nn.utils.weight_norm`` 。 .. note:: - 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略::ref:`cn_api_paddle_nn_ClipGradByGlobalNorm` 、 + 该类中的 ``gradient_clip`` 属性在 2.0 版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略::ref:`cn_api_paddle_nn_ClipGradByGlobalNorm` 、 :ref:`cn_api_paddle_nn_ClipGradByNorm` 、 :ref:`cn_api_paddle_nn_ClipGradByValue` 。 该类定义了权重归一化(Weight Normalization)的参数。权重归一化可以将神经网络中权重向量的长度与其方向解耦,详细的定义与实现可以参考论文:`Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ @@ -19,16 +19,16 @@ WeightNormParamAttr 参数 :::::::::::: - - **dim** (int,可选) - 进行归一化操作(norm)的切片所在维度,是小于权重Tensor rank的非负数。比如卷积的权重shape是 :math:`[cout, cin, kh, kw]` , rank是4,则dim可以选0,1,2,3;fc的权重shape是 :math:`[cout, cin]` ,rank是2,dim可以选0,1。 dim 默认为None,如果为None就对所有元素做归一化(norm)。 + - **dim** (int,可选) - 进行归一化操作(norm)的切片所在维度,是小于权重 Tensor rank 的非负数。比如卷积的权重 shape 是 :math:`[cout, cin, kh, kw]` , rank 是 4,则 dim 可以选 0,1,2,3;fc 的权重 shape 是 :math:`[cout, cin]` ,rank 是 2,dim 可以选 0,1。 dim 默认为 None,如果为 None 就对所有元素做归一化(norm)。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **initializer** (Initializer,可选) - 初始化参数方法,例如 ``initializer = fluid.nn.initializer.Constant(1.0)``。默认为None,如果为None则使用默认初始化函数 `Xavier()` 。 - - **learning_rate** (float32,可选) - 学习率,优化过程 
:math:`global\_lr∗parameter\_lr∗scheduler\_factor` 的学习速率,默认为1.0。 + - **initializer** (Initializer,可选) - 初始化参数方法,例如 ``initializer = fluid.nn.initializer.Constant(1.0)``。默认为 None,如果为 None 则使用默认初始化函数 `Xavier()` 。 + - **learning_rate** (float32,可选) - 学习率,优化过程 :math:`global\_lr∗parameter\_lr∗scheduler\_factor` 的学习速率,默认为 1.0。 - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略::ref:`cn_api_paddle_regularizer_L1Decay` 、 :ref:`cn_api_paddle_regularizer_L2Decay`,如果在 ``optimizer`` (例如 :ref:`cn_api_paddle_optimizer_SGD` ) 中也 - 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 - - **trainable** (bool) - 可选,指明参数是否可训练,默认为True。 - - **do_model_average** (bool) - 可选,指明参数是否需要模型平均化操作(Model Average),默认为False。 - - **need_clip** (bool) - 可选,指明参数梯度是否需要在优化器中进行clip,默认为True。 + 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为 None,表示没有正则化。 + - **trainable** (bool) - 可选,指明参数是否可训练,默认为 True。 + - **do_model_average** (bool) - 可选,指明参数是否需要模型平均化操作(Model Average),默认为 False。 + - **need_clip** (bool) - 可选,指明参数梯度是否需要在优化器中进行 clip,默认为 True。 代码示例 diff --git a/docs/api/paddle/static/accuracy_cn.rst b/docs/api/paddle/static/accuracy_cn.rst index 88cb61c622b..8259e452fef 100755 --- a/docs/api/paddle/static/accuracy_cn.rst +++ b/docs/api/paddle/static/accuracy_cn.rst @@ -10,20 +10,20 @@ accuracy accuracy layer。参考 https://en.wikipedia.org/wiki/Precision_and_recall -使用输入和标签计算准确率。如果正确的标签在topk个预测值里,则计算结果加1。注意:输出正确率的类型由input类型决定,input和lable的类型可以不一样。 +使用输入和标签计算准确率。如果正确的标签在 topk 个预测值里,则计算结果加 1。注意:输出正确率的类型由 input 类型决定,input 和 label 的类型可以不一样。 参数 :::::::::::: - - **input** (Tensor|LoDTensor)-数据类型为float32,float64。输入为网络的预测值。shape为 ``[sample_number, class_dim]`` 。 - - **label** (Tensor|LoDTensor)-数据类型为int64,int32。输入为数据集的标签。shape为 ``[sample_number, 1]`` 。 - - **k** (int64|int32) - 取每个类别中k个预测值用于计算。 + - **input** (Tensor|LoDTensor)-数据类型为 float32,float64。输入为网络的预测值。shape 为 ``[sample_number, class_dim]`` 。 + - **label** (Tensor|LoDTensor)-数据类型为 int64,int32。输入为数据集的标签。shape 为 ``[sample_number, 1]`` 。 + - **k** (int64|int32) - 
取每个类别中 k 个预测值用于计算。 - **correct** (int64|int32)-正确预测值的个数。 - **total** (int64|int32)-总共的预测值。 返回 :::::::::::: - Tensor,计算出来的正确率,数据类型为float32。 + Tensor,计算出来的正确率,数据类型为 float32。 代码示例 diff --git a/docs/api/paddle/static/auc_cn.rst b/docs/api/paddle/static/auc_cn.rst index a1859e6c566..bf344f21da6 100755 --- a/docs/api/paddle/static/auc_cn.rst +++ b/docs/api/paddle/static/auc_cn.rst @@ -10,9 +10,9 @@ auc **Area Under the Curve(AUC) Layer** -该层根据前向输出和标签计算AUC,在二分类(binary classification)估计中广泛使用。 +该层根据前向输出和标签计算 AUC,在二分类(binary classification)估计中广泛使用。 -注:如果输入标注包含一种值,只有0或1两种情况,数据类型则强制转换成布尔值。 +注:如果输入标注包含一种值,只有 0 或 1 两种情况,数据类型则强制转换成布尔值。 相关定义可以在这里找到:https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve @@ -25,25 +25,25 @@ auc 参数 :::::::::::: - - **input** (Tensor|LoDTensor) - 数据类型为float32、float64。浮点二维变量,值的范围为[0,1]。每一行降序排列。该输入为网络预测值的输入。 - - **label** (Tensor|LoDTensor) - 数据类型为int32、int64。二维整型变量,为训练数据的标签。 + - **input** (Tensor|LoDTensor) - 数据类型为 float32、float64。浮点二维变量,值的范围为[0,1]。每一行降序排列。该输入为网络预测值的输入。 + - **label** (Tensor|LoDTensor) - 数据类型为 int32、int64。二维整型变量,为训练数据的标签。 - **curve** (str) - 曲线类型,可以为 ``ROC`` 或 ``PR``,默认 ``ROC``。 - - **num_thresholds** (int) - 将roc曲线离散化时使用的临界值数。默认200。 - - **topk** (int) - 取topk的输出值用于计算。 - - **slide_steps** (int) - 当计算batch auc时,不仅用当前步也用于先前步。slide_steps=1,表示用当前步;slide_steps = 3表示用当前步和前两步;slide_steps = 0,则用所有步。 + - **num_thresholds** (int) - 将 roc 曲线离散化时使用的临界值数。默认 200。 + - **topk** (int) - 取 topk 的输出值用于计算。 + - **slide_steps** (int) - 当计算 batch auc 时,不仅用当前步也用于先前步。slide_steps=1,表示用当前步;slide_steps = 3 表示用当前步和前两步;slide_steps = 0,则用所有步。 返回 :::::::::::: -tuple,当前计算出的AUC。数据类型是tensor,支持float32和float64。 +tuple,当前计算出的 AUC。数据类型是 tensor,支持 float32 和 float64。 -返回的元组为auc_out, batch_auc_out, [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg]。 +返回的元组为 auc_out, batch_auc_out, [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg]。 -- auc_out为准确率的结果; -- batch_auc_out为batch准确率的结果; -- batch_stat_pos为batch计算时label=1的统计值; -- 
batch_stat_neg为batch计算时label=0的统计值; -- stat_pos计算时label=1的统计值; -- stat_neg为计算时label=0的统计值。 +- auc_out 为准确率的结果; +- batch_auc_out 为 batch 准确率的结果; +- batch_stat_pos 为 batch 计算时 label=1 的统计值; +- batch_stat_neg 为 batch 计算时 label=0 的统计值; +- stat_pos 计算时 label=1 的统计值; +- stat_neg 为计算时 label=0 的统计值。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/cpu_places_cn.rst b/docs/api/paddle/static/cpu_places_cn.rst index af3bbe8a34d..4980020d3ab 100644 --- a/docs/api/paddle/static/cpu_places_cn.rst +++ b/docs/api/paddle/static/cpu_places_cn.rst @@ -8,7 +8,7 @@ cpu_places 该接口创建 ``device_count`` 个 ``paddle.CPUPlace`` 对象,并返回所创建的对象列表。 -如果 ``device_count`` 为 ``None``,则设备数目将由环境变量 ``CPU_NUM`` 确定。如果未设置 ``CPU_NUM`` 环境变量,则设备数目会默认设为1,也就是说,``CPU_NUM=1``。 +如果 ``device_count`` 为 ``None``,则设备数目将由环境变量 ``CPU_NUM`` 确定。如果未设置 ``CPU_NUM`` 环境变量,则设备数目会默认设为 1,也就是说,``CPU_NUM=1``。 ``CPU_NUM`` 表示在当前任务中使用的设备数目。如果 ``CPU_NUM`` 与物理核心数相同,可以加速程序的运行。 参数 diff --git a/docs/api/paddle/static/create_global_var_cn.rst b/docs/api/paddle/static/create_global_var_cn.rst index 5730e44b6c9..ecd88e18b0c 100644 --- a/docs/api/paddle/static/create_global_var_cn.rst +++ b/docs/api/paddle/static/create_global_var_cn.rst @@ -8,21 +8,21 @@ create_global_var -该OP在全局块中创建一个新的Tensor,Tensor的值为 ``value`` 。 +该 OP 在全局块中创建一个新的 Tensor,Tensor 的值为 ``value`` 。 参数 :::::::::::: - - **shape** (list[int])- 指定输出Tensor的形状,它可以是一个整数列表。 + - **shape** (list[int])- 指定输出 Tensor 的形状,它可以是一个整数列表。 - **value** (float)- 变量的值,填充新创建的变量。 - **dtype** (str|numpy.dtype,可选)– 初始化数据类型。 - **persistable** (bool,可选)- 是否为永久变量,默认:False。 - - **force_cpu** (bool,可选)- 是否将该变量压入CPU,默认值为 False。 + - **force_cpu** (bool,可选)- 是否将该变量压入 CPU,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Variable,创建的Tensor变量。 +Variable,创建的 Tensor 变量。 代码示例 diff --git a/docs/api/paddle/static/create_parameter_cn.rst b/docs/api/paddle/static/create_parameter_cn.rst index ce3d3f7c278..5806db88d93 100644 --- 
a/docs/api/paddle/static/create_parameter_cn.rst +++ b/docs/api/paddle/static/create_parameter_cn.rst @@ -9,23 +9,23 @@ create_parameter -该OP创建一个参数。该参数是一个可学习的变量,拥有梯度并且可优化。 +该 OP 创建一个参数。该参数是一个可学习的变量,拥有梯度并且可优化。 -**注意:这是一个低级别的API。如果您希望自己创建新的op,这个API将非常有用,无需使用layers。** +**注意:这是一个低级别的 API。如果您希望自己创建新的 op,这个 API 将非常有用,无需使用 layers。** 参数 :::::::::::: - - **shape** (list[int]) - 指定输出Tensor的形状,它可以是一个整数列表。 + - **shape** (list[int]) - 指定输出 Tensor 的形状,它可以是一个整数列表。 - **dtype** (str|numpy.dtype) – 初始化数据类型。可设置的字符串值有:"float16","float32","float64"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **attr** (ParamAttr,可选) - 指定参数的属性对象。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。默认值为None,表示将采用 ParamAttr 的默认方式初始化。 - - **is_bias** (bool,可选) - 当default_initializer为空,该值会对选择哪个默认初始化程序产生影响。如果is_bias为真,则使用initializer.Constant(0.0),否则使用Xavier(),默认值False。 + - **attr** (ParamAttr,可选) - 指定参数的属性对象。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。默认值为 None,表示将采用 ParamAttr 的默认方式初始化。 + - **is_bias** (bool,可选) - 当 default_initializer 为空,该值会对选择哪个默认初始化程序产生影响。如果 is_bias 为真,则使用 initializer.Constant(0.0),否则使用 Xavier(),默认值 False。 - **default_initializer** (Initializer,可选) - 参数的初始化程序,默认值为空。 返回 :::::::::::: -创建的Parameter变量。 +创建的 Parameter 变量。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/cuda_places_cn.rst b/docs/api/paddle/static/cuda_places_cn.rst index 5030d49c75c..c46e0f8c886 100644 --- a/docs/api/paddle/static/cuda_places_cn.rst +++ b/docs/api/paddle/static/cuda_places_cn.rst @@ -7,7 +7,7 @@ cuda_places .. 
note:: - 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 + 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的 GPU 设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 该接口根据 ``device_ids`` 创建一个或多个 ``paddle.CUDAPlace`` 对象,并返回所创建的对象列表。 @@ -15,12 +15,12 @@ cuda_places 例如:``FLAGS_selected_gpus=0,1,2``,则返回的列表将为 ``[paddle.CUDAPlace(0), paddle.CUDAPlace(1), paddle.CUDAPlace(2)]``。 如果未设置标志 ``FLAGS_selected_gpus``,则根据 ``CUDA_VISIBLE_DEVICES`` 环境变量,返回所有可见的 GPU places。 -如果 ``device_ids`` 不是 ``None``,它应该是使用的GPU设备ID的列表或元组。 +如果 ``device_ids`` 不是 ``None``,它应该是使用的 GPU 设备 ID 的列表或元组。 例如:``device_id=[0,1,2]``,返回的列表将是 ``[paddle.CUDAPlace(0), paddle.CUDAPlace(1), paddle.CUDAPlace(2)]``。 参数 ::::::::: - - **device_ids** (list(int)|tuple(int),可选) - GPU的设备ID列表或元组。默认值为 ``None``。 + - **device_ids** (list(int)|tuple(int),可选) - GPU 的设备 ID 列表或元组。默认值为 ``None``。 返回 ::::::::: diff --git a/docs/api/paddle/static/data_cn.rst b/docs/api/paddle/static/data_cn.rst index 304a6cf576a..27e2227cdcd 100644 --- a/docs/api/paddle/static/data_cn.rst +++ b/docs/api/paddle/static/data_cn.rst @@ -9,16 +9,16 @@ data -该OP会在全局block中创建变量(Tensor),该全局变量可被计算图中的算子(operator)访问。该变量可作为占位符用于数据输入。例如用执行器(Executor)feed数据进该变量,当 ``dtype`` 为None时,``dtype`` 将通过 ``padle.get_default_dtype()`` 获取全局类型。 +该 OP 会在全局 block 中创建变量(Tensor),该全局变量可被计算图中的算子(operator)访问。该变量可作为占位符用于数据输入。例如用执行器(Executor)feed 数据进该变量,当 ``dtype`` 为 None 时,``dtype`` 将通过 ``padle.get_default_dtype()`` 获取全局类型。 参数 :::::::::::: - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **shape** (list|tuple)- 声明维度信息的list或tuple。可以在某个维度上设置None或-1,以指示该维度可以是任何大小。例如,将可变batchsize设置为None或-1。 - - **dtype** (np.dtype|str,可选)- 数据类型,支持bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为None。当 ``dtype`` 为None时,``dtype`` 将通过 ``padle.get_default_dtype()`` 获取全局类型。 - - **lod_level** (int,可选)- LoDTensor变量的LoD level数,LoD level是PaddlePaddle的高级特性,一般任务中不会需要更改此默认值,关于LoD level的详细适用场景和用法请见 :ref:`cn_user_guide_lod_tensor`。默认值为0。 + - **shape** (list|tuple)- 
声明维度信息的 list 或 tuple。可以在某个维度上设置 None 或-1,以指示该维度可以是任何大小。例如,将可变 batchsize 设置为 None 或-1。 + - **dtype** (np.dtype|str,可选)- 数据类型,支持 bool,float16,float32,float64,int8,int16,int32,int64,uint8。默认值为 None。当 ``dtype`` 为 None 时,``dtype`` 将通过 ``padle.get_default_dtype()`` 获取全局类型。 + - **lod_level** (int,可选)- LoDTensor 变量的 LoD level 数,LoD level 是 PaddlePaddle 的高级特性,一般任务中不会需要更改此默认值,关于 LoD level 的详细适用场景和用法请见 :ref:`cn_user_guide_lod_tensor`。默认值为 0。 返回 :::::::::::: diff --git a/docs/api/paddle/static/default_main_program_cn.rst b/docs/api/paddle/static/default_main_program_cn.rst index 464948ca96f..ef62acc78f7 100644 --- a/docs/api/paddle/static/default_main_program_cn.rst +++ b/docs/api/paddle/static/default_main_program_cn.rst @@ -5,18 +5,18 @@ default_main_program .. py:function:: paddle.static.default_main_program() -此接口可以获取当前用于存储OP和Tensor描述信息的 ``default main program``。 +此接口可以获取当前用于存储 OP 和 Tensor 描述信息的 ``default main program``。 例如 ``z = paddle.add(x, y)`` 会创建新 ``Op`` 和 tensor ``z``,这些变量会被记录在 ``default main program`` 中。 -``default main program`` 是许多编程接口中Program参数的默认值。例如对于 ``Executor.run()`` 如果用户没有传入Program参数,会默认使用 ``default main program`` 。 +``default main program`` 是许多编程接口中 Program 参数的默认值。例如对于 ``Executor.run()`` 如果用户没有传入 Program 参数,会默认使用 ``default main program`` 。 可以使用 :ref:`cn_api_fluid_program_guard` 来切换 ``default main program``。 返回 ::::::::: - :ref:`cn_api_fluid_Program`,当前默认用于存储OP和Tensor描述的Program。 + :ref:`cn_api_fluid_Program`,当前默认用于存储 OP 和 Tensor 描述的 Program。 代码示例 diff --git a/docs/api/paddle/static/default_startup_program_cn.rst b/docs/api/paddle/static/default_startup_program_cn.rst index 38bd20e2c12..997298b7ca7 100644 --- a/docs/api/paddle/static/default_startup_program_cn.rst +++ b/docs/api/paddle/static/default_startup_program_cn.rst @@ -15,7 +15,7 @@ default_startup_program 该函数可以获取默认/全局 startup :ref:`cn_api_fluid_Program` (初始化启动程序)。 -``paddle.nn`` 中的函数将参数初始化OP追加到 ``startup program`` 中,运行 ``startup program`` 会完成参数的初始化。 +``paddle.nn`` 中的函数将参数初始化 OP 追加到 ``startup 
program`` 中,运行 ``startup program`` 会完成参数的初始化。 该函数将返回默认的或当前的 ``startup program``。用户可以使用 :ref:`cn_api_fluid_program_guard` 来切换 :ref:`cn_api_fluid_Program` 。 diff --git a/docs/api/paddle/static/deserialize_persistables_cn.rst b/docs/api/paddle/static/deserialize_persistables_cn.rst index 9f10f53ced1..49356c3aa20 100644 --- a/docs/api/paddle/static/deserialize_persistables_cn.rst +++ b/docs/api/paddle/static/deserialize_persistables_cn.rst @@ -21,7 +21,7 @@ deserialize_persistables 返回 :::::::::::: - - Program:包含反序列化后的参数的program。 + - Program:包含反序列化后的参数的 program。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/device_guard_cn.rst b/docs/api/paddle/static/device_guard_cn.rst index a9d6145fd83..be12c98030a 100644 --- a/docs/api/paddle/static/device_guard_cn.rst +++ b/docs/api/paddle/static/device_guard_cn.rst @@ -6,14 +6,14 @@ device_guard .. py:function:: paddle.static.device_guard(device=None) .. note:: - 该API仅支持静态图模式。 + 该 API 仅支持静态图模式。 -一个用于指定OP运行设备的上下文管理器。 +一个用于指定 OP 运行设备的上下文管理器。 参数 :::::::::::: - - **device** (str|None) – 指定上下文中使用的设备。它可以是 ``cpu``、 ``gpu``、 ``gpu:x``,其中 ``x`` 是GPU 的编号。当它被设置为 ``cpu`` 或者 ``gpu`` 时,创建在该上下文中的OP将被运行在CPUPlace或者CUDAPlace上。若设置为 ``gpu``,同时程序运行在单卡模式下,设备的索引将与执行器的设备索引保持一致,默认值:None,在该上下文中的OP将被自动地分配设备。 + - **device** (str|None) – 指定上下文中使用的设备。它可以是 ``cpu``、 ``gpu``、 ``gpu:x``,其中 ``x`` 是 GPU 的编号。当它被设置为 ``cpu`` 或者 ``gpu`` 时,创建在该上下文中的 OP 将被运行在 CPUPlace 或者 CUDAPlace 上。若设置为 ``gpu``,同时程序运行在单卡模式下,设备的索引将与执行器的设备索引保持一致,默认值:None,在该上下文中的 OP 将被自动地分配设备。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/global_scope_cn.rst b/docs/api/paddle/static/global_scope_cn.rst index 2c6a97469d8..096b0ec33a3 100644 --- a/docs/api/paddle/static/global_scope_cn.rst +++ b/docs/api/paddle/static/global_scope_cn.rst @@ -9,7 +9,7 @@ global_scope -获取全局/默认作用域实例。很多API使用默认 ``global_scope``,例如 ``Executor.run`` 等。 +获取全局/默认作用域实例。很多 API 使用默认 ``global_scope``,例如 ``Executor.run`` 等。 返回 :::::::::::: diff --git a/docs/api/paddle/static/gradients_cn.rst 
b/docs/api/paddle/static/gradients_cn.rst index d7f674508d4..6c328801c8a 100644 --- a/docs/api/paddle/static/gradients_cn.rst +++ b/docs/api/paddle/static/gradients_cn.rst @@ -16,7 +16,7 @@ gradients - **targets** (Tensor|list[Tensor]) – 目标 Tensor 或包含 Tensor 的列表。 - **inputs** (Tensor|list[Tensor]) – 输入 Tensor 或包含 Tensor 的列表。 - - **target_gradients** (Tensor|list[Tensor],可选) – 目标的梯度 Tensor,应与目标 Tensor 的形状相同;如果设置为None,则以 1 初始化所有梯度 Tensor。 + - **target_gradients** (Tensor|list[Tensor],可选) – 目标的梯度 Tensor,应与目标 Tensor 的形状相同;如果设置为 None,则以 1 初始化所有梯度 Tensor。 - **no_grad_set** (set[Tensor|str],可选) – 在 `block0` ( :ref:`api_guide_Block` ) 中要忽略梯度的 Tensor 的名字的集合。所有的 :ref:`api_guide_Block` 中带有 ``stop_gradient = True`` 的所有 Tensor 的名字都会被自动添加到此集合中。如果该参数不为 ``None``,则会将该参数集合的内容添加到默认的集合中。默认值为 ``None`` 。 diff --git a/docs/api/paddle/static/ipu_shard_guard_cn.rst b/docs/api/paddle/static/ipu_shard_guard_cn.rst index 08c9c61f30f..d33d7fcf8c3 100644 --- a/docs/api/paddle/static/ipu_shard_guard_cn.rst +++ b/docs/api/paddle/static/ipu_shard_guard_cn.rst @@ -6,18 +6,18 @@ ipu_shard_guard .. py:function:: paddle.static.ipu_shard_guard(index=-1, stage=-1) -该接口用于对模型进行切分。用于指定Op在哪个ipu上进行计算以及模型被切分之后的计算顺序。 +该接口用于对模型进行切分。用于指定 Op 在哪个 ipu 上进行计算以及模型被切分之后的计算顺序。 .. 
note: -仅支持当enable_manual_shard=True,index设置才有效。请参阅 :ref:`cn_api_fluid_IpuStrategy` 。 -仅支持当enable_pipelining=True,stage设置才有效。请参阅 :ref:`cn_api_fluid_IpuStrategy` 。 -一个index支持对应None stage或一个stage,一个stage仅支持对应一个新的index或者一个重复的index。 +仅支持当 enable_manual_shard=True,index 设置才有效。请参阅 :ref:`cn_api_fluid_IpuStrategy` 。 +仅支持当 enable_pipelining=True,stage 设置才有效。请参阅 :ref:`cn_api_fluid_IpuStrategy` 。 +一个 index 支持对应 None stage 或一个 stage,一个 stage 仅支持对应一个新的 index 或者一个重复的 index。 参数 ::::::::: - - **index** (int,可选) - 指定Op在哪个ipu上计算,(如‘0, 1, 2, 3’),默认值-1,表示Op没有指定ipu。 - - **stage** (int,可选) – 指定被切分的模型的计算顺序,(如‘0, 1, 2, 3’),按照数值大小顺序对被切分的模型进行计算,默认值-1,表示没有数据流水计算顺序并按照计算图顺序计算Op。 + - **index** (int,可选) - 指定 Op 在哪个 ipu 上计算,(如‘0, 1, 2, 3’),默认值-1,表示 Op 没有指定 ipu。 + - **stage** (int,可选) – 指定被切分的模型的计算顺序,(如‘0, 1, 2, 3’),按照数值大小顺序对被切分的模型进行计算,默认值-1,表示没有数据流水计算顺序并按照计算图顺序计算 Op。 返回 ::::::::: diff --git a/docs/api/paddle/static/load_cn.rst b/docs/api/paddle/static/load_cn.rst index 536959b0e96..53f6c8e824d 100644 --- a/docs/api/paddle/static/load_cn.rst +++ b/docs/api/paddle/static/load_cn.rst @@ -6,20 +6,20 @@ load .. 
py:function:: paddle.static.load(program, model_path, executor=None, var_list=None) -该接口从Program中过滤出参数和优化器信息,然后从文件中获取相应的值。 +该接口从 Program 中过滤出参数和优化器信息,然后从文件中获取相应的值。 -如果Program和加载的文件之间参数的维度或数据类型不匹配,将引发异常。 +如果 Program 和加载的文件之间参数的维度或数据类型不匹配,将引发异常。 该函数还可以加载用[save_params,save_persistables,save_vars]接口保存的模型文件。 -当[save_params,save_persistables,save_vars]保存的模型格式为单个大文件时,var_list不能为None。 +当[save_params,save_persistables,save_vars]保存的模型格式为单个大文件时,var_list 不能为 None。 参数 :::::::::::: - - **program** ( :ref:`cn_api_fluid_Program` ) – 要加载的Program。 - - **model_path** (str) – 保存Program的目录名称+文件前缀。格式为 ``目录名称/文件前缀`` 。 - - **executor** (Executor,可选) - 当startup program没有运行时,用于初始化参数的Executor。默认值:None。 - - **var_list** (list,可选) - 指定加载的Tensor列表,该参数只在加载旧接口[save_params,save_persistables,save_vars]保存的模型文件时使用。当加载的是多个小文件时,Tensor列表可以是所有加载文件中Tensor的子集;当加载的单个大文件时,Tensor列表必须和加载文件中的Tensor保持一致。 + - **program** ( :ref:`cn_api_fluid_Program` ) – 要加载的 Program。 + - **model_path** (str) – 保存 Program 的目录名称+文件前缀。格式为 ``目录名称/文件前缀`` 。 + - **executor** (Executor,可选) - 当 startup program 没有运行时,用于初始化参数的 Executor。默认值:None。 + - **var_list** (list,可选) - 指定加载的 Tensor 列表,该参数只在加载旧接口[save_params,save_persistables,save_vars]保存的模型文件时使用。当加载的是多个小文件时,Tensor 列表可以是所有加载文件中 Tensor 的子集;当加载的单个大文件时,Tensor 列表必须和加载文件中的 Tensor 保持一致。 返回 :::::::::::: diff --git a/docs/api/paddle/static/load_inference_model_cn.rst b/docs/api/paddle/static/load_inference_model_cn.rst index 39028d41776..89705fae6c9 100644 --- a/docs/api/paddle/static/load_inference_model_cn.rst +++ b/docs/api/paddle/static/load_inference_model_cn.rst @@ -25,8 +25,8 @@ load_inference_model :::::::::::: 该接口返回一个包含三个元素的列表 [program,feed_target_names, fetch_targets]。它们的含义描述如下: - - **program** (Program)– ``Program`` (详见 :ref:`api_guide_Program` )类的实例。此处它被用于预测,因此可被称为Inference Program。 - - **feed_target_names** (list)– 字符串列表,包含着Inference Program预测时所需提供数据的所有变量名称(即所有输入变量的名称)。 + - **program** (Program)– ``Program`` (详见 :ref:`api_guide_Program` )类的实例。此处它被用于预测,因此可被称为 Inference Program。 + - 
**feed_target_names** (list)– 字符串列表,包含着 Inference Program 预测时所需提供数据的所有变量名称(即所有输入变量的名称)。 - **fetch_targets** (list)– ``Variable`` (详见 :ref:`api_guide_Program` )类型列表,包含着模型的所有输出变量。通过这些输出变量即可得到模型的预测结果。 diff --git a/docs/api/paddle/static/load_program_state_cn.rst b/docs/api/paddle/static/load_program_state_cn.rst index c5fca4f08f3..020f01b8630 100644 --- a/docs/api/paddle/static/load_program_state_cn.rst +++ b/docs/api/paddle/static/load_program_state_cn.rst @@ -5,17 +5,17 @@ load_program_state .. py:function:: paddle.static.load_program_state(model_path, var_list=None) -该接口从本地加载 ``Program`` 的参数和优化器的Tensor信息到内存中。 +该接口从本地加载 ``Program`` 的参数和优化器的 Tensor 信息到内存中。 参数 :::::::::::: - - **model_path** (str) - 存储 ``Program`` 的参数和优化器的Tensor信息的目录名称+文件前缀,格式为 ``目录名称/文件前缀`` 。 - - **var_list** (list,可选) - 指定加载的Tensor列表,该参数只在加载旧接口[save_params,save_persistables,save_vars]保存的模型文件时使用。当加载的是多个小文件时,Tensor列表可以是所有加载文件中Tensor的子集;当加载的单个大文件时,Tensor列表必须和加载文件中的Tensor保持一致。 + - **model_path** (str) - 存储 ``Program`` 的参数和优化器的 Tensor 信息的目录名称+文件前缀,格式为 ``目录名称/文件前缀`` 。 + - **var_list** (list,可选) - 指定加载的 Tensor 列表,该参数只在加载旧接口[save_params,save_persistables,save_vars]保存的模型文件时使用。当加载的是多个小文件时,Tensor 列表可以是所有加载文件中 Tensor 的子集;当加载的单个大文件时,Tensor 列表必须和加载文件中的 Tensor 保持一致。 返回 :::::::::::: -存储参数和优化器信息的dict。 +存储参数和优化器信息的 dict。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/mlu_places_cn.rst b/docs/api/paddle/static/mlu_places_cn.rst index 027e58568d4..eb7d4a4487f 100644 --- a/docs/api/paddle/static/mlu_places_cn.rst +++ b/docs/api/paddle/static/mlu_places_cn.rst @@ -7,7 +7,7 @@ mlu_places .. 
note:: - 多卡任务请先使用 FLAGS_selected_mlus 环境变量设置可见的MLU设备。 + 多卡任务请先使用 FLAGS_selected_mlus 环境变量设置可见的 MLU 设备。 该接口根据 ``device_ids`` 创建一个或多个 ``paddle.device.MLUPlace`` 对象,并返回所创建的对象列表。 @@ -15,12 +15,12 @@ mlu_places 例如:``FLAGS_selected_mlus=0,1,2``,则返回的列表将为 ``[paddle.device.MLUPlace(0), paddle.device.MLUPlace(1), paddle.device.MLUPlace(2)]``。 如果未设置标志 ``FLAGS_selected_mlus``,则返回所有可见的 MLU places。 -如果 ``device_ids`` 不是 ``None``,它应该是使用的MLU设备ID的列表或元组。 +如果 ``device_ids`` 不是 ``None``,它应该是使用的 MLU 设备 ID 的列表或元组。 例如:``device_id=[0,1,2]``,返回的列表将是 ``[paddle.device.MLUPlace(0), paddle.device.MLUPlace(1), paddle.device.MLUPlace(2)]``。 参数 ::::::::: - - **device_ids** (list(int)|tuple(int),可选) - MLU的设备ID列表或元组。默认值为 ``None``。 + - **device_ids** (list(int)|tuple(int),可选) - MLU 的设备 ID 列表或元组。默认值为 ``None``。 返回 ::::::::: diff --git a/docs/api/paddle/static/name_scope_cn.rst b/docs/api/paddle/static/name_scope_cn.rst index a9ab41dae11..34b5056818c 100644 --- a/docs/api/paddle/static/name_scope_cn.rst +++ b/docs/api/paddle/static/name_scope_cn.rst @@ -6,7 +6,7 @@ name_scope .. py:function:: paddle.static.name_scope(prefix=None) -该函数为静态图下的operators生成不同的命名空间。 +该函数为静态图下的 operators 生成不同的命名空间。 .. note:: 该函数只用于静态图下的调试和可视化,不建议用在其它方面,否则会引起内存泄露。 @@ -15,7 +15,7 @@ name_scope 参数 :::::::::::: - - **prefix** (str,可选) - 名称前缀。默认值为None。 + - **prefix** (str,可选) - 名称前缀。默认值为 None。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/nn/batch_norm_cn.rst b/docs/api/paddle/static/nn/batch_norm_cn.rst index ebc9d06f524..dacc1891652 100644 --- a/docs/api/paddle/static/nn/batch_norm_cn.rst +++ b/docs/api/paddle/static/nn/batch_norm_cn.rst @@ -19,7 +19,7 @@ batch_norm 更多详情请参考:`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ -``input`` 是mini-batch的输入。 +``input`` 是 mini-batch 的输入。 .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\ @@ -32,10 +32,10 @@ batch_norm moving\_mean = moving\_mean * momentum + mini\_batch\_mean * (1. 
- momentum) \\ moving\_variance = moving\_variance * momentum + mini\_batch\_var * (1. - momentum) -moving_mean和moving_var是训练过程中统计得到的全局均值和方差,在预测或者评估中使用。 +moving_mean 和 moving_var 是训练过程中统计得到的全局均值和方差,在预测或者评估中使用。 `is_test` 参数只能用于测试或者评估阶段,如果想在训练阶段使用预训练模型的全局均值和方差的话,可以设置 `use_global_stats=True`。 -当use_global_stats = True时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 不是一个minibatch的统计数据。它们是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。训练和测试(或预测)具有相同的行为: +当 use_global_stats = True 时,:math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 不是一个 minibatch 的统计数据。它们是全局(或运行)统计数据(moving_mean 和 moving_variance),通常来自预先训练好的模型。训练和测试(或预测)具有相同的行为: .. math:: @@ -48,24 +48,24 @@ moving_mean和moving_var是训练过程中统计得到的全局均值和方差 参数 :::::::::::: - - **input** (Tensor) - batch_norm算子的输入特征,是一个Tensor类型,输入维度可以是 2, 3, 4, 5。数据类型:flaot16, float32, float64。 - - **act** (string)- 激活函数类型,可以是leaky_realu、relu、prelu等。默认:None。 + - **input** (Tensor) - batch_norm 算子的输入特征,是一个 Tensor 类型,输入维度可以是 2, 3, 4, 5。数据类型:flaot16, float32, float64。 + - **act** (string)- 激活函数类型,可以是 leaky_realu、relu、prelu 等。默认:None。 - **is_test** (bool) - 指示它是否在测试阶段,非训练阶段使用训练过程中统计到的全局均值和全局方差。默认:False。 - - **momentum** (float|Tensor)- 此值用于计算 moving_mean 和 moving_var,是一个float类型或者一个shape为[1],数据类型为float32的Tensor类型。更新公式为::math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` , :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`,默认:0.9。 + - **momentum** (float|Tensor)- 此值用于计算 moving_mean 和 moving_var,是一个 float 类型或者一个 shape 为[1],数据类型为 float32 的 Tensor 类型。更新公式为::math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` , :math:`moving\_var = moving\_var * momentum + new\_var * (1. 
- momentum)`,默认:0.9。 - **epsilon** (float)- 加在分母上为了数值稳定的值。默认:1e-5。 - - **param_attr** (ParamAttr|None):指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。batch_norm算子默认的权重初始化是1.0。 - - **bias_attr** (ParamAttr|None)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。batch_norm算子默认的偏置初始化是0.0。 - - **data_layout** (string) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 - - **in_place** (bool)- batch_norm的输出复用输入的tensor,可以节省显存。默认:False。 + - **param_attr** (ParamAttr|None):指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。batch_norm 算子默认的权重初始化是 1.0。 + - **bias_attr** (ParamAttr|None)- 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。batch_norm 算子默认的偏置初始化是 0.0。 + - **data_layout** (string) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 + - **in_place** (bool)- batch_norm 的输出复用输入的 tensor,可以节省显存。默认:False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **moving_mean_name** (string)- moving_mean的名称,存储全局均值。如果将其设置为None, ``batch_norm`` 将随机命名全局均值;否则,``batch_norm`` 将命名全局均值为 ``moving_mean_name``。默认:None。 - - **moving_variance_name** (string)- moving_variance的名称,存储全局变量。如果将其设置为None, ``batch_norm`` 将随机命名全局方差;否则,``batch_norm`` 将命名全局方差为 ``moving_variance_name``。默认:None。 - - **do_model_average_for_mean_and_var** (bool,默认False)- 是否为mean和variance做模型均值。 - - **use_global_stats** (bool) – 是否使用全局均值和方差。在预测或测试模式下,将use_global_stats设置为true或将is_test设置为true,并且行为是等效的。在训练模式中,当设置use_global_stats为True时,在训练期间也使用全局均值和方差。默认:False。 + - **moving_mean_name** (string)- moving_mean 的名称,存储全局均值。如果将其设置为 None, ``batch_norm`` 将随机命名全局均值;否则,``batch_norm`` 将命名全局均值为 ``moving_mean_name``。默认:None。 + - **moving_variance_name** (string)- moving_variance 的名称,存储全局变量。如果将其设置为 None, ``batch_norm`` 将随机命名全局方差;否则,``batch_norm`` 将命名全局方差为 ``moving_variance_name``。默认:None。 + - **do_model_average_for_mean_and_var** (bool,默认 False)- 是否为 
mean 和 variance 做模型均值。 + - **use_global_stats** (bool) – 是否使用全局均值和方差。在预测或测试模式下,将 use_global_stats 设置为 true 或将 is_test 设置为 true,并且行为是等效的。在训练模式中,当设置 use_global_stats 为 True 时,在训练期间也使用全局均值和方差。默认:False。 返回 :::::::::::: - 维度和输入相同的Tensor,在输入中运用批正则后的结果。 + 维度和输入相同的 Tensor,在输入中运用批正则后的结果。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/nn/case_cn.rst b/docs/api/paddle/static/nn/case_cn.rst index 846a227af28..dccda0c4212 100644 --- a/docs/api/paddle/static/nn/case_cn.rst +++ b/docs/api/paddle/static/nn/case_cn.rst @@ -7,12 +7,12 @@ case .. py:function:: paddle.static.nn.case(pred_fn_pairs, default=None, name=None) -该OP的运行方式类似于python的if-elif-elif-else。 +该 OP 的运行方式类似于 python 的 if-elif-elif-else。 参数 :::::::::::: - - **pred_fn_pairs** (list|tuple) - 一个list或者tuple,元素是二元组(pred, fn)。其中 ``pred`` 是形状为[1]的布尔型 Tensor,``fn`` 是一个可调用对象。所有的可调用对象都返回相同结构的Tensor。 + - **pred_fn_pairs** (list|tuple) - 一个 list 或者 tuple,元素是二元组(pred, fn)。其中 ``pred`` 是形状为[1]的布尔型 Tensor,``fn`` 是一个可调用对象。所有的可调用对象都返回相同结构的 Tensor。 - **default** (callable,可选) - 可调用对象,返回一个或多个张量。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -20,8 +20,8 @@ case :::::::::::: Tensor|list(Tensor) -- 如果 ``pred_fn_pairs`` 中存在pred是True的元组(pred, fn),则返回第一个为True的pred的元组中fn的返回结果;如果 ``pred_fn_pairs`` 中不存在pred为True的元组(pred, fn) 且 ``default`` 不是None,则返回调用 ``default`` 的返回结果; -- 如果 ``pred_fn_pairs`` 中不存在pred为True的元组(pred, fn) 且 ``default`` 是None,则返回 ``pred_fn_pairs`` 中最后一个pred的返回结果。 +- 如果 ``pred_fn_pairs`` 中存在 pred 是 True 的元组(pred, fn),则返回第一个为 True 的 pred 的元组中 fn 的返回结果;如果 ``pred_fn_pairs`` 中不存在 pred 为 True 的元组(pred, fn) 且 ``default`` 不是 None,则返回调用 ``default`` 的返回结果; +- 如果 ``pred_fn_pairs`` 中不存在 pred 为 True 的元组(pred, fn) 且 ``default`` 是 None,则返回 ``pred_fn_pairs`` 中最后一个 pred 的返回结果。 代码示例 diff --git a/docs/api/paddle/static/nn/cond_cn.rst b/docs/api/paddle/static/nn/cond_cn.rst index c4293e3b0e0..9c0776d8ea7 100644 --- a/docs/api/paddle/static/nn/cond_cn.rst +++ b/docs/api/paddle/static/nn/cond_cn.rst @@ -7,16 +7,16 @@ cond 
.. py:function:: paddle.static.nn.cond(pred, true_fn=None, false_fn=None, name=None) -如果 ``pred`` 是 ``True``,该API返回 ``true_fn()``,否则返回 ``false_fn()`` 。 -用户如果不想在 ``callable`` 中做任何事,可以把 ``true_fn`` 或 ``false_fn`` 设为 ``None``,此时本API会把该 ``callable`` 视为简单返回 ``None`` 。 +如果 ``pred`` 是 ``True``,该 API 返回 ``true_fn()``,否则返回 ``false_fn()`` 。 +用户如果不想在 ``callable`` 中做任何事,可以把 ``true_fn`` 或 ``false_fn`` 设为 ``None``,此时本 API 会把该 ``callable`` 视为简单返回 ``None`` 。 -``true_fn`` 和 ``false_fn`` 需要返回同样嵌套结构(nest structure)的Tensor,如果不想返回任何值也可都返回 ``None`` 。 -PaddlePaddle里Tensor的嵌套结构是指一个Tensor,或者Tensor的元组(tuple),或者Tensor的列表(list)。 +``true_fn`` 和 ``false_fn`` 需要返回同样嵌套结构(nest structure)的 Tensor,如果不想返回任何值也可都返回 ``None`` 。 +PaddlePaddle 里 Tensor 的嵌套结构是指一个 Tensor,或者 Tensor 的元组(tuple),或者 Tensor 的列表(list)。 .. note:: - 1. ``true_fn`` 和 ``false_fn`` 返回的元组必须形状相同,但是里面的Tensor形状可以不同。 + 1. ``true_fn`` 和 ``false_fn`` 返回的元组必须形状相同,但是里面的 Tensor 形状可以不同。 2. 本接口在动态图和静态图模式下都可以运行,在动态图情况下就只会按 ``pred`` 条件运行其中一支分支。 - 3. 静态图模式下,因为各个分支都要参与组网,因此不论运行哪个分支,在 ``true_fn`` 和 ``false_fn`` 内外创建的Tensor和Op都会组网,即PaddlePaddle并不是惰性语法(lazy semantics)。例如 + 3. 静态图模式下,因为各个分支都要参与组网,因此不论运行哪个分支,在 ``true_fn`` 和 ``false_fn`` 内外创建的 Tensor 和 Op 都会组网,即 PaddlePaddle 并不是惰性语法(lazy semantics)。例如 .. 
code-block:: python @@ -31,14 +31,14 @@ PaddlePaddle里Tensor的嵌套结构是指一个Tensor,或者Tensor的元组 参数 ::::::::: - - **pred** (Tensor) - 一个形状为[1]的布尔型(boolean)的Tensor,该布尔值决定要返回 ``true_fn`` 还是 ``false_fn`` 的运行结果。 - - **true_fn** (callable) - 一个当 ``pred`` 是 ``True`` 时被调用的callable,默认值:``None`` 。 - - **false_fn** (callable) - 一个当 ``pred`` 是 ``False`` 时被调用的callable,默认值:``None`` 。 + - **pred** (Tensor) - 一个形状为[1]的布尔型(boolean)的 Tensor,该布尔值决定要返回 ``true_fn`` 还是 ``false_fn`` 的运行结果。 + - **true_fn** (callable) - 一个当 ``pred`` 是 ``True`` 时被调用的 callable,默认值:``None`` 。 + - **false_fn** (callable) - 一个当 ``pred`` 是 ``False`` 时被调用的 callable,默认值:``None`` 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor|list(Tensor)|tuple(Tensor),如果 ``pred`` 是 ``True``,该API返回 ``true_fn()``,否则返回 ``false_fn()`` 。 +Tensor|list(Tensor)|tuple(Tensor),如果 ``pred`` 是 ``True``,该 API 返回 ``true_fn()``,否则返回 ``false_fn()`` 。 代码示例 ::::::::: @@ -71,7 +71,7 @@ Tensor|list(Tensor)|tuple(Tensor),如果 ``pred`` 是 ``True``,该API返回 y = paddle.full(shape=[1], dtype='float32', fill_value=0.23) pred = paddle.less_than(x=x, y=y, name=None) ret = paddle.static.nn.cond(pred, true_func, false_func) - # ret 是包含两个tensors的元组 + # ret 是包含两个 tensors 的元组 # ret[0] = [[1 1]] # ret[1] = [[ True True True] # [ True True True]] diff --git a/docs/api/paddle/static/nn/conv2d_cn.rst b/docs/api/paddle/static/nn/conv2d_cn.rst index 32d4762a63b..5def4ca2f56 100644 --- a/docs/api/paddle/static/nn/conv2d_cn.rst +++ b/docs/api/paddle/static/nn/conv2d_cn.rst @@ -9,15 +9,15 @@ conv2d -该OP是二维卷积层(convolution2D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算输出特征层大小。 +该 OP 是二维卷积层(convolution2D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算输出特征层大小。 -输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。 +输入和输出是 NCHW 或 NHWC 格式,其中 N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。滤波器是 MCHW 格式,M 是输出图像通道数,C 是输入图像通道数,H 是滤波器高度,W 是滤波器宽度。 -如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 
。 +如果组数(groups)大于 1,C 等于输入图像通道数除以组数的结果。详情请参考 UFLDL's : `卷积 `_ 。 -如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 +如果 bias_attr 不为 False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 -对每个输入X,有等式: +对每个输入 X,有等式: .. math:: @@ -25,12 +25,12 @@ conv2d 其中: - - :math:`X`:输入值,NCHW或NHWC格式的4-D Tensor; - - :math:`W`:滤波器值,MCHW格式的4-D Tensor; + - :math:`X`:输入值,NCHW 或 NHWC 格式的 4-D Tensor; + - :math:`W`:滤波器值,MCHW 格式的 4-D Tensor; - :math:`*`:卷积操作; - :math:`b`:偏置值,2-D Tensor,形状为 ``[M,1]``; - :math:`\sigma`:激活函数; - - :math:`Out`:输出值,NCHW或NHWC格式的4-D Tensor,和 ``X`` 的形状可能不同。 + - :math:`Out`:输出值,NCHW 或 NHWC 格式的 4-D Tensor,和 ``X`` 的形状可能不同。 **示例** @@ -70,24 +70,24 @@ conv2d 参数 :::::::::::: - - **input** (Tensor) - 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **input** (Tensor) - 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度,数据类型为 float16, float32 或 float64。 - **num_filters** (int) - 滤波器(卷积核)的个数。和输出图像通道相同。 - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含两个整数值:(filter_size_height,filter_size_width)。若为一个整数,filter_size_height = filter_size_width = filter_size。 - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式: + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式: - - (1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; - - (2)包含4个整数值:[padding_height_top, 
padding_height_bottom, padding_width_left, padding_width_right]; - - (3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - (1)包含 4 个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; + - (2)包含 4 个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right]; + - (3)包含 2 个整数值:[padding_height, padding_width],此时 padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 - **dilation** (int|list|tuple,可选) - 膨胀比例大小。空洞卷积时会使用该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果膨胀比例为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和滤波器分别根据通道数量平均分为n组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第n组滤波器和第n组输入进行卷积计算。默认值:1。 - - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **use_cudnn** (bool,可选)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 - - **act** (str,可选) - 激活函数类型,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations`。如果设为None,则未添加激活函数。默认值:None。 + - **groups** (int,可选) - 二维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和滤波器分别根据通道数量平均分为 n 组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第 n 组滤波器和第 n 组输入进行卷积计算。默认值:1。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 
:ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为 bool 类型,只支持为 False,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool,可选)- 是否使用 cudnn 内核。只有已安装 cudnn 库时才有效。默认值:True。 + - **act** (str,可选) - 激活函数类型,如 tanh、softmax、sigmoid,relu 等,支持列表请参考 :ref:`api_guide_activations`。如果设为 None,则未添加激活函数。默认值:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 返回 :::::::::::: diff --git a/docs/api/paddle/static/nn/conv2d_transpose_cn.rst b/docs/api/paddle/static/nn/conv2d_transpose_cn.rst index fa8a2fb5a80..80a2015983f 100644 --- a/docs/api/paddle/static/nn/conv2d_transpose_cn.rst +++ b/docs/api/paddle/static/nn/conv2d_transpose_cn.rst @@ -11,13 +11,13 @@ conv2d_transpose 二维转置卷积层(Convlution2D transpose layer) -该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。 +该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过 output_size 指定输出特征层大小。 -输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。 +输入(Input)和输出(Output)为 NCHW 或 NHWC 格式,其中 N 为批尺寸,C 为通道数(channel),H 为特征层高度,W 为特征层宽度。滤波器是 MCHW 格式,M 是输出图像通道数,C 是输入图像通道数,H 是滤波器高度,W 是滤波器宽度。如果组数大于 1,C 等于输入图像通道数除以组数的结果。 转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和论文细节。 -如果参数bias_attr不为False,转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 +如果参数 bias_attr 不为 False,转置卷积计算会添加偏置项。如果 act 不为 None,则转置卷积计算之后添加相应的激活函数。 论文参考:https://arxiv.org/pdf/1603.07285.pdf @@ -29,24 +29,24 @@ conv2d_transpose 其中: - - :math:`X`:输入,具有NCHW或NHWC格式的4-D Tensor; - - :math:`W`:滤波器,具有NCHW格式的4-D Tensor; + - :math:`X`:输入,具有 NCHW 或 NHWC 格式的 4-D Tensor; + - 
:math:`W`:滤波器,具有 NCHW 格式的 4-D Tensor; - :math:`*`:卷积计算(注意:转置卷积本质上的计算还是卷积); - :math:`b`:偏置(bias),2-D Tensor,形状为 ``[M,1]``; - :math:`σ`:激活函数; - - :math:`Out`:输出值,NCHW或NHWC格式的4-D Tensor,和 ``X`` 的形状可能不同。 + - :math:`Out`:输出值,NCHW 或 NHWC 格式的 4-D Tensor,和 ``X`` 的形状可能不同。 **示例** - 输入: - 输入Tensor的形状::math:`(N,C_{in}, H_{in}, W_{in})` + 输入 Tensor 的形状::math:`(N,C_{in}, H_{in}, W_{in})` 滤波器的形状::math:`(C_{in}, C_{out}, H_f, W_f)` - 输出: - 输出Tensor的形状::math:`(N,C_{out}, H_{out}, W_{out})` + 输出 Tensor 的形状::math:`(N,C_{out}, H_{out}, W_{out})` 其中 @@ -70,35 +70,35 @@ conv2d_transpose & W'_{out} = (W_{in}-1)*strides[1] + dilations[1]*(W_f-1)+1 \\ .. note:: -如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`; -否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ),并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 +如果 output_size 为 None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`; +否则,指定的 output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ),并且指定的 output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 -如果指定了output_size, ``conv2d_transpose`` 可以自动计算滤波器的大小。 +如果指定了 output_size, ``conv2d_transpose`` 可以自动计算滤波器的大小。 参数 :::::::::::: - - **input** (Tensor)- 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度。数据类型:float32或float64。 + - **input** (Tensor)- 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的 4-D Tensor,N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。数据类型:float32 或 
float64。 - **num_filters** (int) - 滤波器(卷积核)的个数,与输出图片的通道数相同。 - - **output_size** (int|tuple,可选) - 输出图片的大小。如果output_size是一个元组,则必须包含两个整型数,(output_size_height,output_size_width)。如果output_size=None,则内部会使用filter_size、padding和stride来计算output_size。如果output_size和filter_size是同时指定的,那么它们应满足上面的公式。默认:None。output_size和filter_size不能同时为None。 - - **filter_size** (int|tuple,可选) - 滤波器大小。如果filter_size是一个元组,则必须包含两个整型数,(filter_size_height, filter_size_width)。否则,filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, ``conv2d_transpose`` 内部会根据output_size、padding和stride计算出滤波器大小。默认:None。output_size和filter_size不能同时为None。 - - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式: - - - (1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; - - (2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right]; - - (3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 - - - **stride** (int|tuple,可选) - 步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,则必须包含两个整型数,形式为(stride_height,stride_width)。否则,stride_height = stride_width = stride。默认:stride = 1。 - - **dilation** (int|tuple,可选) - 膨胀比例(dilation)大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例dilation是一个元组,那么元组必须包含两个整型数,形式为(dilation_height, dilation_width)。否则,dilation_height = dilation_width = dilation。默认:dilation= 1。 - - **groups** (int,可选) - 二维转置卷积层的组数。从Alex Krizhevsky的CNN 
Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 - - **param_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv2d_transpose算子默认的权重初始化是Xavier。 - - **bias_attr** (ParamAttr|False,可选)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv2d_transpose算子默认的偏置初始化是0.0。 - - **use_cudnn** (bool,可选) - 是否使用cudnn内核,只有已安装cudnn库时才有效。默认:True。 - - **act** (str,可选) - 激活函数类型,如果设置为None,则不使用激活函数。默认:None。 + - **output_size** (int|tuple,可选) - 输出图片的大小。如果 output_size 是一个元组,则必须包含两个整型数,(output_size_height,output_size_width)。如果 output_size=None,则内部会使用 filter_size、padding 和 stride 来计算 output_size。如果 output_size 和 filter_size 是同时指定的,那么它们应满足上面的公式。默认:None。output_size 和 filter_size 不能同时为 None。 + - **filter_size** (int|tuple,可选) - 滤波器大小。如果 filter_size 是一个元组,则必须包含两个整型数,(filter_size_height, filter_size_width)。否则,filter_size_height = filter_size_width = filter_size。如果 filter_size=None,则必须指定 output_size, ``conv2d_transpose`` 内部会根据 output_size、padding 和 stride 计算出滤波器大小。默认:None。output_size 和 filter_size 不能同时为 None。 + - **padding** (int|list|tuple|str,可选) - 填充 padding 大小。padding 参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个 0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式: + + - (1)包含 4 个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; + - (2)包含 4 个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right]; + - (3)包含 2 个整数值:[padding_height, padding_width],此时 padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + + - **stride** 
(int|tuple,可选) - 步长 stride 大小。滤波器和输入进行卷积计算时滑动的步长。如果 stride 是一个元组,则必须包含两个整型数,形式为(stride_height,stride_width)。否则,stride_height = stride_width = stride。默认:stride = 1。 + - **dilation** (int|tuple,可选) - 膨胀比例(dilation)大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例 dilation 是一个元组,那么元组必须包含两个整型数,形式为(dilation_height, dilation_width)。否则,dilation_height = dilation_width = dilation。默认:dilation= 1。 + - **groups** (int,可选) - 二维转置卷积层的组数。从 Alex Krizhevsky 的 CNN Deep 论文中的群卷积中受到启发,当 group=2 时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 + - **param_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv2d_transpose 算子默认的权重初始化是 Xavier。 + - **bias_attr** (ParamAttr|False,可选)- 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv2d_transpose 算子默认的偏置初始化是 0.0。 + - **use_cudnn** (bool,可选) - 是否使用 cudnn 内核,只有已安装 cudnn 库时才有效。默认:True。 + - **act** (str,可选) - 激活函数类型,如果设置为 None,则不使用激活函数。默认:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 返回 :::::::::::: diff --git a/docs/api/paddle/static/nn/conv3d_cn.rst b/docs/api/paddle/static/nn/conv3d_cn.rst index fa5d0e9f8b2..0e73e415d9d 100644 --- a/docs/api/paddle/static/nn/conv3d_cn.rst +++ b/docs/api/paddle/static/nn/conv3d_cn.rst @@ -9,15 +9,15 @@ conv3d -该OP是三维卷积层(convolution3D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算得到输出特征层大小。 +该 OP 是三维卷积层(convolution3D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算得到输出特征层大小。 -输入和输出是NCDHW或NDHWC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。 +输入和输出是 NCDHW 或 NDHWC 格式,其中 N 是批尺寸,C 是通道数,D 是特征层深度,H 是特征层高度,W 是特征层宽度。 三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。 
-如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 +如果 bias_attr 不为 False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 -对每个输入X,有等式: +对每个输入 X,有等式: .. math:: @@ -25,12 +25,12 @@ conv3d 其中: - - :math:`X`:输入值,NCDHW或NDHWC格式的5-D Tensor; - - :math:`W`:滤波器值,MCDHW格式的5-D Tensor; + - :math:`X`:输入值,NCDHW 或 NDHWC 格式的 5-D Tensor; + - :math:`W`:滤波器值,MCDHW 格式的 5-D Tensor; - :math:`*`:卷积操作; - :math:`b`:偏置值,2-D Tensor,形为 ``[M,1]``; - :math:`\sigma`:激活函数; - - :math:`Out`:输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同。 + - :math:`Out`:输出值,NCDHW 或 NDHWC 格式的 5-D Tensor,和 ``X`` 的形状可能不同。 **示例** @@ -75,24 +75,24 @@ conv3d 参数 :::::::::::: - - **input** (Tensor) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **input** (Tensor) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的 5-D Tensor,N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度,数据类型为 float16, float32 或 float64。 - **num_fliters** (int) - 滤波器(卷积核)的个数。和输出图像通道相同。 - - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含三个整数值:(filter_size_depth, filter_size_height,filter_size_width)。若为一个整数,则filter_size_depth = filter_size_height = filter_size_width = filter_size。 + - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含三个整数值:(filter_size_depth, filter_size_height,filter_size_width)。若为一个整数,则 filter_size_depth = filter_size_height = filter_size_width = filter_size。 - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含三个整型数:(stride_depth, stride_height, stride_width)。若为一个整数,stride_depth = stride_height = stride_width = stride。默认值:1。 - - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式: + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式: - - (1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], 
[padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; - - (2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right]; - - (3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - (1)包含 5 个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]]; + - (2)包含 6 个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right]; + - (3)包含 3 个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 - **dilation** (int|list|tuple,可选) - 膨胀比例大小。空洞卷积时会使用该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果膨胀比例为列表或元组,则必须包含三个整型数:(dilation_depth, dilation_height,dilation_width)。若为一个整数,dilation_depth = dilation_height = dilation_width = dilation。默认值:1。 - - **groups** (int,可选) - 三维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和滤波器分别根据通道数量平均分为n组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第n组滤波器和第n组输入进行卷积计算。默认值:1。 - - 
**param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **use_cudnn** (bool,可选)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 - - **act** (str,可选) - 激活函数类型,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations`。如果设为None,则未添加激活函数。默认值:None。 + - **groups** (int,可选) - 三维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和滤波器分别根据通道数量平均分为 n 组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第 n 组滤波器和第 n 组输入进行卷积计算。默认值:1。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为 bool 类型,只支持为 False,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool,可选)- 是否使用 cudnn 内核。只有已安装 cudnn 库时才有效。默认值:True。 + - **act** (str,可选) - 激活函数类型,如 tanh、softmax、sigmoid,relu 等,支持列表请参考 :ref:`api_guide_activations`。如果设为 None,则未添加激活函数。默认值:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度。默认值:"NCDHW"。 返回 :::::::::::: diff --git a/docs/api/paddle/static/nn/conv3d_transpose_cn.rst b/docs/api/paddle/static/nn/conv3d_transpose_cn.rst index 3d115cb8757..276718c118b 100755 --- a/docs/api/paddle/static/nn/conv3d_transpose_cn.rst +++ b/docs/api/paddle/static/nn/conv3d_transpose_cn.rst @@ -11,13 +11,13 @@ conv3d_transpose 三维转置卷积层(Convlution3D transpose layer) -该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。 +该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过 output_size 指定输出特征层大小。 
-输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。 +输入(Input)和输出(Output)为 NCDHW 或者 NDHWC 格式。其中 N 为批尺寸,C 为通道数(channel),D 为特征深度,H 为特征层高度,W 为特征层宽度。 转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和论文细节。 -如果参数bias_attr不为False,转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 +如果参数 bias_attr 不为 False,转置卷积计算会添加偏置项。如果 act 不为 None,则转置卷积计算之后添加相应的激活函数。 论文参考:https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf @@ -28,26 +28,26 @@ conv3d_transpose 其中: - - :math:`X`:输入,具有NCDHW或NDHWC格式的5-D Tensor; - - :math:`W`:滤波器,具有NCDHW格式的5-D Tensor; + - :math:`X`:输入,具有 NCDHW 或 NDHWC 格式的 5-D Tensor; + - :math:`W`:滤波器,具有 NCDHW 格式的 5-D Tensor; - :math:`*`:卷积操作(注意:转置卷积本质上的计算还是卷积); - :math:`b`:偏置(bias),2-D Tensor,形状为 ``[M,1]``; - :math:`σ`:激活函数; - - :math:`Out`:输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同。 + - :math:`Out`:输出值,NCDHW 或 NDHWC 格式的 5-D Tensor,和 ``X`` 的形状可能不同。 **示例** 输入: - 输入的shape::math:`(N,C_{in}, D_{in}, H_{in}, W_{in})` + 输入的 shape::math:`(N,C_{in}, D_{in}, H_{in}, W_{in})` - 滤波器的shape::math:`(C_{in}, C_{out}, D_f, H_f, W_f)` + 滤波器的 shape::math:`(C_{in}, C_{out}, D_f, H_f, W_f)` 输出: - 输出的shape::math:`(N,C_{out}, D_{out}, H_{out}, W_{out})` + 输出的 shape::math:`(N,C_{out}, D_{out}, H_{out}, W_{out})` 其中: @@ -77,35 +77,35 @@ conv3d_transpose .. 
note:: -如果output_size为None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`; -否则,指定的output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ),并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 +如果 output_size 为 None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}`; +否则,指定的 output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的 output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ),并且指定的 output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 -如果指定了output_size, ``conv3d_transpose`` 可以自动计算滤波器的大小。 +如果指定了 output_size, ``conv3d_transpose`` 可以自动计算滤波器的大小。 参数 :::::::::::: - - **input** (Tensor)- 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型:float32或float64。 + - **input** (Tensor)- 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的 5-D Tensor,N 是批尺寸,C 是通道数,D 是特征深度,H 是特征高度,W 是特征宽度,数据类型:float32 或 float64。 - **num_filters** (int) - 滤波器(卷积核)的个数,与输出的图片的通道数相同。 - - **output_size** (int|tuple,可选) - 
输出图片的大小。如果output_size是一个元组,则必须包含三个整型数,(output_size_depth,output_size_height,output_size_width)。如果output_size=None,则内部会使用filter_size、padding和stride来计算output_size。如果output_size和filter_size是同时指定的,那么它们应满足上面的公式。默认:None。output_size和filter_size不能同时为None。 - - **filter_size** (int|tuple,可选) - 滤波器大小。如果filter_size是一个元组,则必须包含三个整型数,(filter_size_depth,filter_size_height, filter_size_width)。否则,filter_size_depth = filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, ``conv2d_transpose`` 内部会根据output_size、padding和stride计算出滤波器大小。默认:None。output_size和filter_size不能同时为None。 - - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式: - - - (1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]; - - (2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]; - - (3)包含3个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 - - - **stride** (int|tuple,可选) - 步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,那么元组的形式为(stride_depth,stride_height,stride_width)。否则,stride_depth = stride_height = stride_width = stride。默认:stride = 1。 - - **dilation** (int|tuple,可选) - 膨胀比例dilation大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例dilation是一个元组,那么元组的形式为(dilation_depth,dilation_height, dilation_width)。否则,dilation_depth = dilation_height = dilation_width = dilation。默认:dilation= 1。 - - 
**groups** (int,可选) - 三维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 - - **param_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv3d_transpose算子默认的权重初始化是Xavier。 - - **bias_attr** (ParamAttr|False,可选)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv3d_transpose算子默认的偏置初始化是0.0。 - - **use_cudnn** (bool,可选) - 是否使用cudnn内核,只有已安装cudnn库时才有效。默认:True。 - - **act** (str,可选) - 激活函数类型,如果设置为None,则不使用激活函数。默认:None。 + - **output_size** (int|tuple,可选) - 输出图片的大小。如果 output_size 是一个元组,则必须包含三个整型数,(output_size_depth,output_size_height,output_size_width)。如果 output_size=None,则内部会使用 filter_size、padding 和 stride 来计算 output_size。如果 output_size 和 filter_size 是同时指定的,那么它们应满足上面的公式。默认:None。output_size 和 filter_size 不能同时为 None。 + - **filter_size** (int|tuple,可选) - 滤波器大小。如果 filter_size 是一个元组,则必须包含三个整型数,(filter_size_depth,filter_size_height, filter_size_width)。否则,filter_size_depth = filter_size_height = filter_size_width = filter_size。如果 filter_size=None,则必须指定 output_size, ``conv2d_transpose`` 内部会根据 output_size、padding 和 stride 计算出滤波器大小。默认:None。output_size 和 filter_size 不能同时为 None。 + - **padding** (int|list|tuple|str,可选) - 填充 padding 大小。padding 参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个 0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有 3 种格式: + + - (1)包含 5 个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]; + - (2)包含 6 个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]; + - (3)包含 3 个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = 
pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 + + - **stride** (int|tuple,可选) - 步长 stride 大小。滤波器和输入进行卷积计算时滑动的步长。如果 stride 是一个元组,那么元组的形式为(stride_depth,stride_height,stride_width)。否则,stride_depth = stride_height = stride_width = stride。默认:stride = 1。 + - **dilation** (int|tuple,可选) - 膨胀比例 dilation 大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例 dilation 是一个元组,那么元组的形式为(dilation_depth,dilation_height, dilation_width)。否则,dilation_depth = dilation_height = dilation_width = dilation。默认:dilation= 1。 + - **groups** (int,可选) - 三维转置卷积层的组数。从 Alex Krizhevsky 的 CNN Deep 论文中的群卷积中受到启发,当 group=2 时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 + - **param_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv3d_transpose 算子默认的权重初始化是 Xavier。 + - **bias_attr** (ParamAttr|False,可选)- 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv3d_transpose 算子默认的偏置初始化是 0.0。 + - **use_cudnn** (bool,可选) - 是否使用 cudnn 内核,只有已安装 cudnn 库时才有效。默认:True。 + - **act** (str,可选) - 激活函数类型,如果设置为 None,则不使用激活函数。默认:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCDHW"。 返回 :::::::::::: diff --git a/docs/api/paddle/static/nn/crf_decoding_cn.rst b/docs/api/paddle/static/nn/crf_decoding_cn.rst index 7d1307a8247..80046ff3455 100644 --- a/docs/api/paddle/static/nn/crf_decoding_cn.rst +++ b/docs/api/paddle/static/nn/crf_decoding_cn.rst @@ -13,7 +13,7 @@ crf_decoding 这个层运算的结果会随着输入 ``Label`` 的有无而改变: - 1. 
``Label`` 非 None 的情况,在实际训练中时常发生。此时本层会协同 :ref:`cn_api_fluid_layers_chunk_eval` 工作。在 LoDTensor 模式下,本层会返回一行形为 [N X 1] 的向量,在 padding 模式下,返回形状则为 [B x S],其中值为 0 的部分代表该 label 不适合作为对应结点的标注,值为1的部分则反之。此类型的输出可以直接作为 :ref:`cn_api_fluid_layers_chunk_eval` 算子的输入; + 1. ``Label`` 非 None 的情况,在实际训练中时常发生。此时本层会协同 :ref:`cn_api_fluid_layers_chunk_eval` 工作。在 LoDTensor 模式下,本层会返回一行形为 [N X 1] 的向量,在 padding 模式下,返回形状则为 [B x S],其中值为 0 的部分代表该 label 不适合作为对应结点的标注,值为 1 的部分则反之。此类型的输出可以直接作为 :ref:`cn_api_fluid_layers_chunk_eval` 算子的输入; 2. 当没有 ``Label`` 时,该函数会执行标准解码过程; @@ -22,8 +22,8 @@ crf_decoding 参数 :::::::::::: - - **input** (Tensor) — 一个形为 [N x D] 的 LoDTensor,其中 N 是mini-batch的大小,D是标注(tag) 的总数;或者形为 [B x S x D] 的普通 Tensor,B 是批次大小,S 是序列最大长度,D 是标注的总数。该输入是 :ref:`cn_api_fluid_layers_linear_chain_crf`` 的 unscaled emission weight matrix (未标准化的发射权重矩阵)。数据类型为 float32 或者 float64。 - - **param_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_guide_ParamAttr` 。 + - **input** (Tensor) — 一个形为 [N x D] 的 LoDTensor,其中 N 是 mini-batch 的大小,D 是标注(tag) 的总数;或者形为 [B x S x D] 的普通 Tensor,B 是批次大小,S 是序列最大长度,D 是标注的总数。该输入是 :ref:`cn_api_fluid_layers_linear_chain_crf`` 的 unscaled emission weight matrix (未标准化的发射权重矩阵)。数据类型为 float32 或者 float64。 + - **param_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_guide_ParamAttr` 。 - **label** (Tensor,可选) — 形为 [N x 1] 的正确标注(ground truth)(LoDTensor 模式),或者形状为 [B x S]。有关该参数的更多信息,请详见上述描述。数据类型为 int64。 - **length** (Tensor,可选) — 形状为 [B x 1],表示输入序列的真实长度。该输入非 None,表示该层工作在 padding 模式下,即 ``input`` 和 ``label`` 都是带 padding 的普通 Tensor。数据类型为 int64。 diff --git a/docs/api/paddle/static/nn/data_norm_cn.rst b/docs/api/paddle/static/nn/data_norm_cn.rst index b38723d4767..ee394bfcd74 100644 --- a/docs/api/paddle/static/nn/data_norm_cn.rst +++ b/docs/api/paddle/static/nn/data_norm_cn.rst @@ -11,12 +11,12 @@ data_norm **数据正则化层** -可用作conv2d和fully_connected操作的正则化函数。此层所需的数据格式为以下之一: +可用作 conv2d 和 fully_connected 操作的正则化函数。此层所需的数据格式为以下之一: 1. 
NHWC [batch, in_height, in_width, in_channels] 2. NCHW [batch, in_channels, in_height, in_width] -:math:`input` 为一个mini-batch上的特征: +:math:`input` 为一个 mini-batch 上的特征: .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\ @@ -33,17 +33,17 @@ data_norm - **input** (Tensor) - 输入变量。 - **act** (string,可选) - 激活函数类型,线性| relu | prelu | ...,默认值为 None。 - **epsilon** (float,可选) - 指明在计算过程中是否添加较小的值到方差中以防止除零。默认值:1e-05。 - - **param_attr** (ParamAttr,可选) - 参数比例的参数属性。默认值为None。 - - **data_layout** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 - - **in_place** (bool,可选) - 是否使data_norm的输入和输出复用同一块内存,默认值为False。 + - **param_attr** (ParamAttr,可选) - 参数比例的参数属性。默认值为 None。 + - **data_layout** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N 是批尺寸,C 是通道数,H 是特征高度,W 是特征宽度。默认值:"NCHW"。 + - **in_place** (bool,可选) - 是否使 data_norm 的输入和输出复用同一块内存,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **moving_mean_name** (string,可选) - 存储全局Mean的moving_mean的名称。默认值为None。 - - **moving_variance_name** (string,可选) - 存储全局Variance的moving_variance的名称。默认值为None。 - - **do_model_average_for_mean_and_var** (bool,可选) - 是否为mean和variance进行模型平均。默认值为False。 - - **slot_dim** (int,可选) - 一个slot的embedding维度,slot用来表征一类特征的集合,在pslib模式下,通常我们通过slot区分特征id,并从参数服务器(pslib)中提取它们的embedding。embedding的第一维是历史上这个embedding展示的次数。如果本op的输入是由这样的embedding连接而来,那么当这个特征id是新的或空的,则正则化结果可能不实际。为了避免这种情况,我们添加了slot_dim来定位并判断这一维是否为零。如果是的话,我们选择跳过正则化。默认值为 -1。 - - **summary_decay_rate** (float,可选) - 更新summary信息时的衰减率。默认值为 0.9999999。 - - **sync_stats** (bool,默认值False) - 在多GPU卡的场景下可以使用,用来同步多卡间的summary信息。 - - **enable_scale_and_shift** (bool,默认值False) - 在分布式全局正则化后是否做像batchnorm一样做scale&shift的操作。 + - **moving_mean_name** (string,可选) - 存储全局 Mean 的 moving_mean 的名称。默认值为 None。 + - **moving_variance_name** (string,可选) - 存储全局 Variance 的 moving_variance 的名称。默认值为 None。 + - **do_model_average_for_mean_and_var** (bool,可选) - 是否为 mean 和 variance 进行模型平均。默认值为 False。 + - 
**slot_dim** (int,可选) - 一个 slot 的 embedding 维度,slot 用来表征一类特征的集合,在 pslib 模式下,通常我们通过 slot 区分特征 id,并从参数服务器(pslib)中提取它们的 embedding。embedding 的第一维是历史上这个 embedding 展示的次数。如果本 op 的输入是由这样的 embedding 连接而来,那么当这个特征 id 是新的或空的,则正则化结果可能不实际。为了避免这种情况,我们添加了 slot_dim 来定位并判断这一维是否为零。如果是的话,我们选择跳过正则化。默认值为 -1。 + - **summary_decay_rate** (float,可选) - 更新 summary 信息时的衰减率。默认值为 0.9999999。 + - **sync_stats** (bool,默认值 False) - 在多 GPU 卡的场景下可以使用,用来同步多卡间的 summary 信息。 + - **enable_scale_and_shift** (bool,默认值 False) - 在分布式全局正则化后是否做像 batchnorm 一样做 scale&shift 的操作。 返回 :::::::::::: diff --git a/docs/api/paddle/static/nn/deform_conv2d_cn.rst b/docs/api/paddle/static/nn/deform_conv2d_cn.rst index 297730cdcaa..368e9fdad81 100644 --- a/docs/api/paddle/static/nn/deform_conv2d_cn.rst +++ b/docs/api/paddle/static/nn/deform_conv2d_cn.rst @@ -9,17 +9,17 @@ deform_conv2d **可变形卷积算子** -deform_conv2d op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x,输出Tensor y,可变形卷积运算如下所示: +deform_conv2d op 对输入 4-D Tensor 计算 2-D 可变形卷积。给定输入 Tensor x,输出 Tensor y,可变形卷积运算如下所示: -可形变卷积v2: +可形变卷积 v2: :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}` -可形变卷积v1: +可形变卷积 v1: :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}` -其中 :math:`\Delta p_k` 和 :math:`\Delta m_k` 分别为第k个位置的可学习偏移和调制标量。在deform_conv2d_v1中 :math:`\Delta m_k` 为1。 +其中 :math:`\Delta p_k` 和 :math:`\Delta m_k` 分别为第 k 个位置的可学习偏移和调制标量。在 deform_conv2d_v1 中 :math:`\Delta m_k` 为 1。 具体细节可以参考论文:`<> `_ 和 `<> `_ 。 @@ -51,24 +51,24 @@ deform_conv2d op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor 参数 :::::::::::: - - **x** (Tensor) - 形状为 :math:`[N, C, H, W]` 的输入Tensor,数据类型为float32或float64。 - - **offset** (Tensor) – 可变形卷积层的输入坐标偏移,数据类型为float32或float64。 - - **mask** (Tensor,可选) – 可变形卷积层的输入掩码,当使用可变形卷积算子v1时,请将mask设置为None,数据类型为float32或float64。 - - **num_filters** (int) – 卷积核数,与输出Tensor通道数相同。 - - **filter_size** (int|tuple) – 卷积核大小。如果filter_size为元组,则必须包含两个整数(filter_size_H, filter_size_W)。若数据类型为int,卷积核形状为(filter_size, filter_size)。 - - **stride** (int|tuple,可选) – 
步长大小。如果stride为元组,则必须包含两个整数(stride_H, stride_W)。否则stride_H = stride_W = stride。默认值为1。 - - **padding** (int|tuple,可选) – padding大小。如果padding为元组,则必须包含两个整数(padding_H, padding_W)。否则padding_H = padding_W = padding。默认值为0。 - - **dilation** (int|tuple,可选) – dilation大小。如果dilation为元组,则必须包含两个整数(dilation_H, dilation_W)。否则dilation_H = dilation_W = dilation。默认值为1。 - - **groups** (int,可选) – 卷积组数。依据Alex Krizhevsky的Deep CNN论文中的分组卷积,有:当group=2时,前一半卷积核只和前一半输入通道有关,而后一半卷积核只和后一半输入通道有关。默认值为1。 - - **deformable_groups** (int,可选) – 可变形卷积组数。默认值为1。 - - **im2col_step** (int,可选) – 每个im2col计算的最大图像数。总batch大小应可以被该值整除或小于该值。如果您面临内存问题,可以尝试在此处使用一个较小的值。默认值为1。 - - **weight_attr** (ParamAttr,可选) – 可变形卷积的可学习权重的属性。如果将其设置为None或某种ParamAttr,可变形卷积将创建ParamAttr作为weight_attr。如果没有设置此weight_attr的Initializer,该参数将被Normal(0.0, std)初始化,且其中的std为 :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`。默认值为None。 - - **bias_attr** (ParamAttr|bool,可选) – 可变形卷积层的偏置的参数属性。如果设为False,则输出单元不会加偏置。如果设为None或者某种ParamAttr,conv2d会创建ParamAttr作为bias_attr。如果不设置bias_attr的Initializer,偏置会被初始化为0。默认值为None。 + - **x** (Tensor) - 形状为 :math:`[N, C, H, W]` 的输入 Tensor,数据类型为 float32 或 float64。 + - **offset** (Tensor) – 可变形卷积层的输入坐标偏移,数据类型为 float32 或 float64。 + - **mask** (Tensor,可选) – 可变形卷积层的输入掩码,当使用可变形卷积算子 v1 时,请将 mask 设置为 None,数据类型为 float32 或 float64。 + - **num_filters** (int) – 卷积核数,与输出 Tensor 通道数相同。 + - **filter_size** (int|tuple) – 卷积核大小。如果 filter_size 为元组,则必须包含两个整数(filter_size_H, filter_size_W)。若数据类型为 int,卷积核形状为(filter_size, filter_size)。 + - **stride** (int|tuple,可选) – 步长大小。如果 stride 为元组,则必须包含两个整数(stride_H, stride_W)。否则 stride_H = stride_W = stride。默认值为 1。 + - **padding** (int|tuple,可选) – padding 大小。如果 padding 为元组,则必须包含两个整数(padding_H, padding_W)。否则 padding_H = padding_W = padding。默认值为 0。 + - **dilation** (int|tuple,可选) – dilation 大小。如果 dilation 为元组,则必须包含两个整数(dilation_H, dilation_W)。否则 dilation_H = dilation_W = dilation。默认值为 1。 + - **groups** (int,可选) – 卷积组数。依据 Alex Krizhevsky 的 Deep CNN 论文中的分组卷积,有:当 group=2 时,前一半卷积核只和前一半输入通道有关,而后一半卷积核只和后一半输入通道有关。默认值为 1。 + 
- **deformable_groups** (int,可选) – 可变形卷积组数。默认值为 1。 + - **im2col_step** (int,可选) – 每个 im2col 计算的最大图像数。总 batch 大小应可以被该值整除或小于该值。如果您面临内存问题,可以尝试在此处使用一个较小的值。默认值为 1。 + - **weight_attr** (ParamAttr,可选) – 可变形卷积的可学习权重的属性。如果将其设置为 None 或某种 ParamAttr,可变形卷积将创建 ParamAttr 作为 weight_attr。如果没有设置此 weight_attr 的 Initializer,该参数将被 Normal(0.0, std)初始化,且其中的 std 为 :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`。默认值为 None。 + - **bias_attr** (ParamAttr|bool,可选) – 可变形卷积层的偏置的参数属性。如果设为 False,则输出单元不会加偏置。如果设为 None 或者某种 ParamAttr,conv2d 会创建 ParamAttr 作为 bias_attr。如果不设置 bias_attr 的 Initializer,偏置会被初始化为 0。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,可变形卷积输出的4-D Tensor,数据类型为float32或float64。 +Tensor,可变形卷积输出的 4-D Tensor,数据类型为 float32 或 float64。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/nn/embedding_cn.rst b/docs/api/paddle/static/nn/embedding_cn.rst index 8762d12f1ca..d94b955790d 100644 --- a/docs/api/paddle/static/nn/embedding_cn.rst +++ b/docs/api/paddle/static/nn/embedding_cn.rst @@ -11,23 +11,23 @@ embedding **嵌入层(Embedding Layer)** -该OP根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 +该 OP 根据 input 中的 id 信息从 embedding 矩阵中查询对应 embedding 信息,并会根据输入的 size (vocab_size, emb_size)和 dtype 自动构造一个二维 embedding 矩阵。 -输出的Tensor的shape是将输入Tensor shape的会在输出的embedding最后追加一维emb_size。 +输出的 Tensor 的 shape 是将输入 Tensor shape 的会在输出的 embedding 最后追加一维 emb_size。 .. 
note:: -input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 +input 中的 id 必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 :: Case 1: - input是Tensor,且padding_idx = -1 + input 是 Tensor,且 padding_idx = -1 input.data = [[[1], [3]], [[2], [4]], [[4], [127]]] input.shape = [3, 2, 1] - 若size = [128, 16] - 输出为Tensor: + 若 size = [128, 16] + 输出为 Tensor: out.shape = [3, 2, 16] out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], [0.345421456, 0.524563927, ..., 0.144534654]], @@ -37,16 +37,16 @@ input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出 [[0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]]] # padding data - 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127,对于输入id为127的词,进行padding处理。 + 输入的 padding_idx 小于 0,则自动转换为 padding_idx = -1 + 128 = 127,对于输入 id 为 127 的词,进行 padding 处理。 Case 2: - input是lod level 为1的LoDTensor,且padding_idx = 0 + input 是 lod level 为 1 的 LoDTensor,且 padding_idx = 0 input.lod = [[2, 3]] input.data = [[1], [3], [2], [4], [0]] input.shape = [5, 1] - 若size = [128, 16] + 若 size = [128, 16] 输出为: out.lod = [[2, 3]] @@ -56,23 +56,23 @@ input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出 [0.345249859, 0.124939536, ..., 0.194353745], [0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]] # padding data - 输入的padding_idx = 0,则对于输入id为0的词,进行padding处理。 + 输入的 padding_idx = 0,则对于输入 id 为 0 的词,进行 padding 处理。 参数 :::::::::::: - - **input** (Variable) - 存储id信息的Tensor,数据类型必须为:int64,输入的shape最后一维须为1。input中的id必须满足 ``0 =< id < size[0]`` 。 - - **size** (tuple|list) - embedding矩阵的维度。必须包含两个元素,第一个元素为vocab_size(词表大小),第二个为emb_size(embedding层维度)。 - - **is_sparse** (bool) - 是否使用稀疏的更新方式,这个参数只会影响反向的梯度更新的性能,sparse更新速度更快,推荐使用稀疏更新的方式。但某些optimizer不支持sparse更新,比如 :ref:`cn_api_paddle_optimizer_Adadelta` 、 :ref:`cn_api_paddle_optimizer_Adamax`,此时is_sparse必须为False。默认为False。 - - **is_distributed** (bool) - 是否使用分布式的方式存储embedding矩阵,仅在多机分布式cpu训练中使用。默认为False。 - - **padding_idx** (int|long|None) - padding_idx需在区间 ``[-vocab_size, vocab_size)``,否则不生效,``padding_idx < 0`` 
时,padding_idx会被改成``vocab_size + padding_idx``,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为None,不作处理,默认为None。 - - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_paddle_ParamAttr`。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_paddle_to_tensor` 进行初始化,即可实现加载自定义或预训练的词向量。 - - **dtype** (str) - 输出Tensor的数据类型,数据类型必须为:float32 或 float64,默认为 float32。 + - **input** (Variable) - 存储 id 信息的 Tensor,数据类型必须为:int64,输入的 shape 最后一维须为 1。input 中的 id 必须满足 ``0 =< id < size[0]`` 。 + - **size** (tuple|list) - embedding 矩阵的维度。必须包含两个元素,第一个元素为 vocab_size(词表大小),第二个为 emb_size(embedding 层维度)。 + - **is_sparse** (bool) - 是否使用稀疏的更新方式,这个参数只会影响反向的梯度更新的性能,sparse 更新速度更快,推荐使用稀疏更新的方式。但某些 optimizer 不支持 sparse 更新,比如 :ref:`cn_api_paddle_optimizer_Adadelta` 、 :ref:`cn_api_paddle_optimizer_Adamax`,此时 is_sparse 必须为 False。默认为 False。 + - **is_distributed** (bool) - 是否使用分布式的方式存储 embedding 矩阵,仅在多机分布式 cpu 训练中使用。默认为 False。 + - **padding_idx** (int|long|None) - padding_idx 需在区间 ``[-vocab_size, vocab_size)``,否则不生效,``padding_idx < 0`` 时,padding_idx 会被改成``vocab_size + padding_idx``,input 中等于 padding_index 的 id 对应的 embedding 信息会被设置为 0,且这部分填充数据在训练时将不会被更新。如果为 None,不作处理,默认为 None。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_paddle_ParamAttr`。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为 numpy 数据格式,且保证本地词向量的 shape 和 embedding 的 ``size`` 参数一致,然后使用 :ref:`cn_api_paddle_to_tensor` 进行初始化,即可实现加载自定义或预训练的词向量。 + - **dtype** (str) - 输出 Tensor 的数据类型,数据类型必须为:float32 或 float64,默认为 float32。 返回 :::::::::::: -Variable,input映射后得到的Embedding Tensor或LoDTensor,数据类型和dtype定义的类型一致。 +Variable,input 映射后得到的 Embedding Tensor 或 LoDTensor,数据类型和 dtype 定义的类型一致。 代码示例 diff --git a/docs/api/paddle/static/nn/fc_cn.rst b/docs/api/paddle/static/nn/fc_cn.rst index e447b906fe7..03a428b8aa5 100755 --- a/docs/api/paddle/static/nn/fc_cn.rst +++ b/docs/api/paddle/static/nn/fc_cn.rst @@ 
-7,12 +7,12 @@ fc .. py:function:: paddle.static.nn.fc(x, size, num_flatten_dims=1, weight_attr=None, bias_attr=None, activation=None, name=None) -该OP将在神经网络中构建一个全连接层。其输入可以是一个Tensor或多个Tensor组成的list(详见参数说明)。该OP会为每个输入Tensor创建一个权重(weight)参数,即一个从每个输入单元到每个输出单元的全连接权重矩阵。 -每个输入Tensor和其对应的权重(weight)相乘得到形状为 :math:`[batch\_size, *, size]` 输出Tensor,其中 :math:`*` 表示可以为任意个额外的维度。 -如果有多个输入Tensor,则多个形状为 :math:`[batch\_size, *, size]` 的Tensor计算结果会被累加起来,作为最终输出。如果 :attr:`bias_attr` 非空,则会创建一个偏置(bias)参数,并把它累加到输出Tensor中。 +该 OP 将在神经网络中构建一个全连接层。其输入可以是一个 Tensor 或多个 Tensor 组成的 list(详见参数说明)。该 OP 会为每个输入 Tensor 创建一个权重(weight)参数,即一个从每个输入单元到每个输出单元的全连接权重矩阵。 +每个输入 Tensor 和其对应的权重(weight)相乘得到形状为 :math:`[batch\_size, *, size]` 输出 Tensor,其中 :math:`*` 表示可以为任意个额外的维度。 +如果有多个输入 Tensor,则多个形状为 :math:`[batch\_size, *, size]` 的 Tensor 计算结果会被累加起来,作为最终输出。如果 :attr:`bias_attr` 非空,则会创建一个偏置(bias)参数,并把它累加到输出 Tensor 中。 如果 :attr:`activation` 非空,将会在输出结果上应用相应的激活函数。 -对于单个输入Tensor ::math`X`,计算公式为: +对于单个输入 Tensor ::math`X`,计算公式为: .. math:: @@ -20,7 +20,7 @@ fc -对于多个Tensor,计算公式为: +对于多个 Tensor,计算公式为: .. math:: @@ -29,12 +29,12 @@ fc 其中: -- :math:`M`:输入Tensor的个数。如果输入是Tensor列表,:math:`M` 等于 :math:`len(X)`; -- :math:`X_i`:第i个输入Tensor; -- :math:`W_i`:对应第i个输入Tensor的权重矩阵; +- :math:`M`:输入 Tensor 的个数。如果输入是 Tensor 列表,:math:`M` 等于 :math:`len(X)`; +- :math:`X_i`:第 i 个输入 Tensor; +- :math:`W_i`:对应第 i 个输入 Tensor 的权重矩阵; - :math:`b`:偏置参数; - :math:`Act` :activation function (激活函数); -- :math:`Out`:输出Tensor。 +- :math:`Out`:输出 Tensor。 .. 
code-block:: text @@ -68,19 +68,19 @@ fc 参数 ::::::::: -- **x** (Tensor|list of Tensor) – 一个多维Tensor或由多个Tensor组成的list,每个输入Tensor的维度至少是2。数据类型可以为float16,float32或float64。 -- **size** (int) – 全连接层输出单元的数目,即输出Tensor的特征维度。 -- **num_flatten_dims** (int) – 输入可以接受维度大于2的Tensor。在计算时,输入首先会被扁平化为一个二维矩阵,之后再与权重相乘。参数 :attr:`num_flatten_dims` 决定了输入Tensor扁平化的方式:前 :math:`num\_flatten\_dims` (包含边界,从1开始数) 个维度会被扁平化为二维矩阵的第一维 (即为矩阵的高),剩下的 :math:`rank(x) - num\_flatten\_dims` 维被扁平化为二维矩阵的第二维 (即矩阵的宽)。例如,假设 :attr:`x` 是一个五维的Tensor,其形状为 :math:`[2, 3, 4, 5, 6]` , :attr:`num_flatten_dims` = 3时扁平化后的矩阵形状为 :math:`[2 * 3 * 4, 5 * 6] = [24, 30]`,最终输出Tensor的形状为 :math:`[2, 3, 4, size]`。默认值为1。 -- **weight_attr** (ParamAttr,可选) – 指定权重参数的属性。默认值为None,表示使用默认的权重参数属性,将权重参数初始化为0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。注意:如果该api输入x为一个张量的数组,那**weight_attr**也应该是一个同样长度的数组,并且与x数组一一对应。 -- **bias_attr** (ParamAttr|bool,可选) – 指定偏置参数的属性。:attr:`bias_attr` 为bool类型且设置为False时,表示不会为该层添加偏置。:attr:`bias_attr` 如果设置为True或者None,则表示使用默认的偏置参数属性,将偏置参数初始化为0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。默认值为None。 -- **activation** (str,可选) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations`,默认值为None。 +- **x** (Tensor|list of Tensor) – 一个多维 Tensor 或由多个 Tensor 组成的 list,每个输入 Tensor 的维度至少是 2。数据类型可以为 float16,float32 或 float64。 +- **size** (int) – 全连接层输出单元的数目,即输出 Tensor 的特征维度。 +- **num_flatten_dims** (int) – 输入可以接受维度大于 2 的 Tensor。在计算时,输入首先会被扁平化为一个二维矩阵,之后再与权重相乘。参数 :attr:`num_flatten_dims` 决定了输入 Tensor 扁平化的方式:前 :math:`num\_flatten\_dims` (包含边界,从 1 开始数) 个维度会被扁平化为二维矩阵的第一维 (即为矩阵的高),剩下的 :math:`rank(x) - num\_flatten\_dims` 维被扁平化为二维矩阵的第二维 (即矩阵的宽)。例如,假设 :attr:`x` 是一个五维的 Tensor,其形状为 :math:`[2, 3, 4, 5, 6]` , :attr:`num_flatten_dims` = 3 时扁平化后的矩阵形状为 :math:`[2 * 3 * 4, 5 * 6] = [24, 30]`,最终输出 Tensor 的形状为 :math:`[2, 3, 4, size]`。默认值为 1。 +- **weight_attr** (ParamAttr,可选) – 指定权重参数的属性。默认值为 None,表示使用默认的权重参数属性,将权重参数初始化为 0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。注意:如果该 api 输入 x 为一个张量的数组,那**weight_attr**也应该是一个同样长度的数组,并且与 x 数组一一对应。 +- **bias_attr** 
(ParamAttr|bool,可选) – 指定偏置参数的属性。:attr:`bias_attr` 为 bool 类型且设置为 False 时,表示不会为该层添加偏置。:attr:`bias_attr` 如果设置为 True 或者 None,则表示使用默认的偏置参数属性,将偏置参数初始化为 0。具体用法请参见 :ref:`cn_api_fluid_ParamAttr`。默认值为 None。 +- **activation** (str,可选) – 应用于输出上的激活函数,如 tanh、softmax、sigmoid,relu 等,支持列表请参考 :ref:`api_guide_activations`,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor,形状为 :math:`[batch\_size, *, size]`,数据类型与输入Tensor相同。 +Tensor,形状为 :math:`[batch\_size, *, size]`,数据类型与输入 Tensor 相同。 diff --git a/docs/api/paddle/static/nn/group_norm_cn.rst b/docs/api/paddle/static/nn/group_norm_cn.rst index 55d9440e918..1af6210fe2a 100755 --- a/docs/api/paddle/static/nn/group_norm_cn.rst +++ b/docs/api/paddle/static/nn/group_norm_cn.rst @@ -11,11 +11,11 @@ group_norm 参数 ::::::::: - - **input** (Tensor):维度大于1的Tensor,数据类型为float32或float64。 - - **groups** (int):从 channel 中分离出来的 group 的数目,数据类型为int32。 - - **epsilon** (float,可选):为防止方差除以零,增加一个很小的值。数据类型为float32。默认值:1e-05。 - - **param_attr** (ParamAttr|bool,可选):指定权重参数属性的对象。若 ``param_attr`` 为bool类型,只支持为False,表示没有权重参数。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选):指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **input** (Tensor):维度大于 1 的 Tensor,数据类型为 float32 或 float64。 + - **groups** (int):从 channel 中分离出来的 group 的数目,数据类型为 int32。 + - **epsilon** (float,可选):为防止方差除以零,增加一个很小的值。数据类型为 float32。默认值:1e-05。 + - **param_attr** (ParamAttr|bool,可选):指定权重参数属性的对象。若 ``param_attr`` 为 bool 类型,只支持为 False,表示没有权重参数。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选):指定偏置参数属性的对象。若 ``bias_attr`` 为 bool 类型,只支持为 False,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **act** (str,可选):将激活应用于输出的 group normalizaiton。 - **data_layout** (str,可选):指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC",默认值:"NCHW"。如果是"NCHW",则数据按[批大小,输入通道数,* ]的顺序存储。 - 
**name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/static/nn/instance_norm_cn.rst b/docs/api/paddle/static/nn/instance_norm_cn.rst index a8d14498169..548fbcbfb66 100644 --- a/docs/api/paddle/static/nn/instance_norm_cn.rst +++ b/docs/api/paddle/static/nn/instance_norm_cn.rst @@ -16,7 +16,7 @@ NCHW[batch,in_channels,in_height,in_width] 更多详情请参考:`Instance Normalization: The Missing Ingredient for Fast Stylization `_ -``input`` 是mini-batch的输入。 +``input`` 是 mini-batch 的输入。 .. math:: \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean of each channel in each sample in a batch \\ @@ -28,15 +28,15 @@ NCHW[batch,in_channels,in_height,in_width] 参数 :::::::::::: - - **input** (Tensor) - instance_norm算子的输入特征,是一个Tensor,输入的维度可以为 2, 3, 4, 5。数据类型:float32和float64。 - - **epsilon** (float,默认1e-05)-为了当前输入做标准化时得到稳定的结果而加在的分母上的扰动值。默认值为1e-5。 - - **param_attr** (ParamAttr|None) - instance_norm 权重参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。如果设为None,则默认的参数初始化为1.0。如果在ParamAttr指定了属性时,instance_norm创建相应属性的param_attr(权重)参数。默认:None。 - - **bias_attr** (ParamAttr|None) - instance_norm 偏置参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。如果设为None,默认的参数初始化为0.0。如果在ParamAttr指定了参数的属性时,instance_norm创建相应属性的bias_attr(偏置)参数。默认:None。 + - **input** (Tensor) - instance_norm 算子的输入特征,是一个 Tensor,输入的维度可以为 2, 3, 4, 5。数据类型:float32 和 float64。 + - **epsilon** (float,默认 1e-05)-为了当前输入做标准化时得到稳定的结果而加在的分母上的扰动值。默认值为 1e-5。 + - **param_attr** (ParamAttr|None) - instance_norm 权重参数的属性,可以设置为 None 或者一个 ParamAttr 的类(ParamAttr 中可以指定参数的各种属性)。如果设为 None,则默认的参数初始化为 1.0。如果在 ParamAttr 指定了属性时,instance_norm 创建相应属性的 param_attr(权重)参数。默认:None。 + - **bias_attr** (ParamAttr|None) - instance_norm 偏置参数的属性,可以设置为 None 或者一个 ParamAttr 的类(ParamAttr 中可以指定参数的各种属性)。如果设为 None,默认的参数初始化为 0.0。如果在 ParamAttr 指定了参数的属性时,instance_norm 创建相应属性的 bias_attr(偏置)参数。默认:None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor,在输入中运用instance normalization后的结果。 
+Tensor,在输入中运用 instance normalization 后的结果。 代码示例 diff --git a/docs/api/paddle/static/nn/layer_norm_cn.rst b/docs/api/paddle/static/nn/layer_norm_cn.rst index 586798f3dd9..cf49ba8f5e4 100644 --- a/docs/api/paddle/static/nn/layer_norm_cn.rst +++ b/docs/api/paddle/static/nn/layer_norm_cn.rst @@ -9,7 +9,7 @@ layer_norm -该OP实现了层归一化层(Layer Normalization Layer),其可以应用于小批量输入数据。 +该 OP 实现了层归一化层(Layer Normalization Layer),其可以应用于小批量输入数据。 论文参考:`Layer Normalization `_ @@ -31,14 +31,14 @@ layer_norm 参数 :::::::::::: - - **input** (Tensor) - 维度为任意维度的多维 ``Tensor``,数据类型为float32或float64。 + - **input** (Tensor) - 维度为任意维度的多维 ``Tensor``,数据类型为 float32 或 float64。 - **scale** (bool,可选) - 指明是否在归一化后学习自适应增益 ``g``。默认值:True。 - **shift** (bool,可选) - 指明是否在归一化后学习自适应偏差 ``b``。默认值:True。 - **begin_norm_axis** (int,可选) - 指明归一化将沿着 ``begin_norm_axis`` 到 ``rank(input)`` 的维度执行。默认值:1。 - **epsilon** (float,可选) - 指明在计算过程中是否添加较小的值到方差中以防止除零。默认值:1e-05。 - - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **act** (str,可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations`,默认值为None。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str,可选) - 应用于输出上的激活函数,如 tanh、softmax、sigmoid,relu 等,支持列表请参考 :ref:`api_guide_activations`,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/static/nn/multi_box_head_cn.rst b/docs/api/paddle/static/nn/multi_box_head_cn.rst index 232afbfe52b..f21c51a3b35 100644 --- a/docs/api/paddle/static/nn/multi_box_head_cn.rst +++ b/docs/api/paddle/static/nn/multi_box_head_cn.rst @@ -9,16 +9,16 @@ multi_box_head -基于SSD(Single Shot MultiBox 
Detector)算法,在不同层输入特征上提取先验框、计算回归的坐标位置和分类的置信度,并合并到一起作为输出,具体参数解释和输出格式参考下面说明。更详细信息,请参阅SSD论文的2.2节。 +基于 SSD(Single Shot MultiBox Detector)算法,在不同层输入特征上提取先验框、计算回归的坐标位置和分类的置信度,并合并到一起作为输出,具体参数解释和输出格式参考下面说明。更详细信息,请参阅 SSD 论文的 2.2 节。 论文参考:`SSD:Single Shot MultiBox Detector `_ 。 参数 :::::::::::: - - **inputs** (list(Variable) | tuple(Variable)) - 输入特征的列表,仅支持格式为NCHW的4-D Tensor。 - - **image** (Variable) - 一般是网络输入的图像数据,仅支持NCHW格式。 - - **base_size** (int) - 输入图片的大小,当输入个数len(inputs) > 2,并且 ``min_size`` 和 ``max_size`` 为None时,通过 ``baze_size``, ``min_ratio`` 和 ``max_ratio`` 来计算出 ``min_size`` 和 ``max_size``。计算公式如下: + - **inputs** (list(Variable) | tuple(Variable)) - 输入特征的列表,仅支持格式为 NCHW 的 4-D Tensor。 + - **image** (Variable) - 一般是网络输入的图像数据,仅支持 NCHW 格式。 + - **base_size** (int) - 输入图片的大小,当输入个数 len(inputs) > 2,并且 ``min_size`` 和 ``max_size`` 为 None 时,通过 ``baze_size``, ``min_ratio`` 和 ``max_ratio`` 来计算出 ``min_size`` 和 ``max_size``。计算公式如下: .. code-block:: python @@ -32,12 +32,12 @@ multi_box_head max_sizes = [base_size * .20] + max_sizes - **num_classes** (int) - 类别数。 - - **aspect_ratios** (list(float) | tuple(float) | list(list(float)) | tuple(tuple(float)) - 候选框的宽高比,``aspect_ratios`` 和 ``input`` 的个数必须相等。如果每个特征层提取先验框的 ``aspect_ratio`` 多余一个,写成嵌套的list,例如[[2., 3.]]。 - - **min_ratio** (int)- 先验框的长度和 ``base_size`` 的最小比率,注意,这里是百分比,假如比率为0.2,这里应该给20.0。默认值:None。 + - **aspect_ratios** (list(float) | tuple(float) | list(list(float)) | tuple(tuple(float)) - 候选框的宽高比,``aspect_ratios`` 和 ``input`` 的个数必须相等。如果每个特征层提取先验框的 ``aspect_ratio`` 多余一个,写成嵌套的 list,例如[[2., 3.]]。 + - **min_ratio** (int)- 先验框的长度和 ``base_size`` 的最小比率,注意,这里是百分比,假如比率为 0.2,这里应该给 20.0。默认值:None。 - **max_ratio** (int)- 先验框的长度和 ``base_size`` 的最大比率,注意事项同 ``min_ratio``。默认值:None。 - - **min_sizes** (list(float) | tuple(float) | None)- 每层提取的先验框的最小长度,如果输入个数len(inputs)<= 2,则必须设置 ``min_sizes``,并且 ``min_sizes`` 的个数应等于len(inputs)。默认值:None。 - - **max_sizes** (list | tuple | None)- 每层提取的先验框的最大长度,如果len(inputs)<= 2,则必须设置 ``max_sizes``,并且 ``min_sizes`` 
的长度应等于len(inputs)。默认值:None。 - - **steps** (list(float) | tuple(float)) - 相邻先验框的中心点步长,如果在水平和垂直方向上步长相同,则设置steps即可,否则分别通过step_w和step_h设置不同方向的步长。如果 ``steps``, ``ste_w`` 和 ``step_h`` 均为None,步长为输入图片的大小 ``base_size`` 和特征图大小的比例。默认值:None。 + - **min_sizes** (list(float) | tuple(float) | None)- 每层提取的先验框的最小长度,如果输入个数 len(inputs)<= 2,则必须设置 ``min_sizes``,并且 ``min_sizes`` 的个数应等于 len(inputs)。默认值:None。 + - **max_sizes** (list | tuple | None)- 每层提取的先验框的最大长度,如果 len(inputs)<= 2,则必须设置 ``max_sizes``,并且 ``min_sizes`` 的长度应等于 len(inputs)。默认值:None。 + - **steps** (list(float) | tuple(float)) - 相邻先验框的中心点步长,如果在水平和垂直方向上步长相同,则设置 steps 即可,否则分别通过 step_w 和 step_h 设置不同方向的步长。如果 ``steps``, ``ste_w`` 和 ``step_h`` 均为 None,步长为输入图片的大小 ``base_size`` 和特征图大小的比例。默认值:None。 - **step_w** (list(float)| tuple(float)) - 水平方向上先验框中心点步长。默认值:None。 - **step_h** (list | tuple) - 垂直方向上先验框中心点步长。默认值:None。 - **offset** (float) - 左上角先验框中心在水平和垂直方向上的偏移。默认值:0.5 @@ -48,23 +48,23 @@ multi_box_head - **pad** (int | list(int) | tuple(int)) - 计算回归位置和分类置信度的卷积核的填充。默认值:0。 - **stride** (int | list | tuple) - 计算回归位置和分类置信度的卷积核的步长。默认值:1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **min_max_aspect_ratios_order** (bool) - 如果设置为True,则输出先验框的顺序为[min,max,aspect_ratios],这与Caffe一致。请注意,此顺序会影响卷积层后面的权重顺序,但不会影响最终检测结果。默认值:False。 + - **min_max_aspect_ratios_order** (bool) - 如果设置为 True,则输出先验框的顺序为[min,max,aspect_ratios],这与 Caffe 一致。请注意,此顺序会影响卷积层后面的权重顺序,但不会影响最终检测结果。默认值:False。 返回 :::::::::::: list(Variable) | tuple(Variable) - - **mbox_loc(Variable)** - 预测框的回归位置。格式为[N,num_priors,4],其中 ``N`` 是batch size, ``num_priors`` 是总共提取的先验框的个数。 - - **mbox_conf(Variable)** - 预测框的分类信度。格式为[N,num_priors,C],其中 ``num_priors`` 同上,C是类别数。 - - **boxes(Variable)** - 提取的先验框。布局是[num_priors,4], ``num_priors`` 同上,常量4是坐标个数。 + - **mbox_loc(Variable)** - 预测框的回归位置。格式为[N,num_priors,4],其中 ``N`` 是 batch size, ``num_priors`` 是总共提取的先验框的个数。 + - **mbox_conf(Variable)** - 预测框的分类信度。格式为[N,num_priors,C],其中 ``num_priors`` 同上,C 是类别数。 + - **boxes(Variable)** - 
提取的先验框。布局是[num_priors,4], ``num_priors`` 同上,常量 4 是坐标个数。 - **variances(Variable)** - 提取的先验框方差。布局是[num_priors,4], ``num_priors`` 同上。 代码示例 1 :::::::::::: -设置min_ratio和max_ratio +设置 min_ratio 和 max_ratio .. code-block:: python @@ -94,7 +94,7 @@ list(Variable) | tuple(Variable) 代码示例 2: :::::::::::: -设置min_sizes和max_sizes +设置 min_sizes 和 max_sizes .. code-block:: python diff --git a/docs/api/paddle/static/nn/nce_cn.rst b/docs/api/paddle/static/nn/nce_cn.rst index f491e30acfd..76fd0aabaf7 100644 --- a/docs/api/paddle/static/nn/nce_cn.rst +++ b/docs/api/paddle/static/nn/nce_cn.rst @@ -21,14 +21,14 @@ nce - **input** (Tensor) - 输入张量,2-D 张量,形状为 [batch_size, dim],数据类型为 float32 或者 float64。 - **label** (Tensor) - 标签,2-D 张量,形状为 [batch_size, num_true_class],数据类型为 int64。 - **num_total_classes** (int) - 所有样本中的类别的总数。 - - **sample_weight** (Tensor,可选) - 存储每个样本权重,shape 为 [batch_size, 1] 存储每个样本的权重。每个样本的默认权重为1.0。 - - **param_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr,可选):指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **num_neg_samples** (int) - 负样例的数量,默认值是10。 + - **sample_weight** (Tensor,可选) - 存储每个样本权重,shape 为 [batch_size, 1] 存储每个样本的权重。每个样本的默认权重为 1.0。 + - **param_attr** (ParamAttr,可选):指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选):指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **num_neg_samples** (int) - 负样例的数量,默认值是 10。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - **sampler** (str,可选) – 采样器,用于从负类别中进行取样。可以是 ``uniform``, ``log_uniform`` 或 ``custom_dist``,默认 ``uniform`` 。 - - **custom_dist** (nd.array,可选) – 第0维的长度为 ``num_total_classes``。如果采样器类别为 ``custom_dist``,则使用此参数。custom_dist[i] 是第i个类别被取样的概率。默认为 None。 - - **seed** (int,可选) – 采样器使用的seed。默认为0。 + - **custom_dist** (nd.array,可选) – 第 0 维的长度为 ``num_total_classes``。如果采样器类别为 
``custom_dist``,则使用此参数。custom_dist[i] 是第 i 个类别被取样的概率。默认为 None。 + - **seed** (int,可选) – 采样器使用的 seed。默认为 0。 - **is_sparse** (bool,可选) – 标志位,指明是否使用稀疏更新,为 ``True`` 时 :math:`weight@GRAD` 和 :math:`bias@GRAD` 的类型会变为 SelectedRows。默认为 ``False`` 。 返回 diff --git a/docs/api/paddle/static/nn/prelu_cn.rst b/docs/api/paddle/static/nn/prelu_cn.rst index 7df9d36827b..f74fb080e65 100644 --- a/docs/api/paddle/static/nn/prelu_cn.rst +++ b/docs/api/paddle/static/nn/prelu_cn.rst @@ -5,31 +5,31 @@ prelu .. py:function:: paddle.static.nn.prelu(x, mode, param_attr=None, data_format="NCHW", name=None) -prelu激活函数 +prelu 激活函数 .. math:: prelu(x) = max(0, x) + \alpha * min(0, x) 共提供三种激活方式: - - all:所有元素使用同一个alpha值; - - channel:在同一个通道中的元素使用同一个alpha值; - - element:每一个元素有一个独立的alpha值。 + - all:所有元素使用同一个 alpha 值; + - channel:在同一个通道中的元素使用同一个 alpha 值; + - element:每一个元素有一个独立的 alpha 值。 参数 :::::::::::: - - **x** (Tensor)- 多维Tensor或LoDTensor,数据类型为float32。 + - **x** (Tensor)- 多维 Tensor 或 LoDTensor,数据类型为 float32。 - **mode** (str) - 权重共享模式。 - - **param_attr** (ParamAttr,可选) - 可学习权重 :math:`[\alpha]` 的参数属性,可由ParamAttr创建。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **param_attr** (ParamAttr,可选) - 可学习权重 :math:`[\alpha]` 的参数属性,可由 ParamAttr 创建。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - **data_format** (str,可选) – 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是 "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" 或者 "NDHWC"。默认值:"NCHW"。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -表示激活输出Tensor,数据类型和形状于输入相同。 +表示激活输出 Tensor,数据类型和形状于输入相同。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/nn/row_conv_cn.rst b/docs/api/paddle/static/nn/row_conv_cn.rst index 74ee1e81816..6d501bb6ff5 100644 --- a/docs/api/paddle/static/nn/row_conv_cn.rst +++ b/docs/api/paddle/static/nn/row_conv_cn.rst @@ -8,9 +8,9 @@ row_conv -该接口为行卷积(Row-convolution operator)或称之为超前卷积(lookahead convolution),最早介绍于DeepSpeech2论文中,双向的RNN在深度语音模型中很有用,它通过对整个序列执行正向和反向传递来学习序列的表示。 +该接口为行卷积(Row-convolution 
operator)或称之为超前卷积(lookahead convolution),最早介绍于 DeepSpeech2 论文中,双向的 RNN 在深度语音模型中很有用,它通过对整个序列执行正向和反向传递来学习序列的表示。 -然而,与单向RNNs不同的是,在线部署和低延迟设置中,双向RNNs具有难度。超前卷积将来自未来子序列的信息以一种高效的方式进行计算,以改进单向递归神经网络。row convolution operator 与一维序列卷积不同,计算方法如下: +然而,与单向 RNNs 不同的是,在线部署和低延迟设置中,双向 RNNs 具有难度。超前卷积将来自未来子序列的信息以一种高效的方式进行计算,以改进单向递归神经网络。row convolution operator 与一维序列卷积不同,计算方法如下: 给定输入序列长度为 :math:`t` 的输入序列 :math:`X` 和输入维度 :math:`D`,以及一个大小为 :math:`context * D` 的滤波器 :math:`W`,输出序列卷积为: @@ -18,9 +18,9 @@ row_conv out_i = \sum_{j=i}^{i+context-1} X_{j} · W_{j-i} 公式中: - - :math:`out_i`:第i行输出变量形为[1, D]。 + - :math:`out_i`:第 i 行输出变量形为[1, D]。 - :math:`context`:下文(future context)大小 - - :math:`X_j`:第j行输出变量,形为[1,D] + - :math:`X_j`:第 j 行输出变量,形为[1,D] - :math:`W_{j-i}`:第(j-i)行参数,其形状为[1,D]。 详细请参考 `设计文档 `_ 。 @@ -30,14 +30,14 @@ row_conv 参数 :::::::::::: - - **input** (Tensor) - 支持输入为LodTensor和Tensor,输入类型可以是[float32, float64],它支持可变时间长度的输入序列。当输入input为LodTensor时,其内部张量是一个具有形状(T x N)的矩阵,其中T是这个mini batch中的总的timestep,N是输入数据维数。当输入input为Tensor时,其形状为(B x T x N)的三维矩阵,B为mini batch大小,T为每个batch输入中的最大timestep,N是输入数据维数。当输入input为LoDTensor,形状为[9, N],LoD信息为[2, 3, 4],等价于输入input为形状是[3, 4, N]的Tensor。 - - **future_context_size** (int) - 下文大小。请注意,卷积核的shape是[future_context_size + 1, N],N和输入input的数据维度N保持一致。 + - **input** (Tensor) - 支持输入为 LodTensor 和 Tensor,输入类型可以是[float32, float64],它支持可变时间长度的输入序列。当输入 input 为 LodTensor 时,其内部张量是一个具有形状(T x N)的矩阵,其中 T 是这个 mini batch 中的总的 timestep,N 是输入数据维数。当输入 input 为 Tensor 时,其形状为(B x T x N)的三维矩阵,B 为 mini batch 大小,T 为每个 batch 输入中的最大 timestep,N 是输入数据维数。当输入 input 为 LoDTensor,形状为[9, N],LoD 信息为[2, 3, 4],等价于输入 input 为形状是[3, 4, N]的 Tensor。 + - **future_context_size** (int) - 下文大小。请注意,卷积核的 shape 是[future_context_size + 1, N],N 和输入 input 的数据维度 N 保持一致。 - **param_attr** (ParamAttr) - 参数的属性,包括名称、初始化器等。 - **act** (str) - 非线性激活函数。 返回 :::::::::::: -表示row_conv计算结果的Tensor,数据类型、维度和输入input相同。 +表示 row_conv 计算结果的 Tensor,数据类型、维度和输入 input 相同。 代码示例 diff --git a/docs/api/paddle/static/nn/sequence_concat_cn.rst 
b/docs/api/paddle/static/nn/sequence_concat_cn.rst index 63a4e717e64..dacde811732 100644 --- a/docs/api/paddle/static/nn/sequence_concat_cn.rst +++ b/docs/api/paddle/static/nn/sequence_concat_cn.rst @@ -7,13 +7,13 @@ sequence_concat .. py:function:: paddle.static.nn.sequence_concat(input, name=None) .. note:: -该OP的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用 :ref:`paddle.concat ` 。 +该 OP 的输入只能是 LoDTensor,如果您需要处理的输入是 Tensor 类型,请使用 :ref:`paddle.concat ` 。 -**该OP仅支持LoDTensor**,通过LoDTensor的LoD信息将输入的多个LoDTensor进行连接(concat),输出连接后的LoDTensor。 +**该 OP 仅支持 LoDTensor**,通过 LoDTensor 的 LoD 信息将输入的多个 LoDTensor 进行连接(concat),输出连接后的 LoDTensor。 :: - input是由多个LoDTensor组成的list: + input 是由多个 LoDTensor 组成的 list: input = [x1, x2] 其中: x1.lod = [[0, 3, 5]] @@ -25,7 +25,7 @@ sequence_concat x2.shape = [4, 1] 且必须满足:len(x1.lod[0]) == len(x2.lod[0]) - 输出为LoDTensor: + 输出为 LoDTensor: out.lod = [[0, 3+2, 5+4]] out.data = [[1], [2], [3], [6], [7], [4], [5], [8], [9]] out.shape = [9, 1] @@ -34,12 +34,12 @@ sequence_concat 参数 ::::::::: - - **input** (list of Variable) – 多个LoDTensor组成的list,要求每个输入LoDTensor的LoD长度必须一致。数据类型为float32、float64或int64。 + - **input** (list of Variable) – 多个 LoDTensor 组成的 list,要求每个输入 LoDTensor 的 LoD 长度必须一致。数据类型为 float32、float64 或 int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor,输出连接后的LoDTensor,数据类型和输入一致。 +Tensor,输出连接后的 LoDTensor,数据类型和输入一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/static/nn/sequence_conv_cn.rst b/docs/api/paddle/static/nn/sequence_conv_cn.rst index 87479435c33..bdaaf42c6bb 100644 --- a/docs/api/paddle/static/nn/sequence_conv_cn.rst +++ b/docs/api/paddle/static/nn/sequence_conv_cn.rst @@ -7,24 +7,24 @@ sequence_conv .. py:function:: paddle.static.nn.sequence_conv(input, num_filters, filter_size=3, filter_stride=1, padding=True, padding_start=None, bias_attr=None, param_attr=None, act=None, name=None) .. note:: -1. 该API的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用 :ref:`paddle.nn.functional.conv2d ` 。 +1. 
该 API 的输入只能是 LoDTensor,如果您需要处理的输入是 Tensor 类型,请使用 :ref:`paddle.nn.functional.conv2d ` 。 2. 参数 ``padding`` 为无用参数,将在未来的版本中被移除。 -在给定的卷积参数下(如卷积核数目、卷积核大小等),对输入的变长序列(sequence)LoDTensor进行卷积操作。默认情况下,该OP会自适应地在每个输入序列的两端等长地填充全0数据,以确保卷积后的序列输出长度和输入长度一致。支持通过配置 ``padding_start`` 参数来指定序列填充的行为。 +在给定的卷积参数下(如卷积核数目、卷积核大小等),对输入的变长序列(sequence)LoDTensor 进行卷积操作。默认情况下,该 OP 会自适应地在每个输入序列的两端等长地填充全 0 数据,以确保卷积后的序列输出长度和输入长度一致。支持通过配置 ``padding_start`` 参数来指定序列填充的行为。 :: 这里详细介绍数据填充操作的细节: - 对于一个min-batch为2的变长序列输入,分别包含3个、1个时间步(time_step), - 假设输入input是一个[4, N]的float类型LoDTensor,为了方便,这里假设N = 2 + 对于一个 min-batch 为 2 的变长序列输入,分别包含 3 个、1 个时间步(time_step), + 假设输入 input 是一个[4, N]的 float 类型 LoDTensor,为了方便,这里假设 N = 2 input.data = [[1, 1], [2, 2], [3, 3], [4, 4]] input.lod = [[0, 3, 4]] - 即输入input总共有4个词,每个词被表示为一个2维向量。 + 即输入 input 总共有 4 个词,每个词被表示为一个 2 维向量。 Case1: @@ -39,7 +39,7 @@ sequence_conv [2, 2, 3, 3, 0, 0], [0, 0, 4, 4, 0, 0]] - 它将和卷积核矩阵相乘得到最终的输出,假设num_filters = 3: + 它将和卷积核矩阵相乘得到最终的输出,假设 num_filters = 3: output.data = [[ 0.3234, -0.2334, 0.7433], [ 0.5646, 0.9464, -0.1223], [-0.1343, 0.5653, 0.4555], @@ -52,21 +52,21 @@ sequence_conv 参数 ::::::::: - - **input** (Variable) - 维度为 :math:`(M, K)` 的二维LoDTensor,仅支持lod_level为1。其中M是mini-batch的总时间步数,K是输入的 ``hidden_size`` 特征维度。数据类型为float32或float64。 + - **input** (Variable) - 维度为 :math:`(M, K)` 的二维 LoDTensor,仅支持 lod_level 为 1。其中 M 是 mini-batch 的总时间步数,K 是输入的 ``hidden_size`` 特征维度。数据类型为 float32 或 float64。 - **num_filters** (int) - 滤波器的数量。 - - **filter_size** (int,可选) - 滤波器的高度(H);不支持指定滤波器宽度(W),宽度固定取值为输入的 ``hidden_size``。默认值为3。 - - **filter_stride** (int,可选) - 滤波器每次移动的步长。目前只支持取值为1,默认为1。 - - **padding** (bool,可选) - **此参数不起任何作用,将在未来的版本中被移除。** 无论 ``padding`` 取值为False或者True,默认地,该函数会自适应地在每个输入序列的两端等长地填充全0数据,以确保卷积后的输出序列长度和输入长度一致。默认填充是考虑到输入的序列长度可能会小于卷积核大小,这会导致无正确计算卷积输出。填充为0的数据在训练过程中不会被更新。默认为True。 - - **padding_start** (int,可选) - 表示对输入序列填充时的起始位置,可以为负值。负值表示在每个序列的首端填充 ``|padding_start|`` 个时间步(time_step)的全0数据;正值表示对每个序列跳过前 ``padding_start`` 个时间步的数据。同时在末端填充 :math:`filter\_size + 
padding\_start - 1` 个时间步的全0数据,以保证卷积输出序列长度和输入长度一致。如果 ``padding_start`` 为None,则在每个序列的两端填充 :math:`\frac{filter\_size}{2}` 个时间步的全0数据;如果 ``padding_start`` 设置为0,则只在序列的末端填充 :math:`filter\_size - 1` 个时间步的全0数据。默认为None。 - - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **act** (str,可选) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations`,默认值为None。 + - **filter_size** (int,可选) - 滤波器的高度(H);不支持指定滤波器宽度(W),宽度固定取值为输入的 ``hidden_size``。默认值为 3。 + - **filter_stride** (int,可选) - 滤波器每次移动的步长。目前只支持取值为 1,默认为 1。 + - **padding** (bool,可选) - **此参数不起任何作用,将在未来的版本中被移除。** 无论 ``padding`` 取值为 False 或者 True,默认地,该函数会自适应地在每个输入序列的两端等长地填充全 0 数据,以确保卷积后的输出序列长度和输入长度一致。默认填充是考虑到输入的序列长度可能会小于卷积核大小,这会导致无正确计算卷积输出。填充为 0 的数据在训练过程中不会被更新。默认为 True。 + - **padding_start** (int,可选) - 表示对输入序列填充时的起始位置,可以为负值。负值表示在每个序列的首端填充 ``|padding_start|`` 个时间步(time_step)的全 0 数据;正值表示对每个序列跳过前 ``padding_start`` 个时间步的数据。同时在末端填充 :math:`filter\_size + padding\_start - 1` 个时间步的全 0 数据,以保证卷积输出序列长度和输入长度一致。如果 ``padding_start`` 为 None,则在每个序列的两端填充 :math:`\frac{filter\_size}{2}` 个时间步的全 0 数据;如果 ``padding_start`` 设置为 0,则只在序列的末端填充 :math:`filter\_size - 1` 个时间步的全 0 数据。默认为 None。 + - **bias_attr** (ParamAttr,可选) - 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str,可选) – 应用于输出上的激活函数,如 tanh、softmax、sigmoid,relu 等,支持列表请参考 :ref:`api_guide_activations`,默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -和输入序列等长的LoDTensor,数据类型和输入一致,为float32或float64。 +和输入序列等长的 LoDTensor,数据类型和输入一致,为 float32 或 float64。 代码示例 ::::::::: diff --git a/docs/api/paddle/static/nn/sequence_enumerate_cn.rst b/docs/api/paddle/static/nn/sequence_enumerate_cn.rst index 90bccc4a1ee..2bba86e6841 100644 
--- a/docs/api/paddle/static/nn/sequence_enumerate_cn.rst +++ b/docs/api/paddle/static/nn/sequence_enumerate_cn.rst @@ -9,7 +9,7 @@ sequence_enumerate 枚举形状为 ``[d_1, 1]`` 的输入序列所有长度为 ``win_size`` 的子序列,生成一个形状为 ``[d_1, win_size]`` 的新序列,需要时以 ``pad_value`` 填充。 .. note:: -该API的输入 ``input`` 只能是LodTensor。 +该 API 的输入 ``input`` 只能是 LodTensor。 范例如下: @@ -29,14 +29,14 @@ sequence_enumerate 参数 ::::::::: - - **input** (Variable)- 输入序列,形状为 ``[d_1, 1]`` ,lod level为1的LodTensor。数据类型支持int32,int64,float32或float64。 + - **input** (Variable)- 输入序列,形状为 ``[d_1, 1]`` ,lod level 为 1 的 LodTensor。数据类型支持 int32,int64,float32 或 float64。 - **win_size** (int)- 子序列窗口大小。 - - **pad_value** (int,可选)- 填充值,默认为0。 + - **pad_value** (int,可选)- 填充值,默认为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -枚举序列,形状为 ``[d_1, win_size]`` ,lod_level为1的LoDTensor。数据类型与输入 ``input`` 一致。 +枚举序列,形状为 ``[d_1, win_size]`` ,lod_level 为 1 的 LoDTensor。数据类型与输入 ``input`` 一致。 diff --git a/docs/api/paddle/static/nn/sequence_expand_as_cn.rst b/docs/api/paddle/static/nn/sequence_expand_as_cn.rst index 59c38932e3f..71e933df176 100644 --- a/docs/api/paddle/static/nn/sequence_expand_as_cn.rst +++ b/docs/api/paddle/static/nn/sequence_expand_as_cn.rst @@ -6,58 +6,58 @@ sequence_expand_as .. py:function:: paddle.static.nn.sequence_expand_as(x, y, name=None) -Sequence Expand As Layer,该OP根据输入 ``y`` 的第0级lod对输入 ``x`` 进行扩展。当前实现要求 ``y`` 的lod层数(level)必须为1,且 ``x`` 的第一维必须和 ``y`` 的第0层lod大小相同,所以扩展后的LodTensor具有和 ``y`` 相同的lod。扩展结果与输入 ``x`` 的lod无关,所以无需考虑 ``x`` 的lod。 +Sequence Expand As Layer,该 OP 根据输入 ``y`` 的第 0 级 lod 对输入 ``x`` 进行扩展。当前实现要求 ``y`` 的 lod 层数(level)必须为 1,且 ``x`` 的第一维必须和 ``y`` 的第 0 层 lod 大小相同,所以扩展后的 LodTensor 具有和 ``y`` 相同的 lod。扩展结果与输入 ``x`` 的 lod 无关,所以无需考虑 ``x`` 的 lod。 .. 
note:: -该API的输入 ``x`` 可以是Tensor或LoDTensor, ``y`` 只能是LodTensor。 +该 API 的输入 ``x`` 可以是 Tensor 或 LoDTensor, ``y`` 只能是 LodTensor。 范例解释如下: :: - 例1: - 假设,有4个长度维1的序列[a]、[b]、[c]和[d],现在要将其扩展为长度是3、3、1、1的序列[a][a][a]、[b][b][b]、[c]和[d]。 - 显然,扩展后的序列lod为[0, 3, 6, 7, 8],则: - 给定输入一维LoDTensor x + 例 1: + 假设,有 4 个长度维 1 的序列[a]、[b]、[c]和[d],现在要将其扩展为长度是 3、3、1、1 的序列[a][a][a]、[b][b][b]、[c]和[d]。 + 显然,扩展后的序列 lod 为[0, 3, 6, 7, 8],则: + 给定输入一维 LoDTensor x x.data = [[a], [b], [c], [d]] x.dims = [4, 1] 和输入 y - y.lod = [[3, 3, 1, 1]] #为了便于理解这里用基于长度lod表示 + y.lod = [[3, 3, 1, 1]] #为了便于理解这里用基于长度 lod 表示 - 经过sequence_expand_as运算,得到输出1级LoDTensor out - out.lod = [[0, 3, 6, 7, 8]] #基于偏移的lod,等价于基于长度的[[3, 3, 1, 1]] + 经过 sequence_expand_as 运算,得到输出 1 级 LoDTensor out + out.lod = [[0, 3, 6, 7, 8]] #基于偏移的 lod,等价于基于长度的[[3, 3, 1, 1]] out.data = [[a], [a], [a], [b], [b], [b], [c], [d]] out.dims = [8, 1] - 可见,输出out将x扩展至和y具有相同的lod。 + 可见,输出 out 将 x 扩展至和 y 具有相同的 lod。 :: - 例2: - 设定与例1类似,给定输入一维LoDTensor x: + 例 2: + 设定与例 1 类似,给定输入一维 LoDTensor x: x.data = [[a, b], [c, d], [e, f]] x.dims = [3, 2] 和输入 y: - y.lod = [[2, 1, 3]] #为了便于理解这里用基于长度lod表示 + y.lod = [[2, 1, 3]] #为了便于理解这里用基于长度 lod 表示 - 输出为1级LoDTensor: - out.lod = [[0, 2, 3, 6]] #基于偏移的lod,等价于基于长度的[[2, 1, 3]] + 输出为 1 级 LoDTensor: + out.lod = [[0, 2, 3, 6]] #基于偏移的 lod,等价于基于长度的[[2, 1, 3]] out.data = [[a, b], [a, b] [c, d], [e, f], [e, f], [e, f]] out.dims = [6, 2] - 可见,输出out将x扩展至和y具有相同的lod。 + 可见,输出 out 将 x 扩展至和 y 具有相同的 lod。 参数 ::::::::: - - **x** (Variable) - 输入变量,维度为 :math:`[M, K]` 的二维Tensor或LoDTensor,第一维必须与输入 ``y`` 的第0层lod大小相同,且仅支持lod_level为1。数据类型支持int32,int64,float32或float64。 - - **y** (Variable) - 输入变量,LoDTensor,lod level必须为1。 + - **x** (Variable) - 输入变量,维度为 :math:`[M, K]` 的二维 Tensor 或 LoDTensor,第一维必须与输入 ``y`` 的第 0 层 lod 大小相同,且仅支持 lod_level 为 1。数据类型支持 int32,int64,float32 或 float64。 + - **y** (Variable) - 输入变量,LoDTensor,lod level 必须为 1。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -扩展变量,维度为 :math:`[N, K]` 的二维LoDTensor,N由输入 ``y`` 
的lod决定,且仅支持lod_level为1。数据类型与输入 ``x`` 一致。 +扩展变量,维度为 :math:`[N, K]` 的二维 LoDTensor,N 由输入 ``y`` 的 lod 决定,且仅支持 lod_level 为 1。数据类型与输入 ``x`` 一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/static/nn/sequence_expand_cn.rst b/docs/api/paddle/static/nn/sequence_expand_cn.rst index 5d35be2d5dc..54cead3f5c5 100644 --- a/docs/api/paddle/static/nn/sequence_expand_cn.rst +++ b/docs/api/paddle/static/nn/sequence_expand_cn.rst @@ -8,59 +8,59 @@ sequence_expand -序列扩张层(Sequence Expand Layer),根据输入 ``y`` 的第 ``ref_level`` 层lod对输入 ``x`` 进行扩展。``x`` 的lod level最多为1,若 ``x`` 的lod level为1,则 ``x`` 的lod大小必须与 ``y`` 的第 ``ref_level`` 层lod大小相等;若 ``x`` 的lod level为0,则 ``x`` 的第一维大小必须与 ``y`` 第 ``ref_level`` 层大小相等。``x`` 的秩最少为2,当 ``x`` 的秩大于2时,将被当作是一个二维张量处理。 +序列扩张层(Sequence Expand Layer),根据输入 ``y`` 的第 ``ref_level`` 层 lod 对输入 ``x`` 进行扩展。``x`` 的 lod level 最多为 1,若 ``x`` 的 lod level 为 1,则 ``x`` 的 lod 大小必须与 ``y`` 的第 ``ref_level`` 层 lod 大小相等;若 ``x`` 的 lod level 为 0,则 ``x`` 的第一维大小必须与 ``y`` 第 ``ref_level`` 层大小相等。``x`` 的秩最少为 2,当 ``x`` 的秩大于 2 时,将被当作是一个二维张量处理。 .. 
note:: -该API的输入 ``x`` 可以是Tensor或LodTensor, ``y`` 只能是LodTensor。 +该 API 的输入 ``x`` 可以是 Tensor 或 LodTensor, ``y`` 只能是 LodTensor。 范例解释如下: :: - 例1: - 假设两个长度为2的序列[a][b]和[c][d],欲将其扩展为4个长度为2的序列[a][b]、[a][b]、[c][d]、[c][d]。 - 序列[a][b]扩展2次,[c][d]扩展2次,扩展所需依据的lod为[2, 2],则: - 给定输入一维LoDTensor x - x.lod = [[2, 2]] #表示两个序列的长度为2,为了便于理解这里用基于长度lod表示 + 例 1: + 假设两个长度为 2 的序列[a][b]和[c][d],欲将其扩展为 4 个长度为 2 的序列[a][b]、[a][b]、[c][d]、[c][d]。 + 序列[a][b]扩展 2 次,[c][d]扩展 2 次,扩展所需依据的 lod 为[2, 2],则: + 给定输入一维 LoDTensor x + x.lod = [[2, 2]] #表示两个序列的长度为 2,为了便于理解这里用基于长度 lod 表示 x.data = [[a], [b], [c], [d]] x.dims = [4, 1] 和输入 y - y.lod = [[2, 2], #第0层lod,指定按该层扩展,表示分别扩展2次,为了便于理解这里用基于长度lod表示 - [3, 3, 1, 1]] #第1层lod,注意,因为指定ref_level为0,所以这一层与运算无关 - 指定 ref_level = 0,依据y的第0层lod进行扩展, + y.lod = [[2, 2], #第 0 层 lod,指定按该层扩展,表示分别扩展 2 次,为了便于理解这里用基于长度 lod 表示 + [3, 3, 1, 1]] #第 1 层 lod,注意,因为指定 ref_level 为 0,所以这一层与运算无关 + 指定 ref_level = 0,依据 y 的第 0 层 lod 进行扩展, - 经过sequence_expand,输出为1级LoDTensor out - out.lod = [[0, 2, 4, 6, 8]] #基于偏移的lod,等价于基于长度的[[2, 2, 2, 2]] + 经过 sequence_expand,输出为 1 级 LoDTensor out + out.lod = [[0, 2, 4, 6, 8]] #基于偏移的 lod,等价于基于长度的[[2, 2, 2, 2]] out.data = [[a], [b], [a], [b], [c], [d], [c], [d]] out.dims = [8, 1] :: - 例2: - 假设有3个长度维1的序列[a]、[b]、[c],现在要将其扩展为长度是2、0、3的序列[a][a]、[c][c][c]。 - 显然,扩展后的序列lod为[2, 0, 3],则: - 给定输入一维LoDTensor x + 例 2: + 假设有 3 个长度维 1 的序列[a]、[b]、[c],现在要将其扩展为长度是 2、0、3 的序列[a][a]、[c][c][c]。 + 显然,扩展后的序列 lod 为[2, 0, 3],则: + 给定输入一维 LoDTensor x x.data = [[a], [b], [c]] x.dims = [3, 1] 和输入 y y.lod = [[2, 0, 3]] 默认 ref_level = -1 - 经过sequence_expand,输出为1级LoDTensor out + 经过 sequence_expand,输出为 1 级 LoDTensor out out.data = [[a], [a], [c], [c], [c]] out.dims = [5, 1] 参数 ::::::::: - - **x** (Variable) - 输入变量,维度为 :math:`[M, K]` ,lod level至多1的二维Tensor或LoDTensor。数据类型支持int32,int64,float32或float64。 - - **y** (Variable) - 输入变量,lod level至少为1的LoDTensor。数据类型不限。 - - **ref_level** (int,可选) - 扩展 ``x`` 所依据的 ``y`` 的lod层。默认值-1,表示lod的最后一层。 + - **x** (Variable) - 输入变量,维度为 :math:`[M, K]` ,lod level 至多 1 的二维 
Tensor 或 LoDTensor。数据类型支持 int32,int64,float32 或 float64。 + - **y** (Variable) - 输入变量,lod level 至少为 1 的 LoDTensor。数据类型不限。 + - **ref_level** (int,可选) - 扩展 ``x`` 所依据的 ``y`` 的 lod 层。默认值-1,表示 lod 的最后一层。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -扩展变量,维度为 :math:`[N, K]` 的LoDTensor,N由输入 ``x`` 和 ``y`` 的lod共同决定。数据类型与输入 ``x`` 一致。 +扩展变量,维度为 :math:`[N, K]` 的 LoDTensor,N 由输入 ``x`` 和 ``y`` 的 lod 共同决定。数据类型与输入 ``x`` 一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/static/nn/sequence_first_step_cn.rst b/docs/api/paddle/static/nn/sequence_first_step_cn.rst index a8d7e30bf59..0a886d2f540 100644 --- a/docs/api/paddle/static/nn/sequence_first_step_cn.rst +++ b/docs/api/paddle/static/nn/sequence_first_step_cn.rst @@ -7,35 +7,35 @@ sequence_first_step .. py:function:: paddle.static.nn.sequence_first_step(input) .. note:: -该API仅支持LoDTensor类型的输入。 +该 API 仅支持 LoDTensor 类型的输入。 -对输入的LoDTensor,在最后一层lod_level上,选取其每个序列(sequence)的第一个时间步(time_step)的特征向量作为池化后的输出向量。 +对输入的 LoDTensor,在最后一层 lod_level 上,选取其每个序列(sequence)的第一个时间步(time_step)的特征向量作为池化后的输出向量。 :: Case 1: - input是1-level LoDTensor: + input 是 1-level LoDTensor: input.lod = [[0, 2, 5, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - 输出为LoDTensor: + 输出为 LoDTensor: out.shape = [3, 1] 且 out.shape[0] == len(x.lod[-1]) == 3 out.data = [[1.], [2.], [5.]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.) 
Case 2: - input是2-level的LoDTensor,包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 - 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; - 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 + input 是 2-level 的 LoDTensor,包含 3 个长度分别为[2, 0, 3]的序列,其中中间的 0 表示序列为空。 + 第一个长度为 2 的序列包含 2 个长度分别为[1, 2]的子序列; + 最后一个长度为 3 的序列包含 3 个长度分别为[1, 0, 3]的子序列。 input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - 将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 - 输出为LoDTensor: + 将根据最后一层的 lod 信息[0, 1, 3, 4, 4, 7]进行池化操作,且 pad_value = 0.0 + 输出为 LoDTensor: out.shape= [5, 1] out.lod = [[0, 2, 2, 5]] 其中 out.shape[0] == len(x.lod[-1]) == 5 @@ -44,11 +44,11 @@ sequence_first_step 参数 ::::::::: -**input** (Variable)- 类型为LoDTensor的输入序列,仅支持lod_level不超过2的LoDTensor,数据类型为float32。 +**input** (Variable)- 类型为 LoDTensor 的输入序列,仅支持 lod_level 不超过 2 的 LoDTensor,数据类型为 float32。 返回 ::::::::: -每个输入序列中的第一个step的特征向量组成的LoDTensor,数据类型为float32。 +每个输入序列中的第一个 step 的特征向量组成的 LoDTensor,数据类型为 float32。 代码示例 diff --git a/docs/api/paddle/static/nn/sequence_last_step_cn.rst b/docs/api/paddle/static/nn/sequence_last_step_cn.rst index cc4ae53762b..f6bf3cded41 100644 --- a/docs/api/paddle/static/nn/sequence_last_step_cn.rst +++ b/docs/api/paddle/static/nn/sequence_last_step_cn.rst @@ -8,20 +8,20 @@ sequence_last_step .. 
note:: -该API仅支持LoDTensor类型的输入。 +该 API 仅支持 LoDTensor 类型的输入。 -对输入的LoDTensor,在最后一层lod_level上,选取其每个序列(sequence)的最后一个时间步(time-step)的特征向量作为池化后的输出向量。 +对输入的 LoDTensor,在最后一层 lod_level 上,选取其每个序列(sequence)的最后一个时间步(time-step)的特征向量作为池化后的输出向量。 :: Case 1: - input是1-level的LoDTensor: + input 是 1-level 的 LoDTensor: input.lod = [[0, 2, 5, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - 输出为LoDTensor: + 输出为 LoDTensor: out.shape = [3, 1] 且 out.shape[0] == len(x.lod[-1]) == 3 @@ -29,15 +29,15 @@ sequence_last_step Case 2: - input是2-level的LoDTensor,包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 - 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; - 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 + input 是 2-level 的 LoDTensor,包含 3 个长度分别为[2, 0, 3]的序列,其中中间的 0 表示序列为空。 + 第一个长度为 2 的序列包含 2 个长度分别为[1, 2]的子序列; + 最后一个长度为 3 的序列包含 3 个长度分别为[1, 0, 3]的子序列。 input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - 将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 - 输出为LoDTensor: + 将根据最后一层的 lod 信息[0, 1, 3, 4, 4, 7]进行池化操作,且 pad_value = 0.0 + 输出为 LoDTensor: out.shape= [5, 1] out.lod = [[0, 2, 2, 5]] 其中 out.shape[0] == len(x.lod[-1]) == 5 @@ -46,11 +46,11 @@ sequence_last_step 参数 ::::::::: -**input** (Tensor)- 类型为LoDTensor的输入序列,仅支持lod_level不超过2的LoDTensor,数据类型为float32。 +**input** (Tensor)- 类型为 LoDTensor 的输入序列,仅支持 lod_level 不超过 2 的 LoDTensor,数据类型为 float32。 返回 ::::::::: -每个输入序列中的最后一步特征向量组成的LoDTensor,数据类型为float32。 +每个输入序列中的最后一步特征向量组成的 LoDTensor,数据类型为 float32。 代码示例 ::::::::: diff --git a/docs/api/paddle/static/nn/sequence_pad_cn.rst b/docs/api/paddle/static/nn/sequence_pad_cn.rst index 121ade1a9ee..1226f122907 100644 --- a/docs/api/paddle/static/nn/sequence_pad_cn.rst +++ b/docs/api/paddle/static/nn/sequence_pad_cn.rst @@ -6,52 +6,52 @@ sequence_pad .. 
py:function:: paddle.static.nn.sequence_pad(x,pad_value,maxlen=None,name=None) -序列填充操作符(Sequence Pad Operator),该OP将同一batch中的序列填充到一个一致的长度(由 ``maxlen`` 指定)。填充的新元素的值具体由输入 ``pad_value`` 指定,并会添加到每一个序列的末尾,使得他们最终的长度保持一致。最后返回一个Python tuple ``(Out, Length)``,其中LodTensor ``Out`` 为填充后的序列,LodTensor ``Length`` 为填充前的原序列长度信息。 +序列填充操作符(Sequence Pad Operator),该 OP 将同一 batch 中的序列填充到一个一致的长度(由 ``maxlen`` 指定)。填充的新元素的值具体由输入 ``pad_value`` 指定,并会添加到每一个序列的末尾,使得他们最终的长度保持一致。最后返回一个 Python tuple ``(Out, Length)``,其中 LodTensor ``Out`` 为填充后的序列,LodTensor ``Length`` 为填充前的原序列长度信息。 .. note:: -该API的输入 ``x`` 只能是LodTensor。 +该 API 的输入 ``x`` 只能是 LodTensor。 范例如下: :: - 例1: - 给定输入1-level LoDTensor x: - x.lod = [[0, 2, 5]] #输入的两个序列长度是2和3 + 例 1: + 给定输入 1-level LoDTensor x: + x.lod = [[0, 2, 5]] #输入的两个序列长度是 2 和 3 x.data = [[a],[b],[c],[d],[e]] 和输入 pad_value: pad_value.data = [0] 设置 maxlen = 4 - 得到得到tuple (Out, Length): + 得到 tuple (Out, Length): Out.data = [[[a],[b],[0],[0]],[[c],[d],[e],[0]]] - Length.data = [2, 3] #原序列长度是2和3 + Length.data = [2, 3] #原序列长度是 2 和 3 :: - 例2: - 给定输入1-level LoDTensor x: + 例 2: + 给定输入 1-level LoDTensor x: x.lod = [[0, 2, 5]] x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]] 和输入 pad_value: pad_value.data = [0] - 默认 maxlen = None, (根据x的形状,此例中实际长度为3) + 默认 maxlen = None, (根据 x 的形状,此例中实际长度为 3) - 得到得到tuple (Out, Length): + 得到 tuple (Out, Length): Out.data = [[[a1,a2],[b1,b2],[0,0]],[[c1,c2],[d1,d2],[e1,e2]]] Length.data = [2, 3] :: - 例3: - 给定输入1-level LoDTensor x: + 例 3: + 给定输入 1-level LoDTensor x: x.lod = [[0, 2, 5]] x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]] 和输入 pad_value: pad_value.data = [p1,p2] - 默认 maxlen = None, (根据x的形状,此例中实际长度为3) + 默认 maxlen = None, (根据 x 的形状,此例中实际长度为 3) - 得到tuple (Out, Length): + 得到 tuple (Out, Length): Out.data = [[[a1,a2],[b1,b2],[p1,p2]],[[c1,c2],[d1,d2],[e1,e2]]] Length.data = [2, 3] @@ -59,14 +59,14 @@ sequence_pad 参数 ::::::::: - - **x** (Tensor) - 输入,维度为 ``[M, K]`` 
的LoDTensor,仅支持lod_level为1。lod所描述的序列数量,作为要填充的batch_size。数据类型为int32,int64,float32或float64。 - - **pad_value** (Tensor) - 填充值,可以是标量或长度为 ``K`` 的一维Tensor。如果是标量,则自动广播为Tensor。数据类型需与 ``x`` 相同。 - - **maxlen** (int,可选) - 填充序列的长度。默认为None,此时以序列中最长序列的长度为准,其他所有序列填充至该长度。当是某个特定的正整数,最大长度必须大于最长初始序列的长度。 + - **x** (Tensor) - 输入,维度为 ``[M, K]`` 的 LoDTensor,仅支持 lod_level 为 1。lod 所描述的序列数量,作为要填充的 batch_size。数据类型为 int32,int64,float32 或 float64。 + - **pad_value** (Tensor) - 填充值,可以是标量或长度为 ``K`` 的一维 Tensor。如果是标量,则自动广播为 Tensor。数据类型需与 ``x`` 相同。 + - **maxlen** (int,可选) - 填充序列的长度。默认为 None,此时以序列中最长序列的长度为准,其他所有序列填充至该长度。当是某个特定的正整数,最大长度必须大于最长初始序列的长度。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -元素为两个LoDTensor的Python tuple。第一个元素为填充后的变量 ``Out``,形状为 ``[batch_size, maxlen, K]`` ,lod level为0的LoDTensor,数据类型与输入 ``x`` 相同。第二个元素为填充前的原序列长度信息 ``Length`` ,lod level为0的一维LoDTensor,长度等于batch_size,数据类型为int64。 +元素为两个 LoDTensor 的 Python tuple。第一个元素为填充后的变量 ``Out``,形状为 ``[batch_size, maxlen, K]`` ,lod level 为 0 的 LoDTensor,数据类型与输入 ``x`` 相同。第二个元素为填充前的原序列长度信息 ``Length`` ,lod level 为 0 的一维 LoDTensor,长度等于 batch_size,数据类型为 int64。 代码示例 diff --git a/docs/api/paddle/static/nn/sequence_pool_cn.rst b/docs/api/paddle/static/nn/sequence_pool_cn.rst index b493eb1d036..a5a6965ad46 100644 --- a/docs/api/paddle/static/nn/sequence_pool_cn.rst +++ b/docs/api/paddle/static/nn/sequence_pool_cn.rst @@ -9,11 +9,11 @@ sequence_pool .. 
note:: -该API的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用 :ref:`paddle.nn.functional.avg_pool2d ` 或 :ref:`paddle.nn.functional.max_pool2d ` 。 +该 API 的输入只能是 LoDTensor,如果您需要处理的输入是 Tensor 类型,请使用 :ref:`paddle.nn.functional.avg_pool2d ` 或 :ref:`paddle.nn.functional.max_pool2d ` 。 -对输入的LoDTensor进行指定方式的池化(pooling)操作。通过指定pool_type参数,将输入的每个序列(sequence)在最后一层lod_level上或时间步(time-step)上对特征进行诸如sum、average、sqrt等池化操作。 +对输入的 LoDTensor 进行指定方式的池化(pooling)操作。通过指定 pool_type 参数,将输入的每个序列(sequence)在最后一层 lod_level 上或时间步(time-step)上对特征进行诸如 sum、average、sqrt 等池化操作。 -支持六种pool_type: +支持六种 pool_type: - **average**: :math:`Out[i] = \frac{\sum_{i}X_{i}}{N}` - **sum**: :math:`Out[i] = \sum _{j}X_{ij}` @@ -22,20 +22,20 @@ sequence_pool - **last**: :math:`Out[i] = X_{N\_i}` - **first**: :math:`Out[i] = X_{0}` -其中 ``N_i`` 为待池化第i个输入序列的长度。 +其中 ``N_i`` 为待池化第 i 个输入序列的长度。 :: Case 1: - input是1-level的LoDTensor,且pad_value = 0.0: + input 是 1-level 的 LoDTensor,且 pad_value = 0.0: input.lod = [[0, 2, 5, 7, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - 输出为LoDTensor: + 输出为 LoDTensor: out.shape = [4, 1] 其中 out.shape[0] == len(x.lod[-1]) == 4 - 对于不同的pool_type: + 对于不同的 pool_type: average: out.data = [[2.], [4.], [3.], [0.0]], where 2.=(1. + 3.)/2, 4.=(2. + 4. + 6.)/3, 3.=(5. + 1.)/2 sum : out.data = [[4.], [12.], [6.], [0.0]], where 4.=1. + 3., 12.=2. + 4. + 6., 6.=5. + 1. sqrt : out.data = [[2.82], [6.93], [4.24], [0.0]], where 2.82=(1. + 3.)/sqrt(2), 6.93=(2. + 4. + 6.)/sqrt(3), 4.24=(5. + 1.)/sqrt(2) @@ -43,19 +43,19 @@ sequence_pool last : out.data = [[3.], [6.], [1.], [0.0]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.) first : out.data = [[1.], [2.], [5.], [0.0]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.) 
- 上述out.data中的最后一个[0.0]均为填充的数据。 + 上述 out.data 中的最后一个[0.0]均为填充的数据。 Case 2: - input是2-level的LoDTensor,包含3个长度分别为[2, 0, 3]的序列,其中中间的0表示序列为空。 - 第一个长度为2的序列包含2个长度分别为[1, 2]的子序列; - 最后一个长度为3的序列包含3个长度分别为[1, 0, 3]的子序列。 + input 是 2-level 的 LoDTensor,包含 3 个长度分别为[2, 0, 3]的序列,其中中间的 0 表示序列为空。 + 第一个长度为 2 的序列包含 2 个长度分别为[1, 2]的子序列; + 最后一个长度为 3 的序列包含 3 个长度分别为[1, 0, 3]的子序列。 input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]] input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]] input.shape = [7, 1] - 以pool_type取值为sum为例,将根据最后一层的lod信息[0, 1, 3, 4, 4, 7]进行池化操作,且pad_value = 0.0 - 输出为LoDTensor: + 以 pool_type 取值为 sum 为例,将根据最后一层的 lod 信息[0, 1, 3, 4, 4, 7]进行池化操作,且 pad_value = 0.0 + 输出为 LoDTensor: out.shape= [5, 1] out.lod = [[0, 2, 2, 5]] 其中 out.shape[0] == len(x.lod[-1]) == 5 @@ -65,14 +65,14 @@ sequence_pool 参数 ::::::::: - - **input** (Tensor) - 类型为LoDTensor的输入序列,仅支持lod_level不超过2的LoDTensor,数据类型为float32。 - - **pool_type** (str) - 池化类型,支持average,sum,sqrt,max,last和first池化操作。 - - **is_test** (bool,可选) - 仅在pool_type取值为max时生效。当is_test为False时,则在池化操作过程中会创建maxIndex临时Tenosr,以记录最大特征值对应的索引信息,用于训练阶段的反向梯度计算。默认为False。 - - **pad_value** (float,可选) - 用于填充输入序列为空时的池化结果,默认为0.0。 + - **input** (Tensor) - 类型为 LoDTensor 的输入序列,仅支持 lod_level 不超过 2 的 LoDTensor,数据类型为 float32。 + - **pool_type** (str) - 池化类型,支持 average,sum,sqrt,max,last 和 first 池化操作。 + - **is_test** (bool,可选) - 仅在 pool_type 取值为 max 时生效。当 is_test 为 False 时,则在池化操作过程中会创建 maxIndex 临时 Tensor,以记录最大特征值对应的索引信息,用于训练阶段的反向梯度计算。默认为 False。 + - **pad_value** (float,可选) - 用于填充输入序列为空时的池化结果,默认为 0.0。 返回 ::::::::: -经过指定类型池化后的LoDTensor,数据类型为float32。 +经过指定类型池化后的 LoDTensor,数据类型为 float32。 代码示例 ::::::::: diff --git a/docs/api/paddle/static/nn/sequence_reshape_cn.rst b/docs/api/paddle/static/nn/sequence_reshape_cn.rst index 963e12974fb..263cdd03281 100644 --- a/docs/api/paddle/static/nn/sequence_reshape_cn.rst +++ b/docs/api/paddle/static/nn/sequence_reshape_cn.rst @@ -8,20 +8,20 @@ sequence_reshape .. 
note:: -该API的输入只能是LoDTensor,如果您需要处理的输入是Tensor类型,请使用 :ref:`paddle.reshape ` 。 +该 API 的输入只能是 LoDTensor,如果您需要处理的输入是 Tensor 类型,请使用 :ref:`paddle.reshape ` 。 -在指定 ``new_dim`` 参数下,通过序列原始长度、和原始shape计算出新的shape,以输出包含新维度(new_dim)下的LoDTensor。目前仅支持1-level LoDTensor,请确保(原长度*原维数)可以除以新的维数,且每个序列没有余数。 +在指定 ``new_dim`` 参数下,通过序列原始长度、和原始 shape 计算出新的 shape,以输出包含新维度(new_dim)下的 LoDTensor。目前仅支持 1-level LoDTensor,请确保(原长度*原维数)可以除以新的维数,且每个序列没有余数。 :: - input是一个LoDTensor: + input 是一个 LoDTensor: input.lod = [[0, 2, 6]] input.data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]] input.shape = [6, 2] 设置 new_dim = 4 - 输出为LoDTensor: + 输出为 LoDTensor: out.lod = [[0, 1, 3]] out.data = [[1, 2, 3, 4], @@ -34,12 +34,12 @@ sequence_reshape 参数 ::::::::: - - **input** (Tensor) - 维度为 :math:`[M, K]` 的二维LoDTensor,且仅支持lod_level为1。数据类型为int32,int64,float32或float64。 - - **new_dim** (int)- 指定reshape后的新维度,即对输入LoDTensor重新reshape后的新维度。 + - **input** (Tensor) - 维度为 :math:`[M, K]` 的二维 LoDTensor,且仅支持 lod_level 为 1。数据类型为 int32,int64,float32 或 float64。 + - **new_dim** (int)- 指定 reshape 后的新维度,即对输入 LoDTensor 重新 reshape 后的新维度。 返回 ::::::::: -根据新维度重新reshape后的LoDTensor,数据类型和输入一致。 +根据新维度重新 reshape 后的 LoDTensor,数据类型和输入一致。 代码示例 diff --git a/docs/api/paddle/static/nn/sequence_reverse_cn.rst b/docs/api/paddle/static/nn/sequence_reverse_cn.rst index 18917c67d7a..2d86c18decd 100644 --- a/docs/api/paddle/static/nn/sequence_reverse_cn.rst +++ b/docs/api/paddle/static/nn/sequence_reverse_cn.rst @@ -7,13 +7,13 @@ sequence_reverse .. 
note:: -该API仅支持LoDTensor。 +该 API 仅支持 LoDTensor。 -输入的LoDTensor,在每个序列(sequence)上进行反转。目前仅支持对LoD层次(LoD level)为1的LoDTensor进行反转。该OP在构建反向 :ref:`cn_api_fluid_layers_DynamicRNN` 网络时十分有用。 +输入的 LoDTensor,在每个序列(sequence)上进行反转。目前仅支持对 LoD 层次(LoD level)为 1 的 LoDTensor 进行反转。该 OP 在构建反向 :ref:`cn_api_fluid_layers_DynamicRNN` 网络时十分有用。 :: - 输入x是一个LoDTensor: + 输入 x 是一个 LoDTensor: x.lod = [[0, 2, 5]] x.data = [[1, 2, 3, 4], [5, 6, 7, 8], @@ -22,7 +22,7 @@ sequence_reverse [17,18, 19, 20]] x.shape = [5, 4] - 输出out与x具有同样的shape和LoD信息: + 输出 out 与 x 具有同样的 shape 和 LoD 信息: out.lod = [[0, 2, 5]] out.data = [[5, 6, 7, 8], [1, 2, 3, 4], @@ -35,12 +35,12 @@ sequence_reverse 参数 ::::::::: - - **x** (Variable) – 输入是LoD level为1的LoDTensor。目前仅支持对LoD层次(LoD level)为1的LoDTensor进行反转。数据类型为float32,float64,int8,int32或int64。 + - **x** (Variable) – 输入是 LoD level 为 1 的 LoDTensor。目前仅支持对 LoD 层次(LoD level)为 1 的 LoDTensor 进行反转。数据类型为 float32,float64,int8,int32 或 int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -输出在每个序列上反转后的LoDTensor,数据类型和输入类型一致。 +输出在每个序列上反转后的 LoDTensor,数据类型和输入类型一致。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/nn/sequence_scatter_cn.rst b/docs/api/paddle/static/nn/sequence_scatter_cn.rst index 730ff637e71..03eac0993c9 100644 --- a/docs/api/paddle/static/nn/sequence_scatter_cn.rst +++ b/docs/api/paddle/static/nn/sequence_scatter_cn.rst @@ -8,15 +8,15 @@ sequence_scatter .. 
note:: - 该OP的输入index,updates必须是LoDTensor。 + 该 OP 的输入 index,updates 必须是 LoDTensor。 -根据index提供的位置将updates中的信息更新到输出中。 +根据 index 提供的位置将 updates 中的信息更新到输出中。 -先使用input初始化output,然后通过output[instance_index][index[pos]] += updates[pos]方式,将updates的信息更新到output中,其中instance_idx是pos对应的在batch中第k个样本。 +先使用 input 初始化 output,然后通过 output[instance_index][index[pos]] += updates[pos]方式,将 updates 的信息更新到 output 中,其中 instance_idx 是 pos 对应的在 batch 中第 k 个样本。 -output[i][j]的值取决于能否在index中第i+1个区间中找到对应的数据j,若能找到out[i][j] = input[i][j] + update[m][n],否则 out[i][j] = input[i][j]。 +output[i][j]的值取决于能否在 index 中第 i+1 个区间中找到对应的数据 j,若能找到 out[i][j] = input[i][j] + update[m][n],否则 out[i][j] = input[i][j]。 -例如,在下面样例中,index的lod信息分为了3个区间。其中,out[0][0]能在index中第1个区间中找到对应数据0,所以,使用updates对应位置的值进行更新,out[0][0] = input[0][0]+updates[0][0]。out[2][1]不能在index中第3个区间找到对应数据1,所以,它等于输入对应位置的值,out[2][1] = input[2][1]。 +例如,在下面样例中,index 的 lod 信息分为了 3 个区间。其中,out[0][0]能在 index 中第 1 个区间中找到对应数据 0,所以,使用 updates 对应位置的值进行更新,out[0][0] = input[0][0]+updates[0][0]。out[2][1]不能在 index 中第 3 个区间找到对应数据 1,所以,它等于输入对应位置的值,out[2][1] = input[2][1]。 **样例**: @@ -45,14 +45,14 @@ output[i][j]的值取决于能否在index中第i+1个区间中找到对应的数 参数 ::::::::: - - **input** (Tensor) - 维度为 :math:`[N, k_1 ... k_n]` 的Tensor,支持的数据类型:float32,float64,int32,int64。 - - **index** (Tensor) - 包含index信息的LoDTensor,lod level必须等于1,支持的数据类型:int32,int64。 - - **updates** (Tensor) - 包含updates信息的LoDTensor,lod level和index一致,数据类型与input的数据类型一致。支持的数据类型:float32,float64,int32,int64。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + - **input** (Tensor) - 维度为 :math:`[N, k_1 ... 
k_n]` 的 Tensor,支持的数据类型:float32,float64,int32,int64。 + - **index** (Tensor) - 包含 index 信息的 LoDTensor,lod level 必须等于 1,支持的数据类型:int32,int64。 + - **updates** (Tensor) - 包含 updates 信息的 LoDTensor,lod level 和 index 一致,数据类型与 input 的数据类型一致。支持的数据类型:float32,float64,int32,int64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -在input的基础上使用updates进行更新后得到的Tensor,它与input有相同的维度和数据类型。 +在 input 的基础上使用 updates 进行更新后得到的 Tensor,它与 input 有相同的维度和数据类型。 代码示例 diff --git a/docs/api/paddle/static/nn/sequence_slice_cn.rst b/docs/api/paddle/static/nn/sequence_slice_cn.rst index e54f5398cee..607c4a50bc9 100644 --- a/docs/api/paddle/static/nn/sequence_slice_cn.rst +++ b/docs/api/paddle/static/nn/sequence_slice_cn.rst @@ -7,13 +7,13 @@ sequence_slice .. py:function:: paddle.static.nn.sequence_slice(input, offset, length, name=None) -**实现Sequence Slice(序列切片)运算** +**实现 Sequence Slice(序列切片)运算** 该层从给定序列中截取子序列。截取依据为所给的开始 ``offset`` (偏移量) 和子序列长 ``length`` 。 .. note:: -该API输入只能是LoDTensor,如果您需要处理的是Tensor类型,请使用 :ref:`paddle.slice ` 。 +该 API 输入只能是 LoDTensor,如果您需要处理的是 Tensor 类型,请使用 :ref:`paddle.slice ` 。 :: @@ -29,7 +29,7 @@ sequence_slice length.data = [[2], [1]] (4) name (str|None) - 输出变量为LoDTensor: + 输出变量为 LoDTensor: out.data = [[a1, a2], [b1, b2], [e1, e2]], out.lod = [[2, 1]], @@ -37,14 +37,14 @@ sequence_slice .。注意:: ``input`` , ``offset`` , ``length`` 的第一维大小应相同。 - ``offset`` 从0开始。 + ``offset`` 从 0 开始。 参数 ::::::::: - - **input** (Tensor) – 输入变量,类型为LoDTensor,承载着完整的序列。数据类型为float32,float64,int32或int64。 - - **offset** (Tensor) – 指定每个序列切片的起始索引,数据类型为int32或int64。 - - **length** (Tensor) – 指定每个子序列的长度,数据类型为int32或int64。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + - **input** (Tensor) – 输入变量,类型为 LoDTensor,承载着完整的序列。数据类型为 float32,float64,int32 或 int64。 + - **offset** (Tensor) – 指定每个序列切片的起始索引,数据类型为 int32 或 int64。 + - **length** (Tensor) – 指定每个子序列的长度,数据类型为 int32 或 int64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 
::::::::: diff --git a/docs/api/paddle/static/nn/sequence_softmax_cn.rst b/docs/api/paddle/static/nn/sequence_softmax_cn.rst index 8339f8799f1..162483146c2 100644 --- a/docs/api/paddle/static/nn/sequence_softmax_cn.rst +++ b/docs/api/paddle/static/nn/sequence_softmax_cn.rst @@ -7,19 +7,19 @@ sequence_softmax .. py:function:: paddle.static.nn.sequence_softmax(input, use_cudnn=False, name=None) .. note:: - 该API的输入只能是LoDTensor,如果要处理的输入是Tensor类型,请使用 :ref:`paddle.nn.functional.softmax `。 + 该 API 的输入只能是 LoDTensor,如果要处理的输入是 Tensor 类型,请使用 :ref:`paddle.nn.functional.softmax `。 -根据LoD信息将输入的第0维度进行划分,在划分的每一个区间内部进行运算。 +根据 LoD 信息将输入的第 0 维度进行划分,在划分的每一个区间内部进行运算。 -对第i个区间内的元素的计算公式如下: +对第 i 个区间内的元素的计算公式如下: .. math:: Out\left ( X[lod[i]:lod[i+1]],: \right ) = \frac{exp(X[lod[i]:lod[i+1],:])}{\sum (exp(X[lod[i]:lod[i+1],:]))} -输入Tensor的维度可为 :math:`[N,1]` 或者 :math:`[N]`,推荐使用 :math:`[N]` 。 +输入 Tensor 的维度可为 :math:`[N,1]` 或者 :math:`[N]`,推荐使用 :math:`[N]` 。 -例如,对有6个样本的batch,每个样本的长度为3,2,4,1,2,3,其lod信息为[[0, 3, 5, 9, 10, 12, 15]],根据lod信息将第0维度划分为6份,在 :math:`X[0:3,:],X[3:5,:],X[5:9,:],X[9:10,:],X[10:12,:],X[12:15,:]` 中进行softmax运算。 +例如,对有 6 个样本的 batch,每个样本的长度为 3,2,4,1,2,3,其 lod 信息为[[0, 3, 5, 9, 10, 12, 15]],根据 lod 信息将第 0 维度划分为 6 份,在 :math:`X[0:3,:],X[3:5,:],X[5:9,:],X[9:10,:],X[10:12,:],X[12:15,:]` 中进行 softmax 运算。 :: @@ -46,13 +46,13 @@ sequence_softmax 参数 ::::::::: - - **input** (Tensor) - 维度为 :math:`[N, 1]` 或者 :math:`[N]` 的LoDTensor,推荐使用 :math:`[N]`。支持的数据类型:float32,float64。 - - **use_cudnn** (bool,可选) - 是否用cudnn核,仅当安装cudnn版本的paddle库且使用gpu训练或推理的时候生效。支持的数据类型:bool型。默认值为False。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + - **input** (Tensor) - 维度为 :math:`[N, 1]` 或者 :math:`[N]` 的 LoDTensor,推荐使用 :math:`[N]`。支持的数据类型:float32,float64。 + - **use_cudnn** (bool,可选) - 是否用 cudnn 核,仅当安装 cudnn 版本的 paddle 库且使用 gpu 训练或推理的时候生效。支持的数据类型:bool 型。默认值为 False。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: 
-根据区间计算softmax之后的LoDTensor,其维度与input的维度一致,数据类型与input的数据类型一致。 +根据区间计算 softmax 之后的 LoDTensor,其维度与 input 的维度一致,数据类型与 input 的数据类型一致。 代码示例 diff --git a/docs/api/paddle/static/nn/sequence_unpad_cn.rst b/docs/api/paddle/static/nn/sequence_unpad_cn.rst index dd4500a6e66..80853f29135 100644 --- a/docs/api/paddle/static/nn/sequence_unpad_cn.rst +++ b/docs/api/paddle/static/nn/sequence_unpad_cn.rst @@ -11,9 +11,9 @@ sequence_unpad .. note:: - 该API的输入为Tensor,输出为LoDTensor。用于移除填充元素,与之对应,还存在进行数据填充的API :ref:`cn_api_fluid_layers_sequence_pad`。 + 该 API 的输入为 Tensor,输出为 LoDTensor。用于移除填充元素,与之对应,还存在进行数据填充的 API :ref:`cn_api_fluid_layers_sequence_pad`。 -根据length的信息,将input中padding(填充)元素移除,并且返回一个LoDTensor。 +根据 length 的信息,将 input 中 padding(填充)元素移除,并且返回一个 LoDTensor。 :: @@ -24,7 +24,7 @@ sequence_unpad [ 6.0, 7.0, 8.0, 9.0, 10.0], [11.0, 12.0, 13.0, 14.0, 15.0]], - 其中包含 3 个被填充到长度为5的序列,实际长度由输入变量 ``length`` 指明,其中,x的维度为[3,4],length维度为[3],length的第0维与x的第0维一致: + 其中包含 3 个被填充到长度为 5 的序列,实际长度由输入变量 ``length`` 指明,其中,x 的维度为[3,4],length 维度为[3],length 的第 0 维与 x 的第 0 维一致: length.data = [2, 3, 4], @@ -37,13 +37,13 @@ sequence_unpad 参数 ::::::::: - - **x** (Tensor) – 包含填充元素的Tensor,其维度大小不能小于2,支持的数据类型:float32, float64,int32, int64。 - - **length** (Tensor) – 存储每个样本实际长度信息的1D Tesnor,该Tensor维度的第0维必须与x维度的第0维一致。支持的数据类型:int64。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 + - **x** (Tensor) – 包含填充元素的 Tensor,其维度大小不能小于 2,支持的数据类型:float32, float64,int32, int64。 + - **length** (Tensor) – 存储每个样本实际长度信息的 1D Tesnor,该 Tensor 维度的第 0 维必须与 x 维度的第 0 维一致。支持的数据类型:int64。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -将输入的填充元素移除,并返回一个LoDTensor,其递归序列长度与length参数的信息一致,其数据类型和输入一致。 +将输入的填充元素移除,并返回一个 LoDTensor,其递归序列长度与 length 参数的信息一致,其数据类型和输入一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/static/nn/sparse_embedding_cn.rst b/docs/api/paddle/static/nn/sparse_embedding_cn.rst index e570586ab9b..a71aba95f21 100644 --- a/docs/api/paddle/static/nn/sparse_embedding_cn.rst +++ 
b/docs/api/paddle/static/nn/sparse_embedding_cn.rst @@ -7,25 +7,25 @@ sparse_embedding .. py:function:: paddle.static.nn.sparse_embedding(input, size, padding_idx=None, is_test=False, entry=None, table_class="CommonSparseTable", param_attr=None, dtype='float32') -在飞桨参数服务器模式的大规模稀疏训练中作为embedding lookup层的算子,而不是使用paddle.nn.functional.embedding。 +在飞桨参数服务器模式的大规模稀疏训练中作为 embedding lookup 层的算子,而不是使用 paddle.nn.functional.embedding。 -根据input中的id信息从embedding矩阵中查询对应embedding信息,并会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 +根据 input 中的 id 信息从 embedding 矩阵中查询对应 embedding 信息,并会根据输入的 size (vocab_size, emb_size)和 dtype 自动构造一个二维 embedding 矩阵。 -输出的Tensor的shape是将输入Tensor shape的会在输出的embedding最后追加一维emb_size。 +输出的 Tensor 的 shape 是将输入 Tensor shape 的会在输出的 embedding 最后追加一维 emb_size。 .. note:: -input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 +input 中的 id 必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 :: Case 1: - input是Tensor,且padding_idx = -1 + input 是 Tensor,且 padding_idx = -1 input.data = [[1, 3], [2, 4], [4, 127]] input.shape = [3, 2] - 若size = [128, 16] - 输出为Tensor: + 若 size = [128, 16] + 输出为 Tensor: out.shape = [3, 2, 16] out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], [0.345421456, 0.524563927, ..., 0.144534654]], @@ -35,18 +35,18 @@ input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出 [[0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]]] # padding data - 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127,对于输入id为127的词,进行padding处理。 + 输入的 padding_idx 小于 0,则自动转换为 padding_idx = -1 + 128 = 127,对于输入 id 为 127 的词,进行 padding 处理。 Case 2: - input是lod level 为1的LoDTensor,且padding_idx = 0 + input 是 lod level 为 1 的 LoDTensor,且 padding_idx = 0 input.lod = [[2, 3]] input.data = [[1], [3], [2], [4], [0]] input.shape = [5, 1] - 若size = [128, 16] + 若 size = [128, 16] - 输出为LoDTensor: + 输出为 LoDTensor: out.lod = [[2, 3]] out.shape = [5, 1, 16] out.data = [[[0.129435295, 0.244512452, ..., 0.436322452]], @@ -54,23 +54,23 @@ input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出 
[[0.345249859, 0.124939536, ..., 0.194353745]], [[0.945345345, 0.435394634, ..., 0.435345365]], [[0.0, 0.0, ..., 0.0 ]]] # padding data - 输入的padding_idx = 0,则对于输入id为0的词,进行padding处理。 + 输入的 padding_idx = 0,则对于输入 id 为 0 的词,进行 padding 处理。 参数 :::::::: - - **input** (Variable) - 存储id信息的Tensor,数据类型必须为:int64,输入的shape最后一维须为1。input中的id必须满足 ``0 =< id < size[0]`` 。 - - **size** (tuple|list) - embedding矩阵的维度(vocab_size,emb_size)。必须包含两个元素,第一个元素为vocab_size(词表大小),第二个为emb_size(embedding层维度)。大规模稀疏场景下,参数规模初始为0,会随着训练的进行逐步扩展,因此如果vocab_size暂时无用,其值可以为任意整数,emb_size则为词嵌入权重参数的维度配置。 - - **padding_idx** (int|long|None,可选) - padding_idx需在区间 ``[-vocab_size, vocab_size)``,否则不生效,``padding_idx < 0`` 时,padding_idx会被改成``vocab_size + padding_idx``,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为None,不作处理,默认为None。 - - **is_test** (bool,可选) - 表示训练/预测模式。在预测模式(is_test=False)下,遇到不存在的特征,不会初始化及创建,直接以0填充后返回。默认值为False。 - - **entry** (str,可选) - 准入策略配置,目前支持概率准入ProbabilityEntry和频次准入CountFilterEntry。默认为None。 - - **table_class** (str,可选) - 稀疏表的类型,其值可以为CommonSparseTable和SSDSparseTable。默认为CommonSparseTable。 - - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_paddle_ParamAttr`。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_paddle_to_tensor` 进行初始化,即可实现加载自定义或预训练的词向量。 - - **dtype** (str) - 输出Tensor的数据类型,数据类型必须为:float32 或float64,默认为float32。 + - **input** (Variable) - 存储 id 信息的 Tensor,数据类型必须为:int64,输入的 shape 最后一维须为 1。input 中的 id 必须满足 ``0 =< id < size[0]`` 。 + - **size** (tuple|list) - embedding 矩阵的维度(vocab_size,emb_size)。必须包含两个元素,第一个元素为 vocab_size(词表大小),第二个为 emb_size(embedding 层维度)。大规模稀疏场景下,参数规模初始为 0,会随着训练的进行逐步扩展,因此如果 vocab_size 暂时无用,其值可以为任意整数,emb_size 则为词嵌入权重参数的维度配置。 + - **padding_idx** (int|long|None,可选) - padding_idx 需在区间 ``[-vocab_size, vocab_size)``,否则不生效,``padding_idx < 0`` 时,padding_idx 会被改成``vocab_size + padding_idx``,input 中等于 padding_index 的 id 对应的 embedding 信息会被设置为 
0,且这部分填充数据在训练时将不会被更新。如果为 None,不作处理,默认为 None。 + - **is_test** (bool,可选) - 表示训练/预测模式。在预测模式(is_test=False)下,遇到不存在的特征,不会初始化及创建,直接以 0 填充后返回。默认值为 False。 + - **entry** (str,可选) - 准入策略配置,目前支持概率准入 ProbabilityEntry 和频次准入 CountFilterEntry。默认为 None。 + - **table_class** (str,可选) - 稀疏表的类型,其值可以为 CommonSparseTable 和 SSDSparseTable。默认为 CommonSparseTable。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_paddle_ParamAttr`。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为 numpy 数据格式,且保证本地词向量的 shape 和 embedding 的 ``size`` 参数一致,然后使用 :ref:`cn_api_paddle_to_tensor` 进行初始化,即可实现加载自定义或预训练的词向量。 + - **dtype** (str) - 输出 Tensor 的数据类型,数据类型必须为:float32 或 float64,默认为 float32。 返回 :::::::: -Variable,input映射后得到的Embedding Tensor或LoDTensor,数据类型和dtype定义的类型一致。 +Variable,input 映射后得到的 Embedding Tensor 或 LoDTensor,数据类型和 dtype 定义的类型一致。 代码示例 :::::::: diff --git a/docs/api/paddle/static/nn/spectral_norm_cn.rst b/docs/api/paddle/static/nn/spectral_norm_cn.rst index 7fcb038e19a..ed995a43d32 100644 --- a/docs/api/paddle/static/nn/spectral_norm_cn.rst +++ b/docs/api/paddle/static/nn/spectral_norm_cn.rst @@ -7,18 +7,18 @@ spectral_norm **Spectral Normalization Layer** -该OP用于计算了fc、conv1d、conv2d、conv3d层的权重参数的谱正则值,输入权重参数应分别为2-D, 3-D, 4-D, 5-D张量,输出张量与输入张量shape相同。谱特征值计算方式如下。 +该 OP 用于计算了 fc、conv1d、conv2d、conv3d 层的权重参数的谱正则值,输入权重参数应分别为 2-D, 3-D, 4-D, 5-D 张量,输出张量与输入张量 shape 相同。谱特征值计算方式如下。 -步骤1:生成形状为[H]的向量U,以及形状为[W]的向量V,其中H是输入权重张量的第 ``dim`` 个维度,W是剩余维度的乘积。 +步骤 1:生成形状为[H]的向量 U,以及形状为[W]的向量 V,其中 H 是输入权重张量的第 ``dim`` 个维度,W 是剩余维度的乘积。 -步骤2: ``power_iters`` 应该是一个正整数,用U和V迭代计算 ``power_iters`` 轮,迭代步骤如下。 +步骤 2: ``power_iters`` 应该是一个正整数,用 U 和 V 迭代计算 ``power_iters`` 轮,迭代步骤如下。 .. math:: \mathbf{v} &:= \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}\\ \mathbf{u} &:= \frac{\mathbf{W}^{T} \mathbf{v}}{\|\mathbf{W}^{T} \mathbf{v}\|_2} -步骤3:计算 :math:`\sigma(\mathbf{W})` 并特征值值归一化。 +步骤 3:计算 :math:`\sigma(\mathbf{W})` 并特征值值归一化。 .. 
math:: \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ @@ -29,10 +29,10 @@ spectral_norm 参数 ::::::::: - - **weight** (Tensor) - spectral_norm算子的输入权重张量,可以是2-D, 3-D, 4-D, 5-D Tensor,它是fc、conv1d、conv2d、conv3d层的权重,数据类型为float32或float64。 - - **dim** (int,可选) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果input(weight)是fc层的权重,则应设置为0;如果input(weight)是conv层的权重,则应设置为1,默认为0。 - - **power_iters** (int,可选) - 将用于计算spectral norm的功率迭代次数,默认值1。 - - **eps** (float,可选) - epsilon用于保证计算规范中的数值稳定性,分母会加上 ``eps`` 防止除零,默认1e-12。 + - **weight** (Tensor) - spectral_norm 算子的输入权重张量,可以是 2-D, 3-D, 4-D, 5-D Tensor,它是 fc、conv1d、conv2d、conv3d 层的权重,数据类型为 float32 或 float64。 + - **dim** (int,可选) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果 input(weight)是 fc 层的权重,则应设置为 0;如果 input(weight)是 conv 层的权重,则应设置为 1,默认为 0。 + - **power_iters** (int,可选) - 将用于计算 spectral norm 的功率迭代次数,默认值 1。 + - **eps** (float,可选) - epsilon 用于保证计算规范中的数值稳定性,分母会加上 ``eps`` 防止除零,默认 1e-12。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/static/nn/switch_case_cn.rst b/docs/api/paddle/static/nn/switch_case_cn.rst index 8be87d970a9..6b0086c139c 100644 --- a/docs/api/paddle/static/nn/switch_case_cn.rst +++ b/docs/api/paddle/static/nn/switch_case_cn.rst @@ -7,13 +7,13 @@ switch_case .. 
py:function:: paddle.static.nn.switch_case(branch_index, branch_fns, default=None, name=None) -运行方式类似于c++的switch/case。 +运行方式类似于 c++的 switch/case。 参数 :::::::::::: - - **branch_index** (Tensor)- 形状为[1]的Tensor,指定将要执行的分支。数据类型是 ``int32``, ``int64`` 或 ``uint8``。 - - **branch_fns** (dict|list|tuple) - 如果 ``branch_fns`` 是一个list或tuple,它的元素可以是 (int, callable) 二元组,即由整数和可调用对象构成的二元组,整数表示对应的可调用对象的键;也可以仅仅是可调用对象,它在list或者tuple中的实际索引值将作为该可调用对象的键。如果 ``branch_fns`` 是一个字典,那么它的键是整数,它的值是可调用对象。所有的可调用对象都返回相同结构的Tensor。 + - **branch_index** (Tensor)- 形状为[1]的 Tensor,指定将要执行的分支。数据类型是 ``int32``, ``int64`` 或 ``uint8``。 + - **branch_fns** (dict|list|tuple) - 如果 ``branch_fns`` 是一个 list 或 tuple,它的元素可以是 (int, callable) 二元组,即由整数和可调用对象构成的二元组,整数表示对应的可调用对象的键;也可以仅仅是可调用对象,它在 list 或者 tuple 中的实际索引值将作为该可调用对象的键。如果 ``branch_fns`` 是一个字典,那么它的键是整数,它的值是可调用对象。所有的可调用对象都返回相同结构的 Tensor。 - **default** (callable,可选) - 可调用对象,返回一个或多个张量。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 @@ -22,8 +22,8 @@ switch_case Tensor|list(Tensor) -- 如果 ``branch_fns`` 中存在与 ``branch_index`` 匹配的可调用对象,则返回该可调用对象的返回结果;如果 ``branch_fns`` 中不存在与 ``branch_index`` 匹配的可调用对象且 ``default`` 不是None,则返回调用 ``default`` 的返回结果; -- 如果 ``branch_fns`` 中不存在与 ``branch_index`` 匹配的可调用对象且 ``default`` 是None,则返回 ``branch_fns`` 中键值最大的可调用对象的返回结果。 +- 如果 ``branch_fns`` 中存在与 ``branch_index`` 匹配的可调用对象,则返回该可调用对象的返回结果;如果 ``branch_fns`` 中不存在与 ``branch_index`` 匹配的可调用对象且 ``default`` 不是 None,则返回调用 ``default`` 的返回结果; +- 如果 ``branch_fns`` 中不存在与 ``branch_index`` 匹配的可调用对象且 ``default`` 是 None,则返回 ``branch_fns`` 中键值最大的可调用对象的返回结果。 代码示例 :::::::::::: diff --git a/docs/api/paddle/static/nn/while_loop_cn.rst b/docs/api/paddle/static/nn/while_loop_cn.rst index 8ce1d93e243..6309027512c 100644 --- a/docs/api/paddle/static/nn/while_loop_cn.rst +++ b/docs/api/paddle/static/nn/while_loop_cn.rst @@ -7,7 +7,7 @@ ____________________________________ .. 
py:function:: paddle.static.nn.while_loop(cond, body, loop_vars, is_test=False, name=None) -该API用于实现类似while的循环控制功能,只要循环条件 ``cond`` 的返回值为True,``while_loop`` 则会循环执行循环体 ``body``,直到 ``cond`` 的返回值为False。 +该 API 用于实现类似 while 的循环控制功能,只要循环条件 ``cond`` 的返回值为 True,``while_loop`` 则会循环执行循环体 ``body``,直到 ``cond`` 的返回值为 False。 .. note:: ``body`` 中定义的局部变量无法使用 ``Executor`` 的 ``fetch_list`` 来获取的,变量需在 ``body`` 外定义并将其置于 ``loop_vars`` 中进行循环更新后才可通过 ``fetch_list`` 获取。 @@ -15,10 +15,10 @@ ____________________________________ 参数 ::::::::: - - **cond** (callable) - 返回boolean类型张量的可调用函数,用以判断循环是否继续执行。``cond`` 的参数和 ``loop_vars`` 相对应。 - - **body** (callable) - 循环执行的结构体。其返回一个包含tensor或LoDTensorArray的列表或元组,且这些tensor或LoDTensorArray的长度,结构,类型和 ``loop_vars`` 中的相同。且``body`` 的参数与 ``loop_vars`` 相对应。 - - **loop_vars** (list|tuple) - 包含tensor或LoDTensorArray的列表或是元组,将其传入至 ``cond`` 和 ``body`` 中,得到循环条件和输出值。 - - **is_test** (bool,可选) - 用于表明是否在测试阶段执行,默认值为False。 + - **cond** (callable) - 返回 boolean 类型张量的可调用函数,用以判断循环是否继续执行。``cond`` 的参数和 ``loop_vars`` 相对应。 + - **body** (callable) - 循环执行的结构体。其返回一个包含 tensor 或 LoDTensorArray 的列表或元组,且这些 tensor 或 LoDTensorArray 的长度,结构,类型和 ``loop_vars`` 中的相同。且``body`` 的参数与 ``loop_vars`` 相对应。 + - **loop_vars** (list|tuple) - 包含 tensor 或 LoDTensorArray 的列表或是元组,将其传入至 ``cond`` 和 ``body`` 中,得到循环条件和输出值。 + - **is_test** (bool,可选) - 用于表明是否在测试阶段执行,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/static/npu_places_cn.rst b/docs/api/paddle/static/npu_places_cn.rst index ae760f79262..0127f53a6d1 100644 --- a/docs/api/paddle/static/npu_places_cn.rst +++ b/docs/api/paddle/static/npu_places_cn.rst @@ -7,7 +7,7 @@ npu_places .. 
note:: - 多卡任务请先使用 FLAGS_selected_npus 环境变量设置可见的NPU设备。 + 多卡任务请先使用 FLAGS_selected_npus 环境变量设置可见的 NPU 设备。 该接口根据 ``device_ids`` 创建一个或多个 ``paddle.NPUPlace`` 对象,并返回所创建的对象列表。 @@ -15,12 +15,12 @@ npu_places 例如:``FLAGS_selected_npus=0,1,2``,则返回的列表将为 ``[paddle.NPUPlace(0), paddle.NPUPlace(1), paddle.NPUPlace(2)]``。 如果未设置标志 ``FLAGS_selected_npus``,则返回所有可见的 NPU places。 -如果 ``device_ids`` 不是 ``None``,它应该是使用的NPU设备ID的列表或元组。 +如果 ``device_ids`` 不是 ``None``,它应该是使用的 NPU 设备 ID 的列表或元组。 例如:``device_id=[0,1,2]``,返回的列表将是 ``[paddle.NPUPlace(0), paddle.NPUPlace(1), paddle.NPUPlace(2)]``。 参数 ::::::::: - - **device_ids** (list(int)|tuple(int),可选) - NPU的设备ID列表或元组。默认值为 ``None``。 + - **device_ids** (list(int)|tuple(int),可选) - NPU 的设备 ID 列表或元组。默认值为 ``None``。 返回 ::::::::: diff --git a/docs/api/paddle/static/program_guard_cn.rst b/docs/api/paddle/static/program_guard_cn.rst index 6b7ef649821..aa22cd1bd5a 100644 --- a/docs/api/paddle/static/program_guard_cn.rst +++ b/docs/api/paddle/static/program_guard_cn.rst @@ -9,9 +9,9 @@ program_guard -该接口应配合使用python的 ``with`` 语句来将 ``with`` block 里的算子和变量添加进指定的全局主程序(main program)和启动程序(startup program)。 +该接口应配合使用 python 的 ``with`` 语句来将 ``with`` block 里的算子和变量添加进指定的全局主程序(main program)和启动程序(startup program)。 -``with`` 语句块下的各接口将在新的main program(主程序)中添加 operators(算子)和 Tensors(张量)。 +``with`` 语句块下的各接口将在新的 main program(主程序)中添加 operators(算子)和 Tensors(张量)。 参数 :::::::::::: diff --git a/docs/api/paddle/static/py_func_cn.rst b/docs/api/paddle/static/py_func_cn.rst index 44965a9e9ab..2e30b545ff5 100644 --- a/docs/api/paddle/static/py_func_cn.rst +++ b/docs/api/paddle/static/py_func_cn.rst @@ -9,11 +9,11 @@ py_func -PaddlePaddle 通过py_func在Python端注册OP。py_func的设计原理在于Paddle中的Tensor与numpy数组可以方便的互相转换,从而可使用Python中的numpy API来自定义一个Python OP。 +PaddlePaddle 通过 py_func 在 Python 端注册 OP。py_func 的设计原理在于 Paddle 中的 Tensor 与 numpy 数组可以方便的互相转换,从而可使用 Python 中的 numpy API 来自定义一个 Python OP。 -该自定义的Python OP的前向函数是 ``func``,反向函数是 ``backward_func`` 。 Paddle将在前向部分调用 ``func``,并在反向部分调用 ``backward_func`` (如果 
``backward_func`` 不是None)。 ``x`` 为 ``func`` 的输入,必须为Tensor类型;``out`` 为 ``func`` 的输出,既可以是Tensor类型,也可以是numpy数组。 +该自定义的 Python OP 的前向函数是 ``func``,反向函数是 ``backward_func`` 。 Paddle 将在前向部分调用 ``func``,并在反向部分调用 ``backward_func`` (如果 ``backward_func`` 不是 None)。 ``x`` 为 ``func`` 的输入,必须为 Tensor 类型;``out`` 为 ``func`` 的输出,既可以是 Tensor 类型,也可以是 numpy 数组。 -反向函数 ``backward_func`` 的输入依次为:前向输入 ``x`` 、前向输出 ``out`` 、 ``out`` 的梯度。如果 ``out`` 的某些输出没有梯度,则 ``backward_func`` 的相关输入为None。如果 ``x`` 的某些变量没有梯度,则用户应在 ``backward_func`` 中主动返回None。 +反向函数 ``backward_func`` 的输入依次为:前向输入 ``x`` 、前向输出 ``out`` 、 ``out`` 的梯度。如果 ``out`` 的某些输出没有梯度,则 ``backward_func`` 的相关输入为 None。如果 ``x`` 的某些变量没有梯度,则用户应在 ``backward_func`` 中主动返回 None。 在调用该接口之前,还应正确设置 ``out`` 的数据类型和形状,而 ``out`` 和 ``x`` 对应梯度的数据类型和形状将自动推断而出。 @@ -22,11 +22,11 @@ PaddlePaddle 通过py_func在Python端注册OP。py_func的设计原理在于Pad 参数 :::::::::::: - - **func** (callable) - 所注册的Python OP的前向函数,运行网络时,将根据该函数与前向输入 ``x``,计算前向输出 ``out``。在 ``func`` 建议先主动将Tensor转换为numpy数组,方便灵活的使用numpy相关的操作,如果未转换成numpy,则可能某些操作无法兼容。 - - **x** (Tensor|tuple(Tensor)|list[Tensor]) - 前向函数 ``func`` 的输入,多个Tensor以tuple(Tensor)或list[Tensor]的形式传入。 - - **out** (T|tuple(T)|list[T]) - 前向函数 ``func`` 的输出,可以为T|tuple(T)|list[T],其中T既可以为Tensor,也可以为numpy数组。由于Paddle无法自动推断 ``out`` 的形状和数据类型,必须应事先创建 ``out`` 。 - - **backward_func** (callable,可选) - 所注册的Python OP的反向函数。默认值为None,意味着没有反向计算。若不为None,则会在运行网络反向时调用 ``backward_func`` 计算 ``x`` 的梯度。 - - **skip_vars_in_backward_input** (Tensor) - ``backward_func`` 的输入中不需要的变量,可以是Tensor|tuple(Tensor)|list[Tensor]。这些变量必须是 ``x`` 和 ``out`` 中的一个。默认值为None,意味着没有变量需要从 ``x`` 和 ``out`` 中去除。若不为None,则这些变量将不是 ``backward_func`` 的输入。该参数仅在 ``backward_func`` 不为None时有用。 + - **func** (callable) - 所注册的 Python OP 的前向函数,运行网络时,将根据该函数与前向输入 ``x``,计算前向输出 ``out``。在 ``func`` 建议先主动将 Tensor 转换为 numpy 数组,方便灵活的使用 numpy 相关的操作,如果未转换成 numpy,则可能某些操作无法兼容。 + - **x** (Tensor|tuple(Tensor)|list[Tensor]) - 前向函数 ``func`` 的输入,多个 Tensor 以 tuple(Tensor)或 list[Tensor]的形式传入。 + - **out** (T|tuple(T)|list[T]) - 前向函数 ``func`` 的输出,可以为 
T|tuple(T)|list[T],其中 T 既可以为 Tensor,也可以为 numpy 数组。由于 Paddle 无法自动推断 ``out`` 的形状和数据类型,必须应事先创建 ``out`` 。 + - **backward_func** (callable,可选) - 所注册的 Python OP 的反向函数。默认值为 None,意味着没有反向计算。若不为 None,则会在运行网络反向时调用 ``backward_func`` 计算 ``x`` 的梯度。 + - **skip_vars_in_backward_input** (Tensor) - ``backward_func`` 的输入中不需要的变量,可以是 Tensor|tuple(Tensor)|list[Tensor]。这些变量必须是 ``x`` 和 ``out`` 中的一个。默认值为 None,意味着没有变量需要从 ``x`` 和 ``out`` 中去除。若不为 None,则这些变量将不是 ``backward_func`` 的输入。该参数仅在 ``backward_func`` 不为 None 时有用。 返回 :::::::::::: @@ -45,12 +45,12 @@ Tensor|tuple(Tensor)|list[Tensor],前向函数的输出 ``out`` paddle.enable_static() - # 自定义的前向函数,可直接输入LoDTenosor + # 自定义的前向函数,可直接输入 LoDTenosor def tanh(x): return np.tanh(x) - # 在反向函数中跳过前向输入x,返回x的梯度。 - # 必须使用np.array主动将LodTensor转换为numpy,否则"+/-"等操作无法使用 + # 在反向函数中跳过前向输入 x,返回 x 的梯度。 + # 必须使用 np.array 主动将 LodTensor 转换为 numpy,否则"+/-"等操作无法使用 def tanh_grad(y, dy): return np.array(dy) * (1 - np.square(np.array(y))) @@ -74,7 +74,7 @@ Tensor|tuple(Tensor)|list[Tensor],前向函数的输出 ``out`` out=new_hidden, backward_func=tanh_grad, skip_vars_in_backward_input=hidden) - # 用户自定义的调试函数,打印出输入的LodTensor + # 用户自定义的调试函数,打印出输入的 LodTensor paddle.static.py_func(func=debug_func, x=hidden, out=None) prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax') @@ -100,14 +100,14 @@ Tensor|tuple(Tensor)|list[Tensor],前向函数的输出 ``out`` .. 
code-block:: python - # 该示例展示了如何将LoDTensor转化为numpy数组,并利用numpy API来自定义一个OP + # 该示例展示了如何将 LoDTensor 转化为 numpy 数组,并利用 numpy API 来自定义一个 OP import paddle import numpy as np paddle.enable_static() def element_wise_add(x, y): - # 必须先手动将LodTensor转换为numpy数组,否则无法支持numpy的shape操作 + # 必须先手动将 LodTensor 转换为 numpy 数组,否则无法支持 numpy 的 shape 操作 x = np.array(x) y = np.array(y) @@ -133,16 +133,16 @@ Tensor|tuple(Tensor)|list[Tensor],前向函数的输出 ``out`` x = paddle.static.data(name='x', shape=[2,3], dtype='int32') y = paddle.static.data(name='y', shape=[2,3], dtype='int32') - # 创建前向函数的输出变量,必须指明变量名称name/数据类型dtype/维度shape + # 创建前向函数的输出变量,必须指明变量名称 name/数据类型 dtype/维度 shape output = create_tmp_var('output','int32', [3,1]) - # 输入多个LodTensor以list[Variable]或tuple(Variable)形式 + # 输入多个 LodTensor 以 list[Variable]或 tuple(Variable)形式 paddle.static.py_func(func=element_wise_add, x=[x,y], out=output) exe=paddle.static.Executor(paddle.CPUPlace()) exe.run(start_program) - # 给program喂入numpy数组 + # 给 program 喂入 numpy 数组 input1 = np.random.randint(1, 10, size=[2,3], dtype='int32') input2 = np.random.randint(1, 10, size=[2,3], dtype='int32') out = exe.run(main_program, diff --git a/docs/api/paddle/static/save_cn.rst b/docs/api/paddle/static/save_cn.rst index d33aea854c7..8583e316d11 100644 --- a/docs/api/paddle/static/save_cn.rst +++ b/docs/api/paddle/static/save_cn.rst @@ -10,17 +10,17 @@ save 参数包含所有的可训练 :ref:`cn_api_fluid_Variable`,将保存到后缀为 ``.pdparams`` 的文件中。 -优化器信息包含优化器使用的所有Tensor。对于Adam优化器,包含beta1、beta2、momentum等。 -所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的Tensor(如sgd),则不会生成)。 +优化器信息包含优化器使用的所有 Tensor。对于 Adam 优化器,包含 beta1、beta2、momentum 等。 +所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的 Tensor(如 sgd),则不会生成)。 网络描述是程序的描述。它只用于部署。描述将保存到后缀为 ``.pdmodel`` 的文件中。 参数 :::::::::::: - - **program** ( :ref:`cn_api_fluid_Program` ) – 要保存的Program。 - - **model_path** (str) – 保存program的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。 - - **protocol** (int,可选) – pickle模块的协议版本,默认值为4,取值范围是[2,4]。 + - **program** ( :ref:`cn_api_fluid_Program` ) – 
要保存的 Program。 + - **model_path** (str) – 保存 program 的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。 + - **protocol** (int,可选) – pickle 模块的协议版本,默认值为 4,取值范围是[2,4]。 - **\*\*configs** (dict,可选) - 可选的关键字参数。 返回 diff --git a/docs/api/paddle/static/set_ipu_shard_cn.rst b/docs/api/paddle/static/set_ipu_shard_cn.rst index 2251428bb31..181aaf7dddf 100644 --- a/docs/api/paddle/static/set_ipu_shard_cn.rst +++ b/docs/api/paddle/static/set_ipu_shard_cn.rst @@ -10,15 +10,15 @@ set_ipu_shard .. note: -仅支持当enable_manual_shard=True,index设置才有效。请参阅 :ref:`cn_api_fluid_IpuStrategy` 。 -仅支持当enable_pipelining=True,stage设置才有效。请参阅 :ref:`cn_api_fluid_IpuStrategy` 。 -一个index支持对应None stage或一个stage,一个stage仅支持对应一个新的index或者一个重复的index。 +仅支持当 enable_manual_shard=True,index 设置才有效。请参阅 :ref:`cn_api_fluid_IpuStrategy` 。 +仅支持当 enable_pipelining=True,stage 设置才有效。请参阅 :ref:`cn_api_fluid_IpuStrategy` 。 +一个 index 支持对应 None stage 或一个 stage,一个 stage 仅支持对应一个新的 index 或者一个重复的 index。 参数 ::::::::: - **call_func** (Layer|function) - 静态图下的函数或者计算层。 - - **index** (int,可选) - 指定Op在哪个ipu上计算,(如‘0, 1, 2, 3’),默认值-1,表示不指定ipu。 - - **stage** (int,可选) – 指定被切分的模型的计算顺序,(如‘0, 1, 2, 3’),按照数值大小顺序对被切分的模型进行计算,默认值-1,表示没有数据流水计算顺序并按照计算图顺序计算Op。 + - **index** (int,可选) - 指定 Op 在哪个 ipu 上计算,(如‘0, 1, 2, 3’),默认值-1,表示不指定 ipu。 + - **stage** (int,可选) – 指定被切分的模型的计算顺序,(如‘0, 1, 2, 3’),按照数值大小顺序对被切分的模型进行计算,默认值-1,表示没有数据流水计算顺序并按照计算图顺序计算 Op。 返回 ::::::::: diff --git a/docs/api/paddle/static/set_program_state_cn.rst b/docs/api/paddle/static/set_program_state_cn.rst index decd38070b1..c3ead68620f 100644 --- a/docs/api/paddle/static/set_program_state_cn.rst +++ b/docs/api/paddle/static/set_program_state_cn.rst @@ -17,7 +17,7 @@ set_program_state :::::::::::: - **program** (Program) - 需要被设置的 ``Program`` 。 - - **state_dict** (dict) - 存储参数和优化器信息的dict;dict中key的类型为Tensor的名称,value为np.ndarray类型的数据。 + - **state_dict** (dict) - 存储参数和优化器信息的 dict;dict 中 key 的类型为 Tensor 的名称,value 为 np.ndarray 类型的数据。 返回 :::::::::::: diff --git a/docs/api/paddle/static/xpu_places_cn.rst 
b/docs/api/paddle/static/xpu_places_cn.rst index 4e4cd21481e..88900cfd313 100644 --- a/docs/api/paddle/static/xpu_places_cn.rst +++ b/docs/api/paddle/static/xpu_places_cn.rst @@ -7,7 +7,7 @@ xpu_places .. note:: - 多卡任务请先使用 FLAGS_selected_xpus 环境变量设置可见的XPU设备,下个版本将会修正 XPU_VISIBLE_DEVICES 环境变量无效的问题。 + 多卡任务请先使用 FLAGS_selected_xpus 环境变量设置可见的 XPU 设备,下个版本将会修正 XPU_VISIBLE_DEVICES 环境变量无效的问题。 该接口根据 ``device_ids`` 创建一个或多个 ``paddle.XPUPlace`` 对象,并返回所创建的对象列表。 @@ -15,12 +15,12 @@ xpu_places 例如:``FLAGS_selected_xpus=0,1,2``,则返回的列表将为 ``[paddle.XPUPlace(0), paddle.XPUPlace(1), paddle.XPUPlace(2)]``。 如果未设置标志 ``FLAGS_selected_xpus``,则根据 ``XPU_VISIBLE_DEVICES`` 环境变量,返回所有可见的 XPU places。 -如果 ``device_ids`` 不是 ``None``,它应该是使用的XPU设备ID的列表或元组。 +如果 ``device_ids`` 不是 ``None``,它应该是使用的 XPU 设备 ID 的列表或元组。 例如:``device_id=[0,1,2]``,返回的列表将是 ``[paddle.XPUPlace(0), paddle.XPUPlace(1), paddle.XPUPlace(2)]``。 参数 ::::::::: - - **device_ids** (list(int)|tuple(int),可选) - XPU的设备ID列表或元组。默认值为 ``None``。 + - **device_ids** (list(int)|tuple(int),可选) - XPU 的设备 ID 列表或元组。默认值为 ``None``。 返回 ::::::::: diff --git a/docs/api/paddle/std_cn.rst b/docs/api/paddle/std_cn.rst index 6e584afc3e5..5a17b4d684f 100644 --- a/docs/api/paddle/std_cn.rst +++ b/docs/api/paddle/std_cn.rst @@ -9,10 +9,10 @@ std 参数 :::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64。 - - axis (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D`。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算标准差。默认值为None。 - - unbiased (bool,可选) - 是否使用无偏估计来计算标准差。使用 :math:`N` 来代表在 axis 上的维度,如果 ``unbiased`` 为True,则在计算中使用 :math:`N - 1` 作为除数。为 False 时将使用 :math:`N` 作为除数。默认值为True。 - - keepdim (bool,可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为False。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 + - axis (int|list|tuple,可选) - 指定对 ``x`` 
进行计算的轴。``axis`` 可以是 int、list(int)、tuple(int)。如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D 是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算标准差。默认值为 None。 + - unbiased (bool,可选) - 是否使用无偏估计来计算标准差。使用 :math:`N` 来代表在 axis 上的维度,如果 ``unbiased`` 为 True,则在计算中使用 :math:`N - 1` 作为除数。为 False 时将使用 :math:`N` 作为除数。默认值为 True。 + - keepdim (bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 ``keepdim`` 为 True,则输出 Tensor 和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为 1)。否则,输出 Tensor 的形状会在 ``axis`` 上进行 squeeze 操作。默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/strided_slice_cn.rst b/docs/api/paddle/strided_slice_cn.rst index 4e0c09fb9ea..7eb00b3e198 100644 --- a/docs/api/paddle/strided_slice_cn.rst +++ b/docs/api/paddle/strided_slice_cn.rst @@ -6,14 +6,14 @@ strided_slice -strided_slice算子。 +strided_slice 算子。 -沿多个轴生成 ``x`` 的切片,与numpy类似:https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html。该OP使用 ``axes`` 、 ``starts`` 和 ``ends`` 属性来指定轴列表中每个轴的起点和终点位置,并使用此信息来对 ``x`` 切片。如果向 ``starts`` 或 ``ends`` 传递负值如 :math:`-i`,则表示该轴的反向第 :math:`i-1` 个位置(这里以0为初始位置), ``strides`` 表示切片的步长,``strides`` 如果为负数,则按照反方向进行切片。如果传递给 ``starts`` 或 ``ends`` 的值大于n(维度中的元素数目),则表示n。当切片一个未知数量的维度时,建议传入 ``INT_MAX``。 ``axes`` 、 ``starts`` 和 ``ends`` 以及 ``strides`` 四个参数的元素数目必须相等。以下示例将解释切片如何工作: +沿多个轴生成 ``x`` 的切片,与 numpy 类似:https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html。该 OP 使用 ``axes`` 、 ``starts`` 和 ``ends`` 属性来指定轴列表中每个轴的起点和终点位置,并使用此信息来对 ``x`` 切片。如果向 ``starts`` 或 ``ends`` 传递负值如 :math:`-i`,则表示该轴的反向第 :math:`i-1` 个位置(这里以 0 为初始位置), ``strides`` 表示切片的步长,``strides`` 如果为负数,则按照反方向进行切片。如果传递给 ``starts`` 或 ``ends`` 的值大于 n(维度中的元素数目),则表示 n。当切片一个未知数量的维度时,建议传入 ``INT_MAX``。 ``axes`` 、 ``starts`` 和 ``ends`` 以及 ``strides`` 四个参数的元素数目必须相等。以下示例将解释切片如何工作: :: - 示例1: + 示例 1: 给定: data=[[1,2,3,4],[5,6,7,8],] axes=[0,1] @@ -23,7 +23,7 @@ strided_slice算子。 则: result=[[5,6,7],] - 示例2: + 示例 2: 给定: data=[[1,2,3,4],[5,6,7,8],] 
axes=[0,1] @@ -33,12 +33,12 @@ strided_slice算子。 则: result=[[8,7,6],] - 示例3: + 示例 3: 给定: data=[[1,2,3,4],[5,6,7,8],] axes=[0,1] starts=[0,1] - ends=[-1,1000] # 此处-1表示第0维的反向第0个位置,索引值是1。 + ends=[-1,1000] # 此处-1 表示第 0 维的反向第 0 个位置,索引值是 1。 strides =[1,3] 则: result=[[2],] @@ -50,9 +50,9 @@ strided_slice算子。 - **x** (Tensor)- 多维 ``Tensor``,数据类型为 ``bool``, ``float32``,``float64``,``int32``,或 ``int64``。 - **axes** (list|tuple)- 数据类型是 ``int32``。表示进行切片的轴。 - - **starts** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``starts`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``starts`` 的类型是 ``Tensor``,则是1-D ``Tensor``。表示在各个轴上切片的起始索引值。 - - **ends** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``ends`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``ends`` 的类型是 ``Tensor``,则是1-D ``Tensor``。表示在各个轴上切片的结束索引值。 - - **strides** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``strides`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``strides`` 的类型是 ``Tensor``,则是1-D ``Tensor``。表示在各个轴上切片的步长。 + - **starts** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``starts`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``starts`` 的类型是 ``Tensor``,则是 1-D ``Tensor``。表示在各个轴上切片的起始索引值。 + - **ends** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``ends`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``ends`` 的类型是 ``Tensor``,则是 1-D ``Tensor``。表示在各个轴上切片的结束索引值。 + - **strides** (list|tuple|Tensor)- 数据类型是 ``int32``。如果 ``strides`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor``。如果 ``strides`` 的类型是 ``Tensor``,则是 1-D ``Tensor``。表示在各个轴上切片的步长。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/subtract_cn.rst b/docs/api/paddle/subtract_cn.rst index 19c4008ae86..a0246b4f4d5 100644 --- a/docs/api/paddle/subtract_cn.rst +++ b/docs/api/paddle/subtract_cn.rst @@ -14,7 +14,7 @@ subtract out = x - y .. 
note:: - ``paddle.subtract`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.subtract`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 参数 ::::::::: diff --git a/docs/api/paddle/sum_cn.rst b/docs/api/paddle/sum_cn.rst index 294c351e0cd..9c6fac68fa4 100644 --- a/docs/api/paddle/sum_cn.rst +++ b/docs/api/paddle/sum_cn.rst @@ -13,7 +13,7 @@ sum - **x** (Tensor) - 输入变量为多维 Tensor,支持数据类型为 float32、float64、int32、int64。 - **axis** (int|list|tuple,可选) - 求和运算的维度。如果为 None,则计算所有元素的和并返回包含单个元素的 Tensor 变量,否则必须在 :math:`[−rank(x),rank(x)]` 范围内。如果 :math:`axis [i] <0`,则维度将变为 :math:`rank+axis[i]`,默认值为 None。 - **dtype** (str,可选) - 输出变量的数据类型。若参数为空,则输出变量的数据类型和输入变量相同,默认值为 None。 - - **keepdim** (bool) - 是否在输出 Tensor 中保留减小的维度。如 keepdim 为 true,否则结果张量的维度将比输入张量小,默认值为False。 + - **keepdim** (bool) - 是否在输出 Tensor 中保留减小的维度。如 keepdim 为 true,否则结果张量的维度将比输入张量小,默认值为 False。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: diff --git a/docs/api/paddle/sysconfig/get_include_cn.rst b/docs/api/paddle/sysconfig/get_include_cn.rst index bc4fea6e966..9a83b0e9652 100644 --- a/docs/api/paddle/sysconfig/get_include_cn.rst +++ b/docs/api/paddle/sysconfig/get_include_cn.rst @@ -5,7 +5,7 @@ get_include .. py:function:: paddle.sysconfig.get_include() -获取包含飞桨C++头文件的目录。 +获取包含飞桨 C++头文件的目录。 返回 :::::::::: diff --git a/docs/api/paddle/sysconfig/get_lib_cn.rst b/docs/api/paddle/sysconfig/get_lib_cn.rst index 7f925cf7f4d..d4e278547ab 100644 --- a/docs/api/paddle/sysconfig/get_lib_cn.rst +++ b/docs/api/paddle/sysconfig/get_lib_cn.rst @@ -5,7 +5,7 @@ get_lib .. py:function:: paddle.sysconfig.get_lib() -获取包含libpadle_framework的目录。 +获取包含 libpaddle_framework 的目录。 返回 :::::::::: diff --git a/docs/api/paddle/t_cn.rst b/docs/api/paddle/t_cn.rst index 62984fded36..e17982f75c3 100644 --- a/docs/api/paddle/t_cn.rst +++ b/docs/api/paddle/t_cn.rst @@ -5,16 +5,16 @@ t ..
py:function:: paddle.t(input, name=None) -对小于等于2维的Tensor进行数据转置。0维和1维Tensor返回本身,2维Tensor等价于perm设置为0,1的 :ref:`cn_api_fluid_layers_transpose` 函数。 +对小于等于 2 维的 Tensor 进行数据转置。0 维和 1 维 Tensor 返回本身,2 维 Tensor 等价于 perm 设置为 0,1 的 :ref:`cn_api_fluid_layers_transpose` 函数。 参数 :::::::: - **input** (Tensor) - 输入:N维(N<=2)Tensor,可选的数据类型为float16、float32、float64、int32、int64。 + - **input** (Tensor) - 输入:N 维(N<=2)Tensor,可选的数据类型为 float16、float32、float64、int32、int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::: -Tensor,0维和1维Tensor返回本身,2维Tensor返回转置Tensor。 +Tensor,0 维和 1 维 Tensor 返回本身,2 维 Tensor 返回转置 Tensor。 代码示例 :::::::: diff --git a/docs/api/paddle/take_along_axis_cn.rst b/docs/api/paddle/take_along_axis_cn.rst index e06ddf03a5e..9e6fa99c9df 100644 --- a/docs/api/paddle/take_along_axis_cn.rst +++ b/docs/api/paddle/take_along_axis_cn.rst @@ -4,19 +4,19 @@ take_along_axis ------------------------------- .. py:function:: paddle.take_along_axis(arr, indices, axis) -基于输入索引矩阵,沿着指定axis从arr矩阵里选取1d切片。索引矩阵必须和arr矩阵有相同的维度,需要能够broadcast与arr矩阵对齐。 +基于输入索引矩阵,沿着指定 axis 从 arr 矩阵里选取 1d 切片。索引矩阵必须和 arr 矩阵有相同的维度,需要能够 broadcast 与 arr 矩阵对齐。 参数 ::::::::: -- **arr** (Tensor) - 输入的Tensor 作为源矩阵,数据类型为:float32、float64。 -- **indices** (Tensor) - 索引矩阵,包含沿轴提取1d切片的下标,必须和arr矩阵有相同的维度,需要能够broadcast与arr矩阵对齐,数据类型为:int、int64。 +- **arr** (Tensor) - 输入的 Tensor 作为源矩阵,数据类型为:float32、float64。 +- **indices** (Tensor) - 索引矩阵,包含沿轴提取 1d 切片的下标,必须和 arr 矩阵有相同的维度,需要能够 broadcast 与 arr 矩阵对齐,数据类型为:int、int64。 - **axis** (int) - 指定沿着哪个维度获取对应的值,数据类型为:int。 返回 ::::::::: -- **out** (Tensor) - 输出Tensor,包含indeces矩阵选定的元素,与 ``arr`` 数据类型相同。 +- **out** (Tensor) - 输出 Tensor,包含 indices 矩阵选定的元素,与 ``arr`` 数据类型相同。 代码示例 ::::::::: diff --git a/docs/api/paddle/tan_cn.rst b/docs/api/paddle/tan_cn.rst index 65d579a2963..f97bbf0379a 100644 --- a/docs/api/paddle/tan_cn.rst +++ b/docs/api/paddle/tan_cn.rst @@ -4,7 +4,7 @@ tan ------------------------------- ..
py:function:: paddle.tan(x, name=None) -三角函数tangent。 +三角函数 tangent。 输入范围是 `(k*pi-pi/2, k*pi+pi/2)`,输出范围是 `[-inf, inf]` 。 @@ -14,14 +14,14 @@ tan 参数 ::::::::: - - **x** (Tensor) – 该OP的输入为Tensor。数据类型为float32,float64。 + - **x** (Tensor) – 该 OP 的输入为 Tensor。数据类型为 float32,float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor - 该OP的输出为Tensor,数据类型为输入一致。 +Tensor - 该 OP 的输出为 Tensor,数据类型为输入一致。 代码示例 diff --git a/docs/api/paddle/tanh_cn.rst b/docs/api/paddle/tanh_cn.rst index ee238332157..406bf1029f0 100644 --- a/docs/api/paddle/tanh_cn.rst +++ b/docs/api/paddle/tanh_cn.rst @@ -15,12 +15,12 @@ tanh 激活函数 ::::::::: - - **x** (Tensor) - Tanh算子的输入,多维Tensor,数据类型为 float16,float32或float64。 + - **x** (Tensor) - Tanh 算子的输入,多维 Tensor,数据类型为 float16,float32 或 float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -tanh的输出Tensor,和输入有着相同类型和shape。 +tanh 的输出 Tensor,和输入有着相同类型和 shape。 代码示例 diff --git a/docs/api/paddle/tensordot_cn.rst b/docs/api/paddle/tensordot_cn.rst index 0009995de28..ea693606617 100644 --- a/docs/api/paddle/tensordot_cn.rst +++ b/docs/api/paddle/tensordot_cn.rst @@ -5,22 +5,22 @@ tensordot .. py:function:: paddle.tensordot(x, y, axes=2, name=None) -张量缩并运算(Tensor Contraction),即沿着axes给定的多个轴对两个张量对应元素的乘积进行加和操作。 +张量缩并运算(Tensor Contraction),即沿着 axes 给定的多个轴对两个张量对应元素的乘积进行加和操作。 参数 :::::::::::: - **x** (Tensor)- 缩并运算操作的左张量,数据类型为 ``float32`` 或 ``float64``。 - **y** (Tensor)- 缩并运算操作的右张量,与 ``x`` 具有相同的数据类型。 - - **axes** (int|tuple|list|Tensor)- 指定对 ``x`` 和 ``y`` 做缩并运算的轴,默认值为整数2。 + - **axes** (int|tuple|list|Tensor)- 指定对 ``x`` 和 ``y`` 做缩并运算的轴,默认值为整数 2。 1. ``axes`` 可以是一个非负整数。若输入的是一个整数 ``n``,则表示对 ``x`` 的后 ``n`` 个轴和对 ``y`` 的前 ``n`` 个轴进行缩并运算。 - 2. ``axes`` 可以是一个一维的整数tuple或list,表示 ``x`` 和 ``y`` 沿着相同的轴方向进行缩并运算。例如,``axes`` =[0, 1]表示 ``x`` 的前两个轴和 ``y`` 的前两个轴对应进行缩并运算。 + 2. ``axes`` 可以是一个一维的整数 tuple 或 list,表示 ``x`` 和 ``y`` 沿着相同的轴方向进行缩并运算。例如,``axes`` =[0, 1]表示 ``x`` 的前两个轴和 ``y`` 的前两个轴对应进行缩并运算。 - 3. 
``axes`` 可以是一个tuple或list,其中包含一个或两个一维的整数tuple|list|Tensor。如果 ``axes`` 包含一个tuple|list|Tensor,则对 ``x`` 和 ``y`` 的相同轴做缩并运算,具体轴下标由该tuple|list|Tensor中的整数值指定。如果 ``axes`` 包含两个tuple|list|Tensor,则第一个指定 ``x`` 做缩并运算的轴下标,第二个指定 ``y`` 的对应轴下标。如果 ``axes`` 包含两个以上的tuple|list|Tensor,只有前两个会被作为轴下标序列使用,其它的将被忽略。 + 3. ``axes`` 可以是一个 tuple 或 list,其中包含一个或两个一维的整数 tuple|list|Tensor。如果 ``axes`` 包含一个 tuple|list|Tensor,则对 ``x`` 和 ``y`` 的相同轴做缩并运算,具体轴下标由该 tuple|list|Tensor 中的整数值指定。如果 ``axes`` 包含两个 tuple|list|Tensor,则第一个指定 ``x`` 做缩并运算的轴下标,第二个指定 ``y`` 的对应轴下标。如果 ``axes`` 包含两个以上的 tuple|list|Tensor,只有前两个会被作为轴下标序列使用,其它的将被忽略。 - 4. ``axes`` 可以是一个张量,这种情况下该张量会被转换成list,然后应用前述规则确定做缩并运算的轴。请注意,输入Tensor类型的 ``axes`` 只在动态图模式下可用。 + 4. ``axes`` 可以是一个张量,这种情况下该张量会被转换成 list,然后应用前述规则确定做缩并运算的轴。请注意,输入 Tensor 类型的 ``axes`` 只在动态图模式下可用。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 @@ -29,8 +29,8 @@ tensordot .. note:: -1. 本API支持张量维度广播,``x`` 和 ``y`` 做缩并操作的对应维度size必须相等,或适用于广播规则。 -2. 本API支持axes扩展,当指定的 ``x`` 和 ``y`` 两个轴序列长短不一时,短的序列会自动在末尾补充和长序列相同的轴下标。例如,如果输入 ``axes`` =[[0, 1, 2, 3], [1, 0]],则指定 ``x`` 的轴序列是[0, 1, 2, 3],对应 ``y`` 的轴序列会自动从[1,0]扩展成[1, 0, 2, 3]。 +1. 本 API 支持张量维度广播,``x`` 和 ``y`` 做缩并操作的对应维度 size 必须相等,或适用于广播规则。 +2. 本 API 支持 axes 扩展,当指定的 ``x`` 和 ``y`` 两个轴序列长短不一时,短的序列会自动在末尾补充和长序列相同的轴下标。例如,如果输入 ``axes`` =[[0, 1, 2, 3], [1, 0]],则指定 ``x`` 的轴序列是[0, 1, 2, 3],对应 ``y`` 的轴序列会自动从[1,0]扩展成[1, 0, 2, 3]。 代码示例 :::::::::::: diff --git a/docs/api/paddle/text/Conll05st_cn.rst b/docs/api/paddle/text/Conll05st_cn.rst index 9a7dea23f79..f02664699d8 100644 --- a/docs/api/paddle/text/Conll05st_cn.rst +++ b/docs/api/paddle/text/Conll05st_cn.rst @@ -10,20 +10,20 @@ Conll05st 测试数据集的实现。 .. 
note:: - 只支持自动下载公共的 Conll05st测试数据集。 + 只支持自动下载公共的 Conll05st 测试数据集。 参数 ::::::::: - - data_file(str)- 保存数据的路径,如果参数 `download` 设置为True,可设置为None。默认为None。 - - word_dict_file(str)- 保存词典的路径。如果参数 `download` 设置为True,可设置为None。默认为None。 - - verb_dict_file(str)- 保存动词词典的路径。如果参数 `download` 设置为True,可设置为None。默认为None。 - - target_dict_file(str)- 保存目标词典的路径如果参数 `download` 设置为True,可设置为None。默认为None。 - - emb_file(str)- 保存词嵌入词典的文件。只有在 `get_embedding` 能被设置为None 且 `download` 为True时使用。 - - download(bool)- 如果 `data_file` 、 `word_dict_file` 、 `verb_dict_file` 和 `target_dict_file` 未设置,是否下载数据集。默认为True。 + - data_file(str)- 保存数据的路径,如果参数 `download` 设置为 True,可设置为 None。默认为 None。 + - word_dict_file(str)- 保存词典的路径。如果参数 `download` 设置为 True,可设置为 None。默认为 None。 + - verb_dict_file(str)- 保存动词词典的路径。如果参数 `download` 设置为 True,可设置为 None。默认为 None。 + - target_dict_file(str)- 保存目标词典的路径如果参数 `download` 设置为 True,可设置为 None。默认为 None。 + - emb_file(str)- 保存词嵌入词典的文件。只有在 `get_embedding` 能被设置为 None 且 `download` 为 True 时使用。 + - download(bool)- 如果 `data_file` 、 `word_dict_file` 、 `verb_dict_file` 和 `target_dict_file` 未设置,是否下载数据集。默认为 True。 返回值 ::::::::: -``Dataset``,conll05st数据集实例。 +``Dataset``,conll05st 数据集实例。 代码示例 ::::::::: diff --git a/docs/api/paddle/text/Imdb_cn.rst b/docs/api/paddle/text/Imdb_cn.rst index e2d7e874d8a..4b420219b32 100644 --- a/docs/api/paddle/text/Imdb_cn.rst +++ b/docs/api/paddle/text/Imdb_cn.rst @@ -10,15 +10,15 @@ Imdb 参数 ::::::::: - - data_file(str) - 保存压缩数据的路径,如果参数 :attr:`download`设置为True, - 可设置为None。默认为None。 + - data_file(str) - 保存压缩数据的路径,如果参数 :attr:`download`设置为 True, + 可设置为 None。默认为 None。 - mode(str) - 'train' 或'test' 模式。默认为'train'。 - - cutoff(int) - 构建词典的截止大小。默认为Default 150。 - - download(bool) - 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为True。 + - cutoff(int) - 构建词典的截止大小。默认为 Default 150。 + - download(bool) - 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为 True。 返回 ::::::::: -``Dataset``, IMDB数据集实例。 +``Dataset``, IMDB 数据集实例。 代码示例 ::::::::: diff --git a/docs/api/paddle/text/Imikolov_cn.rst 
b/docs/api/paddle/text/Imikolov_cn.rst index b97ba08c5d3..f03bddccd8d 100644 --- a/docs/api/paddle/text/Imikolov_cn.rst +++ b/docs/api/paddle/text/Imikolov_cn.rst @@ -6,21 +6,21 @@ Imikolov .. py:class:: paddle.text.datasets.Imikolov() -该类是对imikolov测试数据集的实现。 +该类是对 imikolov 测试数据集的实现。 参数 ::::::::: - - data_file(str)- 保存数据的路径,如果参数 :attr:`download`设置为True,可设置为None。默认为None。 + - data_file(str)- 保存数据的路径,如果参数 :attr:`download`设置为 True,可设置为 None。默认为 None。 - data_type(str)- 'NGRAM'或'SEQ'。默认为'NGRAM'。 - window_size(int) - 'NGRAM'数据滑动窗口的大小。默认为-1。 - mode(str)- 'train' 'test' mode. Default 'train'. - - min_word_freq(int)- 构建词典的最小词频。默认为50。 - - download(bool)- 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为True。 + - min_word_freq(int)- 构建词典的最小词频。默认为 50。 + - download(bool)- 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为 True。 返回 ::::::::: -``Dataset``,imikolov数据集实例。 +``Dataset``,imikolov 数据集实例。 代码示例 ::::::::: diff --git a/docs/api/paddle/text/Movielens_cn.rst b/docs/api/paddle/text/Movielens_cn.rst index dd3b36b11cc..6337e9b7be1 100644 --- a/docs/api/paddle/text/Movielens_cn.rst +++ b/docs/api/paddle/text/Movielens_cn.rst @@ -12,15 +12,15 @@ Movielens 参数 ::::::::: - - data_file(str)- 保存压缩数据的路径,如果参数 :attr:`download`设置为True,可设置为None。默认为None。 + - data_file(str)- 保存压缩数据的路径,如果参数 :attr:`download`设置为 True,可设置为 None。默认为 None。 - mode(str)- 'train' 或 'test' 模式。默认为'train'。 - - test_ratio(float) - 为测试集划分的比例。默认为0.1。 - - rand_seed(int)- 随机数种子。默认为0。 - - download(bool)- 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为True。 + - test_ratio(float) - 为测试集划分的比例。默认为 0.1。 + - rand_seed(int)- 随机数种子。默认为 0。 + - download(bool)- 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为 True。 返回值 ::::::::: - ``Dataset``,Movielens 1-M数据集实例。 + ``Dataset``,Movielens 1-M 数据集实例。 代码示例 ::::::::: diff --git a/docs/api/paddle/text/Overview_cn.rst b/docs/api/paddle/text/Overview_cn.rst index 97c50285123..3daac74d806 100644 --- a/docs/api/paddle/text/Overview_cn.rst +++ b/docs/api/paddle/text/Overview_cn.rst @@ -3,34 +3,34 @@ paddle.text --------------------- -paddle.text 
目录是飞桨在文本领域的高层API。有Paddle内置以及PaddleNLP中提供的两种。具体如下: +paddle.text 目录是飞桨在文本领域的高层 API。有 Paddle 内置以及 PaddleNLP 中提供的两种。具体如下: -- :ref:`内置数据集相关API ` -- :ref:`PaddleNLP提供的API ` +- :ref:`内置数据集相关 API ` +- :ref:`PaddleNLP 提供的 API ` .. _about_datasets: -内置数据集相关API +内置数据集相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`Conll05st ` ", "Conll05st数据集" - " :ref:`Imdb ` ", "Imdb数据集" - " :ref:`Imikolov ` ", "Imikolov数据集" - " :ref:`Movielens ` ", "Movielens数据集" - " :ref:`UCIHousing ` ", "UCIHousing数据集" - " :ref:`WMT14 ` ", "WMT14数据集" - " :ref:`WMT16 ` ", "WMT16数据集" + " :ref:`Conll05st ` ", "Conll05st 数据集" + " :ref:`Imdb ` ", "Imdb 数据集" + " :ref:`Imikolov ` ", "Imikolov 数据集" + " :ref:`Movielens ` ", "Movielens 数据集" + " :ref:`UCIHousing ` ", "UCIHousing 数据集" + " :ref:`WMT14 ` ", "WMT14 数据集" + " :ref:`WMT16 ` ", "WMT16 数据集" .. _about_paddlenlp: -PaddleNLP提供的API +PaddleNLP 提供的 API :::::::::::::::::::: -PaddleNLP 提供了在文本任务上简洁易用的全流程API,旨在为飞桨开发者提升文本领域建模效率。深度适配飞桨框架,提供基于最新版Paddle的NLP领域最佳实践。 +PaddleNLP 提供了在文本任务上简洁易用的全流程 API,旨在为飞桨开发者提升文本领域建模效率。深度适配飞桨框架,提供基于最新版 Paddle 的 NLP 领域最佳实践。 安装命令: @@ -39,14 +39,14 @@ PaddleNLP 提供了在文本任务上简洁易用的全流程API,旨在为飞 pip install --upgrade paddlenlp -i https://pypi.org/simple -可参考PaddleNLP `GitHub `_ 以及 `文档 `_ +可参考 PaddleNLP `GitHub `_ 以及 `文档 `_ .. 
csv-table:: - :header: "API模块", "功能简介", "API用法简单示例" + :header: "API 模块", "功能简介", "API 用法简单示例" :widths: 10, 20, 20 - " `paddlenlp.datasets `_ ", "数据集相关API,包含自定义数据集,数据集贡献与数据集快速加载等功能", " ``train_ds = paddlenlp.datasets.load_dataset('ptb', splits='train')`` " - " `paddlenlp.data `_ ", "文本数据处理Pipeline的相关API", "见链接文档" - " `paddlenlp.transformers `_ ", "基于Transformer结构相关的预训练模型API,包含ERNIE, BERT, RoBERTa, Electra等主流经典结构和下游任务", " ``model = paddlenlp.transformers.BertForSequenceClassification.from_pretrained('bert-wwm-chinese', num_classes=2)`` " - " `paddlenlp.metrics `_", "提供了文本任务上的一些模型评价指标,例如Perplexity、GlLUE中用到的评估器、BLEU、Rouge等,与飞桨高层API兼容", " ``metric = paddlenlp.metrics.AccuracyAndF1()`` " - " `paddlenlp.embeddings `_", "词向量相关API,支持一键快速加载包预训练的中文词向量,VisualDL高维可视化等功能", " ``token_embedding = paddlenlp.embeddings.TokenEmbedding(embedding_name='fasttext.wiki-news.target.word-word.dim300.en')`` " + " `paddlenlp.datasets `_ ", "数据集相关 API,包含自定义数据集,数据集贡献与数据集快速加载等功能", " ``train_ds = paddlenlp.datasets.load_dataset('ptb', splits='train')`` " + " `paddlenlp.data `_ ", "文本数据处理 Pipeline 的相关 API", "见链接文档" + " `paddlenlp.transformers `_ ", "基于 Transformer 结构相关的预训练模型 API,包含 ERNIE, BERT, RoBERTa, Electra 等主流经典结构和下游任务", " ``model = paddlenlp.transformers.BertForSequenceClassification.from_pretrained('bert-wwm-chinese', num_classes=2)`` " + " `paddlenlp.metrics `_", "提供了文本任务上的一些模型评价指标,例如 Perplexity、GLUE 中用到的评估器、BLEU、Rouge 等,与飞桨高层 API 兼容", " ``metric = paddlenlp.metrics.AccuracyAndF1()`` " + " `paddlenlp.embeddings `_", "词向量相关 API,支持一键快速加载包预训练的中文词向量,VisualDL 高维可视化等功能", " ``token_embedding = paddlenlp.embeddings.TokenEmbedding(embedding_name='fasttext.wiki-news.target.word-word.dim300.en')`` " diff --git a/docs/api/paddle/text/UCIHousing_cn.rst b/docs/api/paddle/text/UCIHousing_cn.rst index e8c430345ad..83c6f42af2a 100644 --- a/docs/api/paddle/text/UCIHousing_cn.rst +++ b/docs/api/paddle/text/UCIHousing_cn.rst @@ -12,13 +12,13 @@ UCIHousing 参数 ::::::::: - - data_file(str)- 保存数据的路径,如果参数
:attr:`download`设置为True,可设置为None。默认为None。 + - data_file(str)- 保存数据的路径,如果参数 :attr:`download`设置为 True,可设置为 None。默认为 None。 - mode(str)- 'train'或'test'模式。默认为'train'。 - - download(bool)- 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为True。 + - download(bool)- 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为 True。 返回值 ::::::::: -``Dataset``,UCI housing数据集实例。 +``Dataset``,UCI housing 数据集实例。 代码示例 ::::::::: diff --git a/docs/api/paddle/text/ViterbiDecoder_cn.rst b/docs/api/paddle/text/ViterbiDecoder_cn.rst index b10b772bfbc..2923b766e80 100644 --- a/docs/api/paddle/text/ViterbiDecoder_cn.rst +++ b/docs/api/paddle/text/ViterbiDecoder_cn.rst @@ -8,19 +8,19 @@ ViterbiDecoder 参数 ::::::::: - - **transitions (Tensor)** 转移概率。形状为[num_tags, num_tags],数据类型为float32或float64。 - - **include_bos_eos_tag (bool,可选)** 是否包含前置、后置标签。如果设为True,**transition_params** 中倒数第一列为前置标签的转移概率,倒数第二列为后置标签的转移概率。默认值为True。 + - **transitions (Tensor)** 转移概率。形状为[num_tags, num_tags],数据类型为 float32 或 float64。 + - **include_bos_eos_tag (bool,可选)** 是否包含前置、后置标签。如果设为 True,**transition_params** 中倒数第一列为前置标签的转移概率,倒数第二列为后置标签的转移概率。默认值为 True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 形状 ::::::::: - - **potentials (Tensor)** 发射概率。形状为[batch_size, sequence_length, num_tags],数据类型为float32或float64。 - - **lengths (Tensor)** 序列真实长度。形状为[batch_size],数据类型为int64。 + - **potentials (Tensor)** 发射概率。形状为[batch_size, sequence_length, num_tags],数据类型为 float32 或 float64。 + - **lengths (Tensor)** 序列真实长度。形状为[batch_size],数据类型为 int64。 返回 ::::::::: - - **scores (Tensor)** Viterbi路径的最高得分。形状为[batch_size],数据类型为float32或float64。 - - **paths (Tensor)** Viterbi路径。形状为[batch_size, sequence_length],数据类型为int64。 + - **scores (Tensor)** Viterbi 路径的最高得分。形状为[batch_size],数据类型为 float32 或 float64。 + - **paths (Tensor)** Viterbi 路径。形状为[batch_size, sequence_length],数据类型为 int64。 代码示例 ::::::::: diff --git a/docs/api/paddle/text/WMT14_cn.rst b/docs/api/paddle/text/WMT14_cn.rst index d56497fdd83..1c990345e5e 100644 --- a/docs/api/paddle/text/WMT14_cn.rst +++ 
b/docs/api/paddle/text/WMT14_cn.rst @@ -7,27 +7,27 @@ WMT14 该类是对 `WMT14 `_ 测试数据集实现。 -由于原始WMT14数据集太大,我们在这里提供了一组小数据集。该类将从 +由于原始 WMT14 数据集太大,我们在这里提供了一组小数据集。该类将从 http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz 下载数据集。 参数 ::::::::: - - **data_file**(str)- 保存数据集压缩文件的路径,如果参数:attr: `download` 设置为True,可设置为None。默认为None。 + - **data_file**(str)- 保存数据集压缩文件的路径,如果参数:attr: `download` 设置为 True,可设置为 None。默认为 None。 - **mode**(str)- 'train','test' 或'gen'。默认为'train'。 - **dict_size**(int)- 词典大小。默认为-1。 - - **download**(bool)- 如果:attr: `data_file` 未设置,是否自动下载数据集。默认为True。 + - **download**(bool)- 如果:attr: `data_file` 未设置,是否自动下载数据集。默认为 True。 返回值 ::::::::: -``Dataset``,WMT14数据集实例。 +``Dataset``,WMT14 数据集实例。 - - **src_ids** (np.array) - 源语言当前的token id序列。 - - **trg_ids** (np.array) - 目标语言当前的token id序列。 - - **trg_ids_next** (np.array) - 目标语言下一段的token id序列。 + - **src_ids** (np.array) - 源语言当前的 token id 序列。 + - **trg_ids** (np.array) - 目标语言当前的 token id 序列。 + - **trg_ids_next** (np.array) - 目标语言下一段的 token id 序列。 代码示例 ::::::::: diff --git a/docs/api/paddle/text/WMT16_cn.rst b/docs/api/paddle/text/WMT16_cn.rst index 41632240ac1..92d32dd64dc 100644 --- a/docs/api/paddle/text/WMT16_cn.rst +++ b/docs/api/paddle/text/WMT16_cn.rst @@ -7,27 +7,27 @@ WMT16 该类是对 `WMT16 `_ 测试数据集实现。 -ACL2016多模态机器翻译。有关更多详细信息,请访问此网站: +ACL2016 多模态机器翻译。有关更多详细信息,请访问此网站: http://www.statmt.org/wmt16/multimodal-task.html#task1 如果您任务中使用了该数据集,请引用论文:`Multi30K: Multilingual English-German Image Descriptions. 
`_ 。 参数 ::::::::: - - **data_file**(str)- 保存数据集压缩文件的路径,如果参数 :attr:`download`设置为True,可设置为None。默认值为None。 + - **data_file**(str)- 保存数据集压缩文件的路径,如果参数 :attr:`download`设置为 True,可设置为 None。默认值为 None。 - **mode**(str)- 'train','test' 或 'val'。默认为'train'。 - **src_dict_size**(int)- 源语言词典大小。默认为-1。 - **trg_dict_size**(int) - 目标语言测点大小。默认为-1。 - **lang**(str)- 源语言,'en' 或 'de'。默认为 'en'。 - - **download**(bool)- 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为True。 + - **download**(bool)- 如果 :attr:`data_file`未设置,是否自动下载数据集。默认为 True。 返回值 ::::::::: -``Dataset``,WMT16数据集实例。实例一共有三个字段: +``Dataset``,WMT16 数据集实例。实例一共有三个字段: - - **src_ids** (np.array) - 源语言当前的token id序列。 - - **trg_ids** (np.array) - 目标语言当前的token id序列。 - - **trg_ids_next** (np.array) - 目标语言下一段的token id序列。 + - **src_ids** (np.array) - 源语言当前的 token id 序列。 + - **trg_ids** (np.array) - 目标语言当前的 token id 序列。 + - **trg_ids_next** (np.array) - 目标语言下一段的 token id 序列。 代码示例 ::::::::: diff --git a/docs/api/paddle/text/viterbi_decode_cn.rst b/docs/api/paddle/text/viterbi_decode_cn.rst index 1897401b6ad..81e3c126701 100644 --- a/docs/api/paddle/text/viterbi_decode_cn.rst +++ b/docs/api/paddle/text/viterbi_decode_cn.rst @@ -4,22 +4,22 @@ viterbi_decode ------------------------------- .. 
py:function:: paddle.text.viterbi_decode(potentials, transition_params, lengths, include_bos_eos_tag=True, name=None) -该层利用输入的发射概率和转移概率进行解码。通过用Viterbi算法,动态地寻找隐藏状态最可能的序列,该序列也被称为 Viterbi 路径(Viterbi path),从而得到观察标签 (tags) 序列。 +该层利用输入的发射概率和转移概率进行解码。通过用 Viterbi 算法,动态地寻找隐藏状态最可能的序列,该序列也被称为 Viterbi 路径(Viterbi path),从而得到观察标签 (tags) 序列。 参数 ::::::::: - - **potentials (Tensor)** 发射概率。形状为[batch_size, lengths, num_tags],数据类型为float32或float64。 - - **transition_params (Tensor)** 转移概率。形状为[num_tags, num_tags],数据类型为float32或float64。 - - **lengths (Tensor)** 序列真实长度。形状为[batch_size],数据类型为int64。 - - **include_bos_eos_tag (bool,可选)** 是否包含前置、后置标签。如果设为True,**transition_params** 中倒数第一列为前置标签的转移概率,倒数第二列为后置标签的转移概率。默认值为True。 + - **potentials (Tensor)** 发射概率。形状为[batch_size, lengths, num_tags],数据类型为 float32 或 float64。 + - **transition_params (Tensor)** 转移概率。形状为[num_tags, num_tags],数据类型为 float32 或 float64。 + - **lengths (Tensor)** 序列真实长度。形状为[batch_size],数据类型为 int64。 + - **include_bos_eos_tag (bool,可选)** 是否包含前置、后置标签。如果设为 True,**transition_params** 中倒数第一列为前置标签的转移概率,倒数第二列为后置标签的转移概率。默认值为 True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - - **scores (Tensor)** Viterbi路径的最高得分。形状为[batch_size],数据类型为float32或float64。 - - **paths (Tensor)** Viterbi路径。形状为[batch_size, lengths],数据类型为int64。 + - **scores (Tensor)** Viterbi 路径的最高得分。形状为[batch_size],数据类型为 float32 或 float64。 + - **paths (Tensor)** Viterbi 路径。形状为[batch_size, lengths],数据类型为 int64。 代码示例 ::::::::: diff --git a/docs/api/paddle/tile_cn.rst b/docs/api/paddle/tile_cn.rst index 81cc5e953f1..76307498913 100644 --- a/docs/api/paddle/tile_cn.rst +++ b/docs/api/paddle/tile_cn.rst @@ -7,17 +7,17 @@ tile 根据参数 ``repeat_times`` 对输入 ``x`` 的各维度进行复制。平铺后,输出的第 ``i`` 个维度的值等于 ``x.shape[i]*repeat_times[i]`` 。 -``x`` 的维数和 ``repeat_times`` 中的元素数量应小于等于6。 +``x`` 的维数和 ``repeat_times`` 中的元素数量应小于等于 6。 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:bool、float32、float64、int32或int64。 - - repeat_times (list|tuple|Tensor) - 指定输入 ``x`` 每个维度的复制次数。如果 
``repeat_times`` 的类型是list或tuple,它的元素可以是整数或者数据类型为int32的1-D Tensor。如果 ``repeat_times`` 的类型是Tensor,则是数据类型为int32的1-D Tensor。 + - x (Tensor) - 输入的 Tensor,数据类型为:bool、float32、float64、int32 或 int64。 + - repeat_times (list|tuple|Tensor) - 指定输入 ``x`` 每个维度的复制次数。如果 ``repeat_times`` 的类型是 list 或 tuple,它的元素可以是整数或者数据类型为 int32 的 1-D Tensor。如果 ``repeat_times`` 的类型是 Tensor,则是数据类型为 int32 的 1-D Tensor。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -``Tensor``,数据类型与 ``x`` 相同。返回值的第i维的大小等于 ``x[i] * repeat_times[i]`` 。 +``Tensor``,数据类型与 ``x`` 相同。返回值的第 i 维的大小等于 ``x[i] * repeat_times[i]`` 。 代码示例 ::::::::: diff --git a/docs/api/paddle/to_tensor_cn.rst b/docs/api/paddle/to_tensor_cn.rst index fc6680f3a35..a8c77d7b05e 100644 --- a/docs/api/paddle/to_tensor_cn.rst +++ b/docs/api/paddle/to_tensor_cn.rst @@ -6,7 +6,7 @@ to_tensor .. py:function:: paddle.to_tensor(data, dtype=None, place=None, stop_gradient=True) -通过已知的 ``data`` 来创建一个 Tensor,Tensor类型为 ``paddle.Tensor``。 +通过已知的 ``data`` 来创建一个 Tensor,Tensor 类型为 ``paddle.Tensor``。 ``data`` 可以是 scalar,tuple,list,numpy\.ndarray,paddle\.Tensor。 如果 ``data`` 已经是一个 Tensor,且 ``dtype`` 、 ``place`` 没有发生变化,将不会发生 Tensor 的拷贝并返回原来的 Tensor。 @@ -17,8 +17,8 @@ to_tensor - **data** (scalar|tuple|list|ndarray|Tensor) - 初始化 Tensor 的数据,可以是 scalar,list,tuple,numpy\.ndarray,paddle\.Tensor 类型。 - **dtype** (str,可选) - 创建 Tensor 的数据类型,可以是 bool、float16、float32、float64、int8、int16、int32、int64、uint8、complex64、complex128。 - 默认值为None,如果 ``data`` 为 python 浮点类型,则从 :ref:`cn_api_paddle_framework_get_default_dtype` 获取类型,如果 ``data`` 为其他类型,则会自动推导类型。 - - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace,可选) - 创建tensor的设备位置,可以是 CPUPlace、CUDAPinnedPlace、CUDAPlace。默认值为 None,使用全局的 place。 + 默认值为 None,如果 ``data`` 为 python 浮点类型,则从 :ref:`cn_api_paddle_framework_get_default_dtype` 获取类型,如果 ``data`` 为其他类型,则会自动推导类型。 + - **place** (CPUPlace|CUDAPinnedPlace|CUDAPlace,可选) - 创建 tensor 的设备位置,可以是 CPUPlace、CUDAPinnedPlace、CUDAPlace。默认值为 None,使用全局的 place。 - **stop_gradient** 
(bool,可选) - 是否阻断 Autograd 的梯度传导。默认值为 True,此时不进行梯度传传导。 返回 diff --git a/docs/api/paddle/tolist_cn.rst b/docs/api/paddle/tolist_cn.rst index b453d8acb71..874504786de 100644 --- a/docs/api/paddle/tolist_cn.rst +++ b/docs/api/paddle/tolist_cn.rst @@ -5,7 +5,7 @@ tolist .. py:function:: paddle.tolist(x) -将paddle Tensor转化为python list,注意:只适用于动态图。 +将 paddle Tensor 转化为 python list,注意:只适用于动态图。 .. code-block:: text @@ -18,7 +18,7 @@ tolist 返回 ::::::::: -Tensor对应结构的list。 +Tensor 对应结构的 list。 diff --git a/docs/api/paddle/topk_cn.rst b/docs/api/paddle/topk_cn.rst index bb67865d87e..83053573403 100644 --- a/docs/api/paddle/topk_cn.rst +++ b/docs/api/paddle/topk_cn.rst @@ -12,7 +12,7 @@ topk ::::::::: - **x** (Tensor) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int32、int64。 - **k** (int,Tensor) - 在指定的轴上进行 top 寻找的数量。 - - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 + - **axis** (int,可选) - 指定对输入 Tensor 进行运算的轴, ``axis`` 的有效范围是[-R, R),R 是输入 ``x`` 的 Rank, ``axis`` 为负时与 ``axis`` + R 等价。默认值为-1。 - **largest** (bool,可选) - 指定算法排序的方向。如果设置为 True,排序算法按照降序的算法排序,否则按照升序排序。默认值为 True。 - **sorted** (bool,可选) - 控制返回的结果是否按照有序返回,默认为 True。在 GPU 上总是返回有序的结果。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/transpose_cn.rst b/docs/api/paddle/transpose_cn.rst index f40f874322c..085f9bf3062 100644 --- a/docs/api/paddle/transpose_cn.rst +++ b/docs/api/paddle/transpose_cn.rst @@ -8,18 +8,18 @@ transpose -根据perm对输入的多维Tensor进行数据重排。返回多维Tensor的第i维对应输入Tensor的perm[i]维。 +根据 perm 对输入的多维 Tensor 进行数据重排。返回多维 Tensor 的第 i 维对应输入 Tensor 的 perm[i]维。 参数 :::::::::::: - - **x** (Tensor) - 输入:x:[N_1, N_2, ..., N_k, D]多维Tensor,可选的数据类型为bool, float16, float32, float64, int32, int64。 - - **perm** (list|tuple) - perm长度必须和X的维度相同,并依照perm中数据进行重排。 + - **x** (Tensor) - 输入:x:[N_1, N_2, ..., N_k, D]多维 Tensor,可选的数据类型为 bool, float16, float32, float64, int32, int64。 + - **perm** (list|tuple) - perm 长度必须和 X 的维度相同,并依照 perm 
中数据进行重排。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -多维Tensor +多维 Tensor 代码示例 @@ -31,14 +31,14 @@ transpose [[13 14 15 16] [17 18 19 20] [21 22 23 24]]] shape(x) = [2,3,4] - # 例0 + # 例 0 perm0 = [1,0,2] y_perm0 = [[[ 1 2 3 4] [13 14 15 16]] [[ 5 6 7 8] [17 18 19 20]] [[ 9 10 11 12] [21 22 23 24]]] shape(y_perm0) = [3,2,4] - # 例1 + # 例 1 perm1 = [2,1,0] y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]] [[ 2 14] [ 6 18] [10 22]] diff --git a/docs/api/paddle/tril_cn.rst b/docs/api/paddle/tril_cn.rst index b0863121b3f..b81601dbc36 100644 --- a/docs/api/paddle/tril_cn.rst +++ b/docs/api/paddle/tril_cn.rst @@ -8,13 +8,13 @@ tril -返回输入矩阵 ``input`` 的下三角部分,其余部分被设为0。 +返回输入矩阵 ``input`` 的下三角部分,其余部分被设为 0。 矩形的下三角部分被定义为对角线上和下方的元素。 参数 ::::::::: - - **input** (Tensor) - 输入Tensor input,数据类型支持 bool、float32、float64、int32、int64。 - - **diagonal** (int,可选) - 指定的对角线,默认值为0。如果 diagonal = 0,表示主对角线;如果 diagonal 是正数,表示主对角线之上的对角线;如果 diagonal 是负数,表示主对角线之下的对角线。 + - **input** (Tensor) - 输入 Tensor input,数据类型支持 bool、float32、float64、int32、int64。 + - **diagonal** (int,可选) - 指定的对角线,默认值为 0。如果 diagonal = 0,表示主对角线;如果 diagonal 是正数,表示主对角线之上的对角线;如果 diagonal 是负数,表示主对角线之下的对角线。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/tril_indices_cn.rst b/docs/api/paddle/tril_indices_cn.rst index ae8c253181c..3f0357654f0 100644 --- a/docs/api/paddle/tril_indices_cn.rst +++ b/docs/api/paddle/tril_indices_cn.rst @@ -17,11 +17,11 @@ tril_indices + 如果 offset > 0,取主对角线右上的对角线。 + 如果 offset < 0,取主对角线左下的对角线。 - - **dtype** (int,可选) - 指定输出张量的数据类型,默认值为int64。 + - **dtype** (int,可选) - 指定输出张量的数据类型,默认值为 int64。 返回 ::::::::: -Tensor,二维矩阵的下三角矩阵行坐标和列坐标。数据类型和参数dtype一致。 +Tensor,二维矩阵的下三角矩阵行坐标和列坐标。数据类型和参数 dtype 一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/triu_cn.rst b/docs/api/paddle/triu_cn.rst index b0b709d0301..8be09bde6cc 100644 --- a/docs/api/paddle/triu_cn.rst +++ b/docs/api/paddle/triu_cn.rst @@ -6,13 +6,13 @@ triu .. 
py:function:: paddle.triu(input, diagonal=0, name=None) -返回输入矩阵 `input` 的上三角部分,其余部分被设为0。 +返回输入矩阵 `input` 的上三角部分,其余部分被设为 0。 矩形的上三角部分被定义为对角线上和上方的元素。 参数 ::::::::: - - **input** (Tensor):输入Tensor input,数据类型支持 `float32`, `float64`, `int32`, `int64` 。 - - **diagonal** (int,可选):指定的对角线,默认值为0。如果diagonal = 0,表示主对角线;如果diagonal是正数,表示主对角线之上的对角线;如果diagonal是负数,表示主对角线之下的对角线。 + - **input** (Tensor):输入 Tensor input,数据类型支持 `float32`, `float64`, `int32`, `int64` 。 + - **diagonal** (int,可选):指定的对角线,默认值为 0。如果 diagonal = 0,表示主对角线;如果 diagonal 是正数,表示主对角线之上的对角线;如果 diagonal 是负数,表示主对角线之下的对角线。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/trunc_cn.rst b/docs/api/paddle/trunc_cn.rst index 7c5eac665b7..340502071fe 100644 --- a/docs/api/paddle/trunc_cn.rst +++ b/docs/api/paddle/trunc_cn.rst @@ -6,12 +6,12 @@ trunc .. py:function:: paddle.trunc(input, name=None) -将输入 `Tensor` 的小数部分置0,返回置0后的 `Tensor`,如果输入 `Tensor` 的数据类型为整数,则不做处理。 +将输入 `Tensor` 的小数部分置 0,返回置 0 后的 `Tensor`,如果输入 `Tensor` 的数据类型为整数,则不做处理。 参数 ::::::::: - - **input** (Tensor):输入变量,类型为 Tensor,支持int32、int64、float32、float64数据类型。 + - **input** (Tensor):输入变量,类型为 Tensor,支持 int32、int64、float32、float64 数据类型。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 diff --git a/docs/api/paddle/unbind_cn.rst b/docs/api/paddle/unbind_cn.rst index 9f8c0563779..654e7996561 100644 --- a/docs/api/paddle/unbind_cn.rst +++ b/docs/api/paddle/unbind_cn.rst @@ -12,8 +12,8 @@ unbind 参数 ::::::::: - - **input** (Tensor) - 输入变量,数据类型为 float32、float64、int32、int64的多维 Tensor。 - - **axis** (int32|int64,可选) - 数据类型为 int32 或 int64,表示需要分割的维度。如果 axis < 0,则划分的维度为 rank(input) + axis。默认值为0。 + - **input** (Tensor) - 输入变量,数据类型为 float32、float64、int32、int64 的多维 Tensor。 + - **axis** (int32|int64,可选) - 数据类型为 int32 或 int64,表示需要分割的维度。如果 axis < 0,则划分的维度为 rank(input) + axis。默认值为 0。 返回 ::::::::: diff --git a/docs/api/paddle/uniform_cn.rst b/docs/api/paddle/uniform_cn.rst index 5145a6c0d32..6bc61cf4f02 100644 --- 
a/docs/api/paddle/uniform_cn.rst +++ b/docs/api/paddle/uniform_cn.rst @@ -8,11 +8,11 @@ uniform -返回数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 +返回数值服从范围[``min``, ``max``)内均匀分布的随机 Tensor,形状为 ``shape``,数据类型为 ``dtype``。 .. code-block:: text - 示例1: + 示例 1: 给定: shape=[1,2] 则输出为: @@ -21,17 +21,17 @@ uniform 参数 :::::::::::: - - **shape** (list|tuple|Tensor) - 生成的随机Tensor的形状。如果 ``shape`` 是list、tuple,则其中的元素可以是int,或者是形状为[1]且数据类型为int32、int64的Tensor。如果 ``shape`` 是Tensor,则是数据类型为int32、int64的1-D Tensor。 - - **dtype** (str|np.dtype,可选) - 输出Tensor的数据类型,支持float32、float64。默认值为None。 - - **min** (float|int,可选) - 要生成的随机值范围的下限,min包含在范围中。支持的数据类型:float、int。默认值为-1.0。 - - **max** (float|int,可选) - 要生成的随机值范围的上限,max不包含在范围中。支持的数据类型:float、int。默认值为1.0。 - - **seed** (int,可选) - 随机种子,用于生成样本。0表示使用系统生成的种子。注意如果种子不为0,该操作符每次都生成同样的随机数。支持的数据类型:int。默认为 0。 + - **shape** (list|tuple|Tensor) - 生成的随机 Tensor 的形状。如果 ``shape`` 是 list、tuple,则其中的元素可以是 int,或者是形状为[1]且数据类型为 int32、int64 的 Tensor。如果 ``shape`` 是 Tensor,则是数据类型为 int32、int64 的 1-D Tensor。 + - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 float32、float64。默认值为 None。 + - **min** (float|int,可选) - 要生成的随机值范围的下限,min 包含在范围中。支持的数据类型:float、int。默认值为-1.0。 + - **max** (float|int,可选) - 要生成的随机值范围的上限,max 不包含在范围中。支持的数据类型:float、int。默认值为 1.0。 + - **seed** (int,可选) - 随机种子,用于生成样本。0 表示使用系统生成的种子。注意如果种子不为 0,该操作符每次都生成同样的随机数。支持的数据类型:int。默认为 0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -Tensor:数值服从范围[``min``, ``max``)内均匀分布的随机Tensor,形状为 ``shape``,数据类型为 ``dtype``。 +Tensor:数值服从范围[``min``, ``max``)内均匀分布的随机 Tensor,形状为 ``shape``,数据类型为 ``dtype``。 代码示例 diff --git a/docs/api/paddle/unique_cn.rst b/docs/api/paddle/unique_cn.rst index 6c31e3a3eea..38a090a7569 100644 --- a/docs/api/paddle/unique_cn.rst +++ b/docs/api/paddle/unique_cn.rst @@ -5,7 +5,7 @@ unique .. 
py:function:: paddle.unique(x, return_index=False, return_inverse=False, return_counts=False, axis=None, dtype="int64", name=None) -返回Tensor按升序排序后的独有元素。 +返回 Tensor 按升序排序后的独有元素。 参数 :::::::::::: @@ -14,7 +14,7 @@ unique - **return_index** (bool,可选) - 如果为 True,则还返回独有元素在输入 Tensor 中的索引。 - **return_inverse** (bool,可选) - 如果为 True,则还返回输入 Tensor 的元素对应在独有元素中的索引,该索引可用于重构输入 Tensor。 - **return_counts** (bool,可选) - 如果为 True,则还返回每个独有元素在输入 Tensor 中的个数。 - - **axis** (int,可选) - 指定选取独有元素的轴。默认值为 None,将输入平铺为1-D的 Tensor 后再选取独有元素。 + - **axis** (int,可选) - 指定选取独有元素的轴。默认值为 None,将输入平铺为 1-D 的 Tensor 后再选取独有元素。 - **dtype** (np.dtype|str,可选) - 用于设置 ``index`` , ``inverse`` 或者 ``counts`` 的类型,应该为 int32 或者 int64。默认:int64。 - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 diff --git a/docs/api/paddle/unique_consecutive_cn.rst b/docs/api/paddle/unique_consecutive_cn.rst index 4f8250be752..6040a279466 100644 --- a/docs/api/paddle/unique_consecutive_cn.rst +++ b/docs/api/paddle/unique_consecutive_cn.rst @@ -5,24 +5,24 @@ unique_consecutive .. 
py:function:: paddle.unique_consecutive(x, return_inverse=False, return_counts=False, axis=None, dtype="int64", name=None) -将Tensor中连续重复的元素进行去重,返回连续不重复的Tensor。 +将 Tensor 中连续重复的元素进行去重,返回连续不重复的 Tensor。 参数 :::::::::::: - **x** (Tensor) - 输入的 `Tensor`,数据类型为:float32、float64、int32、int64。 - - **return_inverse** (bool,可选) - 如果为True,则还返回输入Tensor的元素对应在连续不重复元素中的索引,该索引可用于重构输入Tensor。默认:False。 - - **return_counts** (bool,可选) - 如果为True,则还返回每个连续不重复元素在输入Tensor中的个数。默认:False。 - - **axis** (int,可选) - 指定选取连续不重复元素的轴。默认值为None,将输入平铺为1-D的Tensor后再选取连续不重复元素。默认:None。 - - **dtype** (np.dtype|str,可选) - 用于设置 `inverse` 或者 `counts` 的类型,应该为int32或者int64。默认:int64。 + - **return_inverse** (bool,可选) - 如果为 True,则还返回输入 Tensor 的元素对应在连续不重复元素中的索引,该索引可用于重构输入 Tensor。默认:False。 + - **return_counts** (bool,可选) - 如果为 True,则还返回每个连续不重复元素在输入 Tensor 中的个数。默认:False。 + - **axis** (int,可选) - 指定选取连续不重复元素的轴。默认值为 None,将输入平铺为 1-D 的 Tensor 后再选取连续不重复元素。默认:None。 + - **dtype** (np.dtype|str,可选) - 用于设置 `inverse` 或者 `counts` 的类型,应该为 int32 或者 int64。默认:int64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: - - **out** (Tensor) - 连续不重复元素构成的Tensor,数据类型与输入一致。 - - **inverse** (Tensor,可选) - 输入Tensor的元素对应在连续不重复元素中的索引,仅在 `return_inverse` 为True时返回。 - - **counts** (Tensor,可选) - 每个连续不重复元素在输入Tensor中的个数,仅在 `return_counts` 为True时返回。 + - **out** (Tensor) - 连续不重复元素构成的 Tensor,数据类型与输入一致。 + - **inverse** (Tensor,可选) - 输入 Tensor 的元素对应在连续不重复元素中的索引,仅在 `return_inverse` 为 True 时返回。 + - **counts** (Tensor,可选) - 每个连续不重复元素在输入 Tensor 中的个数,仅在 `return_counts` 为 True 时返回。 代码示例 :::::::::::: diff --git a/docs/api/paddle/unsqueeze_cn.rst b/docs/api/paddle/unsqueeze_cn.rst index de5d2e07e2f..d34cc0b1de5 100644 --- a/docs/api/paddle/unsqueeze_cn.rst +++ b/docs/api/paddle/unsqueeze_cn.rst @@ -5,20 +5,20 @@ unsqueeze .. 
py:function:: paddle.unsqueeze(x, axis, name=None) -向输入Tensor的Shape中一个或多个位置(axis)插入尺寸为1的维度。 +向输入 Tensor 的 Shape 中一个或多个位置(axis)插入尺寸为 1 的维度。 -请注意,在动态图模式下,输出Tensor将与输入Tensor共享数据,并且没有Tensor数据拷贝的过程。 +请注意,在动态图模式下,输出 Tensor 将与输入 Tensor 共享数据,并且没有 Tensor 数据拷贝的过程。 如果不希望输入与输出共享数据,请使用 `Tensor.clone`,例如 `unsqueeze_clone_x = x.unsqueeze(-1).clone()` 。 参数 ::::::::: - **x** (Tensor)- 输入的 `Tensor`,数据类型为:float32、float64、bool、int8、int32、int64。 - - **axis** (int|list|tuple|Tensor) - 表示要插入维度的位置。数据类型是 int32。如果 axis 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 Tensor。如果 axes 的类型是 Tensor,则是1-D Tensor。如果 axis 是负数,则 axis=axis+ndim(x)+1 。 + - **axis** (int|list|tuple|Tensor) - 表示要插入维度的位置。数据类型是 int32。如果 axis 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 Tensor。如果 axes 的类型是 Tensor,则是 1-D Tensor。如果 axis 是负数,则 axis=axis+ndim(x)+1 。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: -Tensor,扩展维度后的多维Tensor,数据类型与输入Tensor一致。 +Tensor,扩展维度后的多维 Tensor,数据类型与输入 Tensor 一致。 代码示例 ::::::::: diff --git a/docs/api/paddle/unstack_cn.rst b/docs/api/paddle/unstack_cn.rst index e444983bc00..474eccfe8d2 100644 --- a/docs/api/paddle/unstack_cn.rst +++ b/docs/api/paddle/unstack_cn.rst @@ -8,22 +8,22 @@ unstack -将单个dim为 ``D`` 的Tensor沿 ``axis`` 轴unpack为 ``num`` 个dim为 ``(D-1)`` 的Tensor。 +将单个 dim 为 ``D`` 的 Tensor 沿 ``axis`` 轴 unpack 为 ``num`` 个 dim 为 ``(D-1)`` 的 Tensor。 参数 :::::::::::: - - **x** (Tensor) – 输入x为 ``dim > 0`` 的Tensor, + - **x** (Tensor) – 输入 x 为 ``dim > 0`` 的 Tensor, 支持的数据类型:float32,float64,int32,int64。 - - **axis** (int | 可选) – 输入Tensor进行unpack运算所在的轴,axis的范围为:``[-D, D)`` , - 如果 ``axis < 0``,则 :math:`axis = axis + dim(x)`,axis的默认值为0。 + - **axis** (int | 可选) – 输入 Tensor 进行 unpack 运算所在的轴,axis 的范围为:``[-D, D)`` , + 如果 ``axis < 0``,则 :math:`axis = axis + dim(x)`,axis 的默认值为 0。 - - **num** (int | 可选) - axis轴的长度,一般无需设置,默认值为 ``None`` 。 + - **num** (int | 可选) - axis 轴的长度,一般无需设置,默认值为 ``None`` 。 返回 :::::::::::: - 长度为num的Tensor列表,数据类型与输入Tensor相同,dim为 ``(D-1)``。 + 长度为 num 的 Tensor 列表,数据类型与输入 Tensor 相同,dim 
为 ``(D-1)``。 代码示例 diff --git a/docs/api/paddle/utils/Overview_cn.rst b/docs/api/paddle/utils/Overview_cn.rst index 75b97642c59..351138757a9 100644 --- a/docs/api/paddle/utils/Overview_cn.rst +++ b/docs/api/paddle/utils/Overview_cn.rst @@ -3,49 +3,49 @@ paddle.utils --------------------- -paddle.utils 目录下包含飞桨框架工具类的API。具体如下: +paddle.utils 目录下包含飞桨框架工具类的 API。具体如下: -- :ref:`自定义OP相关API ` -- :ref:`工具类相关API ` +- :ref:`自定义 OP 相关 API ` +- :ref:`工具类相关 API ` .. _about_cpp_extension: -自定义OP相关API +自定义 OP 相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`load ` ", "飞桨框架一键编译自定义OP、自动生成和返回Python API的接口" - " :ref:`setup ` ", "飞桨框架编译自定义OP、并安装到site-package目录的接口" - " :ref:`CppExtension ` ", "飞桨框架编译仅支持CPU的自定义OP扩展类" - " :ref:`CUDAExtension ` ", "飞桨框架编译支持GPU的自定义OP扩展类" - " :ref:`get_build_directory ` ", "返回一键编译自定义OP的build目录" + " :ref:`load ` ", "飞桨框架一键编译自定义 OP、自动生成和返回 Python API 的接口" + " :ref:`setup ` ", "飞桨框架编译自定义 OP、并安装到 site-package 目录的接口" + " :ref:`CppExtension ` ", "飞桨框架编译仅支持 CPU 的自定义 OP 扩展类" + " :ref:`CUDAExtension ` ", "飞桨框架编译支持 GPU 的自定义 OP 扩展类" + " :ref:`get_build_directory ` ", "返回一键编译自定义 OP 的 build 目录" .. _about_utils: -工具类相关API +工具类相关 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`deprecated ` ", "飞桨框架废弃API装饰器" + " :ref:`deprecated ` ", "飞桨框架废弃 API 装饰器" " :ref:`get_weights_path_from_url ` ", "从文件夹获取权重" " :ref:`run_check ` ", "检查是否正常安装飞桨框架" " :ref:`generate ` ", "产生以前缀开头的唯一名称" " :ref:`guard ` ", "更改命名空间" " :ref:`switch ` ", "切换命名空间" - " :ref:`cuda_profiler ` ", "CUDA性能分析器" + " :ref:`cuda_profiler ` ", "CUDA 性能分析器" " :ref:`profiler ` ", "通用性能分析器" " :ref:`reset_profiler ` ", "清除之前的性能分析记录" " :ref:`start_profiler ` ", "激活使用性能分析器" " :ref:`stop_profiler ` ", "停止使用性能分析器" " :ref:`require_version ` ", "用于检查已安装的飞桨版本是否介于[min_version, max_version]之间" - " :ref:`to_dlpack ` ", "用于将Tensor对象转换为DLPack" - " :ref:`from_dlpack ` ", "用于从DLPack中解码出Tensor对象" + " :ref:`to_dlpack ` ", "用于将 Tensor 对象转换为 DLPack" + " :ref:`from_dlpack ` ", "用于从 DLPack 中解码出 Tensor 对象" diff --git a/docs/api/paddle/utils/cpp_extension/CUDAExtension_cn.rst b/docs/api/paddle/utils/cpp_extension/CUDAExtension_cn.rst index 688336a4d6f..f21dcbc2a2f 100644 --- a/docs/api/paddle/utils/cpp_extension/CUDAExtension_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/CUDAExtension_cn.rst @@ -36,7 +36,7 @@ CUDAExtension 参数 :::::::::::: - - **sources** (list[str]) - 用于指定自定义 OP 对应的源码文件。cpp 源文件支持。cc、.cpp等后缀;cuda 源文件以。cu 为后缀。 + - **sources** (list[str]) - 用于指定自定义 OP 对应的源码文件。cpp 源文件支持。cc、.cpp 等后缀;cuda 源文件以。cu 为后缀。 - **\*args, \*\*kwargs** (可选) - 用于指定 Extension 的其他参数,支持的参数与 ``setuptools.Extension`` 一致。 返回 diff --git a/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst b/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst index 31de33ef795..1b02f9647c9 100644 --- a/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/CppExtension_cn.rst @@ -34,7 +34,7 @@ CppExtension 参数 :::::::::::: - - **sources** (list[str]) - 用于指定自定义 OP 对应的源码文件。cpp 源文件支持。cc、.cpp等后缀 + - **sources** (list[str]) - 用于指定自定义 OP 对应的源码文件。cpp 源文件支持。cc、.cpp 等后缀 - **\*args, \*\*kwargs** (可选) - 用于指定 
Extension 的其他参数,支持的参数与 ``setuptools.Extension`` 一致。 返回 diff --git a/docs/api/paddle/utils/cpp_extension/load_cn.rst b/docs/api/paddle/utils/cpp_extension/load_cn.rst index 14482d91e7d..20c2c33ac51 100644 --- a/docs/api/paddle/utils/cpp_extension/load_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/load_cn.rst @@ -7,10 +7,10 @@ load 此接口将即时编译(Just-In-Time)传入的自定义 OP 对应的 cpp 和 cuda 源码文件,返回一个包含自定义算子 API 的 ``Module`` 对象。 -其通过子进程的方式,在后台隐式地执行源码文件编译、符号链接、动态库生成、组网 API 接口生成等一系列过程。不需要本地预装 CMake 或者 Ninja 等工具命令,仅需必要的编译器命令环境,Linux 下需安装版本不低于 5.4 的 GCC,并软链到 `/usr/bin/cc` ,Windows下需安装版本不低于2017的Visual Studio;若编译支持 GPU 设备的算子,则需要提前安装CUDA,其中自带 `nvcc` 编译环境。 +其通过子进程的方式,在后台隐式地执行源码文件编译、符号链接、动态库生成、组网 API 接口生成等一系列过程。不需要本地预装 CMake 或者 Ninja 等工具命令,仅需必要的编译器命令环境,Linux 下需安装版本不低于 5.4 的 GCC,并软链到 `/usr/bin/cc` ,Windows 下需安装版本不低于 2017 的 Visual Studio;若编译支持 GPU 设备的算子,则需要提前安装 CUDA,其中自带 `nvcc` 编译环境。 -在编译前会执行 `ABI 兼容性检查 `_ ,即检查编译器版本是否与本地安装的 Paddle 一致。如在Linux下,对于 CUDA 10.1 以上的 Paddle 默认使用 GCC 8.2 编译,则本地 ``cc`` 对应的编译器版本也需为 8.2,在Windows下,Paddle 使用Visualt Studio 2017编译,则本地也需安装 -大于2017的Visual Studio,如果不满足,则可能由于 ABI 兼容性原因引发自定义 OP 编译或执行报错。Mac 下默认使用 clang 进行编译,无 ABI 兼容性问题。 +在编译前会执行 `ABI 兼容性检查 `_ ,即检查编译器版本是否与本地安装的 Paddle 一致。如在 Linux 下,对于 CUDA 10.1 以上的 Paddle 默认使用 GCC 8.2 编译,则本地 ``cc`` 对应的编译器版本也需为 8.2,在 Windows 下,Paddle 使用 Visual Studio 2017 编译,则本地也需安装 +大于 2017 的 Visual Studio,如果不满足,则可能由于 ABI 兼容性原因引发自定义 OP 编译或执行报错。Mac 下默认使用 clang 进行编译,无 ABI 兼容性问题。 相对于 :ref:`cn_api_paddle_utils_cpp_extension_setup` 的方式,此接口不需要额外的 ``setup.py`` 文件和 ``python setup.py install`` 命令,``load`` 接口包含了一键执行自定义 OP 的编译和加载的全部流程。 @@ -18,7 +18,7 @@ load 1. 目前已支持 Linux ,MacOS 和 Windows 平台。 2. 编译器的 ABI 兼容性是向前兼容的,Linux 下推荐使用 GCC 8.2 高版本作为 ``/usr/bin/cc`` 命令的软链对象,可通过 ``which cc`` 查看 ``cc`` 命令的位置,使用 ``cc --version`` 查看对应的 GCC 版本。 - 3. 
Windows 下需要安装 Visual Studio 2017 及以上版本。 **使用样例如下:** diff --git a/docs/api/paddle/utils/cpp_extension/setup_cn.rst b/docs/api/paddle/utils/cpp_extension/setup_cn.rst index d3a5ddb4af7..26e3182ba25 100644 --- a/docs/api/paddle/utils/cpp_extension/setup_cn.rst +++ b/docs/api/paddle/utils/cpp_extension/setup_cn.rst @@ -9,8 +9,8 @@ setup 此接口是对 Python 内建库中的 ``setuptools.setup`` 接口的进一步封装,支持的参数类型,以及使用方式均与原生接口保持一致。接口隐藏了 Paddle 框架内部概念,如默认需要指定的编译选项,头文件搜索目录,链接选项等;此接口会自动搜索和检查本地的 ``cc`` 和 ``nvcc`` 编译命令和版本环境,根据用户指定的 ``Extension`` 类型,完成支持 CPU 或 GPU 设备的算子编译。 -同时,编译前会执行 `ABI 兼容性检查 `_ ,即检查编译器版本是否与本地安装的 Paddle 一致。如在Linux下,对于 CUDA 10.1 以上的 Paddle 默认使用 GCC 8.2 编译,则本地 ``cc`` 对应的编译器版本也需为 8.2,在Windows下,Paddle 使用Visualt Studio 2017编译,则本地也需安装 -大于2017的Visual Studio,如果不满足,则可能由于 ABI 兼容性原因引发自定义 OP 编译或执行报错。Mac 下默认使用 clang 进行编译,无 ABI 兼容性问题。 +同时,编译前会执行 `ABI 兼容性检查 `_ ,即检查编译器版本是否与本地安装的 Paddle 一致。如在 Linux 下,对于 CUDA 10.1 以上的 Paddle 默认使用 GCC 8.2 编译,则本地 ``cc`` 对应的编译器版本也需为 8.2,在 Windows 下,Paddle 使用 Visual Studio 2017 编译,则本地也需安装 +大于 2017 的 Visual Studio,如果不满足,则可能由于 ABI 兼容性原因引发自定义 OP 编译或执行报错。Mac 下默认使用 clang 进行编译,无 ABI 兼容性问题。 相对于即时编译的 :ref:`cn_api_paddle_utils_cpp_extension_load` 接口,此接口仅需执行一次 ``python setup.py install`` 命令,即可像其他 python 库一样 import 导入使用。如下是一个 ``setup.py`` 文件的简单样例: @@ -19,7 +19,7 @@ setup 1. 目前已支持 Linux ,MacOS 和 Windows 平台。 2. 编译器的 ABI 兼容性是向前兼容的,Linux 下推荐使用 GCC 8.2 高版本作为 ``/usr/bin/cc`` 命令的软链对象,可通过 ``which cc`` 查看 ``cc`` 命令的位置,使用 ``cc --version`` 查看对应的 GCC 版本。 - 3. Windows下需要安装Visual Studio 2017及以上版本。 + 3. Windows 下需要安装 Visual Studio 2017 及以上版本。 .. code-block:: text diff --git a/docs/api/paddle/utils/deprecated_cn.rst b/docs/api/paddle/utils/deprecated_cn.rst index 133397c0b9f..13255765a57 100644 --- a/docs/api/paddle/utils/deprecated_cn.rst +++ b/docs/api/paddle/utils/deprecated_cn.rst @@ -5,18 +5,18 @@ paddle_utils_deprecated ..
py:function:: paddle.utils.deprecated(update_to="", since="", reason="") -对于即将废弃的API可以加入该装饰器,在调用对应 PaddlePaddle API 时,可以做如下两件事情: +对于即将废弃的 API 可以加入该装饰器,在调用对应 PaddlePaddle API 时,可以做如下两件事情: - - 修改被装饰API的相关docstring,添加即将废弃警告。 - - 当相关API被调用时,向控制台输出相关warning信息 :class:`~exceptions.DeprecatedWarning`。 + - 修改被装饰 API 的相关 docstring,添加即将废弃警告。 + - 当相关 API 被调用时,向控制台输出相关 warning 信息 :class:`~exceptions.DeprecatedWarning`。 参数 :::::::::::: - **since** (str) - 即将废弃相对应的版本号。 - - **update_to** (str) - 新的API名称。 - - **reason** (str) - 即将废弃该API的原因。 + - **update_to** (str) - 新的 API 名称。 + - **reason** (str) - 即将废弃该 API 的原因。 返回 :::::::::::: diff --git a/docs/api/paddle/utils/dlpack/from_dlpack_cn.rst b/docs/api/paddle/utils/dlpack/from_dlpack_cn.rst index 82cfe37e79e..2e86194d12c 100644 --- a/docs/api/paddle/utils/dlpack/from_dlpack_cn.rst +++ b/docs/api/paddle/utils/dlpack/from_dlpack_cn.rst @@ -5,15 +5,15 @@ from_dlpack .. py:function:: paddle.utils.dlpack.from_dlpack(dlpack) -将DLPack解码为Tensor对象。其中,DLPack是一种开放的内存张量结构,可用于不同深度学习框架之间的张量共享。注意,一个DLPack只能被解码一次。 +将 DLPack 解码为 Tensor 对象。其中,DLPack 是一种开放的内存张量结构,可用于不同深度学习框架之间的张量共享。注意,一个 DLPack 只能被解码一次。 参数 ::::::::: - - **dlpack** (PyCapsule) - DLPack,即带有dltensor的PyCapsule对象。 + - **dlpack** (PyCapsule) - DLPack,即带有 dltensor 的 PyCapsule 对象。 返回 ::::::::: - - **out** (Tensor) - 从DLPack中解码得到的Tensor。需要注意的是,对于带有`bool`数据类型的dltensor输入,我们最终解码得到的Tensor对应的数据类型为`uint8`。 + - **out** (Tensor) - 从 DLPack 中解码得到的 Tensor。需要注意的是,对于带有`bool`数据类型的 dltensor 输入,我们最终解码得到的 Tensor 对应的数据类型为`uint8`。 代码示例 ::::::::: diff --git a/docs/api/paddle/utils/dlpack/to_dlpack_cn.rst b/docs/api/paddle/utils/dlpack/to_dlpack_cn.rst index 68ac04f4fad..abd6ed99535 100644 --- a/docs/api/paddle/utils/dlpack/to_dlpack_cn.rst +++ b/docs/api/paddle/utils/dlpack/to_dlpack_cn.rst @@ -5,15 +5,15 @@ to_dlpack .. 
py:function:: paddle.utils.dlpack.to_dlpack(x) -将Tensor对象转化为DLPack。其中,DLPack是一种开放的内存张量结构,可用于不同深度学习框架之间的张量共享。 +将 Tensor 对象转化为 DLPack。其中,DLPack 是一种开放的内存张量结构,可用于不同深度学习框架之间的张量共享。 参数 ::::::::: - - **x** (Tensor) - Paddle Tensor,并且其数据类型为支持bool,float16,float32,float64,int8,int16,int32,int64,uint8,complex64,complex128。 + - **x** (Tensor) - Paddle Tensor,并且其数据类型为支持 bool,float16,float32,float64,int8,int16,int32,int64,uint8,complex64,complex128。 返回 ::::::::: - - **dlpack** (PyCapsule) - DLPack,即带有dltensor的PyCapsule对象。 + - **dlpack** (PyCapsule) - DLPack,即带有 dltensor 的 PyCapsule 对象。 代码示例 ::::::::: diff --git a/docs/api/paddle/utils/download/get_weights_path_from_url_cn.rst b/docs/api/paddle/utils/download/get_weights_path_from_url_cn.rst index accf8c8aa98..fa807141c1c 100644 --- a/docs/api/paddle/utils/download/get_weights_path_from_url_cn.rst +++ b/docs/api/paddle/utils/download/get_weights_path_from_url_cn.rst @@ -5,13 +5,13 @@ get_weights_path_from_url .. py:function:: paddle.utils.download.get_weights_path_from_url(url, md5sum=None) - 从 ``WEIGHT_HOME`` 文件夹获取权重,如果不存在,就从url下载。 + 从 ``WEIGHT_HOME`` 文件夹获取权重,如果不存在,就从 url 下载。 参数 :::::::::::: - **url** (str) - 下载的链接。 - - **md5sum** (str,可选) - 下载文件的md5值。默认值:None。 + - **md5sum** (str,可选) - 下载文件的 md5 值。默认值:None。 返回 :::::::::::: diff --git a/docs/api/paddle/utils/run_check_cn.rst b/docs/api/paddle/utils/run_check_cn.rst index 679efae1b31..a3e51527e8f 100644 --- a/docs/api/paddle/utils/run_check_cn.rst +++ b/docs/api/paddle/utils/run_check_cn.rst @@ -5,7 +5,7 @@ run_check .. 
py:function:: paddle.utils.run_check() -检查用户机器上,PaddlePaddle是否正确地安装了,以及是否能够成功运行。 +检查用户机器上,PaddlePaddle 是否正确地安装了,以及是否能够成功运行。 代码示例 diff --git a/docs/api/paddle/utils/unique_name/generate_cn.rst b/docs/api/paddle/utils/unique_name/generate_cn.rst index 3bcf02b382b..ae043288be1 100644 --- a/docs/api/paddle/utils/unique_name/generate_cn.rst +++ b/docs/api/paddle/utils/unique_name/generate_cn.rst @@ -8,7 +8,7 @@ generate -该接口产生以前缀key开头的唯一名称。目前,Paddle通过从0开始的编号对相同前缀key的名称进行区分。例如,使用key=fc连续调用该接口会产生fc_0, fc_1, fc_2等不同名称。 +该接口产生以前缀 key 开头的唯一名称。目前,Paddle 通过从 0 开始的编号对相同前缀 key 的名称进行区分。例如,使用 key=fc 连续调用该接口会产生 fc_0, fc_1, fc_2 等不同名称。 参数 :::::::::::: @@ -17,7 +17,7 @@ generate 返回 :::::::::::: -str,含前缀key的唯一名称。 +str,含前缀 key 的唯一名称。 代码示例 :::::::::::: diff --git a/docs/api/paddle/utils/unique_name/guard_cn.rst b/docs/api/paddle/utils/unique_name/guard_cn.rst index 9c3b77a941b..f8673a5e09c 100644 --- a/docs/api/paddle/utils/unique_name/guard_cn.rst +++ b/docs/api/paddle/utils/unique_name/guard_cn.rst @@ -8,12 +8,12 @@ guard -该接口用于更改命名空间,与with语句一起使用。使用后,在with语句的上下文中使用新的命名空间,调用generate接口时相同前缀的名称将从0开始重新编号。 +该接口用于更改命名空间,与 with 语句一起使用。使用后,在 with 语句的上下文中使用新的命名空间,调用 generate 接口时相同前缀的名称将从 0 开始重新编号。 参数 :::::::::::: - - **new_generator** (str|bytes,可选) - 新命名空间的名称。请注意,Python2中的str在Python3中被区分为str和bytes两种,因此这里有两种类型。缺省值为None,若不为None,new_generator将作为前缀添加到generate接口产生的唯一名称中。 + - **new_generator** (str|bytes,可选) - 新命名空间的名称。请注意,Python2 中的 str 在 Python3 中被区分为 str 和 bytes 两种,因此这里有两种类型。缺省值为 None,若不为 None,new_generator 将作为前缀添加到 generate 接口产生的唯一名称中。 返回 :::::::::::: diff --git a/docs/api/paddle/utils/unique_name/switch_cn.rst b/docs/api/paddle/utils/unique_name/switch_cn.rst index c07365d0cb1..02c9b6dad69 100644 --- a/docs/api/paddle/utils/unique_name/switch_cn.rst +++ b/docs/api/paddle/utils/unique_name/switch_cn.rst @@ -8,12 +8,12 @@ switch -该接口将当前上下文的命名空间切换到新的命名空间。该接口与guard接口都可用于更改命名空间,推荐使用guard接口,配合with语句管理命名空间上下文。 +该接口将当前上下文的命名空间切换到新的命名空间。该接口与 guard 接口都可用于更改命名空间,推荐使用 guard 接口,配合 with 语句管理命名空间上下文。 参数 
:::::::::::: - - **new_generator** (UniqueNameGenerator,可选) - 要切换到的新命名空间,一般无需设置。缺省值为None,表示切换到一个匿名的新命名空间。 + - **new_generator** (UniqueNameGenerator,可选) - 要切换到的新命名空间,一般无需设置。缺省值为 None,表示切换到一个匿名的新命名空间。 返回 :::::::::::: diff --git a/docs/api/paddle/var_cn.rst b/docs/api/paddle/var_cn.rst index bd53df417d1..9ad439436bf 100644 --- a/docs/api/paddle/var_cn.rst +++ b/docs/api/paddle/var_cn.rst @@ -12,8 +12,8 @@ var - **x** (Tensor) - 输入的 Tensor,数据类型为:float32、float64。 - **axis** (int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int、list(int)、tuple(int)。 - - 如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D是 ``x`` 的维度。 - - 如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D` 。 + - 如果 ``axis`` 包含多个维度,则沿着 ``axis`` 中的所有轴进行计算。``axis`` 或者其中的元素值应该在范围[-D, D)内,D 是 ``x`` 的维度。 + - 如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D` 。 - 如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算方差。默认值为 None。 - **unbiased** (bool,可选) - 是否使用无偏估计来计算方差。使用 :math:`N` 来代表在 axis 上的维度,如果 ``unbiased`` 为 True,则在计算中使用 :math:`N - 1` 作为除数。为 False 时将使用 :math:`N` 作为除数。默认值为 True。 diff --git a/docs/api/paddle/version/Overview_cn.rst b/docs/api/paddle/version/Overview_cn.rst index 59edcec0e15..f5e6ee1d395 100644 --- a/docs/api/paddle/version/Overview_cn.rst +++ b/docs/api/paddle/version/Overview_cn.rst @@ -3,16 +3,16 @@ paddle.version --------------------- -paddle.version 目录下包含的API返回 paddle 安装包相关配置的版本信息。具体如下: +paddle.version 目录下包含的 API 返回 paddle 安装包相关配置的版本信息。具体如下: -配置版本相关API +配置版本相关 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`cuda ` ", "获取paddle wheel包编译时使用的CUDA版本" - " :ref:`cudnn ` ", "获取paddle wheel包编译时使用的cuDNN版本" - " :ref:`show ` ", "打印paddle版本、CUDA版本、cuDNN版本等信息" + " :ref:`cuda ` ", "获取 paddle wheel 包编译时使用的 CUDA 版本" + " :ref:`cudnn ` ", "获取 paddle wheel 包编译时使用的 cuDNN 版本" + " :ref:`show ` ", "打印 paddle 版本、CUDA 版本、cuDNN 版本等信息" diff --git a/docs/api/paddle/version/cuda_cn.rst b/docs/api/paddle/version/cuda_cn.rst index 15862a34187..cd2a2ac96cd 100644 --- a/docs/api/paddle/version/cuda_cn.rst +++ b/docs/api/paddle/version/cuda_cn.rst @@ -11,7 +11,7 @@ cuda 返回 :::::::::: -若paddle wheel包为GPU版本,则返回paddle wheel包编译时使用的CUDA的版本信息;若paddle wheel包为CPU版本,则返回 ``False`` 。 +若 paddle wheel 包为 GPU 版本,则返回 paddle wheel 包编译时使用的 CUDA 的版本信息;若 paddle wheel 包为 CPU 版本,则返回 ``False`` 。 代码示例: :::::::::: diff --git a/docs/api/paddle/version/cudnn_cn.rst b/docs/api/paddle/version/cudnn_cn.rst index e73d8211055..f95d43d1b4e 100644 --- a/docs/api/paddle/version/cudnn_cn.rst +++ b/docs/api/paddle/version/cudnn_cn.rst @@ -11,7 +11,7 @@ cudnn 返回 ::::::::: -若paddle wheel包为GPU版本,则返回paddle wheel包编译时使用的cuDNN的版本信息;若paddle wheel包为CPU版本,则返回 ``False`` 。 +若 paddle wheel 包为 GPU 版本,则返回 paddle wheel 包编译时使用的 cuDNN 的版本信息;若 paddle wheel 包为 CPU 版本,则返回 ``False`` 。 代码示例: :::::::::: diff --git a/docs/api/paddle/version/show_cn.rst b/docs/api/paddle/version/show_cn.rst index f74eff8d927..e4e2e56ced0 100644 --- a/docs/api/paddle/version/show_cn.rst +++ b/docs/api/paddle/version/show_cn.rst @@ -5,22 +5,22 @@ show .. 
py:function:: paddle.version.show() -如果paddle wheel包是正式发行版本,则打印版本号。否则,获取paddle wheel包编译时对应的commit id。 -另外,打印paddle wheel包使用的CUDA和cuDNN的版本信息。 +如果 paddle wheel 包是正式发行版本,则打印版本号。否则,获取 paddle wheel 包编译时对应的 commit id。 +另外,打印 paddle wheel 包使用的 CUDA 和 cuDNN 的版本信息。 返回 ::::::::: -如果paddle wheel包不是正式发行版本,则输出wheel包编译时对应的commit id号。否则,输出如下信息: +如果 paddle wheel 包不是正式发行版本,则输出 wheel 包编译时对应的 commit id 号。否则,输出如下信息: - - full_version - paddle wheel包的版本号。 - - major - paddle wheel包版本号的major信息。 - - minor - paddle wheel包版本号的minor信息。 - - patch - paddle wheel包版本号的patch信息。 - - rc - 是否是rc版本。 - - cuda - 若paddle wheel包为GPU版本,则返回paddle wheel包编译时使用的CUDA的版本信息;若paddle wheel包为CPU版本,则返回 ``False`` 。 - - cudnn - 若paddle wheel包为GPU版本,则返回paddle wheel包编译时使用的cuDNN的版本信息;若paddle wheel包为CPU版本,则返回 ``False`` 。 + - full_version - paddle wheel 包的版本号。 + - major - paddle wheel 包版本号的 major 信息。 + - minor - paddle wheel 包版本号的 minor 信息。 + - patch - paddle wheel 包版本号的 patch 信息。 + - rc - 是否是 rc 版本。 + - cuda - 若 paddle wheel 包为 GPU 版本,则返回 paddle wheel 包编译时使用的 CUDA 的版本信息;若 paddle wheel 包为 CPU 版本,则返回 ``False`` 。 + - cudnn - 若 paddle wheel 包为 GPU 版本,则返回 paddle wheel 包编译时使用的 cuDNN 的版本信息;若 paddle wheel 包为 CPU 版本,则返回 ``False`` 。 代码示例 :::::::::: diff --git a/docs/api/paddle/vision/Overview_cn.rst b/docs/api/paddle/vision/Overview_cn.rst index dd3ea47b76e..c1dd95c10ca 100644 --- a/docs/api/paddle/vision/Overview_cn.rst +++ b/docs/api/paddle/vision/Overview_cn.rst @@ -3,83 +3,83 @@ paddle.vision --------------------- -paddle.vision 目录是飞桨在视觉领域的高层API。具体如下: +paddle.vision 目录是飞桨在视觉领域的高层 API。具体如下: -- :ref:`内置数据集相关API ` -- :ref:`内置模型相关API ` -- :ref:`视觉操作相关API ` -- :ref:`数据处理相关API ` -- :ref:`其他API ` +- :ref:`内置数据集相关 API ` +- :ref:`内置模型相关 API ` +- :ref:`视觉操作相关 API ` +- :ref:`数据处理相关 API ` +- :ref:`其他 API ` .. _about_datasets: -内置数据集相关API +内置数据集相关 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`Cifar10 ` ", "Cifar10数据集" - " :ref:`Cifar100 ` ", "Cifar100数据集" - " :ref:`FashionMNIST ` ", "FashionMNIST数据集" - " :ref:`Flowers ` ", "Flowers数据集" - " :ref:`MNIST ` ", "MNIST数据集" - " :ref:`VOC2012 ` ", "VOC2012数据集" + " :ref:`Cifar10 ` ", "Cifar10 数据集" + " :ref:`Cifar100 ` ", "Cifar100 数据集" + " :ref:`FashionMNIST ` ", "FashionMNIST 数据集" + " :ref:`Flowers ` ", "Flowers 数据集" + " :ref:`MNIST ` ", "MNIST 数据集" + " :ref:`VOC2012 ` ", "VOC2012 数据集" .. _about_models: -内置模型相关API +内置模型相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`LeNet ` ", "LeNet模型" - " :ref:`AlexNet ` ", "AlexNet模型" - " :ref:`alexnet ` ", "AlexNet模型" - " :ref:`MobileNetV1 ` ", "MobileNetV1模型" - " :ref:`mobilenet_v1 ` ", "MobileNetV1模型" - " :ref:`MobileNetV2 ` ", "MobileNetV2模型" - " :ref:`mobilenet_v2 ` ", "MobileNetV2模型" - " :ref:`MobileNetV3Small ` ", "MobileNetV3Small模型" - " :ref:`MobileNetV3Large ` ", "MobileNetV3Large模型" - " :ref:`mobilenet_v3_small ` ", "MobileNetV3Small模型" - " :ref:`mobilenet_v3_large ` ", "MobileNetV3Large模型" - " :ref:`ResNet ` ", "ResNet模型" - " :ref:`resnet18 ` ", "18层的ResNet模型" - " :ref:`resnet34 ` ", "34层的ResNet模型" - " :ref:`resnet50 ` ", "50层的ResNet模型" - " :ref:`resnet101 ` ", "101层的ResNet模型" - " :ref:`resnet152 ` ", "152层的ResNet模型" - " :ref:`wide_resnet50_2 ` ", "50层的WideResNet模型" - " :ref:`wide_resnet101_2 ` ", "101层的WideResNet模型" - " :ref:`resnext50_32x4d ` ", "ResNeXt-50 32x4d模型" - " :ref:`resnext50_64x4d ` ", "ResNeXt-50 64x4d模型" - " :ref:`resnext101_32x4d ` ", "ResNeXt-101 32x4d模型" - " :ref:`resnext101_64x4d ` ", "ResNeXt-101 64x4d模型" - " :ref:`resnext152_32x4d ` ", "ResNeXt-152 32x4d模型" - " :ref:`resnext152_64x4d ` ", "ResNeXt-152 64x4d模型" - " :ref:`VGG ` ", "VGG模型" - " :ref:`vgg11 ` ", "11层的VGG模型" - " :ref:`vgg13 ` ", "13层的VGG模型" - " :ref:`vgg16 ` ", "16层的VGG模型" - " :ref:`vgg19 ` ", "19层的VGG模型" - " 
:ref:`DenseNet ` ", "DenseNet模型" - " :ref:`densenet121 ` ", "121层的DenseNet模型" - " :ref:`densenet161 ` ", "161层的DenseNet模型" - " :ref:`densenet169 ` ", "169层的DenseNet模型" - " :ref:`densenet201 ` ", "201层的DenseNet模型" - " :ref:`densenet264 ` ", "264层的DenseNet模型" - " :ref:`InceptionV3 ` ", "InceptionV3模型" - " :ref:`inception_v3 ` ", "InceptionV3模型" - " :ref:`GoogLeNet ` ", "GoogLeNet模型" - " :ref:`googlenet ` ", "GoogLeNet模型" - " :ref:`SqueezeNet ` ", "SqueezeNet模型" - " :ref:`squeezenet1_0 ` ", "squeezenet1_0模型" - " :ref:`squeezenet1_1 ` ", "squeezenet1_1模型" - " :ref:`ShuffleNetV2 ` ", "ShuffleNetV2模型" + " :ref:`LeNet ` ", "LeNet 模型" + " :ref:`AlexNet ` ", "AlexNet 模型" + " :ref:`alexnet ` ", "AlexNet 模型" + " :ref:`MobileNetV1 ` ", "MobileNetV1 模型" + " :ref:`mobilenet_v1 ` ", "MobileNetV1 模型" + " :ref:`MobileNetV2 ` ", "MobileNetV2 模型" + " :ref:`mobilenet_v2 ` ", "MobileNetV2 模型" + " :ref:`MobileNetV3Small ` ", "MobileNetV3Small 模型" + " :ref:`MobileNetV3Large ` ", "MobileNetV3Large 模型" + " :ref:`mobilenet_v3_small ` ", "MobileNetV3Small 模型" + " :ref:`mobilenet_v3_large ` ", "MobileNetV3Large 模型" + " :ref:`ResNet ` ", "ResNet 模型" + " :ref:`resnet18 ` ", "18 层的 ResNet 模型" + " :ref:`resnet34 ` ", "34 层的 ResNet 模型" + " :ref:`resnet50 ` ", "50 层的 ResNet 模型" + " :ref:`resnet101 ` ", "101 层的 ResNet 模型" + " :ref:`resnet152 ` ", "152 层的 ResNet 模型" + " :ref:`wide_resnet50_2 ` ", "50 层的 WideResNet 模型" + " :ref:`wide_resnet101_2 ` ", "101 层的 WideResNet 模型" + " :ref:`resnext50_32x4d ` ", "ResNeXt-50 32x4d 模型" + " :ref:`resnext50_64x4d ` ", "ResNeXt-50 64x4d 模型" + " :ref:`resnext101_32x4d ` ", "ResNeXt-101 32x4d 模型" + " :ref:`resnext101_64x4d ` ", "ResNeXt-101 64x4d 模型" + " :ref:`resnext152_32x4d ` ", "ResNeXt-152 32x4d 模型" + " :ref:`resnext152_64x4d ` ", "ResNeXt-152 64x4d 模型" + " :ref:`VGG ` ", "VGG 模型" + " :ref:`vgg11 ` ", "11 层的 VGG 模型" + " :ref:`vgg13 ` ", "13 层的 VGG 模型" + " :ref:`vgg16 ` ", "16 层的 VGG 模型" + " :ref:`vgg19 ` ", "19 层的 VGG 模型" + " :ref:`DenseNet ` ", "DenseNet 模型" + 
" :ref:`densenet121 ` ", "121 层的 DenseNet 模型" + " :ref:`densenet161 ` ", "161 层的 DenseNet 模型" + " :ref:`densenet169 ` ", "169 层的 DenseNet 模型" + " :ref:`densenet201 ` ", "201 层的 DenseNet 模型" + " :ref:`densenet264 ` ", "264 层的 DenseNet 模型" + " :ref:`InceptionV3 ` ", "InceptionV3 模型" + " :ref:`inception_v3 ` ", "InceptionV3 模型" + " :ref:`GoogLeNet ` ", "GoogLeNet 模型" + " :ref:`googlenet ` ", "GoogLeNet 模型" + " :ref:`SqueezeNet ` ", "SqueezeNet 模型" + " :ref:`squeezenet1_0 ` ", "squeezenet1_0 模型" + " :ref:`squeezenet1_1 ` ", "squeezenet1_1 模型" + " :ref:`ShuffleNetV2 ` ", "ShuffleNetV2 模型" " :ref:`shufflenet_v2_x0_25 ` ", "输出通道缩放比例为 0.25 的 ShuffleNetV2 模型" " :ref:`shufflenet_v2_x0_33 ` ", "输出通道缩放比例为 0.33 的 ShuffleNetV2 模型" " :ref:`shufflenet_v2_x0_5 ` ", "输出通道缩放比例为 0.5 的 ShuffleNetV2 模型" @@ -91,25 +91,25 @@ paddle.vision 目录是飞桨在视觉领域的高层API。具体如下: .. _about_ops: -视觉操作相关API +视觉操作相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 - " :ref:`deform_conv2d ` ", "计算2-D可变形卷积" - " :ref:`DeformConv2D ` ", "计算2-D可变形卷积" - " :ref:`yolo_box ` ", "生成YOLO检测框" - " :ref:`yolo_loss ` ", "计算YOLO损失" + " :ref:`deform_conv2d ` ", "计算 2-D 可变形卷积" + " :ref:`DeformConv2D ` ", "计算 2-D 可变形卷积" + " :ref:`yolo_box ` ", "生成 YOLO 检测框" + " :ref:`yolo_loss ` ", "计算 YOLO 损失" .. _about_transforms: -数据处理相关API +数据处理相关 API :::::::::::::::::::: .. csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`adjust_brightness ` ", "调整图像亮度" @@ -148,11 +148,11 @@ paddle.vision 目录是飞桨在视觉领域的高层API。具体如下: .. _about_others: -其他API +其他 API :::::::::::::::::::: .. 
csv-table:: - :header: "API名称", "API功能" + :header: "API 名称", "API 功能" :widths: 10, 30 " :ref:`get_image_backend ` ", "获取用于加载图像的模块名称" diff --git a/docs/api/paddle/vision/ops/DeformConv2D_cn.rst b/docs/api/paddle/vision/ops/DeformConv2D_cn.rst index 94d9a3b1258..bfb9c63a0cf 100644 --- a/docs/api/paddle/vision/ops/DeformConv2D_cn.rst +++ b/docs/api/paddle/vision/ops/DeformConv2D_cn.rst @@ -6,17 +6,17 @@ DeformConv2D .. py:class:: paddle.vision.ops.DeformConv2D(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, deformable_groups=1, groups=1, weight_attr=None, bias_attr=None) -deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x,输出Tensor y,可变形卷积运算如下所示: +deform_conv2d 对输入 4-D Tensor 计算 2-D 可变形卷积。给定输入 Tensor x,输出 Tensor y,可变形卷积运算如下所示: -可形变卷积v2(mask != None): +可形变卷积 v2(mask != None): :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}` -可形变卷积v1(mask = None): +可形变卷积 v1(mask = None): :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}` -其中 :math:`\Delta p_k` 和 :math:`\Delta m_k` 分别为第k个位置的可学习偏移和调制标量。在deformable conv v1中 :math:`\Delta m_k` 为1。 +其中 :math:`\Delta p_k` 和 :math:`\Delta m_k` 分别为第 k 个位置的可学习偏移和调制标量。在 deformable conv v1 中 :math:`\Delta m_k` 为 1。 具体细节可以参考论文:`<> `_ 和 `<> `_ 。 @@ -53,9 +53,9 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x - **padding** (int|list|tuple,可选) - 填充大小。卷积核操作填充大小。如果它是一个列表或元组,则必须包含两个整型数:(padding_height,padding_width)。若为一个整数,padding_height = padding_width = padding。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。可以为单个整数或包含两个整数的元组或列表,分别表示卷积核中的元素沿着高和宽的空洞。如果为单个整数,表示高和宽的空洞都等于该整数。默认值:1。 - **deformable_groups** (int,可选) - 可变形卷积组数。默认值:1。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 - - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 
为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **groups** (int,可选) - 二维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 + - **weight_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为 bool 类型,只支持为 False,表示没有偏置参数。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 形状: diff --git a/docs/api/paddle/vision/ops/PSRoIPool_cn.rst b/docs/api/paddle/vision/ops/PSRoIPool_cn.rst index 77b9bf6670c..e017654296b 100644 --- a/docs/api/paddle/vision/ops/PSRoIPool_cn.rst +++ b/docs/api/paddle/vision/ops/PSRoIPool_cn.rst @@ -9,15 +9,15 @@ PSRoIPool 参数 ::::::::: - - **output_size** (int|Tuple(int, int)) - 池化后输出的尺寸(H, W), 数据类型为int32. 如果output_size是int类型,H和W都与其相等。 - - **spatial_scale** (float,可选) - 空间比例因子,用于将boxes中的坐标从其输入尺寸按比例映射到输入特征图的尺寸。 + - **output_size** (int|Tuple(int, int)) - 池化后输出的尺寸(H, W), 数据类型为 int32. 
如果 output_size 是 int 类型,H 和 W 都与其相等。 + - **spatial_scale** (float,可选) - 空间比例因子,用于将 boxes 中的坐标从其输入尺寸按比例映射到输入特征图的尺寸。 形状 ::::::::: - - x: 4-D Tensor,形状为(N, C, H, W)。数据类型为float32或float64。 + - x: 4-D Tensor,形状为(N, C, H, W)。数据类型为 float32 或 float64。 - boxes: 2-D Tensor,形状为(num_rois, 4)。 - boxes_num: 1-D Tensor。 - - output: 4-D tensor,形状为(Roi数量,输出通道数,池化后高度,池化后宽度)。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 + - output: 4-D tensor,形状为(Roi 数量,输出通道数,池化后高度,池化后宽度)。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 返回 ::::::::: diff --git a/docs/api/paddle/vision/ops/RoIAlign_cn.rst b/docs/api/paddle/vision/ops/RoIAlign_cn.rst index 0b17461dc38..3458aeee637 100644 --- a/docs/api/paddle/vision/ops/RoIAlign_cn.rst +++ b/docs/api/paddle/vision/ops/RoIAlign_cn.rst @@ -9,15 +9,15 @@ RoIAlign 参数 ::::::::: - - output_size (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为int32。如果output_size是单个int类型整数,则H和W都与其相等。 - - spatial_scale (float,可选) - 空间比例因子,用于将boxes中的坐标从其输入尺寸按比例映射到输入特征图的尺寸,默认值1.0。 + - output_size (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为 int32。如果 output_size 是单个 int 类型整数,则 H 和 W 都与其相等。 + - spatial_scale (float,可选) - 空间比例因子,用于将 boxes 中的坐标从其输入尺寸按比例映射到输入特征图的尺寸,默认值 1.0。 形状 ::::::::: - - x: 4-D Tensor,形状为(N, C, H, W)。数据类型为float32或float64。 + - x: 4-D Tensor,形状为(N, C, H, W)。数据类型为 float32 或 float64。 - boxes: 2-D Tensor,形状为(boxes_num, 4)。 - - boxes_num: 1-D Tensor。数据类型为int32。 - - output: 4-D tensor,形状为(RoI数量,输出通道数,池化后高度,池化后宽度)。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 + - boxes_num: 1-D Tensor。数据类型为 int32。 + - output: 4-D tensor,形状为(RoI 数量,输出通道数,池化后高度,池化后宽度)。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 返回 ::::::::: diff --git a/docs/api/paddle/vision/ops/RoIPool_cn.rst b/docs/api/paddle/vision/ops/RoIPool_cn.rst index f784bae909b..b8320b74bec 100644 --- a/docs/api/paddle/vision/ops/RoIPool_cn.rst +++ b/docs/api/paddle/vision/ops/RoIPool_cn.rst @@ -9,15 +9,15 @@ RoIPool 参数 ::::::::: - - output_size (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为int32。如果output_size是int类型,H和W都与其相等。 - - spatial_scale (float,可选) - 
空间比例因子,用于将boxes中的坐标从其输入尺寸按比例映射到input特征图的尺寸,默认值1.0。 + - output_size (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为 int32。如果 output_size 是 int 类型,H 和 W 都与其相等。 + - spatial_scale (float,可选) - 空间比例因子,用于将 boxes 中的坐标从其输入尺寸按比例映射到 input 特征图的尺寸,默认值 1.0。 形状 ::::::::: - x: 4-D Tensor,形状为(N, C, H, W)。 - boxes: 2-D Tensor,形状为(num_rois, 4)。 - boxes_num: 1-D Tensor。 - - output: 4-D tensor,形状为(Roi数量,输出通道数,池化后高度,池化后宽度)。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 + - output: 4-D tensor,形状为(Roi 数量,输出通道数,池化后高度,池化后宽度)。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 返回 ::::::::: diff --git a/docs/api/paddle/vision/ops/deform_conv2d_cn.rst b/docs/api/paddle/vision/ops/deform_conv2d_cn.rst index 0f9b49ac599..99e6fe60558 100755 --- a/docs/api/paddle/vision/ops/deform_conv2d_cn.rst +++ b/docs/api/paddle/vision/ops/deform_conv2d_cn.rst @@ -5,17 +5,17 @@ deform_conv2d .. py:function:: paddle.vision.ops.deform_conv2d(x, offset, weight, bias=None, stride=1, padding=0, dilation=1, deformable_groups=1, groups=1, mask=None, name=None) -deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x,输出Tensor y,可变形卷积运算如下所示: +deform_conv2d 对输入 4-D Tensor 计算 2-D 可变形卷积。给定输入 Tensor x,输出 Tensor y,可变形卷积运算如下所示: -可形变卷积v2(mask != None): +可形变卷积 v2(mask != None): :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}` -可形变卷积v1(mask = None): +可形变卷积 v1(mask = None): :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}` -其中 :math:`\Delta p_k` 和 :math:`\Delta m_k` 分别为第k个位置的可学习偏移和调制标量。在deformable conv v1中 :math:`\Delta m_k` 为1。 +其中 :math:`\Delta p_k` 和 :math:`\Delta m_k` 分别为第 k 个位置的可学习偏移和调制标量。在 deformable conv v1 中 :math:`\Delta m_k` 为 1。 具体细节可以参考论文:`<> `_ 和 `<> `_ 。 @@ -47,21 +47,21 @@ deform_conv2d 对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x 参数 :::::::::::: - - **x** (Tensor) - 形状为 :math:`[N, C, H, W]` 的输入Tensor,数据类型为float32或float64。 - - **offset** (Tensor) – 可变形卷积层的输入坐标偏移,数据类型为float32或float64。 - - **weight** (Tensor) – 卷积核参数,形状为 :math:`[[M, C/g, kH, kW]`,其中 M 是输出通道数,g 是group组数,kH是卷积核高度尺寸,kW是卷积核宽度尺寸。数据类型为float32或float64。 + - **x** (Tensor) - 
形状为 :math:`[N, C, H, W]` 的输入 Tensor,数据类型为 float32 或 float64。 + - **offset** (Tensor) – 可变形卷积层的输入坐标偏移,数据类型为 float32 或 float64。 + - **weight** (Tensor) – 卷积核参数,形状为 :math:`[[M, C/g, kH, kW]`,其中 M 是输出通道数,g 是 group 组数,kH 是卷积核高度尺寸,kW 是卷积核宽度尺寸。数据类型为 float32 或 float64。 - **bias** (Tensor,选) - 可变形卷积偏置项,形状为 :math:`[M,]` 。 - **stride** (int|list|tuple,可选) - 步长大小。卷积核和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 - **padding** (int|list|tuple,可选) - 填充大小。卷积核操作填充大小。如果它是一个列表或元组,则必须包含两个整型数:(padding_height,padding_width)。若为一个整数,padding_height = padding_width = padding。默认值:0。 - **dilation** (int|list|tuple,可选) - 空洞大小。空洞卷积时会使用该参数,卷积核对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果空洞大小为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 - **deformable_groups** (int,可选) - 可变形卷积组数。默认值:1。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和卷积核分别根据通道数量平均分为n组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第n组卷积核和第n组输入进行卷积计算。默认值:1。 - - **mask** (Tensor,可选) – 可变形卷积层的输入掩码,当使用可变形卷积算子v1时,请将mask设置为None,数据类型为float32或float64。 + - **groups** (int,可选) - 二维卷积层的组数。根据 Alex Krizhevsky 的深度卷积神经网络(CNN)论文中的成组卷积:当 group=n,输入和卷积核分别根据通道数量平均分为 n 组,第一组卷积核和第一组输入进行卷积计算,第二组卷积核和第二组输入进行卷积计算,……,第 n 组卷积核和第 n 组输入进行卷积计算。默认值:1。 + - **mask** (Tensor,可选) – 可变形卷积层的输入掩码,当使用可变形卷积算子 v1 时,请将 mask 设置为 None,数据类型为 float32 或 float64。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -可变形卷积输出的4-D Tensor,数据类型为float32或float64。 +可变形卷积输出的 4-D Tensor,数据类型为 float32 或 float64。 代码示例 diff --git a/docs/api/paddle/vision/ops/nms_cn.rst b/docs/api/paddle/vision/ops/nms_cn.rst index a73004ac224..b2eef2922ab 100644 --- a/docs/api/paddle/vision/ops/nms_cn.rst +++ b/docs/api/paddle/vision/ops/nms_cn.rst @@ -11,25 +11,25 @@ nms IoU = \frac{intersection\_area(box1, box2)}{union\_area(box1, box2)} -如果参数scores不为None,输入的boxes会首先按照它们对应的score降序排序,否则将默认输入的boxes为排好序的。 +如果参数 scores 不为 
None,输入的 boxes 会首先按照它们对应的 score 降序排序,否则将默认输入的 boxes 为排好序的。 -如果category_idxs和categories不为None,分类NMS将会被执行,也就是说,nms过程会在每一个类别的框当中分别进行计算,计算结果会被组合起来然后按照得分倒序排列。 +如果 category_idxs 和 categories 不为 None,分类 NMS 将会被执行,也就是说,nms 过程会在每一个类别的框当中分别进行计算,计算结果会被组合起来然后按照得分倒序排列。 -如果top_k不为None的话,排序的计算结果中仅有前k个元素会被返回,否则会返回所有的元素。 +如果 top_k 不为 None 的话,排序的计算结果中仅有前 k 个元素会被返回,否则会返回所有的元素。 参数 ::::::::: - - boxes(Tensor) - 待进行计算的框坐标,它应当是一个形状为[num_boxes, 4]的2-D Tensor,以[[x1, y1, x2, y2], ...]的形式给出,数据类型可以是float32或float64,其中(x1, y1)是左上角的坐标值,(x2, y2)是右下角的坐标值,其关系应符合``0 <= x1 < x2 && 0 <= y1 < y2``。 - - iou_threshold(float32,可选) - 用于判断两个框是否重叠的IoU门限值。如果IoU(box1, box2) > threshold, box1和box2将被认为是重叠框。默认为:0.3。 - - scores(Tensor,可选) - 与boxes参数对应的score,它应当是一个形状为[num_boxes]的1-D Tensor。数据类型可以是float32或float64。默认为:None。 - - category_idxs(Tensor,可选) - 与boxes参数对应的类别编号,它应当是一个形状为[num_boxes]的1-D Tensor。数据类型为int64。默认为:None。 - - categories(List,可选) - 类别列表,它的每个元素应该是唯一的,满足categories == paddle.unique(class_idxs)。默认为:None。 - - top_k(int64,可选) - 需要返回的分数最高的boxes索引数量。该值须小于等于num_boxes。默认为:None。 + - boxes(Tensor) - 待进行计算的框坐标,它应当是一个形状为[num_boxes, 4]的 2-D Tensor,以[[x1, y1, x2, y2], ...]的形式给出,数据类型可以是 float32 或 float64,其中(x1, y1)是左上角的坐标值,(x2, y2)是右下角的坐标值,其关系应符合``0 <= x1 < x2 && 0 <= y1 < y2``。 + - iou_threshold(float32,可选) - 用于判断两个框是否重叠的 IoU 门限值。如果 IoU(box1, box2) > threshold, box1 和 box2 将被认为是重叠框。默认为:0.3。 + - scores(Tensor,可选) - 与 boxes 参数对应的 score,它应当是一个形状为[num_boxes]的 1-D Tensor。数据类型可以是 float32 或 float64。默认为:None。 + - category_idxs(Tensor,可选) - 与 boxes 参数对应的类别编号,它应当是一个形状为[num_boxes]的 1-D Tensor。数据类型为 int64。默认为:None。 + - categories(List,可选) - 类别列表,它的每个元素应该是唯一的,满足 categories == paddle.unique(class_idxs)。默认为:None。 + - top_k(int64,可选) - 需要返回的分数最高的 boxes 索引数量。该值须小于等于 num_boxes。默认为:None。 返回 ::::::::: - - Tensor - 被NMS保留的检测边界框的索引,它应当是一个形状为[num_boxes]的1-D Tensor。 + - Tensor - 被 NMS 保留的检测边界框的索引,它应当是一个形状为[num_boxes]的 1-D Tensor。 代码示例 diff --git a/docs/api/paddle/vision/ops/psroi_pool_cn.rst b/docs/api/paddle/vision/ops/psroi_pool_cn.rst 
index ff935d78704..59252cd113f 100644 --- a/docs/api/paddle/vision/ops/psroi_pool_cn.rst +++ b/docs/api/paddle/vision/ops/psroi_pool_cn.rst @@ -7,21 +7,21 @@ psroi_pool 位置敏感的兴趣区域池化(也称为 PSROIPooling),是在指定输入的感兴趣区域上执行位置敏感的平均池化。它在非均匀大小的输入上执行并获得固定大小的特征图。 -PSROIPooling由R-FCN提出。更多详细信息,请参阅 https://arxiv.org/abs/1605.06409。 +PSROIPooling 由 R-FCN 提出。更多详细信息,请参阅 https://arxiv.org/abs/1605.06409。 参数 ::::::::: - - **x** (Tensor) - 输入的特征图,形状为(N, C, H, W),数据类型为float32或float64。 - - **boxes** (Tensor) - 待执行池化的ROIs(Regions of Interest,感兴趣区域)的框坐标。它应当是一个形状为(num_rois, 4)的2-D Tensor,以[[x1, y1, x2, y2], ...]的形式给出。其中(x1, y1)是左上角的坐标值,(x2, y2)是右下角的坐标值。 - - **boxes_num** (Tensor) - 该batch中每一张图所包含的框数量。 - - **output_size** (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为int32。如果output_size是int类型,H和W都与其相等。 - - **spatial_scale** (float,可选) - 空间比例因子,用于将boxes中的坐标从其输入尺寸按比例映射到输入特征图的尺寸。 + - **x** (Tensor) - 输入的特征图,形状为(N, C, H, W),数据类型为 float32 或 float64。 + - **boxes** (Tensor) - 待执行池化的 ROIs(Regions of Interest,感兴趣区域)的框坐标。它应当是一个形状为(num_rois, 4)的 2-D Tensor,以[[x1, y1, x2, y2], ...]的形式给出。其中(x1, y1)是左上角的坐标值,(x2, y2)是右下角的坐标值。 + - **boxes_num** (Tensor) - 该 batch 中每一张图所包含的框数量。 + - **output_size** (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为 int32。如果 output_size 是 int 类型,H 和 W 都与其相等。 + - **spatial_scale** (float,可选) - 空间比例因子,用于将 boxes 中的坐标从其输入尺寸按比例映射到输入特征图的尺寸。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - 4-D Tensor。池化后的ROIs,其形状是(Roi数量,输出通道数,池化后高度,池化后宽度)。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 + 4-D Tensor。池化后的 ROIs,其形状是(Roi 数量,输出通道数,池化后高度,池化后宽度)。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 代码示例 ::::::::: diff --git a/docs/api/paddle/vision/ops/roi_align_cn.rst b/docs/api/paddle/vision/ops/roi_align_cn.rst index f737043bb80..4ded1ffd983 100644 --- a/docs/api/paddle/vision/ops/roi_align_cn.rst +++ b/docs/api/paddle/vision/ops/roi_align_cn.rst @@ -5,24 +5,24 @@ roi_align .. 
py:function:: paddle.vision.ops.roi_align(x, boxes, boxes_num, output_size, spatial_scale=1.0, sampling_ratio=-1, aligned=True, name=None) -RoI Align是在指定输入的感兴趣区域上执行双线性插值以获得固定大小的特征图(例如7*7),如 Mask R-CNN论文中所述。 +RoI Align 是在指定输入的感兴趣区域上执行双线性插值以获得固定大小的特征图(例如 7*7),如 Mask R-CNN 论文中所述。 论文参考:`Mask R-CNN `_ 。 参数 ::::::::: - - **x** (Tensor) - 输入的特征图,形状为(N, C, H, W)。数据类型为float32或float64。 - - **boxes** (Tensor) - 待执行池化的RoIs(Regions of Interest)的框坐标。它应当是一个形状为(boxes_num, 4)的2-D Tensor,以[[x1, y1, x2, y2], ...]的形式给出。其中(x1, y1)是左上角的坐标值,(x2, y2)是右下角的坐标值。 - - **boxes_num** (Tensor) - 该batch中每一张图所包含的框数量。数据类型为int32。 - - **output_size** (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为int32。如果output_size是单个int类型整数,则H和W都与其相等。 - - **spatial_scale** (float32,可选) - 空间比例因子,用于将boxes中的坐标从其输入尺寸按比例映射到input特征图的尺寸。 - - **sampling_ratio** (int32,可选) – 插值网格中用于计算每个池化输出条柱的输出值的采样点数。如果大于0,则使用每个条柱的精确采样点。如果小于或等于0,则使用自适应数量的网格点(计算为 ``ceil(roi_width / output_width)``,高度同理)。默认值:-1。 - - **aligned** (bool,可选)- 默认值为True,表示像素移动框将其坐标移动-0.5,以便与两个相邻像素索引更好地对齐。如果为False,则是使用遗留版本的实现。 + - **x** (Tensor) - 输入的特征图,形状为(N, C, H, W)。数据类型为 float32 或 float64。 + - **boxes** (Tensor) - 待执行池化的 RoIs(Regions of Interest)的框坐标。它应当是一个形状为(boxes_num, 4)的 2-D Tensor,以[[x1, y1, x2, y2], ...]的形式给出。其中(x1, y1)是左上角的坐标值,(x2, y2)是右下角的坐标值。 + - **boxes_num** (Tensor) - 该 batch 中每一张图所包含的框数量。数据类型为 int32。 + - **output_size** (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为 int32。如果 output_size 是单个 int 类型整数,则 H 和 W 都与其相等。 + - **spatial_scale** (float32,可选) - 空间比例因子,用于将 boxes 中的坐标从其输入尺寸按比例映射到 input 特征图的尺寸。 + - **sampling_ratio** (int32,可选) – 插值网格中用于计算每个池化输出条柱的输出值的采样点数。如果大于 0,则使用每个条柱的精确采样点。如果小于或等于 0,则使用自适应数量的网格点(计算为 ``ceil(roi_width / output_width)``,高度同理)。默认值:-1。 + - **aligned** (bool,可选)- 默认值为 True,表示像素移动框将其坐标移动-0.5,以便与两个相邻像素索引更好地对齐。如果为 False,则是使用遗留版本的实现。 - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - Tensor,池化后的RoIs,为一个形状是(RoI数量,输出通道数,池化后高度,池化后宽度)的4-D Tensor。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 + Tensor,池化后的 RoIs,为一个形状是(RoI 
数量,输出通道数,池化后高度,池化后宽度)的 4-D Tensor。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 代码示例 ::::::::: diff --git a/docs/api/paddle/vision/ops/roi_pool_cn.rst b/docs/api/paddle/vision/ops/roi_pool_cn.rst index da599738ced..eb345f8a866 100644 --- a/docs/api/paddle/vision/ops/roi_pool_cn.rst +++ b/docs/api/paddle/vision/ops/roi_pool_cn.rst @@ -11,16 +11,16 @@ roi_pool 参数 ::::::::: - x (Tensor) - 输入的特征图,形状为(N, C, H, W)。 - - boxes (Tensor) - 待执行池化的ROIs(Regions of Interest,感兴趣区域)的框坐标。它应当是一个形状为(num_rois, 4)的2-D Tensor,以[[x1, y1, x2, y2], ...]的形式给出。其中(x1, y1)是左上角的坐标值,(x2, y2)是右下角的坐标值。 - - boxes_num (Tensor) - 该batch中每一张图所包含的框数量。 - - output_size (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为int32。如果output_size是int类型,H和W都与其相等。 - - spatial_scale (float,可选) - 空间比例因子,用于将boxes中的坐标从其输入尺寸按比例映射到input特征图的尺寸,默认值1.0。 + - boxes (Tensor) - 待执行池化的 ROIs(Regions of Interest,感兴趣区域)的框坐标。它应当是一个形状为(num_rois, 4)的 2-D Tensor,以[[x1, y1, x2, y2], ...]的形式给出。其中(x1, y1)是左上角的坐标值,(x2, y2)是右下角的坐标值。 + - boxes_num (Tensor) - 该 batch 中每一张图所包含的框数量。 + - output_size (int|Tuple(int, int)) - 池化后输出的尺寸(H, W),数据类型为 int32。如果 output_size 是 int 类型,H 和 W 都与其相等。 + - spatial_scale (float,可选) - 空间比例因子,用于将 boxes 中的坐标从其输入尺寸按比例映射到 input 特征图的尺寸,默认值 1.0。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 ::::::::: - Tensor,为池化后的ROIs,为一个形状是(Roi数量,输出通道数,池化后高度,池化后宽度)的4-D Tensor。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 + Tensor,为池化后的 ROIs,为一个形状是(Roi 数量,输出通道数,池化后高度,池化后宽度)的 4-D Tensor。输出通道数等于输入通道数/(池化后高度 * 池化后宽度)。 代码示例 diff --git a/docs/api/paddle/vision/ops/yolo_box_cn.rst b/docs/api/paddle/vision/ops/yolo_box_cn.rst index dce1f90eae4..92751f93e3b 100644 --- a/docs/api/paddle/vision/ops/yolo_box_cn.rst +++ b/docs/api/paddle/vision/ops/yolo_box_cn.rst @@ -5,11 +5,11 @@ yolo_box .. 
py:function:: paddle.vision.ops.yolo_box(x, img_size, anchors, class_num, conf_thresh, downsample_ratio, clip_bbox=True, name=None, scale_x_y=1.0) -该运算符基于YOLOv3网络的输出结果,生成YOLO检测框。 +该运算符基于 YOLOv3 网络的输出结果,生成 YOLO 检测框。 -连接 yolo_box 网络的输出形状应为[N,C,H,W],其中 H 和 W 相同,用来指定网格大小。对每个网格点预测给定的数目的框,这个数目记为 S,由 anchor 的数量指定。在第二维(通道维度)中,C应该等于S *(5 + class_num),class_num是源数据集中对象类别数目(例如coco数据集中的80),此外第二个(通道)维度中还有4个框位置坐标x,y,w,h,以及anchor box的one-hot key的置信度得分。 +连接 yolo_box 网络的输出形状应为[N,C,H,W],其中 H 和 W 相同,用来指定网格大小。对每个网格点预测给定的数目的框,这个数目记为 S,由 anchor 的数量指定。在第二维(通道维度)中,C 应该等于 S *(5 + class_num),class_num 是源数据集中对象类别数目(例如 coco 数据集中的 80),此外第二个(通道)维度中还有 4 个框位置坐标 x,y,w,h,以及 anchor box 的 one-hot key 的置信度得分。 -假设4个位置坐标是 :math:`t_x` ,:math:`t_y` ,:math:`t_w` , :math:`t_h`,则框的预测算法为: +假设 4 个位置坐标是 :math:`t_x` ,:math:`t_y` ,:math:`t_w` , :math:`t_h`,则框的预测算法为: .. math:: @@ -18,9 +18,9 @@ yolo_box b_w &= p_w e^{t_w}\\ b_h &= p_h e^{t_h}\\ -在上面的等式中,:math:`c_x` , :math:`c_x` 是当前网格的左上角顶点坐标。:math:`p_w` , :math:`p_h` 由anchors指定。 +在上面的等式中,:math:`c_x` , :math:`c_x` 是当前网格的左上角顶点坐标。:math:`p_w` , :math:`p_h` 由 anchors 指定。 -每个anchor预测框的第五通道的逻辑回归值表示每个预测框的置信度得分,并且每个anchor预测框的最后class_num通道的逻辑回归值表示分类得分。应忽略置信度低于conf_thresh的框。另外,框最终得分是置信度得分和分类得分的乘积。 +每个 anchor 预测框的第五通道的逻辑回归值表示每个预测框的置信度得分,并且每个 anchor 预测框的最后 class_num 通道的逻辑回归值表示分类得分。应忽略置信度低于 conf_thresh 的框。另外,框最终得分是置信度得分和分类得分的乘积。 .. 
math:: @@ -30,13 +30,13 @@ yolo_box 参数 ::::::::: - - **x** (Tensor)- YoloBox的输入张量是一个4-D张量,形状为[N,C,H,W]。第二维(C)存储每个anchor box位置坐标,每个anchor box的置信度分数和one hot key。通常,X应该是YOLOv3网络的输出。数据类型为float32或float64。 - - **img_size** (Tensor)- YoloBox的图像大小张量,这是一个形状为[N,2]的二维张量。该张量保持每个输入图像的高度和宽度,用于对输出图像按输入图像比例调整输出框的大小。数据类型为int32。 - - **anchors** (list | tuple) - anchor的宽度和高度,它将逐对解析。 + - **x** (Tensor)- YoloBox 的输入张量是一个 4-D 张量,形状为[N,C,H,W]。第二维(C)存储每个 anchor box 位置坐标,每个 anchor box 的置信度分数和 one hot key。通常,X 应该是 YOLOv3 网络的输出。数据类型为 float32 或 float64。 + - **img_size** (Tensor)- YoloBox 的图像大小张量,这是一个形状为[N,2]的二维张量。该张量保持每个输入图像的高度和宽度,用于对输出图像按输入图像比例调整输出框的大小。数据类型为 int32。 + - **anchors** (list | tuple) - anchor 的宽度和高度,它将逐对解析。 - **class_num** (int)- 要预测的类数。 - **conf_thresh** (float)- 检测框的置信度得分阈值。置信度得分低于阈值的框应该被忽略。 - - **downsample_ratio** (int)- 从网络输入到YoloBox操作输入的下采样率,因此应依次为第一个,第二个和第三个YoloBox运算设置该值为32,16,8 - - **clip_bbox** (bool,可选)- 是否将输出的bbox裁剪到 :attr:`img_size` 范围内,默认为True。 + - **downsample_ratio** (int)- 从网络输入到 YoloBox 操作输入的下采样率,因此应依次为第一个,第二个和第三个 YoloBox 运算设置该值为 32,16,8 + - **clip_bbox** (bool,可选)- 是否将输出的 bbox 裁剪到 :attr:`img_size` 范围内,默认为 True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - **scale_x_y** (float,可选) - 放缩解码边界框的中心点,默认值:1.0。 diff --git a/docs/api/paddle/vision/ops/yolo_loss_cn.rst b/docs/api/paddle/vision/ops/yolo_loss_cn.rst index 60c68857f8d..9782fdab480 100644 --- a/docs/api/paddle/vision/ops/yolo_loss_cn.rst +++ b/docs/api/paddle/vision/ops/yolo_loss_cn.rst @@ -5,9 +5,9 @@ yolo_loss .. 
py:function:: paddle.vision.ops.yolo_loss(x, gt_box, gt_label, anchors, anchor_mask, class_num, ignore_thresh, downsample_ratio, gt_score=None, use_label_smooth=True, name=None, scale_x_y=1.0) -该运算通过给定的预测结果和真实框计算yolov3损失。 +该运算通过给定的预测结果和真实框计算 yolov3 损失。 -yolov3 loss前的网络输出形状为[N,C,H,W],H和W应该相同,用来指定网格(grid)大小。每个网格点预测S个边界框(bounding boxes),S由每个尺度中 ``anchors`` 簇的个数指定。在第二维(表示通道的维度)中,C的值应为S *(class_num + 5),class_num是源数据集的对象种类数(如coco中为80),另外,除了存储4个边界框位置坐标x,y,w,h,还包括边界框以及每个anchor框的one-hot关键字的置信度得分。 +yolov3 loss 前的网络输出形状为[N,C,H,W],H 和 W 应该相同,用来指定网格(grid)大小。每个网格点预测 S 个边界框(bounding boxes),S 由每个尺度中 ``anchors`` 簇的个数指定。在第二维(表示通道的维度)中,C 的值应为 S *(class_num + 5),class_num 是源数据集的对象种类数(如 coco 中为 80),另外,除了存储 4 个边界框位置坐标 x,y,w,h,还包括边界框以及每个 anchor 框的 one-hot 关键字的置信度得分。 假设有四个表征位置的坐标为 :math:`t_x, t_y, t_w, t_h`,那么边界框的预测将会如下定义: $$ @@ -23,14 +23,14 @@ yolov3 loss前的网络输出形状为[N,C,H,W],H和W应该相同,用 b_h = p_h e^{t_h} $$ -在上面的等式中,:math:`c_x, c_y` 是当前网格的左上角,:math:`p_w, p_h` 由anchors指定。 -置信度得分是anchor框和真实框之间的IoU的逻辑回归值,anchor框的得分最高为1,此时该anchor框对应着最大IoU。 -如果anchor框之间的IoU大于忽略阀值ignore_thresh,则该anchor框的置信度评分损失将会被忽略。 +在上面的等式中,:math:`c_x, c_y` 是当前网格的左上角,:math:`p_w, p_h` 由 anchors 指定。 +置信度得分是 anchor 框和真实框之间的 IoU 的逻辑回归值,anchor 框的得分最高为 1,此时该 anchor 框对应着最大 IoU。 +如果 anchor 框之间的 IoU 大于忽略阀值 ignore_thresh,则该 anchor 框的置信度评分损失将会被忽略。 -因此,yolov3损失包括三个主要部分,框位置损失,目标性损失,分类损失。L1损失用于 -框坐标(w,h),同时,sigmoid交叉熵损失用于框坐标(x,y),目标性损失和分类损失。 +因此,yolov3 损失包括三个主要部分,框位置损失,目标性损失,分类损失。L1 损失用于 +框坐标(w,h),同时,sigmoid 交叉熵损失用于框坐标(x,y),目标性损失和分类损失。 -每个真实框将在所有anchor中找到最匹配的anchor,对该anchor的预测将会计算全部(三种)损失,但是没有匹配GT box(ground truth box真实框)的anchor的预测只会产生目标性损失。 +每个真实框将在所有 anchor 中找到最匹配的 anchor,对该 anchor 的预测将会计算全部(三种)损失,但是没有匹配 GT box(ground truth box 真实框)的 anchor 的预测只会产生目标性损失。 为了权衡大框(box)和小(box)之间的框坐标损失,框坐标损失将与比例权重相乘而得。即: @@ -38,40 +38,40 @@ yolov3 loss前的网络输出形状为[N,C,H,W],H和W应该相同,用 weight_{box} = 2.0 - t_w * t_h $$ -最后的loss值将如下计算: +最后的 loss 值将如下计算: $$ loss = (loss_{xy} + loss_{wh}) * weight_{box} + loss_{conf} + loss_{class} $$ -当 ``use_label_smooth`` 为 ``True`` 
时,在计算分类损失时将平滑分类目标,将正样本的目标平滑到1.0-1.0 / class_num,并将负样本的目标平滑到1.0 / class_num。 +当 ``use_label_smooth`` 为 ``True`` 时,在计算分类损失时将平滑分类目标,将正样本的目标平滑到 1.0-1.0 / class_num,并将负样本的目标平滑到 1.0 / class_num。 -``GTScore`` (如果存在)表示真实框的mixup得分,那么真实框所产生的所有损失需要乘上GTScore。 +``GTScore`` (如果存在)表示真实框的 mixup 得分,那么真实框所产生的所有损失需要乘上 GTScore。 参数 :::::::::::: - - **x** (Tensor) - YOLOv3损失运算的输入张量,这是一个形状为[N,C,H,W]的四维Tensor。H和W应该相同,第二维(C)存储框的位置信息,以及每个anchor box的置信度得分和one-hot分类。数据类型为float32或float64。 - - **gt_box** (Tensor) - 真实框,应该是[N,B,4]的形状。第三维用来承载x、y、w、h,其中 x, y是真实框的中心坐标,w, h是框的宽度和高度,且x、y、w、h将除以输入图片的尺寸,缩放到[0,1]区间内。N是batch size,B是图像中所含有的的最多的box数目。数据类型为float32或float64。 - - **gt_label** (Tensor) - 真实框的类id,应该形为[N,B]。数据类型为int32。 - - **anchors** (list|tuple) - 指定anchor框的宽度和高度,它们将逐对进行解析 - - **anchor_mask** (list|tuple) - 当前YOLOv3损失计算中使用anchor的mask索引 + - **x** (Tensor) - YOLOv3 损失运算的输入张量,这是一个形状为[N,C,H,W]的四维 Tensor。H 和 W 应该相同,第二维(C)存储框的位置信息,以及每个 anchor box 的置信度得分和 one-hot 分类。数据类型为 float32 或 float64。 + - **gt_box** (Tensor) - 真实框,应该是[N,B,4]的形状。第三维用来承载 x、y、w、h,其中 x, y 是真实框的中心坐标,w, h 是框的宽度和高度,且 x、y、w、h 将除以输入图片的尺寸,缩放到[0,1]区间内。N 是 batch size,B 是图像中所含有的的最多的 box 数目。数据类型为 float32 或 float64。 + - **gt_label** (Tensor) - 真实框的类 id,应该形为[N,B]。数据类型为 int32。 + - **anchors** (list|tuple) - 指定 anchor 框的宽度和高度,它们将逐对进行解析 + - **anchor_mask** (list|tuple) - 当前 YOLOv3 损失计算中使用 anchor 的 mask 索引 - **class_num** (int) - 要预测的类别数 - **ignore_thresh** (float) - 一定条件下忽略某框置信度损失的忽略阈值 - - **downsample_ratio** (int) - 网络输入到YOLOv3 loss输入的下采样率,因此第一,第二和第三个 loss 的下采样率应分别为32,16,8 - - **gt_score** (Tensor)- 真实框的混合得分,形为[N,B]。默认None。数据类型为float32或float64。 - - **use_label_smooth** (bool)- 是否使用平滑标签。默认为True + - **downsample_ratio** (int) - 网络输入到 YOLOv3 loss 输入的下采样率,因此第一,第二和第三个 loss 的下采样率应分别为 32,16,8 + - **gt_score** (Tensor)- 真实框的混合得分,形为[N,B]。默认 None。数据类型为 float32 或 float64。 + - **use_label_smooth** (bool)- 是否使用平滑标签。默认为 True - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 - - **scale_x_y** (float,可选) - 缩放解码边界框的中心点。默认值1.0 。 + - 
**scale_x_y** (float,可选) - 缩放解码边界框的中心点。默认值 1.0 。 返回 ::::::::: -Tensor,yolov3损失的值,具有形状[N]的1-D Tensor。 +Tensor,yolov3 损失的值,具有形状[N]的 1-D Tensor。 代码示例 diff --git a/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst b/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst index 63685b12246..68346ea6bb8 100644 --- a/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst @@ -10,7 +10,7 @@ BrightnessTransform 参数 ::::::::: - - value (float) - 亮度调整范围大小,会从给定参数后的均匀分布[max(0,1 - brightness), 1 + brightness]中随机选择进行实际调整,可以是任何非负数。参数等于0时输出原始图像。 + - value (float) - 亮度调整范围大小,会从给定参数后的均匀分布[max(0,1 - brightness), 1 + brightness]中随机选择进行实际调整,可以是任何非负数。参数等于 0 时输出原始图像。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 diff --git a/docs/api/paddle/vision/transforms/ColorJitter_cn.rst b/docs/api/paddle/vision/transforms/ColorJitter_cn.rst index faeed88a28f..5f0ff8a9b80 100644 --- a/docs/api/paddle/vision/transforms/ColorJitter_cn.rst +++ b/docs/api/paddle/vision/transforms/ColorJitter_cn.rst @@ -13,7 +13,7 @@ ColorJitter - brightness(float) - 亮度调整范围大小,会从给定参数后的均匀分布[max(0,1 - brightness), 1 + brightness]中随机选择进行实际调整,不能是负数。 - contrast(float) - 对比度调整范围大小,,会从给定参数后的均匀分布[max(0,1 - contrast), 1 + contrast]中随机选择进行实际调整,不能是负数。 - saturation(float) - 饱和度调整范围大小,,会从给定参数后的均匀分布[max(0,1 - saturation), 1 + saturation]中随机选择进行实际调整,不能是负数。 - - hue(float) - 色调调整范围大小,会从给定参数后的均匀分布[-hue, hue]中随机选择进行实际调整,参数值需要在0到0.5之间。 + - hue(float) - 色调调整范围大小,会从给定参数后的均匀分布[-hue, hue]中随机选择进行实际调整,参数值需要在 0 到 0.5 之间。 - keys (list[str]|tuple[str], optional) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 diff --git a/docs/api/paddle/vision/transforms/Compose_cn.rst b/docs/api/paddle/vision/transforms/Compose_cn.rst index 7a1faea0253..05250b47cf1 100644 --- a/docs/api/paddle/vision/transforms/Compose_cn.rst +++ b/docs/api/paddle/vision/transforms/Compose_cn.rst @@ -15,7 +15,7 @@ Compose 返回 ::::::::: - 一个可调用的Compose对象,它将依次调用每个给定的 
:attr:`transforms`。 + 一个可调用的 Compose 对象,它将依次调用每个给定的 :attr:`transforms`。 代码示例 ::::::::: diff --git a/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst b/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst index 0e56a42d020..5ee4d9354af 100644 --- a/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst @@ -10,7 +10,7 @@ ContrastTransform 参数 ::::::::: - - value (float) - 对比度调整范围大小,会从给定参数后的均匀分布[max(0,1 - contrast), 1 + contrast]中随机选择进行实际调整,不能是负数。参数值为0时返回原图像。 + - value (float) - 对比度调整范围大小,会从给定参数后的均匀分布[max(0,1 - contrast), 1 + contrast]中随机选择进行实际调整,不能是负数。参数值为 0 时返回原图像。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 diff --git a/docs/api/paddle/vision/transforms/Grayscale_cn.rst b/docs/api/paddle/vision/transforms/Grayscale_cn.rst index 3f74f4c8ef7..f60c73505dd 100644 --- a/docs/api/paddle/vision/transforms/Grayscale_cn.rst +++ b/docs/api/paddle/vision/transforms/Grayscale_cn.rst @@ -10,14 +10,14 @@ Grayscale 参数 ::::::::: - - num_output_channels (int,可选) - 输出图像的通道数,参数值为1或3。默认值:1。 + - num_output_channels (int,可选) - 输出图像的通道数,参数值为 1 或 3。默认值:1。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 ::::::::: - img (PIL.Image|np.ndarray|Paddle.Tensor) - 输入的图像数据,数据格式为'HWC'。 - - output (PIL.Image|np.ndarray|Paddle.Tensor) - 返回输入图像的灰度版本。如果 output_channels == 1,返回一个单通道图像。如果 output_channels == 3,返回一个3通道图像,其中RGB三个通道值一样。 + - output (PIL.Image|np.ndarray|Paddle.Tensor) - 返回输入图像的灰度版本。如果 output_channels == 1,返回一个单通道图像。如果 output_channels == 3,返回一个 3 通道图像,其中 RGB 三个通道值一样。 返回 ::::::::: diff --git a/docs/api/paddle/vision/transforms/HueTransform_cn.rst b/docs/api/paddle/vision/transforms/HueTransform_cn.rst index ea7139b157a..0bf7c0a3d17 100644 --- a/docs/api/paddle/vision/transforms/HueTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/HueTransform_cn.rst @@ -10,7 +10,7 @@ HueTransform 参数 ::::::::: - - value (float) - 色调调整范围大小,,会从给定参数后的均匀分布[-hue, 
hue]中随机选择进行实际调整,参数值需要在0到0.5之间,参数值为0时返回原始图像。 + - value (float) - 色调调整范围大小,,会从给定参数后的均匀分布[-hue, hue]中随机选择进行实际调整,参数值需要在 0 到 0.5 之间,参数值为 0 时返回原始图像。 - keys (list[str]|tuple[str], optional) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 diff --git a/docs/api/paddle/vision/transforms/Pad_cn.rst b/docs/api/paddle/vision/transforms/Pad_cn.rst index 5e22c328e7e..8ccb9134033 100644 --- a/docs/api/paddle/vision/transforms/Pad_cn.rst +++ b/docs/api/paddle/vision/transforms/Pad_cn.rst @@ -11,9 +11,9 @@ pad ::::::::: - img (PIL.Image|np.ndarray) - 被填充的图像。 - - padding (int|list|tuple) - 在图像边界上进行填充的范围。如果提供的是单个int值,则该值用于填充图像所有边;如果提供的是长度为2的元组/列表,则分别为图像左/右和顶部/底部进行填充;如果提供的是长度为4的元组/列表,则按照左,上,右和下的顺序为图像填充。 - - fill (int|tuple) - 用于填充的像素值。仅当padding_mode为constant时参数值有效。默认值:0。如果参数值是一个长度为3的元组,则会分别用于填充R,G,B通道。 - - padding_mode (string) - 填充模式。支持:constant, edge, reflect 或 symmetric。默认值:constant。 ``constant`` 表示使用常量值进行填充,该值由fill参数指定。``edge`` 表示使用图像边缘像素值进行填充。``reflect`` 表示使用原图像的镜像值进行填充(不使用边缘上的值);比如:使用该模式对 ``[1, 2, 3, 4]`` 的两端分别填充2个值,结果是 ``[3, 2, 1, 2, 3, 4, 3, 2]``。``symmetric`` 表示使用原图像的镜像值进行填充(使用边缘上的值);比如:使用该模式对 ``[1, 2, 3, 4]`` 的两端分别填充2个值,结果是 ``[2, 1, 1, 2, 3, 4, 4, 3]``。 + - padding (int|list|tuple) - 在图像边界上进行填充的范围。如果提供的是单个 int 值,则该值用于填充图像所有边;如果提供的是长度为 2 的元组/列表,则分别为图像左/右和顶部/底部进行填充;如果提供的是长度为 4 的元组/列表,则按照左,上,右和下的顺序为图像填充。 + - fill (int|tuple) - 用于填充的像素值。仅当 padding_mode 为 constant 时参数值有效。默认值:0。如果参数值是一个长度为 3 的元组,则会分别用于填充 R,G,B 通道。 + - padding_mode (string) - 填充模式。支持:constant, edge, reflect 或 symmetric。默认值:constant。 ``constant`` 表示使用常量值进行填充,该值由 fill 参数指定。``edge`` 表示使用图像边缘像素值进行填充。``reflect`` 表示使用原图像的镜像值进行填充(不使用边缘上的值);比如:使用该模式对 ``[1, 2, 3, 4]`` 的两端分别填充 2 个值,结果是 ``[3, 2, 1, 2, 3, 4, 3, 2]``。``symmetric`` 表示使用原图像的镜像值进行填充(使用边缘上的值);比如:使用该模式对 ``[1, 2, 3, 4]`` 的两端分别填充 2 个值,结果是 ``[2, 1, 1, 2, 3, 4, 4, 3]``。 返回 ::::::::: diff --git a/docs/api/paddle/vision/transforms/RandomCrop_cn.rst b/docs/api/paddle/vision/transforms/RandomCrop_cn.rst index f8766d976b5..c84cd2425da 100644 --- 
a/docs/api/paddle/vision/transforms/RandomCrop_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomCrop_cn.rst @@ -10,11 +10,11 @@ RandomCrop 参数 ::::::::: - - **size** (sequence|int) - 裁剪后的图片大小。如果size是一个int值,而不是(h, w)这样的序列,那么会做一个方形的裁剪(size, size)。 - - **padding** (int|sequence,可选) - 对图像四周外边进行填充,如果提供了长度为4的序列,则将其分别用于填充左边界,上边界,右边界和下边界。默认值:None,不填充。 + - **size** (sequence|int) - 裁剪后的图片大小。如果 size 是一个 int 值,而不是(h, w)这样的序列,那么会做一个方形的裁剪(size, size)。 + - **padding** (int|sequence,可选) - 对图像四周外边进行填充,如果提供了长度为 4 的序列,则将其分别用于填充左边界,上边界,右边界和下边界。默认值:None,不填充。 - **pad_if_needed** (boolean,可选) - 如果裁剪后的图像小于期望的大小时,是否对裁剪后的图像进行填充,以避免引发异常,默认值:False,保持初次裁剪后的大小,不填充。 - - **fill** (float|tuple,可选) - 用于填充的像素值。仅当padding_mode为constant时参数值有效。默认值:0。如果参数值是一个长度为3的元组,则会分别用于填充R,G,B通道。 - - **padding_mode** (string,可选) - 填充模式。支持:constant, edge, reflect 或 symmetric。默认值:constant。``constant`` 表示使用常量值进行填充,该值由fill参数指定。``edge`` 表示使用图像边缘像素值进行填充。``reflect`` 表示使用原图像的镜像值进行填充(不使用边缘上的值);比如:使用该模式对 [1, 2, 3, 4] 的两端分别填充2个值,结果是 [3, 2, 1, 2, 3, 4, 3, 2]。``symmetric`` 表示使用原图像的镜像值进行填充(使用边缘上的值);比如:使用该模式对 [1, 2, 3, 4] 的两端分别填充2个值,结果是 [2, 1, 1, 2, 3, 4, 4, 3]。 + - **fill** (float|tuple,可选) - 用于填充的像素值。仅当 padding_mode 为 constant 时参数值有效。默认值:0。如果参数值是一个长度为 3 的元组,则会分别用于填充 R,G,B 通道。 + - **padding_mode** (string,可选) - 填充模式。支持:constant, edge, reflect 或 symmetric。默认值:constant。``constant`` 表示使用常量值进行填充,该值由 fill 参数指定。``edge`` 表示使用图像边缘像素值进行填充。``reflect`` 表示使用原图像的镜像值进行填充(不使用边缘上的值);比如:使用该模式对 [1, 2, 3, 4] 的两端分别填充 2 个值,结果是 [3, 2, 1, 2, 3, 4, 3, 2]。``symmetric`` 表示使用原图像的镜像值进行填充(使用边缘上的值);比如:使用该模式对 [1, 2, 3, 4] 的两端分别填充 2 个值,结果是 [2, 1, 1, 2, 3, 4, 4, 3]。 - **keys** (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 diff --git a/docs/api/paddle/vision/transforms/RandomErasing_cn.rst b/docs/api/paddle/vision/transforms/RandomErasing_cn.rst index 7f0f2b28cf1..8650b26373b 100644 --- a/docs/api/paddle/vision/transforms/RandomErasing_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomErasing_cn.rst @@ -13,14 +13,14 @@ RandomErasing - 
prob (float,可选) - 输入数据被执行擦除操作的概率。默认值:0.5。 - scale (sequence,可选) - 擦除区域面积在输入图像的中占比范围。默认值:(0.02, 0.33)。 - ratio (sequence,可选) - 擦除区域的纵横比范围。默认值:(0.3, 3.3)。 - - value (int|float|sequence|str,可选) - 擦除区域中像素将被替换为的值。如果value是一个数,所有的像素都将被替换为这个数。如果value是长为3的序列,R,G,B通道将被对应地替换。如果value是"random",每个像素会被替换为随机值。默认值:0。 + - value (int|float|sequence|str,可选) - 擦除区域中像素将被替换为的值。如果 value 是一个数,所有的像素都将被替换为这个数。如果 value 是长为 3 的序列,R,G,B 通道将被对应地替换。如果 value 是"random",每个像素会被替换为随机值。默认值:0。 - inplace (bool,可选) - 该变换是否在原地操作。默认值:False。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 ::::::::: - - img (paddle.Tensor|np.array|PIL.Image) - 输入的图像数据。对于Tensor类型的输入,形状需要为(C, H, W)。对于np.array类型的输入,形状为(H, W, C)。 + - img (paddle.Tensor|np.array|PIL.Image) - 输入的图像数据。对于 Tensor 类型的输入,形状需要为(C, H, W)。对于 np.array 类型的输入,形状为(H, W, C)。 - output (paddle.Tensor|np.array|PIL.Image) - 返回随机擦除后的图像数据。 返回 diff --git a/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst b/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst index d2ed45d0306..d23bc6004fc 100644 --- a/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst @@ -10,7 +10,7 @@ RandomHorizontalFlip 参数 ::::::::: - - prob (float) - 图片执行水平翻转的概率,取值范围为[0, 1],默认值为0.5。 + - prob (float) - 图片执行水平翻转的概率,取值范围为[0, 1],默认值为 0.5。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 diff --git a/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst b/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst index 0f9d730b420..cbbe9b1d7a1 100644 --- a/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst @@ -6,13 +6,13 @@ RandomResizedCrop .. py:class:: paddle.vision.transforms.RandomResizedCrop(size, scale=(0.08, 1.0), ratio=(3. / 4, 4. 
/ 3), interpolation='bilinear', keys=None) 将输入图像按照随机大小和长宽比进行裁剪。 -会根据参数生成基于原图像的随机比例(默认值:0.08至1.0)和随机宽高比(默认值:3./4至4./3)。 +会根据参数生成基于原图像的随机比例(默认值:0.08 至 1.0)和随机宽高比(默认值:3./4 至 4./3)。 经过此接口操作后,输入图像将调整为参数指定大小。 参数 ::::::::: - - size (int|list|tuple) - 输出图像大小,当为单个int值时,生成指定size大小的方形图片,为(height,width)格式的数组或元组时按照参数大小输出。 + - size (int|list|tuple) - 输出图像大小,当为单个 int 值时,生成指定 size 大小的方形图片,为(height,width)格式的数组或元组时按照参数大小输出。 - scale (list|tuple) - 相对于原图的尺寸,随机裁剪后图像大小的范围。默认值:(0.08,1.0)。 - ratio (list|tuple) - 裁剪后的目标图像宽高比范围,默认值:(0.75, 1.33)。 - interpolation (int|str,可选) - 插值的方法。默认值:'bilinear'。当使用 ``pil`` 作为后端时,支持的插值方法如下:- "nearest": Image.NEAREST, - "bilinear": Image.BILINEAR, - "bicubic": Image.BICUBIC, - "box": Image.BOX, - "lanczos": Image.LANCZOS, - "hamming": Image.HAMMING。当使用 ``cv2`` 作为后端时,支持的插值方法如下:- "nearest": cv2.INTER_NEAREST, - "bilinear": cv2.INTER_LINEAR, - "area": cv2.INTER_AREA, - "bicubic": cv2.INTER_CUBIC, - "lanczos": cv2.INTER_LANCZOS4。 diff --git a/docs/api/paddle/vision/transforms/RandomRotation_cn.rst b/docs/api/paddle/vision/transforms/RandomRotation_cn.rst index 8ccf7487108..153df3974de 100644 --- a/docs/api/paddle/vision/transforms/RandomRotation_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomRotation_cn.rst @@ -5,13 +5,13 @@ RandomRotate .. 
py:class:: paddle.vision.transforms.RandomRotation(degrees, interpolation='nearest', expand=False, center=None, fill=0, keys=None) -依据degrees参数指定的角度范围,按照均匀分布随机产生一个角度对图像进行旋转。 +依据 degrees 参数指定的角度范围,按照均匀分布随机产生一个角度对图像进行旋转。 参数 ::::::::: - degrees (sequence|float|int) - 旋转的角度度数范围。 - 如果度数是数字而不是像(min,max)这样的序列,则会根据degrees参数值生成度数范围(-degrees,+degrees)。 + 如果度数是数字而不是像(min,max)这样的序列,则会根据 degrees 参数值生成度数范围(-degrees,+degrees)。 - interpolation (str, optional):插值的方法。 如果这个参数没有设定或者输入图像为单通道,则该参数会根据使用的后端,被设置为 ``PIL.Image.NEAREST`` 或者 ``cv2.INTER_NEAREST``。 当使用 ``pil`` 作为后端时,支持的插值方法如下: @@ -23,7 +23,7 @@ RandomRotate - "bilinear": cv2.INTER_LINEAR, - "bicubic": cv2.INTER_CUBIC - expand (bool,可选) - 是否要对旋转后的图片进行大小扩展,默认值:False。 - 当参数值为True时,会对图像大小进行扩展,让其能够足以容纳整个旋转后的图像。当参数值为False时,会按照原图像大小保留旋转后的图像。**这个扩展操作的前提是围绕中心旋转且没有平移**。 + 当参数值为 True 时,会对图像大小进行扩展,让其能够足以容纳整个旋转后的图像。当参数值为 False 时,会按照原图像大小保留旋转后的图像。**这个扩展操作的前提是围绕中心旋转且没有平移**。 - center (2-tuple,可选) - 旋转的中心点坐标,原点是图片左上角,默认值是图像的中心点。 - fill (int,可选) - 对图像扩展时填充的值。默认值:0。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 diff --git a/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst b/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst index a9d98d5d9f9..4ed411a25c8 100644 --- a/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst @@ -10,7 +10,7 @@ RandomVerticalFlip 参数 ::::::::: - - prob (float) - 执行图片垂直翻转的概率,默认值为0.5。 + - prob (float) - 执行图片垂直翻转的概率,默认值为 0.5。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 diff --git a/docs/api/paddle/vision/transforms/Resize_cn.rst b/docs/api/paddle/vision/transforms/Resize_cn.rst index 62b461b435e..9d9d25fa809 100644 --- a/docs/api/paddle/vision/transforms/Resize_cn.rst +++ b/docs/api/paddle/vision/transforms/Resize_cn.rst @@ -11,7 +11,7 @@ resize ::::::::: - img (numpy.ndarray|PIL.Image) - 输入数据,可以是(H, W, C)形状的图像或遮罩。 - - size (int|tuple) - 
输出图像大小。如果size是一个序列,例如(h,w),输出大小将与此匹配。如果size为int,图像的较小边缘将与此数字匹配,即如果 height > width,则图像将重新缩放为(size * height / width, size)。 + - size (int|tuple) - 输出图像大小。如果 size 是一个序列,例如(h,w),输出大小将与此匹配。如果 size 为 int,图像的较小边缘将与此数字匹配,即如果 height > width,则图像将重新缩放为(size * height / width, size)。 - interpolation (int|str, optional) - 插值的方法,默认值:'bilinear'。 - 当使用 ``pil`` 作为后端时,支持的插值方法如下 + "nearest": Image.NEAREST, diff --git a/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst b/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst index 44b38ab4d35..b69fffa0f75 100644 --- a/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst @@ -10,7 +10,7 @@ SaturationTransform 参数 ::::::::: - - value (float) - 饱和度的调整数值,非负数,当参数值为0时返回原始图像。 + - value (float) - 饱和度的调整数值,非负数,当参数值为 0 时返回原始图像。 - keys (list[str]|tuple[str],可选) - 与 ``BaseTransform`` 定义一致。默认值:None。 形状 diff --git a/docs/api/paddle/vision/transforms/Transpose_cn.rst b/docs/api/paddle/vision/transforms/Transpose_cn.rst index dbc24cc3e92..aa147af7938 100644 --- a/docs/api/paddle/vision/transforms/Transpose_cn.rst +++ b/docs/api/paddle/vision/transforms/Transpose_cn.rst @@ -5,8 +5,8 @@ Transpose .. 
py:class:: paddle.vision.transforms.Transpose(order=(2, 0, 1), keys=None) -将输入的图像数据更改为目标格式。例如,大多数数据预处理是使用HWC格式的图片,而神经网络可能使用CHW模式输入张量。 -输出的图片是numpy.ndarray的实例。 +将输入的图像数据更改为目标格式。例如,大多数数据预处理是使用 HWC 格式的图片,而神经网络可能使用 CHW 模式输入张量。 +输出的图片是 numpy.ndarray 的实例。 参数 ::::::::: diff --git a/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst b/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst index 94c3d610257..99a51789cad 100644 --- a/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst +++ b/docs/api/paddle/vision/transforms/adjust_brightness_cn.rst @@ -11,7 +11,7 @@ adjust_brightness ::::::::: - img (PIL.Image|np.array|paddle.Tensor) - 输入的图像。 - - brightness_factor (float) - 调节图像亮度值的多少,可以是任何非负数。参数等于0时输出黑色图像,参数等于1时输出原始图像,参数大于1时输出图像亮度增强,如参数等于2时图像亮度增强两倍。 + - brightness_factor (float) - 调节图像亮度值的多少,可以是任何非负数。参数等于 0 时输出黑色图像,参数等于 1 时输出原始图像,参数大于 1 时输出图像亮度增强,如参数等于 2 时图像亮度增强两倍。 返回 ::::::::: diff --git a/docs/api/paddle/vision/transforms/adjust_contrast_cn.rst b/docs/api/paddle/vision/transforms/adjust_contrast_cn.rst index dfa7dc85419..ad97efaec79 100644 --- a/docs/api/paddle/vision/transforms/adjust_contrast_cn.rst +++ b/docs/api/paddle/vision/transforms/adjust_contrast_cn.rst @@ -11,7 +11,7 @@ adjust_contrast ::::::::: - img (PIL.Image|np.array|paddle.Tensor) - 输入的图像。 - - contrast_factor (float) - 调节图像对比度的多少,可以是任何非负数。参数等于0时输出纯灰色图像,参数等于1时输出原始图像,参数大于1时图像对比度增强,如参数等于2时图像对比度增强两倍。 + - contrast_factor (float) - 调节图像对比度的多少,可以是任何非负数。参数等于 0 时输出纯灰色图像,参数等于 1 时输出原始图像,参数大于 1 时图像对比度增强,如参数等于 2 时图像对比度增强两倍。 返回 ::::::::: diff --git a/docs/api/paddle/vision/transforms/adjust_hue_cn.rst b/docs/api/paddle/vision/transforms/adjust_hue_cn.rst index 35b4a033c1f..6fa460be577 100644 --- a/docs/api/paddle/vision/transforms/adjust_hue_cn.rst +++ b/docs/api/paddle/vision/transforms/adjust_hue_cn.rst @@ -11,7 +11,7 @@ adjust_hue ::::::::: - img (PIL.Image|np.array|paddle.Tensor) - 输入的图像。 - - hue_factor (float) - 图像的色调通道的偏移量。数值应在 ``[-0.5, 0.5]`` 
。0.5和-0.5分别表示HSV空间中色相通道正向和负向完全反转,0表示没有调整色调。因此,-0.5和0.5都会给出一个带有互补色的图像,而0则会给出原始图像。 + - hue_factor (float) - 图像的色调通道的偏移量。数值应在 ``[-0.5, 0.5]`` 。0.5 和-0.5 分别表示 HSV 空间中色相通道正向和负向完全反转,0 表示没有调整色调。因此,-0.5 和 0.5 都会给出一个带有互补色的图像,而 0 则会给出原始图像。 返回 ::::::::: diff --git a/docs/api/paddle/vision/transforms/erase_cn.rst b/docs/api/paddle/vision/transforms/erase_cn.rst index 650439556d4..6be972bd2f1 100644 --- a/docs/api/paddle/vision/transforms/erase_cn.rst +++ b/docs/api/paddle/vision/transforms/erase_cn.rst @@ -10,12 +10,12 @@ erase 参数 ::::::::: - - img (paddle.Tensor|np.array|PIL.Image) - 输入的图像。对于Tensor类型的输入,形状应该为(C, H, W)。对于np.array类型的输入,形状应该为(H, W, C)。 + - img (paddle.Tensor|np.array|PIL.Image) - 输入的图像。对于 Tensor 类型的输入,形状应该为(C, H, W)。对于 np.array 类型的输入,形状应该为(H, W, C)。 - i (int) - 擦除区域左上角点的纵坐标。 - j (int) - 擦除区域左上角点的横坐标。 - h (int) - 擦除区域的高。 - w (int) - 擦除区域的宽。 - - v (paddle.Tensor|np.array) - 用于替换擦除区域中像素的值。当输入为np.array或者PIL.Image类型时,需要为np.array类型。 + - v (paddle.Tensor|np.array) - 用于替换擦除区域中像素的值。当输入为 np.array 或者 PIL.Image 类型时,需要为 np.array 类型。 - inplace (bool,可选) - 该变换是否在原地操作。默认值:False。 返回 diff --git a/docs/api/paddle/vision/transforms/pad_cn.rst b/docs/api/paddle/vision/transforms/pad_cn.rst index 5e22c328e7e..8ccb9134033 100644 --- a/docs/api/paddle/vision/transforms/pad_cn.rst +++ b/docs/api/paddle/vision/transforms/pad_cn.rst @@ -11,9 +11,9 @@ pad ::::::::: - img (PIL.Image|np.ndarray) - 被填充的图像。 - - padding (int|list|tuple) - 在图像边界上进行填充的范围。如果提供的是单个int值,则该值用于填充图像所有边;如果提供的是长度为2的元组/列表,则分别为图像左/右和顶部/底部进行填充;如果提供的是长度为4的元组/列表,则按照左,上,右和下的顺序为图像填充。 - - fill (int|tuple) - 用于填充的像素值。仅当padding_mode为constant时参数值有效。默认值:0。如果参数值是一个长度为3的元组,则会分别用于填充R,G,B通道。 - - padding_mode (string) - 填充模式。支持:constant, edge, reflect 或 symmetric。默认值:constant。 ``constant`` 表示使用常量值进行填充,该值由fill参数指定。``edge`` 表示使用图像边缘像素值进行填充。``reflect`` 表示使用原图像的镜像值进行填充(不使用边缘上的值);比如:使用该模式对 ``[1, 2, 3, 4]`` 的两端分别填充2个值,结果是 ``[3, 2, 1, 2, 3, 4, 3, 2]``。``symmetric`` 表示使用原图像的镜像值进行填充(使用边缘上的值);比如:使用该模式对 ``[1, 2, 3, 4]`` 的两端分别填充2个值,结果是 ``[2, 1, 1, 
2, 3, 4, 4, 3]``。 + - padding (int|list|tuple) - 在图像边界上进行填充的范围。如果提供的是单个 int 值,则该值用于填充图像所有边;如果提供的是长度为 2 的元组/列表,则分别为图像左/右和顶部/底部进行填充;如果提供的是长度为 4 的元组/列表,则按照左,上,右和下的顺序为图像填充。 + - fill (int|tuple) - 用于填充的像素值。仅当 padding_mode 为 constant 时参数值有效。默认值:0。如果参数值是一个长度为 3 的元组,则会分别用于填充 R,G,B 通道。 + - padding_mode (string) - 填充模式。支持:constant, edge, reflect 或 symmetric。默认值:constant。 ``constant`` 表示使用常量值进行填充,该值由 fill 参数指定。``edge`` 表示使用图像边缘像素值进行填充。``reflect`` 表示使用原图像的镜像值进行填充(不使用边缘上的值);比如:使用该模式对 ``[1, 2, 3, 4]`` 的两端分别填充 2 个值,结果是 ``[3, 2, 1, 2, 3, 4, 3, 2]``。``symmetric`` 表示使用原图像的镜像值进行填充(使用边缘上的值);比如:使用该模式对 ``[1, 2, 3, 4]`` 的两端分别填充 2 个值,结果是 ``[2, 1, 1, 2, 3, 4, 4, 3]``。 返回 ::::::::: diff --git a/docs/api/paddle/vision/transforms/resize_cn.rst b/docs/api/paddle/vision/transforms/resize_cn.rst index 62b461b435e..9d9d25fa809 100644 --- a/docs/api/paddle/vision/transforms/resize_cn.rst +++ b/docs/api/paddle/vision/transforms/resize_cn.rst @@ -11,7 +11,7 @@ resize ::::::::: - img (numpy.ndarray|PIL.Image) - 输入数据,可以是(H, W, C)形状的图像或遮罩。 - - size (int|tuple) - 输出图像大小。如果size是一个序列,例如(h,w),输出大小将与此匹配。如果size为int,图像的较小边缘将与此数字匹配,即如果 height > width,则图像将重新缩放为(size * height / width, size)。 + - size (int|tuple) - 输出图像大小。如果 size 是一个序列,例如(h,w),输出大小将与此匹配。如果 size 为 int,图像的较小边缘将与此数字匹配,即如果 height > width,则图像将重新缩放为(size * height / width, size)。 - interpolation (int|str, optional) - 插值的方法,默认值:'bilinear'。 - 当使用 ``pil`` 作为后端时,支持的插值方法如下 + "nearest": Image.NEAREST, diff --git a/docs/api/paddle/vision/transforms/rotate_cn.rst b/docs/api/paddle/vision/transforms/rotate_cn.rst index b94f5a978b8..efa22a3c93a 100644 --- a/docs/api/paddle/vision/transforms/rotate_cn.rst +++ b/docs/api/paddle/vision/transforms/rotate_cn.rst @@ -12,8 +12,8 @@ rotate - img (PIL.Image|numpy.ndarray) - 输入图像。 - angle (float|int) - 旋转角度,顺时针。 - - resample (int|str,可选):可选的重采样滤波器。如果省略,或者图像只有一个通道,则根据后端将其设置为PIL.Image.NEAREST或cv2.INTER_NEAREST。使采用pil后端时,支持方法如下:"nearest": Image.NEAREST, "bilinear": Image.BILINEAR, "bicubic": 
Image.BICUBIC;当采用cv2后端时,支持方法如下:"nearest": cv2.INTER_NEAREST, - "bilinear": cv2.INTER_LINEAR, "bicubic": cv2.INTER_CUBIC。 - - expand (bool,可选) - 是否要对旋转后的图片进行大小扩展,默认值:False,不进行扩展。当参数值为True时,会对图像大小进行扩展,让其能够足以容纳整个旋转后的图像。当参数值为False时,会按照原图像大小保留旋转后的图像。**这个扩展操作的前提是围绕中心旋转且没有平移。** + - resample (int|str,可选):可选的重采样滤波器。如果省略,或者图像只有一个通道,则根据后端将其设置为 PIL.Image.NEAREST 或 cv2.INTER_NEAREST。使采用 pil 后端时,支持方法如下:"nearest": Image.NEAREST, "bilinear": Image.BILINEAR, "bicubic": Image.BICUBIC;当采用 cv2 后端时,支持方法如下:"nearest": cv2.INTER_NEAREST, - "bilinear": cv2.INTER_LINEAR, "bicubic": cv2.INTER_CUBIC。 + - expand (bool,可选) - 是否要对旋转后的图片进行大小扩展,默认值:False,不进行扩展。当参数值为 True 时,会对图像大小进行扩展,让其能够足以容纳整个旋转后的图像。当参数值为 False 时,会按照原图像大小保留旋转后的图像。**这个扩展操作的前提是围绕中心旋转且没有平移。** - center (2-tuple,可选) - 旋转的中心点坐标,原点是图片左上角,默认值是图像的中心点。 - fill (int,可选) - 对图像扩展时填充的值。默认值:0。 diff --git a/docs/api/paddle/vision/transforms/to_grayscale_cn.rst b/docs/api/paddle/vision/transforms/to_grayscale_cn.rst index 928b22cb286..5e3c339aa91 100644 --- a/docs/api/paddle/vision/transforms/to_grayscale_cn.rst +++ b/docs/api/paddle/vision/transforms/to_grayscale_cn.rst @@ -11,7 +11,7 @@ to_grayscale ::::::::: - img (PIL.Image|np.array) - 输入图像。 - - num_output_channels (int,可选) - 输出图像的通道数,默认值为1,单通道。 + - num_output_channels (int,可选) - 输出图像的通道数,默认值为 1,单通道。 返回 ::::::::: @@ -19,7 +19,7 @@ to_grayscale ``PIL.Image 或 numpy.ndarray``,输入图像的灰度版本。 - 如果 output_channels == 1:返回一个单通道图像。 - - 如果 output_channels == 3:返回一个RBG格式的3通道图像。 + - 如果 output_channels == 3:返回一个 RBG 格式的 3 通道图像。 代码示例 ::::::::: diff --git a/docs/api/paddle/zeros_cn.rst b/docs/api/paddle/zeros_cn.rst index 84d5fc734e8..13118855cb0 100644 --- a/docs/api/paddle/zeros_cn.rst +++ b/docs/api/paddle/zeros_cn.rst @@ -7,18 +7,18 @@ zeros -创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为0的Tensor。 +创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为 0 的 Tensor。 参数 :::::::::::: - - **shape** (tuple|list|Tensor) - 输出Tensor的形状,``shape`` 的数据类型为int32或者int64。 - - **dtype** (np.dtype|str,可选) - 
输出Tensor的数据类型,数据类型必须为bool、float16、float32、float64、int32或int64。若为None,数据类型为float32,默认为None。 + - **shape** (tuple|list|Tensor) - 输出 Tensor 的形状,``shape`` 的数据类型为 int32 或者 int64。 + - **dtype** (np.dtype|str,可选) - 输出 Tensor 的数据类型,数据类型必须为 bool、float16、float32、float64、int32 或 int64。若为 None,数据类型为 float32,默认为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -值全为0的Tensor,数据类型和 ``dtype`` 定义的类型一致。 +值全为 0 的 Tensor,数据类型和 ``dtype`` 定义的类型一致。 代码示例 diff --git a/docs/api/paddle/zeros_like_cn.rst b/docs/api/paddle/zeros_like_cn.rst index f6362b1e21c..d82f201f46b 100644 --- a/docs/api/paddle/zeros_like_cn.rst +++ b/docs/api/paddle/zeros_like_cn.rst @@ -6,17 +6,17 @@ zeros_like .. py:function:: paddle.zeros_like(x, dtype=None, name=None) -返回一个和 ``x`` 具有相同的形状的全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 +返回一个和 ``x`` 具有相同的形状的全零 Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 参数 :::::::::: - - **x** (Tensor) – 输入的多维Tensor,数据类型可以是bool,float16, float32,float64,int32,int64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 - - **dtype** (str|np.dtype,可选) - 输出Tensor的数据类型,支持bool,float16, float32,float64,int32,int64。当该参数值为None时,输出Tensor的数据类型与 ``x`` 相同。默认值为None。 + - **x** (Tensor) – 输入的多维 Tensor,数据类型可以是 bool,float16, float32,float64,int32,int64。输出 Tensor 的形状和 ``x`` 相同。如果 ``dtype`` 为 None,则输出 Tensor 的数据类型与 ``x`` 相同。 + - **dtype** (str|np.dtype,可选) - 输出 Tensor 的数据类型,支持 bool,float16, float32,float64,int32,int64。当该参数值为 None 时,输出 Tensor 的数据类型与 ``x`` 相同。默认值为 None。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - Tensor:和 ``x`` 具有相同的形状全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 + Tensor:和 ``x`` 具有相同的形状全零 Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 代码示例 diff --git a/docs/api_guides/X2Paddle/Caffe-Fluid.rst b/docs/api_guides/X2Paddle/Caffe-Fluid.rst index 6f895c29a87..50619ba15fa 100644 --- a/docs/api_guides/X2Paddle/Caffe-Fluid.rst +++ b/docs/api_guides/X2Paddle/Caffe-Fluid.rst @@ -1,14 +1,14 @@ .. 
_Caffe-Fluid: ######################## -Caffe-Fluid常用层对应表 +Caffe-Fluid 常用层对应表 ######################## -本文档梳理了Caffe常用Layer与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有Caffe使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 +本文档梳理了 Caffe 常用 Layer 与 PaddlePaddle API 对应关系和差异分析。根据文档对应关系,有 Caffe 使用经验的用户,可根据对应关系,快速熟悉 PaddlePaddle 的接口使用。 .. csv-table:: - :header: "序号", "Caffe Layer", "Fluid接口", "备注" + :header: "序号", "Caffe Layer", "Fluid 接口", "备注" :widths: 1, 8, 8, 3 "1", "`AbsVal `_", ":ref:`cn_api_fluid_layers_abs`", "功能一致" @@ -21,7 +21,7 @@ Caffe-Fluid常用层对应表 "8", "`Crop `_", ":ref:`cn_api_fluid_layers_crop`", "`差异对比 `_" "9", "`Deconvolution `_", ":ref:`cn_api_fluid_layers_conv2d_transpose`", "`差异对比 `_" "10", "`Dropout `_", ":ref:`cn_api_fluid_layers_dropout`", "`差异对比 `_" - "11", "`Eltwise `_", "无相应接口", "`Fluid实现 `_" + "11", "`Eltwise `_", "无相应接口", "`Fluid 实现 `_" "12", "`ELU `_", ":ref:`cn_api_fluid_layers_elu`", "功能一致" "13", "`EuclideanLoss `_", ":ref:`cn_api_fluid_layers_square_error_cost`", "`差异对比 `_" "14", "`Exp `_", ":ref:`cn_api_fluid_layers_exp`", "`差异对比 `_" @@ -33,7 +33,7 @@ Caffe-Fluid常用层对应表 "20", "`Pooling `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" "21", "`Power `_", ":ref:`cn_api_fluid_layers_pow`", "`差异对比 `_" "22", "`PReLU `_", ":ref:`cn_api_fluid_layers_prelu`", "功能一致" - "23", "`Reduction `_", "无相应接口", "`Fluid实现 `_" + "23", "`Reduction `_", "无相应接口", "`Fluid 实现 `_" "24", "`ReLU `_", ":ref:`cn_api_fluid_layers_leaky_relu`", "功能一致" "25", "`Reshape `_", ":ref:`cn_api_fluid_layers_reshape`", "`差异对比 `_" "26", "`SigmoidCrossEntropyLoss `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "`差异对比 `_" diff --git a/docs/api_guides/X2Paddle/TensorFlow-Fluid.rst b/docs/api_guides/X2Paddle/TensorFlow-Fluid.rst index 940904dba35..b3bda4f9fc6 100644 --- a/docs/api_guides/X2Paddle/TensorFlow-Fluid.rst +++ b/docs/api_guides/X2Paddle/TensorFlow-Fluid.rst @@ -1,13 +1,13 @@ .. 
_TensorFlow-Fluid: ############################### -TensorFlow-Fluid常用接口对应表 +TensorFlow-Fluid 常用接口对应表 ############################### -本文档基于TensorFlow v1.13梳理了常用API与PaddlePaddle API对应关系和差异分析。根据文档对应关系,有TensorFlow使用经验的用户,可根据对应关系,快速熟悉PaddlePaddle的接口使用。 +本文档基于 TensorFlow v1.13 梳理了常用 API 与 PaddlePaddle API 对应关系和差异分析。根据文档对应关系,有 TensorFlow 使用经验的用户,可根据对应关系,快速熟悉 PaddlePaddle 的接口使用。 .. csv-table:: - :header: "序号", "TensorFlow接口", "Fluid接口", "备注" + :header: "序号", "TensorFlow 接口", "Fluid 接口", "备注" :widths: 1, 8, 8, 3 "1", "`tf.abs `_", ":ref:`cn_api_fluid_layers_abs`", "功能一致" @@ -31,7 +31,7 @@ TensorFlow-Fluid常用接口对应表 "19", "`tf.contrib.layers.softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "功能一致" "20", "`tf.contrib.layers.xavier_initializer `_", ":ref:`cn_api_fluid_initializer_Xavier`", "功能一致" "21", "`tf.nn.rnn.GRUCell `_", ":ref:`cn_api_fluid_layers_gru_unit`", "`差异对比 `_" - "22", "`tf.nn.rnn.MultiRNNCell `_", "无相应接口", "`Fluid实现 `_" + "22", "`tf.nn.rnn.MultiRNNCell `_", "无相应接口", "`Fluid 实现 `_" "23", "`tf.nn.rnn.static_rnn `_", ":ref:`cn_api_fluid_layers_DynamicRNN`", "功能一致" "24", "`tf.convert_to_tensor `_", ":ref:`cn_api_fluid_layers_assign`", "功能一致" "25", "`tf.cos `_", ":ref:`cn_api_fluid_layers_cos`", "功能一致" @@ -74,7 +74,7 @@ TensorFlow-Fluid常用接口对应表 "62", "`tf.multiply `_", ":ref:`cn_api_fluid_layers_elementwise_mul`", "功能一致" "63", "`tf.nn.avg_pool `_", ":ref:`cn_api_fluid_layers_pool2d`", "`差异对比 `_" "64", "`tf.nn.batch_normalization `_", ":ref:`cn_api_fluid_layers_batch_norm`", "功能一致" - "65", "`tf.nn.bidirectional_dynamic_rnn `_", "无相应接口", "`Fluid实现 `_" + "65", "`tf.nn.bidirectional_dynamic_rnn `_", "无相应接口", "`Fluid 实现 `_" "66", "`tf.nn.conv2d `_", ":ref:`cn_api_fluid_layers_conv2d`", "`差异对比 `_" "67", "`tf.nn.conv2d_transpose `_", ":ref:`cn_api_fluid_layers_conv2d_transpose`", "`差异对比 `_" "68", "`tf.nn.conv3d_transpose `_", ":ref:`cn_api_fluid_layers_conv3d_transpose`", "`差异对比 `_" @@ -87,7 +87,7 @@ TensorFlow-Fluid常用接口对应表 "75", "`tf.nn.relu `_", 
":ref:`cn_api_fluid_layers_relu`", "功能一致" "76", "`tf.nn.relu6 `_", ":ref:`cn_api_fluid_layers_relu6`", "功能一致" "77", "`tf.nn.rnn_cell.LSTMCell `_", ":ref:`cn_api_fluid_layers_lstm_unit`", "`差异对比 `_" - "78", "`tf.nn.separable_conv2d `_", "无相应接口", "`Fluid实现 `_" + "78", "`tf.nn.separable_conv2d `_", "无相应接口", "`Fluid 实现 `_" "79", "`tf.nn.sigmoid `_", ":ref:`cn_api_fluid_layers_sigmoid`", "功能一致" "80", "`tf.nn.sigmoid_cross_entropy_with_logits `_", ":ref:`cn_api_fluid_layers_sigmoid_cross_entropy_with_logits`", "功能一致" "81", "`tf.nn.softmax `_", ":ref:`cn_api_fluid_layers_softmax`", "功能一致" @@ -107,7 +107,7 @@ TensorFlow-Fluid常用接口对应表 "95", "`tf.random_normal_initializer `_", ":ref:`cn_api_fluid_initializer_Normal`", "功能一致" "96", "`tf.random_uniform `_", ":ref:`cn_api_fluid_layers_uniform_random`", "功能一致" "97", "`tf.random_uniform_initializer `_", ":ref:`cn_api_fluid_initializer_UniformInitializer`", "功能一致" - "98", "`tf.reduce_logsumexp `_", "无相应接口", "`Fluid实现 `_" + "98", "`tf.reduce_logsumexp `_", "无相应接口", "`Fluid 实现 `_" "99", "`tf.reduce_max `_", ":ref:`cn_api_fluid_layers_reduce_max`", "功能一致" "100", "`tf.reduce_mean `_", ":ref:`cn_api_fluid_layers_reduce_mean`", "功能一致" "101", "`tf.reduce_min `_", ":ref:`cn_api_fluid_layers_reduce_min`", "功能一致" @@ -128,10 +128,10 @@ TensorFlow-Fluid常用接口对应表 "116", "`tf.split `_", ":ref:`cn_api_fluid_layers_split`", "`差异对比 `_" "117", "`tf.sqrt `_", ":ref:`cn_api_fluid_layers_sqrt`", "功能一致" "118", "`tf.square `_", ":ref:`cn_api_fluid_layers_square`", "功能一致" - "119", "`tf.squared_difference `_", "无相应接口", "`Fluid实现 `_" + "119", "`tf.squared_difference `_", "无相应接口", "`Fluid 实现 `_" "120", "`tf.squeeze `_", ":ref:`cn_api_fluid_layers_squeeze`", "功能一致" "121", "`tf.stack `_", ":ref:`cn_api_fluid_layers_stack`", "功能一致" - "122", "`tf.stop_gradient `_", "无相应接口", "`Fluid实现 `_" + "122", "`tf.stop_gradient `_", "无相应接口", "`Fluid 实现 `_" "123", "`tf.subtract `_", ":ref:`cn_api_fluid_layers_elementwise_sub`", "功能一致" "124", "`tf.tanh `_", 
":ref:`cn_api_fluid_layers_tanh`", "功能一致" "125", "`tf.tile `_", ":ref:`cn_api_fluid_layers_expand`", "功能一致" diff --git a/docs/api_guides/index_cn.rst b/docs/api_guides/index_cn.rst index 01086f5c049..ba504f3a84f 100755 --- a/docs/api_guides/index_cn.rst +++ b/docs/api_guides/index_cn.rst @@ -1,8 +1,8 @@ =========== -API功能分类 +API 功能分类 =========== -本模块分功能向您介绍PaddlePaddle Fluid的API体系和用法,提高您的查找效率,帮助您快速了解PaddlePaddle Fluid API的全貌,包括以下几个模块: +本模块分功能向您介绍 PaddlePaddle Fluid 的 API 体系和用法,提高您的查找效率,帮助您快速了解 PaddlePaddle Fluid API 的全貌,包括以下几个模块: .. toctree:: :maxdepth: 1 diff --git a/docs/api_guides/low_level/backward.rst b/docs/api_guides/low_level/backward.rst index 1b5fd972894..6d880185758 100644 --- a/docs/api_guides/low_level/backward.rst +++ b/docs/api_guides/low_level/backward.rst @@ -5,7 +5,7 @@ 反向传播 ######## -神经网络对模型的表达能力依赖于优化算法,优化是一个不断计算梯度并调整可学习参数的过程,Fluid中的优化算法可参考 :ref:`api_guide_optimizer` 。 +神经网络对模型的表达能力依赖于优化算法,优化是一个不断计算梯度并调整可学习参数的过程,Fluid 中的优化算法可参考 :ref:`api_guide_optimizer` 。 在网络的训练过程中,梯度计算分为两个步骤:前向计算与 `反向传播 `_ 。 @@ -15,9 +15,9 @@ 详细实现过程可以参考阅读 `反向传导算法 `_ 。 -在Fluid中,我们并不推荐直接调用 :code:`fluid` 中反向传播相关API,因为这是一个极底层的API,请考虑使用 :ref:`api_guide_optimizer` 中的相关API替代。当您使用优化相关API时,Fluid会自动为您计算复杂的反向传播过程。 +在 Fluid 中,我们并不推荐直接调用 :code:`fluid` 中反向传播相关 API,因为这是一个极底层的 API,请考虑使用 :ref:`api_guide_optimizer` 中的相关 API 替代。当您使用优化相关 API 时,Fluid 会自动为您计算复杂的反向传播过程。 如想自己实现,您也可以使用 :ref:`cn_api_fluid_backward_append_backward` 中的 :code:`callback` 自 -定义Operator的梯度计算形式。更多用法,请参考: +定义 Operator 的梯度计算形式。更多用法,请参考: * :ref:`cn_api_fluid_backward_append_backward` diff --git a/docs/api_guides/low_level/compiled_program.rst b/docs/api_guides/low_level/compiled_program.rst index a0f2eb626ab..2eb4f786b00 100644 --- a/docs/api_guides/low_level/compiled_program.rst +++ b/docs/api_guides/low_level/compiled_program.rst @@ -4,18 +4,18 @@ CompiledProgram ################ -:ref:`cn_api_fluid_CompiledProgram` 用于把程序转化为不同的优化组合。例如,你可以使用with_data_parallel将程序转化为数据并行程序,使其能够运行在多个设备上。 +:ref:`cn_api_fluid_CompiledProgram` 
用于把程序转化为不同的优化组合。例如,你可以使用 with_data_parallel 将程序转化为数据并行程序,使其能够运行在多个设备上。 .. code-block:: python # 注释: - # - 如果你想在ParallelExecutor中指定用于运行的GPU卡,需要在环境中定义 + # - 如果你想在 ParallelExecutor 中指定用于运行的 GPU 卡,需要在环境中定义 # CUDA_VISIBLE_DEVICES - # - 如果你想在ParallelExecutor中使用多CPU来运行程序,需要在环境中定义 + # - 如果你想在 ParallelExecutor 中使用多 CPU 来运行程序,需要在环境中定义 # CPU_NUM - # 首先创建Executor。 + # 首先创建 Executor。 place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) # 运行启动程序仅一次。 @@ -40,6 +40,6 @@ CompiledProgram feed=feed_dict, fetch_list=[loss.name]) -- 相关API : +- 相关 API : - :ref:`cn_api_fluid_CompiledProgram` diff --git a/docs/api_guides/low_level/distributed/async_training.rst b/docs/api_guides/low_level/distributed/async_training.rst index 61f2ee0f79b..2f1cc964f91 100644 --- a/docs/api_guides/low_level/distributed/async_training.rst +++ b/docs/api_guides/low_level/distributed/async_training.rst @@ -4,20 +4,20 @@ 分布式异步训练 ############ -Fluid支持数据并行的分布式异步训练,API使用 :code:`DistributeTranspiler` 将单机网络配置转换成可以多机执行的 +Fluid 支持数据并行的分布式异步训练,API 使用 :code:`DistributeTranspiler` 将单机网络配置转换成可以多机执行的 :code:`pserver` 端程序和 :code:`trainer` 端程序。用户在不同的节点执行相同的一段代码,根据环境变量或启动参数, -可以执行对应的 :code:`pserver` 或 :code:`trainer` 角色。Fluid异步训练只支持pserver模式,异步训练和 `同步训练 <../distributed/sync_training.html>`_ 的主要差异在于:异步训练每个trainer的梯度是单独更新到参数上的, -而同步训练是所有trainer的梯度合并之后统一更新到参数上,因此,同步训练和异步训练的超参数需要分别调节。 +可以执行对应的 :code:`pserver` 或 :code:`trainer` 角色。Fluid 异步训练只支持 pserver 模式,异步训练和 `同步训练 <../distributed/sync_training.html>`_ 的主要差异在于:异步训练每个 trainer 的梯度是单独更新到参数上的, +而同步训练是所有 trainer 的梯度合并之后统一更新到参数上,因此,同步训练和异步训练的超参数需要分别调节。 -pserver模式分布式异步训练 +pserver 模式分布式异步训练 ====================== -API详细使用方法参考 :ref:`cn_api_fluid_DistributeTranspiler` ,简单示例用法: +API 详细使用方法参考 :ref:`cn_api_fluid_DistributeTranspiler` ,简单示例用法: .. 
code-block:: python config = fluid.DistributeTranspilerConfig() - # 配置策略config + # 配置策略 config config.slice_var_up = False t = fluid.DistributeTranspiler(config=config) t.transpile(trainer_id, @@ -30,4 +30,4 @@ API详细使用方法参考 :ref:`cn_api_fluid_DistributeTranspiler` ,简单 需要注意的是:进行异步训练时,请修改 :code:`sync_mode` 的值 -- :code:`sync_mode` : 是否是同步训练模式,默认为True,不传此参数也默认是同步训练模式,设置为False则为异步训练 +- :code:`sync_mode` : 是否是同步训练模式,默认为 True,不传此参数也默认是同步训练模式,设置为 False 则为异步训练 diff --git a/docs/api_guides/low_level/distributed/cluster_train_data_cn.rst b/docs/api_guides/low_level/distributed/cluster_train_data_cn.rst index bf32f8164b2..1e2f0f18e90 100644 --- a/docs/api_guides/low_level/distributed/cluster_train_data_cn.rst +++ b/docs/api_guides/low_level/distributed/cluster_train_data_cn.rst @@ -1,7 +1,7 @@ .. _api_guide_cluster_train_data: #################### -分布式训练reader准备 +分布式训练 reader 准备 #################### 一个数据并行的分布式训练任务通常会含有多个训练进程,每个训练进程处理整个数据集中的一部分,根据当前进程的唯一序号(trainer_id)以及训练进程总数(trainers)可以决定当前训练进程应该读取哪一部分数据。 @@ -29,7 +29,7 @@ 预先切分训练文件 ----------------- -由于使用 `cluster_reader` 依然会读取全量数据,对于训练进程比较多的任务,会造成IO资源的浪费、影响训练性能。另一种方法是可以将训练数据切分成多个小文件,每个进程处理其中的一部分文件, +由于使用 `cluster_reader` 依然会读取全量数据,对于训练进程比较多的任务,会造成 IO 资源的浪费、影响训练性能。另一种方法是可以将训练数据切分成多个小文件,每个进程处理其中的一部分文件, 例如在 Linux 系统中可以使用 `split `_ 命令将训练数据切分成多个小文件: .. 
code-block:: bash diff --git a/docs/api_guides/low_level/distributed/large_scale_sparse_feature_training.rst b/docs/api_guides/low_level/distributed/large_scale_sparse_feature_training.rst index 6ba7a716b2c..5f6ad4b3b55 100644 --- a/docs/api_guides/low_level/distributed/large_scale_sparse_feature_training.rst +++ b/docs/api_guides/low_level/distributed/large_scale_sparse_feature_training.rst @@ -8,22 +8,22 @@ 模型配置和训练 ============= -embedding被广泛应用在各种网络结构中,尤其是文本处理相关的模型。在某些场景,例如推荐系统或者搜索引擎中, -embedding的feature id可能会非常多,当feature id达到一定数量时,embedding参数会变得很大, +embedding 被广泛应用在各种网络结构中,尤其是文本处理相关的模型。在某些场景,例如推荐系统或者搜索引擎中, +embedding 的 feature id 可能会非常多,当 feature id 达到一定数量时,embedding 参数会变得很大, 会带来两个问题: -1. 单机内存由于无法存放如此巨大的embedding参数,导致无法训练; +1. 单机内存由于无法存放如此巨大的 embedding 参数,导致无法训练; 2. 普通的训练模式每一轮迭代都需要同步完整的参数,参数太大会让通信变得非常慢,进而影响训练速度。 -Fluid支持千亿量级超大规模稀疏特征embedding的训练,embedding参数只会保存在parameter server上,通过 -参数prefetch和梯度稀疏更新的方法,大大减少通信量,提高通信速度。 +Fluid 支持千亿量级超大规模稀疏特征 embedding 的训练,embedding 参数只会保存在 parameter server 上,通过 +参数 prefetch 和梯度稀疏更新的方法,大大减少通信量,提高通信速度。 该功能只对分布式训练有效,单机无法使用。 需要配合 `稀疏更新 <../layers/sparse_update.html>`_ 一起使用。 -使用方法:在配置embedding的时候,加上参数 :code:`is_distributed=True` 以及 :code:`is_sparse=True` 即可。 -参数 :code:`dict_size` 定义数据中总的id的数量,id可以是int64范围内的任意值,只要总id个数小于等于dict_size就可以支持。 -所以配置之前需要预估一下数据中总的feature id的数量。 +使用方法:在配置 embedding 的时候,加上参数 :code:`is_distributed=True` 以及 :code:`is_sparse=True` 即可。 +参数 :code:`dict_size` 定义数据中总的 id 的数量,id 可以是 int64 范围内的任意值,只要总 id 个数小于等于 dict_size 就可以支持。 +所以配置之前需要预估一下数据中总的 feature id 的数量。 .. code-block:: python @@ -40,5 +40,5 @@ Fluid支持千亿量级超大规模稀疏特征embedding的训练,embedding参 当特征数量达到千亿的时候,参数量很大,单机已经无法存下,所以模型的存储和加载都和普通模式不同: -1. 普通模式下,参数是在trainer端保存和加载的; -2. 分布式模式下,参数的保存和加载,都是在pserver端进行,每个pserver只保存和加载该pserver自身对应部分的参数 +1. 普通模式下,参数是在 trainer 端保存和加载的; +2. 
分布式模式下,参数的保存和加载,都是在 pserver 端进行,每个 pserver 只保存和加载该 pserver 自身对应部分的参数 diff --git a/docs/api_guides/low_level/distributed/sync_training.rst b/docs/api_guides/low_level/distributed/sync_training.rst index 1ea58c39706..918cf1092f3 100644 --- a/docs/api_guides/low_level/distributed/sync_training.rst +++ b/docs/api_guides/low_level/distributed/sync_training.rst @@ -4,20 +4,20 @@ 分布式同步训练 ############ -Fluid支持数据并行的分布式同步训练,API使用 :code:`DistributeTranspiler` 将单机网络配置转换成可以多机执行的 +Fluid 支持数据并行的分布式同步训练,API 使用 :code:`DistributeTranspiler` 将单机网络配置转换成可以多机执行的 :code:`pserver` 端程序和 :code:`trainer` 端程序。用户在不同的节点执行相同的一段代码,根据环境变量或启动参数, -可以执行对应的 :code:`pserver` 或 :code:`trainer` 角色。Fluid分布式同步训练同时支持pserver模式和NCCL2模式, -在API使用上有差别,需要注意。 +可以执行对应的 :code:`pserver` 或 :code:`trainer` 角色。Fluid 分布式同步训练同时支持 pserver 模式和 NCCL2 模式, +在 API 使用上有差别,需要注意。 -pserver模式分布式训练 +pserver 模式分布式训练 =================== -API详细使用方法参考 :ref:`DistributeTranspiler` ,简单实例用法: +API 详细使用方法参考 :ref:`DistributeTranspiler` ,简单实例用法: .. code-block:: python config = fluid.DistributeTranspilerConfig() - # 配置策略config + # 配置策略 config config.slice_var_up = False t = fluid.DistributeTranspiler(config=config) t.transpile(trainer_id, @@ -28,41 +28,41 @@ API详细使用方法参考 :ref:`DistributeTranspiler` ,简单实例用法: 以上参数中: -- :code:`trainer_id` : trainer节点的id,从0到n-1,n为当前训练任务中trainer节点的个数 +- :code:`trainer_id` : trainer 节点的 id,从 0 到 n-1,n 为当前训练任务中 trainer 节点的个数 - :code:`program` : 被转换的 :code:`program` 默认使用 :code:`fluid.default_main_program()` -- :code:`pservers` : 当前训练任务中pserver节点的IP端口列表 -- :code:`trainers` : int类型,当前训练任务中trainer节点的个数。注意: - * pserver模式下,trainer节点个数可以和pserver节点个数不一致,比如使用20个pserver和50个trainer。在实际训练任务中,您可以通过调整pserver节点和trainer节点个数找到最佳性能 - * NCCL2模式中,此项参数是字符串,指定trainer节点的IP端口列表 -- :code:`sync_mode` : 是否是同步训练模式,默认为True,不传此参数也默认是同步训练模式 +- :code:`pservers` : 当前训练任务中 pserver 节点的 IP 端口列表 +- :code:`trainers` : int 类型,当前训练任务中 trainer 节点的个数。注意: + * pserver 模式下,trainer 节点个数可以和 pserver 节点个数不一致,比如使用 20 个 pserver 和 50 个 trainer。在实际训练任务中,您可以通过调整 pserver 节点和 
trainer 节点个数找到最佳性能 + * NCCL2 模式中,此项参数是字符串,指定 trainer 节点的 IP 端口列表 +- :code:`sync_mode` : 是否是同步训练模式,默认为 True,不传此参数也默认是同步训练模式 -其中,支持的config包括: +其中,支持的 config 包括: -- :code:`slice_var_up` : 配置是否切分一个参数到多个pserver上进行优化,默认开启。此选项适用于模型参数个数少,但需要使用大量节点的场景,有利于提升pserver端计算并行度 -- :code:`split_method` : 配置transpiler分配参数(或参数的切片)到多个pserver的方式,默认为"RoundRobin",也可以使用"HashName" -- :code:`min_block_size` : 如果配置了参数切分,指定最小Tensor的切分大小,防止RPC请求包过小,默认为8192,一般情况不需要调整此项参数 +- :code:`slice_var_up` : 配置是否切分一个参数到多个 pserver 上进行优化,默认开启。此选项适用于模型参数个数少,但需要使用大量节点的场景,有利于提升 pserver 端计算并行度 +- :code:`split_method` : 配置 transpiler 分配参数(或参数的切片)到多个 pserver 的方式,默认为"RoundRobin",也可以使用"HashName" +- :code:`min_block_size` : 如果配置了参数切分,指定最小 Tensor 的切分大小,防止 RPC 请求包过小,默认为 8192,一般情况不需要调整此项参数 - :code:`enable_dc_asgd` : 是否开启 :code:`DC-ASGD` 此选项在异步训练中生效,启用异步训练补偿算法 -- :code:`mode` : 可以选择"pserver"或"nccl2",指定使用pserver模式或NCCL2模式分布式训练 -- :code:`print_log` : 是否开启transpiler debug日志,此项为开发调试使用 +- :code:`mode` : 可以选择"pserver"或"nccl2",指定使用 pserver 模式或 NCCL2 模式分布式训练 +- :code:`print_log` : 是否开启 transpiler debug 日志,此项为开发调试使用 通用环境变量配置: -- :code:`FLAGS_rpc_send_thread_num` :int,指定RPC通信发送时线程的个数 -- :code:`FLAGS_rpc_get_thread_num` : int,指定RPC通信接受时线程的个数 -- :code:`FLAGS_rpc_prefetch_thread_num` : int,分布式lookup table执行RPC通信时,prefetch线程的个数 -- :code:`FLAGS_rpc_deadline` : int,RPC通信最长等待时间,单位为毫秒,默认180000 +- :code:`FLAGS_rpc_send_thread_num` :int,指定 RPC 通信发送时线程的个数 +- :code:`FLAGS_rpc_get_thread_num` : int,指定 RPC 通信接受时线程的个数 +- :code:`FLAGS_rpc_prefetch_thread_num` : int,分布式 lookup table 执行 RPC 通信时,prefetch 线程的个数 +- :code:`FLAGS_rpc_deadline` : int,RPC 通信最长等待时间,单位为毫秒,默认 180000 -NCCL2模式分布式训练 +NCCL2 模式分布式训练 ================= -基于NCCL2 (Collective Communication) 的多机同步训练模式,仅支持在GPU集群下进行。 -此部分详细API说明可以参考 :ref:`DistributeTranspiler` 。 +基于 NCCL2 (Collective Communication) 的多机同步训练模式,仅支持在 GPU 集群下进行。 +此部分详细 API 说明可以参考 :ref:`DistributeTranspiler` 。 -注意:NCCL2模式下,集群不需要启动pserver,只需要启动多个trainer节点即可。 +注意:NCCL2 模式下,集群不需要启动 pserver,只需要启动多个 trainer 节点即可。 -使用以下代码,将当前 
:code:`Program` 转化成适用于NCCL2分布式计算的Fluid :code:`Program` : +使用以下代码,将当前 :code:`Program` 转化成适用于 NCCL2 分布式计算的 Fluid :code:`Program` : .. code-block:: python @@ -77,7 +77,7 @@ NCCL2模式分布式训练 其中: -- :code:`trainer_id` : trainer节点的id,从0到n-1,n为当前训练任务中trainer节点的个数 -- :code:`program` 和 :code:`startup_program` : 分别为Fluid 模型的主配置program和初始化startup_program -- :code:`trainers` : 字符串类型,指定当前任务所有trainer的IP和端口号,仅用于NCCL2初始化(pserver模式中,此参数为int,指定trainer节点的个数) -- :code:`current_endpoint` : 当前任务的当前节点的IP和端口号 +- :code:`trainer_id` : trainer 节点的 id,从 0 到 n-1,n 为当前训练任务中 trainer 节点的个数 +- :code:`program` 和 :code:`startup_program` : 分别为 Fluid 模型的主配置 program 和初始化 startup_program +- :code:`trainers` : 字符串类型,指定当前任务所有 trainer 的 IP 和端口号,仅用于 NCCL2 初始化(pserver 模式中,此参数为 int,指定 trainer 节点的个数) +- :code:`current_endpoint` : 当前任务的当前节点的 IP 和端口号 diff --git a/docs/api_guides/low_level/executor.rst b/docs/api_guides/low_level/executor.rst index c4368d51318..8745ff019a1 100644 --- a/docs/api_guides/low_level/executor.rst +++ b/docs/api_guides/low_level/executor.rst @@ -4,14 +4,14 @@ 执行引擎 ########## -:code:`Executor` 实现了一个简易的执行器,所有的操作在其中顺序执行。你可以在Python脚本中运行 :code:`Executor` 。PaddlePaddle Fluid中有两种执行器。一种是 :code:`Executor` 默认的单线程执行器,另一种是并行计算执行器,在 :ref:`api_guide_parallel_executor` 中进行了解释。``Executor`` 和 :ref:`api_guide_parallel_executor` 的配置不同,这可能会给部分用户带来困惑。为使执行器更加灵活,我们引入了 :ref:`api_guide_compiled_program` , :ref:`api_guide_compiled_program` 用于把一个程序转换为不同的优化组合,可以通过 :code:`Executor` 运行。 +:code:`Executor` 实现了一个简易的执行器,所有的操作在其中顺序执行。你可以在 Python 脚本中运行 :code:`Executor` 。PaddlePaddle Fluid 中有两种执行器。一种是 :code:`Executor` 默认的单线程执行器,另一种是并行计算执行器,在 :ref:`api_guide_parallel_executor` 中进行了解释。``Executor`` 和 :ref:`api_guide_parallel_executor` 的配置不同,这可能会给部分用户带来困惑。为使执行器更加灵活,我们引入了 :ref:`api_guide_compiled_program` , :ref:`api_guide_compiled_program` 用于把一个程序转换为不同的优化组合,可以通过 :code:`Executor` 运行。 :code:`Executor` 的逻辑非常简单。建议在调试阶段用 :code:`Executor` 在一台计算机上完整地运行模型,然后转向多设备或多台计算机计算。 :code:`Executor` 在构造时接受一个 :code:`Place` ,它既可能是 
:ref:`api_fluid_CPUPlace` 也可能是 :ref:`api_fluid_CUDAPlace` 。 .. code-block:: python - # 首先创建Executor。 + # 首先创建 Executor。 place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) # 运行启动程序仅一次。 @@ -23,5 +23,5 @@ fetch_list=[loss.name]) 简单样例请参照 `basics_fit_a_line <../../beginners_guide/basics/fit_a_line/README.cn.html>`_ -- 相关API : +- 相关 API : - :ref:`cn_api_fluid_Executor` diff --git a/docs/api_guides/low_level/layers/activations.rst b/docs/api_guides/low_level/layers/activations.rst index 5f044fb148e..5fecf03707d 100644 --- a/docs/api_guides/low_level/layers/activations.rst +++ b/docs/api_guides/low_level/layers/activations.rst @@ -11,16 +11,16 @@ PaddlePaddle Fluid 对大部分的激活函数进行了支持,其中有: :ref:`cn_api_fluid_layers_relu`, :ref:`cn_api_fluid_layers_tanh`, :ref:`cn_api_fluid_layers_sigmoid`, :ref:`cn_api_fluid_layers_elu`, :ref:`cn_api_fluid_layers_relu6`, :ref:`cn_api_fluid_layers_pow`, :ref:`cn_api_fluid_layers_stanh`, :ref:`cn_api_fluid_layers_hard_sigmoid`, :ref:`cn_api_fluid_layers_swish`, :ref:`cn_api_fluid_layers_prelu`, :ref:`cn_api_fluid_layers_brelu`, :ref:`cn_api_fluid_layers_leaky_relu`, :ref:`cn_api_fluid_layers_soft_relu`, :ref:`cn_api_fluid_layers_thresholded_relu`, :ref:`cn_api_fluid_layers_maxout`, :ref:`cn_api_fluid_layers_logsigmoid`, :ref:`cn_api_fluid_layers_hard_shrink`, :ref:`cn_api_fluid_layers_softsign`, :ref:`cn_api_fluid_layers_softplus`, :ref:`cn_api_fluid_layers_tanh_shrink`, :ref:`cn_api_fluid_layers_softshrink`, :ref:`cn_api_fluid_layers_exp`。 -**Fluid提供了两种使用激活函数的方式:** +**Fluid 提供了两种使用激活函数的方式:** -- 如果一个层的接口提供了 :code:`act` 变量(默认值为None),我们可以通过该变量指定该层的激活函数类型。该方式支持常见的激活函数: :code:`relu`, :code:`tanh`, :code:`sigmoid`, :code:`identity`。 +- 如果一个层的接口提供了 :code:`act` 变量(默认值为 None),我们可以通过该变量指定该层的激活函数类型。该方式支持常见的激活函数: :code:`relu`, :code:`tanh`, :code:`sigmoid`, :code:`identity`。 .. 
code-block:: python conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") -- Fluid为每个Activation提供了接口,我们可以显式的对它们进行调用。 +- Fluid 为每个 Activation 提供了接口,我们可以显式的对它们进行调用。 .. code-block:: python diff --git a/docs/api_guides/low_level/layers/control_flow.rst b/docs/api_guides/low_level/layers/control_flow.rst index 518ca9d4275..244a2fab299 100644 --- a/docs/api_guides/low_level/layers/control_flow.rst +++ b/docs/api_guides/low_level/layers/control_flow.rst @@ -4,47 +4,47 @@ 控制流 ###### -在程序语言中,控制流(control flow)决定了语句的执行顺序,常见的控制流包括顺序执行、分支和循环等。PaddlePaddle Fluid继承了这一概念,提供了多种控制流API, 以控制深度学习模型在训练或者预测过程中的执行逻辑。 +在程序语言中,控制流(control flow)决定了语句的执行顺序,常见的控制流包括顺序执行、分支和循环等。PaddlePaddle Fluid 继承了这一概念,提供了多种控制流 API, 以控制深度学习模型在训练或者预测过程中的执行逻辑。 IfElse ====== -条件分支,允许对同一个batch的输入,根据给定的条件,分别选择 :code:`true_block` 或 :code:`false_block` 中的逻辑进行执行,执行完成之后再将两个分支的输出合并为同一个输出。通常,条件表达式可由 :ref:`cn_api_fluid_layers_less_than`, :ref:`cn_api_fluid_layers_equal` 等逻辑比较 API 产生。 +条件分支,允许对同一个 batch 的输入,根据给定的条件,分别选择 :code:`true_block` 或 :code:`false_block` 中的逻辑进行执行,执行完成之后再将两个分支的输出合并为同一个输出。通常,条件表达式可由 :ref:`cn_api_fluid_layers_less_than`, :ref:`cn_api_fluid_layers_equal` 等逻辑比较 API 产生。 请参考 :ref:`cn_api_fluid_layers_IfElse` -**注意:** 强烈建议您使用新的OP :ref:`cn_api_fluid_layers_cond` 而不是 ``IfElse``。:ref:`cn_api_fluid_layers_cond` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``IfElse`` 一样。 +**注意:** 强烈建议您使用新的 OP :ref:`cn_api_fluid_layers_cond` 而不是 ``IfElse``。:ref:`cn_api_fluid_layers_cond` 的使用方式更简单,并且调用该 OP 所用的代码更少且功能与 ``IfElse`` 一样。 Switch ====== 多分支选择结构,如同程序语言中常见的 :code:`switch-case` 声明, 其根据输入表达式的取值不同,选择不同的分支执行。具体来说,Fluid 所定义的 :code:`Switch` 控制流有如下特性: -* case的条件是个bool类型的值,即在Program中是一个张量类型的Variable; -* 依次检查逐个case,选择第一个满足条件的case执行,完成执行后即退出所属的block; -* 如果所有case均不满足条件,会选择默认的case进行执行。 +* case 的条件是个 bool 类型的值,即在 Program 中是一个张量类型的 Variable; +* 依次检查逐个 case,选择第一个满足条件的 case 执行,完成执行后即退出所属的 block; +* 如果所有 case 均不满足条件,会选择默认的 case 进行执行。 请参考 :ref:`cn_api_fluid_layers_Switch` -**注意:** 强烈建议您使用新的OP :ref:`cn_api_fluid_layers_case` 而不是 
``Switch``。 :ref:`cn_api_fluid_layers_case` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``Switch`` 一样。 +**注意:** 强烈建议您使用新的 OP :ref:`cn_api_fluid_layers_case` 而不是 ``Switch``。 :ref:`cn_api_fluid_layers_case` 的使用方式更简单,并且调用该 OP 所用的代码更少且功能与 ``Switch`` 一样。 While ===== -While 循环,当条件判断为真时,循环执行 :code:`While` 控制流所属 :code:`block` 内的逻辑,条件判断为假时退出循环。与之相关的API有 +While 循环,当条件判断为真时,循环执行 :code:`While` 控制流所属 :code:`block` 内的逻辑,条件判断为假时退出循环。与之相关的 API 有 -* :ref:`cn_api_fluid_layers_increment` :累加API,通常用于对循环次数进行计数; -* :ref:`cn_api_fluid_layers_array_read` :从 :code:`LOD_TENSOR_ARRAY` 中指定的位置读入Variable,进行计算; +* :ref:`cn_api_fluid_layers_increment` :累加 API,通常用于对循环次数进行计数; +* :ref:`cn_api_fluid_layers_array_read` :从 :code:`LOD_TENSOR_ARRAY` 中指定的位置读入 Variable,进行计算; * :ref:`cn_api_fluid_layers_array_write` :将 Variable 写回到 :code:`LOD_TENSOR_ARRAY` 指定的位置,存储计算结果。 请参考 :ref:`cn_api_fluid_layers_While` -**注意:** 强烈建议您使用新的OP :ref:`cn_api_fluid_layers_while_loop` 而不是 ``While``。 :ref:`cn_api_fluid_layers_while_loop` 的使用方式更简单,并且调用该OP所用的代码更少且功能与 ``While`` 一样。 +**注意:** 强烈建议您使用新的 OP :ref:`cn_api_fluid_layers_while_loop` 而不是 ``While``。 :ref:`cn_api_fluid_layers_while_loop` 的使用方式更简单,并且调用该 OP 所用的代码更少且功能与 ``While`` 一样。 DynamicRNN ========== -即动态RNN,可处理一个batch不等长的序列数据,其接受 :code:`lod_level=1` 的 Variable 作为输入,在 :code:`DynamicRNN` 的 :code:`block` 内,用户需自定义RNN的单步计算逻辑。在每一个时间步,用户可将需记忆的状态写入到 :code:`DynamicRNN` 的 :code:`memory` 中,并将需要的输出写出到其 :code:`output` 中。 +即动态 RNN,可处理一个 batch 不等长的序列数据,其接受 :code:`lod_level=1` 的 Variable 作为输入,在 :code:`DynamicRNN` 的 :code:`block` 内,用户需自定义 RNN 的单步计算逻辑。在每一个时间步,用户可将需记忆的状态写入到 :code:`DynamicRNN` 的 :code:`memory` 中,并将需要的输出写出到其 :code:`output` 中。 :ref:`cn_api_fluid_layers_sequence_last_step` 可获取 :code:`DynamicRNN` 最后一个时间步的输出。 @@ -53,6 +53,6 @@ DynamicRNN StaticRNN ========= -即静态RNN,只能处理固定长度的序列数据,接受 :code:`lod_level=0` 的 Variable 作为输入。与 :code:`DynamicRNN` 类似,在RNN的每单个时间步,用户需自定义计算逻辑,并可将状态和输出写出。 +即静态 RNN,只能处理固定长度的序列数据,接受 :code:`lod_level=0` 的 Variable 作为输入。与 :code:`DynamicRNN` 类似,在 RNN 的每单个时间步,用户需自定义计算逻辑,并可将状态和输出写出。 
请参考 :ref:`cn_api_fluid_layers_StaticRNN` diff --git a/docs/api_guides/low_level/layers/conv.rst b/docs/api_guides/low_level/layers/conv.rst index 0580936bee4..7a4a4b08b61 100644 --- a/docs/api_guides/low_level/layers/conv.rst +++ b/docs/api_guides/low_level/layers/conv.rst @@ -4,62 +4,62 @@ 卷积 ##### -卷积有两组输入:特征图和卷积核,依据输入特征和卷积核的形状、Layout不同、计算方式的不同,在Fluid里,有针对变长序列特征的一维卷积,有针对定长图像特征的二维(2D Conv)、三维卷积(3D Conv),同时也有卷积计算的逆向过程,下面先介绍Fluid里的2D/3D卷积,再来介绍序列卷积。 +卷积有两组输入:特征图和卷积核,依据输入特征和卷积核的形状、Layout 不同、计算方式的不同,在 Fluid 里,有针对变长序列特征的一维卷积,有针对定长图像特征的二维(2D Conv)、三维卷积(3D Conv),同时也有卷积计算的逆向过程,下面先介绍 Fluid 里的 2D/3D 卷积,再来介绍序列卷积。 -2D/3D卷积 +2D/3D 卷积 ============== 1. 卷积输入参数: --------------------- -卷积需要依据滑动步长(stride)、填充长度(padding)、卷积核窗口大小(filter size)、分组数(groups)、扩张系数(dilation rate)来决定如何计算。groups最早在 `AlexNet `_ 中引入, 可以理解为将原始的卷积分为独立若干组卷积计算。 +卷积需要依据滑动步长(stride)、填充长度(padding)、卷积核窗口大小(filter size)、分组数(groups)、扩张系数(dilation rate)来决定如何计算。groups 最早在 `AlexNet `_ 中引入, 可以理解为将原始的卷积分为独立若干组卷积计算。 - **注意**: 同cuDNN的方式,Fluid目前只支持在特征图上下填充相同的长度,左右也是。 + **注意**: 同 cuDNN 的方式,Fluid 目前只支持在特征图上下填充相同的长度,左右也是。 -- 输入输出Layout: +- 输入输出 Layout: - 2D卷积输入特征的Layout为[N, C, H, W]或[N, H, W, C], N即batch size,C是通道数,H、W是特征的高度和宽度,输出特征和输入特征的Layout一致。(相应的3D卷积输入特征的Layout为[N, C, D, H, W]或[N, D, H, W, C],但 **注意**,Fluid的卷积当前只支持[N, C, H, W],[N, C, D, H, W]。) + 2D 卷积输入特征的 Layout 为[N, C, H, W]或[N, H, W, C], N 即 batch size,C 是通道数,H、W 是特征的高度和宽度,输出特征和输入特征的 Layout 一致。(相应的 3D 卷积输入特征的 Layout 为[N, C, D, H, W]或[N, D, H, W, C],但 **注意**,Fluid 的卷积当前只支持[N, C, H, W],[N, C, D, H, W]。) -- 卷积核的Layout: +- 卷积核的 Layout: - Fluid中2D卷积的卷积核(也称权重)的Layout为[C_o, C_in / groups, f_h, f_w],C_o、C_in表示输出、输入通道数,f_h、f_w表示卷积核窗口的高度和宽度,按行序存储。(相应的3D卷积的卷积核Layout为[C_o, C_in / groups, f_d, f_h, d_w],同样按行序存储。) + Fluid 中 2D 卷积的卷积核(也称权重)的 Layout 为[C_o, C_in / groups, f_h, f_w],C_o、C_in 表示输出、输入通道数,f_h、f_w 表示卷积核窗口的高度和宽度,按行序存储。(相应的 3D 卷积的卷积核 Layout 为[C_o, C_in / groups, f_d, f_h, d_w],同样按行序存储。) - 深度可分离卷积(depthwise separable convolution): - 在深度可分离卷积中包括depthwise convolution和pointwise 
convolution两组,这两个卷积的接口和上述普通卷积接口相同。前者可以通过给普通卷积设置groups来做,后者通过设置卷积核filters的大小为1x1,深度可分离卷积减少参数的同时减少了计算量。 + 在深度可分离卷积中包括 depthwise convolution 和 pointwise convolution 两组,这两个卷积的接口和上述普通卷积接口相同。前者可以通过给普通卷积设置 groups 来做,后者通过设置卷积核 filters 的大小为 1x1,深度可分离卷积减少参数的同时减少了计算量。 - 对于depthwise convolution,可以设置groups等于输入通道数,此时,2D卷积的卷积核形状为[C_o, 1, f_h, f_w]。 - 对于pointwise convolution,卷积核的形状为[C_o, C_in, 1, 1]。 + 对于 depthwise convolution,可以设置 groups 等于输入通道数,此时,2D 卷积的卷积核形状为[C_o, 1, f_h, f_w]。 + 对于 pointwise convolution,卷积核的形状为[C_o, C_in, 1, 1]。 - **注意**:Fluid针对depthwise convolution的GPU计算做了高度优化,您可以通过在 - :code:`fluid.layers.conv2d` 接口设置 :code:`use_cudnn=False` 来使用Fluid自身优化的CUDA程序。 + **注意**:Fluid 针对 depthwise convolution 的 GPU 计算做了高度优化,您可以通过在 + :code:`fluid.layers.conv2d` 接口设置 :code:`use_cudnn=False` 来使用 Fluid 自身优化的 CUDA 程序。 - 空洞卷积(dilated convolution): - 空洞卷积相比普通卷积而言,卷积核在特征图上取值时不在连续,而是间隔的,这个间隔数称作dilation,等于1时,即为普通卷积,空洞卷积相比普通卷积的感受野更大。 + 空洞卷积相比普通卷积而言,卷积核在特征图上取值时不在连续,而是间隔的,这个间隔数称作 dilation,等于 1 时,即为普通卷积,空洞卷积相比普通卷积的感受野更大。 -- API汇总: +- API 汇总: - :ref:`cn_api_fluid_layers_conv2d` - :ref:`cn_api_fluid_layers_conv3d` - :ref:`cn_api_fluid_layers_conv2d_transpose` - :ref:`cn_api_fluid_layers_conv3d_transpose` -1D序列卷积 +1D 序列卷积 ============== -Fluid可以表示变长的序列结构,这里的变长是指不同样本的时间步(step)数不一样,通常是一个2D的Tensor和一个能够区分的样本长度的辅助结构来表示。假定,2D的Tensor的形状是shape,shape[0]是所有样本的总时间步数,shape[1]是序列特征的大小。 +Fluid 可以表示变长的序列结构,这里的变长是指不同样本的时间步(step)数不一样,通常是一个 2D 的 Tensor 和一个能够区分的样本长度的辅助结构来表示。假定,2D 的 Tensor 的形状是 shape,shape[0]是所有样本的总时间步数,shape[1]是序列特征的大小。 -基于此数据结构的卷积在Fluid里称作序列卷积,也表示一维卷积。同图像卷积,序列卷积的输入参数有卷积核大小、填充大小、滑动步长,但与2D卷积不同的是,这些参数个数都为1。**注意**,目前仅支持stride为1的情况,输出序列的时间步数和输入序列相同。 +基于此数据结构的卷积在 Fluid 里称作序列卷积,也表示一维卷积。同图像卷积,序列卷积的输入参数有卷积核大小、填充大小、滑动步长,但与 2D 卷积不同的是,这些参数个数都为 1。**注意**,目前仅支持 stride 为 1 的情况,输出序列的时间步数和输入序列相同。 -假如:输入序列形状为(T, N), T即该序列的时间步数,N是序列特征大小;卷积核的上下文步长为K,输出序列长度为M,则卷积核权重形状为(K * N, M),输出序列形状为(T, M)。 +假如:输入序列形状为(T, N), T 即该序列的时间步数,N 是序列特征大小;卷积核的上下文步长为 K,输出序列长度为 M,则卷积核权重形状为(K * N, M),输出序列形状为(T, M)。 -另外,参考DeepSpeech,Fluid实现了行卷积row 
convolution, 或称 +另外,参考 DeepSpeech,Fluid 实现了行卷积 row convolution, 或称 `look ahead convolution `_ , 该卷积相比上述普通序列卷积可以减少参数。 -- API汇总: +- API 汇总: - :ref:`cn_api_fluid_layers_sequence_conv` - :ref:`cn_api_fluid_layers_row_conv` diff --git a/docs/api_guides/low_level/layers/conv_en.rst b/docs/api_guides/low_level/layers/conv_en.rst index 7313b4ae117..4dd4c8ea611 100755 --- a/docs/api_guides/low_level/layers/conv_en.rst +++ b/docs/api_guides/low_level/layers/conv_en.rst @@ -26,7 +26,7 @@ The convolution needs to be determined according to stride, padding, filter size - Depthwise Separable Convolution: - Depthwise Separable Convolution contains depthwise convolution和pointwise convolution. The interfaces of these two convolutions are the same as the above normal convolutional interfaces. The former can be performed by setting groups for ordinary convolutions. The latter can be realised by setting the size of the convolution kernel filters to 1x1. Depthwise Separable Convolution reduces the parameters as well as the volume of computation. + Depthwise Separable Convolution contains depthwise convolution 和 pointwise convolution. The interfaces of these two convolutions are the same as the above normal convolutional interfaces. The former can be performed by setting groups for ordinary convolutions. The latter can be realised by setting the size of the convolution kernel filters to 1x1. Depthwise Separable Convolution reduces the parameters as well as the volume of computation. For depthwise convolution, you can set groups equal to the number of input channels. At this time, the convolution kernel shape of the 2D convolution is [C_o, 1, f_h, f_w]. For pointwise convolution, the shape of the convolution kernel is [C_o, C_in, 1, 1]. 
diff --git a/docs/api_guides/low_level/layers/data_feeder.rst b/docs/api_guides/low_level/layers/data_feeder.rst index a2502141ca1..495ccf3f1cb 100644 --- a/docs/api_guides/low_level/layers/data_feeder.rst +++ b/docs/api_guides/low_level/layers/data_feeder.rst @@ -1,9 +1,9 @@ .. _api_guide_data_feeder: -使用DataFeeder传入训练/预测数据 +使用 DataFeeder 传入训练/预测数据 ################################### -Fluid提供 :code:`DataFeeder` 类,将numpy array等数据转换为 :code:`LoDTensor` 类型传入训练/预测网络。 +Fluid 提供 :code:`DataFeeder` 类,将 numpy array 等数据转换为 :code:`LoDTensor` 类型传入训练/预测网络。 用户创建 :code:`DataFeeder` 对象的方式为: @@ -17,12 +17,12 @@ Fluid提供 :code:`DataFeeder` 类,将numpy array等数据转换为 :code:`LoD feeder = fluid.DataFeeder(feed_list=[image, label], place=place) 其中,:code:`feed_list` 参数为变量列表,这些变量由 :code:`fluid.layers.data()` 创建, -:code:`place` 参数表示应将Python端传入的numpy array等数据转换为GPU端或是CPU端的 :code:`LoDTensor` 。 +:code:`place` 参数表示应将 Python 端传入的 numpy array 等数据转换为 GPU 端或是 CPU 端的 :code:`LoDTensor` 。 创建 :code:`DataFeeder` 对象后,用户可调用其 :code:`feed(iterable)` 方法将用户传入的 :code:`iterable` 数据转换为 :code:`LoDTensor`。 -:code:`iterable` 应为Python List或Tuple类型对象,且 :code:`iterable` 的每个元素均为长度为N的 -Python List或Tuple类型对象,其中N为创建 :code:`DataFeeder` 对象时传入的 :code:`feed_list` 变量个数。 +:code:`iterable` 应为 Python List 或 Tuple 类型对象,且 :code:`iterable` 的每个元素均为长度为 N 的 +Python List 或 Tuple 类型对象,其中 N 为创建 :code:`DataFeeder` 对象时传入的 :code:`feed_list` 变量个数。 :code:`iterable` 的具体格式为: @@ -35,10 +35,10 @@ Python List或Tuple类型对象,其中N为创建 :code:`DataFeeder` 对象时 (image_n, label_n) ] -其中,:code:`image_i` 与 :code:`label_i` 均为numpy array类型数据。若传入数据的维度为[1],如 :code:`label_i`, -则可传入Python int、float等类型数据。 :code:`image_i` 与 :code:`label_i` 的数据类型和维度不必 +其中,:code:`image_i` 与 :code:`label_i` 均为 numpy array 类型数据。若传入数据的维度为[1],如 :code:`label_i`, +则可传入 Python int、float 等类型数据。 :code:`image_i` 与 :code:`label_i` 的数据类型和维度不必 与 :code:`fluid.layers.data()` 创建时指定的 :code:`dtype` 和 :code:`shape` 完全一致,:code:`DataFeeder` 内部 -会完成数据类型和维度的转换。若 :code:`feed_list` 中的变量的 :code:`lod_level` 
不为零,则Fluid会将经过维度转换后的 -:code:`iterable` 中每行数据的第0维作为返回结果的 :code:`LoD`。 +会完成数据类型和维度的转换。若 :code:`feed_list` 中的变量的 :code:`lod_level` 不为零,则 Fluid 会将经过维度转换后的 +:code:`iterable` 中每行数据的第 0 维作为返回结果的 :code:`LoD`。 具体使用方法请参见 :ref:`cn_api_fluid_DataFeeder` 。 diff --git a/docs/api_guides/low_level/layers/data_in_out.rst b/docs/api_guides/low_level/layers/data_in_out.rst index 6267a373a34..7ce42099a89 100644 --- a/docs/api_guides/low_level/layers/data_in_out.rst +++ b/docs/api_guides/low_level/layers/data_in_out.rst @@ -7,14 +7,14 @@ 数据输入 ------------- -Fluid支持两种数据输入方式,包括: +Fluid 支持两种数据输入方式,包括: -1. Python Reader: 纯Python的Reader。用户在Python端定义 :code:`fluid.layers.data` 层构建网络,并通过 +1. Python Reader: 纯 Python 的 Reader。用户在 Python 端定义 :code:`fluid.layers.data` 层构建网络,并通过 :code:`executor.run(feed=...)` 的方式读入数据。数据读取和模型训练/预测的过程是同步进行的。 -2. PyReader: 高效灵活的C++ Reader接口。PyReader内部维护容量为 :code:`capacity` 的队列(队列容量由 -:code:`fluid.layers.py_reader` 接口中的 :code:`capacity` 参数设置),Python端调用队列的 :code:`push` -方法送入训练/预测数据,C++端的训练/预测程序调用队列的 :code:`pop` 方法取出Python端送入的数据。PyReader可与 +2. 
PyReader: 高效灵活的 C++ Reader 接口。PyReader 内部维护容量为 :code:`capacity` 的队列(队列容量由 +:code:`fluid.layers.py_reader` 接口中的 :code:`capacity` 参数设置),Python 端调用队列的 :code:`push` +方法送入训练/预测数据,C++端的训练/预测程序调用队列的 :code:`pop` 方法取出 Python 端送入的数据。PyReader 可与 :code:`double_buffer` 配合使用,实现数据读取和训练/预测的异步执行。 具体使用方法请参考 :ref:`cn_api_fluid_layers_py_reader`。 @@ -23,11 +23,11 @@ Fluid支持两种数据输入方式,包括: 数据输出 ------------ -Fluid支持在训练/预测阶段获取当前batch的数据。 +Fluid 支持在训练/预测阶段获取当前 batch 的数据。 用户可通过 :code:`executor.run(fetch_list=[...], return_numpy=...)` 的方式 -fetch期望的输出变量,通过设置 :code:`return_numpy` 参数设置是否将输出数据转为numpy array。 +fetch 期望的输出变量,通过设置 :code:`return_numpy` 参数设置是否将输出数据转为 numpy array。 若 :code:`return_numpy` 为 :code:`False` ,则返回 :code:`LoDTensor` 类型数据。 -具体使用方式请参考相关API文档 :ref:`cn_api_fluid_executor_Executor` 和 +具体使用方式请参考相关 API 文档 :ref:`cn_api_fluid_executor_Executor` 和 :ref:`cn_api_fluid_ParallelExecutor`。 diff --git a/docs/api_guides/low_level/layers/detection.rst b/docs/api_guides/low_level/layers/detection.rst index 532954de708..ad6c7366af0 100644 --- a/docs/api_guides/low_level/layers/detection.rst +++ b/docs/api_guides/low_level/layers/detection.rst @@ -4,7 +4,7 @@ 图像检测 ######### -PaddlePaddle Fluid在图像检测任务中实现了多个特有的操作。以下分模型介绍各个api: +PaddlePaddle Fluid 在图像检测任务中实现了多个特有的操作。以下分模型介绍各个 api: 通用操作 ------------- @@ -15,7 +15,7 @@ PaddlePaddle Fluid在图像检测任务中实现了多个特有的操作。以 * 比较两个检测框并进行匹配: - * iou_similarity:计算两组框的IOU值。API Reference 请参考 :ref:`cn_api_fluid_layers_iou_similarity` + * iou_similarity:计算两组框的 IOU 值。API Reference 请参考 :ref:`cn_api_fluid_layers_iou_similarity` * bipartite_match:通过贪心二分匹配算法得到每一列中距离最大的一行。API Reference 请参考 :ref:`cn_api_fluid_layers_bipartite_match` @@ -31,31 +31,31 @@ PaddlePaddle Fluid在图像检测任务中实现了多个特有的操作。以 RCNN ------------- -RCNN系列模型是两阶段目标检测器,其中包含`Faster RCNN `_,`Mask RCNN `_,相较于传统提取区域的方法,RCNN中RPN网络通过共享卷积层参数大幅提高提取区域的效率,并提出高质量的候选区域。RPN网络需要对输入anchor和真实值进行比较生成初选候选框,并对初选候选框分配分类和回归值,需要如下五个特有api: +RCNN 系列模型是两阶段目标检测器,其中包含`Faster RCNN `_,`Mask RCNN `_,相较于传统提取区域的方法,RCNN 中 RPN 
网络通过共享卷积层参数大幅提高提取区域的效率,并提出高质量的候选区域。RPN 网络需要对输入 anchor 和真实值进行比较生成初选候选框,并对初选候选框分配分类和回归值,需要如下五个特有 api: -* rpn_target_assign:通过anchor和真实框为anchor分配RPN网络的分类和回归目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_rpn_target_assign` +* rpn_target_assign:通过 anchor 和真实框为 anchor 分配 RPN 网络的分类和回归目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_rpn_target_assign` -* anchor_generator:为每个位置生成一系列anchor。API Reference 请参考 :ref:`cn_api_fluid_layers_anchor_generator` +* anchor_generator:为每个位置生成一系列 anchor。API Reference 请参考 :ref:`cn_api_fluid_layers_anchor_generator` -* generate_proposal_labels: 通过generate_proposals得到的候选框和真实框得到RCNN部分的分类和回归的目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_proposal_labels` +* generate_proposal_labels: 通过 generate_proposals 得到的候选框和真实框得到 RCNN 部分的分类和回归的目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_proposal_labels` -* generate_proposals: 对RPN网络输出box解码并筛选得到新的候选框。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_proposals` +* generate_proposals: 对 RPN 网络输出 box 解码并筛选得到新的候选框。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_proposals` -* generate_mask_labels: 通过generate_proposal_labels得到的RoI,和真实框对比后进一步筛选RoI并得到Mask分支的目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_mask_labels` +* generate_mask_labels: 通过 generate_proposal_labels 得到的 RoI,和真实框对比后进一步筛选 RoI 并得到 Mask 分支的目标值。API Reference 请参考 :ref:`cn_api_fluid_layers_generate_mask_labels` FPN ------------- -`FPN `_ 全称Feature Pyramid Networks, 采用特征金字塔做目标检测。 顶层特征通过上采样和低层特征做融合,并将FPN放在RPN网络中用于生成候选框,有效的提高检测精度,需要如下两种特有api: +`FPN `_ 全称 Feature Pyramid Networks, 采用特征金字塔做目标检测。 顶层特征通过上采样和低层特征做融合,并将 FPN 放在 RPN 网络中用于生成候选框,有效的提高检测精度,需要如下两种特有 api: -* collect_fpn_proposals: 拼接多层RoI,同时选择分数较高的RoI。API Reference 请参考 :ref:`cn_api_fluid_layers_collect_fpn_proposals` +* collect_fpn_proposals: 拼接多层 RoI,同时选择分数较高的 RoI。API Reference 请参考 :ref:`cn_api_fluid_layers_collect_fpn_proposals` -* distribute_fpn_proposals: 将多个RoI依据面积分配到FPN的多个层级中。API Reference 请参考 :ref:`cn_api_fluid_layers_distribute_fpn_proposals` +* 
distribute_fpn_proposals: 将多个 RoI 依据面积分配到 FPN 的多个层级中。API Reference 请参考 :ref:`cn_api_fluid_layers_distribute_fpn_proposals` SSD ---------------- -`SSD `_ 全称Single Shot MultiBox Detector,是目标检测领域较新且效果较好的检测算法之一,具有检测速度快且检测精度高的特点。与两阶段的检测方法不同,单阶段目标检测并不进行区域推荐,而是直接从特征图回归出目标的边界框和分类概率。SSD网络对六个尺度特>征图计算损失,进行预测,需要如下五种特有api: +`SSD `_ 全称 Single Shot MultiBox Detector,是目标检测领域较新且效果较好的检测算法之一,具有检测速度快且检测精度高的特点。与两阶段的检测方法不同,单阶段目标检测并不进行区域推荐,而是直接从特征图回归出目标的边界框和分类概率。SSD 网络对六个尺度特>征图计算损失,进行预测,需要如下五种特有 api: * 根据不同参数为每个输入位置生成一系列候选框。 @@ -63,39 +63,39 @@ SSD * density_prior box: API Reference 请参考 :ref:`cn_api_fluid_layers_density_prior_box` -* multi_box_head :得到不同prior box的位置和置信度。API Reference 请参考 :ref:`cn_api_fluid_layers_multi_box_head` +* multi_box_head :得到不同 prior box 的位置和置信度。API Reference 请参考 :ref:`cn_api_fluid_layers_multi_box_head` -* detection_output:对prioir box解码,通过多分类NMS得到检测结果。API Reference 请参考 :ref:`cn_api_fluid_layers_detection_output` +* detection_output:对 prioir box 解码,通过多分类 NMS 得到检测结果。API Reference 请参考 :ref:`cn_api_fluid_layers_detection_output` * ssd_loss:通过位置偏移预测值,置信度,检测框位置和真实框位置和标签计算损失。API Reference 请参考 :ref:`cn_api_fluid_layers_ssd_loss` -* detection_map: 利用mAP评估SSD网络模型。API Reference 请参考 :ref:`cn_api_fluid_layers_detection_map` +* detection_map: 利用 mAP 评估 SSD 网络模型。API Reference 请参考 :ref:`cn_api_fluid_layers_detection_map` YOLO V3 --------------- -`YOLO V3 `_ 是单阶段目标检测器,同时具备了精度高,速度快的特点。对特征图划分多个区块,每个区块得到坐标位置和置信度。采用了多尺度融合的方式预测以得到更高的训练精度,需要如下两种特有api: +`YOLO V3 `_ 是单阶段目标检测器,同时具备了精度高,速度快的特点。对特征图划分多个区块,每个区块得到坐标位置和置信度。采用了多尺度融合的方式预测以得到更高的训练精度,需要如下两种特有 api: -* yolo_box: 从YOLOv3网络的输出生成YOLO检测框。API Reference 请参考 :ref:`cn_api_fluid_layers_yolo_box` +* yolo_box: 从 YOLOv3 网络的输出生成 YOLO 检测框。API Reference 请参考 :ref:`cn_api_fluid_layers_yolo_box` -* yolov3_loss:通过给定的预测结果和真实框生成yolov3损失。API Reference 请参考 :ref:`cn_api_fluid_layers_yolov3_loss` +* yolov3_loss:通过给定的预测结果和真实框生成 yolov3 损失。API Reference 请参考 :ref:`cn_api_fluid_layers_yolov3_loss` RetinaNet --------------- -`RetinaNet `_ 是单阶段目标检测器,引入Focal 
Loss和FPN后,能以更快的速率实现与双阶段目标检测网络近似或更优的效果,需要如下三种特有api: +`RetinaNet `_ 是单阶段目标检测器,引入 Focal Loss 和 FPN 后,能以更快的速率实现与双阶段目标检测网络近似或更优的效果,需要如下三种特有 api: * sigmoid_focal_loss: 用于处理单阶段检测器中类别不平均问题的损失。API Reference 请参考 :ref:`cn_api_fluid_layers_sigmoid_focal_loss` -* retinanet_target_assign: 对给定anchor和真实框,为每个anchor分配分类和回归的目标值,用于训练RetinaNet。API Reference 请参考 :ref:`cn_api_fluid_layers_retinanet_target_assign` +* retinanet_target_assign: 对给定 anchor 和真实框,为每个 anchor 分配分类和回归的目标值,用于训练 RetinaNet。API Reference 请参考 :ref:`cn_api_fluid_layers_retinanet_target_assign` * retinanet_detection_output: 对检测框进行解码,并做非极大值抑制后得到检测输出。API Reference 请参考 :ref:`cn_api_fluid_layers_retinanet_detection_output` OCR --------- -场景文字识别是在图像背景复杂、分辨率低下、字体多样、分布随意等情况下,将图像信息转化为文字序列的过程,可认为是一种特别的翻译过程:将图像输入翻译为自然语言输出。OCR任务中需要对检测框进行不规则变换,其中需要如下两个api: +场景文字识别是在图像背景复杂、分辨率低下、字体多样、分布随意等情况下,将图像信息转化为文字序列的过程,可认为是一种特别的翻译过程:将图像输入翻译为自然语言输出。OCR 任务中需要对检测框进行不规则变换,其中需要如下两个 api: -* roi_perspective_transform:对输入roi做透视变换。API Reference 请参考 :ref:`cn_api_fluid_layers_roi_perspective_transform` +* roi_perspective_transform:对输入 roi 做透视变换。API Reference 请参考 :ref:`cn_api_fluid_layers_roi_perspective_transform` * polygon_box_transform:对不规则检测框进行坐标变换。API Reference 请参考 :ref:`cn_api_fluid_layers_polygon_box_transform` diff --git a/docs/api_guides/low_level/layers/learning_rate_scheduler.rst b/docs/api_guides/low_level/layers/learning_rate_scheduler.rst index b510df09862..bf835e5ef9b 100644 --- a/docs/api_guides/low_level/layers/learning_rate_scheduler.rst +++ b/docs/api_guides/low_level/layers/learning_rate_scheduler.rst @@ -11,53 +11,53 @@ :align: center -学习率调度器定义了常用的学习率衰减策略来动态生成学习率,学习率衰减函数以epoch或step为参数,返回一个随训练逐渐减小的学习率,从而兼顾降低训练时间和在局部极小值能更好寻优两个方面。 +学习率调度器定义了常用的学习率衰减策略来动态生成学习率,学习率衰减函数以 epoch 或 step 为参数,返回一个随训练逐渐减小的学习率,从而兼顾降低训练时间和在局部极小值能更好寻优两个方面。 -下面介绍学习率调度器中相关的Api: +下面介绍学习率调度器中相关的 Api: ====== * :code:`NoamDecay`: 诺姆衰减,相关算法请参考 `《Attention Is All You Need》 `_ 。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_NoamDecay` + 相关 API Reference 请参考 
:ref:`cn_api_paddle_optimizer_lr_NoamDecay` * :code:`ExponentialDecay`: 指数衰减,即每次将当前学习率乘以给定的衰减率得到下一个学习率。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_ExponentialDecay` + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_ExponentialDecay` * :code:`NaturalExpDecay`: 自然指数衰减,即每次将当前学习率乘以给定的衰减率的自然指数得到下一个学习率。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_NaturalExpDecay` + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_NaturalExpDecay` * :code:`InverseTimeDecay`: 逆时间衰减,即得到的学习率与当前衰减次数成反比。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_InverseTimeDecay` + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_InverseTimeDecay` * :code:`PolynomialDecay`: 多项式衰减,即得到的学习率为初始学习率和给定最终学习之间由多项式计算权重定比分点的插值。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_PolynomialDecay` + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_PolynomialDecay` -* :code:`PiecewiseDecay`: 分段衰减,即由给定step数分段呈阶梯状衰减,每段内学习率相同。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_PiecewiseDecay` +* :code:`PiecewiseDecay`: 分段衰减,即由给定 step 数分段呈阶梯状衰减,每段内学习率相同。 + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_PiecewiseDecay` -* :code:`CosineAnnealingDecay`: 余弦式衰减,即学习率随step数变化呈余弦函数周期变化。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_CosineAnnealingDecay` +* :code:`CosineAnnealingDecay`: 余弦式衰减,即学习率随 step 数变化呈余弦函数周期变化。 + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_CosineAnnealingDecay` -* :code:`LinearWarmup`: 学习率随step数线性增加到指定学习率。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_LinearWarmup` +* :code:`LinearWarmup`: 学习率随 step 数线性增加到指定学习率。 + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_LinearWarmup` -* :code:`StepDecay`: 学习率每隔一定的step数进行衰减,需要指定step_size。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_StepDecay` +* :code:`StepDecay`: 学习率每隔一定的 step 数进行衰减,需要指定 step_size。 + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_StepDecay` -* :code:`MultiStepDecay`: 学习率在指定的step数时进行衰减,需要指定衰减的节点位置。 - 相关API Reference请参考 
:ref:`cn_api_paddle_optimizer_lr_MultiStepDecay` +* :code:`MultiStepDecay`: 学习率在指定的 step 数时进行衰减,需要指定衰减的节点位置。 + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_MultiStepDecay` -* :code:`LambdaDecay`: 学习率根据自定义的lambda函数进行衰减。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_LambdaDecay` +* :code:`LambdaDecay`: 学习率根据自定义的 lambda 函数进行衰减。 + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_LambdaDecay` -* :code:`ReduceOnPlateau`: 学习率根据当前监控指标(一般为loss)来进行自适应调整,当loss趋于稳定时衰减学习率。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_ReduceOnPlateau` +* :code:`ReduceOnPlateau`: 学习率根据当前监控指标(一般为 loss)来进行自适应调整,当 loss 趋于稳定时衰减学习率。 + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_ReduceOnPlateau` -* :code:`MultiplicativeDecay`: 每次将当前学习率乘以lambda函数得到下一个学习率。 - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_MultiplicativeDecay` +* :code:`MultiplicativeDecay`: 每次将当前学习率乘以 lambda 函数得到下一个学习率。 + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_MultiplicativeDecay` -* :code:`OneCycleLR`: One Cycle衰减,学习率上升至最大,再下降至最小. - 相关API Reference请参考 :ref:`cn_api_paddle_optimizer_lr_OneCycleLR` +* :code:`OneCycleLR`: One Cycle 衰减,学习率上升至最大,再下降至最小. + 相关 API Reference 请参考 :ref:`cn_api_paddle_optimizer_lr_OneCycleLR` * :code:`CyclicLR`: 学习率根据指定的缩放策略以固定频率在最小和最大学习率之间进行循环。 - 相关API Reference请参考 :ref:`_cn_api_paddle_optimizer_lr_CyclicLR` + 相关 API Reference 请参考 :ref:`_cn_api_paddle_optimizer_lr_CyclicLR` diff --git a/docs/api_guides/low_level/layers/math.rst b/docs/api_guides/low_level/layers/math.rst index 0cf58c8999f..2044c91d64e 100644 --- a/docs/api_guides/low_level/layers/math.rst +++ b/docs/api_guides/low_level/layers/math.rst @@ -4,7 +4,7 @@ 数学操作 ######### -Paddle提供了丰富的数学操作,以下列出的数学操作都是对目标张量进行逐元素的操作。其中,如果二元操作的两个输入有不同形状,会先进行 :code:`broadcast`. 部分数学操作还支持数学操作符,比如: :code:`+`, :code:`-`, :code:`*`, :code:`/` 等。数学操作符不仅支持张量,还支持标量。 +Paddle 提供了丰富的数学操作,以下列出的数学操作都是对目标张量进行逐元素的操作。其中,如果二元操作的两个输入有不同形状,会先进行 :code:`broadcast`. 
部分数学操作还支持数学操作符,比如: :code:`+`, :code:`-`, :code:`*`, :code:`/` 等。数学操作符不仅支持张量,还支持标量。 一元操作 @@ -98,7 +98,7 @@ API Reference 请参考 :ref:`cn_api_fluid_layers_reciprocal` reduce ------------------ -对输入 :code:`Tensor` 在指定的若干轴上做reduce操作,包括:min, max, sum, mean, product +对输入 :code:`Tensor` 在指定的若干轴上做 reduce 操作,包括:min, max, sum, mean, product API Reference 请参考: :ref:`cn_api_fluid_layers_reduce_min` diff --git a/docs/api_guides/low_level/layers/pooling.rst b/docs/api_guides/low_level/layers/pooling.rst index 6d615fb7126..b109e52b7b3 100644 --- a/docs/api_guides/low_level/layers/pooling.rst +++ b/docs/api_guides/low_level/layers/pooling.rst @@ -6,21 +6,21 @@ 池化的作用是对输入特征做下采样和降低过拟合。降低过拟合是减小输出大小的结果,它同样也减少了后续层中的参数的数量。 -池化通常只需要将前一层的特征图作为输入,此外需要一些参数来确定池化具体的操作。在PaddlePaddle中我们同样通过设定池化的大小,方式,步长,是否是全局池化,是否使用cudnn,是否使用ceil函数计算输出等参数来选择具体池化的方式。 -PaddlePaddle中有针对定长图像特征的二维(pool2d)、三维卷积(pool3d),RoI池化(roi_pool),以及针对序列的序列池化(sequence_pool),同时也有池化计算的反向过程,下面先介绍2D/3D池化,以及RoI池化,再来介绍序列池化。 +池化通常只需要将前一层的特征图作为输入,此外需要一些参数来确定池化具体的操作。在 PaddlePaddle 中我们同样通过设定池化的大小,方式,步长,是否是全局池化,是否使用 cudnn,是否使用 ceil 函数计算输出等参数来选择具体池化的方式。 +PaddlePaddle 中有针对定长图像特征的二维(pool2d)、三维卷积(pool3d),RoI 池化(roi_pool),以及针对序列的序列池化(sequence_pool),同时也有池化计算的反向过程,下面先介绍 2D/3D 池化,以及 RoI 池化,再来介绍序列池化。 -------------- 1. 
pool2d/pool3d ------------------------ -- ``input`` : 池化操作接收任何符合layout是:\ ``N(batch size)* C(channel size) * H(height) * W(width)``\ 格式的\ ``Tensor``\ 类型作为输入。 +- ``input`` : 池化操作接收任何符合 layout 是:\ ``N(batch size)* C(channel size) * H(height) * W(width)``\ 格式的\ ``Tensor``\ 类型作为输入。 - ``pool_size``\ : 用来确定池化\ ``filter``\ 的大小,即将多大范围内的数据池化为一个值。 - ``num_channels``\ : 用来确定输入的\ ``channel``\ 数量,如果未设置参数或设置为\ ``None``\ ,其实际值将自动设置为输入的\ ``channel``\ 数量。 -- ``pool_type``\ : 接收\ ``avg``\ 和\ ``max``\ 2种类型之一作为pooling的方式,默认值为\ ``max``\ 。其中\ ``max``\ 意为最大池化,即计算池化\ ``filter``\ 区域内的数据的最大值作为输出;而\ ``avg``\ 意为平均池化,即计算池化\ ``filter``\ 区域内的数据的平均值作为输出。 +- ``pool_type``\ : 接收\ ``avg``\ 和\ ``max``\ 2 种类型之一作为 pooling 的方式,默认值为\ ``max``\ 。其中\ ``max``\ 意为最大池化,即计算池化\ ``filter``\ 区域内的数据的最大值作为输出;而\ ``avg``\ 意为平均池化,即计算池化\ ``filter``\ 区域内的数据的平均值作为输出。 - ``pool_stride``\ : 意为池化的\ ``filter``\ 在输入特征图上移动的步长。 @@ -28,9 +28,9 @@ PaddlePaddle中有针对定长图像特征的二维(pool2d)、三维卷积(pool3 - ``global_pooling``\ : 意为是否使用全局池化,全局池化是指使用和特征图大小相同的\ ``filter``\ 来进行池化,同样这个过程也可以使用平均池化或者最大池化来做为池化的方式,全局池化通常会用来替换全连接层以大量减少参数防止过拟合。 -- ``use_cudnn``\ : 选项可以来选择是否使用cudnn来优化计算池化速度。 +- ``use_cudnn``\ : 选项可以来选择是否使用 cudnn 来优化计算池化速度。 -- ``ceil_mode``\ : 是否使用ceil函数计算输出高度和宽度。\ ``ceil mode``\ 意为天花板模式,是指会把特征图中不足\ ``filter size``\ 的边给保留下来,单独另算,或者也可以理解为在原来的数据上补充了值为-NAN的边。而floor模式则是直接把不足\ ``filter size``\ 的边给舍弃了。具体计算公式如下: +- ``ceil_mode``\ : 是否使用 ceil 函数计算输出高度和宽度。\ ``ceil mode``\ 意为天花板模式,是指会把特征图中不足\ ``filter size``\ 的边给保留下来,单独另算,或者也可以理解为在原来的数据上补充了值为-NAN 的边。而 floor 模式则是直接把不足\ ``filter size``\ 的边给舍弃了。具体计算公式如下: - 非\ ``ceil_mode``\ 下:\ ``输出大小 = (输入大小 - filter size + 2 * padding) / stride(步长) + 1`` @@ -38,7 +38,7 @@ PaddlePaddle中有针对定长图像特征的二维(pool2d)、三维卷积(pool3 -api汇总: +api 汇总: - :ref:`cn_api_fluid_layers_pool2d` - :ref:`cn_api_fluid_layers_pool3d` @@ -49,14 +49,14 @@ api汇总: ``roi_pool``\ 一般用于检测网络中,将输入特征图依据候选框池化到特定的大小。 -- ``rois``\ : 接收\ ``LoDTensor``\ 类型来表示需要池化的 Regions of Interest,关于RoI的解释请参考\ `论文 `__ +- ``rois``\ : 接收\ ``LoDTensor``\ 类型来表示需要池化的 Regions of 
Interest,关于 RoI 的解释请参考\ `论文 `__ - ``pooled_height`` 和 ``pooled_width``\ : 这里可以接受非正方的池化窗口大小 -- ``spatial_scale``\ : 用作设定缩放RoI和原图缩放的比例,注意,这里的设定需要用户自行计算RoI和原图的实际缩放比例。 +- ``spatial_scale``\ : 用作设定缩放 RoI 和原图缩放的比例,注意,这里的设定需要用户自行计算 RoI 和原图的实际缩放比例。 -api汇总: +api 汇总: - :ref:`cn_api_fluid_layers_roi_pool` @@ -65,7 +65,7 @@ api汇总: -------------------- ``sequence_pool``\ 是一个用作对于不等长序列进行池化的接口,它将每一个实例的全部时间步的特征进行池化,它同样支持 -``average``, ``sum``, ``sqrt`` 和\ ``max``\ 4种类型之一作为pooling的方式。 其中: +``average``, ``sum``, ``sqrt`` 和\ ``max``\ 4 种类型之一作为 pooling 的方式。 其中: - ``average``\ 是对于每一个时间步内的数据求和后分别取平均值做为池化的结果。 @@ -75,6 +75,6 @@ api汇总: - ``max``\ 则是对每一个时间步内的数据分别求取最大值作为池化的结果。 -api汇总: +api 汇总: - :ref:`cn_api_fluid_layers_sequence_pool` diff --git a/docs/api_guides/low_level/layers/sequence.rst b/docs/api_guides/low_level/layers/sequence.rst index 13801e41a74..ffdc6cc2f2e 100644 --- a/docs/api_guides/low_level/layers/sequence.rst +++ b/docs/api_guides/low_level/layers/sequence.rst @@ -5,56 +5,56 @@ ######## 在深度学习领域许多问题涉及到对 `序列(sequence) `_ 的处理。 -从Wiki上的释义可知,序列可以表征多种物理意义,但在深度学习中,最常见的仍然是"时间序列"——一个序列包含多个时间步的信息。 +从 Wiki 上的释义可知,序列可以表征多种物理意义,但在深度学习中,最常见的仍然是"时间序列"——一个序列包含多个时间步的信息。 -在Paddle Fluid中,我们将序列表示为 :ref:`cn_api_fluid_LoDTensor` 。 -因为一般进行神经网络计算时都是一个batch一个batch地计算,所以我们用一个LoDTensor来存储一个mini batch的序列。 -一个LoDTensor的第0维包含该mini batch中所有序列的所有时间步,并且用LoD来记录各个序列的长度,区分不同序列。 -而在运算时,还需要根据LoD信息将LoDTensor中一个mini batch的第0维拆开成多个序列。(具体请参考上述LoD相关的文档。) -所以,对这类LoDTensor第0维的操作不能简单地使用一般的layer来进行,针对这一维的操作必须要结合LoD的信息。 -(例如,你不能用 :code:`layers.reshape` 来对一个序列的第0维进行reshape)。 +在 Paddle Fluid 中,我们将序列表示为 :ref:`cn_api_fluid_LoDTensor` 。 +因为一般进行神经网络计算时都是一个 batch 一个 batch 地计算,所以我们用一个 LoDTensor 来存储一个 mini batch 的序列。 +一个 LoDTensor 的第 0 维包含该 mini batch 中所有序列的所有时间步,并且用 LoD 来记录各个序列的长度,区分不同序列。 +而在运算时,还需要根据 LoD 信息将 LoDTensor 中一个 mini batch 的第 0 维拆开成多个序列。(具体请参考上述 LoD 相关的文档。) +所以,对这类 LoDTensor 第 0 维的操作不能简单地使用一般的 layer 来进行,针对这一维的操作必须要结合 LoD 的信息。 +(例如,你不能用 :code:`layers.reshape` 来对一个序列的第 0 维进行 reshape)。 
-为了实行各类针对序列的操作,我们设计了一系列序列相关的API,专门用于正确处理序列相关的操作。 -实践中,由于一个LoDTensor包括一个mini batch的序列,同一个mini batch中不同的序列通常属于多个sample,它们彼此之间不会也不应该发生相互作用。 -因此,若一个layer以两个(或多个)LoDTensor为输入(或者以一个list的LoDTensor为输入),每一个LoDTensor代表一个mini batch的序列,则第一个LoDTensor中的第一个序列只会和第二个LoDTensor中的第一个序列发生计算, -第一个LoDTensor中的第二个序列只会和第二个LoDTensor中的第二个序列发生计算,第一个LoDTensor中的第i个序列只会和第二个LoDTensor中第i个序列发生计算,依此类推。 +为了实行各类针对序列的操作,我们设计了一系列序列相关的 API,专门用于正确处理序列相关的操作。 +实践中,由于一个 LoDTensor 包括一个 mini batch 的序列,同一个 mini batch 中不同的序列通常属于多个 sample,它们彼此之间不会也不应该发生相互作用。 +因此,若一个 layer 以两个(或多个)LoDTensor 为输入(或者以一个 list 的 LoDTensor 为输入),每一个 LoDTensor 代表一个 mini batch 的序列,则第一个 LoDTensor 中的第一个序列只会和第二个 LoDTensor 中的第一个序列发生计算, +第一个 LoDTensor 中的第二个序列只会和第二个 LoDTensor 中的第二个序列发生计算,第一个 LoDTensor 中的第 i 个序列只会和第二个 LoDTensor 中第 i 个序列发生计算,依此类推。 -**总而言之,一个LoDTensor存储一个mini batch的多个序列,其中的序列个数为batch size;多个LoDTensor间发生计算时,每个LoDTensor中的第i个序列只会和其他LoDTensor中第i个序列发生计算。理解这一点对于理解接下来序列相关的操作会至关重要。** +**总而言之,一个 LoDTensor 存储一个 mini batch 的多个序列,其中的序列个数为 batch size;多个 LoDTensor 间发生计算时,每个 LoDTensor 中的第 i 个序列只会和其他 LoDTensor 中第 i 个序列发生计算。理解这一点对于理解接下来序列相关的操作会至关重要。** 1. sequence_softmax ------------------- -这个layer以一个mini batch的序列为输入,在每个序列内做softmax操作。其输出为一个mini batch相同shape的序列,但在序列内是经softmax归一化过的。 -这个layer往往用于在每个sequence内做softmax归一化。 +这个 layer 以一个 mini batch 的序列为输入,在每个序列内做 softmax 操作。其输出为一个 mini batch 相同 shape 的序列,但在序列内是经 softmax 归一化过的。 +这个 layer 往往用于在每个 sequence 内做 softmax 归一化。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_softmax` 2. sequence_concat ------------------ -这个layer以一个list为输入,该list中可以含有多个LoDTensor,每个LoDTensor为一个mini batch的序列。 -该layer会将每个batch中第i个序列在时间维度上拼接成一个新序列,作为返回的batch中的第i个序列。 -理所当然地,list中每个LoDTensor的序列必须有相同的batch size。 +这个 layer 以一个 list 为输入,该 list 中可以含有多个 LoDTensor,每个 LoDTensor 为一个 mini batch 的序列。 +该 layer 会将每个 batch 中第 i 个序列在时间维度上拼接成一个新序列,作为返回的 batch 中的第 i 个序列。 +理所当然地,list 中每个 LoDTensor 的序列必须有相同的 batch size。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_concat` 3. 
sequence_first_step ---------------------- -这个layer以一个LoDTensor作为输入,会取出每个序列中的第一个元素(即第一个时间步的元素),并作为返回值。 +这个 layer 以一个 LoDTensor 作为输入,会取出每个序列中的第一个元素(即第一个时间步的元素),并作为返回值。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_first_step` 4. sequence_last_step --------------------- -同 :code:`sequence_first_step` ,除了本layer是取每个序列中最后一个元素(即最后一个时间步)作为返回值。 +同 :code:`sequence_first_step` ,除了本 layer 是取每个序列中最后一个元素(即最后一个时间步)作为返回值。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_last_step` 5. sequence_expand ------------------ -这个layer有两个LoDTensor的序列作为输入,并按照第二个LoDTensor中序列的LoD信息来扩展第一个batch中的序列。 +这个 layer 有两个 LoDTensor 的序列作为输入,并按照第二个 LoDTensor 中序列的 LoD 信息来扩展第一个 batch 中的序列。 通常用来将只有一个时间步的序列(例如 :code:`sequence_first_step` 的返回结果)延展成有多个时间步的序列,以此方便与有多个时间步的序列进行运算。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_expand` @@ -62,50 +62,50 @@ API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_expand` 6. sequence_expand_as --------------------- -这个layer需要两个LoDTensor的序列作为输入,然后将第一个Tensor序列中的每一个序列延展成和第二个Tensor中对应序列等长的序列。 -不同于 :code:`sequence_expand` ,这个layer会将第一个LoDTensor中的序列严格延展为和第二个LoDTensor中的序列等长。 -如果无法延展成等长的(例如第二个batch中的序列长度不是第一个batch中序列长度的整数倍),则会报错。 +这个 layer 需要两个 LoDTensor 的序列作为输入,然后将第一个 Tensor 序列中的每一个序列延展成和第二个 Tensor 中对应序列等长的序列。 +不同于 :code:`sequence_expand` ,这个 layer 会将第一个 LoDTensor 中的序列严格延展为和第二个 LoDTensor 中的序列等长。 +如果无法延展成等长的(例如第二个 batch 中的序列长度不是第一个 batch 中序列长度的整数倍),则会报错。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_expand_as` 7. sequence_enumerate --------------------- -这个layer需要一个LoDTensor的序列作为输入,同时需要指定一个 :code:`win_size` 的长度。这个layer将依次取所有序列中长度为 :code:`win_size` 的子序列,并组合成新的序列。 +这个 layer 需要一个 LoDTensor 的序列作为输入,同时需要指定一个 :code:`win_size` 的长度。这个 layer 将依次取所有序列中长度为 :code:`win_size` 的子序列,并组合成新的序列。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_enumerate` 8. 
sequence_reshape ------------------- -这个layer需要一个LoDTensor的序列作为输入,同时需要指定一个 :code:`new_dim` 作为新的序列的维度。 -该layer会将mini batch内每个序列reshape为new_dim给定的维度。注意,每个序列的长度会改变(因此LoD信息也会变),以适应新的形状。 +这个 layer 需要一个 LoDTensor 的序列作为输入,同时需要指定一个 :code:`new_dim` 作为新的序列的维度。 +该 layer 会将 mini batch 内每个序列 reshape 为 new_dim 给定的维度。注意,每个序列的长度会改变(因此 LoD 信息也会变),以适应新的形状。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_reshape` 9. sequence_scatter ------------------- -这个layer可以将一个序列的数据scatter到另一个tensor上。这个layer有三个input,一个要被scatter的目标tensor :code:`input`; -一个是序列的数据 :code:`update` ,一个是目标tensor的上坐标 :code:`index` 。Output为scatter后的tensor,形状和 :code:`input` 相同。 +这个 layer 可以将一个序列的数据 scatter 到另一个 tensor 上。这个 layer 有三个 input,一个要被 scatter 的目标 tensor :code:`input`; +一个是序列的数据 :code:`update` ,一个是目标 tensor 的上坐标 :code:`index` 。Output 为 scatter 后的 tensor,形状和 :code:`input` 相同。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_scatter` 10. sequence_pad ---------------- -这个layer可以将不等长的序列补齐成等长序列。使用这个layer需要提供一个 :code:`PadValue` 和一个 :code:`padded_length`。 -前者是用来补齐序列的元素,可以是一个数也可以是一个tensor;后者是序列补齐的目标长度。 -这个layer会返回补齐后的序列,以及一个记录补齐前各个序列长度的tensor :code:`Length`。 +这个 layer 可以将不等长的序列补齐成等长序列。使用这个 layer 需要提供一个 :code:`PadValue` 和一个 :code:`padded_length`。 +前者是用来补齐序列的元素,可以是一个数也可以是一个 tensor;后者是序列补齐的目标长度。 +这个 layer 会返回补齐后的序列,以及一个记录补齐前各个序列长度的 tensor :code:`Length`。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_pad` 11. 
sequence_mask ----------------- -这个layer会根据 :code:`input` 生成一个mask,:code:`input` 是一个记录了每个序列长度的tensor。 -此外这个layer还需要一个参数 :code:`maxlen` 用于指定序列中最长的序列长度。 -通常这个layer用于生成一个mask,将被pad后的序列中pad的部分过滤掉。 -:code:`input` 的长度tensor通常可以直接用 :code:`sequence_pad` 返回的 :code:`Length`。 +这个 layer 会根据 :code:`input` 生成一个 mask,:code:`input` 是一个记录了每个序列长度的 tensor。 +此外这个 layer 还需要一个参数 :code:`maxlen` 用于指定序列中最长的序列长度。 +通常这个 layer 用于生成一个 mask,将被 pad 后的序列中 pad 的部分过滤掉。 +:code:`input` 的长度 tensor 通常可以直接用 :code:`sequence_pad` 返回的 :code:`Length`。 API Reference 请参考 :ref:`cn_api_fluid_layers_sequence_mask` diff --git a/docs/api_guides/low_level/layers/sparse_update.rst b/docs/api_guides/low_level/layers/sparse_update.rst index 63b8522978e..c77b9f90809 100644 --- a/docs/api_guides/low_level/layers/sparse_update.rst +++ b/docs/api_guides/low_level/layers/sparse_update.rst @@ -4,20 +4,20 @@ 稀疏更新 ##### -Fluid的 :ref:`cn_api_fluid_layers_embedding` 层在单机训练和分布式训练时,均可以支持“稀疏更新”,即梯度以sparse tensor 结构存储,只保存梯度不为0的行。 -在分布式训练中,对于较大的embedding层,开启稀疏更新有助于减少通信数据量,提升训练速度。 +Fluid 的 :ref:`cn_api_fluid_layers_embedding` 层在单机训练和分布式训练时,均可以支持“稀疏更新”,即梯度以 sparse tensor 结构存储,只保存梯度不为 0 的行。 +在分布式训练中,对于较大的 embedding 层,开启稀疏更新有助于减少通信数据量,提升训练速度。 -在paddle内部,我们用lookup_table来实现embedding。下边这张图说明了embedding在正向和反向计算的过程: +在 paddle 内部,我们用 lookup_table 来实现 embedding。下边这张图说明了 embedding 在正向和反向计算的过程: -如图所示:一个Tensor中有两行不为0,正向计算的过程中,我们使用ids存储不为0的行,并使用对应的两行数据来进行计算;反向更新的过程也只更新这两行。 +如图所示:一个 Tensor 中有两行不为 0,正向计算的过程中,我们使用 ids 存储不为 0 的行,并使用对应的两行数据来进行计算;反向更新的过程也只更新这两行。 .. image:: ../../../images/lookup_table_training.png :scale: 50 % -embedding使用例子: +embedding 使用例子: --------------------- -API详细使用方法参考 :ref:`cn_api_fluid_layers_embedding` ,以下是一个简单的例子: +API 详细使用方法参考 :ref:`cn_api_fluid_layers_embedding` ,以下是一个简单的例子: .. 
code-block:: python @@ -37,9 +37,9 @@ API详细使用方法参考 :ref:`cn_api_fluid_layers_embedding` ,以下是一 以上参数中: -- :code:`is_sparse` : 反向计算的时候梯度是否为sparse tensor。如果不设置,梯度是一个 :ref:`Lod_Tensor ` 。默认为False。 +- :code:`is_sparse` : 反向计算的时候梯度是否为 sparse tensor。如果不设置,梯度是一个 :ref:`Lod_Tensor ` 。默认为 False。 -- :code:`is_distributed` : 标志是否是用在分布式的场景下。一般大规模稀疏更新(embedding的第0维维度很大,比如几百万以上)才需要设置。具体可以参考大规模稀疏的API guide :ref:`cn_api_guide_async_training` 。默认为False。 +- :code:`is_distributed` : 标志是否是用在分布式的场景下。一般大规模稀疏更新(embedding 的第 0 维维度很大,比如几百万以上)才需要设置。具体可以参考大规模稀疏的 API guide :ref:`cn_api_guide_async_training` 。默认为 False。 -- API汇总: +- API 汇总: - :ref:`cn_api_fluid_layers_embedding` diff --git a/docs/api_guides/low_level/layers/tensor.rst b/docs/api_guides/low_level/layers/tensor.rst index 87f953b5a01..c4d7709bd82 100644 --- a/docs/api_guides/low_level/layers/tensor.rst +++ b/docs/api_guides/low_level/layers/tensor.rst @@ -13,14 +13,14 @@ Tensor 1. create_tensor --------------------- -Tensor用于在框架中承载数据,使用 :code:`create_tensor` 可以创建一个指定数据类型的Lod-Tensor变量, +Tensor 用于在框架中承载数据,使用 :code:`create_tensor` 可以创建一个指定数据类型的 Lod-Tensor 变量, API reference 请参考: :ref:`cn_api_fluid_layers_create_tensor` 2. create_parameter --------------------- -神经网络的训练过程是一个对参数的学习过程,Fluid 使用 :code:`create_parameter` 创建一个可学习的参数。该参数的值可以被operator改变。 +神经网络的训练过程是一个对参数的学习过程,Fluid 使用 :code:`create_parameter` 创建一个可学习的参数。该参数的值可以被 operator 改变。 API reference 请参考::ref:`cn_api_fluid_layers_create_parameter` @@ -28,7 +28,7 @@ API reference 请参考::ref:`cn_api_fluid_layers_create_parameter` 3. create_global_var --------------------- -Fluid 使用 :code:`create_global_var` 创建一个全局tensor,通过此 API 可以指定被创建 Tensor 变量的数据类型、形状和值。 +Fluid 使用 :code:`create_global_var` 创建一个全局 tensor,通过此 API 可以指定被创建 Tensor 变量的数据类型、形状和值。 API reference 请参考::ref:`cn_api_fluid_layers_create_global_var` @@ -94,14 +94,14 @@ API reference 请参考: :ref:`cn_api_fluid_layers_argsort` 12. 
ones ------------- -Fluid 使用 :code:`ones` 创建一个指定大小和数据类型的Tensor,且初始值为1。 +Fluid 使用 :code:`ones` 创建一个指定大小和数据类型的 Tensor,且初始值为 1。 API reference 请参考: :ref:`cn_api_fluid_layers_ones` 13. zeros --------------- -Fluid 使用 :code:`zeros` 创建一个指定大小和数据类型的Tensor,且初始值为0。 +Fluid 使用 :code:`zeros` 创建一个指定大小和数据类型的 Tensor,且初始值为 0。 API reference 请参考: :ref:`cn_api_fluid_layers_zeros` @@ -117,12 +117,12 @@ API reference 请参考: :ref:`cn_api_fluid_layers_reverse` LoD-Tensor ============ -LoD-Tensor非常适用于序列数据,相关知识可以参考阅读 `LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor.html>`_ 。 +LoD-Tensor 非常适用于序列数据,相关知识可以参考阅读 `LoD_Tensor <../../../user_guides/howto/basic_concept/lod_tensor.html>`_ 。 1. create_lod_tensor ----------------------- -Fluid 使用 :code:`create_lod_tensor` 基于numpy数组、列表或现有 LoD_Tensor 创建拥有新的层级信息的 LoD_Tensor。 +Fluid 使用 :code:`create_lod_tensor` 基于 numpy 数组、列表或现有 LoD_Tensor 创建拥有新的层级信息的 LoD_Tensor。 API reference 请参考: :ref:`cn_api_fluid_create_lod_tensor` diff --git a/docs/api_guides/low_level/metrics.rst b/docs/api_guides/low_level/metrics.rst index e10312fed05..0d85cb5e191 100644 --- a/docs/api_guides/low_level/metrics.rst +++ b/docs/api_guides/low_level/metrics.rst @@ -7,7 +7,7 @@ 分类任务评价 ------------------ -分类任务中最常用的是二分类,而多分类任务也可以转化为多个二分类任务的组合,二分类任务常用的评价指标有准确率、正确率、召回率、AUC和平均准确度。 +分类任务中最常用的是二分类,而多分类任务也可以转化为多个二分类任务的组合,二分类任务常用的评价指标有准确率、正确率、召回率、AUC 和平均准确度。 - 准确率: :code:`Precision` ,用来衡量二分类中召回真值和召回值的比例。 @@ -22,11 +22,11 @@ API Reference 请参考 :ref:`cn_api_fluid_metrics_Recall` -- AUC: :code:`Area Under Curve`, 适用于二分类的分类模型评估,用来计算 `ROC曲线的累积面积 `_。:code:`Auc` 通过python计算实现,如果关注性能,可以使用 :code:`fluid.layers.auc` 代替。 +- AUC: :code:`Area Under Curve`, 适用于二分类的分类模型评估,用来计算 `ROC 曲线的累积面积 `_。:code:`Auc` 通过 python 计算实现,如果关注性能,可以使用 :code:`fluid.layers.auc` 代替。 API Reference 请参考 :ref:`cn_api_fluid_metrics_Auc` -- 平均准确度: :code:`Average Precision` ,常用在Faster R-CNN和SSD等物体检测任务中。在不同召回条件下,计算了准确率的平均值,具体可以参考文档 `Average-precision `_ 和 `SSD: Single Shot MultiBox Detector `_。 +- 平均准确度: :code:`Average Precision` 
,常用在 Faster R-CNN 和 SSD 等物体检测任务中。在不同召回条件下,计算了准确率的平均值,具体可以参考文档 `Average-precision `_ 和 `SSD: Single Shot MultiBox Detector `_。 API Reference 请参考 :ref:`cn_api_fluid_metrics_DetectionMAP` @@ -34,16 +34,16 @@ 序列标注任务评价 ------------------ -序列标注任务中,token的分组称为语块(chunk),模型会同时将输入的token分组和分类,常用的评估方法是语块评估方法。 +序列标注任务中,token 的分组称为语块(chunk),模型会同时将输入的 token 分组和分类,常用的评估方法是语块评估方法。 -- 语块评估方法: :code:`ChunkEvaluator` ,接收 :code:`chunk_eval` 接口的输出,累积每一个minibatch的语块统计值,最后计算准确率、召回率和F1值。:code:`ChunkEvaluator` 支持IOB, IOE, IOBES和IO四种标注模式。可以参考文档 `Chunking with Support Vector Machines `_ 。 +- 语块评估方法: :code:`ChunkEvaluator` ,接收 :code:`chunk_eval` 接口的输出,累积每一个 minibatch 的语块统计值,最后计算准确率、召回率和 F1 值。:code:`ChunkEvaluator` 支持 IOB, IOE, IOBES 和 IO 四种标注模式。可以参考文档 `Chunking with Support Vector Machines `_ 。 API Reference 请参考 :ref:`cn_api_fluid_metrics_ChunkEvaluator` 生成任务评价 ------------------ -生成任务会依据输入直接产生输出。对应NLP任务中(比如语音识别),则生成新字符串。评估生成字符串和目标字符串之间距离的方法也有多种,比如多分类评估方法,而另外一种常用的方法叫做编辑距离。 +生成任务会依据输入直接产生输出。对应 NLP 任务中(比如语音识别),则生成新字符串。评估生成字符串和目标字符串之间距离的方法也有多种,比如多分类评估方法,而另外一种常用的方法叫做编辑距离。 - 编辑距离: :code:`EditDistance` ,用来衡量两个字符串的相似度。可以参考文档 `Edit_distance `_。 diff --git a/docs/api_guides/low_level/model_save_reader.rst b/docs/api_guides/low_level/model_save_reader.rst index c8afd62817d..b057f7fd534 100644 --- a/docs/api_guides/low_level/model_save_reader.rst +++ b/docs/api_guides/low_level/model_save_reader.rst @@ -4,7 +4,7 @@ 模型保存与加载 ######### -模型的保存与加载主要涉及到如下八个API: +模型的保存与加载主要涉及到如下八个 API: :code:`fluid.io.save_vars`、:code:`fluid.io.save_params`、:code:`fluid.io.save_persistables`、:code:`fluid.io.save_inference_model`、:code:`fluid.io.load_vars`、:code:`fluid.io.load_params`、:code:`fluid.io.load_persistables` 和 :code:`fluid.io.load_inference_model`。 变量、持久性变量和参数 @@ -12,7 +12,7 @@ 在 :code:`Paddle` 中,算子(:code:`Operator`)的每一个输入和输出都是一个变量(:code:`Variable`),而参数(:code:`Parameter`)是变量(:code:`Variable`)的子类。持久性变量(:code:`Persistables`)是一种在每次迭代结束后均不会被删除的变量。参数是一种持久性变量,其在每次迭代后都会被优化器(:ref:`api_guide_optimizer`)更新。训练神经网络本质上就是在更新参数。 
-模型保存API介绍 +模型保存 API 介绍 ==================== - :code:`fluid.io.save_vars`:通过执行器(:ref:`api_guide_executor`)保存变量到指定的目录中。保存变量的方式有两种: @@ -35,7 +35,7 @@ - :code:`fluid.io.save_inference_model`:请参考 :ref:`api_guide_inference`。 -模型加载API介绍 +模型加载 API 介绍 ==================== - :code:`fluid.io.load_vars`:通过执行器(:code:`Executor`)加载指定目录中的变量。加载变量的方式有两种: diff --git a/docs/api_guides/low_level/nets.rst b/docs/api_guides/low_level/nets.rst index ddb118e5041..956c07556f2 100644 --- a/docs/api_guides/low_level/nets.rst +++ b/docs/api_guides/low_level/nets.rst @@ -43,7 +43,7 @@ API Reference 请参考 :ref:`cn_api_fluid_nets_sequence_conv_pool` 4.glu ----- :code:`glu` 全称 Gated Linear Units, 来源于论文 `Language Modeling with Gated Convolutional Networks `_ ,由 :ref:`cn_api_fluid_layers_split` , :ref:`cn_api_fluid_layers_sigmoid` 和 :ref:`cn_api_fluid_layers_elementwise_mul` 组成。 -它会把输入数据均分为2等份,并对第二部分求 `Sigmoid `_ , 然后再与第一部分数据求点乘得到输出。 +它会把输入数据均分为 2 等份,并对第二部分求 `Sigmoid `_ , 然后再与第一部分数据求点乘得到输出。 API Reference 请参考 :ref:`cn_api_fluid_nets_glu` diff --git a/docs/api_guides/low_level/optimizer.rst b/docs/api_guides/low_level/optimizer.rst index 38cc8855be6..855ced378b4 100644 --- a/docs/api_guides/low_level/optimizer.rst +++ b/docs/api_guides/low_level/optimizer.rst @@ -21,8 +21,8 @@ API Reference 请参考 :ref:`cn_api_fluid_optimizer_SGDOptimizer` ---------------------------- :code:`Momentum` 优化器在 :code:`SGD` 基础上引入动量,减少了随机梯度下降过程中存在的噪声问题。 -用户在使用时可以将 :code:`ues_nesterov` 参数设置为False或True,分别对应传统 `Momentum(论文4.1节) -`_ 算法和 `Nesterov accelerated gradient(论文4.2节) +用户在使用时可以将 :code:`ues_nesterov` 参数设置为 False 或 True,分别对应传统 `Momentum(论文 4.1 节) +`_ 算法和 `Nesterov accelerated gradient(论文 4.2 节) `_ 算法。 API Reference 请参考 :ref:`cn_api_fluid_optimizer_MomentumOptimizer` @@ -37,8 +37,8 @@ API Reference 请参考 :ref:`cn_api_fluid_optimizer_AdagradOptimizer` 4.RMSPropOptimizer ------------------ -`RMSProp优化器 `_ ,是一种自适应调整学习率的方法, -主要解决使用Adagrad后,模型训练中后期学习率急剧下降的问题。 +`RMSProp 优化器 `_ ,是一种自适应调整学习率的方法, +主要解决使用 Adagrad 后,模型训练中后期学习率急剧下降的问题。 API 
Reference 请参考 :ref:`cn_api_fluid_optimizer_RMSPropOptimizer` @@ -65,7 +65,7 @@ API Reference 请参考 :ref:`cn_api_fluid_optimizer_AdamaxOptimizer` 7.DecayedAdagrad/ DecayedAdagradOptimizer ------------------------------------------- -`DecayedAdagrad `_ 优化器,可以看做是引入了衰减速率的 :code:`Adagrad` 算法,解决使用Adagrad后,模型训练中后期学习率急剧下降的问题。 +`DecayedAdagrad `_ 优化器,可以看做是引入了衰减速率的 :code:`Adagrad` 算法,解决使用 Adagrad 后,模型训练中后期学习率急剧下降的问题。 API Reference 请参考 :ref:`cn_api_fluid_optimizer_DecayedAdagrad` @@ -75,7 +75,7 @@ API Reference 请参考 :ref:`cn_api_fluid_optimizer_DecayedAdagrad` 8. Ftrl/FtrlOptimizer ---------------------- -`FtrlOptimizer `_ 优化器结合了 `FOBOS算法 `_ 的高精度与 `RDA算法 +`FtrlOptimizer `_ 优化器结合了 `FOBOS 算法 `_ 的高精度与 `RDA 算法 `_ 的稀疏性,是目前效果非常好的一种 `Online Learning `_ 算法。 API Reference 请参考 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` @@ -85,6 +85,6 @@ API Reference 请参考 :ref:`cn_api_fluid_optimizer_FtrlOptimizer` 9.ModelAverage ----------------- -:code:`ModelAverage` 优化器,在训练中通过窗口来累计历史 parameter,在预测时使用取平均值后的paramet,整体提高预测的精度。 +:code:`ModelAverage` 优化器,在训练中通过窗口来累计历史 parameter,在预测时使用取平均值后的 paramet,整体提高预测的精度。 API Reference 请参考 :ref:`cn_api_fluid_optimizer_ModelAverage` diff --git a/docs/api_guides/low_level/parallel_executor.rst b/docs/api_guides/low_level/parallel_executor.rst index f2ae56c0324..2f88318b9cc 100644 --- a/docs/api_guides/low_level/parallel_executor.rst +++ b/docs/api_guides/low_level/parallel_executor.rst @@ -5,10 +5,10 @@ ##### -:code:`ParallelExecutor` 是以数据并行的方式在多个节点上分别执行 :code:`Program` 的执行器。用户可以通过Python脚本驱动 :code:`ParallelExecutor` 执行, :code:`ParallelExecutor` 的执行过程: +:code:`ParallelExecutor` 是以数据并行的方式在多个节点上分别执行 :code:`Program` 的执行器。用户可以通过 Python 脚本驱动 :code:`ParallelExecutor` 执行, :code:`ParallelExecutor` 的执行过程: - 首先根据 :code:`Program` 、 :code:`GPU` 卡的数目(或者 :code:`CPU` 的核数)以及 :ref:`cn_api_fluid_BuildStrategy` 构建 :code:`SSA Graph` 和一个线程池; -- 执行过程中,根据Op的输入是否Ready决定是否执行该Op,这样可以使没有相互依赖的多个Op可在线程池中并行执行; +- 执行过程中,根据 Op 的输入是否 Ready 决定是否执行该 Op,这样可以使没有相互依赖的多个 Op 可在线程池中并行执行; :code:`ParallelExecutor` 
在构造时需要指定当前 :code:`Program` 的设备类型, :code:`GPU` 或者 :code:`CPU` : @@ -22,12 +22,12 @@ :code:`ParallelExecutor` 在模型训练时支持两种模式的梯度聚合, :code:`AllReduce` 和 :code:`Reduce` : -* :code:`AllReduce` 模式下, :code:`ParallelExecutor` 调用AllReduce操作使多个节点上参数梯度完全相等,然后各个节点独立进行参数的更新; -* :code:`Reduce` 模式下, :code:`ParallelExecutor` 会预先将所有参数的更新分派到不同的节点上,在执行过程中 :code:`ParallelExecutor` 调用Reduce操作将参数梯度在预先指定的节点上进行聚合,并进行参数更新,最后调用Broadcast操作将更新后的参数发送到其他节点。 +* :code:`AllReduce` 模式下, :code:`ParallelExecutor` 调用 AllReduce 操作使多个节点上参数梯度完全相等,然后各个节点独立进行参数的更新; +* :code:`Reduce` 模式下, :code:`ParallelExecutor` 会预先将所有参数的更新分派到不同的节点上,在执行过程中 :code:`ParallelExecutor` 调用 Reduce 操作将参数梯度在预先指定的节点上进行聚合,并进行参数更新,最后调用 Broadcast 操作将更新后的参数发送到其他节点。 这两种模式通过 :code:`build_strategy` 来指定,使用方法,请参考 :ref:`cn_api_fluid_BuildStrategy` 。 -**注意** :如果在Reduce模式下使用 :code:`CPU` 多线程执行 :code:`Program` , :code:`Program` 的参数在多个线程间是共享的,在某些模型上,Reduce模式可以大幅节省内存。 +**注意** :如果在 Reduce 模式下使用 :code:`CPU` 多线程执行 :code:`Program` , :code:`Program` 的参数在多个线程间是共享的,在某些模型上,Reduce 模式可以大幅节省内存。 鉴于模型的执行速率和模型结构及执行器的执行策略有关,:code:`ParallelExecutor` 允许你修改执行器的相关参数,例如线程池的规模( :code:`num_threads` )、为清除临时变量 :code:`num_iteration_per_drop_scope` 需要进行的循环次数。更多信息请参照 :ref:`cn_api_fluid_ExecutionStrategy` 。 @@ -35,16 +35,16 @@ .. code-block:: python # 注释: - # - 如果你想在ParallelExecutor中指定用于运行的GPU卡,需要在环境中定义 + # - 如果你想在 ParallelExecutor 中指定用于运行的 GPU 卡,需要在环境中定义 # CUDA_VISIBLE_DEVICES - # - 如果你想在ParallelExecutor中使用多CPU来运行程序,需要在环境中定义 + # - 如果你想在 ParallelExecutor 中使用多 CPU 来运行程序,需要在环境中定义 # CPU_NUM - # 首先创建Executor。 + # 首先创建 Executor。 place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) # 运行启动程序仅一次。 exe.run(fluid.default_startup_program()) - # 定义train_exe和test_exe + # 定义 train_exe 和 test_exe exec_strategy = fluid.ExecutionStrategy() exec_strategy.num_threads = dev_count * 4 # the size of thread pool. 
build_strategy = fluid.BuildStrategy() @@ -54,7 +54,7 @@ build_strategy=build_strategy, exec_strategy=exec_strategy, loss_name=loss.name) - # 注释:对于test_exe,loss_name是不必要的。 + # 注释:对于 test_exe,loss_name 是不必要的。 test_exe = fluid.ParallelExecutor(use_cuda=True, main_program=test_program, build_strategy=build_strategy, @@ -62,6 +62,6 @@ share_vars_from=train_exe) train_loss, = train_exe.run(fetch_list=[loss.name], feed=feed_dict) test_loss, = test_exe.run(fetch_list=[loss.name], feed=feed_dict) -- 相关API : +- 相关 API : - :ref:`cn_api_fluid_ParallelExecutor` - :ref:`cn_api_fluid_BuildStrategy` diff --git a/docs/api_guides/low_level/parameter.rst b/docs/api_guides/low_level/parameter.rst index af7b63f1fd6..b09843437ef 100644 --- a/docs/api_guides/low_level/parameter.rst +++ b/docs/api_guides/low_level/parameter.rst @@ -4,7 +4,7 @@ 模型参数 ######### -模型参数为模型中的weight和bias统称,在fluid中对应fluid.Parameter类,继承自fluid.Variable,是一种可持久化的variable。模型的训练就是不断学习更新模型参数的过程。模型参数相关的属性可以通过 :ref:`cn_api_fluid_ParamAttr` 来配置,可配置内容有: +模型参数为模型中的 weight 和 bias 统称,在 fluid 中对应 fluid.Parameter 类,继承自 fluid.Variable,是一种可持久化的 variable。模型的训练就是不断学习更新模型参数的过程。模型参数相关的属性可以通过 :ref:`cn_api_fluid_ParamAttr` 来配置,可配置内容有: - 初始化方式 - 正则化 @@ -14,7 +14,7 @@ 初始化方式 ================= -fluid通过设置 :code:`ParamAttr` 的 :code:`initializer` 属性为单个parameter设置初始化方式。 +fluid 通过设置 :code:`ParamAttr` 的 :code:`initializer` 属性为单个 parameter 设置初始化方式。 示例如下: .. code-block:: python @@ -24,7 +24,7 @@ fluid通过设置 :code:`ParamAttr` 的 :code:`initializer` 属性为单个param y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) -以下为fluid支持的初始化方式: +以下为 fluid 支持的初始化方式: 1. BilinearInitializer ----------------------- @@ -33,16 +33,16 @@ fluid通过设置 :code:`ParamAttr` 的 :code:`initializer` 属性为单个param 可用别名:Bilinear -API请参考::ref:`cn_api_fluid_initializer_BilinearInitializer` +API 请参考::ref:`cn_api_fluid_initializer_BilinearInitializer` 2. 
ConstantInitializer ---------------------- -常数初始化方式,将parameter初始化为指定的数值。 +常数初始化方式,将 parameter 初始化为指定的数值。 可用别名:Constant -API请参考::ref:`cn_api_fluid_initializer_ConstantInitializer` +API 请参考::ref:`cn_api_fluid_initializer_ConstantInitializer` 3. MSRAInitializer ------------------ @@ -51,7 +51,7 @@ API请参考::ref:`cn_api_fluid_initializer_ConstantInitializer` 可用别名:MSRA -API请参考::ref:`cn_api_fluid_initializer_MSRAInitializer` +API 请参考::ref:`cn_api_fluid_initializer_MSRAInitializer` 4. NormalInitializer --------------------- @@ -60,7 +60,7 @@ API请参考::ref:`cn_api_fluid_initializer_MSRAInitializer` 可用别名:Normal -API请参考::ref:`cn_api_fluid_initializer_NormalInitializer` +API 请参考::ref:`cn_api_fluid_initializer_NormalInitializer` 5. TruncatedNormalInitializer ----------------------------- @@ -69,7 +69,7 @@ API请参考::ref:`cn_api_fluid_initializer_NormalInitializer` 可用别名:TruncatedNormal -API请参考::ref:`cn_api_fluid_initializer_TruncatedNormalInitializer` +API 请参考::ref:`cn_api_fluid_initializer_TruncatedNormalInitializer` 6. UniformInitializer -------------------- @@ -78,7 +78,7 @@ API请参考::ref:`cn_api_fluid_initializer_TruncatedNormalInitializer` 可用别名:Uniform -API请参考::ref:`cn_api_fluid_initializer_UniformInitializer` +API 请参考::ref:`cn_api_fluid_initializer_UniformInitializer` 7. XavierInitializer -------------------- @@ -87,12 +87,12 @@ API请参考::ref:`cn_api_fluid_initializer_UniformInitializer` 可用别名:Xavier -API请参考::ref:`cn_api_fluid_initializer_XavierInitializer` +API 请参考::ref:`cn_api_fluid_initializer_XavierInitializer` 正则化方式 ============= -fluid通过设置 :code:`ParamAttr` 的 :code:`regularizer` 属性为单个parameter设置正则化。 +fluid 通过设置 :code:`ParamAttr` 的 :code:`regularizer` 属性为单个 parameter 设置正则化。 .. 
code-block:: python @@ -100,7 +100,7 @@ fluid通过设置 :code:`ParamAttr` 的 :code:`regularizer` 属性为单个param regularizer=fluid.regularizer.L1DecayRegularizer(0.1)) y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) -以下为fluid支持的正则化方式: +以下为 fluid 支持的正则化方式: - :ref:`cn_api_fluid_regularizer_L1DecayRegularizer` (别名:L1Decay) - :ref:`cn_api_fluid_regularizer_L2DecayRegularizer` (别名:L2Decay) @@ -108,7 +108,7 @@ fluid通过设置 :code:`ParamAttr` 的 :code:`regularizer` 属性为单个param Clipping ========== -fluid通过设置 :code:`ParamAttr` 的 :code:`gradient_clip` 属性为单个parameter设置clipping方式。 +fluid 通过设置 :code:`ParamAttr` 的 :code:`gradient_clip` 属性为单个 parameter 设置 clipping 方式。 .. code-block:: python @@ -117,41 +117,41 @@ fluid通过设置 :code:`ParamAttr` 的 :code:`gradient_clip` 属性为单个par y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) -以下为fluid支持的clipping方式: +以下为 fluid 支持的 clipping 方式: 1. ErrorClipByValue ------------------- -用来将一个tensor的值clipping到指定范围。 +用来将一个 tensor 的值 clipping 到指定范围。 -API请参考::ref:`cn_api_fluid_clip_ErrorClipByValue` +API 请参考::ref:`cn_api_fluid_clip_ErrorClipByValue` 2. GradientClipByGlobalNorm --------------------------- -用来将多个Tensor的global-norm限制在 :code:`clip_norm` 以内。 +用来将多个 Tensor 的 global-norm 限制在 :code:`clip_norm` 以内。 -API请参考::ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` +API 请参考::ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 3. GradientClipByNorm --------------------- -将Tensor的l2-norm限制在 :code:`max_norm` 以内。如果Tensor的l2-norm超过了 :code:`max_norm` , -会将计算出一个 :code:`scale` ,该Tensor的所有值乘上计算出来的 :code:`scale` . +将 Tensor 的 l2-norm 限制在 :code:`max_norm` 以内。如果 Tensor 的 l2-norm 超过了 :code:`max_norm` , +会将计算出一个 :code:`scale` ,该 Tensor 的所有值乘上计算出来的 :code:`scale` . -API请参考::ref:`cn_api_fluid_clip_GradientClipByNorm` +API 请参考::ref:`cn_api_fluid_clip_GradientClipByNorm` 4. 
GradientClipByValue ---------------------- -将parameter对应的gradient的值限制在[min, max]范围内。 +将 parameter 对应的 gradient 的值限制在[min, max]范围内。 -API请参考::ref:`cn_api_fluid_clip_GradientClipByValue` +API 请参考::ref:`cn_api_fluid_clip_GradientClipByValue` 模型平均 ======== -fluid通过 :code:`ParamAttr` 的 :code:`do_model_average` 属性设置单个parameter是否进行平均优化。 +fluid 通过 :code:`ParamAttr` 的 :code:`do_model_average` 属性设置单个 parameter 是否进行平均优化。 示例如下: .. code-block:: python @@ -160,8 +160,8 @@ fluid通过 :code:`ParamAttr` 的 :code:`do_model_average` 属性设置单个par do_model_average=true) y_predict = fluid.layers.fc(input=x, size=10, param_attr=param_attrs) -在miniBatch训练过程中,每个batch过后,都会更新一次parameters,模型平均做的就是平均最近k次更新产生的parameters。 +在 miniBatch 训练过程中,每个 batch 过后,都会更新一次 parameters,模型平均做的就是平均最近 k 次更新产生的 parameters。 -平均后的parameters只是被用来进行测试和预测,其并不参与实际的训练过程。 +平均后的 parameters 只是被用来进行测试和预测,其并不参与实际的训练过程。 -具体API请参考::ref:`cn_api_fluid_optimizer_ModelAverage` +具体 API 请参考::ref:`cn_api_fluid_optimizer_ModelAverage` diff --git a/docs/api_guides/low_level/program.rst b/docs/api_guides/low_level/program.rst index f51b7468a33..9c18d04516f 100644 --- a/docs/api_guides/low_level/program.rst +++ b/docs/api_guides/low_level/program.rst @@ -8,7 +8,7 @@ Program ================== -:code:`Fluid` 中使用类似于编程语言的抽象语法树的形式描述用户的神经网络配置,用户对计算的描述都将写入一段Program。Fluid 中的 Program 替代了传统框架中模型的概念,通过对顺序执行、条件选择和循环执行三种执行结构的支持,做到对任意复杂模型的描述。书写 :code:`Program` 的过程非常接近于写一段通用程序,如果您已经具有一定的编程经验,会很自然地将自己的知识迁移过来。 +:code:`Fluid` 中使用类似于编程语言的抽象语法树的形式描述用户的神经网络配置,用户对计算的描述都将写入一段 Program。Fluid 中的 Program 替代了传统框架中模型的概念,通过对顺序执行、条件选择和循环执行三种执行结构的支持,做到对任意复杂模型的描述。书写 :code:`Program` 的过程非常接近于写一段通用程序,如果您已经具有一定的编程经验,会很自然地将自己的知识迁移过来。 总得来说: @@ -19,7 +19,7 @@ Program * :code:`Block` 中的计算由顺序执行、条件选择或者循环执行三种方式组合,构成复杂的计算逻辑; -* :code:`Block` 中包含对计算和计算对象的描述。计算的描述称之为 Operator;计算作用的对象(或者说 Operator 的输入和输出)被统一为 Tensor,在Fluid中,Tensor 用层级为0的 :ref:`Lod_Tensor ` 表示。 +* :code:`Block` 中包含对计算和计算对象的描述。计算的描述称之为 Operator;计算作用的对象(或者说 Operator 的输入和输出)被统一为 Tensor,在 Fluid 中,Tensor 用层级为 0 的 :ref:`Lod_Tensor ` 
表示。 @@ -29,7 +29,7 @@ Program Block ========= -:code:`Block` 是高级语言中变量作用域的概念,在编程语言中,Block是一对大括号,其中包含局部变量定义和一系列指令或操作符。编程语言中的控制流结构 :code:`if-else` 和 :code:`for` 在深度学习中可以被等效为: +:code:`Block` 是高级语言中变量作用域的概念,在编程语言中,Block 是一对大括号,其中包含局部变量定义和一系列指令或操作符。编程语言中的控制流结构 :code:`if-else` 和 :code:`for` 在深度学习中可以被等效为: +----------------------+-------------------------+ | 编程语言 | Fluid | @@ -55,7 +55,7 @@ Operator 这是因为一些常见的对 Tensor 的操作可能是由更多基础操作构成,为了提高使用的便利性,框架内部对基础 Operator 进行了一些封装,包括创建 Operator 依赖可学习参数,可学习参数的初始化细节等,减少用户重复开发的成本。 -更多内容可参考阅读 `Fluid设计思想 <../../advanced_usage/design_idea/fluid_design_idea.html>`_ +更多内容可参考阅读 `Fluid 设计思想 <../../advanced_usage/design_idea/fluid_design_idea.html>`_ .. _api_guide_Variable: @@ -75,9 +75,9 @@ Name Fluid 中部分网络层里包含了 :code:`name` 参数,如 :ref:`cn_api_fluid_layers_fc` 。此 :code:`name` 一般用来作为网络层输出、权重的前缀标识,具体规则如下: -* 用于网络层输出的前缀标识。若网络层中指定了 :code:`name` 参数,Fluid 将以 ``name值.tmp_数字`` 作为唯一标识对网络层输出进行命名;未指定 :code:`name` 参数时,则以 ``OP名_数字.tmp_数字`` 的方式进行命名,其中的数字会自动递增,以区分同名OP下的不同网络层。 +* 用于网络层输出的前缀标识。若网络层中指定了 :code:`name` 参数,Fluid 将以 ``name 值.tmp_数字`` 作为唯一标识对网络层输出进行命名;未指定 :code:`name` 参数时,则以 ``OP 名_数字.tmp_数字`` 的方式进行命名,其中的数字会自动递增,以区分同名 OP 下的不同网络层。 -* 用于权重或偏置变量的前缀标识。若在网络层中通过 ``param_attr`` 和 ``bias_attr`` 创建了权重变量或偏置变量, 如 :ref:`cn_api_fluid_layers_embedding` 、 :ref:`cn_api_fluid_layers_fc` ,则 Fluid 会自动生成 ``前缀.w_数字`` 或 ``前缀.b_数字`` 的唯一标识对其进行命名,其中 ``前缀`` 为用户指定的 :code:`name` 或自动生成的 ``OP名_数字`` 。若在 ``param_attr`` 和 ``bias_attr`` 中指定了 :code:`name` ,则用此 :code:`name` ,不再自动生成。细节请参考示例代码。 +* 用于权重或偏置变量的前缀标识。若在网络层中通过 ``param_attr`` 和 ``bias_attr`` 创建了权重变量或偏置变量, 如 :ref:`cn_api_fluid_layers_embedding` 、 :ref:`cn_api_fluid_layers_fc` ,则 Fluid 会自动生成 ``前缀.w_数字`` 或 ``前缀.b_数字`` 的唯一标识对其进行命名,其中 ``前缀`` 为用户指定的 :code:`name` 或自动生成的 ``OP 名_数字`` 。若在 ``param_attr`` 和 ``bias_attr`` 中指定了 :code:`name` ,则用此 :code:`name` ,不再自动生成。细节请参考示例代码。 此外,在 :ref:`cn_api_fluid_ParamAttr` 中,可通过指定 :code:`name` 参数实现多个网络层的权重共享。 @@ -118,9 +118,9 @@ Fluid 中部分网络层里包含了 :code:`name` 参数,如 :ref:`cn_api_flui ret = exe.run(feed={'x': 
x_lodTensor}, fetch_list=[fc_none, fc_none1, my_fc1, my_fc2], return_numpy=False) -上述示例中, ``fc_none`` 和 ``fc_none1`` 均未指定 :code:`name` 参数,则以 ``OP名_数字.tmp_数字`` 分别对该OP输出进行命名:``fc_0.tmp_1`` 和 ``fc_1.tmp_1`` ,其中 ``fc_0`` 和 ``fc_1`` 中的数字自动递增以区分两个全连接层; ``my_fc1`` 和 ``my_fc2`` 均指定了 :code:`name` 参数,但取值相同,Fluid 以后缀 ``tmp_数字`` 进行区分,即 ``my_fc.tmp_1`` 和 ``my_fc.tmp_3`` 。 +上述示例中, ``fc_none`` 和 ``fc_none1`` 均未指定 :code:`name` 参数,则以 ``OP 名_数字.tmp_数字`` 分别对该 OP 输出进行命名:``fc_0.tmp_1`` 和 ``fc_1.tmp_1`` ,其中 ``fc_0`` 和 ``fc_1`` 中的数字自动递增以区分两个全连接层; ``my_fc1`` 和 ``my_fc2`` 均指定了 :code:`name` 参数,但取值相同,Fluid 以后缀 ``tmp_数字`` 进行区分,即 ``my_fc.tmp_1`` 和 ``my_fc.tmp_3`` 。 -对于网络层中创建的变量, ``emb`` 层和 ``fc_none`` 、 ``fc_none1`` 层均默认以 ``OP名_数字`` 为前缀对权重或偏置变量进行命名,如 ``embedding_0.w_0`` 、 ``fc_0.w_0`` 、 ``fc_0.b_0`` ,其前缀与OP输出的前缀一致。 ``my_fc1`` 层和 ``my_fc2`` 层则优先以 ``ParamAttr`` 中指定的 ``fc_weight`` 作为共享权重的名称。而偏置变量 ``my_fc.b_0`` 和 ``my_fc.b_1`` 则次优地以 :code:`name` 作为前缀标识。 +对于网络层中创建的变量, ``emb`` 层和 ``fc_none`` 、 ``fc_none1`` 层均默认以 ``OP 名_数字`` 为前缀对权重或偏置变量进行命名,如 ``embedding_0.w_0`` 、 ``fc_0.w_0`` 、 ``fc_0.b_0`` ,其前缀与 OP 输出的前缀一致。 ``my_fc1`` 层和 ``my_fc2`` 层则优先以 ``ParamAttr`` 中指定的 ``fc_weight`` 作为共享权重的名称。而偏置变量 ``my_fc.b_0`` 和 ``my_fc.b_1`` 则次优地以 :code:`name` 作为前缀标识。 在上述示例中,``my_fc1`` 和 ``my_fc2`` 两个全连接层通过构建 ``ParamAttr`` ,并指定 :code:`name` 参数,实现了网络层权重变量的共享机制。 @@ -131,7 +131,7 @@ ParamAttr ========= ========= -相关API +相关 API ========= * 用户配置的单个神经网络叫做 :ref:`cn_api_fluid_Program` 。值得注意的是,训练神经网 @@ -142,4 +142,4 @@ ParamAttr * 用户还可以使用 :ref:`cn_api_fluid_program_guard` 配合 :code:`with` 语句,修改配置好的 :ref:`cn_api_fluid_default_startup_program` 和 :ref:`cn_api_fluid_default_main_program` 。 -* 在Fluid中,Block内部执行顺序由控制流决定,如 :ref:`cn_api_fluid_layers_IfElse` , :ref:`cn_api_fluid_layers_While`, :ref:`cn_api_fluid_layers_Switch` 等,更多内容可参考: :ref:`api_guide_control_flow` +* 在 Fluid 中,Block 内部执行顺序由控制流决定,如 :ref:`cn_api_fluid_layers_IfElse` , :ref:`cn_api_fluid_layers_While`, :ref:`cn_api_fluid_layers_Switch` 等,更多内容可参考: :ref:`api_guide_control_flow` 
diff --git a/docs/design/dynamic_rnn/index_cn.rst b/docs/design/dynamic_rnn/index_cn.rst index 1d224d22cf7..ca44d9442cb 100644 --- a/docs/design/dynamic_rnn/index_cn.rst +++ b/docs/design/dynamic_rnn/index_cn.rst @@ -1,4 +1,4 @@ -动态RNN +动态 RNN ------------ .. toctree:: diff --git a/docs/design/dynamic_rnn/rnn_design.md b/docs/design/dynamic_rnn/rnn_design.md index 6b21f2d0e45..1c5fde8f403 100644 --- a/docs/design/dynamic_rnn/rnn_design.md +++ b/docs/design/dynamic_rnn/rnn_design.md @@ -1,28 +1,28 @@ # RNN 变长输入设计 -对变长序列的学习,现有主流框架比如 tensorflow, pytorch, caffe2, mxnet 等均使用了padding的方式, -即将一个mini-batch内不同长度的序列补0到固定长度参与计算。 +对变长序列的学习,现有主流框架比如 tensorflow, pytorch, caffe2, mxnet 等均使用了 padding 的方式, +即将一个 mini-batch 内不同长度的序列补 0 到固定长度参与计算。 -现有Paddle包括 `RecurrentLayerGroup` 在内的RNN均实现了无padding的变长序列支持,本文也将基于该模块的思路,设计重构后的变长序列支持。 +现有 Paddle 包括 `RecurrentLayerGroup` 在内的 RNN 均实现了无 padding 的变长序列支持,本文也将基于该模块的思路,设计重构后的变长序列支持。 ## 背景介绍 -由于tensor必须有明确的shape,因此基于tensor 的主流框架在存储变长序列时, -必须用zero-padding的方式将变长序列补全为固定shape的tensor。 +由于 tensor 必须有明确的 shape,因此基于 tensor 的主流框架在存储变长序列时, +必须用 zero-padding 的方式将变长序列补全为固定 shape 的 tensor。 -由于padding是一种框架实现变长序列的妥协, 从用户角度,在使用RNN类模型时自然会比较介意padding的存在, -因此会有pytorch中对非padding方式变长序列支持长篇的讨论[3]。 +由于 padding 是一种框架实现变长序列的妥协, 从用户角度,在使用 RNN 类模型时自然会比较介意 padding 的存在, +因此会有 pytorch 中对非 padding 方式变长序列支持长篇的讨论[3]。 -由于padding对内存和计算会有额外的消耗,tensorflow和mxnet均使用了bucketing来进行优化[1][2], -但不管是padding还是bucket,对于用户都是额外的使用负担。 +由于 padding 对内存和计算会有额外的消耗,tensorflow 和 mxnet 均使用了 bucketing 来进行优化[1][2], +但不管是 padding 还是 bucket,对于用户都是额外的使用负担。 -因此,**paddle原生支持变长序列的方式,能直接满足用户对变长序列的最直接的需求,在当前主流平台中可以算是一大优势**。 +因此,**paddle 原生支持变长序列的方式,能直接满足用户对变长序列的最直接的需求,在当前主流平台中可以算是一大优势**。 但对变长序列的支持,需要对目前框架做一些修改,下面讨论如何在最小修改下支持变长序列。 ## 多层序列数据格式 `LODTensor` -目前 Paddle 会将一个mini-batch内的数据存储在一维的内存上, +目前 Paddle 会将一个 mini-batch 内的数据存储在一维的内存上, 额外使用 `Argument.sequenceStartPositions` 来存储每个句子的信息。 -Paddle里使用 `Argument.subSequenceStartPositions` 来存储2层的序列信息,更高维度的序列则无法直接支持; +Paddle 里使用 `Argument.subSequenceStartPositions` 来存储 2 
层的序列信息,更高维度的序列则无法直接支持; 为了支持 `N-level` 序列的存储,本文将序列信息定义成如下数据结构: @@ -37,10 +37,10 @@ typedef std::vector level_t; std::vector lod_start_pos; ``` -这里的每一个 `level_t` 存储一个粒度(level)的偏移信息,和paddle目前做法一致。 +这里的每一个 `level_t` 存储一个粒度(level)的偏移信息,和 paddle 目前做法一致。 -为了更透明地传递序列信息,我们引入了一种新的tensor 称为 `LODTensor`[4], -其关于tensor相关的接口都直接继承自 `Tensor`,但另外添加了序列相关接口。 +为了更透明地传递序列信息,我们引入了一种新的 tensor 称为 `LODTensor`[4], +其关于 tensor 相关的接口都直接继承自 `Tensor`,但另外添加了序列相关接口。 如此,在操作一个 `LODTensor` 时,普通 `Op` 直接当成 `Tensor` 使用, 而操作序列的 `Op` 会额外操作 `LODTensor` 的变长序列操作的相关接口。 @@ -86,9 +86,9 @@ private: 为了实现 `LODTensor` 的传递,框架里很多 `Tensor` 都需要变成 `LODTensor`, 简单实现,直接 **把之前所有的`Tensor` 全部替换成 `LODTensor`,这里可以直接修改 `pybind.cc` 里面创建`Tensor`的接口**。 -此外,用户有可能需要感知序列的存在(比如序列的可视化需要解析模型中输出的序列),因此一些序列操作的API也需要暴露到 python 层。 +此外,用户有可能需要感知序列的存在(比如序列的可视化需要解析模型中输出的序列),因此一些序列操作的 API 也需要暴露到 python 层。 -### `lod_start_pos` 随着Op调用链传递 +### `lod_start_pos` 随着 Op 调用链传递 框架需要支持下列特性,以实现`lod_start_pos`的传递: 1. 以 `shared_ptr` 的方式实现传递 @@ -98,21 +98,21 @@ private: - producer 需要创建自己的独立的内存,以存储自己独立的修改,并暴露 `shared_ptr` 给后续 consumer - 由于传递过程是以复制`shared_ptr`的方式实现,因此框架只需要传递一次 `lod_start_pos` -2. 对于不感知 `lod_start_pos` 的Op足够透明 -3. 需要修改 `lod_start_pos` 的producer Op可以在 `Run` 时更新自己的 `lod_start_pos` 数据 +2. 对于不感知 `lod_start_pos` 的 Op 足够透明 +3. 
需要修改 `lod_start_pos` 的 producer Op 可以在 `Run` 时更新自己的 `lod_start_pos` 数据 -具体的设计分为以下3小节 +具体的设计分为以下 3 小节 #### `load_start_pos` 的传递 -- 对于不需要修改 `lod_start_pos` 的情况,调用 LODTensor的 `ShareConstLODFrom` 接口实现复制 +- 对于不需要修改 `lod_start_pos` 的情况,调用 LODTensor 的 `ShareConstLODFrom` 接口实现复制 - 需要修改的,调用`ShareMutableLODFrom` 接口自己分配内存以存储修改 #### 框架透明 传递这一步需要加入到网络跑之前的初始化操作中,并且只需要初始化一次,基于当前框架设计的初步方案如下 - 在 Op 的 `attrs` 中添加一项 `do_mutate_lod_info` 的属性,默认为 `false` - - 有需要修改 `lod_start_pos` 的Op需要在定义 `OpProto` 时设置为 `true` + - 有需要修改 `lod_start_pos` 的 Op 需要在定义 `OpProto` 时设置为 `true` - `OperatorBase` 的 `InferShape` 中会读取 `do_mutate_lod_info` ,并且调用 `LODTensor` 相关的方法实现 `lod_start_pos` 的复制。 - `OperatorBase` 中添加一个 member `is_lod_inited{false}` 来保证传递只进行一次 @@ -147,15 +147,15 @@ private: }; ``` -如此,`lod_start_pos` 的信息的传递对非OLD的Op的实现是完全透明的。 +如此,`lod_start_pos` 的信息的传递对非 OLD 的 Op 的实现是完全透明的。 #### `lod_start_pos` 的更新 -上一小节介绍到,对于需要修改 `load_start_pos` 的Op,`OperatorBase` 会分配一块自己的内存以存储修改, -Op在 `Run` 的实现中,操作更新自己的 `load_start_pos` , +上一小节介绍到,对于需要修改 `load_start_pos` 的 Op,`OperatorBase` 会分配一块自己的内存以存储修改, +Op 在 `Run` 的实现中,操作更新自己的 `load_start_pos` , 而所有依赖其 outputs 的 op 会通过共享的指针自动获取到其更新。 ## 根据长度排序 -按照长度排序后,从前往后的时间步的batch size会自然地递减,可以直接塞入 Net 做batch计算 +按照长度排序后,从前往后的时间步的 batch size 会自然地递减,可以直接塞入 Net 做 batch 计算 比如原始的输入: @@ -171,7 +171,7 @@ xxx xx ``` -经过 `SegmentInputs` 之后,每个会有4个时间步,每个时间步的输入如下(纵向排列) +经过 `SegmentInputs` 之后,每个会有 4 个时间步,每个时间步的输入如下(纵向排列) ``` 0 1 2 3 @@ -197,18 +197,18 @@ std::vector SortBySeqLen(const LODTensor& tensor); 由于输入序列的顺序变化,以下现有的接口需要针对性地修改: -- InitMemories, memory需要根据 `sorted_seqs` 重新排列 +- InitMemories, memory 需要根据 `sorted_seqs` 重新排列 - SetmentInputs - ConcatOutputs -此外,由于 `sorted_seqs` 需要被 `RecurrentGradientOp` 复用,因此会变成 `RecurrentOp` 一个新的output输出, +此外,由于 `sorted_seqs` 需要被 `RecurrentGradientOp` 复用,因此会变成 `RecurrentOp` 一个新的 output 输出, 之后作为 `RecurrentGradientOp` 的一个输入传入。 ## InitMemories -由于序列顺序的变化,`boot_memories` 的batch上的element的顺序也需要对应重新排列。 +由于序列顺序的变化,`boot_memories` 的 batch 上的 element 的顺序也需要对应重新排列。 ## 
SegmentInputs -`SegmentInputs` 会依赖 `sorted_seqs` 的信息,将原始的序列按照排序后的序列顺序,从横向切割,转为每个step中的inputs。 +`SegmentInputs` 会依赖 `sorted_seqs` 的信息,将原始的序列按照排序后的序列顺序,从横向切割,转为每个 step 中的 inputs。 即下面的转变: ``` @@ -229,8 +229,8 @@ x x ## ConcatOutputs `ConcatOutputs` 需要 -- 将每个时间步的输出重新还原为原始输入的序列顺序(以防止Infer阶段顺序打乱) -- 将每个序列concat 为规则的mini-batch表示 +- 将每个时间步的输出重新还原为原始输入的序列顺序(以防止 Infer 阶段顺序打乱) +- 将每个序列 concat 为规则的 mini-batch 表示 ## 参考文献 [TensorFlow Bucketing](https://www.tensorflow.org/versions/r0.12/api_docs/python/contrib.training/bucketing) diff --git a/docs/design/others/releasing_process.md b/docs/design/others/releasing_process.md index 5b321611687..4913524f381 100644 --- a/docs/design/others/releasing_process.md +++ b/docs/design/others/releasing_process.md @@ -1,60 +1,60 @@ -# PaddlePaddle发行规范 +# PaddlePaddle 发行规范 -PaddlePaddle使用git-flow branching model做分支管理,使用[Semantic Versioning](http://semver.org/)标准表示PaddlePaddle版本号。 +PaddlePaddle 使用 git-flow branching model 做分支管理,使用[Semantic Versioning](http://semver.org/)标准表示 PaddlePaddle 版本号。 -PaddlePaddle每次发新的版本,遵循以下流程: +PaddlePaddle 每次发新的版本,遵循以下流程: 1. 从`develop`分支派生出新的分支,分支名为`release/版本号`。例如,`release/0.10.0` -1. 将新分支的版本打上tag,tag为`版本号rc.Patch号`。第一个tag为`0.10.0rc1`,第二个为`0.10.0rc2`,依次类推。 +1. 将新分支的版本打上 tag,tag 为`版本号 rc.Patch 号`。第一个 tag 为`0.10.0rc1`,第二个为`0.10.0rc2`,依次类推。 1. 
对这个版本的提交,做如下几个操作: * 修改`python/setup.py.in`中的版本信息,并将`istaged`字段设为`True`。 - * 编译这个版本的Docker发行镜像,发布到dockerhub。如果失败,修复Docker编译镜像问题,Patch号加一,返回第二步 - * 编译这个版本的Ubuntu Deb包。如果失败,修复Ubuntu Deb包编译问题,Patch号加一,返回第二步。 - * 使用Regression Test List作为检查列表,测试Docker镜像/ubuntu安装包的功能正确性 - * 如果失败,记录下所有失败的例子,在这个`release/版本号`分支中,修复所有bug后,Patch号加一,返回第二步 - * 编译这个版本的python wheel包,并发布到pypi。 - * 由于pypi.python.org目前遵循[严格的命名规范PEP 513](https://www.python.org/dev/peps/pep-0513),在使用twine上传之前,需要重命名wheel包中platform相关的后缀,比如将`linux_x86_64`修改成`manylinux1_x86_64`。 - * pypi上的package名称为paddlepaddle和paddlepaddle_gpu,如果要上传GPU版本的包,需要修改build/python/setup.py中,name: "paddlepaddle_gpu"并重新打包wheel包:`python setup.py bdist_wheel`。 + * 编译这个版本的 Docker 发行镜像,发布到 dockerhub。如果失败,修复 Docker 编译镜像问题,Patch 号加一,返回第二步 + * 编译这个版本的 Ubuntu Deb 包。如果失败,修复 Ubuntu Deb 包编译问题,Patch 号加一,返回第二步。 + * 使用 Regression Test List 作为检查列表,测试 Docker 镜像/ubuntu 安装包的功能正确性 + * 如果失败,记录下所有失败的例子,在这个`release/版本号`分支中,修复所有 bug 后,Patch 号加一,返回第二步 + * 编译这个版本的 python wheel 包,并发布到 pypi。 + * 由于 pypi.python.org 目前遵循[严格的命名规范 PEP 513](https://www.python.org/dev/peps/pep-0513),在使用 twine 上传之前,需要重命名 wheel 包中 platform 相关的后缀,比如将`linux_x86_64`修改成`manylinux1_x86_64`。 + * pypi 上的 package 名称为 paddlepaddle 和 paddlepaddle_gpu,如果要上传 GPU 版本的包,需要修改 build/python/setup.py 中,name: "paddlepaddle_gpu"并重新打包 wheel 包:`python setup.py bdist_wheel`。 * 上传方法: ``` cd build/python pip install twine twine upload dist/[package to upload] ``` -1. 第三步完成后,将`release/版本号`分支合入master分支,并删除`release/版本号`分支。将master分支的合入commit打上tag,tag为`版本号`。同时再将`master`分支合入`develop`分支。最后删除`release/版本号`分支。 -1. 编译master分支的Docker发行镜像,发布到dockerhub。编译ubuntu的deb包,发布到github release页面 -1. 协同完成Release Note的书写 +1. 第三步完成后,将`release/版本号`分支合入 master 分支,并删除`release/版本号`分支。将 master 分支的合入 commit 打上 tag,tag 为`版本号`。同时再将`master`分支合入`develop`分支。最后删除`release/版本号`分支。 +1. 编译 master 分支的 Docker 发行镜像,发布到 dockerhub。编译 ubuntu 的 deb 包,发布到 github release 页面 +1. 
协同完成 Release Note 的书写 需要注意的是: -* `release/版本号`分支一旦建立,一般不允许再从`develop`分支合入`release/版本号`。这样保证`release/版本号`分支功能的封闭,方便测试人员测试PaddlePaddle的行为。 -* 在`release/版本号`分支存在的时候,如果有bugfix的行为,需要将bugfix的分支同时merge到`master`, `develop`和`release/版本号`这三个分支。 +* `release/版本号`分支一旦建立,一般不允许再从`develop`分支合入`release/版本号`。这样保证`release/版本号`分支功能的封闭,方便测试人员测试 PaddlePaddle 的行为。 +* 在`release/版本号`分支存在的时候,如果有 bugfix 的行为,需要将 bugfix 的分支同时 merge 到`master`, `develop`和`release/版本号`这三个分支。 ## PaddlePaddle 分支规范 -PaddlePaddle开发过程使用[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,并适应github的特性做了一些区别。 +PaddlePaddle 开发过程使用[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,并适应 github 的特性做了一些区别。 -* PaddlePaddle的主版本库遵循[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范。其中: +* PaddlePaddle 的主版本库遵循[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范。其中: * `master`分支为稳定(stable branch)版本分支。每一个`master`分支的版本都是经过单元测试和回归测试的版本。 * `develop`分支为开发(develop branch)版本分支。每一个`develop`分支的版本都经过单元测试,但并没有经过回归测试。 - * `release/版本号`分支为每一次Release时建立的临时分支。在这个阶段的代码正在经历回归测试。 + * `release/版本号`分支为每一次 Release 时建立的临时分支。在这个阶段的代码正在经历回归测试。 -* 其他用户的fork版本库并不需要严格遵守[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,但所有fork的版本库的所有分支都相当于特性分支。 - * 建议,开发者fork的版本库使用`develop`分支同步主版本库的`develop`分支 - * 建议,开发者fork的版本库中,再基于`develop`版本fork出自己的功能分支。 - * 当功能分支开发完毕后,向PaddlePaddle的主版本库提交`Pull Reuqest`,进而进行代码评审。 +* 其他用户的 fork 版本库并不需要严格遵守[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,但所有 fork 的版本库的所有分支都相当于特性分支。 + * 建议,开发者 fork 的版本库使用`develop`分支同步主版本库的`develop`分支 + * 建议,开发者 fork 的版本库中,再基于`develop`版本 fork 出自己的功能分支。 + * 当功能分支开发完毕后,向 PaddlePaddle 的主版本库提交`Pull Reuqest`,进而进行代码评审。 * 在评审过程中,开发者修改自己的代码,可以继续在自己的功能分支提交代码。 -* BugFix分支也是在开发者自己的fork版本库维护,与功能分支不同的是,BugFix分支需要分别给主版本库的`master`、`develop`与可能有的`release/版本号`分支,同时提起`Pull Request`。 +* BugFix 分支也是在开发者自己的 fork 版本库维护,与功能分支不同的是,BugFix 分支需要分别给主版本库的`master`、`develop`与可能有的`release/版本号`分支,同时提起`Pull Request`。 -## 
PaddlePaddle回归测试列表 +## PaddlePaddle 回归测试列表 -本列表说明PaddlePaddle发版之前需要测试的功能点。 +本列表说明 PaddlePaddle 发版之前需要测试的功能点。 -### PaddlePaddle Book中所有章节 +### PaddlePaddle Book 中所有章节 -PaddlePaddle每次发版本首先要保证PaddlePaddle Book中所有章节功能的正确性。功能的正确性包括验证PaddlePaddle目前的`paddle_trainer`训练和纯使用`Python`训练模型正确性。 +PaddlePaddle 每次发版本首先要保证 PaddlePaddle Book 中所有章节功能的正确性。功能的正确性包括验证 PaddlePaddle 目前的`paddle_trainer`训练和纯使用`Python`训练模型正确性。 | | 新手入门章节 | 识别数字 | 图像分类 | 词向量 | 情感分析 | 语意角色标注 | 机器翻译 | 个性化推荐 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | diff --git a/docs/design/phi/design.md b/docs/design/phi/design.md index 62a7dc4a468..a863a17a8b6 100644 --- a/docs/design/phi/design.md +++ b/docs/design/phi/design.md @@ -1,90 +1,90 @@ -飞桨高可复用算子库 PHI (Paddle HIgh reusability operator library),或者我们也称为函数式算子库,支持组合式算子功能复用、Primitive算子内核复用、插件式硬件加速库复用。针对飞桨框架原算子库存在的算子接口不清晰、算子复用成本较高、调用性能不够快的问题,我们重构了飞桨框架的算子库,设计了灵活、高效的函数式算子库 Phi,可以通过对函数式算子接口组合调用的方式实现新算子。新算子库提供了 200 余个跟 Python 开发接口保持一致的 C++ 运算类 API,以及近500个可供组合调用的前、反向函数式算子内核 Kernel,可大幅降低框架原生算子和自定义算子的开发成本。新算子库支持Primitive API方式开发算子内核,可支持不同硬件(比如GPU和XPU)的算子内核复用。新算子库支持以插件方式接入硬件(比如NPU)的加速库,实现低成本复用硬件加速库。 +飞桨高可复用算子库 PHI (Paddle HIgh reusability operator library),或者我们也称为函数式算子库,支持组合式算子功能复用、Primitive 算子内核复用、插件式硬件加速库复用。针对飞桨框架原算子库存在的算子接口不清晰、算子复用成本较高、调用性能不够快的问题,我们重构了飞桨框架的算子库,设计了灵活、高效的函数式算子库 Phi,可以通过对函数式算子接口组合调用的方式实现新算子。新算子库提供了 200 余个跟 Python 开发接口保持一致的 C++ 运算类 API,以及近 500 个可供组合调用的前、反向函数式算子内核 Kernel,可大幅降低框架原生算子和自定义算子的开发成本。新算子库支持 Primitive API 方式开发算子内核,可支持不同硬件(比如 GPU 和 XPU)的算子内核复用。新算子库支持以插件方式接入硬件(比如 NPU)的加速库,实现低成本复用硬件加速库。 -> 本文档撰写于phi架构基本成型之时(2022年2月),仅代表该时间点的基本设计形态,可能和最新形态有细微差别;此外,在2.3版本发布的phi算子库仍然处于初期形态,后续仍然需要持续建设并完善,设计上也有可能调整。 +> 本文档撰写于 phi 架构基本成型之时(2022 年 2 月),仅代表该时间点的基本设计形态,可能和最新形态有细微差别;此外,在 2.3 版本发布的 phi 算子库仍然处于初期形态,后续仍然需要持续建设并完善,设计上也有可能调整。 # 一、背景与目标 > 介绍设计并建设 phi 要解决的问题 -最初 phi 项目的启动仅是为了优化飞桨动态图调度开销、并提升Kernel开发的复用能力而提出来的,但后续决定借此机会,建立能够同时在训练和推理场景(包括服务器端和移动端场景)中使用的“训推一体”算子库,长远上降低 paddle 生态中各基础设施开发及维护算子的成本,逐渐扩充了项目的目标范围,目前 phi 已经承载了多维度的意义。 +最初 phi 项目的启动仅是为了优化飞桨动态图调度开销、并提升 
Kernel 开发的复用能力而提出来的,但后续决定借此机会,建立能够同时在训练和推理场景(包括服务器端和移动端场景)中使用的“训推一体”算子库,长远上降低 paddle 生态中各基础设施开发及维护算子的成本,逐渐扩充了项目的目标范围,目前 phi 已经承载了多维度的意义。 -> 关于算子库的命名,开发过程中有过迭代:初期算子库目录名为 pten ,意为paddle Tensor运算库 (Paddle Tensor Operation Library),因此一些历史 PR 以PTen为前缀,后期认为该名称表述范围不够准确,因此更名为 phi +> 关于算子库的命名,开发过程中有过迭代:初期算子库目录名为 pten ,意为 paddle Tensor 运算库 (Paddle Tensor Operation Library),因此一些历史 PR 以 PTen 为前缀,后期认为该名称表述范围不够准确,因此更名为 phi ## 1.1 背景问题 具体地,phi 算子库项目,承载着解决 Paddle 以下问题的期望: -### 1.1.1 Op&OpKernel之间可复用性差,冗余代码较多 +### 1.1.1 Op&OpKernel 之间可复用性差,冗余代码较多 -2.3版本之前,Paddle中的Operator(后续简称Op)之间的可复用性比较差,仅在少数的反向Op中,通过在GradOpMaker实现中调用SetType复用了一些简单的运算,大部分本身可以复用已有Op实现的情况,代码都是copy重写的。 +2.3 版本之前,Paddle 中的 Operator(后续简称 Op)之间的可复用性比较差,仅在少数的反向 Op 中,通过在 GradOpMaker 实现中调用 SetType 复用了一些简单的运算,大部分本身可以复用已有 Op 实现的情况,代码都是 copy 重写的。 -可复用性差的根本原因还是原先Op体系设计导致的: +可复用性差的根本原因还是原先 Op 体系设计导致的: -1. 当一个Op去复用另一个Op的`Opkernel::Compute`方法,都需要先构造一个`ExecutionContext`,复用上是比较繁琐的 +1. 当一个 Op 去复用另一个 Op 的`Opkernel::Compute`方法,都需要先构造一个`ExecutionContext`,复用上是比较繁琐的 - - 如果能直接调用一个函数形式的Kernel,就会方便很多 + - 如果能直接调用一个函数形式的 Kernel,就会方便很多 -2. 由于额外的数据结构构造及独立Op调度引入了开销,从计算性能的角度考虑,复用Op不如直接把计算代码copy过来,导致我们逐渐抛弃了早期反向Op复用前向Op的原则,开始为每个反向Op单独实现Kernel +2. 
由于额外的数据结构构造及独立 Op 调度引入了开销,从计算性能的角度考虑,复用 Op 不如直接把计算代码 copy 过来,导致我们逐渐抛弃了早期反向 Op 复用前向 Op 的原则,开始为每个反向 Op 单独实现 Kernel - - 只有Op之前复用的开销足够小,复用已有Op实现新Op才有可能被大范围推广 + - 只有 Op 之前复用的开销足够小,复用已有 Op 实现新 Op 才有可能被大范围推广 ### 1.1.2 执行调度的简洁性与细粒度化 #### 1.1.2.1 动态图 -Paddle 2.0发布之后,多次收到内外部用户反馈动态图在小模型CPU执行场景下与竞品在性能上有数倍的差距。 +Paddle 2.0 发布之后,多次收到内外部用户反馈动态图在小模型 CPU 执行场景下与竞品在性能上有数倍的差距。 -这个问题的主要原因是:Padddle动态图C++端的执行路径比较冗长,调度开销比较重,这和动态图早期设计兼容静态图,继承了静态图Op的许多对象构造过程有关 +这个问题的主要原因是:Padddle 动态图 C++端的执行路径比较冗长,调度开销比较重,这和动态图早期设计兼容静态图,继承了静态图 Op 的许多对象构造过程有关 -- 问题issue:https://github.com/PaddlePaddle/Paddle/issues/28774 +- 问题 issue:https://github.com/PaddlePaddle/Paddle/issues/28774 -因此,动态图需要升级为基于函数的调度架构,抛开原先复杂的Op体系,才能解决这个问题,这依赖于OpKernel改为函数式的写法。 +因此,动态图需要升级为基于函数的调度架构,抛开原先复杂的 Op 体系,才能解决这个问题,这依赖于 OpKernel 改为函数式的写法。 #### 1.1.2.2 静态图 + IR -我们目前的静态图还不够“静态”,目前静态图仍然有许多运行时动态选择的逻辑,例如,运行时选择OpKernel,运行时判断是否要进行跨设备数据拷贝等等,但这些其实可以在静态图模型组网编译期间就确定下来,将执行过程确定为一系列OpKernel的执行,不再做动态的判断选择,从而进一步提升执行效率。 +我们目前的静态图还不够“静态”,目前静态图仍然有许多运行时动态选择的逻辑,例如,运行时选择 OpKernel,运行时判断是否要进行跨设备数据拷贝等等,但这些其实可以在静态图模型组网编译期间就确定下来,将执行过程确定为一系列 OpKernel 的执行,不再做动态的判断选择,从而进一步提升执行效率。 -而这些依赖于OpKernel本身的细粒度化,将现有复杂的大OpKernel解耦成具体场景、具体设备的小Kernel,才能支持这样的调度。 +而这些依赖于 OpKernel 本身的细粒度化,将现有复杂的大 OpKernel 解耦成具体场景、具体设备的小 Kernel,才能支持这样的调度。 ### 1.1.3 自定义算子的易用性提升需求 -2021年初上线的新自定义C++外部算子体系,在接口与函数编写的层面上,用法已经比较直观了,但是因为我们缺少基本运算的C++ API体系,事实上,在实现具体的自定义Op运算逻辑时,一些基础的加减乘除及矩阵运算都仍然需要重新实现一遍,不能复用Paddle已有的、经过优化的基础运算,因此一些复杂运算的外部开发成本仍然是比较高的。而要想复用Paddle内部的基础运算,依赖于的Op体系升级为函数式,并整理对应的C++ API体系才能解决。 +2021 年初上线的新自定义 C++外部算子体系,在接口与函数编写的层面上,用法已经比较直观了,但是因为我们缺少基本运算的 C++ API 体系,事实上,在实现具体的自定义 Op 运算逻辑时,一些基础的加减乘除及矩阵运算都仍然需要重新实现一遍,不能复用 Paddle 已有的、经过优化的基础运算,因此一些复杂运算的外部开发成本仍然是比较高的。而要想复用 Paddle 内部的基础运算,依赖于的 Op 体系升级为函数式,并整理对应的 C++ API 体系才能解决。 ### 1.1.4 共建训推一体算子库,降低推理算子维护成本 -长久以来,由于paddle主框架和paddle-lite的算子是分开维护的,paddle新增的算子,lite需要的话,就要手动在lite中重新实现一遍,而且当主框架算子升级,lite又没有及时感知到,会直接导致推理模型在lite执行时出现bug,这维护成本是很高的,只有统一算子库,仅维护一份代码,才能长久解决这个问题。 +长久以来,由于 paddle 主框架和 paddle-lite 的算子是分开维护的,paddle 新增的算子,lite 需要的话,就要手动在 lite 
中重新实现一遍,而且当主框架算子升级,lite 又没有及时感知到,会直接导致推理模型在 lite 执行时出现 bug,这维护成本是很高的,只有统一算子库,仅维护一份代码,才能长久解决这个问题。 -因此,本次函数式算子库会由训练和推理共同建设,计算库整理完成后,作为独立的编译组件和底层基础设施(目前还没有独立拆分出来),能够同时服务于训练、预测以及Lite等执行体系。 +因此,本次函数式算子库会由训练和推理共同建设,计算库整理完成后,作为独立的编译组件和底层基础设施(目前还没有独立拆分出来),能够同时服务于训练、预测以及 Lite 等执行体系。 -### 1.1.5 推理新Runtime设计infrt的适配 +### 1.1.5 推理新 Runtime 设计 infrt 的适配 -推理设计了新的runtime infrt,预计要统一paddle-inference和paddle-lite的执行,将来需要直接调用本次共建的phi算子库中的算子,因此在设计时需要考虑对infrt的适配。 +推理设计了新的 runtime infrt,预计要统一 paddle-inference 和 paddle-lite 的执行,将来需要直接调用本次共建的 phi 算子库中的算子,因此在设计时需要考虑对 infrt 的适配。 -### 1.1.6 Op及Kernel参数规范化 +### 1.1.6 Op 及 Kernel 参数规范化 -Python 2.0 API项目规范了Paddle Python端API的参数列表,使其变得简洁、易用,但是限于当时的情况,Op层面的参数列表并没有规范化,因此会有不少早期开发的算子和Python API参数相差较多,例如conv op这种,Python API仅有7个参数,但C++ Op却有30+参数的分裂情况,而API和Op本质上是同一层概念,都是对一个运算的描述,参数应该是一致的。推理为了解决此问题,推动了算子定义增强项目,为部分不需要的参数添加了AsExtra以及AsQuant的声明,但并未从根本上解决问题,这也是phi算子库构建希望重点去解决的。 +Python 2.0 API 项目规范了 Paddle Python 端 API 的参数列表,使其变得简洁、易用,但是限于当时的情况,Op 层面的参数列表并没有规范化,因此会有不少早期开发的算子和 Python API 参数相差较多,例如 conv op 这种,Python API 仅有 7 个参数,但 C++ Op 却有 30+参数的分裂情况,而 API 和 Op 本质上是同一层概念,都是对一个运算的描述,参数应该是一致的。推理为了解决此问题,推动了算子定义增强项目,为部分不需要的参数添加了 AsExtra 以及 AsQuant 的声明,但并未从根本上解决问题,这也是 phi 算子库构建希望重点去解决的。 -我们希望能做到,Python API -> Op(C++ API) -> Kernel API三层参数一致,使整体架构清晰,每一层复用也清晰,一套Python官方文档,基本能够满足三层API的共同参考需求,不再着重维护额外的文档体系,降低维护成本。 +我们希望能做到,Python API -> Op(C++ API) -> Kernel API 三层参数一致,使整体架构清晰,每一层复用也清晰,一套 Python 官方文档,基本能够满足三层 API 的共同参考需求,不再着重维护额外的文档体系,降低维护成本。 ## 1.2 目标及范围 -- 总体目标:飞桨核心框架复用同一函数式算子库,基础数据结构Tensor具备良好的可扩展性,从根本上做到训练推理协同一致、基础组件稳定可靠、增量开发体验良好。 +- 总体目标:飞桨核心框架复用同一函数式算子库,基础数据结构 Tensor 具备良好的可扩展性,从根本上做到训练推理协同一致、基础组件稳定可靠、增量开发体验良好。 - 目标范围: - - phi算子库初期构建更关注Kernel“迁移”,人力因素,原Kernel逻辑迁移时暂不强制升级为“组合式”写法,前反向Kernel均如此 - - phi算子库初期提供的"组合式Kernel二次开发"能力面向后续增量的新算子使用,已有算子仍然保持其原先的编码实现,降低迁移成本 - - phi算子库初期提供的“新硬件扩展能力”仅在新硬件自身范围内提供,比如XPU已经实现了50个Kernel,后续其可以基于50个Kernel去组合新的Kernel,但这仅限于XPU范围内,其实现不和CPU、CUDA等实现通用 - - phi算子库项目重点关注“Kernel函数化&Op规范化”的工作,Kernel改为函数式,C++API与Op命名及参数列表在尽可能确保兼容性的前提下与逐渐规范化为与Python 
API一致 + - phi 算子库初期构建更关注 Kernel“迁移”,人力因素,原 Kernel 逻辑迁移时暂不强制升级为“组合式”写法,前反向 Kernel 均如此 + - phi 算子库初期提供的"组合式 Kernel 二次开发"能力面向后续增量的新算子使用,已有算子仍然保持其原先的编码实现,降低迁移成本 + - phi 算子库初期提供的“新硬件扩展能力”仅在新硬件自身范围内提供,比如 XPU 已经实现了 50 个 Kernel,后续其可以基于 50 个 Kernel 去组合新的 Kernel,但这仅限于 XPU 范围内,其实现不和 CPU、CUDA 等实现通用 + - phi 算子库项目重点关注“Kernel 函数化&Op 规范化”的工作,Kernel 改为函数式,C++API 与 Op 命名及参数列表在尽可能确保兼容性的前提下与逐渐规范化为与 Python API 一致 # 二、设计概览 ## 2.1 命名及位置 -飞桨高可复用算子库 (Paddle HIgh reusability operator library),简称 PHI(phi),phi代码目录在paddle目录下,和fluid平级,而不是放在fluid目录下,这样放置的原因是:phi是一个由fluid,lite,infrt等多种上层runtime共同调用的基础组件,后续会作为单独的编译的动态库存在,不适合作为fluid的子模块。 +飞桨高可复用算子库 (Paddle HIgh reusability operator library),简称 PHI(phi),phi 代码目录在 paddle 目录下,和 fluid 平级,而不是放在 fluid 目录下,这样放置的原因是:phi 是一个由 fluid,lite,infrt 等多种上层 runtime 共同调用的基础组件,后续会作为单独的编译的动态库存在,不适合作为 fluid 的子模块。 ## 2.2 目录结构 @@ -95,31 +95,31 @@ Python 2.0 API项目规范了Paddle Python端API的参数列表,使其变得 - 在目录设计上支持算子库的各种拆分编译需求,包括 - 按运算设备拆分编译 - - 例如:仅编译cpu的,或者仅编译cuda的 + - 例如:仅编译 cpu 的,或者仅编译 cuda 的 - 按训练和推理场景拆分编译 - - 例如:推理不编译反向相关kernel,也不编译带有Intermediate输出的前向kernel + - 例如:推理不编译反向相关 kernel,也不编译带有 Intermediate 输出的前向 kernel - 按移动端设备实际使用算子精准裁剪编译(目前尚未支持) - - 例如:一个模型只用了add和mul,极致情况下应该能裁到仅剩2个kernel -- 长线上支持良好的kernel复用实现需求 - - 解释:kernel复用实现时,能否通过简单的include引入对应函数,不会因为目录过于复杂而找不到复用的kernel + - 例如:一个模型只用了 add 和 mul,极致情况下应该能裁到仅剩 2 个 kernel +- 长线上支持良好的 kernel 复用实现需求 + - 解释:kernel 复用实现时,能否通过简单的 include 引入对应函数,不会因为目录过于复杂而找不到复用的 kernel -- 长线上支持跨设备kernel的写法统一需求,并且直观易用,不引入不必要的模板参数 - - 解释:算子库下层还有Kernel Primitive API模块,其长线愿景是每个运算,只要一个kernel,能够适应多种设备,真正区分设备的代码,仅在Kernel Primitive API实现中;不希望未来的kernel在复用时从传入较复杂的模板参数,需要尽可能限制地简洁一些 +- 长线上支持跨设备 kernel 的写法统一需求,并且直观易用,不引入不必要的模板参数 + - 解释:算子库下层还有 Kernel Primitive API 模块,其长线愿景是每个运算,只要一个 kernel,能够适应多种设备,真正区分设备的代码,仅在 Kernel Primitive API 实现中;不希望未来的 kernel 在复用时从传入较复杂的模板参数,需要尽可能限制地简洁一些 -- 易用性上,开发者能精准理解自己新增Kernel应该放到什么位置,无歧义 - - 解释:开发者新增一个API,不会困惑自己应该将对应kernel放在那个目录,也不会出现不同的人对于同一个kernel应该放在什么位置出现二义性的理解 +- 易用性上,开发者能精准理解自己新增 Kernel 应该放到什么位置,无歧义 + - 解释:开发者新增一个 API,不会困惑自己应该将对应 
kernel 放在那个目录,也不会出现不同的人对于同一个 kernel 应该放在什么位置出现二义性的理解 - 不引入大量的重复目录设计 - - 解释:概念拆分是需要的,但也要有边界,避免在多个目录下有命名相同的子目录,容易混乱,比如不能cpu下面有eigen, funcs, math等,gpu下面也有。新算子库的目录设计以根据设备拆分为主,其他层次的目录拆分尽可能弱化,比如尽量不根据功能拆分,尽量不根据领域拆分等 + - 解释:概念拆分是需要的,但也要有边界,避免在多个目录下有命名相同的子目录,容易混乱,比如不能 cpu 下面有 eigen, funcs, math 等,gpu 下面也有。新算子库的目录设计以根据设备拆分为主,其他层次的目录拆分尽可能弱化,比如尽量不根据功能拆分,尽量不根据领域拆分等 - 不造成迁移时的文件数目膨胀 - - 解释:不能因为kernel设备拆分,导致kernel实现文件大规模增多 + - 解释:不能因为 kernel 设备拆分,导致 kernel 实现文件大规模增多 - 不引入层级过深的目录设计 - 解释:目录层级不应过深,理解和维护成本都较高 - 不引入过高的迁移成本 - - 解释:迁移kernel时,不能要求对kernel本身做太多改动和拆分,否则迁移成本太高 + - 解释:迁移 kernel 时,不能要求对 kernel 本身做太多改动和拆分,否则迁移成本太高 ### 2.2.2 具体目录设计 @@ -127,78 +127,78 @@ Python 2.0 API项目规范了Paddle Python端API的参数列表,使其变得 ``` paddle/phi -./api (对外暴露的高层API及其实现) - ./include(对外暴露的高层API头文件) - ./lib(对外暴露API的实现) +./api (对外暴露的高层 API 及其实现) + ./include(对外暴露的高层 API 头文件) + ./lib(对外暴露 API 的实现) ./common (内外部均会使用到的基础数据结构) -./core (基础组件,比如基础Tensor相关接口,kernel注册接口,管理单元等) -./backends (各设备及后端的基础组件,下设cpu,gpu等后端目录) -./infermeta (shape、dtype、layout等meta信息的推导函数) -./kernels (各设备及后端的kernel实现) -./ops (各op的定义,后续采取自动生成的方式完成大部分工作,目前仅有兼容用的代码) +./core (基础组件,比如基础 Tensor 相关接口,kernel 注册接口,管理单元等) +./backends (各设备及后端的基础组件,下设 cpu,gpu 等后端目录) +./infermeta (shape、dtype、layout 等 meta 信息的推导函数) +./kernels (各设备及后端的 kernel 实现) +./ops (各 op 的定义,后续采取自动生成的方式完成大部分工作,目前仅有兼容用的代码) ./tests (单元测试) ``` 部分目录结构说明: -- `api`:API模块,面向外部用户 - - 直接使用类Python的C++ Tensor计算 API,和Python端形式高度一致 - - 该部分可能反向依赖框架的DeviceContextPool等实现,所以单独管理 - - 在该类API上,训练和预测也可能是不同的 -- `common`:phi内部及phi api目录均要使用的数据结构,这些数据结构既不属于phi core,也不属于api目录 -- `core`:phi内部会有一些自己需要的,公用的模块实现,比如基础DenseTensor、kernel注册及管理模块 -- `backends`:backends中组织后续需要为各个后端的新增的数据结构,比如CPUContext、GPUContext等 - - core中放置对于算子库来讲通用的基础数据结构,而特定后端的专用数据结构不放在core中,且依赖关系严格保证backends依赖core,但core不能依赖backends - - 例1:Context如果有基类,则在core中,而继承的CPUContext在backends/cpu中,GPUContext在baackends/gpu中 - - 例2:TensorBase在core中,DenseTensor给多数设备使用,也在core中,如果有MKLDNNTensor的话,因为它只给mkldnn用,应该在backends/dnnl中 -- `infermeta`: 
infermeta函数的整理位置,infermeta函数相当于infershape+inferdtype+inferlayout等 -- `kernels`:各设备相关kernels +- `api`:API 模块,面向外部用户 + - 直接使用类 Python 的 C++ Tensor 计算 API,和 Python 端形式高度一致 + - 该部分可能反向依赖框架的 DeviceContextPool 等实现,所以单独管理 + - 在该类 API 上,训练和预测也可能是不同的 +- `common`:phi 内部及 phi api 目录均要使用的数据结构,这些数据结构既不属于 phi core,也不属于 api 目录 +- `core`:phi 内部会有一些自己需要的,公用的模块实现,比如基础 DenseTensor、kernel 注册及管理模块 +- `backends`:backends 中组织后续需要为各个后端的新增的数据结构,比如 CPUContext、GPUContext 等 + - core 中放置对于算子库来讲通用的基础数据结构,而特定后端的专用数据结构不放在 core 中,且依赖关系严格保证 backends 依赖 core,但 core 不能依赖 backends + - 例 1:Context 如果有基类,则在 core 中,而继承的 CPUContext 在 backends/cpu 中,GPUContext 在 baackends/gpu 中 + - 例 2:TensorBase 在 core 中,DenseTensor 给多数设备使用,也在 core 中,如果有 MKLDNNTensor 的话,因为它只给 mkldnn 用,应该在 backends/dnnl 中 +- `infermeta`: infermeta 函数的整理位置,infermeta 函数相当于 infershape+inferdtype+inferlayout 等 +- `kernels`:各设备相关 kernels - `cpu, gpu, ...` -- `ops`: ops中组织新形式的Op定义、以及兼容原有op的一些组件 +- `ops`: ops 中组织新形式的 Op 定义、以及兼容原有 op 的一些组件 -#### 2.2.2.2 Kernels目录 +#### 2.2.2.2 Kernels 目录 ``` paddle/phi/kernels -./ (放置设备无关的kernel声明和实现) -./cpu(仅放置cpu后端的kernel实现) +./ (放置设备无关的 kernel 声明和实现) +./cpu(仅放置 cpu 后端的 kernel 实现) ./gpu ./xpu ./dnnl ./gpudnn -./impl (考虑到现状,放置原先Kernel在CPU和GPU或其他设备一致的实现,便于复用) -./funcs(放置原fluid operators下一些支持多设备的functor和funcs) -./primitive(放置Kernel Primitive API的基础实现) +./impl (考虑到现状,放置原先 Kernel 在 CPU 和 GPU 或其他设备一致的实现,便于复用) +./funcs(放置原 fluid operators 下一些支持多设备的 functor 和 funcs) +./primitive(放置 Kernel Primitive API 的基础实现) ... 
``` 目录结构说明如下: -- kernels下主目录,放置设备无关的kernel.h和kernel.cc,原则上每个kernel一个.h和.cc - - 例如一个kernel是使用Primitive api实现的,或者是复用其他基础kernel实现的,那么不论在什么设备上,应该都只有一种实现,所以它的声明和实现均直接放置到kernels目录下即可(这是将来的理想状态) - - 目前我们大部分kernel都不具备跨设备实现统一的特征,但是kernel的输入参数返回值除了DeviceContext之外,应该是一致的,所以kernel参数声明头文件还放到主目录下(和原先的设计保持一致,DeviceContext和T作为模板参数),各设备的函数实现在相应的设备文件夹中 - - 注意,这里跨设备实现统一,并不是指一个kernel的CPU和GPU实现就算统一了,而是在所有设备的实现都一样,目前至少包括CPU,GPU,XPU,MKLDNN,GPUDNN等 - - 反向kernel如果不需要支持裁剪,可以做适当归并(但如果要为支持端侧训练留可能性,反向kernel可能也是裁剪的潜在目标) -- kernels下一级子目录,原则上按照backend分类按需新建,仅保留两个特殊的目录: - - funcs:为了兼容原先fluid operators中functor和function设计保留的目录,放置支持多种后端的function和functor,还按照原先的一个头文件,多个.cc(u)的方式组织(这部分代码在将来可能被移除,因为会逐渐被Kernel Primirive API及Kernel间复用替代,这里不做过度设计) - - 例1:一个公共函数XXXFunction在reduce CPU和reduce CUDA的kernel实现中都被调用,并且reduce CPU和reduce GPU的kernel实现是不一样的,那么这个XXXFunction应该在funcs目录中 - - primitive:Kernel Primitive API,多设备统一kernel实现的一些基础工具 - - impl:paddle目前的op kernel实现,有很多仍然是CPU和GPU复用同一份代码的,在大量的xx_op.h,这部分代码,不适合放在cpu或者gpu目录中,也不适合放在funcs目录中(会导致funcs目录中最终放置了相当一部分kernel实现,过于臃肿且混乱,funcs目录的定位是放置原先operators/math目录下那样的工具functor和function),也不适合放到kernels根目录下(并不是真正设备无关的实现,仅是cpu和gpu共用的实现),因此为了使这部分代码迁移时不需要做过多考虑,并且放置的位置也相对符合其实现性质,创建了impl这个目录 - - impl目录下,仅放置跨部分设备实现一致的kernel函数,均为头文件,命名均以xxx_kernel_impl.h为后缀 - - 例如:scale,fill_constant,fill_any_like这些kernel均属于此类情况 -- kernel迁移过来之后,首先创建对应kenrel头文件直接放置到kernels的根目录中,各后端的kernel实现放在相应的设备文件夹中 - - 可参考原先op的归并程度,如matmul原先是单独的.h/.cc,那移过来之后保持,但activation相关的基本写在一个.h/.cc,移过来也仍然保持归并(后续有必要再进一步拆分) - - 例1:原先cast op的Kernel在cast_op.h中,迁移过来之后在根目录创建cast_kernel.h,cast_kernel.cc/cu根据使用的后端放到对应的目录,即cast_kernel.cc放置到cpu中,cast_kernel.cu放置到gpu中 - - 例2:原先scale op的kernel使用eigen实现,CPU和GPU实现一致,迁移过来之后,公共实现应该在impl中的scale_kernel_impl.h中,公共头文件在kernels根目录下的scale_kernel.h中,scale_kernel.cc在cpu中,scale_kernel.cu在gpu中 -- 迁移时,只有本kernel用到的辅助函数,一律和kernel实现放到同一个backend文件中,创建.h管理代码,不再单独在别处整理代码,除非这些辅助的函数实现是有多处使用的 +- kernels 下主目录,放置设备无关的 kernel.h 和 kernel.cc,原则上每个 kernel 一个.h 和.cc + - 例如一个 kernel 是使用 Primitive api 实现的,或者是复用其他基础 kernel 
实现的,那么不论在什么设备上,应该都只有一种实现,所以它的声明和实现均直接放置到 kernels 目录下即可(这是将来的理想状态) + - 目前我们大部分 kernel 都不具备跨设备实现统一的特征,但是 kernel 的输入参数返回值除了 DeviceContext 之外,应该是一致的,所以 kernel 参数声明头文件还放到主目录下(和原先的设计保持一致,DeviceContext 和 T 作为模板参数),各设备的函数实现在相应的设备文件夹中 + - 注意,这里跨设备实现统一,并不是指一个 kernel 的 CPU 和 GPU 实现就算统一了,而是在所有设备的实现都一样,目前至少包括 CPU,GPU,XPU,MKLDNN,GPUDNN 等 + - 反向 kernel 如果不需要支持裁剪,可以做适当归并(但如果要为支持端侧训练留可能性,反向 kernel 可能也是裁剪的潜在目标) +- kernels 下一级子目录,原则上按照 backend 分类按需新建,仅保留两个特殊的目录: + - funcs:为了兼容原先 fluid operators 中 functor 和 function 设计保留的目录,放置支持多种后端的 function 和 functor,还按照原先的一个头文件,多个.cc(u)的方式组织(这部分代码在将来可能被移除,因为会逐渐被 Kernel Primirive API 及 Kernel 间复用替代,这里不做过度设计) + - 例 1:一个公共函数 XXXFunction 在 reduce CPU 和 reduce CUDA 的 kernel 实现中都被调用,并且 reduce CPU 和 reduce GPU 的 kernel 实现是不一样的,那么这个 XXXFunction 应该在 funcs 目录中 + - primitive:Kernel Primitive API,多设备统一 kernel 实现的一些基础工具 + - impl:paddle 目前的 op kernel 实现,有很多仍然是 CPU 和 GPU 复用同一份代码的,在大量的 xx_op.h,这部分代码,不适合放在 cpu 或者 gpu 目录中,也不适合放在 funcs 目录中(会导致 funcs 目录中最终放置了相当一部分 kernel 实现,过于臃肿且混乱,funcs 目录的定位是放置原先 operators/math 目录下那样的工具 functor 和 function),也不适合放到 kernels 根目录下(并不是真正设备无关的实现,仅是 cpu 和 gpu 共用的实现),因此为了使这部分代码迁移时不需要做过多考虑,并且放置的位置也相对符合其实现性质,创建了 impl 这个目录 + - impl 目录下,仅放置跨部分设备实现一致的 kernel 函数,均为头文件,命名均以 xxx_kernel_impl.h 为后缀 + - 例如:scale,fill_constant,fill_any_like 这些 kernel 均属于此类情况 +- kernel 迁移过来之后,首先创建对应 kenrel 头文件直接放置到 kernels 的根目录中,各后端的 kernel 实现放在相应的设备文件夹中 + - 可参考原先 op 的归并程度,如 matmul 原先是单独的.h/.cc,那移过来之后保持,但 activation 相关的基本写在一个.h/.cc,移过来也仍然保持归并(后续有必要再进一步拆分) + - 例 1:原先 cast op 的 Kernel 在 cast_op.h 中,迁移过来之后在根目录创建 cast_kernel.h,cast_kernel.cc/cu 根据使用的后端放到对应的目录,即 cast_kernel.cc 放置到 cpu 中,cast_kernel.cu 放置到 gpu 中 + - 例 2:原先 scale op 的 kernel 使用 eigen 实现,CPU 和 GPU 实现一致,迁移过来之后,公共实现应该在 impl 中的 scale_kernel_impl.h 中,公共头文件在 kernels 根目录下的 scale_kernel.h 中,scale_kernel.cc 在 cpu 中,scale_kernel.cu 在 gpu 中 +- 迁移时,只有本 kernel 用到的辅助函数,一律和 kernel 实现放到同一个 backend 文件中,创建.h 管理代码,不再单独在别处整理代码,除非这些辅助的函数实现是有多处使用的 - 即使有多处调用,如果仍然限于同一设备,直接建头文件放到同一个目录下 -- 
反向kernel与前向kernel实现放置在不同的文件中,文件后缀采用``*_grad_kernel.*``,便于cmake分离编译 - - 不再为反向kernel单独创建目录,否则反向kernel目录下还要创建cpu/gpu等目录 - - 二阶导、三阶导的实现统一也放到grad kernel实现文件中 +- 反向 kernel 与前向 kernel 实现放置在不同的文件中,文件后缀采用``*_grad_kernel.*``,便于 cmake 分离编译 + - 不再为反向 kernel 单独创建目录,否则反向 kernel 目录下还要创建 cpu/gpu 等目录 + - 二阶导、三阶导的实现统一也放到 grad kernel 实现文件中 - 为什么目录名叫`gpu`而不是`cuda`和`hip`? - - cuda和hip代码重复度非常高,统一实现维护成本较低 + - cuda 和 hip 代码重复度非常高,统一实现维护成本较低 ## 2.3 核心组件 @@ -334,15 +334,15 @@ enum class DataType { }; ``` -- 这里什么不使用原先fluid的VarType? - - 理由1:原先fluid的DataType和VarType是同级概念,设计是比较混乱的,例如LoDTensor和FLOAT32是同级概念,但这两者显然不是的,我们不希望继承原先有明显缺陷的设计 - - 理由2:和fluid解耦依赖,便于后续phi可以独立编译 +- 这里什么不使用原先 fluid 的 VarType? + - 理由 1:原先 fluid 的 DataType 和 VarType 是同级概念,设计是比较混乱的,例如 LoDTensor 和 FLOAT32 是同级概念,但这两者显然不是的,我们不希望继承原先有明显缺陷的设计 + - 理由 2:和 fluid 解耦依赖,便于后续 phi 可以独立编译 #### 2.3.1.4 Scalar -Scalar (标量)用来统一表示具有不同基础数据类型(float, double, int, bool等)的变量。(目前也支持表示元素数量为1的Tensor标量,但后续可能会放弃该功能的支持) +Scalar (标量)用来统一表示具有不同基础数据类型(float, double, int, bool 等)的变量。(目前也支持表示元素数量为 1 的 Tensor 标量,但后续可能会放弃该功能的支持) -以`ScaleKernel`为例,其中的`scale`参数可以传入int,float,double等普通数据类型。如果不使用`Scalar`来表示的话,需要为每种数据类型单独创建一个函数接口,这样会大大增加开发Kernel的代码量,因此`Scalar`主要应用在具有不同数据类型的同一参数上,可以避免该场景下需要编写多个重载函数的问题。 +以`ScaleKernel`为例,其中的`scale`参数可以传入 int,float,double 等普通数据类型。如果不使用`Scalar`来表示的话,需要为每种数据类型单独创建一个函数接口,这样会大大增加开发 Kernel 的代码量,因此`Scalar`主要应用在具有不同数据类型的同一参数上,可以避免该场景下需要编写多个重载函数的问题。 ``` template @@ -356,9 +356,9 @@ void ScaleKernel(const Context& dev_ctx, #### 2.3.1.5 IntArray -IntArray 是一个整数类型数组,可以由`vector`,`Tensor`以及`vector`进行构造,目前主要用来表示shape,index以及aixs等维度索引变量。 +IntArray 是一个整数类型数组,可以由`vector`,`Tensor`以及`vector`进行构造,目前主要用来表示 shape,index 以及 aixs 等维度索引变量。 -以FullKernel为例,其中的shape参数用来表示返回Tensor的维度信息(如[2,8,8]),在调用FullKernel时该项参数传入`vector`,`Tensor`和`vector`类型的变量兼可完成调用。使用IntArray避免了每种shape类型单独编写一个重载函数的问题。 +以 FullKernel 为例,其中的 shape 参数用来表示返回 Tensor 的维度信息(如[2,8,8]),在调用 FullKernel 时该项参数传入`vector`,`Tensor`和`vector`类型的变量兼可完成调用。使用 IntArray 避免了每种 shape 类型单独编写一个重载函数的问题。 ``` template @@ -368,7 
+368,7 @@ void FullKernel(const Context& dev_ctx, DenseTensor* out); ``` -### 2.3.2 Tensor体系 +### 2.3.2 Tensor 体系 整体设计类图如下 @@ -377,35 +377,35 @@ void FullKernel(const Context& dev_ctx, 以下依次进行介绍。 -#### 2.3.2.1 API Tensor接口 +#### 2.3.2.1 API Tensor 接口 -- 最上层是API级别的Tensor接口封装,里面包含两个指针成员,TensorBase和AbstractAutogradMeta。 - - 两个成员均使用了Interface设计,不会依赖于真实的Tensor和Autograd实现 - - AutogradMeta仅在动态图API级别的Tensor中有意义,在具体的kernel计算中,不会被使用到,所以将其放到最上层的Tensor接口中 +- 最上层是 API 级别的 Tensor 接口封装,里面包含两个指针成员,TensorBase 和 AbstractAutogradMeta。 + - 两个成员均使用了 Interface 设计,不会依赖于真实的 Tensor 和 Autograd 实现 + - AutogradMeta 仅在动态图 API 级别的 Tensor 中有意义,在具体的 kernel 计算中,不会被使用到,所以将其放到最上层的 Tensor 接口中 - 另外,这样设计也是为了方便数据共享,并且减少拷贝开销 - - 当一个Tensor赋值给另一个Tensor,或者Tensor作为函数返回值时,实际上只会拷贝指针,不会产生真实的数据拷贝 + - 当一个 Tensor 赋值给另一个 Tensor,或者 Tensor 作为函数返回值时,实际上只会拷贝指针,不会产生真实的数据拷贝 -- 最上层C++ Tensor与Python端Tensor扮演类似的角色,在接口设计上尽可能与Python端保持一致 - - 包含基础的Tensor属性访问及数据访问方法 +- 最上层 C++ Tensor 与 Python 端 Tensor 扮演类似的角色,在接口设计上尽可能与 Python 端保持一致 + - 包含基础的 Tensor 属性访问及数据访问方法 - shape, place, dtype, data - - 包含动态图Tensor需要的autograd方法 + - 包含动态图 Tensor 需要的 autograd 方法 - gradient, backward - - 包含Tensor间的转换方法 - - cpu, gpu, xpu等 - - 包含tensor相关的计算方法(暂未添加) + - 包含 Tensor 间的转换方法 + - cpu, gpu, xpu 等 + - 包含 tensor 相关的计算方法(暂未添加) - `paddle.tensor` 模块下所有方法 - 编译解耦: - - 这里带有的autograd信息,只是一个指针索引,默认为空 + - 这里带有的 autograd 信息,只是一个指针索引,默认为空 - `std::unique_ptr autograd_meta_ = nullptr;` - - 而这里的AbstractAutogradMeta是一个抽象类接口,不会依赖autograd的任何模块,因此不会影响 phi 的独立编译,同时又兼顾了动态图Tensor需要持有反向信息的需求 + - 而这里的 AbstractAutogradMeta 是一个抽象类接口,不会依赖 autograd 的任何模块,因此不会影响 phi 的独立编译,同时又兼顾了动态图 Tensor 需要持有反向信息的需求 -- 这里的AutogradMeta仅在动态图场景中才会设置,不需要的场景,比如静态图内就仅仅是个空指针而已 +- 这里的 AutogradMeta 仅在动态图场景中才会设置,不需要的场景,比如静态图内就仅仅是个空指针而已 -Tensor设备的判断及转换 +Tensor 设备的判断及转换 -- Tensor的设备及类型判断 +- Tensor 的设备及类型判断 ``` bool is_cpu() const; @@ -417,45 +417,45 @@ bool is_opencl() const; // 待添加 bool is_metal() const; // 待添加 ``` -- Tensor间类型转换,通过与Python端一致的API实现(待添加) +- Tensor 间类型转换,通过与 Python 端一致的 API 实现(待添加) ``` -Tensor 
cpu() const; // 转换为cpu tensor -Tensor gpu() const; // 转换为gpu tensor +Tensor cpu() const; // 转换为 cpu tensor +Tensor gpu() const; // 转换为 gpu tensor Tensor xpu() const; Tensor mkldnn() const; ``` -- 这个转换的过程可能是cast,也可能是copy - - 如果不需要进行数据拷贝,就是cast - - 如果需要进行数据拷贝,就是copy - - 转换通过函数式kernel去实现 +- 这个转换的过程可能是 cast,也可能是 copy + - 如果不需要进行数据拷贝,就是 cast + - 如果需要进行数据拷贝,就是 copy + - 转换通过函数式 kernel 去实现 -- 在API场景中的使用 - - 用户在完整训练场景中,使用API的时候,最初读入的数据一般是从磁盘读入,先放入CPU,然后再转换到具体执行设备上,比如DataLoader +- 在 API 场景中的使用 + - 用户在完整训练场景中,使用 API 的时候,最初读入的数据一般是从磁盘读入,先放入 CPU,然后再转换到具体执行设备上,比如 DataLoader #### 2.3.2.2 TensorBase -- Tensor实现的接口类,接口中仅包含必要的纯虚Tensor方法,不包含有实际含义的成员,这里的方法在开发过程中也要严格控制 +- Tensor 实现的接口类,接口中仅包含必要的纯虚 Tensor 方法,不包含有实际含义的成员,这里的方法在开发过程中也要严格控制 - 为什么要在这一层用抽象类设计? - - 一方面是为了隔离Tensor API与Tensor具体实现,不产生过多依赖,如果将来Tensor API需要重新设计,或者说需要放弃掉autograd信息,只需要重新设计一个Tensor API即可,对于底层Tensor的实现几乎没有影响 - - 另一方面是为了给异构化的Tensor保留充足的扩展空间,框架API层仅需要一个Tensor数据结构即可,不需要再暴露多种数据结构设计,这里其实做了一个大范围定义,框架内所有数据结构均是Tensor - - 对于内存布局基本一致,或者说Tensor描述基本一致的实现,可以基于一种DenseTensor的实现去继承 - - 如果是异构化程度高的Tensor,可以直接从Interface继承去实现新的Tensor分支,比如只有一个Object的Tensor,确保在Tensor扩展灵活性上不会出现瓶颈 + - 一方面是为了隔离 Tensor API 与 Tensor 具体实现,不产生过多依赖,如果将来 Tensor API 需要重新设计,或者说需要放弃掉 autograd 信息,只需要重新设计一个 Tensor API 即可,对于底层 Tensor 的实现几乎没有影响 + - 另一方面是为了给异构化的 Tensor 保留充足的扩展空间,框架 API 层仅需要一个 Tensor 数据结构即可,不需要再暴露多种数据结构设计,这里其实做了一个大范围定义,框架内所有数据结构均是 Tensor + - 对于内存布局基本一致,或者说 Tensor 描述基本一致的实现,可以基于一种 DenseTensor 的实现去继承 + - 如果是异构化程度高的 Tensor,可以直接从 Interface 继承去实现新的 Tensor 分支,比如只有一个 Object 的 Tensor,确保在 Tensor 扩展灵活性上不会出现瓶颈 #### 2.3.3.3 DenseTensor、SparseTensor -- 对应原fluid内的LoDTensor类,是Tensor的基类实现,Allocation就是现有Allocation,包含现有Tensor的基础成员 -- SparseCsrTensor、SparseCooTensor为新设计的稀疏tensor类型,详见代码实现 +- 对应原 fluid 内的 LoDTensor 类,是 Tensor 的基类实现,Allocation 就是现有 Allocation,包含现有 Tensor 的基础成员 +- SparseCsrTensor、SparseCooTensor 为新设计的稀疏 tensor 类型,详见代码实现 -> 为了兼容原先框架调度及算子,SelectedRows我们也迁移过来作为一种基础Tensor类型,后续如果能够被新的稀疏Tensor替代,长期会移除 +> 为了兼容原先框架调度及算子,SelectedRows 我们也迁移过来作为一种基础 Tensor 类型,后续如果能够被新的稀疏 
Tensor 替代,长期会移除 -#### 2.3.3.4 其他异构Tensor +#### 2.3.3.4 其他异构 Tensor -- 如果现有Allocation的描述无法满足一些第三方库对于Tensor内存的描述需求,可以继承TensorBase之后,使用新的Allocation实现 -- 而这种Tensor本质上没有脱离通用Tensor的范畴,只是访存方式有所区别,其他的TensorMeta信息,它仍然是需要的 -- 可以自行定义特殊的TensorAllocation描述类,去构建自定义的Tensor,例如MetalTensor +- 如果现有 Allocation 的描述无法满足一些第三方库对于 Tensor 内存的描述需求,可以继承 TensorBase 之后,使用新的 Allocation 实现 +- 而这种 Tensor 本质上没有脱离通用 Tensor 的范畴,只是访存方式有所区别,其他的 TensorMeta 信息,它仍然是需要的 +- 可以自行定义特殊的 TensorAllocation 描述类,去构建自定义的 Tensor,例如 MetalTensor ``` template @@ -478,23 +478,23 @@ template class OpenCLTensor : public SpatialTensor {}; ``` -- 通过这种方式,无论Tensor的需求如何特殊,均可以在对外API保持一致的前提下进行内部适配 +- 通过这种方式,无论 Tensor 的需求如何特殊,均可以在对外 API 保持一致的前提下进行内部适配 -其他高自由度Tensor继承:直接继承TensorBase +其他高自由度 Tensor 继承:直接继承 TensorBase -- TensorBase是抽象类,为具体Tensor的描述留了较大的空间,如果传统Tensor的描述无法满足需求,可以设计特异化的Tensor实现 +- TensorBase 是抽象类,为具体 Tensor 的描述留了较大的空间,如果传统 Tensor 的描述无法满足需求,可以设计特异化的 Tensor 实现 ### 2.3.3 C++ API -#### 2.3.3.1 C++ API形式 +#### 2.3.3.1 C++ API 形式 > 本节要点: -> 1. C++ API与Python 2.0 API对应,函数名、参数名、参数顺序、返回值均一致 +> 1. C++ API 与 Python 2.0 API 对应,函数名、参数名、参数顺序、返回值均一致 -经过调研,我们发现只有框架产品在设计时考虑了C++ API易用性层面的问题的。出于长期考虑,我们若想要吸引更多的开发者共建飞桨生态,提供规范易用的C++ API体系也是十分重要的。同时,Python 2.0 API项目为C++ API奠定了良好的参考基础,我们可以直接继承其成果。 +经过调研,我们发现只有框架产品在设计时考虑了 C++ API 易用性层面的问题的。出于长期考虑,我们若想要吸引更多的开发者共建飞桨生态,提供规范易用的 C++ API 体系也是十分重要的。同时,Python 2.0 API 项目为 C++ API 奠定了良好的参考基础,我们可以直接继承其成果。 -因此,目前我们期望Tensor计算库的C++ API声明形式如下: +因此,目前我们期望 Tensor 计算库的 C++ API 声明形式如下: ``` Tensor mean(const Tensor& x); @@ -507,35 +507,35 @@ Tensor scale(const Tensor& x, 说明如下: -- 尽可能与Python API属性保持一致,函数名,参数列表,返回值均保持一致,使用户在Python与C++的切换中,几乎没有新增的学习成本(如果必须不一致,可以增加新的C++ API,Python已有的运算类API与C++ API一一对应) +- 尽可能与 Python API 属性保持一致,函数名,参数列表,返回值均保持一致,使用户在 Python 与 C++的切换中,几乎没有新增的学习成本(如果必须不一致,可以增加新的 C++ API,Python 已有的运算类 API 与 C++ API 一一对应) -**这个新建的C++ API体系目前主要用于什么场景?** +**这个新建的 C++ API 体系目前主要用于什么场景?** -1. 
作为自定义算子开发时可调用的C++ API,提升易用性 - - 例如现在用户在自定义算子中初始化一个Tensor需要循环遍历Tensor数据并赋值,有API之后可以直接调用`paddle::ones`,`paddle::fill`这些API +1. 作为自定义算子开发时可调用的 C++ API,提升易用性 + - 例如现在用户在自定义算子中初始化一个 Tensor 需要循环遍历 Tensor 数据并赋值,有 API 之后可以直接调用`paddle::ones`,`paddle::fill`这些 API 2. 作为新动态图的基础调用单元 - - 新动态图会以API作为调度计算单元,不会再调用Op体系,以提升调度性能 -3. 作为反向Op复用前向Op进行开发的基础 - - 现在反向op kernel需要单独实现,在API体系成型后,希望可以通过复用前向API完成反向Op实现 + - 新动态图会以 API 作为调度计算单元,不会再调用 Op 体系,以提升调度性能 +3. 作为反向 Op 复用前向 Op 进行开发的基础 + - 现在反向 op kernel 需要单独实现,在 API 体系成型后,希望可以通过复用前向 API 完成反向 Op 实现 -#### 2.3.3.2 C++ API自动生成 +#### 2.3.3.2 C++ API 自动生成 -**为什么要自动生成C++ API?** +**为什么要自动生成 C++ API?** - - C++ API的实现代码在形式上相对固定,理论上可以采用自动生成的方式来实现 - - 使用代码自动生成可以有效降低C++ API的开发成本,且方便修改和维护 + - C++ API 的实现代码在形式上相对固定,理论上可以采用自动生成的方式来实现 + - 使用代码自动生成可以有效降低 C++ API 的开发成本,且方便修改和维护 -**如何自动生成C++ API?** +**如何自动生成 C++ API?** - C++ API的自动生成是通过解析 YAML 配置文件来进行生成的,YAML 配置文件分为: + C++ API 的自动生成是通过解析 YAML 配置文件来进行生成的,YAML 配置文件分为: - - 前向API配置文件(`Python/paddle/utils/code_gen/api.yaml`,解析后生成代码文件为`paddle/phi/api/include/api.h`和`paddle/phi/api/lib/api.cc`) - - 反向API配置文件(`Python/paddle/utils/code_gen/backward.yaml`,解析后生成的代码文件为`paddle/phi/api/backward/backward_api.h`和`paddle/phi/api/lib/backward_api.cc`)。 + - 前向 API 配置文件(`Python/paddle/utils/code_gen/api.yaml`,解析后生成代码文件为`paddle/phi/api/include/api.h`和`paddle/phi/api/lib/api.cc`) + - 反向 API 配置文件(`Python/paddle/utils/code_gen/backward.yaml`,解析后生成的代码文件为`paddle/phi/api/backward/backward_api.h`和`paddle/phi/api/lib/backward_api.cc`)。 -C++ API生成的关键在于 YAML 文件的配置,以matmul为例,其前向和反向的配置文件如下: +C++ API 生成的关键在于 YAML 文件的配置,以 matmul 为例,其前向和反向的配置文件如下: ``` -# 前向API配置 +# 前向 API 配置 - api : matmul args : (Tensor x, Tensor y, bool transpose_x=false, bool transpose_y=false) output : Tensor @@ -545,7 +545,7 @@ C++ API生成的关键在于 YAML 文件的配置,以matmul为例,其前向 func : matmul backward : matmul_grad -# 反向API配置 +# 反向 API 配置 - backward_api : matmul_grad forward : matmul (Tensor x, Tensor y, bool transpose_x, bool transpose_y) -> Tensor(out) args : (Tensor x, Tensor y, 
Tensor out_grad, bool transpose_x=false, bool transpose_y=false) @@ -558,30 +558,30 @@ C++ API生成的关键在于 YAML 文件的配置,以matmul为例,其前向 其中各项配置参数含义: -- api:函数名称,需与Phi Kernel注册的函数名相同 -- args:函数参数,顺序和数据类型必须与Phi Kernel同名函数完全一致,Attributes类型必须排在Tensor类型之后。 -- output:输出类型,如果有多个输出间用逗号(“,”) 分隔开。可以在类型后用"()"选择性标记每个输入的名字(如`Tensor(out)`),如果没有标记则默认处理为out0, out1, … -- infer_meta:计算返回Tensor的维度与类型(详见InferMeta函数介绍) - - func为调用的InferMeta函数,默认输入为args项的所有参数和output参数,其中的Tensor类型变量会自动替换为MetaTensor。 -- kernel:API调用的具体Kernel函数 - - func:kernel函数的注册名(REGISTER使用的name,非函数名),默认输入为args项的所有参数和output参数 -- backward:(可选)对应的反向函数名称,没有则生成纯前向API。 +- api:函数名称,需与 Phi Kernel 注册的函数名相同 +- args:函数参数,顺序和数据类型必须与 Phi Kernel 同名函数完全一致,Attributes 类型必须排在 Tensor 类型之后。 +- output:输出类型,如果有多个输出间用逗号(“,”) 分隔开。可以在类型后用"()"选择性标记每个输入的名字(如`Tensor(out)`),如果没有标记则默认处理为 out0, out1, … +- infer_meta:计算返回 Tensor 的维度与类型(详见 InferMeta 函数介绍) + - func 为调用的 InferMeta 函数,默认输入为 args 项的所有参数和 output 参数,其中的 Tensor 类型变量会自动替换为 MetaTensor。 +- kernel:API 调用的具体 Kernel 函数 + - func:kernel 函数的注册名(REGISTER 使用的 name,非函数名),默认输入为 args 项的所有参数和 output 参数 +- backward:(可选)对应的反向函数名称,没有则生成纯前向 API。 -YAML 解析脚本将根据上述配置项自动生成对应的C++ API,生成的代码中会完成包括Kernel自动选择、Tensor转换、Data Transform、InferMeta以及Kernel调用等相关处理逻辑,具体可参考生成的`api.cc`内代码。 +YAML 解析脚本将根据上述配置项自动生成对应的 C++ API,生成的代码中会完成包括 Kernel 自动选择、Tensor 转换、Data Transform、InferMeta 以及 Kernel 调用等相关处理逻辑,具体可参考生成的`api.cc`内代码。 -由于C++ API数量较多,且有着各种各样的形式与功能,为此在 YAML 配置机制上也提供了很多更为灵活的配置项,如`invoke`等。 +由于 C++ API 数量较多,且有着各种各样的形式与功能,为此在 YAML 配置机制上也提供了很多更为灵活的配置项,如`invoke`等。 -### 2.3.4 Kernel形式、注册及管理 +### 2.3.4 Kernel 形式、注册及管理 -#### 2.3.4.1 Kernel形式 +#### 2.3.4.1 Kernel 形式 > 本节要点: -> 1. Kernel函数形式要点: -> (1)数据类型T,与DeviceContext(简写为Context)作为模板参数; -> (2)Context作为Kernel第一个参数; -> (3)返回值Tensor以指针形式作为输入参数,Kernel本身返回值为void +> 1. 
Kernel 函数形式要点: +> (1)数据类型 T,与 DeviceContext(简写为 Context)作为模板参数; +> (2)Context 作为 Kernel 第一个参数; +> (3)返回值 Tensor 以指针形式作为输入参数,Kernel 本身返回值为 void -这一层是具体的Kernel层,这一层实现的函数,会作为Kernel注册到框架中,供框架统一查找和调度。 +这一层是具体的 Kernel 层,这一层实现的函数,会作为 Kernel 注册到框架中,供框架统一查找和调度。 目前我们期望这一层的形式如下,以`scale`为例: @@ -599,45 +599,45 @@ void Scale(const Context& dev_ctx, 说明如下: -- 不同设备的kernel要有不同的函数实现,函数名采用**驼峰式命名**,除了首字母大写之外,命名尽可能和API函数名保持一致,同一个计算的函数命名保持一致,通过不同文件或者目录管理不同设备的函数 -- 一般有两个模板参数,T和Context(尽可能),用于运行时决定数据类型和设备类型 - - 按照我们目前的体系,绝大多数的Kernel都是按照**特化DeviceContext和数据类型**这种方式缩减代码的,这与原先OpKernel的形式一致性比较强 - - 形式要统一,将来如果Kernel层也作为细粒度API暴露的话,易用性有保障 +- 不同设备的 kernel 要有不同的函数实现,函数名采用**驼峰式命名**,除了首字母大写之外,命名尽可能和 API 函数名保持一致,同一个计算的函数命名保持一致,通过不同文件或者目录管理不同设备的函数 +- 一般有两个模板参数,T 和 Context(尽可能),用于运行时决定数据类型和设备类型 + - 按照我们目前的体系,绝大多数的 Kernel 都是按照**特化 DeviceContext 和数据类型**这种方式缩减代码的,这与原先 OpKernel 的形式一致性比较强 + - 形式要统一,将来如果 Kernel 层也作为细粒度 API 暴露的话,易用性有保障 - 函数输入参数规定: - - 以具体的DeviceContext作为第一个输入参数,如CPUContext,CUDAContext,用于满足运行时需要特定上下文信息的需求,如多stream需要传stream进来 - - 暂不支持一个Kernel传入多个DeviceContext参数,目前认为这样的需求不太合理 - - 参数列表和API保持一致,如果有其他的特殊信息需要传入Kernel,通过Context传递 - - 随后是所有的输入Tensor与输入Attribute,均以const &方式传入,POD类型直接以值传入 - - 输入的Tensor是具体的Tensor类型,如DenseTensor或SelectedRows,不是对外接口API那个Tensor - - 最后是函数的返回值Tensor,以指针形式传入 - - 为了满足灵活性,让kernel可以适配更多的场景,后续会允许声明灵活类型的输入、输出和参数,参考tfrt的Argument(输入), Attribute,(属性) Return(输出)等模板,以适配非Tensor的输入输出,以及Tensor类的Attribute,让机制更加灵活 + - 以具体的 DeviceContext 作为第一个输入参数,如 CPUContext,CUDAContext,用于满足运行时需要特定上下文信息的需求,如多 stream 需要传 stream 进来 + - 暂不支持一个 Kernel 传入多个 DeviceContext 参数,目前认为这样的需求不太合理 + - 参数列表和 API 保持一致,如果有其他的特殊信息需要传入 Kernel,通过 Context 传递 + - 随后是所有的输入 Tensor 与输入 Attribute,均以 const &方式传入,POD 类型直接以值传入 + - 输入的 Tensor 是具体的 Tensor 类型,如 DenseTensor 或 SelectedRows,不是对外接口 API 那个 Tensor + - 最后是函数的返回值 Tensor,以指针形式传入 + - 为了满足灵活性,让 kernel 可以适配更多的场景,后续会允许声明灵活类型的输入、输出和参数,参考 tfrt 的 Argument(输入), Attribute,(属性) Return(输出)等模板,以适配非 Tensor 的输入输出,以及 Tensor 类的 Attribute,让机制更加灵活 - 函数内部实现按需决定: - 短期: - - 
将现有OpKernel内实现,迁移到具体的设备Kernel内 - - 将存在设备公用的OpKernel实现抽离为函数,由多个设备Kernel共同调用 + - 将现有 OpKernel 内实现,迁移到具体的设备 Kernel 内 + - 将存在设备公用的 OpKernel 实现抽离为函数,由多个设备 Kernel 共同调用 - 长期: - - 复杂Kernel直接调用基础Kernel完成计算,鼓励Kernel复用,简化代码 + - 复杂 Kernel 直接调用基础 Kernel 完成计算,鼓励 Kernel 复用,简化代码 > FAQ: ->- 为什么第一个参数需要是DeviceContext?为什么不能不传? - - phi kernel要求是纯函数形式,即函数内使用的变量均通过参数传入,或者在函数内部创建,不允许在函数内部使用全局单例,为了适配多样的kernel需求,像DeviceContext这种存储上下文信息的参数是必要的 +>- 为什么第一个参数需要是 DeviceContext?为什么不能不传? + - phi kernel 要求是纯函数形式,即函数内使用的变量均通过参数传入,或者在函数内部创建,不允许在函数内部使用全局单例,为了适配多样的 kernel 需求,像 DeviceContext 这种存储上下文信息的参数是必要的 >- 为什么需要两个模板参数? - - 为了方便设备无关kernel的复用,假如我们要实现一个傅里叶变换fft kernel,假设这个kernel能够使用基础kernel组合得出, + - 为了方便设备无关 kernel 的复用,假如我们要实现一个傅里叶变换 fft kernel,假设这个 kernel 能够使用基础 kernel 组合得出, -#### 2.3.4.3 Kernel实现 +#### 2.3.4.3 Kernel 实现 > 本节要点: -> 1. Kernel专注表达数学算法,不掺杂调度逻辑 -> 2. Kernel足够细粒度,边界清晰,没有可选参数,便于复用 +> 1. Kernel 专注表达数学算法,不掺杂调度逻辑 +> 2. Kernel 足够细粒度,边界清晰,没有可选参数,便于复用 -现有Kernel因为Op参数过于复杂,引入了调度逻辑,例如 +现有 Kernel 因为 Op 参数过于复杂,引入了调度逻辑,例如 -- 通过`use_cudnn`判断是否执行cudnn分支,在新的Tensor计算库中,使用cudnn计算是单独的Kernel +- 通过`use_cudnn`判断是否执行 cudnn 分支,在新的 Tensor 计算库中,使用 cudnn 计算是单独的 Kernel -为了降低成本,Phi Kernel实现会尽可能继承原先的OpKernel实现,大部分Kernel的实现仅需要将原先OpKernel中取Input,Output的逻辑移除,并且修改一些关键方法即可,以sign为例: +为了降低成本,Phi Kernel 实现会尽可能继承原先的 OpKernel 实现,大部分 Kernel 的实现仅需要将原先 OpKernel 中取 Input,Output 的逻辑移除,并且修改一些关键方法即可,以 sign 为例: -原先sign OpKernel: +原先 sign OpKernel: ``` template @@ -658,7 +658,7 @@ class SignKernel : public framework::OpKernel { }; ``` -迁移后的phi sign kernel: +迁移后的 phi sign kernel: ``` template @@ -675,26 +675,26 @@ void SignKernel(const Context& dev_ctx, } ``` -除了kernel形式从结构体变为函数式之外,还有两处主要变化: +除了 kernel 形式从结构体变为函数式之外,还有两处主要变化: -1. 由于参数都是具体的输入,所以不需要再到context里取输入输出,相关代码移除 -2. phi kernel中要求输出tensor的内存申请统一使用`ctx.Alloc`或者`ctx.HostAlloc`方法,不能再使用原先的`mutable_data`申请内存 +1. 由于参数都是具体的输入,所以不需要再到 context 里取输入输出,相关代码移除 +2. phi kernel 中要求输出 tensor 的内存申请统一使用`ctx.Alloc`或者`ctx.HostAlloc`方法,不能再使用原先的`mutable_data`申请内存 > FAQ -> 1. 
为什么mutable_data要替换成ctx.Alloc? -> 答:因为原先的mutable_data方法中调用的全局方法memory::AllocShared内部使用了全局单例进行内存分配,这不符合前面说过的纯函数设计原则,从业务需求上来讲,kernel里面如果使用单例确定显存分配的方式,在推理的多线程环境中,不能线程不能指定不同的存储分配方式。 +> 1. 为什么 mutable_data 要替换成 ctx.Alloc? +> 答:因为原先的 mutable_data 方法中调用的全局方法 memory::AllocShared 内部使用了全局单例进行内存分配,这不符合前面说过的纯函数设计原则,从业务需求上来讲,kernel 里面如果使用单例确定显存分配的方式,在推理的多线程环境中,不能线程不能指定不同的存储分配方式。 -#### 2.3.4.4 Kernel注册 +#### 2.3.4.4 Kernel 注册 > 本节要点: -> 1. Kernel将自身全部关键信息暴露给框架,记录其输入、输出和属性的信息,否则将导致框架调度与 Kernel 计算之间界限不清 +> 1. Kernel 将自身全部关键信息暴露给框架,记录其输入、输出和属性的信息,否则将导致框架调度与 Kernel 计算之间界限不清 -现有 fluid Kernel 注册时仅记录了 Kernel 的 place,layout,dtype,输入输出等统一由 ExecutionContext管理,没有相应的信息记录,现在kernel要改成函数式,每一个函数的输入输出和属性都是明确的,我们希望在这里记录每一个输入输出的信息,也是为了兼容paddle-lite的调度。 +现有 fluid Kernel 注册时仅记录了 Kernel 的 place,layout,dtype,输入输出等统一由 ExecutionContext 管理,没有相应的信息记录,现在 kernel 要改成函数式,每一个函数的输入输出和属性都是明确的,我们希望在这里记录每一个输入输出的信息,也是为了兼容 paddle-lite 的调度。 -同时,我们需要简化Kernel注册的写法,现有的写法都不够简洁: +同时,我们需要简化 Kernel 注册的写法,现有的写法都不够简洁: -1. fluid的Kernel注册写法,有不少冗余信息,以scale为例,可以看到每个kernel除了最后的data type,前面函数名和DeviceContext特化的信息都是冗余的 +1. fluid 的 Kernel 注册写法,有不少冗余信息,以 scale 为例,可以看到每个 kernel 除了最后的 data type,前面函数名和 DeviceContext 特化的信息都是冗余的 ``` REGISTER_OP_CPU_KERNEL( @@ -709,7 +709,7 @@ void SignKernel(const Context& dev_ctx, ops::ScaleKernel); ``` -2. Paddle-Lite的kernel注册写法,为每一个Kernel都声明了输入输出信息,但由于每个数据类型的kernel都是不同的,也会造成写法上的冗余,如下代码可以看到,除了data type,其他的信息也基本是冗余的 +2. 
Paddle-Lite 的 kernel 注册写法,为每一个 Kernel 都声明了输入输出信息,但由于每个数据类型的 kernel 都是不同的,也会造成写法上的冗余,如下代码可以看到,除了 data type,其他的信息也基本是冗余的

```
#ifdef LITE_BUILD_EXTRA
@@ -760,11 +760,11 @@ void SignKernel(const Context& dev_ctx,
.Finalize();
```

-因此,本次设计,不希望继续保持目前这种冗余的写法,希望kernel注册方法足够简洁,同时还能够灵活地满足Kernel输入输出信息配置的需求。
+因此,本次设计,不希望继续保持目前这种冗余的写法,希望 kernel 注册方法足够简洁,同时还能够灵活地满足 Kernel 输入输出信息配置的需求。

-对于这个问题,关键点在于kernel需要指定自己的device,layout和dtype作为它自己的key信息,而大部分kernel输入输出Tensor的device,layout和dtype和kernel自身是一致的,对于这类kernel,我们可以按照kernel的信息自动生成填充每个输入输出的信息,不需要通过BindInput,BindOutput声明;我们只需要针对与kernel信息不一致的输入输出去配置特殊信息即可。
+对于这个问题,关键点在于 kernel 需要指定自己的 device,layout 和 dtype 作为它自己的 key 信息,而大部分 kernel 输入输出 Tensor 的 device,layout 和 dtype 和 kernel 自身是一致的,对于这类 kernel,我们可以按照 kernel 的信息自动生成填充每个输入输出的信息,不需要通过 BindInput,BindOutput 声明;我们只需要针对与 kernel 信息不一致的输入输出去配置特殊信息即可。

-新实现的kernel注册形式如下:
+新实现的 kernel 注册形式如下:

```
PT_REGISTER_KERNEL("sign", CPU, NCHW, pt::Sign, float, double) {}
@@ -782,18 +782,18 @@ PT_REGISTER_KERNEL("scale.host", CPU, NCHW, pt::ScaleHost, float, double, bfloat

说明如下:

-- 去除了之前注册方法中大量的冗余信息,可以一行代码完成8个数据类型的scale kernel注册,同时根据kernel信息默认记录每个输入输出的信息
-- 对于有`ScaleTensor`这种动态attr输入的kernel,可以在函数体重配置具体参数的Backend,Layout和Dtype信息;没有此类需求的,函数体为空即可
+- 去除了之前注册方法中大量的冗余信息,可以一行代码完成 8 个数据类型的 scale kernel 注册,同时根据 kernel 信息默认记录每个输入输出的信息
+- 对于有`ScaleTensor`这种动态 attr 输入的 kernel,可以在函数体中配置具体参数的 Backend,Layout 和 Dtype 信息;没有此类需求的,函数体为空即可

-此外,在`PT_REGISTER_KERNEL`宏内,通过模板推导,对Kernel函数的函数形式了归一化处理。
+此外,在`PT_REGISTER_KERNEL`宏内,通过模板推导,对 Kernel 函数的函数形式进行了归一化处理。

-输入参数列表各异的kernel统一被归一化为如下形式,从而能够以统一的函数指针存储到下文中的Kernel数据结构中:
+输入参数列表各异的 kernel 统一被归一化为如下形式,从而能够以统一的函数指针存储到下文中的 Kernel 数据结构中:

```
using KernelFn = void (*)(KernelContext* ctx);
```

-通过在Kernel函数外包裹`PT_KERNEL`进行自动推导
+通过在 Kernel 函数外包裹`PT_KERNEL`进行自动推导

```
#define PT_KERNEL(...) \
@@ -802,33 +802,33 @@ using KernelFn = void (*)(KernelContext* ctx);

此外,目前仅实现了基本的模板适配,后续我们会根据需求添加,以让在整体机制更加灵活,适用范围更广。

-#### 2.3.4.4 Kernel管理
+#### 2.3.4.4 Kernel 管理

> 本节要点:
-> 1. 介绍目前Kernel管理组件的设计
+> 1. 
介绍目前 Kernel 管理组件的设计 -对于新形式Kernel的管理,目前设计类图如下: +对于新形式 Kernel 的管理,目前设计类图如下: ![kernel-design.png](./images/kernel-design.png) 说明如下: -- `KernelFactory`作为管理Kernel的全局单例数据结构,和fluid的OpKernelMap类似,两级map,第一层根据name找到Kernel集合,第二层根据KernelKey找到具体的Kernel -- `KernelKey`和原先的OpKernelType类似,但将palce和library_type字段合二为一称之为Backend,因为原先的LibraryType是一个有局限的枚举类,原本就和place是强相关的,拆分反而增加了理解成本 -- `Kernel`相比原先的OpKernel持有了更多信息,除了执行时的Function,还持有了具体参数的信息,即`KernelArgsDef`,对于Tensor类输入输出,保存了Tensor类型信息、Device,数据类型、数据布局,对于Attribute类输入输出,保存了类型信息 +- `KernelFactory`作为管理 Kernel 的全局单例数据结构,和 fluid 的 OpKernelMap 类似,两级 map,第一层根据 name 找到 Kernel 集合,第二层根据 KernelKey 找到具体的 Kernel +- `KernelKey`和原先的 OpKernelType 类似,但将 palce 和 library_type 字段合二为一称之为 Backend,因为原先的 LibraryType 是一个有局限的枚举类,原本就和 place 是强相关的,拆分反而增加了理解成本 +- `Kernel`相比原先的 OpKernel 持有了更多信息,除了执行时的 Function,还持有了具体参数的信息,即`KernelArgsDef`,对于 Tensor 类输入输出,保存了 Tensor 类型信息、Device,数据类型、数据布局,对于 Attribute 类输入输出,保存了类型信息 -### 2.3.5 Kernel自动化编译及依赖分析 +### 2.3.5 Kernel 自动化编译及依赖分析 > 本节要点: -> 1. 介绍kernel的自动化编译设计 -> 2. 介绍kernel的自动化依赖分析设计 +> 1. 介绍 kernel 的自动化编译设计 +> 2. 介绍 kernel 的自动化依赖分析设计 -原OpKernel迁移至phi之后,在编译上需要创建新的编译target,目前phi也设计了相应的自动化编译方式,使大家在迁移之后,尽可能不需要关注编译相关的内容。 +原 OpKernel 迁移至 phi 之后,在编译上需要创建新的编译 target,目前 phi 也设计了相应的自动化编译方式,使大家在迁移之后,尽可能不需要关注编译相关的内容。 -#### 2.3.5.1 Kernel自动化编译 +#### 2.3.5.1 Kernel 自动化编译 -目前按照相应的规范迁移kernel之后,重新执行cmake,cmake会自动根据新增kernel的文件名,创建相应的编译对象,相关的逻辑在`paddle/phi/kernels/CMakeLists.txt` +目前按照相应的规范迁移 kernel 之后,重新执行 cmake,cmake 会自动根据新增 kernel 的文件名,创建相应的编译对象,相关的逻辑在`paddle/phi/kernels/CMakeLists.txt` ``` set(COMMON_KERNEL_DEPS dense_tensor sparse_coo_tensor sparse_csr_tensor kernel_context kernel_factory arg_map_context convert_utils lod_utils) @@ -847,37 +847,37 @@ endif() kernel_library(math_kernel DEPS ${MATH_KERNEL_DEPS}) ``` -1. 首先,定义kernel的公共依赖集合`COMMON_KERNEL_DEPS`,有较多kernel依赖的组件均可以放置到该集合中 -2. 通过函数`register_kernels`,自动解析kernels目录下的`***_kernel.h`文件,自动创建对应的target -3. 
如果某个kernel有自己独特的依赖,可以将其标记在`register_kernels`的EXCLUDES集合中,跳过对其的自动生成,后面再使用`kernel_library`函数,生成对应的kernel target,`kernel_library`也是根据文件名自动生成编译target的
+1. 首先,定义 kernel 的公共依赖集合`COMMON_KERNEL_DEPS`,有较多 kernel 依赖的组件均可以放置到该集合中
+2. 通过函数`register_kernels`,自动解析 kernels 目录下的`***_kernel.h`文件,自动创建对应的 target
+3. 如果某个 kernel 有自己独特的依赖,可以将其标记在`register_kernels`的 EXCLUDES 集合中,跳过对其的自动生成,后面再使用`kernel_library`函数,生成对应的 kernel target,`kernel_library`也是根据文件名自动生成编译 target 的

具体`register_kernels`和`kernel_library`如果扫描文件并生成编译对象,可以参考`camke/phi.cmake`中的函数实现,此处不展开介绍了

-#### 2.3.5.2 Kernel依赖自动化分析
+#### 2.3.5.2 Kernel 依赖自动化分析

-phi kernel整体改为了函数式,本意就是让kernel之间可以更加方便地复用,但是复用kernel会引入kernel之间的编译依赖关系,比如A Kernel调用了B Kernel,那么在编译上,A Kernel需要DEPS B Kernel,这样的编译依赖声明对于开发者来讲同样是非常繁琐的,因此我们也设计了对应的自动化解析方式,具体如下:
+phi kernel 整体改为了函数式,本意就是让 kernel 之间可以更加方便地复用,但是复用 kernel 会引入 kernel 之间的编译依赖关系,比如 A Kernel 调用了 B Kernel,那么在编译上,A Kernel 需要 DEPS B Kernel,这样的编译依赖声明对于开发者来讲同样是非常繁琐的,因此我们也设计了对应的自动化解析方式,具体如下:

-在编译A Kernel时,我们会分析A Kernel相关的`.h`和`.cc/cu`文件中include声明,如果A Kernel include了 B Kernel的头文件声明,我们会自动为A Kernel添加B Kernel target的依赖,例如:
+在编译 A Kernel 时,我们会分析 A Kernel 相关的`.h`和`.cc/cu`文件中 include 声明,如果 A Kernel include 了 B Kernel 的头文件声明,我们会自动为 A Kernel 添加 B Kernel target 的依赖,例如:

-dot_kernel.h有`#include "paddle/phi/kernels/empty_kernel.h"`,那么在编译时,dot_kernel会自动依赖empty_kernel,这一过程也是在`register_kernels`和`kernel_library`函数中实现的,可以参考`camke/phi.cmake`中的函数实现。
+dot_kernel.h 有`#include "paddle/phi/kernels/empty_kernel.h"`,那么在编译时,dot_kernel 会自动依赖 empty_kernel,这一过程也是在`register_kernels`和`kernel_library`函数中实现的,可以参考`cmake/phi.cmake`中的函数实现。

-因此,开发时如果需要进行Kernel复用,正确include相应头文件即可。
+因此,开发时如果需要进行 Kernel 复用,正确 include 相应头文件即可。

-> 注意:这里只有kernel间的复用是会自动解析的,如果某个kernel依赖了某个function或者functor,仍然是需要手动声明依赖的,phi的设计鼓励kernel之间的复用,因为kernel本身也成为function了,因此像之前那种调用function的方式长期来讲是基本可以被淘汰掉的,只需要尽可能将function实现为kernel即可
+> 注意:这里只有 kernel 间的复用是会自动解析的,如果某个 kernel 依赖了某个 function 或者 functor,仍然是需要手动声明依赖的,phi 的设计鼓励 kernel 之间的复用,因为 kernel 本身也成为 function 了,因此像之前那种调用 function 
的方式长期来讲是基本可以被淘汰掉的,只需要尽可能将 function 实现为 kernel 即可 ### 2.3.6 InferMeta(Shape)抽象整合 -原先fluid Op的InferShape和OpKernel一样,存在重复开发的问题,因为不同Op的InferShape函数无法复用,因此即使不同Op的InferShape逻辑一样或者类似,也都是重写一遍,本次phi的重构也需要解决此问题。 +原先 fluid Op 的 InferShape 和 OpKernel 一样,存在重复开发的问题,因为不同 Op 的 InferShape 函数无法复用,因此即使不同 Op 的 InferShape 逻辑一样或者类似,也都是重写一遍,本次 phi 的重构也需要解决此问题。 -我们将InferShape同样改写为函数式,支持不同的Op可以调用同一个InferShape函数,提升易用性,降低维护成本。 +我们将 InferShape 同样改写为函数式,支持不同的 Op 可以调用同一个 InferShape 函数,提升易用性,降低维护成本。 > FAQ: -> 1. 为什么要叫InferMeta,而不是继续叫InferShape? -> 答:InferMeta的Meta来源于DenseTensor中的meta成员,在phi中,一个op有两大组件,InferMeta和Kernel。这里InferMeta覆盖了InferShape的功能,但又不限于InferShape,除了对dims和lod的推断,InferMeta中也会承担dtype和layout的推断,这一点和原先是不一样的。 +> 1. 为什么要叫 InferMeta,而不是继续叫 InferShape? +> 答:InferMeta 的 Meta 来源于 DenseTensor 中的 meta 成员,在 phi 中,一个 op 有两大组件,InferMeta 和 Kernel。这里 InferMeta 覆盖了 InferShape 的功能,但又不限于 InferShape,除了对 dims 和 lod 的推断,InferMeta 中也会承担 dtype 和 layout 的推断,这一点和原先是不一样的。 -#### 2.3.6.1 InferMeta相关设计 +#### 2.3.6.1 InferMeta 相关设计 -首先InferMeta也为函数式,几个示例如下: +首先 InferMeta 也为函数式,几个示例如下: ``` void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) { @@ -908,12 +908,12 @@ void ConcatInferMeta(const std::vector& x, 特征介绍如下: 1. 函数命名为`[FunctionDesc|OpName]InferMeta` -2. 函数形式与Kernel类似,函数参数依次为MetaTensor输入,Attribute,MetaTensor输出,返回值为空,原则上InferMeta函数与其对应Kernel函数的参数列表是一一对应的,差别仅为Tensor参数类型,InferMeta函数的Tensor参数为MetaTensor,Kernel函数的Tensor参数为DenseTensor,SparseTensor等 -3. 对于一些需要区分编译期与执行期的InferMeta函数,在末尾添加MetaConfig参数,config中有is_runtime的flag成员,之所以用结构体,是为了便于后续扩展其他flag成员。 +2. 函数形式与 Kernel 类似,函数参数依次为 MetaTensor 输入,Attribute,MetaTensor 输出,返回值为空,原则上 InferMeta 函数与其对应 Kernel 函数的参数列表是一一对应的,差别仅为 Tensor 参数类型,InferMeta 函数的 Tensor 参数为 MetaTensor,Kernel 函数的 Tensor 参数为 DenseTensor,SparseTensor 等 +3. 
对于一些需要区分编译期与执行期的 InferMeta 函数,在末尾添加 MetaConfig 参数,config 中有 is_runtime 的 flag 成员,之所以用结构体,是为了便于后续扩展其他 flag 成员。 -这里使用MetaTensor是为了屏蔽多种Tensor类型,以及兼容原先fluid的VarDesc及Variable,一个op对应一个InferMeta函数即可,如果不对类型进行屏蔽,本身InferMeta函数就会因为输入类型不同而重复开发多份。 +这里使用 MetaTensor 是为了屏蔽多种 Tensor 类型,以及兼容原先 fluid 的 VarDesc 及 Variable,一个 op 对应一个 InferMeta 函数即可,如果不对类型进行屏蔽,本身 InferMeta 函数就会因为输入类型不同而重复开发多份。 -其中MetaTensor的基础设计如下: +其中 MetaTensor 的基础设计如下: ``` class MetaTensor { @@ -943,13 +943,13 @@ class MetaTensor { }; ``` -基类的MetaTensor中有一个TensorBase的指针成员,因此在phi中可以兼容DenseTensor,SelectedRows,SparseTensor等多种类型。 +基类的 MetaTensor 中有一个 TensorBase 的指针成员,因此在 phi 中可以兼容 DenseTensor,SelectedRows,SparseTensor 等多种类型。 -#### 2.3.6.2 InferMeta注册管理 +#### 2.3.6.2 InferMeta 注册管理 -为了支持InferMeta函数的统一调用,InferMeta函数也进行了统一的注册管理。 +为了支持 InferMeta 函数的统一调用,InferMeta 函数也进行了统一的注册管理。 -首先也需要类似前述Kernel形式归一化的`PT_KERTNEL`工具宏,命名为`PT_INFER_META`,并实现类似KernelContext的InferMetaContext(实现不展开了,仅放置部分片段,详见`phi/core/infermeta_utils.h`) +首先也需要类似前述 Kernel 形式归一化的`PT_KERTNEL`工具宏,命名为`PT_INFER_META`,并实现类似 KernelContext 的 InferMetaContext(实现不展开了,仅放置部分片段,详见`phi/core/infermeta_utils.h`) ``` class InferMetaContext { @@ -978,7 +978,7 @@ struct InferMetaFnImpl { }; ``` -然后设计对应的单例类用来存储MetaFn +然后设计对应的单例类用来存储 MetaFn ``` class MetaFnFactory { @@ -1034,17 +1034,17 @@ class MetaFnFactory { }; ``` -封装对应的注册宏,用于InferMeta的注册,注册写法示例如下: +封装对应的注册宏,用于 InferMeta 的注册,注册写法示例如下: ``` PT_REGISTER_INFER_META_FN(sign, phi::UnchangedInferMeta); ``` -对于InferMeta的注册,一般不需要开发者手写,我们通过yaml中api name和InferMeta的映射关系,自动生成对应的注册条目。 +对于 InferMeta 的注册,一般不需要开发者手写,我们通过 yaml 中 api name 和 InferMeta 的映射关系,自动生成对应的注册条目。 -#### 2.3.6.3 InferMeta兼容fluid InferShape +#### 2.3.6.3 InferMeta 兼容 fluid InferShape -在fluid中,继承MetaTensor实现CompatMetaTensor,重写对应的成员方法,以使InferMeta函数兼容VarDesc和Variable的输入,以dims为例,CompatMetaTensor的dims实现为: +在 fluid 中,继承 MetaTensor 实现 CompatMetaTensor,重写对应的成员方法,以使 InferMeta 函数兼容 VarDesc 和 Variable 的输入,以 dims 为例,CompatMetaTensor 的 dims 实现为: ``` class CompatMetaTensor : public 
phi::MetaTensor { @@ -1080,9 +1080,9 @@ class CompatMetaTensor : public phi::MetaTensor { }; ``` -然后,为了将函数式的InferMeta嫁接回fluid的Op体系上,需要将函数式的InferMeta归一化为functor形式。 +然后,为了将函数式的 InferMeta 嫁接回 fluid 的 Op 体系上,需要将函数式的 InferMeta 归一化为 functor 形式。 -通过前面介绍的PT_INFER_META宏归一化函数形式,然后将`PT_INFER_META(***InferMeta)`包装到一个functor中,functor中先将InferShapeContext转换为InferMetaContext,再调用相应InferMeta函数,通过一个宏统一管理代码 +通过前面介绍的 PT_INFER_META 宏归一化函数形式,然后将`PT_INFER_META(***InferMeta)`包装到一个 functor 中,functor 中先将 InferShapeContext 转换为 InferMetaContext,再调用相应 InferMeta 函数,通过一个宏统一管理代码 ``` #define DELCARE_INFER_SHAPE_FUNCTOR(op_type, functor_name, fn) \ @@ -1096,9 +1096,9 @@ class CompatMetaTensor : public phi::MetaTensor { } ``` -这其中的关键函数是`BuildInferMetaContext`,这个函数会从InferShapeContext中,将InferMeta函数需要的参数取出,统一放到InferMetaContext中并返回,InferMeta需要的参数列表通过ArgumentMapping函数获取(详细在2.4 动静态图执行兼容适配中介绍)。 +这其中的关键函数是`BuildInferMetaContext`,这个函数会从 InferShapeContext 中,将 InferMeta 函数需要的参数取出,统一放到 InferMetaContext 中并返回,InferMeta 需要的参数列表通过 ArgumentMapping 函数获取(详细在 2.4 动静态图执行兼容适配中介绍)。 -然后将该functor在Op注册时维护到相应OpInfo中即可,同时删除原先Op的InferShape实现,示例如下 +然后将该 functor 在 Op 注册时维护到相应 OpInfo 中即可,同时删除原先 Op 的 InferShape 实现,示例如下 ``` // 原先实现 @@ -1136,24 +1136,24 @@ REGISTER_OPERATOR(sign, ops::SignOp, ops::SignOpMaker, ``` -至此,实现原Op的InferShape函数迁移至phi InferMeta之后,可以重新注册回fluid中被调用,从而实现InferShape的函数化复用与全局统一。 +至此,实现原 Op 的 InferShape 函数迁移至 phi InferMeta 之后,可以重新注册回 fluid 中被调用,从而实现 InferShape 的函数化复用与全局统一。 ## 2.4 动静态图执行兼容适配 > 本节要点: -> 1. 新形式Kernel如何在现有静态图和动态图体系中调用,难点在于解决多参数Op到少参数Kernel的匹配问题 +> 1. 
新形式 Kernel 如何在现有静态图和动态图体系中调用,难点在于解决多参数 Op 到少参数 Kernel 的匹配问题 -### 2.4.1 ArgumentMapping体系设计 +### 2.4.1 ArgumentMapping 体系设计 -由于新形式Kernel参数列表与Python API对齐,和原先的OpMaker中注册的参数列表存在差异,导致新形式Kernel在原先fluid体系中调用时会很难匹配 +由于新形式 Kernel 参数列表与 Python API 对齐,和原先的 OpMaker 中注册的参数列表存在差异,导致新形式 Kernel 在原先 fluid 体系中调用时会很难匹配 -例如conv2d op,它的OpMaker中注册了4个Input,1个Output,26个Attribute,而conv2d的Python API一共只有8个参数(不算name,3个Tensor输入,5个Attribute输入) +例如 conv2d op,它的 OpMaker 中注册了 4 个 Input,1 个 Output,26 个 Attribute,而 conv2d 的 Python API 一共只有 8 个参数(不算 name,3 个 Tensor 输入,5 个 Attribute 输入) -运行时,调用新Kernel之前,需要将Kernel需要的参数从OpMaker注册的参数中选出来,再传给新Kernel使用。 +运行时,调用新 Kernel 之前,需要将 Kernel 需要的参数从 OpMaker 注册的参数中选出来,再传给新 Kernel 使用。 -对于一些原先就编写规范的算子,它的OpMaker参数和Python api参数本就是对应的,这种标准的情况,不存在需要选参数的需求,对于这部分算子,根据OpProto中输入输出属性的注册顺序,跳过标记为Extra和Quant的成员,可以解决一部分Op和Kernel的参数匹配问题;然而对于一些不太规范,或者说是fluid时代遗留的算子,比如像conv,就需要这样的映射函数,且这个映射函数根据op不同,可能存在非常复杂的判断逻辑,因此现阶段没有办法可以自动化处理。 +对于一些原先就编写规范的算子,它的 OpMaker 参数和 Python api 参数本就是对应的,这种标准的情况,不存在需要选参数的需求,对于这部分算子,根据 OpProto 中输入输出属性的注册顺序,跳过标记为 Extra 和 Quant 的成员,可以解决一部分 Op 和 Kernel 的参数匹配问题;然而对于一些不太规范,或者说是 fluid 时代遗留的算子,比如像 conv,就需要这样的映射函数,且这个映射函数根据 op 不同,可能存在非常复杂的判断逻辑,因此现阶段没有办法可以自动化处理。 -为此,目前设计了ArgumentMapping函数映射的体系,在phi/ops/compat目录下,实现相应的映射函数并注册,然后在phi kernel执行适配时,会调用对应的ArgumentMapping函数,得到phi kernel需要的参数,例如scale op的映射函数如下: +为此,目前设计了 ArgumentMapping 函数映射的体系,在 phi/ops/compat 目录下,实现相应的映射函数并注册,然后在 phi kernel 执行适配时,会调用对应的 ArgumentMapping 函数,得到 phi kernel 需要的参数,例如 scale op 的映射函数如下: ``` /** @@ -1210,7 +1210,7 @@ KernelSignature ScaleOpArgumentMapping(const ArgumentMappingContext& ctx) { } ``` -其中的ArgumentMappingContext基本接口设计如下: +其中的 ArgumentMappingContext 基本接口设计如下: ``` // TODO(chenweihang): Add more methods if needed in future @@ -1237,11 +1237,11 @@ class ArgumentMappingContext { }; ``` -无论ScaleOpArgumentMapping是在fluid中使用,还是在infrt中使用,只要能够构造出特定框架的ArgumentMappingContext,即可获得对应的参数映射关系。 +无论 ScaleOpArgumentMapping 是在 fluid 中使用,还是在 infrt 中使用,只要能够构造出特定框架的 ArgumentMappingContext,即可获得对应的参数映射关系。 
-**1)对fluid的适配** +**1)对 fluid 的适配** -在fluid中,该函数需要同时在静态图和动态图中使用,比较直接的思路是,直接通过ExecutionContext构造ArgumentMappingContext,然后在op执行时调用,例如 +在 fluid 中,该函数需要同时在静态图和动态图中使用,比较直接的思路是,直接通过 ExecutionContext 构造 ArgumentMappingContext,然后在 op 执行时调用,例如 ``` // TODO(chenweihang): split impl based OpProto or Dygraph if needed @@ -1283,9 +1283,9 @@ class ExecutionArgumentMappingContext : public phi::ArgumentMappingContext { }; ``` -**2)对infrt的适配** +**2)对 infrt 的适配** -若在infrt中,infrt只有训练存储的推理program,也就是只有Proto这一层的信息,那么可以通过Proto信息去构造对应的Context使用,**proto中的信息目前在支持参数匹配上是完备的**,例如 +若在 infrt 中,infrt 只有训练存储的推理 program,也就是只有 Proto 这一层的信息,那么可以通过 Proto 信息去构造对应的 Context 使用,**proto 中的信息目前在支持参数匹配上是完备的**,例如 ``` class ProtoArgumentMappingContext : public phi::ArgumentMappingContext { @@ -1365,11 +1365,11 @@ class ProtoArgumentMappingContext : public phi::ArgumentMappingContext { }; ``` -### 2.4.2 phi Kernel兼容调度执行 +### 2.4.2 phi Kernel 兼容调度执行 -目前phi kernel可以兼容地在老Executor,ParallelExecutor,动态图的Tracer,Engine,推理的Predictor,以及新执行器InterpreterCore等在执行体系中被调度执行。 +目前 phi kernel 可以兼容地在老 Executor,ParallelExecutor,动态图的 Tracer,Engine,推理的 Predictor,以及新执行器 InterpreterCore 等在执行体系中被调度执行。 -具体地,在动静态图调用OpKernel之前,判断对于当前计算,比如`scale`是否有新形式的Kernel已经注册,如果已经注册了,则调用新形式的Kernel去执行,如果没找到合适的Kernel,仍然执行之前已有的OpKernel。 +具体地,在动静态图调用 OpKernel 之前,判断对于当前计算,比如`scale`是否有新形式的 Kernel 已经注册,如果已经注册了,则调用新形式的 Kernel 去执行,如果没找到合适的 Kernel,仍然执行之前已有的 OpKernel。 ``` if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(type_)) { @@ -1452,11 +1452,11 @@ class ProtoArgumentMappingContext : public phi::ArgumentMappingContext { } ``` -对于phi kernel的执行,有两个关键函数 +对于 phi kernel 的执行,有两个关键函数 **GetExpectedPhiKernelArgs** -- 在调用phi kernel时,要完成多属性到少属性的匹配,这里就需要调用前述的ArgumentMapping函数,从而得到phi kernel的参数列表,GetExpectedPhiKernelArgs实现如下: +- 在调用 phi kernel 时,要完成多属性到少属性的匹配,这里就需要调用前述的 ArgumentMapping 函数,从而得到 phi kernel 的参数列表,GetExpectedPhiKernelArgs 实现如下: ``` KernelSignature OperatorWithKernel::GetExpectedPhiKernelArgs( @@ -1469,10 +1469,10 @@ KernelSignature 
OperatorWithKernel::GetExpectedPhiKernelArgs( **BuildPhiKernelContext** -- 要调用phi kernel,需要准备phi kernel需要的Context,PhiKernelContext和原先的RuntimeContext及ExecutionContext不同之处在于,PhiKernelContext中是以SmallVector存储输入输出及属性,访问效率上要比原先的map高一些 -- PhiKernelContext中不存储输入输出及属性的name,要求这几项顺次存储,和kernel的参数列表顺序一致 +- 要调用 phi kernel,需要准备 phi kernel 需要的 Context,PhiKernelContext 和原先的 RuntimeContext 及 ExecutionContext 不同之处在于,PhiKernelContext 中是以 SmallVector 存储输入输出及属性,访问效率上要比原先的 map 高一些 +- PhiKernelContext 中不存储输入输出及属性的 name,要求这几项顺次存储,和 kernel 的参数列表顺序一致 -Phi KernelContext的基本设计如下: +Phi KernelContext 的基本设计如下: ``` /** @@ -1582,7 +1582,7 @@ class KernelContext { ## 2.5 产品思考及后续规划 -目前,phi算子库仍然处在Kernel体系的建设阶段,Kernel尚未完全迁移,且仍然存在诸多完善点,但将来phi算子库会更好地将“算子”的概念纳入进来,这还需要比较长的时间和比较大的人力投入。最后,从“产品”的角度介绍一下phi后续对于算子开发范式的规划,也能够让开发者更容易理解 “为什么要做算子库重构?” 这件事。 +目前,phi 算子库仍然处在 Kernel 体系的建设阶段,Kernel 尚未完全迁移,且仍然存在诸多完善点,但将来 phi 算子库会更好地将“算子”的概念纳入进来,这还需要比较长的时间和比较大的人力投入。最后,从“产品”的角度介绍一下 phi 后续对于算子开发范式的规划,也能够让开发者更容易理解 “为什么要做算子库重构?” 这件事。 ### 2.5.1 原算子开发范式 @@ -1601,38 +1601,38 @@ class KernelContext { 算子同样可以按照这个原则去类比: -1. 这个算子叫什么,有哪些参数,返回值是什么(即Op) -2. 这个算子在不同场景,不同设备中,怎么执行,怎么计算(即Kernel) +1. 这个算子叫什么,有哪些参数,返回值是什么(即 Op) +2. 这个算子在不同场景,不同设备中,怎么执行,怎么计算(即 Kernel) -如果我们**能分得清楚1和2的边界,并守住这个边界**,我们设计就能够趋于简练。 +如果我们**能分得清楚 1 和 2 的边界,并守住这个边界**,我们设计就能够趋于简练。 这是什么意思?就是说如果我们一定要用两段式来介绍一个对象,那么哪部分应该在第一段?哪部分应该在第二段?得有个逻辑清晰的认知。例如,我们用两段式介绍一个人: -- 方式1:1. 他叫张三;2. 他在百度工作,他喜欢唱歌、爬山、骑行,他待人真诚,认真负责 -- 方式2:1. 他叫张三,他喜欢唱歌;2. 他在百度工作,他喜欢爬山、骑行,他待人真诚,认真负责 +- 方式 1:1. 他叫张三;2. 他在百度工作,他喜欢唱歌、爬山、骑行,他待人真诚,认真负责 +- 方式 2:1. 他叫张三,他喜欢唱歌;2. 他在百度工作,他喜欢爬山、骑行,他待人真诚,认真负责 -哪种分段方式更好一些呢?答案是显然的,方式2的两段中有同样形式的内容,逻辑不清。 +哪种分段方式更好一些呢?答案是显然的,方式 2 的两段中有同样形式的内容,逻辑不清。 为什么用这种方式来类比?因为我们的算子开发面临的场景我觉得是一样的,市面上现有的框架,对于算子的定义,都围绕着**“1. 算子描述,2. 
算子执行”**的两段式进行设计。 -顺着这个思路,我从“语文、逻辑和信息认知”的角度介绍一下我对fluid算子开发现状的理解,如果把现在的算子体系当做一篇以**算子**为题目的“小学作文”来看的话,拿高分有点困难。 +顺着这个思路,我从“语文、逻辑和信息认知”的角度介绍一下我对 fluid 算子开发现状的理解,如果把现在的算子体系当做一篇以**算子**为题目的“小学作文”来看的话,拿高分有点困难。 **(1)"生僻词"比较多** -fluid的Op开发概念对于新人来讲,可能是一种看“文言文”的感觉,似懂非懂。 +fluid 的 Op 开发概念对于新人来讲,可能是一种看“文言文”的感觉,似懂非懂。 -如果我要描述一个“运算”,我需要讲清楚它叫什么,输入输出有哪些,这就够了,例如一个乘法运算,`叫multiply,输入x,y,得到out`,在这一点上,Python API是足够简练的。 +如果我要描述一个“运算”,我需要讲清楚它叫什么,输入输出有哪些,这就够了,例如一个乘法运算,`叫 multiply,输入 x,y,得到 out`,在这一点上,Python API 是足够简练的。 那么现在我们的内部算子要怎么描述呢?要实现以下类和函数,可以参考 [mul_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc) : ``` -# 前向op +# 前向 op OperatorWithKernel - InferShape - GetExpectedKernelType - GetKernelTypeForVar OpMaker -# 反向op +# 反向 op GradOp GradOpMaker # 类型推导 @@ -1640,17 +1640,17 @@ VarTypeInference # 显存优化 InplaceOpInference NoNeedBufferVarsInference -# Op注册 +# Op 注册 REGISTER_OPERATOR -# Op版本管理 +# Op 版本管理 REGISTER_OP_VERSION ``` 直观看说实话会有点困惑,新人可能会有什么疑问呢? -- Operator可以理解,是算子,OpMaker也可以理解,是告诉这个算子怎么生成,但为什么要两个呢?OpMaker都已经告诉你要怎么生成这个Op了,为什么还需要再写一个Operator?Maker不应该把Operator make出来吗? +- Operator 可以理解,是算子,OpMaker 也可以理解,是告诉这个算子怎么生成,但为什么要两个呢?OpMaker 都已经告诉你要怎么生成这个 Op 了,为什么还需要再写一个 Operator?Maker 不应该把 Operator make 出来吗? - 除了这俩,剩下的都看不懂。。。这些都什么意思?什么时候用?我新开发这个算子哪个需要写,哪个不需要写? -- 算了,我短时间内搞不懂,算子也着急,找一个类似的算子,copy一份,它写了什么我照着挪过来,能跑对就行。。。(这可能是大部分新人开发算子的心态) +- 算了,我短时间内搞不懂,算子也着急,找一个类似的算子,copy 一份,它写了什么我照着挪过来,能跑对就行。。。(这可能是大部分新人开发算子的心态) **(2)重复的“修饰”比较多** @@ -1658,51 +1658,51 @@ REGISTER_OP_VERSION 以 [MulOpMaker](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/mul_op.cc#L72) 和 [DotOpMaker](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/dot_op.cc#L35) 的实现为例,我们可以发现以下几点: -1. 除了Op名字,输入、输出和参数命名,两段结构极其类似?为什么我们不能把这几个空抠出来让开发者直接填空? -2. 输入、输出和参数后面的大段描述属于**重复建设**,并且现阶段没有用处,因为Python端已经写过一遍了,并且写得更规范,更清楚,C++端这里的参数注释没有人把关,质量参差不齐。 +1. 
除了 Op 名字,输入、输出和参数命名,两段结构极其类似?为什么我们不能把这几个空抠出来让开发者直接填空? +2. 输入、输出和参数后面的大段描述属于**重复建设**,并且现阶段没有用处,因为 Python 端已经写过一遍了,并且写得更规范,更清楚,C++端这里的参数注释没有人把关,质量参差不齐。 -再看Operator的GetExpectedKernelType([mul](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/mul_op.cc#L41)和[dot](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/dot_op.cc#L28)):,也一样,都是根据x选择kernel,那为什么还要让开发者写其他的内容呢?直接做个填空,填x是不是就行了。 +再看 Operator 的 GetExpectedKernelType([mul](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/mul_op.cc#L41)和[dot](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/dot_op.cc#L28)):,也一样,都是根据 x 选择 kernel,那为什么还要让开发者写其他的内容呢?直接做个填空,填 x 是不是就行了。 -我们开发Op的时候,这些组件多少都存在这样的问题,这增加了大家工作量和理解成本。 +我们开发 Op 的时候,这些组件多少都存在这样的问题,这增加了大家工作量和理解成本。 **(3)相同的“段落”写了好多遍** -这里主要指OpKernel的开发,我们现在的OpKernel之间可复用性较差,比如已经有了mul和add的Kernel,我们现在要新增一个fc算子(由mul和add)组成,我们得去mul和add的kernel中把代码拷贝一份过来使用,而不能直接调用mul和add的kernel。 +这里主要指 OpKernel 的开发,我们现在的 OpKernel 之间可复用性较差,比如已经有了 mul 和 add 的 Kernel,我们现在要新增一个 fc 算子(由 mul 和 add)组成,我们得去 mul 和 add 的 kernel 中把代码拷贝一份过来使用,而不能直接调用 mul 和 add 的 kernel。 -这是我们建设phi初期要解决的问题,并且我从周围新人的口中已经听到过多次这样的反馈: +这是我们建设 phi 初期要解决的问题,并且我从周围新人的口中已经听到过多次这样的反馈: -- 我开发新算子,需要一个broadcast操作,我得去另一个算子里copy过来,还得先调通,copy的时候可能没copy全,或者应用场景稍有不同,这都需要额外的时间 -- 实现gumbol-softmax算子,因为softmax是其中的子运算,我得先把softmax的kernel实现copy过来 +- 我开发新算子,需要一个 broadcast 操作,我得去另一个算子里 copy 过来,还得先调通,copy 的时候可能没 copy 全,或者应用场景稍有不同,这都需要额外的时间 +- 实现 gumbel-softmax 算子,因为 softmax 是其中的子运算,我得先把 softmax 的 kernel 实现 copy 过来 **(4)“描述”本身有二义性分段** -说回开始的两段式结构,”1.算子描述;2.算子执行“,分这两段是必要的,也是业界普遍的做法,我们不需要再分第三段了,但paddle目前存在第三段,算子描述分了两段进行,并且这两段还不一致,即PythonaAPI和Op。 +说回开始的两段式结构,”1.算子描述;2.算子执行“,分这两段是必要的,也是业界普遍的做法,我们不需要再分第三段了,但 paddle 目前存在第三段,算子描述分了两段进行,并且这两段还不一致,即 Python API 和 Op。 -API和Op都是对算子运行行为的概要描述,本质上只是同一段内容的不同展现形式,比如[Python dot 
API](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/Python/paddle/tensor/linalg.py#L993)和[DotOpMaker](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/dot_op.cc#L35),就是告诉别人“它叫什么,参数都是什么”。 +API 和 Op 都是对算子运行行为的概要描述,本质上只是同一段内容的不同展现形式,比如[Python dot API](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/Python/paddle/tensor/linalg.py#L993)和[DotOpMaker](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/dot_op.cc#L35),就是告诉别人“它叫什么,参数都是什么”。 咱们对同一个东西的描述,分两个地方写,还写得不一样,这是很令人费解的。就好像你介绍一个人,在学校你说他叫”张三“,在公司你说他叫”张三丰“,有相像之处,但又不是一个意思。 对于一个算子,它的输入、输出应该在各个场景下都是一致的,如果不一致,那本质上就不是一个算子。 -比如,conv2d的api和op,[Python conv2d API](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/Python/paddle/nn/functional/conv.py#L416),很简单,8个输入参数;但是对应的[conv2d op](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/conv_op.cc#L259),有**32个**输入参数,让人摸不着头脑。 +比如,conv2d 的 api 和 op,[Python conv2d API](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/Python/paddle/nn/functional/conv.py#L416),很简单,8 个输入参数;但是对应的[conv2d op](https://github.com/PaddlePaddle/Paddle/blob/c6f49f0b9f189e043b458348d7fd1468e2645621/paddle/fluid/operators/conv_op.cc#L259),有**32 个**输入参数,让人摸不着头脑。 -开发者也会很困惑,我开发op的时候,API和Op不是一个东西吗,我应该写得一样呢?还是不一样? +开发者也会很困惑,我开发 op 的时候,API 和 Op 不是一个东西吗,我应该写得一样呢?还是不一样? -推理之前为什么要做**算子增强推全**,就是op的参数太多了,但API的参数很少,这两者本来是介绍一个东西,却差别如此之大,所以需要发动全员,在op的某些参数上标记AsExtra,就声明这个参数可能是多余的。 +推理之前为什么要做**算子增强推全**,就是 op 的参数太多了,但 API 的参数很少,这两者本来是介绍一个东西,却差别如此之大,所以需要发动全员,在 op 的某些参数上标记 AsExtra,就声明这个参数可能是多余的。 当然我们演变成现在这样,有一定历史原因: -1. Op输入输出参数规范限制差,留的口子太大,可以天马行空地写; -2. 2.0 API对外层Python API的形态做了大范围规整,但是Op层保持不变,是导致目前同一段描述差异变大的一个主要原因。 +1. Op 输入输出参数规范限制差,留的口子太大,可以天马行空地写; +2. 
2.0 API 对外层 Python API 的形态做了大范围规整,但是 Op 层保持不变,是导致目前同一段描述差异变大的一个主要原因。 -对于这个问题的解决,我们的方向是很明确的,就是**Op层描述向API层靠拢,因为API层的定义是经过2.0 API项目仔细设计过的**。 +对于这个问题的解决,我们的方向是很明确的,就是**Op 层描述向 API 层靠拢,因为 API 层的定义是经过 2.0 API 项目仔细设计过的**。 ### 2.5.2 新算子开发范式:完形填空 + 拼积木 -phi期望的Op开发方式:**“完形填空”式算子描述实现 + “堆积木”式算子执行实现** +phi 期望的 Op 开发方式:**“完形填空”式算子描述实现 + “堆积木”式算子执行实现** -**Op实现:** +**Op 实现:** 需要写的内容如下: @@ -1719,15 +1719,15 @@ phi期望的Op开发方式:**“完形填空”式算子描述实现 + “堆 param : [x, y, -1] ``` -以填空为主要方式,名字,输入、输出、输出的增强推断,用什么Kernel。 +以填空为主要方式,名字,输入、输出、输出的增强推断,用什么 Kernel。 原先需要写得大段重复代码,全部通过”代码自动生成“的手段去实现,开发者不用再关注。 主要思想:仅让开发者关注最小的差异化信息集合,填空指定信息。 -这里Op配置时,要求和Python端参数命名等完全一致,做到上下层描述一致,不给开发者留空间在op层自由发挥,导致想加什么加什么的随意行为。如果需要给op加参数,API也要一起更新,这首先需要通过不兼容升级评审。 +这里 Op 配置时,要求和 Python 端参数命名等完全一致,做到上下层描述一致,不给开发者留空间在 op 层自由发挥,导致想加什么加什么的随意行为。如果需要给 op 加参数,API 也要一起更新,这首先需要通过不兼容升级评审。 -**Kernel实现:** +**Kernel 实现:** ``` template @@ -1738,6 +1738,6 @@ Fc(const Context& dev_ctx, const Tensor& x, const Tensor& w, const Tensor& b, Te PT_REGISTE_KERNEL("fc", Fc, ...) 
``` -mul和add操作的拼接,代码量很少,再加一个注册声明。 +mul 和 add 操作的拼接,代码量很少,再加一个注册声明。 -整个Op+Kernel的开发也就十几行代码,在去除所有冗余信息,仅保留差异化信息上,这种方式已经是没有什么精简空间了。 +整个 Op+Kernel 的开发也就十几行代码,在去除所有冗余信息,仅保留差异化信息上,这种方式已经是没有什么精简空间了。 diff --git a/docs/design/quantization/training_quantization_model_format.md b/docs/design/quantization/training_quantization_model_format.md index 55811b3721e..90784c6abdf 100644 --- a/docs/design/quantization/training_quantization_model_format.md +++ b/docs/design/quantization/training_quantization_model_format.md @@ -1,48 +1,48 @@ # 量化训练模型格式说明 -PaddlePaddle框架主要支持动态量化和静态量化两种量化训练模式。其中,动态量化会在每次推断过程中动态计算量化比例系数的值,而静态量化则对不同的输入采用相同的量化比例系数值。 对于权重而言,在训练过程中采用动态量化模式。换句话说,在每次迭代过程中量化比例系数均会被重新计算得到直至训练过程结束。 对于激活而言,可以选择动态量化模式也可以选择静态量化模式。若选择使用静态量化模式,则量化比例系数会在训练过程中被评估求得,且在推断过程中被使用(不同的输入均保持不变)。除此之外,卷积权重的动态量化亦包括两种形式:1)Tensor-wise量化,即直接求取整个权重Tensor的量化scale值(单一值);2)Channel-wise量化,即对权重Tensor按照channel维度进行分片,然后求取每个通道Tensor的scale值。 +PaddlePaddle 框架主要支持动态量化和静态量化两种量化训练模式。其中,动态量化会在每次推断过程中动态计算量化比例系数的值,而静态量化则对不同的输入采用相同的量化比例系数值。 对于权重而言,在训练过程中采用动态量化模式。换句话说,在每次迭代过程中量化比例系数均会被重新计算得到直至训练过程结束。 对于激活而言,可以选择动态量化模式也可以选择静态量化模式。若选择使用静态量化模式,则量化比例系数会在训练过程中被评估求得,且在推断过程中被使用(不同的输入均保持不变)。除此之外,卷积权重的动态量化亦包括两种形式:1)Tensor-wise 量化,即直接求取整个权重 Tensor 的量化 scale 值(单一值);2)Channel-wise 量化,即对权重 Tensor 按照 channel 维度进行分片,然后求取每个通道 Tensor 的 scale 值。 -## 1. Tensor-wise量化 +## 1. 
Tensor-wise 量化 ### 1.1 动态量化 -动态量化主要通过`fake_quantize_abs_max`op实现,该op对输入tensor进行量化并输出值域在-127~+127范围内的量化tensor。`fake_quantize_abs_max`op在对输入tensor进行量化时使用的量化scale是动态计算出来的,即取输入tensor元素的绝对值最大值。动态计算出的量化scale会作为反量化op的一个输入,用于求取反量化tensor。下面是对`fake_quantize_abs_max`op的整体描述: +动态量化主要通过`fake_quantize_abs_max`op 实现,该 op 对输入 tensor 进行量化并输出值域在-127~+127 范围内的量化 tensor。`fake_quantize_abs_max`op 在对输入 tensor 进行量化时使用的量化 scale 是动态计算出来的,即取输入 tensor 元素的绝对值最大值。动态计算出的量化 scale 会作为反量化 op 的一个输入,用于求取反量化 tensor。下面是对`fake_quantize_abs_max`op 的整体描述: ``` fake_quantize_abs_max { inputs { - X(Tensor): 激活tensor或权重tensor + X(Tensor): 激活 tensor 或权重 tensor } outputs { - Out(Tensor): 已量化tensor - OutScale(Tensor): 动态计算得到的量化scale,其元素个数为1(tensor-wise量化) + Out(Tensor): 已量化 tensor + OutScale(Tensor): 动态计算得到的量化 scale,其元素个数为 1(tensor-wise 量化) } attrs { - bit_length(int): 量化bit数,如8-bit + bit_length(int): 量化 bit 数,如 8-bit } } ``` ### 1.2 静态量化 -与动态量化不同,静态量化的量化scale是在量化训练时通过**窗口滑动平均**或者**窗口绝对值最大值**等方法计算求得的。静态量化主要通过`fake_quantize_moving_average_abs_max`op或者`fake_quantize_range_abs_max`op实现,它们利用输入的量化scale将输入tensor量化到-127~127值域范围内。`fake_quantize_moving_average_abs_max`op和`fake_quantize_range_abs_max`op的输入和输出格式都是一样的,不同点在于op内部计算量化scale时使用的策略不同。`fake_quantize_moving_average_abs_max`op使用一个窗口内绝对值最大值的滑动平均值作为量化sacle,而`fake_quantize_range_abs_max`op使用一个窗口内绝对值最大值的最大值作为量化sacle。下面以`fake_quantize_moving_average_abs_max`op为例,对其进行整体描述: +与动态量化不同,静态量化的量化 scale 是在量化训练时通过**窗口滑动平均**或者**窗口绝对值最大值**等方法计算求得的。静态量化主要通过`fake_quantize_moving_average_abs_max`op 或者`fake_quantize_range_abs_max`op 实现,它们利用输入的量化 scale 将输入 tensor 量化到-127~127 值域范围内。`fake_quantize_moving_average_abs_max`op 和`fake_quantize_range_abs_max`op 的输入和输出格式都是一样的,不同点在于 op 内部计算量化 scale 时使用的策略不同。`fake_quantize_moving_average_abs_max`op 使用一个窗口内绝对值最大值的滑动平均值作为量化 sacle,而`fake_quantize_range_abs_max`op 使用一个窗口内绝对值最大值的最大值作为量化 sacle。下面以`fake_quantize_moving_average_abs_max`op 为例,对其进行整体描述: ``` fake_quantize_moving_average_abs_max { inputs { - X(Tensor): 一般为激活tensor - InScale(Tensor): 
量化训练中计算求得的scale + X(Tensor): 一般为激活 tensor + InScale(Tensor): 量化训练中计算求得的 scale } outputs { - Out(Tensor): 已量化tensor - OutScale(Tensor): 量化训练中计算求得的scale,用于继续传递到反量化op + Out(Tensor): 已量化 tensor + OutScale(Tensor): 量化训练中计算求得的 scale,用于继续传递到反量化 op } attrs { is_test(bool): 指明是量化训练过程还是预测推断过程 - bit_length(int): 量化bit数,如8-bit + bit_length(int): 量化 bit 数,如 8-bit moving_rate(float): 滑动平均衰减系数 } } @@ -50,94 +50,94 @@ fake_quantize_moving_average_abs_max { ### 1.3 反量化 -无论是静态量化还是动态量化,在量化计算完成后都需要进行反量化操作,该操作即是通过`fake_dequantize_abs_max`op实现的。具体来说,`fake_quantize_abs_max`op负责将fp32数值映射到int8值域(-127~127),而`fake_dequantize_abs_max` op是将int8数值映射到fp32值域。 +无论是静态量化还是动态量化,在量化计算完成后都需要进行反量化操作,该操作即是通过`fake_dequantize_abs_max`op 实现的。具体来说,`fake_quantize_abs_max`op 负责将 fp32 数值映射到 int8 值域(-127~127),而`fake_dequantize_abs_max` op 是将 int8 数值映射到 fp32 值域。


-根据[量化训练的原理](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/tutorial.md#1-quantization-aware-training%E9%87%8F%E5%8C%96%E4%BB%8B%E7%BB%8D)可知,`fake_dequantize_abs_max` op主要通过公式1-3-1进行反量化操作。在实现中,`fake_dequantize_abs_max` op将激活scale作为Variable(Tensor)进行输入($X_{scale}$),将公式1-3-1中关于scale的剩余部分作为max\_range属性(即公式1-3-2)。`fake_dequantize_abs_max` op的整体描述如下: +根据[量化训练的原理](https://github.com/PaddlePaddle/models/blob/develop/PaddleSlim/docs/tutorial.md#1-quantization-aware-training%E9%87%8F%E5%8C%96%E4%BB%8B%E7%BB%8D)可知,`fake_dequantize_abs_max` op 主要通过公式 1-3-1 进行反量化操作。在实现中,`fake_dequantize_abs_max` op 将激活 scale 作为 Variable(Tensor)进行输入($X_{scale}$),将公式 1-3-1 中关于 scale 的剩余部分作为 max\_range 属性(即公式 1-3-2)。`fake_dequantize_abs_max` op 的整体描述如下: ``` fake_dequantize_abs_max { inputs { - X(Tensor): 输入tensor - Scale(Tensor): 激活scale + X(Tensor): 输入 tensor + Scale(Tensor): 激活 scale } outputs { - Out(Tensor): 已反量化tensor + Out(Tensor): 已反量化 tensor } attrs { - max_range(float): 根据公式1-3-2和公式1-3-3计算所得 + max_range(float): 根据公式 1-3-2 和公式 1-3-3 计算所得 } } ``` -## 2. 卷积权重Channel-wise量化 -### 2.1 分channel量化 +## 2. 
卷积权重 Channel-wise 量化 +### 2.1 分 channel 量化 -分channel量化与动态量化类似,也是将输入tensor量化到-127~+127值域范围内,不同之处在于分channel量化会对tensor按照channel维度进行分片,然后求取每个通道tensor的scale值。在PaddlePaddle框架中,`fake_channel_wise_quantize_abs_max`op实现了分channel量化的逻辑。注意,目前仅对权重进行分channel量化,对激活是不进行分channel量化的,并且分channel量化只作用在卷积操作上(包括`conv2d`和`depthwise_conv2d`)。下面是对`fake_channel_wise_quantize_abs_max`op的整体描述: +分 channel 量化与动态量化类似,也是将输入 tensor 量化到-127~+127 值域范围内,不同之处在于分 channel 量化会对 tensor 按照 channel 维度进行分片,然后求取每个通道 tensor 的 scale 值。在 PaddlePaddle 框架中,`fake_channel_wise_quantize_abs_max`op 实现了分 channel 量化的逻辑。注意,目前仅对权重进行分 channel 量化,对激活是不进行分 channel 量化的,并且分 channel 量化只作用在卷积操作上(包括`conv2d`和`depthwise_conv2d`)。下面是对`fake_channel_wise_quantize_abs_max`op 的整体描述: ``` fake_channel_wise_quantize_abs_max { inputs { - X(Tensor): 权重tensor + X(Tensor): 权重 tensor } outputs { - Out(Tensor): 已量化tensor - OutScale(Tensor): 分channel计算得到的scale,其元素个数与输入tensor的通道数相同 + Out(Tensor): 已量化 tensor + OutScale(Tensor): 分 channel 计算得到的 scale,其元素个数与输入 tensor 的通道数相同 } attrs { - bit_length(int): 量化bit数,如8-bit + bit_length(int): 量化 bit 数,如 8-bit } } ``` -### 2.2 分channel反量化 -若对卷积权重进行了分channel量化,则反量化操作时必须采用分channel反量化。`fake_channel_wise_dequantize_max_abs`op实现了分channel反量化的逻辑,它的输入Scales包括两个scale tensor,即激活scale和权重scale。根据2.1节的描述可知,权重采用的是channel-wise量化而激活采用的是tensor-wise量化,所以激活scale对应的tensor仅包含一个值而权重scale对应的tensor包括输出通道数个值。下面是对`fake_channel_wise_dequantize_max_abs`op的整体描述: +### 2.2 分 channel 反量化 +若对卷积权重进行了分 channel 量化,则反量化操作时必须采用分 channel 反量化。`fake_channel_wise_dequantize_max_abs`op 实现了分 channel 反量化的逻辑,它的输入 Scales 包括两个 scale tensor,即激活 scale 和权重 scale。根据 2.1 节的描述可知,权重采用的是 channel-wise 量化而激活采用的是 tensor-wise 量化,所以激活 scale 对应的 tensor 仅包含一个值而权重 scale 对应的 tensor 包括输出通道数个值。下面是对`fake_channel_wise_dequantize_max_abs`op 的整体描述: ``` fake_channel_wise_dequantize_max_abs { inputs { - X(Tensor): 输入tensor - Scales(Tensor List): 一般包括两个tensor,且第一个为权重scale,第二个为激活scale + X(Tensor): 输入 tensor + Scales(Tensor List): 一般包括两个 tensor,且第一个为权重 scale,第二个为激活 scale } outputs { - 
Out(Tensor): 已反量化tensor + Out(Tensor): 已反量化 tensor } attrs { - quant_bits(int list): 一般包括两个整数值,分别为求取Scales中不同scale值时对应的量化bit数。 + quant_bits(int list): 一般包括两个整数值,分别为求取 Scales 中不同 scale 值时对应的量化 bit 数。 } } ``` ## 3. 注意点 -1) 8-bit量化训练中采用有符号的int8进行量化,且所有的scale都是没有除以127。 +1) 8-bit 量化训练中采用有符号的 int8 进行量化,且所有的 scale 都是没有除以 127。 -2)以上描述中仅`fake_dequantize_abs_max`op将scale的一部分作为属性值,其他op的scale均作为输入Variable(Tensor)。 +2)以上描述中仅`fake_dequantize_abs_max`op 将 scale 的一部分作为属性值,其他 op 的 scale 均作为输入 Variable(Tensor)。 -3)若之后为量化训练添加新的量化op或反量化op,**建议使用Variable(Tensor)作为scale的数据传递方式**。因为量化训练的主要目的就是为了求取合适的量化/反量化scale,而将这些scale信息作为tensor的方式存储下来会方便后续scale数据向其他格式的转换。 +3)若之后为量化训练添加新的量化 op 或反量化 op,**建议使用 Variable(Tensor)作为 scale 的数据传递方式**。因为量化训练的主要目的就是为了求取合适的量化/反量化 scale,而将这些 scale 信息作为 tensor 的方式存储下来会方便后续 scale 数据向其他格式的转换。 ## 4. 附录图解


-图1: 动态量化,其中卷积权重已预先进行量化 +图 1: 动态量化,其中卷积权重已预先进行量化


-图2: 静态量化,其中卷积权重已预先进行量化 +图 2: 静态量化,其中卷积权重已预先进行量化


-图3: 分channel量化,其中卷积权重已预先进行分channel量化 +图 3: 分 channel 量化,其中卷积权重已预先进行分 channel 量化

diff --git a/docs/dev_guides/Overview_cn.md b/docs/dev_guides/Overview_cn.md index 321dc61fc62..af1ead4cf4a 100644 --- a/docs/dev_guides/Overview_cn.md +++ b/docs/dev_guides/Overview_cn.md @@ -4,10 +4,10 @@ 飞桨社区非常欢迎你加入到飞桨。你可以通过以下方式参与贡献: -- [新建一个 ISSUE来反馈bug](https://github.com/PaddlePaddle/Paddle/issues/new/choose) -- [新建一个 ISSUE来提出新功能需求](https://github.com/PaddlePaddle/Paddle/issues/new/choose) -- [提PR来修复一个bug](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/10_contribution/local_dev_guide_cn.html) -- [提PR来实现一个新功能](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/10_contribution/local_dev_guide_cn.html) +- [新建一个 ISSUE 来反馈 bug](https://github.com/PaddlePaddle/Paddle/issues/new/choose) +- [新建一个 ISSUE 来提出新功能需求](https://github.com/PaddlePaddle/Paddle/issues/new/choose) +- [提 PR 来修复一个 bug](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/10_contribution/local_dev_guide_cn.html) +- [提 PR 来实现一个新功能](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/10_contribution/local_dev_guide_cn.html) - [优化我们的文档](https://github.com/PaddlePaddle/docs/wiki/%E6%96%87%E6%A1%A3%E8%B4%A1%E7%8C%AE%E6%8C%87%E5%8D%97) 感谢你对飞桨开源项目的贡献! 
diff --git a/docs/dev_guides/api_contributing_guides/api_accpetance_criteria_cn.md b/docs/dev_guides/api_contributing_guides/api_accpetance_criteria_cn.md index 80deb345d36..8ef4739db61 100644 --- a/docs/dev_guides/api_contributing_guides/api_accpetance_criteria_cn.md +++ b/docs/dev_guides/api_contributing_guides/api_accpetance_criteria_cn.md @@ -1,50 +1,50 @@ -# API单测开发及验收规范 +# API 单测开发及验收规范 -## API单测开发规范 +## API 单测开发规范 -API单测的测试点需覆盖以下场景: +API 单测的测试点需覆盖以下场景: -- **编程范式场景**:常规覆盖动态图和静态图的测试场景,如果仅支持其中一种,需要在设计文档RFC和API文档中体现。 -- **硬件场景**:常规需覆盖CPU、GPU两种测试场景,如果仅支持其中一种,需要在设计文档RFC和API文档中体现。部分需覆盖XPU、ARM等硬件场景。 -- **Tensor精度场景**:常规需要支持FP32、FP64,部分需支持FP16、INT8、INT16、INT32、INT64等。 -- **参数组合场景**:常规覆盖API的全部入参,需要对全部入参进行参数有效性和边界值测试,同时可选参数也需有相应的测试覆盖。 +- **编程范式场景**:常规覆盖动态图和静态图的测试场景,如果仅支持其中一种,需要在设计文档 RFC 和 API 文档中体现。 +- **硬件场景**:常规需覆盖 CPU、GPU 两种测试场景,如果仅支持其中一种,需要在设计文档 RFC 和 API 文档中体现。部分需覆盖 XPU、ARM 等硬件场景。 +- **Tensor 精度场景**:常规需要支持 FP32、FP64,部分需支持 FP16、INT8、INT16、INT32、INT64 等。 +- **参数组合场景**:常规覆盖 API 的全部入参,需要对全部入参进行参数有效性和边界值测试,同时可选参数也需有相应的测试覆盖。 - **计算精度**:需要保证前向计算、反向计算的精度正确性。 - - 前向计算:需要有通过numpy或其他数学方法实现的函数的对比结果。 - - 反向计算:需要复用现有单测框架反向计算验证方式保障反向正确性。注意:1)使用Python组合方式新增的API,由于反向计算已经在各组合API单测中分别验证了,因此,该API的反向计算不要求验证。2)如现有单测框架无法满足要求,需要通过numpy推导或函数直接实现反向等方式验证反向计算结果正确性。 + - 前向计算:需要有通过 numpy 或其他数学方法实现的函数的对比结果。 + - 反向计算:需要复用现有单测框架反向计算验证方式保障反向正确性。注意:1)使用 Python 组合方式新增的 API,由于反向计算已经在各组合 API 单测中分别验证了,因此,该 API 的反向计算不要求验证。2)如现有单测框架无法满足要求,需要通过 numpy 推导或函数直接实现反向等方式验证反向计算结果正确性。 - **异常测试**:需对于参数异常值输入,应该有友好的报错信息及异常反馈。 - 除了以上,还需注意: - - [OP单测必须使用大尺寸输入](https://github.com/PaddlePaddle/Paddle/wiki/OP-test-input-shape-requirements) - - [反向Op必须调用check_grad](https://github.com/PaddlePaddle/Paddle/wiki/Gradient-Check-Is-Required-for-Op-Test) - - [单测精度中atol, rtol, eps, max_relative_error, 不允许自行放大阈值](https://github.com/PaddlePaddle/Paddle/wiki/OP-test-accuracy-requirements) - - [OP单测精度必须覆盖float64](https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64) - - 
[Op单测必须通过“编译时/运行时一致性检查”](https://github.com/PaddlePaddle/Paddle/wiki/Compile_vs_Runtime-Check-Specification) - - [Sequence相关Op单测中必须包含batch size为1的LoDTensor输入](https://github.com/PaddlePaddle/Paddle/wiki/It-is-required-to-include-LoDTensor-input-with-batch_size=1-in-sequence-OP-test) - - [Sequence相关Op单测中必须包含instance size为0的LoDTensor输入](https://github.com/PaddlePaddle/Paddle/wiki/It-is-required-to-include-LoDTensor-input-with-instance_size=0-in-sequence-OP-test) + - [OP 单测必须使用大尺寸输入](https://github.com/PaddlePaddle/Paddle/wiki/OP-test-input-shape-requirements) + - [反向 Op 必须调用 check_grad](https://github.com/PaddlePaddle/Paddle/wiki/Gradient-Check-Is-Required-for-Op-Test) + - [单测精度中 atol, rtol, eps, max_relative_error, 不允许自行放大阈值](https://github.com/PaddlePaddle/Paddle/wiki/OP-test-accuracy-requirements) + - [OP 单测精度必须覆盖 float64](https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64) + - [Op 单测必须通过“编译时/运行时一致性检查”](https://github.com/PaddlePaddle/Paddle/wiki/Compile_vs_Runtime-Check-Specification) + - [Sequence 相关 Op 单测中必须包含 batch size 为 1 的 LoDTensor 输入](https://github.com/PaddlePaddle/Paddle/wiki/It-is-required-to-include-LoDTensor-input-with-batch_size=1-in-sequence-OP-test) + - [Sequence 相关 Op 单测中必须包含 instance size 为 0 的 LoDTensor 输入](https://github.com/PaddlePaddle/Paddle/wiki/It-is-required-to-include-LoDTensor-input-with-instance_size=0-in-sequence-OP-test) -## API单测验收规范 +## API 单测验收规范 -API单测的验收包含两方面,一方面是要验收是否符合上述的开发规范,另一方面要验收是否符合以下的通用规范: +API 单测的验收包含两方面,一方面是要验收是否符合上述的开发规范,另一方面要验收是否符合以下的通用规范: - **命名规范**: - - 单测中需要有充分的断言检查,单测case禁止使用test1/test2等无实际含义的命名方式。 - - API单测命名、参数命名、暴露方式、代码目录层级需要与设计文档保持一致,可参考[API通用设计文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.html)要求。 + - 单测中需要有充分的断言检查,单测 case 禁止使用 test1/test2 等无实际含义的命名方式。 + - API 单测命名、参数命名、暴露方式、代码目录层级需要与设计文档保持一致,可参考[API 
通用设计文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.html)要求。 - **提交规范**: - - 单元测试内容需要和开发代码放在同一个PR提交,后续修改也需要基于此PR。 - - 对于API单测增强任务,需在PR描述中(可参考 [PR41191](https://github.com/PaddlePaddle/Paddle/pull/41191))写明每个算子缺失的单测、问题定位及修复思路的简单描述 -- **覆盖率规范**:PR需要通过所有的CI验证,且`PR-CI-Coverage`需要满足新增代码行覆盖率达到90%以上,覆盖率信息可通过CI详情页面查看,如下: + - 单元测试内容需要和开发代码放在同一个 PR 提交,后续修改也需要基于此 PR。 + - 对于 API 单测增强任务,需在 PR 描述中(可参考 [PR41191](https://github.com/PaddlePaddle/Paddle/pull/41191))写明每个算子缺失的单测、问题定位及修复思路的简单描述 +- **覆盖率规范**:PR 需要通过所有的 CI 验证,且`PR-CI-Coverage`需要满足新增代码行覆盖率达到 90%以上,覆盖率信息可通过 CI 详情页面查看,如下: ![coverage_not_pass.png](./images/coverage_not_pass.png) - **耗时规范**: - - 新增单测的执行不允许超过15s,`PR-CI-Coverage`有相应的检查,检查逻辑可见 `tools/check_added_ut.sh`。如果你新增的单测无法在15s内执行完成,可以尝试减少数据维度(可见[链接](https://github.com/PaddlePaddle/Paddle/pull/42267/commits/17344408d69f10e9fe5cf3200be1e381bc454694#diff-02f1ef59dfd03557054d7b20c9128ac9828735fc1f8be9e44d0587a96a06f685L236))或通过在[CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/a1d87776ac500b1a3c3250dd9897f103515909c6/python/paddle/fluid/tests/unittests/CMakeLists.txt#L617-L618)指定该单测的Timeout时间。如果你通过修改Timeout时间,你需要在PR描述中说明原因,同时会有相关同学review后进行approve后才能合入。原则上Timeout设定时间不能超过120s。 + - 新增单测的执行不允许超过 15s,`PR-CI-Coverage`有相应的检查,检查逻辑可见 `tools/check_added_ut.sh`。如果你新增的单测无法在 15s 内执行完成,可以尝试减少数据维度(可见[链接](https://github.com/PaddlePaddle/Paddle/pull/42267/commits/17344408d69f10e9fe5cf3200be1e381bc454694#diff-02f1ef59dfd03557054d7b20c9128ac9828735fc1f8be9e44d0587a96a06f685L236))或通过在[CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/a1d87776ac500b1a3c3250dd9897f103515909c6/python/paddle/fluid/tests/unittests/CMakeLists.txt#L617-L618)指定该单测的 Timeout 时间。如果你通过修改 Timeout 时间,你需要在 PR 描述中说明原因,同时会有相关同学 review 后进行 approve 后才能合入。原则上 Timeout 设定时间不能超过 120s。 ![add_ut.png](./images/add_ut.png) - - 
现有单测的修改原则上不允许超过120s,`PR-CI-Coverage`有相应的检查,若有特殊情况可修改[CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/a1d87776ac500b1a3c3250dd9897f103515909c6/python/paddle/fluid/tests/unittests/CMakeLists.txt#L617-L618)文件中该单测的Timeout时间,处理逻辑同上诉新增单测超过15s一致。 -- **单测retry机制**:为提高单测执行效率,所有的单测均以一定的并发度执行,而这样的策略可能会引起单测随机挂。因此对失败的单测设定了retry机制,一共retry四次,如果成功率未达到50%,就认为该单测可能存在问题,CI失败。 + - 现有单测的修改原则上不允许超过 120s,`PR-CI-Coverage`有相应的检查,若有特殊情况可修改[CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/a1d87776ac500b1a3c3250dd9897f103515909c6/python/paddle/fluid/tests/unittests/CMakeLists.txt#L617-L618)文件中该单测的 Timeout 时间,处理逻辑同上诉新增单测超过 15s 一致。 +- **单测 retry 机制**:为提高单测执行效率,所有的单测均以一定的并发度执行,而这样的策略可能会引起单测随机挂。因此对失败的单测设定了 retry 机制,一共 retry 四次,如果成功率未达到 50%,就认为该单测可能存在问题,CI 失败。 ## 交流与改进 -PR内容会有Paddle同学及Paddle QA同学进行review,确保完整覆盖了待测功能点后,会给予approved。 +PR 内容会有 Paddle 同学及 Paddle QA 同学进行 review,确保完整覆盖了待测功能点后,会给予 approved。 -若review过程中发现测试缺失和遗漏的测试点,会通过github代码行comment的和request changes的方式交流改进,待PR修改完毕后给予approved。 +若 review 过程中发现测试缺失和遗漏的测试点,会通过 github 代码行 comment 的和 request changes 的方式交流改进,待 PR 修改完毕后给予 approved。 ## 后续维护 -代码成功merge后,如果发现对框架造成了严重影响,例如阻塞了某些方向的功能开发,或者和部分功能存在严重冲突导致Bug,会对代码进行Revert并通知贡献者。待对PR修复后重新合入。 +代码成功 merge 后,如果发现对框架造成了严重影响,例如阻塞了某些方向的功能开发,或者和部分功能存在严重冲突导致 Bug,会对代码进行 Revert 并通知贡献者。待对 PR 修复后重新合入。 diff --git a/docs/dev_guides/api_contributing_guides/api_contributing_guides_cn.rst b/docs/dev_guides/api_contributing_guides/api_contributing_guides_cn.rst index b03bbd15edc..a534cfbce61 100644 --- a/docs/dev_guides/api_contributing_guides/api_contributing_guides_cn.rst +++ b/docs/dev_guides/api_contributing_guides/api_contributing_guides_cn.rst @@ -1,8 +1,8 @@ ##################### -新增API 开发&提交流程 +新增 API 开发&提交流程 ##################### -飞桨作为一个开源项目,我们鼓励生态开发者为paddlepaddle贡献API,当你想要为飞桨开发新API功能时,请遵守此API贡献流程在Github上完成文档设计和代码设计并提交至相应的github仓库。 +飞桨作为一个开源项目,我们鼓励生态开发者为 paddlepaddle 贡献 API,当你想要为飞桨开发新 API 功能时,请遵守此 API 贡献流程在 Github 上完成文档设计和代码设计并提交至相应的 github 仓库。 API 贡献流程如下 @@ -20,19 +20,19 @@ API 贡献流程如下 
**1、任务认领** -如果你想参与飞桨 API 开源贡献,可以在Github paddle 项目上的issue 区域进行任务认领,飞桨官网会发布一些新增 API 的任务,用户可以认领飞桨发布的任务,也可以产出自己的新增API想法,并按照此贡献流程提交设计文档。 +如果你想参与飞桨 API 开源贡献,可以在 Github paddle 项目上的 issue 区域进行任务认领,飞桨官网会发布一些新增 API 的任务,用户可以认领飞桨发布的任务,也可以产出自己的新增 API 想法,并按照此贡献流程提交设计文档。 **2、签订贡献者许可协议(CLA)** 对于你贡献的源代码,你将拥有合法的知识产权,为了保护你的权益,你需要先签署一份 `贡献者许可协议 `_ 。 -注意:当你签署完CLA后,我们才会继续对您提交的设计方案和实现代码进行评审及合入 +注意:当你签署完 CLA 后,我们才会继续对您提交的设计方案和实现代码进行评审及合入 -**3、提交API设计文档** +**3、提交 API 设计文档** -API设计文档的目的是为了社区开发者更容易的参与开源项目共建,开发者通过与飞桨专家和社区其他用户进行广泛的交流,完善设计方案和pr请求,在后续提交实现代码之前确保API设计方案与飞桨设计理念一致,也让后续代码评审及代码合入变得更加容易。 +API 设计文档的目的是为了社区开发者更容易的参与开源项目共建,开发者通过与飞桨专家和社区其他用户进行广泛的交流,完善设计方案和 pr 请求,在后续提交实现代码之前确保 API 设计方案与飞桨设计理念一致,也让后续代码评审及代码合入变得更加容易。 -当你想要发起一个新增API的贡献时,你需要先对API进行开发设计,并提交一份API设计文档。设计时请遵守飞桨API设计及命名规范。同时,飞桨为大家提供了API 设计文档撰写模版和API设计文档示例。完成后,你需要将设计文档提交至Github开发者社区仓库,并根据本地开发指南提交PR。 +当你想要发起一个新增 API 的贡献时,你需要先对 API 进行开发设计,并提交一份 API 设计文档。设计时请遵守飞桨 API 设计及命名规范。同时,飞桨为大家提供了 API 设计文档撰写模版和 API 设计文档示例。完成后,你需要将设计文档提交至 Github 开发者社区仓库,并根据本地开发指南提交 PR。 此过程请参考相应的开发规范,并提交以下内容: @@ -42,57 +42,57 @@ API设计文档的目的是为了社区开发者更容易的参与开源项目 "1、API 设计文档", "- `API 设计及命名规范 <./api_design_guidelines_standard_cn.html>`_ - `API 设计文档模版 `_ - - `API 设计文档示例 `_ ", "`Github开发者社区仓库 `_" + - `API 设计文档示例 `_ ", "`Github 开发者社区仓库 `_" -同时,飞桨为大家提供了 `API 设计文档模版 `_ 和 `API 设计文档demo `_ ,你可以使用这份模版撰写API设计文档。完成后,你需要将设计文档提交至 `Github开发者社区仓库 `_ ,并根据 `本地开发指南 `_ 提交PR。 +同时,飞桨为大家提供了 `API 设计文档模版 `_ 和 `API 设计文档 demo `_ ,你可以使用这份模版撰写 API 设计文档。完成后,你需要将设计文档提交至 `Github 开发者社区仓库 `_ ,并根据 `本地开发指南 `_ 提交 PR。 **4、设计文档评审&公示** -飞桨专家对你提交的API设计文档进行审核,同时此文档也将接受来自开发者社区的评估,大家可以在pr评论区进行广泛的交流。开发者根据飞桨专家和其他开发者的反馈意见进行讨论并做出修改,最终评审通过后会在开源社区中同步。 +飞桨专家对你提交的 API 设计文档进行审核,同时此文档也将接受来自开发者社区的评估,大家可以在 pr 评论区进行广泛的交流。开发者根据飞桨专家和其他开发者的反馈意见进行讨论并做出修改,最终评审通过后会在开源社区中同步。 -如果你的API功能比较复杂,我们可能会在社区中针对API设计文档发起评审会议,会提前在pr评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注pr 中最新动态,你也可以在评论区自行申请评审会。会议结束后,我们会在pr中发出会议结论。 +如果你的 API 功能比较复杂,我们可能会在社区中针对 API 设计文档发起评审会议,会提前在 pr 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注 pr 中最新动态,你也可以在评论区自行申请评审会。会议结束后,我们会在 pr 中发出会议结论。 
**5、公布评审结果&合入文档** -当设计文档评审&公示通过后,你的API设计文档将会合入至 `飞桨开发者社区仓库 `_ ,并在开源社区中同步。 +当设计文档评审&公示通过后,你的 API 设计文档将会合入至 `飞桨开发者社区仓库 `_ ,并在开源社区中同步。 -**6、提交API实现代码** +**6、提交 API 实现代码** -当API设计文档合入后,开发者根据评审通过的API设计内容进行代码开发。此过程请参考相应的开发规范,并提交以下内容: +当 API 设计文档合入后,开发者根据评审通过的 API 设计内容进行代码开发。此过程请参考相应的开发规范,并提交以下内容: .. csv-table:: :header: "提交内容", "参考文档", "提交位置" :widths: 10, 30,30 - "1、API实现代码", "- `API 设计及命名规范 <./api_design_guidelines_standard_cn.html>`_ - - Python API开发指南(请期待) - - `C++ API开发指南 <./new_cpp_op_cn.html>`_ - ", "`Github飞桨训练框架仓库 `_" - "2、API英文文档", "- `API文档书写规范 `_", "`Github飞桨训练框架仓库 `_" - "3、API中文文档", "- `API文档书写规范 `_", "`Github飞桨文档仓库 `_" - "4、API单测代码", "- `API 验收标准 <./api_accpetance_criteria_cn.html>`_", "`Github飞桨训练框架仓库 `_" + "1、API 实现代码", "- `API 设计及命名规范 <./api_design_guidelines_standard_cn.html>`_ + - Python API 开发指南(请期待) + - `C++ API 开发指南 <./new_cpp_op_cn.html>`_ + ", "`Github 飞桨训练框架仓库 `_" + "2、API 英文文档", "- `API 文档书写规范 `_", "`Github 飞桨训练框架仓库 `_" + "3、API 中文文档", "- `API 文档书写规范 `_", "`Github 飞桨文档仓库 `_" + "4、API 单测代码", "- `API 验收标准 <./api_accpetance_criteria_cn.html>`_", "`Github 飞桨训练框架仓库 `_" -当开发者完成以上代码设计后,需要将代码提交至 `Github飞桨训练框架仓库 `_ ,并根据 `本地开发指南 `_ 提交PR、准备接受社区的评审。 +当开发者完成以上代码设计后,需要将代码提交至 `Github 飞桨训练框架仓库 `_ ,并根据 `本地开发指南 `_ 提交 PR、准备接受社区的评审。 **7、实现代码评审&公示** -飞桨官方会及时安排专家进行API代码审核,代码也将接受来自开发者社区的评审,开发者可以在pr评论区进行广泛的交流,开发者对飞桨专家和其他开发者的反馈意见进行讨论并做出修改,最终评审通过后会在开源社区中同步。 +飞桨官方会及时安排专家进行 API 代码审核,代码也将接受来自开发者社区的评审,开发者可以在 pr 评论区进行广泛的交流,开发者对飞桨专家和其他开发者的反馈意见进行讨论并做出修改,最终评审通过后会在开源社区中同步。 -如果你的API 功能比较复杂,官方可能会在社区中针对API实现代码发起评审会议,会提前在pr评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注pr 中最新动态,你也可以在评论区自行申请评审会。会议结束后,我们会在pr 中发出会议结论。 +如果你的 API 功能比较复杂,官方可能会在社区中针对 API 实现代码发起评审会议,会提前在 pr 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注 pr 中最新动态,你也可以在评论区自行申请评审会。会议结束后,我们会在 pr 中发出会议结论。 **8、公布评审结果&合入代码** -当设计文档评审&公示通过后,官方会在开源社区中同步,你的API 实现代码将会合入至 `Github飞桨训练框架仓库 `_ 。 +当设计文档评审&公示通过后,官方会在开源社区中同步,你的 API 实现代码将会合入至 `Github 飞桨训练框架仓库 `_ 。 -**9、通过API测试及验收** +**9、通过 API 测试及验收** -当你的代码合入 `Github飞桨训练框架仓库 `_ 
后,官方会对你的代码进行集成测试,并通知你测试结果。如果测试通过,恭喜你贡献流程已经全部完成;如果测试不通过,我们会联系你进行代码修复,请及时关注github上的最新动态; +当你的代码合入 `Github 飞桨训练框架仓库 `_ 后,官方会对你的代码进行集成测试,并通知你测试结果。如果测试通过,恭喜你贡献流程已经全部完成;如果测试不通过,我们会联系你进行代码修复,请及时关注 github 上的最新动态; -注意:代码合入develop分之后的第二天你可以从官网下载develop 编译的安装包体验此功能。飞桨后续也会将此功能纳入正式版的发版计划~ +注意:代码合入 develop 分之后的第二天你可以从官网下载 develop 编译的安装包体验此功能。飞桨后续也会将此功能纳入正式版的发版计划~ **10、贡献完成** diff --git a/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md b/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md index aba7d896dcd..b9da89a0beb 100644 --- a/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md +++ b/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md @@ -1,88 +1,88 @@ -# 飞桨API的设计和命名规范 +# 飞桨 API 的设计和命名规范 -## API设计规范 +## API 设计规范 ### 总体原则 -1. 单一职责,每个API应只完成单一的任务 +1. 单一职责,每个 API 应只完成单一的任务 2. 接口设计应考虑通用性,避免只适用于某些单一场景 -3. 符合行业标准,综合参考开源深度学习框架的接口设计,借鉴各框架的优点;除非飞桨API设计有明确优势的情况下可保留自有特色,否则需要符合行业标准 -4. 功能类似的接口,参数名和行为需要保持一致,比如,lstm和gru +3. 符合行业标准,综合参考开源深度学习框架的接口设计,借鉴各框架的优点;除非飞桨 API 设计有明确优势的情况下可保留自有特色,否则需要符合行业标准 +4. 功能类似的接口,参数名和行为需要保持一致,比如,lstm 和 gru 5. 优先保证清晰,然后考虑简洁,避免使用不容易理解的缩写 6. 历史一致,如无必要原因,应避免接口修改 -7. 动静统一,如无特殊原因,动态图和静态图下的API输入、输出要求一致。开发者使用相同的代码,均可以在动态图和静态图模式下执行 +7. 
动静统一,如无特殊原因,动态图和静态图下的 API 输入、输出要求一致。开发者使用相同的代码,均可以在动态图和静态图模式下执行 ### 动态图与静态图模式 -关于飞桨框架支持的开发模式,为了便于用户理解,代码和API均采用“动态图”和“静态图”的说法;文档优先使用“动态图”和“静态图”的说法,不推荐使用“命令式编程”和“声明式编程”的说法。 +关于飞桨框架支持的开发模式,为了便于用户理解,代码和 API 均采用“动态图”和“静态图”的说法;文档优先使用“动态图”和“静态图”的说法,不推荐使用“命令式编程”和“声明式编程”的说法。 -### API目录结构规范 +### API 目录结构规范 -- 公开API代码应该放置到以下列出的对应位置目录/文件中,并添加到目录下\_\_init\_\_.py的 all列表中。非公开API不能添加到all列表中 +- 公开 API 代码应该放置到以下列出的对应位置目录/文件中,并添加到目录下\_\_init\_\_.py 的 all 列表中。非公开 API 不能添加到 all 列表中 -| paddle | paddle基础API,Tensor操作相关API | +| paddle | paddle 基础 API,Tensor 操作相关 API | | ------------------------------ | ------------------------------------------------------------ | -| paddle.tensor | 跟tensor操作相关的API,比如:创建zeros, 矩阵运算matmul, 变换concat, 计算add, 查找argmax等 | -| paddle.nn | 跟组网相关的API,比如:Linear, Conv2D,损失函数,卷积,LSTM等,激活函数等 | -| paddle.nn.functional | 跟组网相关的函数类API,比如:conv2d、avg_pool2d等 | -| paddle.nn.initializer | 网络初始化相关API,比如Normal、Uniform等 | -| paddle.nn.utils | 网络相关工具类API,比如weight_norm、spectral_norm等 | -| paddle.static.nn | 静态图下组网专用API,比如:输入占位符data/Input,控制流while_loop/cond | -| paddle.static | 静态图下基础框架相关API,比如:Variable, Program, Executor等 | -| paddle.optimizer | 优化算法相关API,比如:SGD、Adagrad、Adam等 | -| paddle.optimizer.lr(文件) | 学习率策略相关API,比如LinearWarmup、LRScheduler等 | -| paddle.metric | 评估指标计算相关的API,比如:accuracy, auc等 | -| paddle.io | 数据输入输出相关API,比如:save, load, Dataset, DataLoader等 | -| paddle.device(文件) | 设备管理相关API,通用类,比如:CPUPlace等 | -| paddle.device.cuda | 设备管理相关API,CUDA相关,比如:CUDAPlace等 | -| paddle.distributed | 分布式相关基础API | -| paddle.distributed.fleet | 分布式相关高层API | -| paddle.distributed.fleet.utils | 分布式高层API文件系统相关API,比如LocalFS等 | -| paddle.distributed.utils | 分布式工具类API,比如get_host_name_ip等 | -| paddle.vision | 视觉领域API,基础操作且不属于子目录所属大类 | -| paddle.vision.datasets | 视觉领域API,公开数据集相关,比如Cifar10、MNIST等 | -| paddle.vision.models | 视觉领域API,模型相关,比如ResNet、LeNet等 | -| paddle.vision.transforms | 视觉领域API,数据预处理相关,比如CenterCrop、hflip等 | -| paddle.vision.ops | 视觉领域API,基础op相关,比如DeformConv2D、yolo_box等 | -| paddle.text 
| NLP领域API, 比如,数据集,数据处理,常用网络结构,比如transformer | -| paddle.utils | paddle.utils 目录下包含飞桨框架工具类的API,且不属于子目录所属大类 | -| paddle.utils.download | 工具类自动下载相关API,比如get_weights_path_from_url | -| paddle.utils.profiler | 工具类通用性能分析器相关API,比如profiler等 | -| paddle.utils.cpp_extension | 工具类C++扩展相关的API,比如CppExtension、CUDAExtension等 | -| paddle.utils.unique_name | 工具类命名相关API,比如generate、guard等 | -| paddle.amp | paddle.amp 目录下包含飞桨框架支持的动态图自动混合精度(AMP)相关的API,比如GradScaler等 | -| paddle.jit | paddle.jit 目录下包含飞桨框架支持动态图转静态图相关的API,比如to_static等 | -| paddle.distribution | paddle.distribution 目录下包含飞桨框架支持的概率分布相关的API,比如Normal等 | -| paddle.regularizer | 正则相关的API,比如L1Decay等 | -| paddle.sysconfig | Paddle系统路径相关API,比如get_include、get_lib等 | -| paddle.callbacks | paddle.callbacks 目录下包含飞桨框架支持的回调函数相关的API,比如Callback等 | -| paddle.hub | paddle.hub 目录下包含飞桨框架模型拓展相关的API以及支持的模型库列表,比如list等 | -| paddle.autograd | 自动梯度求导相关,比如grad、backward等 | -| paddle.inference | paddle预测相关,比如Predictor等 | -| paddle.onnx | onnx导出相关,比如onnx.export | +| paddle.tensor | 跟 tensor 操作相关的 API,比如:创建 zeros, 矩阵运算 matmul, 变换 concat, 计算 add, 查找 argmax 等 | +| paddle.nn | 跟组网相关的 API,比如:Linear, Conv2D,损失函数,卷积,LSTM 等,激活函数等 | +| paddle.nn.functional | 跟组网相关的函数类 API,比如:conv2d、avg_pool2d 等 | +| paddle.nn.initializer | 网络初始化相关 API,比如 Normal、Uniform 等 | +| paddle.nn.utils | 网络相关工具类 API,比如 weight_norm、spectral_norm 等 | +| paddle.static.nn | 静态图下组网专用 API,比如:输入占位符 data/Input,控制流 while_loop/cond | +| paddle.static | 静态图下基础框架相关 API,比如:Variable, Program, Executor 等 | +| paddle.optimizer | 优化算法相关 API,比如:SGD、Adagrad、Adam 等 | +| paddle.optimizer.lr(文件) | 学习率策略相关 API,比如 LinearWarmup、LRScheduler 等 | +| paddle.metric | 评估指标计算相关的 API,比如:accuracy, auc 等 | +| paddle.io | 数据输入输出相关 API,比如:save, load, Dataset, DataLoader 等 | +| paddle.device(文件) | 设备管理相关 API,通用类,比如:CPUPlace 等 | +| paddle.device.cuda | 设备管理相关 API,CUDA 相关,比如:CUDAPlace 等 | +| paddle.distributed | 分布式相关基础 API | +| paddle.distributed.fleet | 分布式相关高层 API | +| paddle.distributed.fleet.utils | 分布式高层 API 文件系统相关 API,比如 
LocalFS 等 | +| paddle.distributed.utils | 分布式工具类 API,比如 get_host_name_ip 等 | +| paddle.vision | 视觉领域 API,基础操作且不属于子目录所属大类 | +| paddle.vision.datasets | 视觉领域 API,公开数据集相关,比如 Cifar10、MNIST 等 | +| paddle.vision.models | 视觉领域 API,模型相关,比如 ResNet、LeNet 等 | +| paddle.vision.transforms | 视觉领域 API,数据预处理相关,比如 CenterCrop、hflip 等 | +| paddle.vision.ops | 视觉领域 API,基础 op 相关,比如 DeformConv2D、yolo_box 等 | +| paddle.text | NLP 领域 API, 比如,数据集,数据处理,常用网络结构,比如 transformer | +| paddle.utils | paddle.utils 目录下包含飞桨框架工具类的 API,且不属于子目录所属大类 | +| paddle.utils.download | 工具类自动下载相关 API,比如 get_weights_path_from_url | +| paddle.utils.profiler | 工具类通用性能分析器相关 API,比如 profiler 等 | +| paddle.utils.cpp_extension | 工具类 C++扩展相关的 API,比如 CppExtension、CUDAExtension 等 | +| paddle.utils.unique_name | 工具类命名相关 API,比如 generate、guard 等 | +| paddle.amp | paddle.amp 目录下包含飞桨框架支持的动态图自动混合精度(AMP)相关的 API,比如 GradScaler 等 | +| paddle.jit | paddle.jit 目录下包含飞桨框架支持动态图转静态图相关的 API,比如 to_static 等 | +| paddle.distribution | paddle.distribution 目录下包含飞桨框架支持的概率分布相关的 API,比如 Normal 等 | +| paddle.regularizer | 正则相关的 API,比如 L1Decay 等 | +| paddle.sysconfig | Paddle 系统路径相关 API,比如 get_include、get_lib 等 | +| paddle.callbacks | paddle.callbacks 目录下包含飞桨框架支持的回调函数相关的 API,比如 Callback 等 | +| paddle.hub | paddle.hub 目录下包含飞桨框架模型拓展相关的 API 以及支持的模型库列表,比如 list 等 | +| paddle.autograd | 自动梯度求导相关,比如 grad、backward 等 | +| paddle.inference | paddle 预测相关,比如 Predictor 等 | +| paddle.onnx | onnx 导出相关,比如 onnx.export | | paddle.incubate | 新增功能孵化目录 | -- 常用的API可以在更高层级建立别名,当前规则如下: - 1. paddle.tensor目录下的API,均在paddle根目录建立别名,其他所有API在paddle根目录下均没有别名。 - 2. paddle.nn目录下除了functional目录以外的所有API,在paddle.nn目录下均有别名。 +- 常用的 API 可以在更高层级建立别名,当前规则如下: + 1. paddle.tensor 目录下的 API,均在 paddle 根目录建立别名,其他所有 API 在 paddle 根目录下均没有别名。 + 2. 
paddle.nn 目录下除了 functional 目录以外的所有 API,在 paddle.nn 目录下均有别名。 ```python - paddle.nn.functional.mse_loss # functional下的函数不建立别名,使用完整名称 - paddle.nn.Conv2D # 为paddle.nn.layer.conv.Conv2D建立的别名 + paddle.nn.functional.mse_loss # functional 下的函数不建立别名,使用完整名称 + paddle.nn.Conv2D # 为 paddle.nn.layer.conv.Conv2D 建立的别名 ``` - 1. 一些特殊情况比如特别常用的API会直接在paddle下建立别名 + 1. 一些特殊情况比如特别常用的 API 会直接在 paddle 下建立别名 ```python - paddle.tanh # 为常用函数paddle.tensor.math.tanh建立的别名 - paddle.linspace# 为常用函数paddle.fluid.layers.linspace建立的别名 + paddle.tanh # 为常用函数 paddle.tensor.math.tanh 建立的别名 + paddle.linspace# 为常用函数 paddle.fluid.layers.linspace 建立的别名 ``` -### API行为定义规范 +### API 行为定义规范 -- 动静统一要求。除了paddle.static目录中的API外,其他目录的所有API原则上均需要支持动态图和静态图模式下的执行,且输入、输出要求一致。开发者使用相同的代码,可以在动态图和静态图模式下执行。 +- 动静统一要求。除了 paddle.static 目录中的 API 外,其他目录的所有 API 原则上均需要支持动态图和静态图模式下的执行,且输入、输出要求一致。开发者使用相同的代码,可以在动态图和静态图模式下执行。 ```python #静态图专用 @@ -95,14 +95,14 @@ ``` -- API不需要用户指定执行硬件,框架可以自动根据当前配置,选择执行的库。 +- API 不需要用户指定执行硬件,框架可以自动根据当前配置,选择执行的库。 - 设置缺省参数类型 - 组网类API去除dtype参数,比如Linear, Conv2d等,通过使用paddle.set_default_dtype和paddle.get_default_dtype设置全局的数据类型。 + 组网类 API 去除 dtype 参数,比如 Linear, Conv2d 等,通过使用 paddle.set_default_dtype 和 paddle.get_default_dtype 设置全局的数据类型。 - 数据类型转换规则 - 1. 不支持Tensor和Tensor之间的隐式数据类型转换,隐藏类型转换虽然方便,但风险很高,很容易出现转换错误。如果发现类型不匹配,进行隐式类型转换,一旦转换造成精度损失,会导致模型的精度降低,由于没有任何提示,问题非常难以追查;而如果直接向用户报错或者警告,用户确认后,修改起来会很容易。避免了出错的风险。 + 1. 不支持 Tensor 和 Tensor 之间的隐式数据类型转换,隐藏类型转换虽然方便,但风险很高,很容易出现转换错误。如果发现类型不匹配,进行隐式类型转换,一旦转换造成精度损失,会导致模型的精度降低,由于没有任何提示,问题非常难以追查;而如果直接向用户报错或者警告,用户确认后,修改起来会很容易。避免了出错的风险。 ```python import paddle @@ -113,33 +113,33 @@ # ......\paddle\fluid\dygraph\math_op_patch.py:239: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.float32, but right dtype is paddle.int32, the right dtype will convert to paddle.float32 format(lhs_dtype, rhs_dtype, lhs_dtype)) ``` - 2. 支持Tensor和python Scalar之间的隐式类型转换,当 Tensor 的数据类型和 python Scalar + 2. 
支持 Tensor 和 python Scalar 之间的隐式类型转换,当 Tensor 的数据类型和 python Scalar 是同一类的数据类型时(都是整型,或者都是浮点型),或者 Tensor 是浮点型而 python Scalar 是 整型的,默认会将 python Scalar 转换成 Tensor 的数据类型。而如果 Tensor 的数据类型是整型而 python Scalar 是浮点型时,计算结果会是 float32 类型的。 ```python import paddle a = paddle.to_tensor([1.0], dtype='float32') - b = a + 1 # 由于python scalar默认采用int64, 转换后b的类型为'float32' - c = a + 1.0 # 虽然 python scalar 是 float64, 但计算结果c的类型为'float32' + b = a + 1 # 由于 python scalar 默认采用 int64, 转换后 b 的类型为'float32' + c = a + 1.0 # 虽然 python scalar 是 float64, 但计算结果 c 的类型为'float32' a = paddle.to_tensor([1], dtype='int32') - b = a + 1.0 # 虽然 python scalar 是 float64, 但计算结果b的类型为 'float32 - c = a + 1 # 虽然 python scalar 是 int64, 但计算结果c的类型为'int32' + b = a + 1.0 # 虽然 python scalar 是 float64, 但计算结果 b 的类型为 'float32 + c = a + 1 # 虽然 python scalar 是 int64, 但计算结果 c 的类型为'int32' ``` ### 数据类型规范 - 参数数据类型 - 对于loss类API,比如cross_entropy, bce_loss等,输入的label需要支持[int32, int64, float32, float64]数据类型。 + 对于 loss 类 API,比如 cross_entropy, bce_loss 等,输入的 label 需要支持[int32, int64, float32, float64]数据类型。 - 返回值数据类型 - 实现需要返回下标indices的API,比如argmax、argmin、argsort、topk、unique等接口时,需要提供dtype参数,用于控制返回值类型是int32或者int64,默认使用dtype=’int64’(与numpy, tf, pytorch保持一致),主要目的是当用户在明确输入数据不超过int32表示范围时,可以手动设置dtype=’int32’来减少显存的占用;对于dtype=’int32’设置,需要对输入数据的下标进行检查,如果超过int32表示范围,通过报错提示用户使用int64数据类型。 + 实现需要返回下标 indices 的 API,比如 argmax、argmin、argsort、topk、unique 等接口时,需要提供 dtype 参数,用于控制返回值类型是 int32 或者 int64,默认使用 dtype=’int64’(与 numpy, tf, pytorch 保持一致),主要目的是当用户在明确输入数据不超过 int32 表示范围时,可以手动设置 dtype=’int32’来减少显存的占用;对于 dtype=’int32’设置,需要对输入数据的下标进行检查,如果超过 int32 表示范围,通过报错提示用户使用 int64 数据类型。 -## API命名规范 +## API 命名规范 -**API的命名应使用准确的深度学习相关英文术语,具体参考附录的中英术语表。** +**API 的命名应使用准确的深度学习相关英文术语,具体参考附录的中英术语表。** ### 类名与方法名的规范 @@ -187,10 +187,10 @@ paddle.cumsum ``` -- API命名时,缩写的使用不应引起歧义或误解;在容易引起歧义或误解的情况下,需要使用全称,比如 +- API 命名时,缩写的使用不应引起歧义或误解;在容易引起歧义或误解的情况下,需要使用全称,比如 ```python - # pytorch使用ge,lt之类的缩写,可读性较差,应保留全称,与numpy和paddle保持一致 + # pytorch 使用 ge,lt 之类的缩写,可读性较差,应保留全称,与 numpy 和 paddle 保持一致 
paddle.tensor.greater_equal paddle.tensor.less_than # optimizer 不使用缩写 @@ -199,13 +199,13 @@ paddle.nn.create_parameter ``` -- 在用于API命名时,常见的缩写列表如下: +- 在用于 API 命名时,常见的缩写列表如下: ```python conv、max、min、prod、norm、gru、lstm、add、func、op、num、cond ``` -- 在用于API命名时,以下建议使用全称,不推荐使用缩写 +- 在用于 API 命名时,以下建议使用全称,不推荐使用缩写 | 不规范命名 | 规范命名 | | :-------- | :----------- | @@ -230,16 +230,16 @@ -- API命名不应包含版本号 +- API 命名不应包含版本号 ```python # 不使用版本号 paddle.nn.multiclass_nms2 ``` -- 常见的数学计算API中的逐元素操作不需要加上elementwise前缀,按照某一轴操作不需要加上reduce前缀,一些例子如下 +- 常见的数学计算 API 中的逐元素操作不需要加上 elementwise 前缀,按照某一轴操作不需要加上 reduce 前缀,一些例子如下 - | paddle2.0之前 | pytorch | numpy | tensorflow | paddle2.0之后 | + | paddle2.0 之前 | pytorch | numpy | tensorflow | paddle2.0 之后 | | :------------- | :----- | :------ | :--------- | :--------------- | | elementwise_add | add | add | add | add | | elementwise_sub | sub | subtract | subtract | subract | @@ -263,26 +263,26 @@ | 库 | 取余整除 | 取余 | 取模整除 | 取模 | | ---------- | :----------------------- | :-------------------- | :--------------- | :---------------------------------- | - | tf | truncatediv | truncatemod | //或floordiv | %或floormod | - | torch | //或floor_divide | fmod | 无 | %或remainder | + | tf | truncatediv | truncatemod | //或 floordiv | %或 floormod | + | torch | //或 floor_divide | fmod | 无 | %或 remainder | | math | 无 | math.fmod | 无 | math.remainder | | python | 无 | 无 | // | % | - | numpy | 无 | 无 | //或floor_divide | %或mod remainder | - | paddle | //或elementwise_div(int) | %或elemtwise_mod(int) | 无 | %或elemtwise_mod(float) | - | paddle 2.0 | truncate_divide(int) | %或truncate_mod(int) | //或floor_divide | %或floor_mod(float)或mod或remainder | + | numpy | 无 | 无 | //或 floor_divide | %或 mod remainder | + | paddle | //或 elementwise_div(int) | %或 elemtwise_mod(int) | 无 | %或 elemtwise_mod(float) | + | paddle 2.0 | truncate_divide(int) | %或 truncate_mod(int) | //或 floor_divide | %或 floor_mod(float)或 mod 或 remainder | - | | paddle2.0之前 | torch | numpy | tensorflow | math | python | paddle2.0之后 | + 
| | paddle2.0 之前 | torch | numpy | tensorflow | math | python | paddle2.0 之后 | | :------: | :----------------------- | :-------------- | :---------------- | :------------ | :------------- | :----- | :---------------------------------- | - | 取余整除 | //或elementwise_div(int) | //或floor_divid | 无 | truncatediv | - | 无 | truncate_divide(int) | - | 取余 | %或elemtwise_mod(int) | fmod | 无 | truncatemod | math.fmod | 无 | %或truncate_mod(int) | - | 取模整除 | - | - | floor_divide | //或floordiv- | - | // | //或floor_divide | - | 取模 | %或elemtwise_mod(float) | %或remainder | %或mod或remainder | %或floormod | math.remainder | % | %或floor_mod(float)或mod或remainder | + | 取余整除 | //或 elementwise_div(int) | //或 floor_divid | 无 | truncatediv | - | 无 | truncate_divide(int) | + | 取余 | %或 elemtwise_mod(int) | fmod | 无 | truncatemod | math.fmod | 无 | %或 truncate_mod(int) | + | 取模整除 | - | - | floor_divide | //或 floordiv- | - | // | //或 floor_divide | + | 取模 | %或 elemtwise_mod(float) | %或 remainder | %或 mod 或 remainder | %或 floormod | math.remainder | % | %或 floor_mod(float)或 mod 或 remainder | -- 常用组网API命名规范 +- 常用组网 API 命名规范 ```python # 卷积: - paddle.nn.Conv2D #采用2D后缀,2D表示维度时通常大写 + paddle.nn.Conv2D #采用 2D 后缀,2D 表示维度时通常大写 paddle.nn.Conv2DTranspose paddle.nn.functional.conv2d paddle.nn.functional.conv2d_transpose @@ -310,23 +310,23 @@ - 参数名可以区分单复数形态,单数表示输入参数是一个或多个变量,复数表示输入明确是含有多个变量的列表 ```python - paddle.nn.Softmax(axis=-1) # axis明确为一个int数 - paddle.squeeze(x, axis=None, dtype=None, keepdim=False, name=None): # axis可以为一数也可以为多个 - paddle.strided_slice(x, axes, starts, ends, strides, name=None) #axis明确是多个数,则参数用复数形式axes + paddle.nn.Softmax(axis=-1) # axis 明确为一个 int 数 + paddle.squeeze(x, axis=None, dtype=None, keepdim=False, name=None): # axis 可以为一数也可以为多个 + paddle.strided_slice(x, axes, starts, ends, strides, name=None) #axis 明确是多个数,则参数用复数形式 axes ``` -- 函数操作只有一个待操作的张量参数时,用x命名;如果有2个待操作的张量参数时,且含义明确时,用x, y命名 +- 函数操作只有一个待操作的张量参数时,用 x 命名;如果有 2 个待操作的张量参数时,且含义明确时,用 x, y 命名 ```python paddle.sum(x, axis=None, dtype=None, 
keepdim=False, name=None) paddle.divide(x, y, name=None) ``` -- 原则上所有的输入都用x表示,包括functional下的linear, conv2d, lstm, batch_norm等 +- 原则上所有的输入都用 x 表示,包括 functional 下的 linear, conv2d, lstm, batch_norm 等 -- 原则上都要具备name参数,用于标记layer,方便调试和可视化 +- 原则上都要具备 name 参数,用于标记 layer,方便调试和可视化 -- loss类的函数,使用`input` 表示输入,使用`label` 表示真实预测值/类别,部分情况下,为了更好的便于用户理解,可以选用其他更恰当的参数名称。如`softmax_with_logits`时,输入参数名可用`logits` +- loss 类的函数,使用`input` 表示输入,使用`label` 表示真实预测值/类别,部分情况下,为了更好的便于用户理解,可以选用其他更恰当的参数名称。如`softmax_with_logits`时,输入参数名可用`logits` ```python paddle.nn.functional.mse_loss(input, label, reduction='mean', name=None) @@ -341,17 +341,17 @@ ``` -- Tensor名称和操作 +- Tensor 名称和操作 | 中文 | 英文 | 缩写 | 说明 | | ------------- | ------------------- | ---- | -------------------------------- | | 张量 | tensor | | | | 形状 | shape | | | | 维数(阶) | rank | | | - | 第几维(轴) | axis/axes | | 从0开始编号 | - | 0阶张量(标量) | scalar | | | - | 1阶张量(向量) | vector | | | - | 2阶张量(矩阵) | matrix/matrice | | | + | 第几维(轴) | axis/axes | | 从 0 开始编号 | + | 0 阶张量(标量) | scalar | | | + | 1 阶张量(向量) | vector | | | + | 2 阶张量(矩阵) | matrix/matrice | | | | 矩阵转置 | transpose | | | | 点积 | dot | | 一维向量,内积;二维矩阵,矩阵乘 | | 内积 | inner | | | @@ -364,48 +364,48 @@ | 中文名 | 推荐 | 不推荐写法 | 示例 | 备注 | | ------------ | ------------- | ------------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | 算子名 | name | input | relu(x, inplace=False, name=None) | 调用api所创建的算子名称 | + | 算子名 | name | input | relu(x, inplace=False, name=None) | 调用 api 所创建的算子名称 | | 单个输入张量 | x | x | relu(x, inplace=False, name=None) | 单个待操作的张量 | | 两个输入张量 | x, y | input, other/ X, Y | elementwise_add(x, y, axis=-1, activation=None, name=None) | 两个待操作的张量 | | 数据类型 | dtype | type, data_type | unique(x, dtype='int32') | | | 输出张量 | out | output | | | - | 轴 | axis/axes | dim/dims | concat(x, axis=0, name=None) | 虽然pytorch的dim单词比较简单,但axis跟numpy, tf和paddle历史一致。axis通常从0开始编号,dim一般从1开始编号,比如3维空间的第1维 | + | 轴 | axis/axes 
| dim/dims | concat(x, axis=0, name=None) | 虽然 pytorch 的 dim 单词比较简单,但 axis 跟 numpy, tf 和 paddle 历史一致。axis 通常从 0 开始编号,dim 一般从 1 开始编号,比如 3 维空间的第 1 维 | | 参数属性 | param_attr | | fc(param_attr=None, ...) | | | 偏置属性 | bias_attr | | fc(bias_attr=None, ... ) | | - | 激活函数 | activation | act | batch_norm(input, activation=None, ...) | act简称不容易理解,跟pytorch保持一致 | - | 标签 | label | target | def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex) | label容易理解,跟paddle历史保持一致 | + | 激活函数 | activation | act | batch_norm(input, activation=None, ...) | act 简称不容易理解,跟 pytorch 保持一致 | + | 标签 | label | target | def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex) | label 容易理解,跟 paddle 历史保持一致 | | 张量形状 | shape | size | | | | 程序 | program | prog | | | - | 数据格式 | data_format | data_layout | conv2d(x, weight, bias=None, padding=0, stride=1, dilation=1, groups=1, activation=None, data_format="NCHW", name=None) | 跟paddle历史保持一致 | - | 文件名 | filename | file_name | | 跟paddle历史和c语言fopen函数保持一致 | + | 数据格式 | data_format | data_layout | conv2d(x, weight, bias=None, padding=0, stride=1, dilation=1, groups=1, activation=None, data_format="NCHW", name=None) | 跟 paddle 历史保持一致 | + | 文件名 | filename | file_name | | 跟 paddle 历史和 c 语言 fopen 函数保持一致 | | 目录名 | path | dirname | | | - | 设备 | device | place/force_cpu | ones(shape, dtype=None, out=None, device=None) | device比place更容易理解;跟pytorch一致 | + | 设备 | device | place/force_cpu | ones(shape, dtype=None, out=None, device=None) | device 比 place 更容易理解;跟 pytorch 一致 | | 执行器 | executor | exe | | | | 下标 | index | idx | | | - | 字母epsilon | epsilon | eps | | | + | 字母 epsilon | epsilon | eps | | | | 值 | value | val/v | | | | 变量 | variable | var/v | | | | 批大小 | batch_size | batch_num,batch_number | | | | 隐层大小 | hidden_size | hidden, hid_size, hid_dim, hidden_dim | | | | 卷积核大小 | filter_size | filter | | | - | 范围 | start, stop | begin, end | | 跟python的range函数一致,numpy的arange | + | 范围 | start, stop | begin, end | | 跟 python 的 range 函数一致,numpy 
的 arange | | 步数 | step | num, count | | | | 条件 | cond | condition | | | - | 禁用梯度 | stop_gradient | require_grad | | 跟tf和paddle历史一致 | + | 禁用梯度 | stop_gradient | require_grad | | 跟 tf 和 paddle 历史一致 | | 学习率 | learning_rate | lr | | | | 保持维度 | keep_dim | keepdim | | | - | 禁用梯度 | no_grad_vars | no_grad_set | gradients ( targets, inputs, target_gradients=None, no_grad_vars=None ) | 跟dygraph.grad保持一致 | - | dropout比例 | dropout_rate | dropout_prob, dropout | | | + | 禁用梯度 | no_grad_vars | no_grad_set | gradients ( targets, inputs, target_gradients=None, no_grad_vars=None ) | 跟 dygraph.grad 保持一致 | + | dropout 比例 | dropout_rate | dropout_prob, dropout | | | | 传入 | feed | feed_dict | | | | 取出 | fetch | fetch_list, fetch_targets | | | | 转置 | transpose | trans, trans_x | | | - | decay步数 | decay_steps | step_each_epoch | | | + | decay 步数 | decay_steps | step_each_epoch | | | | 类别数 | num_classes | class_nums | | | | 通道数 | num_channels | channels | | | | 卷积核数 | num_filters | filters | | | | 组数 | num_groups | groups | | | | 操作输入 | inplace=True | in_place | | | - | 训练模式 | training=True | is_train, is_test | | 跟pytorch保持一致, tf用trainable | + | 训练模式 | training=True | is_train, is_test | | 跟 pytorch 保持一致, tf 用 trainable | ## 附-中英术语表 @@ -445,7 +445,7 @@ | 推断 | inference | | | 上溢 | overflow | | | 下溢 | underflow | | -| softmax函数 | softmax function | | +| softmax 函数 | softmax function | | | softmax | softmax | | | 欠估计 | underestimation | | | 过估计 | overestimation | | @@ -456,8 +456,8 @@ | 代价函数 | cost function | | | 代价 | cost | | | 损失函数 | loss function | | -| PR曲线 | PR curve | | -| F值 | F-score | | +| PR 曲线 | PR curve | | +| F 值 | F-score | | | 损失 | loss | | | 误差函数 | error function | | | 梯度下降 | gradient descent | | @@ -523,13 +523,13 @@ | 磨合 | Burning-in | | | 混合时间 | Mixing Time | | | 混合 | Mixing | | -| Gibbs采样 | Gibbs Sampling | | +| Gibbs 采样 | Gibbs Sampling | | | 吉布斯步数 | Gibbs steps | | | Bagging | bootstrap aggregating | | | 掩码 | mask | | | 批标准化 | batch normalization | | | 参数共享 | parameter 
sharing | | -| KL散度 | KL divergence | | +| KL 散度 | KL divergence | | | 温度 | temperature | | | 临界温度 | critical temperatures | | | 并行回火 | parallel tempering | | @@ -681,10 +681,10 @@ | 分解的 | factorized | | | 均匀场 | meanfield | | | 最大似然估计 | maximum likelihood estimation | | -| 概率PCA | probabilistic PCA | | +| 概率 PCA | probabilistic PCA | | | 随机梯度上升 | Stochastic Gradient Ascent | | | 团 | clique | | -| Dirac分布 | dirac distribution | | +| Dirac 分布 | dirac distribution | | | 不动点方程 | fixed point equation | | | 变分法 | calculus of variations | | | 信念网络 | belief network | | @@ -701,10 +701,10 @@ | 相关系数 | correlation | | | 标准正态分布 | standard normal distribution | | | 协方差矩阵 | covariance matrix | | -| Bernoulli分布 | Bernoulli distribution | | -| Bernoulli输出分布 | Bernoulli output distribution | | -| Multinoulli分布 | multinoulli distribution | | -| Multinoulli输出分布 | multinoulli output distribution | | +| Bernoulli 分布 | Bernoulli distribution | | +| Bernoulli 输出分布 | Bernoulli output distribution | | +| Multinoulli 分布 | multinoulli distribution | | +| Multinoulli 输出分布 | multinoulli output distribution | | | 范畴分布 | categorical distribution | | | 多项式分布 | multinomial distribution | | | 正态分布 | normal distribution | | @@ -731,7 +731,7 @@ | 贝叶斯规则 | Bayes' rule | | | 测度论 | measure theory | | | 零测度 | measure zero | | -| Jacobian矩阵 | Jacobian matrix | | +| Jacobian 矩阵 | Jacobian matrix | | | 自信息 | self-information | | | 奈特 | nats | | | 比特 | bit | | @@ -739,7 +739,7 @@ | 香农熵 | Shannon entropy | | | 微分熵 | differential entropy | | | 微分方程 | differential equation | | -| KL散度 | Kullback-Leibler (KL) divergence | | +| KL 散度 | Kullback-Leibler (KL) divergence | | | 交叉熵 | cross-entropy | | | 熵 | entropy | | | 分解 | factorization | | @@ -763,7 +763,7 @@ | 矩阵乘积 | matrix product | | | AdaGrad | AdaGrad | | | 逐元素乘积 | element-wise product | | -| Hadamard乘积 | Hadamard product | | +| Hadamard 乘积 | Hadamard product | | | 团势能 | clique potential | | | 因子 | factor | | | 未归一化概率函数 | unnormalized probability function | | 
@@ -823,7 +823,7 @@ | 绝对值整流 | absolute value rectification | | | 渗漏整流线性单元 | Leaky ReLU | | | 参数化整流线性单元 | parametric ReLU | PReLU | -| maxout单元 | maxout unit | | +| maxout 单元 | maxout unit | | | 硬双曲正切函数 | hard tanh | | | 架构 | architecture | | | 操作 | operation | | @@ -872,7 +872,7 @@ | 收缩 | contractive | | | 长期依赖 | long-term dependency | | | 跳跃连接 | skip connection | | -| 门控RNN | gated RNN | | +| 门控 RNN | gated RNN | | | 门控 | gated | | | 卷积 | convolution | | | 输入 | input | | diff --git a/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md b/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md index d0a8c70bf1d..c31d7265e91 100644 --- a/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md +++ b/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md @@ -1,19 +1,19 @@ -# 飞桨API文档书写规范 +# 飞桨 API 文档书写规范 -1. **至关重要:** **API文档对该API的描述,一定要与API的行为保持一致。中英文文档的内容要严格一致。** -2. **API文档的字段:** API名称、API功能描述、API参数、API返回、API代码示例、API属性(class)、API方法(methods)等。是否写API抛出异常的情况,不做强制要求。 -3. **API功能描述:** 请注意,看文档的用户没有和开发同学一样的知识背景。因此,请提示用户在什么场景下使用该API。请使用深度学习领域通用的词汇和说法。([深度学习常用术语表](https://github.com/PaddlePaddle/FluidDoc/wiki/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%B8%B8%E7%94%A8%E6%9C%AF%E8%AF%AD%E8%A1%A8))。 -4. **API参数:** 写清楚对输入参数的要求,写清楚在不同情况下的行为区别(例默认值时的行为)。同类性质的参数(如:输入tensor `x`,每个API中的`name` 参数),可以直接从这里copy内容:**[常用文档写法](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/templates/common_docs.py)**。 -5. **API代码示例:** 中英文文档当中的代码示例完全一致(means identical, comments可不用翻译)。代码示例使用2.0版本中的API,可运行。尽量不用random输入,注释形式给出输出值。 -6. **其他:** 2.0 中的API,对于``Variable、LodTensor、Tensor``,统一使用``Tensor``。`to_variable`也统一改为`to_tensor`。 -7. 构造输入数据时,尽量使用paddle提供的API,如:`paddle.zeros, paddle.ones, paddle.full, paddle.arange, paddle.rand, paddle.randn, paddle.randint, paddle.normal, paddle.uniform`。 -8. 对于`Linear`, `Conv2D`, `L1Loss` 这些class形式的API,需要写清楚当这个`callable`被调用时的输入输出的形状。(i,e.`forward`函数的参数)。位置放在现在的Parameters/参数这个block后面,具体为: +1. 
**至关重要:** **API 文档对该 API 的描述,一定要与 API 的行为保持一致。中英文文档的内容要严格一致。** +2. **API 文档的字段:** API 名称、API 功能描述、API 参数、API 返回、API 代码示例、API 属性(class)、API 方法(methods)等。是否写 API 抛出异常的情况,不做强制要求。 +3. **API 功能描述:** 请注意,看文档的用户没有和开发同学一样的知识背景。因此,请提示用户在什么场景下使用该 API。请使用深度学习领域通用的词汇和说法。([深度学习常用术语表](https://github.com/PaddlePaddle/FluidDoc/wiki/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%B8%B8%E7%94%A8%E6%9C%AF%E8%AF%AD%E8%A1%A8))。 +4. **API 参数:** 写清楚对输入参数的要求,写清楚在不同情况下的行为区别(例默认值时的行为)。同类性质的参数(如:输入 tensor `x`,每个 API 中的`name` 参数),可以直接从这里 copy 内容:**[常用文档写法](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/templates/common_docs.py)**。 +5. **API 代码示例:** 中英文文档当中的代码示例完全一致(means identical, comments 可不用翻译)。代码示例使用 2.0 版本中的 API,可运行。尽量不用 random 输入,注释形式给出输出值。 +6. **其他:** 2.0 中的 API,对于``Variable、LodTensor、Tensor``,统一使用``Tensor``。`to_variable`也统一改为`to_tensor`。 +7. 构造输入数据时,尽量使用 paddle 提供的 API,如:`paddle.zeros, paddle.ones, paddle.full, paddle.arange, paddle.rand, paddle.randn, paddle.randint, paddle.normal, paddle.uniform`。 +8. 对于`Linear`, `Conv2D`, `L1Loss` 这些 class 形式的 API,需要写清楚当这个`callable`被调用时的输入输出的形状。(i,e.`forward`函数的参数)。位置放在现在的 Parameters/参数这个 block 后面,具体为: 中文时: 形状: - - input: 形状为(批大小, 通道数,高度,宽度),即,HCHW格式的4-D Tensor。 - - output: 形状为(批大小, 卷积核个数,输出图像的高度,输出图像的高度)的4-D Tensor。 + - input: 形状为(批大小, 通道数,高度,宽度),即,HCHW 格式的 4-D Tensor。 + - output: 形状为(批大小, 卷积核个数,输出图像的高度,输出图像的高度)的 4-D Tensor。 英文时: @@ -81,23 +81,23 @@ .. py:function:: paddle.add(x, y, name=None) - 该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。计算公式为: + 该 OP 是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。计算公式为: .. math:: out = x + y .. note:: - ``paddle.add`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.add`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 .. 
- 说明:以上为API描述部分,只需要尽可能简单的描述出API的功能作用即可,要让用户能快速看懂。这个case可以拆解为3个部分,功能作用 + 计算公式 + 注解部分; + 说明:以上为 API 描述部分,只需要尽可能简单的描述出 API 的功能作用即可,要让用户能快速看懂。这个 case 可以拆解为 3 个部分,功能作用 + 计算公式 + 注解部分; 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 - - y (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 - - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64、int32、int64。 + - y (Tensor) - 输入的 Tensor,数据类型为:float32、float64、int32、int64。 + - name (str, 可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 .. - 说明:API参数可优先copy常用文档写法中的参数,参数的描述要准确,还要重点描述参数的功能作用及使用场景。 + 说明:API 参数可优先 copy 常用文档写法中的参数,参数的描述要准确,还要重点描述参数的功能作用及使用场景。 返回 ::::::::: @@ -128,13 +128,13 @@ # [12., 53., 24. ] .. - 尽量不使用random的输入; + 尽量不使用 random 的输入; 优先使用动态图,在一个代码示例中给出多个使用场景; -## API文档各模块写作说明 +## API 文档各模块写作说明 ### API 名称 -API名称直接写API的名字即可,不需要将全路径写全; +API 名称直接写 API 的名字即可,不需要将全路径写全; **如 paddle.add ** @@ -142,7 +142,7 @@ API名称直接写API的名字即可,不需要将全路径写全; --------- ### API 声明 -API的声明部分,要给出API的声明信息; +API 的声明部分,要给出 API 的声明信息; **function: 如 paddle.add** @@ -153,15 +153,15 @@ API的声明部分,要给出API的声明信息; .. 
py:class:: paddle.nn.Conv2d(num_channels, num_filters, filter_size, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCHW", dtype="float32") ### API 功能描述 -API功能描述部分只需要尽可能简单的描述出API的功能作用即可,要让用户能快速看懂。如``paddle.add[https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/math/add_cn.html#add]``: +API 功能描述部分只需要尽可能简单的描述出 API 的功能作用即可,要让用户能快速看懂。如``paddle.add[https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/math/add_cn.html#add]``: -可以拆解为3个部分,功能作用 + 计算公式 + 注解部分,其中: +可以拆解为 3 个部分,功能作用 + 计算公式 + 注解部分,其中: - - 功能作用:描述该API文档的功能作用;**由于用户没有对应的背景**,所以需要补充必要的细节,比如是不是element_wise的,如 ``paddle.add``: + - 功能作用:描述该 API 文档的功能作用;**由于用户没有对应的背景**,所以需要补充必要的细节,比如是不是 element_wise 的,如 ``paddle.add``: - 该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。 + 该 OP 是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。 - - 计算公式:给出该API的计算公式,由于公式中每个变量都对应API的参数,所以不需要做额外的说明,如 ``paddle.add``: + - 计算公式:给出该 API 的计算公式,由于公式中每个变量都对应 API 的参数,所以不需要做额外的说明,如 ``paddle.add``: 计算公式为: @@ -169,65 +169,65 @@ API功能描述部分只需要尽可能简单的描述出API的功能作用即 out = x + y - - 注解部分:加入API有需要特殊说明的部分,可以在注解部分给出,比如:该API与其他API功能相似,需要给出该API与另一个API的使用上的区别。 + - 注解部分:加入 API 有需要特殊说明的部分,可以在注解部分给出,比如:该 API 与其他 API 功能相似,需要给出该 API 与另一个 API 的使用上的区别。 **注意事项:** -1. 写作API文档中,请使用深度学习领域通用的词汇和说法。([深度学习常用术语表](https://github.com/PaddlePaddle/FluidDoc/wiki/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%B8%B8%E7%94%A8%E6%9C%AF%E8%AF%AD%E8%A1%A8))。 -2. 文档中的**前后说明要一致**,比如维度的说明,统一使用 4-D Tensor的格式,不确定的写“多维”,示例如下: +1. 写作 API 文档中,请使用深度学习领域通用的词汇和说法。([深度学习常用术语表](https://github.com/PaddlePaddle/FluidDoc/wiki/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%B8%B8%E7%94%A8%E6%9C%AF%E8%AF%AD%E8%A1%A8))。 +2. 文档中的**前后说明要一致**,比如维度的说明,统一使用 4-D Tensor 的格式,不确定的写“多维”,示例如下: ![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-836a496baa91882d77fde5ad1259a098d408afce) 3. 
文档相互引用的方式:[如何让文档相互引用](https://github.com/PaddlePaddle/FluidDoc/wiki/%E9%A3%9E%E6%A1%A8%E6%96%87%E6%A1%A3%E7%9B%B8%E4%BA%92%E5%BC%95%E7%94%A8) 4. 功能描述中涉及到的专有数据结构如``Tensor``、``LoDTensor``或``Variable``,中英文都统一使用``Tensor``无需翻译。 -5. 如果涉及到一些通用的知识,如broadcasting,可以以Note的方式写出来,示例如下: +5. 如果涉及到一些通用的知识,如 broadcasting,可以以 Note 的方式写出来,示例如下: 中文: .. note:: - ``paddle.add`` 遵守broadcasting,如您想了解更多, 请参见 :ref:`cn_user_guide_broadcasting` . + ``paddle.add`` 遵守 broadcasting,如您想了解更多, 请参见 :ref:`cn_user_guide_broadcasting` . 英文: **Note**: ``paddle.add`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` . -**总结:paddle.add的描述如下** +**总结:paddle.add 的描述如下** - 该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。计算公式为: + 该 OP 是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。计算公式为: .. math:: out = x + y .. note:: - ``paddle.add`` 遵守broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 + ``paddle.add`` 遵守 broadcasting,如您想了解更多,请参见 :ref:`cn_user_guide_broadcasting` 。 ### API 参数(重要) -**注意:** 一些通用的参数说明,直接copy[**常用文档写法**](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/templates/common_docs.py 'common_args')中的描述即可。 +**注意:** 一些通用的参数说明,直接 copy[**常用文档写法**](https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/templates/common_docs.py 'common_args')中的描述即可。 -API参数部分,要解释清楚每个参数的意义和使用场景。需要注意以下两点: +API 参数部分,要解释清楚每个参数的意义和使用场景。需要注意以下两点: **1、**对于有**默认值**的参数,至少要讲清楚在默认值下的逻辑,而不仅仅是介绍这个参数是什么以及默认值是什么; -如``stop_gradient``的对比,要添加默认值为True的行为,即**表示停止计算梯度** 。 +如``stop_gradient``的对比,要添加默认值为 True 的行为,即**表示停止计算梯度** 。 - stop_gradient (bool,可选) - 提示是否应该停止计算梯度,默认值为False。 + stop_gradient (bool,可选) - 提示是否应该停止计算梯度,默认值为 False。 # wrong - stop_gradient (bool,可选) - 提示是否应该停止计算梯度,默认值为True,表示停止计算梯度。 + stop_gradient (bool,可选) - 提示是否应该停止计算梯度,默认值为 True,表示停止计算梯度。 # right 或者如``return_numpy``的对比: - return_numpy (bool) – 该变量表示是否将fetched tensor转换为numpy。默认为:True。 + return_numpy (bool) – 该变量表示是否将 fetched tensor 转换为 numpy。默认为:True。 # wrong - return_numpy (bool) 
– 该参数表示是否将返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为Tensor,否则返回变量的类型为numpy.ndarray。默认为:True。 + return_numpy (bool) – 该参数表示是否将返回的计算结果(fetch list 中指定的变量)转化为 numpy;如果为 False,则每个变量返回的类型为 Tensor,否则返回变量的类型为 numpy.ndarray。默认为:True。 # right -可以看出,第二行的return_numpy的描述更为清晰,分别描述了True和False的两种情况。而第一行的说明过于简单。 +可以看出,第二行的 return_numpy 的描述更为清晰,分别描述了 True 和 False 的两种情况。而第一行的说明过于简单。 -**2、** 在讲清楚每个API参数是什么的同时,还需要描述清楚每个参数的具体作用是什么。如再如``feeded_var_names``: +**2、** 在讲清楚每个 API 参数是什么的同时,还需要描述清楚每个参数的具体作用是什么。如再如``feeded_var_names``: - feeded_var_names (list[str]) – 字符串列表,包含着Inference Program预测时所需提供数据的所有变量名称(即所有输入变量的名称)。 + feeded_var_names (list[str]) – 字符串列表,包含着 Inference Program 预测时所需提供数据的所有变量名称(即所有输入变量的名称)。 target_vars (list[Variable]) – Variable (详见 基础概念 )类型列表,包含着模型的所有输出变量。通过这些输出变量即可得到模型的预测结果。 用户看了描述以后,完全不知道这两个参数可以用来做**网络裁剪**,所以需要将一些参数的功能作用描述出来。 @@ -235,13 +235,13 @@ API参数部分,要解释清楚每个参数的意义和使用场景。需要 参数 ::::::::: - - x (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 - - y (Tensor) - 输入的Tensor,数据类型为:float32、float64、int32、int64。 - - name (str, 可选) - 操作的名称(可选,默认值为None)。更多信息请参见 :ref:`api_guide_Name`。 + - x (Tensor) - 输入的 Tensor,数据类型为:float32、float64、int32、int64。 + - y (Tensor) - 输入的 Tensor,数据类型为:float32、float64、int32、int64。 + - name (str, 可选) - 操作的名称(可选,默认值为 None)。更多信息请参见 :ref:`api_guide_Name`。 ### API 返回 - 先描述API 返回值的类型,然后描述API的返回值及其含义。 + 先描述 API 返回值的类型,然后描述 API 的返回值及其含义。 **如 paddle.add** @@ -251,33 +251,33 @@ API参数部分,要解释清楚每个参数的意义和使用场景。需要 ``Tensor``,维度和数据类型都与 ``x`` 相同,存储运算后的结果。 ### API 抛出异常 -API抛出异常部分,不做强制要求,可以在一些特殊的场景下给出抛出异常的信息,如: +API 抛出异常部分,不做强制要求,可以在一些特殊的场景下给出抛出异常的信息,如: -**场景1:API使用有先后依赖关系** +**场景 1:API 使用有先后依赖关系** 如:[paddle.fluid.layers.DynamicRNN.update_memory()](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/DynamicRNN_cn.html#update-memory-ex-mem-new-mem 'update_memory')框中的部分: ![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-2a1cbab0eb4c76208a5f1e0a0cc6a19bb0d85f1a) -**场景2:多个参数相互影响** +**场景 2:多个参数相互影响** 
如[paddle.layers.pool2d](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/pool2d_cn.html#id4 'pool2d')框出的部分: ![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-ce7f6e6f0bc3ccae564b47ff47007f4d518204a0) -**场景3:该API的使用场景有特殊限制** +**场景 3:该 API 的使用场景有特殊限制** 如[paddle.fluid.layers.DynamicRNN.step_input()](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/layers_cn/DynamicRNN_cn.html#step-input-x-level-0 'step_input')框出的部分: ![图片](https://agroup-bos-bj.cdn.bcebos.com/bj-5fef53592710f7638d0902b2a816570d8ac6beea) ### API 代码示例(重要) -代码示例是API文档的核心部分之一,毕竟talk is cheap,show me the code。所以,在API代码示例中,应该对前文描述的API使用中的各种场景,尽可能的在一个示例中给出,并用注释给出对应的结果。 +代码示例是 API 文档的核心部分之一,毕竟 talk is cheap,show me the code。所以,在 API 代码示例中,应该对前文描述的 API 使用中的各种场景,尽可能的在一个示例中给出,并用注释给出对应的结果。 如 **注意事项** - - 示例代码需要与当前版本及推荐用法保持一致:**develop分支下fluid namespace 以外的API,不能再有fluid关键字,只需要提供动态图的示例代码。** - - 尽量不用random输入,需要以注释形式给出输出值。 + - 示例代码需要与当前版本及推荐用法保持一致:**develop 分支下 fluid namespace 以外的 API,不能再有 fluid 关键字,只需要提供动态图的示例代码。** + - 尽量不用 random 输入,需要以注释形式给出输出值。 - 中英文示例代码,不做任何翻译,保持相同(means identical)。 - - 原则上,所有提供的API都需要提供示例代码,对于``class member methods, abstract API, callback``,等情况,可以在提交PR时说明相应的使用方法的文档的位置或文档计划后,通过白名单审核机制通过CI检查。 - - 对于仅为GPU环境提供的API,当该示例代码在CPU上运行时,运行后给出含有”Not compiled with CUDA”的错误提示,也可认为该API行为正确。 + - 原则上,所有提供的 API 都需要提供示例代码,对于``class member methods, abstract API, callback``,等情况,可以在提交 PR 时说明相应的使用方法的文档的位置或文档计划后,通过白名单审核机制通过 CI 检查。 + - 对于仅为 GPU 环境提供的 API,当该示例代码在 CPU 上运行时,运行后给出含有”Not compiled with CUDA”的错误提示,也可认为该 API 行为正确。 -英文API代码示例格式规范如下: +英文 API 代码示例格式规范如下: def api(): """ @@ -324,17 +324,17 @@ API抛出异常部分,不做强制要求,可以在一些特殊的场景下 print(z) # [3., 8., 6. ] ### API 属性 -API的属性用来描述API所包含的属性。如果API有属性,每个属性需要分为四个部分描述: +API 的属性用来描述 API 所包含的属性。如果 API 有属性,每个属性需要分为四个部分描述: - 名称:属性名称直接写属性的名字即可,不需要将全路径写全; - - 注意:列举出使用该属性时应注意的一些问题,如果没有可以不填;如不同的版本、是否是只读属性、使用的一些tricks等等,如``Program``的``rand_seed``: + - 注意:列举出使用该属性时应注意的一些问题,如果没有可以不填;如不同的版本、是否是只读属性、使用的一些 tricks 等等,如``Program``的``rand_seed``: .. 
note:: - 必须在相关OP被添加之前设置。 + 必须在相关 OP 被添加之前设置。 - - 描述:API功能描述部分要求一致; - - 返回:API返回部分要求一致; - - 代码示例:与API代码示例部分要求一致; + - 描述:API 功能描述部分要求一致; + - 返回:API 返回部分要求一致; + - 代码示例:与 API 代码示例部分要求一致; **总结:paddle.Program.random_seed** @@ -342,12 +342,12 @@ API的属性用来描述API所包含的属性。如果API有属性,每个属 '''''''''''' ..note: - 必须在相关OP被添加之前设置。 + 必须在相关 OP 被添加之前设置。 - 程序中随机运算符的默认随机种子。0意味着随机生成随机种子。 + 程序中随机运算符的默认随机种子。0 意味着随机生成随机种子。 **返回** - int64,返回该Program中当前正在使用的random seed。。 + int64,返回该 Program 中当前正在使用的 random seed。。 **代码示例** @@ -369,14 +369,14 @@ API的属性用来描述API所包含的属性。如果API有属性,每个属 ### API 方法 -API的方法用来描述API所包含的方法,一些类的API会有这个内容,没有方法的API可以不写此模块。如果有,每个方法需要分为六个部分描述: +API 的方法用来描述 API 所包含的方法,一些类的 API 会有这个内容,没有方法的 API 可以不写此模块。如果有,每个方法需要分为六个部分描述: - 名称:方法名称直接写方法的名字即可,不需要将全路径写全; - - 声明:与API声明的要求一致。 - - 参数:与API参数的要求一致。 - - 描述:与API功能描述的要求一致。 - - 返回:与API返回的要求一致。。 - - 代码示例:与API代码示例的要求一致。 + - 声明:与 API 声明的要求一致。 + - 参数:与 API 参数的要求一致。 + - 描述:与 API 功能描述的要求一致。 + - 返回:与 API 返回的要求一致。。 + - 代码示例:与 API 代码示例的要求一致。 **总结:paddle.Program.parse_from_string** @@ -414,13 +414,13 @@ API的方法用来描述API所包含的方法,一些类的API会有这个内 # The two Programs printed here should be same ### 注解 -注解部分描述一些用户使用该API时需要额外注意的一些注意事项,可以出现在任意需要提示用户的地方;可以是当前版本存在的一些问题,如例1;也可以是该API使用上的一些注意事项,如例2; +注解部分描述一些用户使用该 API 时需要额外注意的一些注意事项,可以出现在任意需要提示用户的地方;可以是当前版本存在的一些问题,如例 1;也可以是该 API 使用上的一些注意事项,如例 2; -**例1 paddle.fluid.cuda.cuda_places:** +**例 1 paddle.fluid.cuda.cuda_places:** 中文: .. note:: - 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 + 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的 GPU 设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 英文: @@ -428,7 +428,7 @@ API的方法用来描述API所包含的方法,一些类的API会有这个内 For multi-card tasks, please use `FLAGS_selected_gpus` environment variable to set the visible GPU device. The next version will fix the problem with `CUDA_VISIBLE_DEVICES` environment variable. -**例2 paddle.sqrt** +**例 2 paddle.sqrt** 中文: .. note:: @@ -440,24 +440,24 @@ API的方法用来描述API所包含的方法,一些类的API会有这个内 input value must be greater than or equal to zero. 
### 警告 -警告部分需要慎重使用,一般是不推荐用户使用的API方法(例1),或者是已经有计划要废弃的API(例2),需要用警告来说明。 +警告部分需要慎重使用,一般是不推荐用户使用的 API 方法(例 1),或者是已经有计划要废弃的 API(例 2),需要用警告来说明。 -**例1 paddle.fluid.layers.DynamicRNN** +**例 1 paddle.fluid.layers.DynamicRNN** 中文: .. warning:: - 目前不支持在DynamicRNN的 block 中任何层上配置 is_sparse = True 。 + 目前不支持在 DynamicRNN 的 block 中任何层上配置 is_sparse = True 。 英文: Warning: Currently it is not supported to set :code:`is_sparse = True` of any layers defined within DynamicRNN's :code:`block` function. -**例2 paddle.fluid.clip.set_gradient_clip** +**例 2 paddle.fluid.clip.set_gradient_clip** 中文: .. warning:: - 此API对位置使用的要求较高,其必须位于组建网络之后, ``minimize`` 之前,因此在未来版本中可能被删除,故不推荐使用。推荐在 ``optimizer`` 初始化时设置梯度裁剪。 有三种裁剪策略: ``GradientClipByGlobalNorm`` 、 ``GradientClipByNorm`` 、 ``GradientClipByValue``。 如果在 ``optimizer`` 中设置过梯度裁剪,又使用了 ``set_gradient_clip`` ,``set_gradient_clip`` 将不会生效。 + 此 API 对位置使用的要求较高,其必须位于组建网络之后, ``minimize`` 之前,因此在未来版本中可能被删除,故不推荐使用。推荐在 ``optimizer`` 初始化时设置梯度裁剪。 有三种裁剪策略: ``GradientClipByGlobalNorm`` 、 ``GradientClipByNorm`` 、 ``GradientClipByValue``。 如果在 ``optimizer`` 中设置过梯度裁剪,又使用了 ``set_gradient_clip`` ,``set_gradient_clip`` 将不会生效。 英文: @@ -473,7 +473,7 @@ API的方法用来描述API所包含的方法,一些类的API会有这个内 - 中文文档、英文文档齐全,内容一一对应。 - 文档清晰可读,易于用户使用 - - 给出易于理解的api介绍,文字描述,公式描述。 + - 给出易于理解的 api 介绍,文字描述,公式描述。 - 参数命名通俗易懂无歧义,明确给出传参类型,对参数含义以及使用方法进行详细说明,对返回值进行详细说明。 - 异常类型和含义进行详细说明。 - 示例代码需要做到复制粘贴即可运行,并且需要明确给出预期运行结果(如果可以)。 diff --git a/docs/dev_guides/api_contributing_guides/new_cpp_op_cn.md b/docs/dev_guides/api_contributing_guides/new_cpp_op_cn.md index 4da2a3426c7..03d77e89a12 100644 --- a/docs/dev_guides/api_contributing_guides/new_cpp_op_cn.md +++ b/docs/dev_guides/api_contributing_guides/new_cpp_op_cn.md @@ -6,12 +6,12 @@ 本教程对新增算子的方法进行介绍,首先新增一个算子大概需要以下几个步骤: -1. 新增算子描述及定义:描述前反向算子的输入、输出、属性,实现InferMeta函数 -2. 新增算子Kernel:实现算子在各种设备上的计算逻辑 -3. 封装Python API:封装Python端调用算子的接口 +1. 新增算子描述及定义:描述前反向算子的输入、输出、属性,实现 InferMeta 函数 +2. 新增算子 Kernel:实现算子在各种设备上的计算逻辑 +3. 封装 Python API:封装 Python 端调用算子的接口 4. 
添加单元测试:验证新增算子的正确性 -以上4个步骤添加的文件,在Paddle中的位置如下(假设算子名为`xxx`): +以上 4 个步骤添加的文件,在 Paddle 中的位置如下(假设算子名为`xxx`): @@ -26,16 +26,16 @@ - - + + - + - + @@ -44,7 +44,7 @@
paddle/phi/api/yaml/api.yaml & paddle/phi/api/yaml/backward.yaml
算子InferMetapaddle/phi/infermeta目录下的相应文件中算子 InferMetapaddle/phi/infermeta 目录下的相应文件中
算子kernel算子 kernel paddle/phi/kernels/xxx_kernel.h & xxx_kernel.cc & xxx_grad_kernel.h & xxx_grad_kernel.cc(一般情况)
Python APIpython/paddle目录下的相应子目录中python/paddle 目录下的相应子目录中
单元测试
-接下来,我们以Trace操作,计算输入 Tensor 在指定平面上的对角线元素之和,并输出相应的计算结果,即 [trace](../../api/paddle/trace_cn.html#trace) 为例来介绍如何新增算子。 +接下来,我们以 Trace 操作,计算输入 Tensor 在指定平面上的对角线元素之和,并输出相应的计算结果,即 [trace](../../api/paddle/trace_cn.html#trace) 为例来介绍如何新增算子。 ## 2. 新增算子描述及定义 @@ -90,81 +90,81 @@ api -算子名称,与该算子Python API函数名相同(命名方式为:全小写+下划线),示例中为trace +算子名称,与该算子 Python API 函数名相同(命名方式为:全小写+下划线),示例中为 trace args -算子输入参数,与该算子Python API函数的输入参数对应(当前支持的输入数据类型包括:Tensor, Tensor[], float, double, bool, int, int64_t, int[], int64_t[], str, Place, DataType, DataLayout, IntArray, Scalar)。我们一般称这里Tensor类型的参数为Input(输入),非Tensor类型的参数为Attribute(属性)
-注:Tensor[]表示Tensor数组;IntArray为int类型数组,主要用于表示shape,index和axes等类型数据,可以直接使用Tensor或者普通整型数组构造,目前仍在测试阶段,如非必要暂不建议使用;Scalar表示标量,可以支持不同的普通数据类型 +算子输入参数,与该算子 Python API 函数的输入参数对应(当前支持的输入数据类型包括:Tensor, Tensor[], float, double, bool, int, int64_t, int[], int64_t[], str, Place, DataType, DataLayout, IntArray, Scalar)。我们一般称这里 Tensor 类型的参数为 Input(输入),非 Tensor 类型的参数为 Attribute(属性)
+注:Tensor[]表示 Tensor 数组;IntArray 为 int 类型数组,主要用于表示 shape,index 和 axes 等类型数据,可以直接使用 Tensor 或者普通整型数组构造,目前仍在测试阶段,如非必要暂不建议使用;Scalar 表示标量,可以支持不同的普通数据类型 output -算子输出类型(目前支持Tensor和Tensor[]类型),多个输出间用逗号“,”分隔开。可以使用”()”选择性标记输入的名字,如未标记默认为'out'
-注:当返回类型为Tensor[]时,由于数组的size要在kernel执行前推导完成,所以需要在Tensor[]后的'{}'内通过表达式指定返回数组的size,如:Tensor[](out){input.size()} +算子输出类型(目前支持 Tensor 和 Tensor[]类型),多个输出间用逗号“,”分隔开。可以使用”()”选择性标记输入的名字,如未标记默认为'out'
+注:当返回类型为 Tensor[]时,由于数组的 size 要在 kernel 执行前推导完成,所以需要在 Tensor[]后的'{}'内通过表达式指定返回数组的 size,如:Tensor[](out){input.size()} infer_meta -InferMeta函数负责根据输入变量推断返回Tensor的维度与类型,这里是对算子使用的InferMeta函数进行配置 +InferMeta 函数负责根据输入变量推断返回 Tensor 的维度与类型,这里是对算子使用的 InferMeta 函数进行配置 infer_meta:func -调用的InferMeta函数,这里trace调用的是TraceInferMeta函数 +调用的 InferMeta 函数,这里 trace 调用的是 TraceInferMeta 函数 infer_meta:param -InferMeta函数的输入参数,可以对args中的参数进行选择传入,未配置则默认传入args中的所有参数。示例中未配置本项,所以传入的参数为[x, offset, axis1, axis2]。output项中的参数作为输出无需配置会自动传入InferMeta函数中 +InferMeta 函数的输入参数,可以对 args 中的参数进行选择传入,未配置则默认传入 args 中的所有参数。示例中未配置本项,所以传入的参数为[x, offset, axis1, axis2]。output 项中的参数作为输出无需配置会自动传入 InferMeta 函数中 kernel -算子的计算Kernel配置 +算子的计算 Kernel 配置 kernel:func -算子对应kernel函数的注册名 +算子对应 kernel 函数的注册名 kernel:param -kernel函数的输入参数,配置规则与InferMeta函数的param配置项相同 +kernel 函数的输入参数,配置规则与 InferMeta 函数的 param 配置项相同 kernel:data_type -根据指定参数推导调用kernel的data_type(对应kernel函数的模板参数'T'),默认不进行配置,会根据输入Tensor自动进行推导。如果kernel的data_type类型由某个输入参数(Tensor或者DataType参数),需要将该参数的变量名填入该项。示例中未配置则kernel的data_type由输入变量'x'决定 +根据指定参数推导调用 kernel 的 data_type(对应 kernel 函数的模板参数'T'),默认不进行配置,会根据输入 Tensor 自动进行推导。如果 kernel 的 data_type 类型由某个输入参数(Tensor 或者 DataType 参数),需要将该参数的变量名填入该项。示例中未配置则 kernel 的 data_type 由输入变量'x'决定 kernel:backend -根据指定参数来选择调用kernel的Backend(Kernel执行的具体设备,如CPU、GPU等),默认不进行配置,会根据输入Tensor自动进行推导。如果kernel执行的backend类型由某个输入参数(Tensor或者Backend参数)决定,需要将该参数的变量名填入该项。示例中未配置则kernel执行的Backend与输入变量'x'的Backend相同 +根据指定参数来选择调用 kernel 的 Backend(Kernel 执行的具体设备,如 CPU、GPU 等),默认不进行配置,会根据输入 Tensor 自动进行推导。如果 kernel 执行的 backend 类型由某个输入参数(Tensor 或者 Backend 参数)决定,需要将该参数的变量名填入该项。示例中未配置则 kernel 执行的 Backend 与输入变量'x'的 Backend 相同 backward -算子对应的反向算子名称,如果没有反向则不需要配置,示例中trace算子的反向为trace_grad +算子对应的反向算子名称,如果没有反向则不需要配置,示例中 trace 算子的反向为 trace_grad 特殊配置项(目前特殊配置项还处于不稳定阶段,后续可能会有调整更新) optional -指定输入Tensor为可选输入,用法可参考dropout中seed_tensor(python/paddle/utils/code_gen/legacy_api.yaml中) +指定输入 Tensor 为可选输入,用法可参考 dropout 中 seed_tensor(python/paddle/utils/code_gen/legacy_api.yaml 中) inplace 
-算子对指定的输入做原位处理并作为输出结果返回,使用格式:(x -> out),具体用法可参考relu算子
-特殊规则:如果api中算子名称有'_'后缀则只生成支持inplace功能的接口,如果算子名称没有'_'后缀,则会同时生成支持inplace操作的接口(自动添加'_'后缀)和不支持inplace的普通接口共两套接口 +算子对指定的输入做原位处理并作为输出结果返回,使用格式:(x -> out),具体用法可参考 relu 算子
+特殊规则:如果 api 中算子名称有'_'后缀则只生成支持 inplace 功能的接口,如果算子名称没有'_'后缀,则会同时生成支持 inplace 操作的接口(自动添加'_'后缀)和不支持 inplace 的普通接口共两套接口 view -与inplace机制类似,区别在于view模式返回的结果只是与输入共享内存,并不是输入Tensor变量本身,使用格式:(x -> out),具体用法可参考reshape算子 +与 inplace 机制类似,区别在于 view 模式返回的结果只是与输入共享内存,并不是输入 Tensor 变量本身,使用格式:(x -> out),具体用法可参考 reshape 算子 intermediate -标记前向计算中输出的用于反向计算的中间变量,不会出现在Python API的返回结果中,相关设计正在完善中,新增算子时不建议使用 +标记前向计算中输出的用于反向计算的中间变量,不会出现在 Python API 的返回结果中,相关设计正在完善中,新增算子时不建议使用 invoke -复用已有的算子接口或实现自定义的C++ API,配置时以函数调用的形式配置即可,使用invoke时则不需要配置infer_meta和kernel。
-a. 如果是复用已有算子,需要被复用的算子为前向算子且两者的返回值类型相同,可参考zeros_like算子
-b. 如果是实现自定义的C++ API,需要在'paddle/phi/api/lib/api_custom_impl.h'声明自定义实现函数并在'paddle/phi/api/lib/api_custom_impl.cc'中进行实现,具体可参考embedding算子 +复用已有的算子接口或实现自定义的 C++ API,配置时以函数调用的形式配置即可,使用 invoke 时则不需要配置 infer_meta 和 kernel。
+a. 如果是复用已有算子,需要被复用的算子为前向算子且两者的返回值类型相同,可参考 zeros_like 算子
+b. 如果是实现自定义的 C++ API,需要在'paddle/phi/api/lib/api_custom_impl.h'声明自定义实现函数并在'paddle/phi/api/lib/api_custom_impl.cc'中进行实现,具体可参考 embedding 算子 @@ -185,18 +185,18 @@ b. 如果是实现自定义的C++ API,需要在'paddle/phi/api/lib/api_custom_ forward -对应前向算子的名称、参数、返回值,需要与api.yaml中前向算子配置一致 +对应前向算子的名称、参数、返回值,需要与 api.yaml 中前向算子配置一致 args 反向算子输入参数, 示例中'x'表示将前向的'x'变量输入到反向,'out_grad'表示前向输出'out'对应的反向梯度
-约束条件1:所有参数需要在forward配置项的参数中(输入、输出以及输出对应的反向梯度)找到对应(根据变量名匹配)
-约束条件2:反向输入参数需要以:a.前向输入Tensor b.前向输出Tensor c.前向输出Tensor的反向梯度 d.前向非Tensor类型属性变量(Attribute) 的顺序排列,反向计算中不需要使用的前向变量无须添加
+约束条件 1:所有参数需要在 forward 配置项的参数中(输入、输出以及输出对应的反向梯度)找到对应(根据变量名匹配)
+约束条件 2:反向输入参数需要以:a.前向输入 Tensor b.前向输出 Tensor c.前向输出 Tensor 的反向梯度 d.前向非 Tensor 类型属性变量(Attribute) 的顺序排列,反向计算中不需要使用的前向变量无须添加
output -反向算子输出,顺序需要与前向输入Tensor一致,比如前向输入(Tensor x, Tensor y),则反向输出必须为Tensor(x_grad), Tensor(y_grad) +反向算子输出,顺序需要与前向输入 Tensor 一致,比如前向输入(Tensor x, Tensor y),则反向输出必须为 Tensor(x_grad), Tensor(y_grad) infer_meta @@ -215,8 +215,8 @@ b. 如果是实现自定义的C++ API,需要在'paddle/phi/api/lib/api_custom_ no_need_buffer -可选配置,标记的Tensor变量在前向运行完成后,持有的内存或显存会被释放,以减少训练过程中的内存使用。trace_grad由于反向算子只需要前向变量'x'的维度信息,不需要内存数据,所以可以标记为no_need_buffer提前释放内存
-注意:由于Tensor内存被释放后会影响dtype接口的使用,所以需要在kernel的data_type配置项中指定其他的Tensor来推导kernel的data_type +可选配置,标记的 Tensor 变量在前向运行完成后,持有的内存或显存会被释放,以减少训练过程中的内存使用。trace_grad 由于反向算子只需要前向变量'x'的维度信息,不需要内存数据,所以可以标记为 no_need_buffer 提前释放内存
+注意:由于 Tensor 内存被释放后会影响 dtype 接口的使用,所以需要在 kernel 的 data_type 配置项中指定其他的 Tensor 来推导 kernel 的 data_type optional @@ -229,11 +229,11 @@ b. 如果是实现自定义的C++ API,需要在'paddle/phi/api/lib/api_custom_ -### 2.2 实现InferMeta函数 +### 2.2 实现 InferMeta 函数 -`InferMeta`函数是根据输入参数,推断算子输出Tensor基本信息的函数,推断的信息包括输出Tensor的`shape`、`data type`及`data layout`,同时它也承担了检查输入数据维度、类型等是否合法的功能。 +`InferMeta`函数是根据输入参数,推断算子输出 Tensor 基本信息的函数,推断的信息包括输出 Tensor 的`shape`、`data type`及`data layout`,同时它也承担了检查输入数据维度、类型等是否合法的功能。 -[TraceOp的InferMeta函数](https://github.com/PaddlePaddle/Paddle/blob/befa78ea3fa9d0dae096a7de91f626b0c31daee8/paddle/phi/infermeta/unary.cc#L721) 实现如下: +[TraceOp 的 InferMeta 函数](https://github.com/PaddlePaddle/Paddle/blob/befa78ea3fa9d0dae096a7de91f626b0c31daee8/paddle/phi/infermeta/unary.cc#L721) 实现如下: ```cpp void TraceInferMeta( @@ -291,24 +291,24 @@ void TraceInferMeta( } ``` -其中,`MetaTensor`是对底层异构Tensor的抽象封装,仅支持对底层Tensor的维度、数据类型、布局等属性进行读取和设置,具体方法请参考 [meta_tensor.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/core/meta_tensor.h)。 +其中,`MetaTensor`是对底层异构 Tensor 的抽象封装,仅支持对底层 Tensor 的维度、数据类型、布局等属性进行读取和设置,具体方法请参考 [meta_tensor.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/core/meta_tensor.h)。 -**InferMeta的实现位置** +**InferMeta 的实现位置** -InferMeta的文件放置规则(以Tensor输入个数为判定标准): +InferMeta 的文件放置规则(以 Tensor 输入个数为判定标准): -- `nullary.h`:没有输入Tensor参数的函数 -- `unary.h`:仅有一个输入Tensor参数的函数 -- `binary.h`:有两个输入Tensor参数的函数 -- `ternary.h`:有三个输入Tensor参数的函数 -- `multiary.h`:有三个以上输入Tensor或者输入为`vector`的函数 -- `backward.h`:反向op的InferMeta函数一律在此文件中,不受前序规则限制 +- `nullary.h`:没有输入 Tensor 参数的函数 +- `unary.h`:仅有一个输入 Tensor 参数的函数 +- `binary.h`:有两个输入 Tensor 参数的函数 +- `ternary.h`:有三个输入 Tensor 参数的函数 +- `multiary.h`:有三个以上输入 Tensor 或者输入为`vector`的函数 +- `backward.h`:反向 op 的 InferMeta 函数一律在此文件中,不受前序规则限制 -**InferMeta的编译时与运行时** +**InferMeta 的编译时与运行时** -在我们的静态图网络中,`InferMeta`操作在[编译时(compile time)和运行时(run 
time)](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md#%E8%AE%A9%E6%88%91%E4%BB%AC%E5%9C%A8fluid%E7%A8%8B%E5%BA%8F%E5%AE%9E%E4%BE%8B%E4%B8%AD%E5%8C%BA%E5%88%86%E7%BC%96%E8%AF%91%E6%97%B6%E5%92%8C%E8%BF%90%E8%A1%8C%E6%97%B6)都会被调用,在compile time时,由于真实的维度未知,框架内部用-1来表示,在run time时,用实际的维度表示,因此维度的值在compile time和 run time时可能不一致,如果存在维度的判断和运算操作,InferMeta就需要区分compile time 和 run time。 +在我们的静态图网络中,`InferMeta`操作在[编译时(compile time)和运行时(run time)](https://github.com/PaddlePaddle/FluidDoc/blob/release/1.2/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md#%E8%AE%A9%E6%88%91%E4%BB%AC%E5%9C%A8fluid%E7%A8%8B%E5%BA%8F%E5%AE%9E%E4%BE%8B%E4%B8%AD%E5%8C%BA%E5%88%86%E7%BC%96%E8%AF%91%E6%97%B6%E5%92%8C%E8%BF%90%E8%A1%8C%E6%97%B6)都会被调用,在 compile time 时,由于真实的维度未知,框架内部用-1 来表示,在 run time 时,用实际的维度表示,因此维度的值在 compile time 和 run time 时可能不一致,如果存在维度的判断和运算操作,InferMeta 就需要区分 compile time 和 run time。 -对于此类InferMeta函数,需要在函数声明的参数列表末尾增加 `MetaConfig` 参数,例如: +对于此类 InferMeta 函数,需要在函数声明的参数列表末尾增加 `MetaConfig` 参数,例如: ``` void ConcatInferMeta(const std::vector& x, @@ -319,7 +319,7 @@ void ConcatInferMeta(const std::vector& x, 然后在函数体中,使用 `config.is_runtime` 判断出于编译时还是运行时。 -具体地,以下两种情况需要区分compile time和 run time。 +具体地,以下两种情况需要区分 compile time 和 run time。 1. 检查 @@ -330,9 +330,9 @@ void ConcatInferMeta(const std::vector& x, PADDLE_ENFORCE_GT(x.dims()[i] , 10) ``` - 在compile time的时候,x.dims()[i]可能等于-1,导致这个PADDLE_ENFORCE_GT报错退出。 + 在 compile time 的时候,x.dims()[i]可能等于-1,导致这个 PADDLE_ENFORCE_GT 报错退出。 - 如果用了以下paddle中定义的宏进行判断: + 如果用了以下 paddle 中定义的宏进行判断: ```cpp PADDLE_ENFORCE_EQ (x.dims()[i] , 10) @@ -343,7 +343,7 @@ void ConcatInferMeta(const std::vector& x, PADDLE_ENFORCE_LE (x.dims()[i] , 10) ``` - 都需要注意区分compile time和run time + 都需要注意区分 compile time 和 run time 2. 
运算 @@ -354,7 +354,7 @@ void ConcatInferMeta(const std::vector& x, y_dim[0] = x_dim[i] + 10 ``` - 在compile time的时候,x_dim[i]可能等于-1,得到的 y_dim[0] 等于 9,是不符合逻辑的 + 在 compile time 的时候,x_dim[i]可能等于-1,得到的 y_dim[0] 等于 9,是不符合逻辑的 如果用到了类似以下的运算操作 @@ -366,16 +366,16 @@ void ConcatInferMeta(const std::vector& x, y_dim[i] = x_dim[i] + z_dim[i] ``` - 都需要区分compile time和run time + 都需要区分 compile time 和 run time 3. 处理的标准 - - 检查: compile time的时候不判断维度等于-1的情况,但在runtime的时候检查 - - 运算: -1和其他数做任何运算都要等于-1 + - 检查: compile time 的时候不判断维度等于-1 的情况,但在 runtime 的时候检查 + - 运算: -1 和其他数做任何运算都要等于-1 4. 参考代码 - (1) 判断的实现方法可以参考 [SigmoidCrossEntropyWithLogitsInferMeta](https://github.com/PaddlePaddle/Paddle/blob/cd28cddbfb5f5643947291e9a640ecd414dc8dae/paddle/phi/infermeta/binary.cc#L650),SigmoidCrossEntropyWithLogits 要求X和labels的两个输入,除了最后一维以外,其他的维度完全一致 + (1) 判断的实现方法可以参考 [SigmoidCrossEntropyWithLogitsInferMeta](https://github.com/PaddlePaddle/Paddle/blob/cd28cddbfb5f5643947291e9a640ecd414dc8dae/paddle/phi/infermeta/binary.cc#L650),SigmoidCrossEntropyWithLogits 要求 X 和 labels 的两个输入,除了最后一维以外,其他的维度完全一致 ```cpp bool check = true; @@ -397,7 +397,7 @@ void ConcatInferMeta(const std::vector& x, } ``` - (2) 运算的实现可以参考 [ConcatInferMeta](https://github.com/PaddlePaddle/Paddle/blob/0604df9e70dfe7be8a21df6a80d9fa6d4939bd9d/paddle/phi/infermeta/multiary.cc#L323),concat在InferShape判断时,调用`ComputeAndCheckShape`,除了进行concat轴之外,其他的维度完全一致;在生成output的维度时,把concat轴的维度求和,其他的维度和输入保持一致。 + (2) 运算的实现可以参考 [ConcatInferMeta](https://github.com/PaddlePaddle/Paddle/blob/0604df9e70dfe7be8a21df6a80d9fa6d4939bd9d/paddle/phi/infermeta/multiary.cc#L323),concat 在 InferShape 判断时,调用`ComputeAndCheckShape`,除了进行 concat 轴之外,其他的维度完全一致;在生成 output 的维度时,把 concat 轴的维度求和,其他的维度和输入保持一致。 ```cpp const size_t n = inputs_dims.size(); @@ -452,55 +452,55 @@ void ConcatInferMeta(const std::vector& x, } ``` -## 3. 新增算子Kernel +## 3. 
新增算子 Kernel -新增算子Kernel在 `paddle/phi/kernels` 目录中完成 +新增算子 Kernel 在 `paddle/phi/kernels` 目录中完成 -### 3.1 kernels目录结构 +### 3.1 kernels 目录结构 `paddle/phi/kernels` 基本目录结构如下 ``` paddle/phi/kernels -./ (根目录放置设备无关的kernel声明和实现) -./cpu(仅放置cpu后端的kernel实现) -./gpu(仅放置gpu后端的kernel实现) -./xpu(仅放置百度kunlun后端的kernel实现) +./ (根目录放置设备无关的 kernel 声明和实现) +./cpu(仅放置 cpu 后端的 kernel 实现) +./gpu(仅放置 gpu 后端的 kernel 实现) +./xpu(仅放置百度 kunlun 后端的 kernel 实现) ./gpudnn -./funcs(放置一些支持多设备的、在多个kernel中使用的公共functor和functions) +./funcs(放置一些支持多设备的、在多个 kernel 中使用的公共 functor 和 functions) ... ``` -一般情况下,新增算子仅需要关注kernels根目录及kernel所支持设备的子目录即可: +一般情况下,新增算子仅需要关注 kernels 根目录及 kernel 所支持设备的子目录即可: -- kernels 根目录,放置设备无关的kernel.h和kernel.cc - - 例如,一个kernel除了一些简单的设备无关的C++逻辑,关键计算逻辑均是复用已有的phi kernel函数实现的,那么这个kernel实现是天然能够适配所有设备及后端的,所以它的声明和实现均直接放置到kernels目录下即可 -- kernels下一级子目录,原则上按照backend分类按需新建,放置特定后端的kernel实现代码 +- kernels 根目录,放置设备无关的 kernel.h 和 kernel.cc + - 例如,一个 kernel 除了一些简单的设备无关的 C++逻辑,关键计算逻辑均是复用已有的 phi kernel 函数实现的,那么这个 kernel 实现是天然能够适配所有设备及后端的,所以它的声明和实现均直接放置到 kernels 目录下即可 +- kernels 下一级子目录,原则上按照 backend 分类按需新建,放置特定后端的 kernel 实现代码 -下面给出两种典型kernel新增时文件放置位置的说明: +下面给出两种典型 kernel 新增时文件放置位置的说明: -1. 新增与设备无关的Kernel +1. 新增与设备无关的 Kernel - 该类Kernel 实现与所有硬件设备无关,只需要一份代码实现,可参考reshape kernel。其新增文件及目录包括: + 该类 Kernel 实现与所有硬件设备无关,只需要一份代码实现,可参考 reshape kernel。其新增文件及目录包括: - `paddle/phi/kernels/xxx_kernel.h` - `paddle/phi/kernels/xxx_kernel.cc` - 如果是反向kernel,则使用 `grad_kernel` 后缀即可: + 如果是反向 kernel,则使用 `grad_kernel` 后缀即可: - `paddle/phi/kernels/xxx_grad_kernel.h` - `paddle/phi/kernels/xxx_grad_kernel.cc` -2. 新增与设备相关、且CPU&GPU分别实现的Kernel +2. 
新增与设备相关、且 CPU&GPU 分别实现的 Kernel - 还有部分Kernel的实现,CPU 和GPU 上逻辑不同,此时没有共同实现的代码,需要区分CPU和GPU 硬件。 - CPU 的实现位于`paddle/phi/kernels/cpu` 目录下; GPU的实现位于`paddle/phi/kernels/gpu` 下,可参考dot kernel,cast kernel等。其新增文件及目录包括: + 还有部分 Kernel 的实现,CPU 和 GPU 上逻辑不同,此时没有共同实现的代码,需要区分 CPU 和 GPU 硬件。 + CPU 的实现位于`paddle/phi/kernels/cpu` 目录下; GPU 的实现位于`paddle/phi/kernels/gpu` 下,可参考 dot kernel,cast kernel 等。其新增文件及目录包括: - `paddle/phi/kernels/xxx_kernel.h` - `paddle/phi/kernels/cpu/xxx_kernel.cc` - `paddle/phi/kernels/gpu/xxx_kernel.cu` - 相应地,反向kernel新增文件为: + 相应地,反向 kernel 新增文件为: - `paddle/phi/kernels/xxx_grad_kernel.h` - `paddle/phi/kernels/cpu/xxx_grad_kernel.cc` @@ -510,9 +510,9 @@ paddle/phi/kernels #### 3.2.1 声明 Kernel 函数 -- 以trace op为例,首先在`paddle/phi/kernels`目录下新建 [`trace_kernel.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/trace_kernel.h) 文件,用于放置前向Kernel函数声明。 +- 以 trace op 为例,首先在`paddle/phi/kernels`目录下新建 [`trace_kernel.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/trace_kernel.h) 文件,用于放置前向 Kernel 函数声明。 -> 注:Kernel函数声明的参数列表原则上与Python API参数列表一致 +> 注:Kernel 函数声明的参数列表原则上与 Python API 参数列表一致 ``` template @@ -524,27 +524,27 @@ void TraceKernel(const Context& dev_ctx, DenseTensor* out); ``` -> 注:所有的kernel声明,统一放在namespace phi中,缩短函数的调用前缀使调用写法更加简洁 +> 注:所有的 kernel 声明,统一放在 namespace phi 中,缩短函数的调用前缀使调用写法更加简洁 说明如下: 1. 模板为固定写法,第一个模板参数为数据类型`T`,第二个模板参数为设备上下文`Context`,`template ` -2. 函数命名:Kernel 的命名统一加Kernel 后缀。即:Kernel名称+Kernel 后缀,驼峰式命名,例如:AddKernel -3. 参数顺序:Context, InputTensor …, Attribute …, OutTensor* 。即:第一位参数为Context, 后边为输入的Tensor, 接着是输入的属性参数, 最后是输出的Tensor的指针参数。如果Kernel没有输入Tensor 或者没有属性参数,略过即可 -2. 第1个函数参数,类型为 `const Context&` 的dev_ctx -3. 第2个函数参数,输入Tensor,类型一般为 `const DenseTensor&` -4. 第3-5个函数参数,均为attribute(根据具体的含义,选择特定的int,float,vector等类型),多个attribute 可以参考python端API定义的顺序,变量命名对齐python api -5. 第6个函数参数,输出Tensor,类型一般为`DenseTensor*`,多个output 可以参考python端API定义的顺序, 变量命名对齐python api +2. 
函数命名:Kernel 的命名统一加 Kernel 后缀。即:Kernel 名称+Kernel 后缀,驼峰式命名,例如:AddKernel +3. 参数顺序:Context, InputTensor …, Attribute …, OutTensor* 。即:第一位参数为 Context, 后边为输入的 Tensor, 接着是输入的属性参数, 最后是输出的 Tensor 的指针参数。如果 Kernel 没有输入 Tensor 或者没有属性参数,略过即可 +2. 第 1 个函数参数,类型为 `const Context&` 的 dev_ctx +3. 第 2 个函数参数,输入 Tensor,类型一般为 `const DenseTensor&` +4. 第 3-5 个函数参数,均为 attribute(根据具体的含义,选择特定的 int,float,vector等类型),多个 attribute 可以参考 python 端 API 定义的顺序,变量命名对齐 python api +5. 第 6 个函数参数,输出 Tensor,类型一般为`DenseTensor*`,多个 output 可以参考 python 端 API 定义的顺序, 变量命名对齐 python api > **特殊情况说明:** -> 1. **特殊模板参数**:对于某些Kernel (如reshape ,copy),这些kernel不关注数据类型T, 可以省去第一个模板参数,即为:`template ` -> 2. **特殊输入类型**:对于某些特殊Kernel (如concat 和split kernel)的部分输入或输出是数组类型的DenseTensor, 此时输入类型为:`const std::vector&`; 输出类型为:`std::vector` +> 1. **特殊模板参数**:对于某些 Kernel (如 reshape ,copy),这些 kernel 不关注数据类型 T, 可以省去第一个模板参数,即为:`template ` +> 2. **特殊输入类型**:对于某些特殊 Kernel (如 concat 和 split kernel)的部分输入或输出是数组类型的 DenseTensor, 此时输入类型为:`const std::vector&`; 输出类型为:`std::vector` #### 3.2.2 实现 Kernel 函数 -**复用已有Kernel实现设备无关Kernel函数** +**复用已有 Kernel 实现设备无关 Kernel 函数** -由于目前的Kernel复用机制为新推出的功能,暂未对已有算子进行升级改造,所以这里我们以一个不在框架中的linear算子(out = x * w + b)为例来介绍复用已有Kernel实现设备无关Kernel函数。(linear kernel 的实现源码需要放置在`paddle/phi/kernels/linear_kernel.cc`) +由于目前的 Kernel 复用机制为新推出的功能,暂未对已有算子进行升级改造,所以这里我们以一个不在框架中的 linear 算子(out = x * w + b)为例来介绍复用已有 Kernel 实现设备无关 Kernel 函数。(linear kernel 的实现源码需要放置在`paddle/phi/kernels/linear_kernel.cc`) `LinearKernel` 的实现代码如下: @@ -564,20 +564,20 @@ void LinearKernel(const Context& dev_ctx, AddKernel(dev_ctx, out, b, out); // 复用 AddKernel } ``` -复用Kernel的流程包括: +复用 Kernel 的流程包括: 1. 在源文件中 include 要复用 Kernel 的头文件 -2. 直接调用相应的Kernel函数进行复用 +2. 
直接调用相应的 Kernel 函数进行复用 -注意:设备无关Kernel实现时计算逻辑部分只能复用现有Kernel或设备无关的Functor,不能使用设备相关的语法或者函数接口(如cuda、cudnn等)进行计算处理 +注意:设备无关 Kernel 实现时计算逻辑部分只能复用现有 Kernel 或设备无关的 Functor,不能使用设备相关的语法或者函数接口(如 cuda、cudnn 等)进行计算处理 -**实现设备相关Kernel函数** +**实现设备相关 Kernel 函数** -此处 trace 算子的kernel属于前述第2中情况,即与设备相关,CPU和GPU Kernel需要分别实现。 +此处 trace 算子的 kernel 属于前述第 2 中情况,即与设备相关,CPU 和 GPU Kernel 需要分别实现。 -- cpu kernel实现位于:[paddle/phi/kernels/cpu/trace_kernel.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/cpu/trace_kernel.cc) -- gpu kernel实现位于:[paddle/phi/kernels/gpu/trace_kernel.cu](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/gpu/trace_kernel.cu) +- cpu kernel 实现位于:[paddle/phi/kernels/cpu/trace_kernel.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/cpu/trace_kernel.cc) +- gpu kernel 实现位于:[paddle/phi/kernels/gpu/trace_kernel.cu](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/gpu/trace_kernel.cu) -下面为 `TraceKernel` 的cpu实现: +下面为 `TraceKernel` 的 cpu 实现: ```cpp template @@ -603,16 +603,16 @@ void TraceKernel(const Context& dev_ctx, } ``` -此处TraceKernel的实现并未复用其他Kernel,但如果有需要也是可以复用的,Kernel复用时,同样是直接 include 相应Kernel头文件,在函数中调用即可,例如 [triangular_solve_kernel](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/cpu/triangular_solve_kernel.cc) 复用 empty和expand kernel。 +此处 TraceKernel 的实现并未复用其他 Kernel,但如果有需要也是可以复用的,Kernel 复用时,同样是直接 include 相应 Kernel 头文件,在函数中调用即可,例如 [triangular_solve_kernel](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/cpu/triangular_solve_kernel.cc) 复用 empty 和 expand kernel。 -首先在triangular_solve_kernel.cc头部include相应头文件: +首先在 triangular_solve_kernel.cc 头部 include 相应头文件: ```cpp #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/expand_kernel.h" ``` -然后在Kernel实现中即可直接调用以上两个头文件中的Kernel,代码片段如下: +然后在 Kernel 实现中即可直接调用以上两个头文件中的 Kernel,代码片段如下: ```cpp // Tensor broadcast to 'out' and temp 'x_bst' @@ -622,24 +622,24 @@ void TraceKernel(const 
Context& dev_ctx, ExpandKernel(dev_ctx, x, x_bst_dims, &x_bst); ``` -补充:对于Kernel内部临时使用的`DenseTensor`目前推荐使用`Empty`、`EmptyLike`、`Full`和`FullLike`接口进行创建。 +补充:对于 Kernel 内部临时使用的`DenseTensor`目前推荐使用`Empty`、`EmptyLike`、`Full`和`FullLike`接口进行创建。 -反向Kernel的实现与前向是类似的,此处不再赘述,可以直接参考前述对应链接中的代码实现。 +反向 Kernel 的实现与前向是类似的,此处不再赘述,可以直接参考前述对应链接中的代码实现。 **公共函数管理:** -如果有一些函数会被多个Kernel调用,可以创建非 kernel 的文件管理代码,规则如下: +如果有一些函数会被多个 Kernel 调用,可以创建非 kernel 的文件管理代码,规则如下: -1. 仅有当前kernel使用的辅助函数(具体到设备,比如trace的cpu kernel),一律和kernel实现放到同一个设备文件夹中 - - 如果辅助函数相关代码较少,就直接和kernel实现放到同一个`.cc/cu`中 - - 如果辅助函数相关代码较多,就在kernel所在的设备目录创建`.h`管理代码 -2. 有同设备多个kernel使用的辅助函数,在kernel所在的设备目录创建`.h`放置代码 -3. 有跨设备多个kernel使用的辅助函数,在`kernels/funcs`目录下创建`.h/cc/cu`管理代码 +1. 仅有当前 kernel 使用的辅助函数(具体到设备,比如 trace 的 cpu kernel),一律和 kernel 实现放到同一个设备文件夹中 + - 如果辅助函数相关代码较少,就直接和 kernel 实现放到同一个`.cc/cu`中 + - 如果辅助函数相关代码较多,就在 kernel 所在的设备目录创建`.h`管理代码 +2. 有同设备多个 kernel 使用的辅助函数,在 kernel 所在的设备目录创建`.h`放置代码 +3. 有跨设备多个 kernel 使用的辅助函数,在`kernels/funcs`目录下创建`.h/cc/cu`管理代码 4. 如果当前依赖的辅助函数可以直接归类到`kernels/funcs`目录下已有的文件中,则直接放过去,不用创建新的文件 #### 3.2.3 注册 Kernel 函数 -注册kernel的方式比较简单,直接使用注册宏注册即可,示例如下: +注册 kernel 的方式比较简单,直接使用注册宏注册即可,示例如下: ```cpp PD_REGISTER_KERNEL(trace, @@ -656,48 +656,48 @@ PD_REGISTER_KERNEL(trace, ``` 字段说明: -1. `trace`: kernel名称,和Op的名称一致 -2. `CPU`: backend名称, 一般主要就是CPU和GPU -3. `ALL_LAYOUT`: kernel支持的Tensor布局,一般为ALL_LAYOUT,及支持所有布局类型 -4. `phi::TraceKernel`: kernel的函数名称,记得带上namespace phi -5. 剩余的均为Kernel支持的数据类型 +1. `trace`: kernel 名称,和 Op 的名称一致 +2. `CPU`: backend 名称, 一般主要就是 CPU 和 GPU +3. `ALL_LAYOUT`: kernel 支持的 Tensor 布局,一般为 ALL_LAYOUT,及支持所有布局类型 +4. `phi::TraceKernel`: kernel 的函数名称,记得带上 namespace phi +5. 剩余的均为 Kernel 支持的数据类型 > 注意: -> 1. 如果忘记添加注册相关的头文件,会曝出一个xx的错误,如果遇到,请检查include的头文件 -> 2. phi下的注册宏后边是带函数体{ },不是直接加分号,此处与旧的注册宏有小区别 -> 3. 注册kernel的宏声明需要在global namespace +> 1. 如果忘记添加注册相关的头文件,会曝出一个 xx 的错误,如果遇到,请检查 include 的头文件 +> 2. phi 下的注册宏后边是带函数体{ },不是直接加分号,此处与旧的注册宏有小区别 +> 3. 
注册 kernel 的宏声明需要在 global namespace ### 3.3 编译测试 -实现完Op和Kernel之后,建议先编译测试一下,编译成功之后,再继续后面的步骤。 +实现完 Op 和 Kernel 之后,建议先编译测试一下,编译成功之后,再继续后面的步骤。 详细的编译环境准备和执行流程可参考[从源码编译](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/compile/fromsource.html),下面简单介绍几个主要步骤。 -在 `Paddle` 代码目录下创建并切换到build目录: +在 `Paddle` 代码目录下创建并切换到 build 目录: ``` mkdir build && cd build ``` -执行`cmake`命令,具体选项可参考[从源码编译](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/compile/fromsource.html)中的介绍,下面的命令为编译Python3.7,GPU版本,带测试,Release版本的Paddle。 +执行`cmake`命令,具体选项可参考[从源码编译](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/compile/fromsource.html)中的介绍,下面的命令为编译 Python3.7,GPU 版本,带测试,Release 版本的 Paddle。 ``` cmake .. -DPY_VERSION=3.7 -DWITH_GPU=ON -DWITH_TESTING=ON -DCMAKE_BUILD_TYPE=Release ``` -在`build`目录下,运行下面命令可以进行编译整个paddle: +在`build`目录下,运行下面命令可以进行编译整个 paddle: ``` make -j$(nproc) ``` **注意:** -新增op后请重新执行`cmake`命令,然后再执行`make`命令编译paddle。 +新增 op 后请重新执行`cmake`命令,然后再执行`make`命令编译 paddle。 -## 4. 封装Python API +## 4. 
封装 Python API -系统会对新增的Op即Kernel自动绑定Python,并链接到生成的lib库中,然后在Python端定义相应的API,在API内调用新增算子,并添加相应的中英文文档描述即可。 +系统会对新增的 Op 即 Kernel 自动绑定 Python,并链接到生成的 lib 库中,然后在 Python 端定义相应的 API,在 API 内调用新增算子,并添加相应的中英文文档描述即可。 -[`paddle.trace`](https://github.com/PaddlePaddle/Paddle/blob/bd4dc3be34584f9b273ecec07297fb05e1cf4c52/python/paddle/tensor/math.py#L2277) 的Python API实现位于 `python/paddle/tensor/math.py` 中,具体实现如下: +[`paddle.trace`](https://github.com/PaddlePaddle/Paddle/blob/bd4dc3be34584f9b273ecec07297fb05e1cf4c52/python/paddle/tensor/math.py#L2277) 的 Python API 实现位于 `python/paddle/tensor/math.py` 中,具体实现如下: ```python def trace(x, offset=0, axis1=0, axis2=1, name=None): @@ -792,10 +792,10 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): return out ``` -- Python API 实现要点(详见[飞桨API Python 端开发指南](./new_python_api_cn.html)) +- Python API 实现要点(详见[飞桨 API Python 端开发指南](./new_python_api_cn.html)) - 对输入参数进行合法性检查,即 `__check_input(input, offset, axis1, axis2)` - 添加动态图分支调用,即 `if in_dygraph_mode` 新动态图分支和 `if _in_legacy_dygraph` 旧动态图分支 - - 添加静态图分支调用,即dygraph分支后剩余的代码 + - 添加静态图分支调用,即 dygraph 分支后剩余的代码 ## 5. 添加单元测试 @@ -804,7 +804,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): **注意:** -单测中的测试用例需要尽可能的覆盖Kernel中的所有分支。 +单测中的测试用例需要尽可能的覆盖 Kernel 中的所有分支。 ### 5.1 前向算子单测 @@ -812,7 +812,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): 1. 在`setUp`函数定义输入、输出,以及相关的属性参数。 2. 生成随机的输入数据。 -3. 在Python脚本中实现与前向算子相同的计算逻辑,得到输出值,与算子前向计算的输出进行对比。 +3. 在 Python 脚本中实现与前向算子相同的计算逻辑,得到输出值,与算子前向计算的输出进行对比。 4. 
反向计算已经自动集成进测试框架,直接调用相应接口即可。 @@ -846,9 +846,9 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): 上面的代码首先导入依赖的包,下面是对`setUp`函数中操作的重要变量的详细解释: - `self.op_type = "trace" ` : 定义类型,与算子定义的名称相同。 - - `self.python_api = paddle.trace` : 定义python api,与python调用接口一致。 + - `self.python_api = paddle.trace` : 定义 python api,与 python 调用接口一致。 - `self.inputs` : 定义输入,类型为`numpy.array`,并初始化。 - - `self.outputs` : 定义输出,并在Python脚本中完成与算子同样的计算逻辑,返回Python端的计算结果。 + - `self.outputs` : 定义输出,并在 Python 脚本中完成与算子同样的计算逻辑,返回 Python 端的计算结果。 ### 5.2 反向算子单测 @@ -857,11 +857,11 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): - `test_check_grad`中调用`check_grad`使用数值法检测梯度正确性和稳定性。 - 第一个参数`['Input']` : 指定对输入变量`Input`做梯度检测。 - 第二个参数`'Out'` : 指定前向网络最终的输出目标变量`Out`。 - - 第三个参数`check_eager` : `check_eager=True`表示开启新动态图(eager模式)单测,`check_eager`默认为`False`。 + - 第三个参数`check_eager` : `check_eager=True`表示开启新动态图(eager 模式)单测,`check_eager`默认为`False`。 -- 对于存在多个输入的反向算子测试,需要指定只计算部分输入梯度的case +- 对于存在多个输入的反向算子测试,需要指定只计算部分输入梯度的 case - 例如,`test_elementwise_sub_op.py`中的`test_check_grad_ingore_x`和`test_check_grad_ingore_y`分支用来测试只需要计算一个输入梯度的情况 - - 此处第三个参数max_relative_error:指定检测梯度时能容忍的最大错误值。 + - 此处第三个参数 max_relative_error:指定检测梯度时能容忍的最大错误值。 ```python def test_check_grad_ingore_x(self): @@ -874,9 +874,9 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): ``` ### 5.3 Python API 单元测试 -Python API也需要编写相关的单测进行测试,详见[添加 Python API 单元测试](new_python_api_cn.html#id2) +Python API 也需要编写相关的单测进行测试,详见[添加 Python API 单元测试](new_python_api_cn.html#id2) -其他有关单元测试添加的注意事项请参考 [《Op开发手册》](https://github.com/PaddlePaddle/Paddle/wiki/Operator-Development-Manual-Index) 及 [《Paddle单元测试规范》](https://github.com/PaddlePaddle/Paddle/wiki/PaddlePaddle-Unit-test-specification)。 +其他有关单元测试添加的注意事项请参考 [《Op 开发手册》](https://github.com/PaddlePaddle/Paddle/wiki/Operator-Development-Manual-Index) 及 [《Paddle 单元测试规范》](https://github.com/PaddlePaddle/Paddle/wiki/PaddlePaddle-Unit-test-specification)。 ### 5.3 编译和执行 @@ -901,18 +901,18 @@ ctest -R test_trace_op -V ### 6.1 报错检查 
-实现算子时检查数据的合法性需要使用PADDLE_ENFORCE以及PADDLE_ENFORCE_EQ等宏定义,基本格式如下: +实现算子时检查数据的合法性需要使用 PADDLE_ENFORCE 以及 PADDLE_ENFORCE_EQ 等宏定义,基本格式如下: ``` PADDLE_ENFORCE(表达式, 错误提示信息) -PADDLE_ENFORCE_EQ(比较对象A, 比较对象B, 错误提示信息) +PADDLE_ENFORCE_EQ(比较对象 A, 比较对象 B, 错误提示信息) ``` -如果表达式为真,或者比较对象A=B,则检查通过,否则会终止程序运行,向用户反馈相应的错误提示信息。 +如果表达式为真,或者比较对象 A=B,则检查通过,否则会终止程序运行,向用户反馈相应的错误提示信息。 为了确保提示友好易懂,开发者需要注意其使用方法。 **总体原则:** -任何使用了PADDLE_ENFORCE与PADDLE_ENFORCE_XX检查的地方,必须有详略得当的备注解释!**错误提示信息不能为空!** +任何使用了 PADDLE_ENFORCE 与 PADDLE_ENFORCE_XX 检查的地方,必须有详略得当的备注解释!**错误提示信息不能为空!** 报错提示信息书写建议: @@ -928,15 +928,15 @@ PADDLE_ENFORCE_EQ(比较对象A, 比较对象B, 错误提示信息) - 例如:`Suggested Fix:If your classifier expects one-hot encoding label,check your n_classes argument to the estimatorand/or the shape of your label.Otherwise, check the shape of your label.` -更详细的报错检查规范介绍请参考 [《Paddle报错信息文案书写规范》](https://github.com/PaddlePaddle/Paddle/wiki/Paddle-Error-Message-Writing-Specification)。 +更详细的报错检查规范介绍请参考 [《Paddle 报错信息文案书写规范》](https://github.com/PaddlePaddle/Paddle/wiki/Paddle-Error-Message-Writing-Specification)。 ### 6.2 算子兼容性问题 -对算子的修改需要考虑兼容性问题,要保证算子修改之后,之前的模型都能够正常加载及运行,即新版本的Paddle预测库能成功加载运行旧版本训练的模型。**所以,需要保证算子当前的所有输入输出参数不能被修改(文档除外)或删除,可以新增参数,但是新增的Tensor类型变量需要设置为optional,非Tensor变量需要设置默认值。更多详细内容请参考[OP修改规范:Input/Output/Attribute只能做兼容修改](https://github.com/PaddlePaddle/Paddle/wiki/OP-Input-Output-Attribute-Compatibility-Modification)** 。 +对算子的修改需要考虑兼容性问题,要保证算子修改之后,之前的模型都能够正常加载及运行,即新版本的 Paddle 预测库能成功加载运行旧版本训练的模型。**所以,需要保证算子当前的所有输入输出参数不能被修改(文档除外)或删除,可以新增参数,但是新增的 Tensor 类型变量需要设置为 optional,非 Tensor 变量需要设置默认值。更多详细内容请参考[OP 修改规范:Input/Output/Attribute 只能做兼容修改](https://github.com/PaddlePaddle/Paddle/wiki/OP-Input-Output-Attribute-Compatibility-Modification)** 。 ### 6.3 显存优化 -#### 6.3.1 为可原位计算的算子注册inplace 
-有些算子的计算逻辑中,输出可以复用输入的显存空间,也可称为原位计算。例如[reshape](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/reshape_kernel.cc)中,输出`out`可以复用输入`x`的显存空间,因为该算子的计算逻辑不会改变`x`的实际数据,只是修改它的shape,输出和输入复用同一块显存空间不影响结果。对于这类算子,可以注册`inlace`,从而让框架在运行时自动地进行显存优化。 +#### 6.3.1 为可原位计算的算子注册 inplace +有些算子的计算逻辑中,输出可以复用输入的显存空间,也可称为原位计算。例如[reshape](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/reshape_kernel.cc)中,输出`out`可以复用输入`x`的显存空间,因为该算子的计算逻辑不会改变`x`的实际数据,只是修改它的 shape,输出和输入复用同一块显存空间不影响结果。对于这类算子,可以注册`inlace`,从而让框架在运行时自动地进行显存优化。 注册方式为在算子的 YAML 配置中添加`inplace`配置项,格式如:`(x -> out)`,详见[YAML 配置规则](new_cpp_op_cn.html#yaml)。示例: @@ -949,12 +949,12 @@ PADDLE_ENFORCE_EQ(比较对象A, 比较对象B, 错误提示信息) ``` #### 6.3.2 减少反向算子中的无关变量 -通常反向算子会依赖于前向算子的某些输入、输出Tensor,以供反向算子计算使用。但有些情况下,反向算子不需要前向Op的所有输入和输出;有些情况下,反向算子只需要前向算子的部分输入和输出;有些情况下,反向算子只需要使用前向算子中输入和输出变量的Shape和LoD信息。若开发者在注册反向算子时,将不必要的前向算子输入和输出作为反向算子的输入,会导致这部分显存无法被框架现有的显存优化策略优化,从而导致模型显存占用过高。 +通常反向算子会依赖于前向算子的某些输入、输出 Tensor,以供反向算子计算使用。但有些情况下,反向算子不需要前向 Op 的所有输入和输出;有些情况下,反向算子只需要前向算子的部分输入和输出;有些情况下,反向算子只需要使用前向算子中输入和输出变量的 Shape 和 LoD 信息。若开发者在注册反向算子时,将不必要的前向算子输入和输出作为反向算子的输入,会导致这部分显存无法被框架现有的显存优化策略优化,从而导致模型显存占用过高。 所以在定义反向算子时需要注意以下几点: -- 如果反向不需要前向的某些输入或输出参数,则无需在args中设置。 -- 如果有些反向算子需要依赖前向算子的输入或输出变量的的Shape或LoD,但不依赖于变量中Tensor的内存Buffer数据,且不能根据其他变量推断出该Shape和LoD,则可以通过`no_need_buffer`对该变量进行配置,详见[YAML 配置规则](new_cpp_op_cn.html#yaml)。示例: +- 如果反向不需要前向的某些输入或输出参数,则无需在 args 中设置。 +- 如果有些反向算子需要依赖前向算子的输入或输出变量的的 Shape 或 LoD,但不依赖于变量中 Tensor 的内存 Buffer 数据,且不能根据其他变量推断出该 Shape 和 LoD,则可以通过`no_need_buffer`对该变量进行配置,详见[YAML 配置规则](new_cpp_op_cn.html#yaml)。示例: ```yaml - backward_api : trace_grad forward : trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out) @@ -966,24 +966,24 @@ PADDLE_ENFORCE_EQ(比较对象A, 比较对象B, 错误提示信息) ### 6.4 性能优化 #### 6.4.1 第三方库的选择 
-在写算子过程中优先使用高性能(如cudnn、mkldnn、mklml、eigen等)中提供的操作,但是一定要做benchmark,有些库中的操作在深度学习任务中可能会比较慢。因为高性能库(如eigen等)中提供的操作为了更为通用,在性能方面可能并不是很好,通常深度学习模型中数据量较小,所以有些情况下可能高性能库中提供的某些操作速度较慢。比如Elementwise系列的所有算子(前向和反向),Elementwise操作在模型中调用的次数比较多,尤其是Elementwise_add,在很多操作之后都需要添加偏置项。在之前的实现中Elementwise_op直接调用Eigen库,由于Elementwise操作在很多情况下需要对数据做Broadcast,而实验发现Eigen库做Broadcast的速度比较慢,慢的原因在这个PR[#6229](https://github.com/PaddlePaddle/Paddle/pull/6229)中有描述。 +在写算子过程中优先使用高性能(如 cudnn、mkldnn、mklml、eigen 等)中提供的操作,但是一定要做 benchmark,有些库中的操作在深度学习任务中可能会比较慢。因为高性能库(如 eigen 等)中提供的操作为了更为通用,在性能方面可能并不是很好,通常深度学习模型中数据量较小,所以有些情况下可能高性能库中提供的某些操作速度较慢。比如 Elementwise 系列的所有算子(前向和反向),Elementwise 操作在模型中调用的次数比较多,尤其是 Elementwise_add,在很多操作之后都需要添加偏置项。在之前的实现中 Elementwise_op 直接调用 Eigen 库,由于 Elementwise 操作在很多情况下需要对数据做 Broadcast,而实验发现 Eigen 库做 Broadcast 的速度比较慢,慢的原因在这个 PR[#6229](https://github.com/PaddlePaddle/Paddle/pull/6229)中有描述。 #### 6.4.2 算子性能优化 -算子的计算速度与输入的数据量有关,对于某些算子可以根据输入数据的Shape和算子的属性参数来选择不同的计算方式。比如concat_op,当axis>=1时,在对多个tensor做拼接过程中需要对每个tensor做很多次拷贝,如果是在GPU上,需要调用cudaMemCopy。相对CPU而言,GPU属于外部设备,所以每次调用GPU的操作都会有一定的额外开销,并且当需要拷贝的次数较多时,这种开销就更为凸现。目前concat_op的实现会根据输入数据的Shape以及axis值来选择不同的调用方式,如果输入的tensor较多,且axis不等于0,则将多次拷贝操作转换成一个CUDA Kernel来完成;如果输入tensor较少,且axis等于0,使用直接进行拷贝。相关实验过程在该PR([#8669](https://github.com/PaddlePaddle/Paddle/pull/8669))中有介绍。 +算子的计算速度与输入的数据量有关,对于某些算子可以根据输入数据的 Shape 和算子的属性参数来选择不同的计算方式。比如 concat_op,当 axis>=1 时,在对多个 tensor 做拼接过程中需要对每个 tensor 做很多次拷贝,如果是在 GPU 上,需要调用 cudaMemCopy。相对 CPU 而言,GPU 属于外部设备,所以每次调用 GPU 的操作都会有一定的额外开销,并且当需要拷贝的次数较多时,这种开销就更为凸现。目前 concat_op 的实现会根据输入数据的 Shape 以及 axis 值来选择不同的调用方式,如果输入的 tensor 较多,且 axis 不等于 0,则将多次拷贝操作转换成一个 CUDA Kernel 来完成;如果输入 tensor 较少,且 axis 等于 0,使用直接进行拷贝。相关实验过程在该 PR([#8669](https://github.com/PaddlePaddle/Paddle/pull/8669))中有介绍。 -由于CUDA Kernel的调用有一定的额外开销,所以如果算子中出现多次调用CUDA Kernel,可能会影响算子的执行速度。比如之前的sequence_expand_op中包含很多CUDA Kernel,通常这些CUDA Kernel处理的数据量较小,所以频繁调用这样的Kernel会影响算子的计算速度,这种情况下最好将这些小的CUDA 
Kernel合并成一个。在优化sequence_expand_op过程(相关PR[#9289](https://github.com/PaddlePaddle/Paddle/pull/9289))中就是采用这种思路,优化后的sequence_expand_op比之前的实现平均快出约1倍左右,相关实验细节在该PR([#9289](https://github.com/PaddlePaddle/Paddle/pull/9289))中有介绍。 +由于 CUDA Kernel 的调用有一定的额外开销,所以如果算子中出现多次调用 CUDA Kernel,可能会影响算子的执行速度。比如之前的 sequence_expand_op 中包含很多 CUDA Kernel,通常这些 CUDA Kernel 处理的数据量较小,所以频繁调用这样的 Kernel 会影响算子的计算速度,这种情况下最好将这些小的 CUDA Kernel 合并成一个。在优化 sequence_expand_op 过程(相关 PR[#9289](https://github.com/PaddlePaddle/Paddle/pull/9289))中就是采用这种思路,优化后的 sequence_expand_op 比之前的实现平均快出约 1 倍左右,相关实验细节在该 PR([#9289](https://github.com/PaddlePaddle/Paddle/pull/9289))中有介绍。 -减少CPU与GPU之间的拷贝和同步操作的次数。比如fetch操作,在每个迭代之后都会对模型参数进行更新并得到一个loss,并且数据从GPU端到没有页锁定的CPU端的拷贝是同步的,所以频繁的fetch多个参数会导致模型训练速度变慢。 +减少 CPU 与 GPU 之间的拷贝和同步操作的次数。比如 fetch 操作,在每个迭代之后都会对模型参数进行更新并得到一个 loss,并且数据从 GPU 端到没有页锁定的 CPU 端的拷贝是同步的,所以频繁的 fetch 多个参数会导致模型训练速度变慢。 更多算子性能优化方法,请参考 [算子性能优化 方法介绍](../op_optimization/op_optimization_method_introduction_cn.html)。 ### 6.5 稀疏梯度参数更新方法 -目前稀疏梯度在做更新的时候会先对梯度做merge,即对相同参数的梯度做累加,然后做参数以及附加参数(如velocity)的更新。 +目前稀疏梯度在做更新的时候会先对梯度做 merge,即对相同参数的梯度做累加,然后做参数以及附加参数(如 velocity)的更新。 ### 6.6 混合设备调用 -由于GPU是异步执行的,当CPU调用返回之后,GPU端可能还没有真正的执行,所以如果在算子中创建了GPU运行时需要用到的临时变量,当GPU开始运行的时候,该临时变量可能在CPU端已经被释放,这样可能会导致GPU计算出错。 +由于 GPU 是异步执行的,当 CPU 调用返回之后,GPU 端可能还没有真正的执行,所以如果在算子中创建了 GPU 运行时需要用到的临时变量,当 GPU 开始运行的时候,该临时变量可能在 CPU 端已经被释放,这样可能会导致 GPU 计算出错。 -关于GPU中的一些同步和异步操作: +关于 GPU 中的一些同步和异步操作: ``` The following device operations are asynchronous with respect to the host: Kernel launches; @@ -993,22 +993,22 @@ The following device operations are asynchronous with respect to the host: Memory set function calls. 
``` -关于cudaMemCpy和cudaMemCpyAsync注意事项: +关于 cudaMemCpy 和 cudaMemCpyAsync 注意事项: -- 如果数据传输是从GPU端到非页锁定的CPU端,数据传输将是同步,即使调用的是异步拷贝操作。 -- 如果数据传输是从CPU端到CPU端,数据传输将是同步的,即使调用的是异步拷贝操作。 +- 如果数据传输是从 GPU 端到非页锁定的 CPU 端,数据传输将是同步,即使调用的是异步拷贝操作。 +- 如果数据传输是从 CPU 端到 CPU 端,数据传输将是同步的,即使调用的是异步拷贝操作。 更多内容可参考:[Asynchronous Concurrent Execution](https://docs.nvidia.com/cuda/cuda-c-programming-guide/#asynchronous-concurrent-execution),[API synchronization behavior](https://docs.nvidia.com/cuda/cuda-runtime-api/api-sync-behavior.html#api-sync-behavior) ### 6.7 算子数值稳定性问题 -有些算子存在数值稳定性问题,出现数值稳定性的主要原因程序在多次运行时,对浮点型数据施加操作的顺序可能不同,进而导致最终计算结果不同。而GPU是通过多线程并行计算的方式来加速计算的,所以很容易出现对浮点数施加操作的顺序不固定现象。 +有些算子存在数值稳定性问题,出现数值稳定性的主要原因程序在多次运行时,对浮点型数据施加操作的顺序可能不同,进而导致最终计算结果不同。而 GPU 是通过多线程并行计算的方式来加速计算的,所以很容易出现对浮点数施加操作的顺序不固定现象。 -目前发现cudnn中的卷积操作、cudnn中的MaxPooling、CUDA中CudaAtomicXX、ParallelExecutor的Reduce模式下参数梯度的聚合等操作运行结果是非确定的。 +目前发现 cudnn 中的卷积操作、cudnn 中的 MaxPooling、CUDA 中 CudaAtomicXX、ParallelExecutor 的 Reduce 模式下参数梯度的聚合等操作运行结果是非确定的。 -为此Paddle中添加了一些FLAGS,比如使用FLAGS_cudnn_deterministic来强制cudnn使用确定性算法、FLAGS_cpu_deterministic强制CPU端的计算使用确定性方法。 +为此 Paddle 中添加了一些 FLAGS,比如使用 FLAGS_cudnn_deterministic 来强制 cudnn 使用确定性算法、FLAGS_cpu_deterministic 强制 CPU 端的计算使用确定性方法。 ### 6.8 算子的数学公式 -如果算子有数学公式,一定要在代码中将数学公式写明,并在Python API的Doc中显示,因为用户在对比不同框架的计算结果时可能需要了解Paddle对算子是怎么实现的。 +如果算子有数学公式,一定要在代码中将数学公式写明,并在 Python API 的 Doc 中显示,因为用户在对比不同框架的计算结果时可能需要了解 Paddle 对算子是怎么实现的。 ### 6.9 LoD 在算子内部的传导规范 @@ -1044,9 +1044,9 @@ The following device operations are asynchronous with respect to the host: 在前向传导过程,与输入的 LoD 相比较,算子输出的 LoD 可能出现不变、改变和消失这三种情况: - - 不变:适用于所有的 LoD-Transparent 算子与部分的 LoD-Based算子。可以在`InferMeta` 中调用 `ShareLoD()` 直接将输入 Var 的 LoD 共享给输出 Var, 可参考 [lstm_op](https://github.com/PaddlePaddle/Paddle/blob/a88a1faa48a42a8c3737deb0f05da968d200a7d3/paddle/fluid/operators/lstm_op.cc#L92); 如果有多个输入且都可能存在 LoD 的情况,通常默认共享第一个输入, 例如 [elementwise_ops 
forward](https://github.com/PaddlePaddle/Paddle/blob/5d6a1fcf16bcb48d2e66306b27d9994d9b07433c/paddle/fluid/operators/elementwise/elementwise_op.h#L69); + - 不变:适用于所有的 LoD-Transparent 算子与部分的 LoD-Based 算子。可以在`InferMeta` 中调用 `ShareLoD()` 直接将输入 Var 的 LoD 共享给输出 Var, 可参考 [lstm_op](https://github.com/PaddlePaddle/Paddle/blob/a88a1faa48a42a8c3737deb0f05da968d200a7d3/paddle/fluid/operators/lstm_op.cc#L92); 如果有多个输入且都可能存在 LoD 的情况,通常默认共享第一个输入, 例如 [elementwise_ops forward](https://github.com/PaddlePaddle/Paddle/blob/5d6a1fcf16bcb48d2e66306b27d9994d9b07433c/paddle/fluid/operators/elementwise/elementwise_op.h#L69); - - 改变:适用于部分 LoD-Based 算子。在实现 OpKernel 时需考虑输出 LoD 的正确计算,真实的 LoD 在前向计算结束后才能确定,此时仍需要在`InferMeta` 中调用 `ShareLoD()`,以确保CompileTime 时对 LoD Level 做了正确的传导,可参考 [sequence_expand_op](https://github.com/PaddlePaddle/Paddle/blob/565d30950138b9f831caa33904d9016cf53c6c2e/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc); + - 改变:适用于部分 LoD-Based 算子。在实现 OpKernel 时需考虑输出 LoD 的正确计算,真实的 LoD 在前向计算结束后才能确定,此时仍需要在`InferMeta` 中调用 `ShareLoD()`,以确保 CompileTime 时对 LoD Level 做了正确的传导,可参考 [sequence_expand_op](https://github.com/PaddlePaddle/Paddle/blob/565d30950138b9f831caa33904d9016cf53c6c2e/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc); - 消失:适用于输出不再是序列数据的 LoD-Based 算子。此时不用再考虑前向的 LoD 传导问题,可参考 [sequence_pool_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc); @@ -1054,7 +1054,7 @@ The following device operations are asynchronous with respect to the host: - 实现 LoD-Based 算子时,需要处理好 LoD 传导的边界情况,例如对长度为零的输入的支持,并完善相应的单测,单测 case 覆盖空序列出现在 batch 开头、中间和末尾等位置的情况,可参考 [test_lstm_op.py](https://github.com/PaddlePaddle/Paddle/blob/4292bd8687ababc7737cffbddc0d38ead2138c00/python/paddle/fluid/tests/unittests/test_lstm_op.py#L203-L216) - - 对 LoD Level 有明确要求的算子,推荐的做法是在 `InferMeta` 中即完成 LoD Level的检查,例如 
[sequence_pad_op](https://github.com/PaddlePaddle/Paddle/blob/4292bd8687ababc7737cffbddc0d38ead2138c00/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc#L79)。 + - 对 LoD Level 有明确要求的算子,推荐的做法是在 `InferMeta` 中即完成 LoD Level 的检查,例如 [sequence_pad_op](https://github.com/PaddlePaddle/Paddle/blob/4292bd8687ababc7737cffbddc0d38ead2138c00/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc#L79)。 #### 反向传导 @@ -1068,13 +1068,13 @@ Paddle 支持动态图和静态图两种模式,在 YAML 配置文件中完成 ![code_gen_by_yaml](./code_gen_by_yaml.png) - 其中 YAML 配置文件为前向:[`paddle/phi/api/yaml/api.yaml`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/api/yaml/api.yaml) 和反向:[`paddle/phi/api/yaml/backward.yaml`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/api/yaml/backward.yaml)。 -- 动态图中自动生成的代码包括从Python API到计算Kernel间的各层调用接口实现,从底层往上分别为: - - C++ API:一套与Python API参数对齐的C++接口(只做逻辑计算,不支持自动微分),内部封装了底层kernel的选择和调用等逻辑,供上层灵活使用。 - - 注:前向算子生成C++ API头文件和实现代码分别为`paddle/phi/api/include/api.h`和`paddle/phi/api/lib/api.cc`,反向算子生成的头文件和实现代码分别为`paddle/phi/api/backward/backward_api.h`,`paddle/phi/api/lib/backward_api.cc`。 - - 动态图前向函数与反向节点(Autograd API):在C++ API的基础上进行了封装,组成一个提供自动微分功能的C++函数接口。 +- 动态图中自动生成的代码包括从 Python API 到计算 Kernel 间的各层调用接口实现,从底层往上分别为: + - C++ API:一套与 Python API 参数对齐的 C++接口(只做逻辑计算,不支持自动微分),内部封装了底层 kernel 的选择和调用等逻辑,供上层灵活使用。 + - 注:前向算子生成 C++ API 头文件和实现代码分别为`paddle/phi/api/include/api.h`和`paddle/phi/api/lib/api.cc`,反向算子生成的头文件和实现代码分别为`paddle/phi/api/backward/backward_api.h`,`paddle/phi/api/lib/backward_api.cc`。 + - 动态图前向函数与反向节点(Autograd API):在 C++ API 的基础上进行了封装,组成一个提供自动微分功能的 C++函数接口。 - 注:生成的相关代码在`paddle/fluid/eager/api/generated/eager_generated`目录下 - - Python-C 接口:将支持自动微分功能的C++的函数接口(Autograd API)暴露到Python层供Python API调用。 - - 注:生成的Python-C 接口代码在`paddle/fluid/pybind/eager_final_state_op_function_impl.h`中 -- 静态图的执行流程与动态图不同,所以生成的代码也与动态图有较大差异。静态图由于是先组网后计算,Python 
API主要负责组网,算子的调度和kernel计算由静态图执行器来完成,因此自动生成的代码是将配置文件中的算子信息注册到框架内供执行器调度,主要包括[OpMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/op_proto_maker.h)(静态图中定义算子的输入、输出以及属性等信息)和`REGISTER_OPERATOR`(将算子名称以及OpMaker等信息进行注册)等静态图算子注册组件,具体的代码逻辑可参考`paddle/fluid/operators/generated_op.cc` + - Python-C 接口:将支持自动微分功能的 C++的函数接口(Autograd API)暴露到 Python 层供 Python API 调用。 + - 注:生成的 Python-C 接口代码在`paddle/fluid/pybind/eager_final_state_op_function_impl.h`中 +- 静态图的执行流程与动态图不同,所以生成的代码也与动态图有较大差异。静态图由于是先组网后计算,Python API 主要负责组网,算子的调度和 kernel 计算由静态图执行器来完成,因此自动生成的代码是将配置文件中的算子信息注册到框架内供执行器调度,主要包括[OpMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/op_proto_maker.h)(静态图中定义算子的输入、输出以及属性等信息)和`REGISTER_OPERATOR`(将算子名称以及 OpMaker 等信息进行注册)等静态图算子注册组件,具体的代码逻辑可参考`paddle/fluid/operators/generated_op.cc` **注意:由于代码自动生成在编译时进行,所以查看上述生成代码需要先完成[框架的编译](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/compile/fromsource.html)。** diff --git a/docs/dev_guides/api_contributing_guides/new_python_api_cn.md b/docs/dev_guides/api_contributing_guides/new_python_api_cn.md index 51119c89205..6a6dc9ab5b6 100644 --- a/docs/dev_guides/api_contributing_guides/new_python_api_cn.md +++ b/docs/dev_guides/api_contributing_guides/new_python_api_cn.md @@ -1,8 +1,8 @@ -# 飞桨API Python 端开发指南 +# 飞桨 API Python 端开发指南 本文将介绍为 Paddle 开发新的 API 时需要在 Python 端完成的内容以及注意事项。 -## 开发 Python API代码 +## 开发 Python API 代码 这分为两种情况,Paddle 的 API 包含需要开发 c++ 算子的和不需要开发 c++ 算子而仅使用现有 Python API 组合得到的两种,但两种情况下均有 Python 端的开发工作。 @@ -11,7 +11,7 @@ ### 文件位置与 API 名称 -Python API 的文件位置遵循功能相似的放在一起的原则。大的功能分类可以参考 [API目录结构规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md#api目录结构规范)。 +Python API 的文件位置遵循功能相似的放在一起的原则。大的功能分类可以参考 [API 目录结构规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md#api 目录结构规范)。 大部分常用的数组运算 API(在 numpy 中有功能相似的 `numpy.***` API 
)放在 `paddle/tensor` 目录下。具体的功能细分如下: @@ -105,7 +105,7 @@ Tip: 当出现类似把一个元素放入一个集中管理的列表的操作时 这类的接口需要兼容动态图和静态图。在动态图下,函数会被多次执行;而在静态图下,函数仅在组网时被调用,真正被多次执行的是组网得到的结果。但 API 在动态图和静态图下的行为是保持一致的。 -关于 API 的命名,参数命名等的一般规范,可以参考 [飞桨API的设计和命名规范](api_design_guidelines_standard_cn.html#id2)。 +关于 API 的命名,参数命名等的一般规范,可以参考 [飞桨 API 的设计和命名规范](api_design_guidelines_standard_cn.html#id2)。 Python API 一般包含如下的部分: @@ -190,7 +190,7 @@ def ones(shape, dtype=None, name=None): 因为 `fill_constant` 里已经处理了动态图和静态图的情况,所以直接调用即可。 -而如果 API 的实现中需要调用一个C++算子时,则需要根据动态图和静态图使用不同的写法。 +而如果 API 的实现中需要调用一个 C++算子时,则需要根据动态图和静态图使用不同的写法。 #### 动静态图分支 **动态图分支** @@ -222,7 +222,7 @@ if _in_legacy_dygraph(): 对于静态图,一般分为创建输出 Tensor,添加 operator 两步。 ```Python -# LayerHelper是一个用于创建op输出变量、向program中添加op的辅助工具类 +# LayerHelper 是一个用于创建 op 输出变量、向 program 中添加 op 的辅助工具类 helper = LayerHelper('trace', **locals()) # 创建输出 Tensor @@ -239,7 +239,7 @@ helper.append_op( outputs={'Out': [out]}) return out ``` -注意:在`append_op`添加的`inputs`和`outputs`项,其中的key值(静态图中变量名)一般为 YAML 中定义的输入输出Tensor变量名的首字母大写格式,静态图中的变量名可以在`paddle/fluid/operators/generated_op.cc`(需要先开发C++算子并完成编译)文件内对应算子的`OpMaker`中找到;`attrs`项的变量名与 YAML 中相同。 +注意:在`append_op`添加的`inputs`和`outputs`项,其中的 key 值(静态图中变量名)一般为 YAML 中定义的输入输出 Tensor 变量名的首字母大写格式,静态图中的变量名可以在`paddle/fluid/operators/generated_op.cc`(需要先开发 C++算子并完成编译)文件内对应算子的`OpMaker`中找到;`attrs`项的变量名与 YAML 中相同。 这里`trace`中的'Input'没有与 YAML 配置的中'x'直接对应是由于为了兼容旧算子体系下`Trace`算子的`OpMaker`实现而做了额外的映射,新增算子时无需考虑这种情况。 @@ -251,7 +251,7 @@ return out 单元测试相关的开发规范可以参考 - [C++ 算子开发指南-添加单元测试](new_cpp_op_cn.html#tianjiadanyuanceshi) ,[Op开发手册(Operator Development Manual)](https://github.com/PaddlePaddle/Paddle/wiki/Operator-Development-Manual-Index). + [C++ 算子开发指南-添加单元测试](new_cpp_op_cn.html#tianjiadanyuanceshi) ,[Op 开发手册(Operator Development Manual)](https://github.com/PaddlePaddle/Paddle/wiki/Operator-Development-Manual-Index). 在此不作展开,主要讲述 Python API 的单元测试。 @@ -386,8 +386,8 @@ paddle 编译过程中,对于 Python 代码的处理方式是,先把它们 c ## 参考资料 -1. 
[Op开发手册(Operator Development Manual)](https://github.com/PaddlePaddle/Paddle/wiki/Operator-Development-Manual-Index) -2. [飞桨API的设计和命名规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md#api目录结构规范) -3. [新增API 测试及验收规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/dev_guides/api_contributing_guides/api_accpetance_criteria_cn.md) +1. [Op 开发手册(Operator Development Manual)](https://github.com/PaddlePaddle/Paddle/wiki/Operator-Development-Manual-Index) +2. [飞桨 API 的设计和命名规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/dev_guides/api_contributing_guides/api_design_guidelines_standard_cn.md#api目录结构规范) +3. [新增 API 测试及验收规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/dev_guides/api_contributing_guides/api_accpetance_criteria_cn.md) 4. [文档贡献指南](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/docs_contributing_guides_cn.html) -5.
[飞桨 API 文档书写规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/dev_guides/api_contributing_guides/api_docs_guidelines_cn.md) diff --git a/docs/dev_guides/custom_device_docs/custom_device_example_cn.md b/docs/dev_guides/custom_device_docs/custom_device_example_cn.md index b8ce5528db6..d1c3a312238 100644 --- a/docs/dev_guides/custom_device_docs/custom_device_example_cn.md +++ b/docs/dev_guides/custom_device_docs/custom_device_example_cn.md @@ -3,8 +3,8 @@ 本教程介绍如何为 PaddlePaddle 实现一个 CustomDevice 插件,添加一个名为 CustomCPU 的新硬件后端,并进行编译,打包,安装和使用。 > 注意: -> - 请确保已经正确安装了[飞桨develop](https://github.com/PaddlePaddle/Paddle)最新版本 -> - 当前仅支持 `Linux`平台,示例中使用X86_64平台 +> - 请确保已经正确安装了[飞桨 develop](https://github.com/PaddlePaddle/Paddle)最新版本 +> - 当前仅支持 `Linux`平台,示例中使用 X86_64 平台 ## 第一步:实现自定义 Runtime @@ -58,7 +58,7 @@ void InitPlugin(CustomRuntimeParams *params) { - params->device_type : 硬件后端名,具有同名的插件已经注册时,则不会注册 Runtime 。 - params->sub_device_type : 硬件后端子类型名。 -最后,插件需要填充 params->interface 中的回调接口(至少实现 Required 接口,否则 Runtime 不会被注册),完成自定义 Runtime 的初始化。具体API的说明详见[自定义 Runtime 文档](./custom_runtime_cn.html)。 +最后,插件需要填充 params->interface 中的回调接口(至少实现 Required 接口,否则 Runtime 不会被注册),完成自定义 Runtime 的初始化。具体 API 的说明详见[自定义 Runtime 文档](./custom_runtime_cn.html)。 ```c++ #include @@ -163,9 +163,9 @@ C_Status get_min_chunk_size(const C_Device device, size_t *size) { 例子: -### 1.确定Kernel声明 +### 1.确定 Kernel 声明 -查找飞桨发布的头文件`math_kernel.h`中,其Kernel函数声明如下: +查找飞桨发布的头文件`math_kernel.h`中,其 Kernel 函数声明如下: ```c++ // Add 内核函数 @@ -184,30 +184,30 @@ void AddKernel(const Context& dev_ctx, ``` -### 2.Kernel实现与注册 +### 2.Kernel 实现与注册 ```c++ // add_kernel.cc -#include "paddle/phi/extension.h" // 自定义Kernel依赖头文件 +#include "paddle/phi/extension.h" // 自定义 Kernel 依赖头文件 namespace custom_cpu { -// Kernel函数体实现 +// Kernel 函数体实现 template void AddKernel(const Context& dev_ctx, const phi::DenseTensor& x, const phi::DenseTensor& y, phi::DenseTensor* out) { - // 使用dev_ctx的Alloc API为输出参数out分配模板参数T数据类型的内存空间 + // 使用 dev_ctx 的 Alloc API 为输出参数 
out 分配模板参数 T 数据类型的内存空间 dev_ctx.template Alloc(out); - // 使用DenseTensor的numel API获取Tensor元素数量 + // 使用 DenseTensor 的 numel API 获取 Tensor 元素数量 auto numel = x.numel(); - // 使用DenseTensor的data API获取输入参数x的模板参数T类型的数据指针 + // 使用 DenseTensor 的 data API 获取输入参数 x 的模板参数 T 类型的数据指针 auto x_data = x.data(); - // 使用DenseTensor的data API获取输入参数y的模板参数T类型的数据指针 + // 使用 DenseTensor 的 data API 获取输入参数 y 的模板参数 T 类型的数据指针 auto y_data = y.data(); - // 使用DenseTensor的data API获取输出参数out的模板参数T类型的数据指针 + // 使用 DenseTensor 的 data API 获取输出参数 out 的模板参数 T 类型的数据指针 auto out_data = out->data(); // 完成计算逻辑 for (auto i = 0; i < numel; ++i) { @@ -217,12 +217,12 @@ void AddKernel(const Context& dev_ctx, } // namespace custom_cpu -// 全局命名空间内使用注册宏完成Kernel注册 -// CustomCPU的AddKernel注册 -// 参数: add - Kernel名称 +// 全局命名空间内使用注册宏完成 Kernel 注册 +// CustomCPU 的 AddKernel 注册 +// 参数: add - Kernel 名称 // CustomCPU - 后端名称 // ALL_LAYOUT - 内存布局 -// custom_cpu::AddKernel - Kernel函数名 +// custom_cpu::AddKernel - Kernel 函数名 // int - 数据类型名 // int64_t - 数据类型名 // float - 数据类型名 @@ -243,7 +243,7 @@ PD_REGISTER_PLUGIN_KERNEL(add, ### CMake 编译 -**编写CMakeLists.txt** +**编写 CMakeLists.txt** ``` cmake_minimum_required(VERSION 3.10) @@ -257,7 +257,7 @@ set(PADDLE_PLUGIN_DIR "/path/to/site-packages/paddle-plugins/") set(PADDLE_INC_DIR "/path/to/site-packages/paddle/include/") set(PADDLE_LIB_DIR "/path/to/site-packages/paddle/fluid/") -############ 三方依赖,本示例中使用Paddle相同依赖 +############ 三方依赖,本示例中使用 Paddle 相同依赖 set(BOOST_INC_DIR "/path/to/Paddle/build/third_party/boost/src/extern_boost") set(GFLAGS_INC_DIR "/path/to/Paddle/build/third_party/install/gflags/include") set(GLOG_INC_DIR "/path/to/Paddle/build/third_party/install/glog/include") @@ -297,7 +297,7 @@ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/python/.timestamp add_custom_target(python_package ALL DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/python/.timestamp) ``` -**编写setup.py.in** +**编写 setup.py.in** CMake 根据 setup.py.in 生成 setup.py,再使用 setuptools 将插件封装成 wheel 包。 @@ -357,7 +357,7 @@ $ make ### 
setuptools 编译 -**编写setup.py** +**编写 setup.py** setuptools 也可以用于编译插件,并直接打包 @@ -440,7 +440,7 @@ setup( $ python setup.py bdist_wheel ``` -编译完成后在以及 dist 目录下生成wheel包。 +编译完成后在以及 dist 目录下生成 wheel 包。 ### pip 安装 diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_cn.rst b/docs/dev_guides/custom_device_docs/custom_kernel_cn.rst index fe909776abc..c3ed288f06b 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_cn.rst +++ b/docs/dev_guides/custom_device_docs/custom_kernel_cn.rst @@ -2,13 +2,13 @@ 自定义 Kernel #################### -内核函数(简称Kernel)对应算子的具体实现,飞桨框架针对通过自定义Runtime机制注册的外部硬件,提供了配套的自定义Kernel机制,以实现独立于框架的Kernel编码、注册、编译和自动加载使用。 -自定义Kernel基于飞桨对外发布的函数式Kernel声明、对外开放的C++ API和注册宏实现。 +内核函数(简称 Kernel)对应算子的具体实现,飞桨框架针对通过自定义 Runtime 机制注册的外部硬件,提供了配套的自定义 Kernel 机制,以实现独立于框架的 Kernel 编码、注册、编译和自动加载使用。 +自定义 Kernel 基于飞桨对外发布的函数式 Kernel 声明、对外开放的 C++ API 和注册宏实现。 -- `Kernel函数声明 <./custom_kernel_docs/kernel_declare_cn.html>`_ : 介绍飞桨发布的函数式Kernel声明。 -- `Kernel实现接口 <./custom_kernel_docs/cpp_api_cn.html>`_ : 介绍自定义Kernel函数体实现所需的C++ API。 -- `Kernel注册接口 <./custom_kernel_docs/register_api_cn.html>`_ : 介绍自定义Kernel注册宏。 +- `Kernel 函数声明 <./custom_kernel_docs/kernel_declare_cn.html>`_ : 介绍飞桨发布的函数式 Kernel 声明。 +- `Kernel 实现接口 <./custom_kernel_docs/cpp_api_cn.html>`_ : 介绍自定义 Kernel 函数体实现所需的 C++ API。 +- `Kernel 注册接口 <./custom_kernel_docs/register_api_cn.html>`_ : 介绍自定义 Kernel 注册宏。 .. 
toctree:: diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_docs/context_api_cn.md b/docs/dev_guides/custom_device_docs/custom_kernel_docs/context_api_cn.md index 0221c1a1bee..bc34da8d502 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_docs/context_api_cn.md +++ b/docs/dev_guides/custom_device_docs/custom_kernel_docs/context_api_cn.md @@ -1,28 +1,28 @@ # Context API ## CustomContext -`CustomContext`为自定义Kernel函数模板参数Context的实参,请参照[custom_context.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/backends/custom/custom_context.h) +`CustomContext`为自定义 Kernel 函数模板参数 Context 的实参,请参照[custom_context.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/backends/custom/custom_context.h) ```c++ // 构造函数 - // 参数:place - CustomPlace对象 + // 参数:place - CustomPlace 对象 // 返回:None explicit CustomContext(const CustomPlace&); // 析构函数 virtual ~CustomContext(); - // 获取设备上下文Place信息 + // 获取设备上下文 Place 信息 // 参数:None - // 返回:place - Place对象 + // 返回:place - Place 对象 const Place& GetPlace() const override; - // 获取设备上下文stream信息 + // 获取设备上下文 stream 信息 // 参数:None // 返回:stream - void*类型指针 void* stream() const; - // 等待stream上的操作完成 + // 等待 stream 上的操作完成 // 参数:None // 返回:None void Wait() const override; @@ -47,96 +47,96 @@ // 析构函数 virtual ~DeviceContext(); - // 设置Device Allocator - // 参数:Allocator指针 + // 设置 Device Allocator + // 参数:Allocator 指针 // 返回:None void SetAllocator(const Allocator*); - // 设置Host Allocator - // 参数:Allocator指针 + // 设置 Host Allocator + // 参数:Allocator 指针 // 返回:None void SetHostAllocator(const Allocator*); - // 设置zero-size Allocator - // 参数:Allocator指针 + // 设置 zero-size Allocator + // 参数:Allocator 指针 // 返回:None void SetZeroAllocator(const Allocator*); // 获取 Allocator // 参数:None - // 返回:Allocator对象 + // 返回:Allocator 对象 const Allocator& GetAllocator() const; // 获取 Host Allocator // 参数:None - // 返回:Allocator对象 + // 返回:Allocator 对象 const Allocator& GetHostAllocator() const; // 获取 zero-size Allocator // 参数:None - // 返回:Allocator对象 
+ // 返回:Allocator 对象 const Allocator& GetZeroAllocator() const; - // 为Tensor分配Device内存 - // 参数:TensorBase类型指针 - // dtype - DataType类型变量 - // requested_size - size_t类型变量,默认值为0 + // 为 Tensor 分配 Device 内存 + // 参数:TensorBase 类型指针 + // dtype - DataType 类型变量 + // requested_size - size_t 类型变量,默认值为 0 // 返回:数据指针 - void*类型指针 void* Alloc(TensorBase*, DataType dtype, size_t requested_size = 0) const; - // 为Tensor分配Device内存 + // 为 Tensor 分配 Device 内存 // 模板参数:T - 数据类型 - // 参数:TensorBase类型指针 - // requested_size - size_t类型变量,默认值为0 + // 参数:TensorBase 类型指针 + // requested_size - size_t 类型变量,默认值为 0 // 返回:数据指针 - T*类型指针 template T* Alloc(TensorBase* tensor, size_t requested_size = 0) const; - // 为Tensor分配Host内存 - // 参数:TensorBase指针 - // dtype - DataType类型变量 - // requested_size - size_t类型变量,默认值为0 + // 为 Tensor 分配 Host 内存 + // 参数:TensorBase 指针 + // dtype - DataType 类型变量 + // requested_size - size_t 类型变量,默认值为 0 // 返回:数据指针 - void*类型指针 void* HostAlloc(TensorBase* tensor, DataType dtype, size_t requested_size = 0) const; - // 为Tensor分配Host内存 + // 为 Tensor 分配 Host 内存 // 模板参数:T - 数据类型 - // 参数:TensorBase指针 - // requested_size - size_t类型变量,默认值为0 + // 参数:TensorBase 指针 + // requested_size - size_t 类型变量,默认值为 0 // 返回:数据指针 - T*类型数据指针 template T* HostAlloc(TensorBase* tensor, size_t requested_size = 0) const; - // 获取设备上下文Place信息,子类实现 + // 获取设备上下文 Place 信息,子类实现 // 参数:None - // 返回:place - Place对象 + // 返回:place - Place 对象 virtual const Place& GetPlace() const = 0; - // 等待stream上的操作完成,子类实现 + // 等待 stream 上的操作完成,子类实现 // 参数:None // 返回:None virtual void Wait() const {} // 设置随机数发生器 - // 参数:Generator指针 + // 参数:Generator 指针 // 返回:None void SetGenerator(Generator*); // 获取随机数发生器 // 参数:None - // 返回:Generator指针 + // 返回:Generator 指针 Generator* GetGenerator() const; - // 设置Host随机数发生器 - // 参数:Generator指针 + // 设置 Host 随机数发生器 + // 参数:Generator 指针 // 返回:None void SetHostGenerator(Generator*); - // 获取Host随机数发生器 + // 获取 Host 随机数发生器 // 参数:None - // 返回:Generator指针 + // 返回:Generator 指针 Generator* GetHostGenerator() const; ``` 
diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_docs/cpp_api_cn.rst b/docs/dev_guides/custom_device_docs/custom_kernel_docs/cpp_api_cn.rst index 542cb212d2d..de0382f4cce 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_docs/cpp_api_cn.rst +++ b/docs/dev_guides/custom_device_docs/custom_kernel_docs/cpp_api_cn.rst @@ -1,16 +1,16 @@ ############# -Kernel实现接口 +Kernel 实现接口 ############# -自定义Kernel函数体的实现主要依赖两部分:1.飞桨发布的API:如设备上下文API、Tensor相关API和异常处理API等; 2.硬件封装库的API:根据具体硬件封装库使用。其中飞桨发布的C++ API已通过头文件方式发布。 +自定义 Kernel 函数体的实现主要依赖两部分:1.飞桨发布的 API:如设备上下文 API、Tensor 相关 API 和异常处理 API 等; 2.硬件封装库的 API:根据具体硬件封装库使用。其中飞桨发布的 C++ API 已通过头文件方式发布。 -- `Context API <./context_api_cn.html>`_ : 介绍设备上下文相关C++ API。 -- `Tensor API <./tensor_api_cn.html>`_ : 介绍Tensor相关C++ API。 -- `Exception API <./exception_api_cn.html>`_ : 介绍异常处理相关C++ API。 +- `Context API <./context_api_cn.html>`_ : 介绍设备上下文相关 C++ API。 +- `Tensor API <./tensor_api_cn.html>`_ : 介绍 Tensor 相关 C++ API。 +- `Exception API <./exception_api_cn.html>`_ : 介绍异常处理相关 C++ API。 -注:飞桨发布了丰富的C++ API,此处重点介绍三类API并在相应页面列举相关联的类和文件供开发者参考查阅。 +注:飞桨发布了丰富的 C++ API,此处重点介绍三类 API 并在相应页面列举相关联的类和文件供开发者参考查阅。 .. 
toctree:: :hidden: diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_docs/exception_api_cn.md b/docs/dev_guides/custom_device_docs/custom_kernel_docs/exception_api_cn.md index a3b36d40a5f..e458b2b449f 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_docs/exception_api_cn.md +++ b/docs/dev_guides/custom_device_docs/custom_kernel_docs/exception_api_cn.md @@ -6,22 +6,22 @@ 使用方式: ```c++ - PADDLE_ENFORCE_{TYPE}(cond_a, // 条件A - cond_b, // 条件B, 根据TYPE可选 + PADDLE_ENFORCE_{TYPE}(cond_a, // 条件 A + cond_b, // 条件 B, 根据 TYPE 可选 phi::errors::{ERR_TYPE}("{ERR_MSG}")); ``` 根据`TYPE`的不同,分为: -| Exception宏 | 判断条件 | 报错信息 | +| Exception 宏 | 判断条件 | 报错信息 | |---|---|---| -| PADDLE_ENFORCE_EQ | cond_a == cond_b | 触发ERR_TYPE异常和报ERR_MSG | -| PADDLE_ENFORCE_NE | cond_a != cond_b | 触发ERR_TYPE异常和报ERR_MSG | -| PADDLE_ENFORCE_GT | cond_a > cond_b | 触发ERR_TYPE异常和报ERR_MSG | -| PADDLE_ENFORCE_GE | cond_a >= cond_b | 触发ERR_TYPE异常和报ERR_MSG | -| PADDLE_ENFORCE_LT | cond_a < cond_b | 触发ERR_TYPE异常和报ERR_MSG | -| PADDLE_ENFORCE_LE | cond_a <= cond_b | 触发ERR_TYPE异常和报ERR_MSG | -| PADDLE_ENFORCE_NOT_NULL | cond_a != nullptr | 触发ERR_TYPE异常和报ERR_MSG | +| PADDLE_ENFORCE_EQ | cond_a == cond_b | 触发 ERR_TYPE 异常和报 ERR_MSG | +| PADDLE_ENFORCE_NE | cond_a != cond_b | 触发 ERR_TYPE 异常和报 ERR_MSG | +| PADDLE_ENFORCE_GT | cond_a > cond_b | 触发 ERR_TYPE 异常和报 ERR_MSG | +| PADDLE_ENFORCE_GE | cond_a >= cond_b | 触发 ERR_TYPE 异常和报 ERR_MSG | +| PADDLE_ENFORCE_LT | cond_a < cond_b | 触发 ERR_TYPE 异常和报 ERR_MSG | +| PADDLE_ENFORCE_LE | cond_a <= cond_b | 触发 ERR_TYPE 异常和报 ERR_MSG | +| PADDLE_ENFORCE_NOT_NULL | cond_a != nullptr | 触发 ERR_TYPE 异常和报 ERR_MSG | `ERR_TYPE`支持: @@ -37,15 +37,15 @@ | ExecutionTimeout | 超时 | | Unimplemented | 未实现 | | Unavailable | 不可用 | -| Fatal | Fatal错误 | +| Fatal | Fatal 错误 | | External | 外部错误 | -`ERR_MSG`为C语言风格字符串,支持变长参数。 +`ERR_MSG`为 C 语言风格字符串,支持变长参数。 示例: ```c++ -// 如果num_col_dims >= 2 && num_col_dims <= src.size()不为true则报InvalidArgument异常 +// 如果 num_col_dims >= 2 && num_col_dims <= 
src.size()不为 true 则报 InvalidArgument 异常 // 和打印相关提示信息 PADDLE_ENFORCE_EQ( (num_col_dims >= 2 && num_col_dims <= src.size()), diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_docs/kernel_declare_cn.md b/docs/dev_guides/custom_device_docs/custom_kernel_docs/kernel_declare_cn.md index 915730d5c47..de237ad2ffd 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_docs/kernel_declare_cn.md +++ b/docs/dev_guides/custom_device_docs/custom_kernel_docs/kernel_declare_cn.md @@ -1,10 +1,10 @@ -# Kernel函数声明 +# Kernel 函数声明 -飞桨通过头文件发布函数式Kernel声明,框架内外一致。 +飞桨通过头文件发布函数式 Kernel 声明,框架内外一致。 -编写自定义Kernel需基于具体的Kernel函数声明,头文件位于飞桨安装路径的`include/paddle/phi/kernels/`下。 +编写自定义 Kernel 需基于具体的 Kernel 函数声明,头文件位于飞桨安装路径的`include/paddle/phi/kernels/`下。 -Kernel函数声明的格式如下: +Kernel 函数声明的格式如下: ```c++ template @@ -18,10 +18,10 @@ void KernelNameKernel(const Context& dev_ctx, 1. 模板参数:固定写法,第一个模板参数为数据类型`T`,第二个模板参数为设备上下文`Context`。 2. 函数返回:固定为`void`。 -3. 函数命名:Kernel名称+Kernel后缀,驼峰式命名,如`SoftmaxKernel`。 -4. 函数参数:依次为设备上下文参数,输入Tensor参数(InputTensor),属性参数(Attribute)和输出Tensor参数(OutTensor)。其中: +3. 函数命名:Kernel 名称+Kernel 后缀,驼峰式命名,如`SoftmaxKernel`。 +4. 
函数参数:依次为设备上下文参数,输入 Tensor 参数(InputTensor),属性参数(Attribute)和输出 Tensor 参数(OutTensor)。其中: - 设备上下文参数:固定为`const Context&`类型; - - 自定义Kernel对应`CustomContext`类型,请参照[custom_context.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/backends/custom/custom_context.h) + - 自定义 Kernel 对应`CustomContext`类型,请参照[custom_context.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/backends/custom/custom_context.h) - InputTensor:数量>=0,支持的类型包括: - `const DenseTensor&` 请参照[dense_tensor.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/core/dense_tensor.h) - `const SelectedRows&` 请参照[selected_rows.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/core/selected_rows.h) @@ -58,7 +58,7 @@ void KernelNameKernel(const Context& dev_ctx, - `std::vector` - `std::vector` -示例,如`softmax`的Kernel函数位于`softmax_kernel.h`中,具体如下: +示例,如`softmax`的 Kernel 函数位于`softmax_kernel.h`中,具体如下: ```c++ // Softmax 内核函数 @@ -79,5 +79,5 @@ void SoftmaxKernel(const Context& dev_ctx, ``` > 注意: -> 1. Kernel函数声明是自定义Kernel能够被注册和框架调用的基础,由框架发布,需要严格遵守 -> 2. Kernel函数声明与头文件可能不完全对应,可以按照函数命名约定等查找所需Kernel函数声明 +> 1. Kernel 函数声明是自定义 Kernel 能够被注册和框架调用的基础,由框架发布,需要严格遵守 +> 2. 
Kernel 函数声明与头文件可能不完全对应,可以按照函数命名约定等查找所需 Kernel 函数声明 diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_docs/register_api_cn.md b/docs/dev_guides/custom_device_docs/custom_kernel_docs/register_api_cn.md index f723b949aa7..eaeac2e957a 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_docs/register_api_cn.md +++ b/docs/dev_guides/custom_device_docs/custom_kernel_docs/register_api_cn.md @@ -1,6 +1,6 @@ -# Kernel注册接口 +# Kernel 注册接口 -自定义Kernel通过飞桨框架提供的注册宏进行注册,以便飞桨框架调用。 +自定义 Kernel 通过飞桨框架提供的注册宏进行注册,以便飞桨框架调用。 注册宏的位置需要放置在全局空间下。 @@ -18,12 +18,12 @@ PD_REGISTER_PLUGIN_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...)) {} 说明: - 注册宏名称:固定为`PD_REGISTER_PLUGIN_KERNEL` -- 第一个参数:kernel_name,即Kernel名称,飞桨内外一致,请参照CPU相同Kernel函数注册名称,如`softmax` -- 第二个参数:backend,即后端名称,可自定义,但须与自定义Runtime设定的名称一致,如`Ascend910` +- 第一个参数:kernel_name,即 Kernel 名称,飞桨内外一致,请参照 CPU 相同 Kernel 函数注册名称,如`softmax` +- 第二个参数:backend,即后端名称,可自定义,但须与自定义 Runtime 设定的名称一致,如`Ascend910` - 第三个参数:layout,即内存布局,为`DataLayout`类型的枚举,按需设定,请参照[layout.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/common/layout.h) -- 第四个参数:meta_kernel_fn,即Kernel函数名,注意此处不加模板参数,如`my_namespace::SoftmaxKernel` +- 第四个参数:meta_kernel_fn,即 Kernel 函数名,注意此处不加模板参数,如`my_namespace::SoftmaxKernel` - 不定长数据类型参数:C++的基础数据类型或飞桨定义的`phi::dtype::float16`、`phi::dtype::bfloat16`、`phi::dtype::complex`等类型,请参照[data_type.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/common/data_type.h) -- 末尾:固定为函数体,其中可按需对Kernel进行必要设置,如果没有,保留`{}`。 +- 末尾:固定为函数体,其中可按需对 Kernel 进行必要设置,如果没有,保留`{}`。 >说明:末尾函数体对应的函数声明如下: >```c++ @@ -34,17 +34,17 @@ PD_REGISTER_PLUGIN_KERNEL(kernel_name, backend, layout, meta_kernel_fn, ...)) {} >void __PD_KERNEL_args_def_FN_##kernel_name##_##backend##_##layout( > const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel); >``` ->即函数体中可使用参数`kernel_key`与`kernel`,在Kernel注册时对Kernel进行个性化调整。 +>即函数体中可使用参数`kernel_key`与`kernel`,在 Kernel 注册时对 Kernel 进行个性化调整。 -示例,如`softmax`的CustomCPU后端Kernel注册如下: +示例,如`softmax`的 CustomCPU 
后端 Kernel 注册如下: ```c++ -// Softmax的CustomCPU后端Kernel注册 +// Softmax 的 CustomCPU 后端 Kernel 注册 // 全局命名空间 -// 参数: softmax - Kernel名称 +// 参数: softmax - Kernel 名称 // CustomCPU - 后端名称 // ALL_LAYOUT - 内存布局 -// custom_cpu::SoftmaxKernel - Kernel函数名 +// custom_cpu::SoftmaxKernel - Kernel 函数名 // float - 数据类型名 // double - 数据类型名 // phi::dtype::float16 - 数据类型名 @@ -58,5 +58,5 @@ PD_REGISTER_PLUGIN_KERNEL(softmax, ``` > 注意: -> 1. 对于通过自定义Runtime接入的后端,backend参数须与之名称保持一致 +> 1. 对于通过自定义 Runtime 接入的后端,backend 参数须与之名称保持一致 > 2. 注册宏末尾函数体中除非有明确需要,否则保留空函数体即可,请参照飞桨框架内其它后端的使用 diff --git a/docs/dev_guides/custom_device_docs/custom_kernel_docs/tensor_api_cn.md b/docs/dev_guides/custom_device_docs/custom_kernel_docs/tensor_api_cn.md index 5273e1011a4..58ccd48e6cb 100644 --- a/docs/dev_guides/custom_device_docs/custom_kernel_docs/tensor_api_cn.md +++ b/docs/dev_guides/custom_device_docs/custom_kernel_docs/tensor_api_cn.md @@ -1,49 +1,49 @@ # Tensor API -飞桨发布多种Tensor,基类均为`TensorBase`,这里列举常用的`DenseTensor` API,`TensorBase`与其它Tensor类型请参照文后链接。 +飞桨发布多种 Tensor,基类均为`TensorBase`,这里列举常用的`DenseTensor` API,`TensorBase`与其它 Tensor 类型请参照文后链接。 ## DenseTensor `DenseTensor`中的所有元素数据存储在连续内存中,请参照[dense_tensor.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/core/dense_tensor.h) ```c++ - // 构造DenseTensor并分配内存 - // 参数:a - Allocator指针类型 - // meta - DenseTensorMeta对象 + // 构造 DenseTensor 并分配内存 + // 参数:a - Allocator 指针类型 + // meta - DenseTensorMeta 对象 // 返回:None DenseTensor(Allocator* a, const DenseTensorMeta& meta); - // 构造DenseTensor并分配内存 - // 参数:a - Allocator指针类型 - // meta - DenseTensorMeta移动对象 + // 构造 DenseTensor 并分配内存 + // 参数:a - Allocator 指针类型 + // meta - DenseTensorMeta 移动对象 // 返回:None DenseTensor(Allocator* a, DenseTensorMeta&& meta); - // 构造DenseTensor并分配内存 - // 参数:holder - Allocation共享指针类型 - // meta - DenseTensorMeta移动对象 + // 构造 DenseTensor 并分配内存 + // 参数:holder - Allocation 共享指针类型 + // meta - DenseTensorMeta 移动对象 // 返回:None DenseTensor(const std::shared_ptr& holder, const DenseTensorMeta& meta); 
// 移动构造函数 - // 参数:other - DenseTensor移动对象 + // 参数:other - DenseTensor 移动对象 // 返回:None DenseTensor(DenseTensor&& other) = default; // 拷贝构造函数 - // 参数:other - DenseTensor对象 + // 参数:other - DenseTensor 对象 // 返回:None DenseTensor(const DenseTensor& other); // 赋值操作 - // 参数:other - DenseTensor对象 - // 返回:DenseTensor对象 + // 参数:other - DenseTensor 对象 + // 返回:DenseTensor 对象 DenseTensor& operator=(const DenseTensor& other); // 移动赋值操作 - // 参数:other - DenseTensor对象 - // 返回:DenseTensor对象 + // 参数:other - DenseTensor 对象 + // 返回:DenseTensor 对象 DenseTensor& operator=(DenseTensor&& other); // 无参构造函数 @@ -57,121 +57,121 @@ // 返回:字符串指针 static const char* name(); - // 获取Tensor中元素数量 + // 获取 Tensor 中元素数量 // 参数:None - // 返回:int64_t类型变量 + // 返回:int64_t 类型变量 int64_t numel() const override; - // 获取Tensor的dims信息 + // 获取 Tensor 的 dims 信息 // 参数:None - // 返回:DDim对象 + // 返回:DDim 对象 const DDim& dims() const noexcept override; - // 获取Tensor的lod信息 + // 获取 Tensor 的 lod 信息 // 参数:None - // 返回:LoD对象 + // 返回:LoD 对象 const LoD& lod() const noexcept; - // 获取Tensor的数据类型信息 + // 获取 Tensor 的数据类型信息 // 参数:None - // 返回:DataType类型变量 + // 返回:DataType 类型变量 DataType dtype() const noexcept override; - // 获取Tensor的内存布局信息 + // 获取 Tensor 的内存布局信息 // 参数:None - // 返回:DataLayout类型变量 + // 返回:DataLayout 类型变量 DataLayout layout() const noexcept override; - // 获取Tensor的Place信息 + // 获取 Tensor 的 Place 信息 // 参数:None - // 返回:Place类型变量 + // 返回:Place 类型变量 const Place& place() const override; - // 获取Tensor的meta信息 + // 获取 Tensor 的 meta 信息 // 参数:None - // 返回:DenseTensorMeta对象 + // 返回:DenseTensorMeta 对象 const DenseTensorMeta& meta() const noexcept; - // 设置Tensor的meta信息 - // 参数:meta - DenseTensorMeta移动对象 + // 设置 Tensor 的 meta 信息 + // 参数:meta - DenseTensorMeta 移动对象 // 返回:None void set_meta(DenseTensorMeta&& meta); - // 设置Tensor的meta信息 - // 参数:meta - DenseTensorMeta对象 + // 设置 Tensor 的 meta 信息 + // 参数:meta - DenseTensorMeta 对象 // 返回:None void set_meta(const DenseTensorMeta& meta); - // 检查Tensor的meta信息是否有效 + // 检查 Tensor 的 meta 信息是否有效 // 参数:None - 
// 返回:bool类型变量 + // 返回:bool 类型变量 bool valid() const noexcept override; - // 检查Tensor的是否被初始化 + // 检查 Tensor 的是否被初始化 // 参数:None - // 返回:bool类型变量 + // 返回:bool 类型变量 bool initialized() const override; - // 为Tensor分配内存 - // 参数:allocator - Allocator类型指针 - // dtype - DataType变量 - // requested_size - size_t类型变量 + // 为 Tensor 分配内存 + // 参数:allocator - Allocator 类型指针 + // dtype - DataType 变量 + // requested_size - size_t 类型变量 // 返回:void*类型指针 void* AllocateFrom(Allocator* allocator, DataType dtype, size_t requested_size = 0) override; - // 检查是否与其它Tensor共享内存 - // 参数:b - DenseTensor对象 - // 返回:bool类型变量 + // 检查是否与其它 Tensor 共享内存 + // 参数:b - DenseTensor 对象 + // 返回:bool 类型变量 bool IsSharedWith(const DenseTensor& b) const; - // 修改Tensor的Dims信息并分配内存 - // 参数:dims - DDim对象 + // 修改 Tensor 的 Dims 信息并分配内存 + // 参数:dims - DDim 对象 // 返回:None void ResizeAndAllocate(const DDim& dims); - // 修改Tensor的Dims信息 - // 参数:dims - DDim对象 - // 返回:DenseTensor对象 + // 修改 Tensor 的 Dims 信息 + // 参数:dims - DDim 对象 + // 返回:DenseTensor 对象 DenseTensor& Resize(const DDim& dims); - // 重置Tensor的LoD信息 - // 参数:lod - LoD对象 + // 重置 Tensor 的 LoD 信息 + // 参数:lod - LoD 对象 // 返回:None void ResetLoD(const LoD& lod); - // 获取Tensor的内存大小 + // 获取 Tensor 的内存大小 // 参数:None - // 返回:size_t类型变量 + // 返回:size_t 类型变量 size_t capacity() const; - // 获取Tensor的不可修改数据指针 + // 获取 Tensor 的不可修改数据指针 // 模板参数:T - 数据类型 // 参数:None - // 返回:不可修改的T类型数据指针 + // 返回:不可修改的 T 类型数据指针 template const T* data() const; - // 获取Tensor的不可修改数据指针 + // 获取 Tensor 的不可修改数据指针 // 参数:None - // 返回:不可修改的void类型数据指针 + // 返回:不可修改的 void 类型数据指针 const void* data() const; - // 获取Tensor的可修改内存数据指针 + // 获取 Tensor 的可修改内存数据指针 // 模板参数:T - 数据类型 // 参数:None - // 返回:可修改的T类型数据指针 + // 返回:可修改的 T 类型数据指针 template T* data(); - // 获取Tensor的可修改内存数据指针 + // 获取 Tensor 的可修改内存数据指针 // 参数:None - // 返回:可修改的void类型数据指针 + // 返回:可修改的 void 类型数据指针 void* data(); ``` -## 其它Tensor类型 +## 其它 Tensor 类型 - `TensorBase`:请参照[tensor_base.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/core/tensor_base.h) - 
`SelectedRows`:请参照[selected_rows.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/core/selected_rows.h) diff --git a/docs/dev_guides/custom_device_docs/custom_runtime_cn.rst b/docs/dev_guides/custom_device_docs/custom_runtime_cn.rst index b137b0cc3f6..e229e90da69 100644 --- a/docs/dev_guides/custom_device_docs/custom_runtime_cn.rst +++ b/docs/dev_guides/custom_device_docs/custom_runtime_cn.rst @@ -5,13 +5,13 @@ 自定义 Runtime 为 PaddlePaddle 提供了一种插件式注册新硬件 Runtime 的方式。 DeviceManager 管理 PaddlePaddle 的硬件设备以及 Runtime/Driver 接口,向上提供统一的接口供框架调用硬件功能,向下暴露一系列接口用于注册自定义 Runtime ,通过 C API 形式保证二进制兼容性。这些接口可以在 `device_ext.h `_ 文件中查看,开发者只需要实现这些接口即可为 PaddlePaddle 添加自定义 Runtime 。 - `数据类型 <./runtime_data_type_cn.html>`_ : 介绍自定义 Runtime 的数据类型定义。 -- `Device接口 <./device_api_cn.html>`_ : 介绍Device接口的定义和功能。 -- `Memory接口 <./memory_api_cn.html>`_ : 介绍Memory接口的定义和功能。 -- `Stream接口 <./stream_api_cn.html>`_ : 介绍Stream接口的定义和功能。 -- `Event接口 <./event_api_cn.html>`_ : 介绍Event接口的定义和功能。 +- `Device 接口 <./device_api_cn.html>`_ : 介绍 Device 接口的定义和功能。 +- `Memory 接口 <./memory_api_cn.html>`_ : 介绍 Memory 接口的定义和功能。 +- `Stream 接口 <./stream_api_cn.html>`_ : 介绍 Stream 接口的定义和功能。 +- `Event 接口 <./event_api_cn.html>`_ : 介绍 Event 接口的定义和功能。 -Device接口 +Device 接口 ############ +------------------------+--------------------------+ @@ -43,7 +43,7 @@ Device接口 +------------------------+--------------------------+ -Memory接口 +Memory 接口 ############ +---------------------------+------------------------------+ @@ -95,41 +95,41 @@ Memory接口 +---------------------------+------------------------------+ -Stream接口 +Stream 接口 ############ +---------------------+----------------------------------------+ | 接口名称 | 功能简介 | +=====================+========================================+ -| create_stream | 创建一个stream对象。 | +| create_stream | 创建一个 stream 对象。 | +---------------------+----------------------------------------+ -| destroy_stream | 销毁一个stream对象。 | +| destroy_stream | 销毁一个 stream 对象。 | 
+---------------------+----------------------------------------+ -| query_stream | 查询stream上任务是否完成。 | +| query_stream | 查询 stream 上任务是否完成。 | +---------------------+----------------------------------------+ -| synchronize_stream | 同步stream,等待stream上所有任务完成。 | +| synchronize_stream | 同步 stream,等待 stream 上所有任务完成。 | +---------------------+----------------------------------------+ -| stream_add_callback | 添加一个主机回调到stream上。 | +| stream_add_callback | 添加一个主机回调到 stream 上。 | +---------------------+----------------------------------------+ -| stream_wait_event | 等待stream上的一个event完成。 | +| stream_wait_event | 等待 stream 上的一个 event 完成。 | +---------------------+----------------------------------------+ -Event接口 +Event 接口 ############ +-------------------+----------------------------+ | 接口名称 | 功能简介 | +===================+============================+ -| create_event | 创建一个event对象。 | +| create_event | 创建一个 event 对象。 | +-------------------+----------------------------+ -| destroy_event | 销毁一个event对象。 | +| destroy_event | 销毁一个 event 对象。 | +-------------------+----------------------------+ -| record_event | 在stream上记录event。 | +| record_event | 在 stream 上记录 event。 | +-------------------+----------------------------+ -| query_event | 查询event是否完成。 | +| query_event | 查询 event 是否完成。 | +-------------------+----------------------------+ -| synchronize_event | 同步event,等待event完成。 | +| synchronize_event | 同步 event,等待 event 完成。 | +-------------------+----------------------------+ diff --git a/docs/dev_guides/custom_device_docs/device_api_cn.md b/docs/dev_guides/custom_device_docs/device_api_cn.md index c15159617c1..0a2a6f4b18b 100644 --- a/docs/dev_guides/custom_device_docs/device_api_cn.md +++ b/docs/dev_guides/custom_device_docs/device_api_cn.md @@ -1,4 +1,4 @@ -# Device接口 +# Device 接口 ## initialize 【optional】 diff --git a/docs/dev_guides/custom_device_docs/event_api_cn.md b/docs/dev_guides/custom_device_docs/event_api_cn.md index 04773a68040..7a0fe3cb40d 100644 --- 
a/docs/dev_guides/custom_device_docs/event_api_cn.md +++ b/docs/dev_guides/custom_device_docs/event_api_cn.md @@ -1,4 +1,4 @@ -# Event接口 +# Event 接口 ## create_event 【required】 @@ -10,13 +10,13 @@ C_Status (*create_event)(const C_Device device, C_Event* event) ### 接口说明 -创建一个event对象,event被框架内部用于同步不同stream之间的任务。硬件不支持异步执行时该接口需要空实现。 +创建一个 event 对象,event 被框架内部用于同步不同 stream 之间的任务。硬件不支持异步执行时该接口需要空实现。 ### 参数 device - 使用的设备。 -event - 存储创建的event对象。 +event - 存储创建的 event 对象。 ## destroy_event 【required】 @@ -28,13 +28,13 @@ C_Status (*destroy_event)(const C_Device device, C_Event event) ### 接口说明 -销毁一个event对象。硬件不支持异步执行时该接口需要空实现。 +销毁一个 event 对象。硬件不支持异步执行时该接口需要空实现。 ### 参数 device - 使用的设备。 -event - 需要释放的event对象。 +event - 需要释放的 event 对象。 ## record_event 【required】 @@ -46,15 +46,15 @@ C_Status (*record_event)(const C_Device device, C_Stream stream, C_Event event) ### 接口说明 -在stream上记录event。硬件不支持异步执行时该接口需要空实现。 +在 stream 上记录 event。硬件不支持异步执行时该接口需要空实现。 ### 参数 device - 使用的设备。 -stream - 在该stream上记录event。 +stream - 在该 stream 上记录 event。 -event - 被记录的event。 +event - 被记录的 event。 ## query_event 【optional】 @@ -66,13 +66,13 @@ C_Status (*query_event)(const C_Device device, C_Event event) ### 接口说明 -查询event是否完成,如果没有实现,PaddlePaddle 会用 synchronize_event 代替。 +查询 event 是否完成,如果没有实现,PaddlePaddle 会用 synchronize_event 代替。 ### 参数 device - 使用的设备。 -event - 需要查询的event对象。 +event - 需要查询的 event 对象。 ## synchronize_event 【required】 @@ -84,10 +84,10 @@ C_Status (*synchronize_event)(const C_Device device, C_Event event) ### 接口说明 -同步event,等待event完成。硬件不支持异步执行时该接口需要空实现。 +同步 event,等待 event 完成。硬件不支持异步执行时该接口需要空实现。 ### 参数 device - 使用的设备。 -event - 需要同步的event。 +event - 需要同步的 event。 diff --git a/docs/dev_guides/custom_device_docs/index_cn.rst b/docs/dev_guides/custom_device_docs/index_cn.rst index e868518180d..571c5db49b6 100644 --- a/docs/dev_guides/custom_device_docs/index_cn.rst +++ b/docs/dev_guides/custom_device_docs/index_cn.rst @@ -6,8 +6,8 @@ 自定义硬件接入功能由自定义 Runtime 与自定义 Kernel 两个主要组件构成,基于这两个组件,用户可按需完成自定义新硬件接入飞桨。 -- 
`自定义Runtime <./custom_runtime_cn.html>`_ : 飞桨框架自定义Runtime介绍 -- `自定义Kernel <./custom_kernel_cn.html>`_ : 飞桨框架自定义Kernel介绍 +- `自定义 Runtime <./custom_runtime_cn.html>`_ : 飞桨框架自定义 Runtime 介绍 +- `自定义 Kernel <./custom_kernel_cn.html>`_ : 飞桨框架自定义 Kernel 介绍 - `新硬件接入示例 <./custom_kernel_cn.html>`_ : 通过示例介绍自定义新硬件接入飞桨的步骤 .. toctree:: diff --git a/docs/dev_guides/custom_device_docs/memory_api_cn.md b/docs/dev_guides/custom_device_docs/memory_api_cn.md index 124972fa3dc..0acb8437fd8 100644 --- a/docs/dev_guides/custom_device_docs/memory_api_cn.md +++ b/docs/dev_guides/custom_device_docs/memory_api_cn.md @@ -1,4 +1,4 @@ -# Memory接口 +# Memory 接口 ## device_memory_allocate 【required】 @@ -226,7 +226,7 @@ C_Status (*async_memory_copy_h2d)(const C_Device device, C_Stream stream, void* device - 使用的设备。 -stream - 在该stream上执行。 +stream - 在该 stream 上执行。 dst - 目的设备内存地址。 @@ -250,7 +250,7 @@ C_Status (*async_memory_copy_d2h)(const C_Device device, C_Stream stream, void* device - 使用的设备。 -stream - 在该stream上执行。 +stream - 在该 stream 上执行。 dst - 目的主机内存地址。 @@ -274,7 +274,7 @@ C_Status (*async_memory_copy_d2d)(const C_Device device, C_Stream stream, void* device - 使用的设备。 -stream - 使用的stream。 +stream - 使用的 stream。 dst - 目的设备内存地址。 @@ -300,7 +300,7 @@ dst_device - 目的设备。 src_device - 源设备。 -stream - 使用的stream。 +stream - 使用的 stream。 dst - 目的设备内存地址。 @@ -414,7 +414,7 @@ C_Status (*device_extra_padding_size)(C_Device device, size_t* size) ### 接口说明 -分配设备内存需要的额外填充字节,如果没有实现,则默认为0。为避免频繁调用硬件 API 申请/释放内存, PaddlePaddle 会自行管理设备内存,申请内存时优先从 PaddlePaddle 管理的内存中分配。申请 size 大小的内存时,会分配 size + extra_padding_size 大小的内存,并按 min_chunk_size 对齐。 +分配设备内存需要的额外填充字节,如果没有实现,则默认为 0。为避免频繁调用硬件 API 申请/释放内存, PaddlePaddle 会自行管理设备内存,申请内存时优先从 PaddlePaddle 管理的内存中分配。申请 size 大小的内存时,会分配 size + extra_padding_size 大小的内存,并按 min_chunk_size 对齐。 ### 参数 diff --git a/docs/dev_guides/custom_device_docs/runtime_data_type_cn.md b/docs/dev_guides/custom_device_docs/runtime_data_type_cn.md index a16887675f1..3c15d65f5e4 100644 --- 
a/docs/dev_guides/custom_device_docs/runtime_data_type_cn.md +++ b/docs/dev_guides/custom_device_docs/runtime_data_type_cn.md @@ -36,7 +36,7 @@ typedef struct C_Device_st { int id; } * C_Device; ### 说明 -描述一个device对象。 +描述一个 device 对象。 ## C_Stream @@ -48,7 +48,7 @@ typedef struct C_Stream_st* C_Stream; ### 说明 -描述一个stream对象,stream是框架内部用于执行异步任务的任务队列,同一stream中的任务按顺序执行。 +描述一个 stream 对象,stream 是框架内部用于执行异步任务的任务队列,同一 stream 中的任务按顺序执行。 ## C_Event @@ -60,7 +60,7 @@ typedef struct C_Event_st* C_Event; ### 说明 -描述一个event对象,event被框架内部用于同步不同stream之间的任务。 +描述一个 event 对象,event 被框架内部用于同步不同 stream 之间的任务。 ## C_Callback @@ -75,7 +75,7 @@ typedef void (*C_Callback)(C_Device device, ### 说明 -主机回调函数类型,具有4个参数,使用的设备,使用的stream,用户数据,以及返回值。 +主机回调函数类型,具有 4 个参数,使用的设备,使用的 stream,用户数据,以及返回值。 ## CustomRuntimeParams diff --git a/docs/dev_guides/custom_device_docs/stream_api_cn.md b/docs/dev_guides/custom_device_docs/stream_api_cn.md index 4efb6943982..639da5b2365 100644 --- a/docs/dev_guides/custom_device_docs/stream_api_cn.md +++ b/docs/dev_guides/custom_device_docs/stream_api_cn.md @@ -1,4 +1,4 @@ -# Stream接口 +# Stream 接口 ## create_stream 【required】 @@ -10,13 +10,13 @@ C_Status (*create_stream)(const C_Device device, C_Stream* stream) ### 接口说明 -创建一个stream对象,stream是框架内部用于执行异步任务的任务队列,同一stream中的任务按顺序执行。硬件不支持异步执行时该接口需要空实现。 +创建一个 stream 对象,stream 是框架内部用于执行异步任务的任务队列,同一 stream 中的任务按顺序执行。硬件不支持异步执行时该接口需要空实现。 ### 参数 device - 使用的设备。 -stream - 存储创建的stream对象。 +stream - 存储创建的 stream 对象。 ## destroy_stream 【required】 @@ -28,13 +28,13 @@ C_Status (*destroy_stream)(const C_Device device, C_Stream stream) ### 接口说明 -销毁一个stream对象。硬件不支持异步执行时该接口需要空实现。 +销毁一个 stream 对象。硬件不支持异步执行时该接口需要空实现。 ### 参数 device - 使用的设备。 -stream - 需要释放的stream对象。 +stream - 需要释放的 stream 对象。 ## query_stream 【optional】 @@ -46,13 +46,13 @@ C_Status (*query_stream)(const C_Device device, C_Stream stream) ### 接口说明 -查询stream上的任务是否完成,如果没有实现,PaddlePaddle 会用 synchronize_stream 代替。 +查询 stream 上的任务是否完成,如果没有实现,PaddlePaddle 会用 synchronize_stream 代替。 ### 参数 device - 
使用的设备。 -stream - 需要查询的stream。 +stream - 需要查询的 stream。 ## synchronize_stream 【required】 @@ -64,13 +64,13 @@ C_Status (*synchronize_stream)(const C_Device device, C_Stream stream) ### 接口说明 -同步stream,等待stream上所有任务完成。硬件不支持异步执行时该接口需要空实现。 +同步 stream,等待 stream 上所有任务完成。硬件不支持异步执行时该接口需要空实现。 ### 参数 device - 使用的设备。 -stream - 需要同步的stream。 +stream - 需要同步的 stream。 ## stream_add_callback 【optional】 @@ -82,13 +82,13 @@ C_Status (*stream_add_callback)(const C_Device device, C_Stream stream, C_Callba ### 接口说明 -添加一个主机回调函数到stream上。 +添加一个主机回调函数到 stream 上。 ### 参数 device - 使用的设备。 -stream - 添加回调到该stream中。 +stream - 添加回调到该 stream 中。 callback - 回调函数。 @@ -104,12 +104,12 @@ C_Status (*stream_wait_event)(const C_Device device, C_Stream stream, C_Event ev ### 接口说明 -等待stream上的一个event完成。硬件不支持异步执行时该接口需要空实现。 +等待 stream 上的一个 event 完成。硬件不支持异步执行时该接口需要空实现。 ### 参数 device - 使用的设备。 -stream - 等待的stream。 +stream - 等待的 stream。 -event - 等待的event。 +event - 等待的 event。 diff --git a/docs/dev_guides/docs_contributing_guides_cn.md b/docs/dev_guides/docs_contributing_guides_cn.md index 8ddb8f47300..617395ff5c7 100644 --- a/docs/dev_guides/docs_contributing_guides_cn.md +++ b/docs/dev_guides/docs_contributing_guides_cn.md @@ -1,14 +1,14 @@ # 文档贡献指南 -PaddlePaddle 的文档存储于 [PaddlePaddle/docs](https://github.com/PaddlePaddle/docs) 中,之后通过技术手段转为 HTML文件后呈现至[官网文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/index_cn.html) 。官网文档和 `docs` 的对应关系如下: +PaddlePaddle 的文档存储于 [PaddlePaddle/docs](https://github.com/PaddlePaddle/docs) 中,之后通过技术手段转为 HTML 文件后呈现至[官网文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/index_cn.html) 。官网文档和 `docs` 的对应关系如下: | 官网 | docs | | -- | -- | |[文档/安装说明](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/index_cn.html) | [docs/docs/install](https://github.com/PaddlePaddle/docs/tree/develop/docs/install) | | [文档/使用教程](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/index_cn.html) | 
[docs/docs/guides](https://github.com/PaddlePaddle/docs/tree/develop/docs/guides) | | [文档/应用实践](https://www.paddlepaddle.org.cn/documentation/docs/zh/tutorial/index_cn.html) | [docs/docs/tutorial](https://github.com/PaddlePaddle/docs/tree/develop/docs/tutorial) | -| [文档/API文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/index_cn.html) | [docs/docs/api](https://github.com/PaddlePaddle/docs/tree/develop/docs/api) | +| [文档/API 文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/index_cn.html) | [docs/docs/api](https://github.com/PaddlePaddle/docs/tree/develop/docs/api) | | [文档/常见问题与解答](https://www.paddlepaddle.org.cn/documentation/docs/zh/faq/index_cn.html) | [docs/docs/faq](https://github.com/PaddlePaddle/docs/tree/develop/docs/faq) | | [文档/Release Note](https://www.paddlepaddle.org.cn/documentation/docs/zh/release_note_cn.html) | [docs/docs/release_note_cn.md](https://github.com/PaddlePaddle/docs/blob/develop/docs/release_note_cn.md) | @@ -19,7 +19,7 @@ PaddlePaddle 的文档存储于 [PaddlePaddle/docs](https://github.com/PaddlePad ![图片](http://bos.bj.bce-internal.sdns.baidu.com/agroup-bos-bj/bj-adbd631fa37a3bb3a313e723b17fe634764eebfe) ### 1.2 Clone -将你目录下的远程仓库clone到本地。 +将你目录下的远程仓库 clone 到本地。 ``` ➜ git clone https://github.com/USERNAME/docs ➜ cd docs @@ -27,7 +27,7 @@ PaddlePaddle 的文档存储于 [PaddlePaddle/docs](https://github.com/PaddlePad ### 1.3 创建本地分支 -docs 目前使用 [Git流分支模型](https://nvie.com/posts/a-successful-git-branching-model/)进行开发,测试,发行和维护。 +docs 目前使用 [Git 流分支模型](https://nvie.com/posts/a-successful-git-branching-model/)进行开发,测试,发行和维护。 所有的 feature 和 bug fix 的开发工作都应该在一个新的分支上完成,一般从 develop 分支上创建新分支。 @@ -42,7 +42,7 @@ docs 目前使用 [Git流分支模型](https://nvie.com/posts/a-successful-git-b Paddle 开发人员使用 [pre-commit](https://pre-commit.com/) 工具来管理 Git 预提交钩子。 它可以帮助你格式化源代码(C++,Python),在提交(commit)前自动检查一些基本事宜(如每个文件只有一个 EOL,Git 中不要添加大文件等)。 -pre-commit测试是 Travis-CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 Paddle,首先安装并在当前目录运行它: +pre-commit 测试是 Travis-CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 
Paddle,首先安装并在当前目录运行它: ``` ➜ pip install pre-commit @@ -51,7 +51,7 @@ pre-commit测试是 Travis-CI 中单元测试的一部分,不满足钩子的 P Paddle 使用 clang-format 来调整 C/C++ 源代码格式,请确保 clang-format 版本在 3.8 以上。 -**注**:通过``pip install pre-commit``和 ``conda install -c conda-forge pre-commit``安装的yapf稍有不同,Paddle 开发人员使用的是 ``pip install pre-commit``。 +**注**:通过``pip install pre-commit``和 ``conda install -c conda-forge pre-commit``安装的 yapf 稍有不同,Paddle 开发人员使用的是 ``pip install pre-commit``。 ## 二、正式修改文档 @@ -59,7 +59,7 @@ Paddle 使用 clang-format 来调整 C/C++ 源代码格式,请确保 clang-for ### 2.1 新增文档 -当你要新增文档时,需要参考上述的对应关系,找到合适的目录,新建 Markdown或reStructuredText文件。中英文文档存储在同一路径下,其中,中文文档的后缀为 `_cn.md/rst`,英文文档的后缀为 `_en.md/rst`。 +当你要新增文档时,需要参考上述的对应关系,找到合适的目录,新建 Markdown 或 reStructuredText 文件。中英文文档存储在同一路径下,其中,中文文档的后缀为 `_cn.md/rst`,英文文档的后缀为 `_en.md/rst`。 在新增文件后,还需要在目录文件中添加该文件的索引。目录文件一般是 index_cn.rst/ index_en.rst,需要在文件的 `.. toctree::` 部分添加该文件的索引。 @@ -80,13 +80,13 @@ Paddle 使用 clang-format 来调整 C/C++ 源代码格式,请确保 clang-for ### 2.2 修改文档 -修改文档,可以通过文档的URL,确定文档的源文件。 如 文档 -> 使用教程 -> 动态图转静态图 中 《调试方法》的文档URL为:[https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/04_dygraph_to_static/debugging_cn.html](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/04_dygraph_to_static/debugging_cn.html),URL路径中,`guides/04_dygraph_to_static/debugging_cn.html` 即对应 `(docs/docs/)guides/04_dygraph_to_static/debugging_cn.md` , 因此,可以很快的确定文档的源文件,然后直接修改即可。 +修改文档,可以通过文档的 URL,确定文档的源文件。 如 文档 -> 使用教程 -> 动态图转静态图 中 《调试方法》的文档 URL 为:[https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/04_dygraph_to_static/debugging_cn.html](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/04_dygraph_to_static/debugging_cn.html),URL 路径中,`guides/04_dygraph_to_static/debugging_cn.html` 即对应 `(docs/docs/)guides/04_dygraph_to_static/debugging_cn.md` , 因此,可以很快的确定文档的源文件,然后直接修改即可。 ## 三、提交&push -### 3.1 提交&触发CI单测 +### 3.1 提交&触发 CI 单测 - 修改 ``guides/04_dygraph_to_static/debugging_cn.md`` 这个文件,并提交这个文件 @@ -126,7 +126,7 @@ 
cpplint..............................................(no files to check)Skipped pylint...................................................................Passed copyright_checker........................................................Passed ``` - 全部Passed 或 Skipped后,即可进入下一步。如果有 Failed 文件,则需要按照规范,修改出现Failed 的文件后,重新 ``git add -> pre-commit`` ,直至没有 Failed 文件。 + 全部 Passed 或 Skipped 后,即可进入下一步。如果有 Failed 文件,则需要按照规范,修改出现 Failed 的文件后,重新 ``git add -> pre-commit`` ,直至没有 Failed 文件。 ``` ➜ pre-commit CRLF end-lines remover...............................(no files to check)Skipped @@ -193,23 +193,23 @@ upstream ➜ git push origin my-cool-stuff ``` -## 四、提交PR +## 四、提交 PR -在你push后在对应仓库会提醒你进行PR操作,点击后,按格式填写PR内容,即可。 +在你 push 后在对应仓库会提醒你进行 PR 操作,点击后,按格式填写 PR 内容,即可。 ## 五、review&merge -提交PR后,可以指定 Paddle 的同学进行 Review。目前 Paddle 负责文档的同学是 @TCChenLong、@jzhang533、@saxon-zh、@Heeenrrry、@dingjiaweiww等 。 +提交 PR 后,可以指定 Paddle 的同学进行 Review。目前 Paddle 负责文档的同学是 @TCChenLong、@jzhang533、@saxon-zh、@Heeenrrry、@dingjiaweiww 等 。 ## CI -Paddle 中与文档相关的CI 流水线是 `FluidDoc1`等,主要对以下几个方面进行检查: +Paddle 中与文档相关的 CI 流水线是 `FluidDoc1`等,主要对以下几个方面进行检查: -- 检查PR CLA -- 检查增量修改的API是否需要相关人员审核 +- 检查 PR CLA +- 检查增量修改的 API 是否需要相关人员审核 - 若需要执行示例代码则执行看能否正常运行 -如果无法通过该CI,请点击对应CI的details,查看CI运行的的log,并根据log修改你的PR,直至通过CI。 +如果无法通过该 CI,请点击对应 CI 的 details,查看 CI 运行的的 log,并根据 log 修改你的 PR,直至通过 CI。 未选择任何文件 diff --git a/docs/dev_guides/git_guides/code_review_cn.md b/docs/dev_guides/git_guides/code_review_cn.md index a38da2ca750..9dc2a117320 100644 --- a/docs/dev_guides/git_guides/code_review_cn.md +++ b/docs/dev_guides/git_guides/code_review_cn.md @@ -2,9 +2,9 @@ 为了使评审人在评审代码时更好地专注于代码本身,请你每次提交代码时,遵守以下约定: -1)请保证CI 中测试任务能顺利通过。如果没过,说明提交的代码存在问题,评审人一般不做评审。 +1)请保证 CI 中测试任务能顺利通过。如果没过,说明提交的代码存在问题,评审人一般不做评审。 -2)如果解决了某个Issue的问题,请在该Pull Request的**第一个**评论框中加上:`fix #issue_number`,这样当该Pull Request被合并后,会自动关闭对应的Issue。关键词包括:close, closes, closed, fix, fixes, fixed, resolve, resolves, resolved,请选择合适的词汇。详细可参考[Closing issues via commit 
messages](https://help.github.com/articles/closing-issues-via-commit-messages)。 +2)如果解决了某个 Issue 的问题,请在该 Pull Request 的**第一个**评论框中加上:`fix #issue_number`,这样当该 Pull Request 被合并后,会自动关闭对应的 Issue。关键词包括:close, closes, closed, fix, fixes, fixed, resolve, resolves, resolved,请选择合适的词汇。详细可参考[Closing issues via commit messages](https://help.github.com/articles/closing-issues-via-commit-messages)。 此外,在回复评审人意见时,请你遵守以下约定: diff --git a/docs/dev_guides/git_guides/codestyle_check_guide_cn.md b/docs/dev_guides/git_guides/codestyle_check_guide_cn.md index 62ed68d12c0..9d8bee28433 100644 --- a/docs/dev_guides/git_guides/codestyle_check_guide_cn.md +++ b/docs/dev_guides/git_guides/codestyle_check_guide_cn.md @@ -1,10 +1,10 @@ # 代码风格检查指南 -整洁、规范的代码风格,能够保证代码的可读性、易用性和健壮性。Paddle 使用 [pre-commit](http://pre-commit.com/) 工具进行代码风格检查。它可以帮助检查提交代码的不规范问题并格式化(当前会检查C++,Python和CMake语言的代码);诸如cpplint等工具能提前发现代码的潜在静态逻辑错误,提高开发效率。 +整洁、规范的代码风格,能够保证代码的可读性、易用性和健壮性。Paddle 使用 [pre-commit](http://pre-commit.com/) 工具进行代码风格检查。它可以帮助检查提交代码的不规范问题并格式化(当前会检查 C++,Python 和 CMake 语言的代码);诸如 cpplint 等工具能提前发现代码的潜在静态逻辑错误,提高开发效率。 -在Paddle CI 中,由PR-CI-Codestyle-Check流水线对提交的PR进行代码风格检查,若该流水线执行失败,PR将**无法合入**到Paddle仓库。此时需要根据流水线日志的报错信息,在本地修改代码,再次提交。一般情况下,本地使用`pre-commit`进行代码风格检查的结果和 PR-CI-Codestyle-Check流水线结果是一致的。下面介绍 `pre-commit` 的本地安装与使用方法。 +在 Paddle CI 中,由 PR-CI-Codestyle-Check 流水线对提交的 PR 进行代码风格检查,若该流水线执行失败,PR 将**无法合入**到 Paddle 仓库。此时需要根据流水线日志的报错信息,在本地修改代码,再次提交。一般情况下,本地使用`pre-commit`进行代码风格检查的结果和 PR-CI-Codestyle-Check 流水线结果是一致的。下面介绍 `pre-commit` 的本地安装与使用方法。 -Paddle 目前使用的pre-commit版本是 2.17.0。首先安装并在当前目录运行它: +Paddle 目前使用的 pre-commit 版本是 2.17.0。首先安装并在当前目录运行它: ```bash ➜ pip install pre-commit==2.17.0 @@ -13,8 +13,8 @@ Paddle 目前使用的pre-commit版本是 2.17.0。首先安装并在当前目 >注:通过`pip install pre-commit`和`conda install -c conda-forge pre-commit`安装的`yapf`稍有不同的,Paddle 开发人员使用的是`pip install pre-commit`。 -在使用 `git commit` 提交修改时,pre-commit将自动检查修改文件的代码规范,并对不符合规范的文件进行格式化。此时,`git commit` 并未执行成功,需要将pre-commit对文件的修改添加到暂存区,再次commit,直到pre-commit代码检查通过后,本次提交才算完成。 
-例如,对Paddle/paddle/phi/kernels/abs_kernel.h修改后,提交commit,通过`git diff`查看,会发现clang-format修改了该文件,需要添加修改后,再次`git commit`,完成本次提交。 +在使用 `git commit` 提交修改时,pre-commit 将自动检查修改文件的代码规范,并对不符合规范的文件进行格式化。此时,`git commit` 并未执行成功,需要将 pre-commit 对文件的修改添加到暂存区,再次 commit,直到 pre-commit 代码检查通过后,本次提交才算完成。 +例如,对 Paddle/paddle/phi/kernels/abs_kernel.h 修改后,提交 commit,通过`git diff`查看,会发现 clang-format 修改了该文件,需要添加修改后,再次`git commit`,完成本次提交。 ```bash ➜ git diff @@ -70,22 +70,22 @@ Date: xxx ... ``` -目前pre-commit主要执行C++, Python, Cmake语言的代码规范和格式化,以及git相关的通用检查和格式化。所有的检查工具信息如下: +目前 pre-commit 主要执行 C++, Python, Cmake 语言的代码规范和格式化,以及 git 相关的通用检查和格式化。所有的检查工具信息如下: |检查工具名称 | 作用 | 当前版本 | |---|---|---| -|[pre-commit](https://github.com/pre-commit/pre-commit) | hook管理工具 | 2.17.0 -|[remove-crlf](https://github.com/Lucas-C/pre-commit-hooks.git) | 将CRLF改为LF | 1.1.14 -|[pre-commit-hooks](https://github.com/Lucas-C/pre-commit-hooks.git) | pre-commit自带的hook,执行一些通用检查 | 4.1.0 +|[pre-commit](https://github.com/pre-commit/pre-commit) | hook 管理工具 | 2.17.0 +|[remove-crlf](https://github.com/Lucas-C/pre-commit-hooks.git) | 将 CRLF 改为 LF | 1.1.14 +|[pre-commit-hooks](https://github.com/Lucas-C/pre-commit-hooks.git) | pre-commit 自带的 hook,执行一些通用检查 | 4.1.0 |[cpplint]((https://github.com/cpplint/cpplint)) |C++代码风格检查 | 1.6.0 |[clang-format]((https://releases.llvm.org/download.html)) | C++代码格式化 | 13.0.0 -|[pylint]((https://github.com/PyCQA/pylint/))| python代码风格检查,仅用于检查示例代码 | 2.12.0 -|[yapf]((https://github.com/pre-commit/mirrors-yapf))| python代码格式化 | 0.32.0 +|[pylint]((https://github.com/PyCQA/pylint/))| python 代码风格检查,仅用于检查示例代码 | 2.12.0 +|[yapf]((https://github.com/pre-commit/mirrors-yapf))| python 代码格式化 | 0.32.0 ## FAQ -1. pre-commit==2.17.0要求Python>=3.6.1,建议使用较高版本的Python。 -2. 在首次commit时,pre-commit需要初始化环境,执行时间会稍长一些,大概在3min左右。 -3. 在首次commit前,请先升级pip,并使用pypi官方镜像源,否则,可能会导致clang-format或者cmake-lint安装失败。命令如下: +1. pre-commit==2.17.0 要求 Python>=3.6.1,建议使用较高版本的 Python。 +2. 在首次 commit 时,pre-commit 需要初始化环境,执行时间会稍长一些,大概在 3min 左右。 +3. 
在首次 commit 前,请先升级 pip,并使用 pypi 官方镜像源,否则,可能会导致 clang-format 或者 cmake-lint 安装失败。命令如下: ```bash ➜ pip install --upgrade pip ➜ pip config set global.index-url https://pypi.python.org/simple diff --git a/docs/dev_guides/git_guides/index_cn.rst b/docs/dev_guides/git_guides/index_cn.rst index f4f7093e430..da317adcdc1 100644 --- a/docs/dev_guides/git_guides/index_cn.rst +++ b/docs/dev_guides/git_guides/index_cn.rst @@ -6,9 +6,9 @@ Git 操作指南 - `本地开发指南 <./local_dev_guide_cn.html>`_ : 如何在本地进行 Paddle 开发。 - `代码风格检查指南 <./codestyle_check_guide_cn.html>`_ : 如何在本地进行代码风格检查。 -- `提交PR注意事项 <./submit_pr_guide_cn.html>`_ : 提交PR相关的注意事项。 +- `提交 PR 注意事项 <./submit_pr_guide_cn.html>`_ : 提交 PR 相关的注意事项。 - `Code Review 约定 <./code_review_cn.html>`_ : 飞桨框架对于 Code Review 的一些约定介绍。 -- `Paddle CI手册 <./paddle_ci_manual_cn.html>`_ : Paddle CI 介绍说明。 +- `Paddle CI 手册 <./paddle_ci_manual_cn.html>`_ : Paddle CI 介绍说明。 .. toctree:: diff --git a/docs/dev_guides/git_guides/local_dev_guide_cn.md b/docs/dev_guides/git_guides/local_dev_guide_cn.md index 21dfcffb38e..3343a541a80 100644 --- a/docs/dev_guides/git_guides/local_dev_guide_cn.md +++ b/docs/dev_guides/git_guides/local_dev_guide_cn.md @@ -11,7 +11,7 @@ 以下教程将指导你提交代码。 ## [Fork](https://help.github.com/articles/fork-a-repo/) -跳转到[PaddlePaddle](https://github.com/PaddlePaddle/Paddle) GitHub首页,然后单击 `Fork` 按钮,生成自己目录下的仓库,比如 。 +跳转到[PaddlePaddle](https://github.com/PaddlePaddle/Paddle) GitHub 首页,然后单击 `Fork` 按钮,生成自己目录下的仓库,比如 。 ## 克隆(Clone) @@ -25,7 +25,7 @@ ## 创建本地分支 -Paddle 目前使用[Git流分支模型](http://nvie.com/posts/a-successful-git-branching-model/)进行开发,测试,发行和维护,具体请参考 [Paddle 分支规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/others/releasing_process.md)。 +Paddle 目前使用[Git 流分支模型](http://nvie.com/posts/a-successful-git-branching-model/)进行开发,测试,发行和维护,具体请参考 [Paddle 分支规范](https://github.com/PaddlePaddle/docs/blob/develop/docs/design/others/releasing_process.md)。 所有的 feature 和 bug fix 的开发工作都应该在一个新的分支上完成,一般从 `develop` 分支上创建新分支。 @@ -72,7 +72,7 @@ no changes added 
to commit (use "git add" and/or "git commit -a") - **运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`。 -- **执行单测一定要用ctest命令**,不可直接`python test_*.py`。 +- **执行单测一定要用 ctest 命令**,不可直接`python test_*.py`。 参考上述[编译](#编译)过程,编译成功后,在`build`目录下执行下面的命令来运行单元测试: diff --git a/docs/dev_guides/git_guides/paddle_ci_manual_cn.md b/docs/dev_guides/git_guides/paddle_ci_manual_cn.md index c9807ad7d87..7d45c7d5851 100644 --- a/docs/dev_guides/git_guides/paddle_ci_manual_cn.md +++ b/docs/dev_guides/git_guides/paddle_ci_manual_cn.md @@ -1,22 +1,22 @@ -# Paddle CI手册 +# Paddle CI 手册 ## 整体介绍 -当你提交一个PR`(Pull_Request)`,你的PR需要经过一些CI`(Continuous Integration)`,以触发`develop`分支的为例为你展示CI执行的顺序: +当你提交一个 PR`(Pull_Request)`,你的 PR 需要经过一些 CI`(Continuous Integration)`,以触发`develop`分支的为例为你展示 CI 执行的顺序: ![ci_exec_order.png](../images/ci_exec_order.png) 如上图所示,提交一个`PR`,你需要: -- 签署CLA协议 -- PR描述需要符合规范 -- 通过不同平台`(Linux/Mac/Windows/XPU/NPU等)`的编译与单测 +- 签署 CLA 协议 +- PR 描述需要符合规范 +- 通过不同平台`(Linux/Mac/Windows/XPU/NPU 等)`的编译与单测 - 通过静态代码扫描工具的检测 -**需要注意的是:如果你的PR只修改文档部分,你可以在commit中添加说明(commit message)以只触发文档相关的CI,写法如下:** +**需要注意的是:如果你的 PR 只修改文档部分,你可以在 commit 中添加说明(commit message)以只触发文档相关的 CI,写法如下:** ```shell -# PR仅修改文档等内容,只触发PR-CI-Static-Check +# PR 仅修改文档等内容,只触发 PR-CI-Static-Check git commit -m 'test=document_fix' ``` @@ -26,110 +26,110 @@ git commit -m 'test=document_fix' ### CLA -贡献者许可证协议[Contributor License Agreements](https://cla-assistant.io/PaddlePaddle/Paddle)是指当你要给Paddle贡献代码的时候,需要签署的一个协议。如果不签署那么你贡献给 Paddle 项目的修改,即`PR`会被 GitHub 标志为不可被接受,签署了之后,这个`PR`就是可以在 review 之后被接受了。 +贡献者许可证协议[Contributor License Agreements](https://cla-assistant.io/PaddlePaddle/Paddle)是指当你要给 Paddle 贡献代码的时候,需要签署的一个协议。如果不签署那么你贡献给 Paddle 项目的修改,即`PR`会被 GitHub 标志为不可被接受,签署了之后,这个`PR`就是可以在 review 之后被接受了。 ### CheckPRTemplate -检查PR描述信息是否按照模板填写。 +检查 PR 描述信息是否按照模板填写。 -- 通常10秒内检查完成,如遇长时间未更新状态,请re-edit一下PR描述重新触发该CI。 +- 通常 10 秒内检查完成,如遇长时间未更新状态,请 re-edit 一下 PR 描述重新触发该 CI。 ```markdown ### PR types -(必填)从上述选项中,选择并填写PR类型 +(必填)从上述选项中,选择并填写 PR 类型 ### PR changes 
-(必填)从上述选项中,选择并填写PR所修改的内容 +(必填)从上述选项中,选择并填写 PR 所修改的内容 ### Describe -(必填)请填写PR的具体修改内容 +(必填)请填写 PR 的具体修改内容 ``` -### Linux平台 +### Linux 平台 #### PR-CI-Clone -该CI主要是将当前PR的代码从GitHub clone到CI机器,方便后续的CI直接使用。 +该 CI 主要是将当前 PR 的代码从 GitHub clone 到 CI 机器,方便后续的 CI 直接使用。 #### PR-CI-APPROVAL -该CI主要的功能是检测PR中的修改是否通过了审批。在其他CI通过之前,你可以无需过多关注该CI, 其他CI通过后会有相关人员进行review你的PR。 +该 CI 主要的功能是检测 PR 中的修改是否通过了审批。在其他 CI 通过之前,你可以无需过多关注该 CI, 其他 CI 通过后会有相关人员进行 review 你的 PR。 - 执行脚本:`paddle/scripts/paddle_build.sh assert_file_approvals` #### PR-CI-Build -该CI主要是编译出当前PR的编译产物,并且将编译产物上传到BOS(百度智能云对象存储)中,方便后续的CI可以直接复用该编译产物。 +该 CI 主要是编译出当前 PR 的编译产物,并且将编译产物上传到 BOS(百度智能云对象存储)中,方便后续的 CI 可以直接复用该编译产物。 - 执行脚本:`paddle/scripts/paddle_build.sh build_pr_dev` #### PR-CI-Py3 -该CI主要的功能是为了检测当前PR在CPU、Python3版本的编译与单测是否通过。 +该 CI 主要的功能是为了检测当前 PR 在 CPU、Python3 版本的编译与单测是否通过。 - 执行脚本:`paddle/scripts/paddle_build.sh cicheck_py37` #### PR-CI-Coverage -该CI主要的功能是检测当前PR在GPU、Python3版本的编译与单测是否通过,同时增量代码需满足行覆盖率大于90%的要求。 +该 CI 主要的功能是检测当前 PR 在 GPU、Python3 版本的编译与单测是否通过,同时增量代码需满足行覆盖率大于 90%的要求。 - 编译脚本:`paddle/scripts/paddle_build.sh cpu_cicheck_coverage` - 测试脚本:`paddle/scripts/paddle_build.sh gpu_cicheck_coverage` #### PR-CE-Framework -该CI主要是为了测试P0级框架API与预测API的功能是否通过。此CI使用`PR-CI-Build`的编译产物,无需单独编译。 +该 CI 主要是为了测试 P0 级框架 API 与预测 API 的功能是否通过。此 CI 使用`PR-CI-Build`的编译产物,无需单独编译。 -- 框架API测试脚本([PaddlePaddle/PaddleTest](https://github.com/PaddlePaddle/PaddleTest)):`PaddleTest/framework/api/run_paddle_ci.sh` -- 预测API测试脚本([PaddlePaddle/PaddleTest](https://github.com/PaddlePaddle/PaddleTest)):`PaddleTest/inference/python_api_test/parallel_run.sh ` +- 框架 API 测试脚本([PaddlePaddle/PaddleTest](https://github.com/PaddlePaddle/PaddleTest)):`PaddleTest/framework/api/run_paddle_ci.sh` +- 预测 API 测试脚本([PaddlePaddle/PaddleTest](https://github.com/PaddlePaddle/PaddleTest)):`PaddleTest/inference/python_api_test/parallel_run.sh ` #### PR-CI-OP-benchmark -该CI主要的功能是PR中的修改是否会造成OP性能下降或者精度错误。此CI使用`PR-CI-Build`的编译产物,无需单独编译。 +该 CI 主要的功能是 PR 中的修改是否会造成 OP 性能下降或者精度错误。此 CI 
使用`PR-CI-Build`的编译产物,无需单独编译。 - 执行脚本:`tools/ci_op_benchmark.sh run_op_benchmark` -关于CI失败解决方案等详细信息可查阅[PR-CI-OP-benchmark Manual](https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual) +关于 CI 失败解决方案等详细信息可查阅[PR-CI-OP-benchmark Manual](https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual) #### PR-CI-Model-benchmark -该CI主要的功能是检测PR中的修改是否会导致模型性能下降或者运行报错。此CI使用`PR-CI-Build`的编译产物,无需单独编译。 +该 CI 主要的功能是检测 PR 中的修改是否会导致模型性能下降或者运行报错。此 CI 使用`PR-CI-Build`的编译产物,无需单独编译。 - 执行脚本:`tools/ci_model_benchmark.sh run_all` -关于CI失败解决方案等详细信息可查阅[PR-CI-Model-benchmark Manual](https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-Model-benchmark-Manual) +关于 CI 失败解决方案等详细信息可查阅[PR-CI-Model-benchmark Manual](https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-Model-benchmark-Manual) #### PR-CI-Static-Check -该CI主要的功能是检查文档是否符合规范,检测`develop`分支与当前`PR`分支的增量的API英文文档是否符合规范,以及当变更API或OP时需要TPM approval。 +该 CI 主要的功能是检查文档是否符合规范,检测`develop`分支与当前`PR`分支的增量的 API 英文文档是否符合规范,以及当变更 API 或 OP 时需要 TPM approval。 - 编译脚本:`paddle/scripts/paddle_build.sh build_and_check_cpu` - 示例文档检测脚本:`paddle/scripts/paddle_build.sh build_and_check_gpu` #### PR-CI-Codestyle-Check -该CI主要的功能是检查提交代码是否符合规范,详细内容请参考[代码风格检查指南](./codestyle_check_guide_cn.html)。 +该 CI 主要的功能是检查提交代码是否符合规范,详细内容请参考[代码风格检查指南](./codestyle_check_guide_cn.html)。 - 执行脚本:`paddle/scripts/paddle_build.sh build_and_check_gpu` #### PR-CI-CINN -该CI主要是为了编译含CINN的Paddle,并运行Paddle-CINN对接的单测,保证训练框架进行CINN相关开发的正确性。 +该 CI 主要是为了编译含 CINN 的 Paddle,并运行 Paddle-CINN 对接的单测,保证训练框架进行 CINN 相关开发的正确性。 - 编译脚本:`paddle/scripts/paddle_build.sh build_only` - 测试脚本:`paddle/scripts/paddle_build.sh test` #### PR-CI-Inference -该CI主要的功能是为了检测当前PR对C++预测库与训练库的编译和单测是否通过。 +该 CI 主要的功能是为了检测当前 PR 对 C++预测库与训练库的编译和单测是否通过。 - 编译脚本:`paddle/scripts/paddle_build.sh build_inference` - 测试脚本:`paddle/scripts/paddle_build.sh gpu_inference` #### PR-CI-GpuPS -该CI主要是为了保证GPUBOX相关代码合入后编译可以通过。 +该 CI 主要是为了保证 GPUBOX 相关代码合入后编译可以通过。 - 编译脚本:`paddle/scripts/paddle_build.sh build_gpubox` @@ -137,7 +137,7 @@ git commit -m 
'test=document_fix' #### PR-CI-Mac-Python3 -该CI是为了检测当前PR在MAC系统下python35版本的编译与单测是否通过,以及做develop与当前PR的单测增量检测,如有不同,提示需要approval。 +该 CI 是为了检测当前 PR 在 MAC 系统下 python35 版本的编译与单测是否通过,以及做 develop 与当前 PR 的单测增量检测,如有不同,提示需要 approval。 - 执行脚本:`paddle/scripts/paddle_build.sh maccheck_py35` @@ -145,44 +145,44 @@ git commit -m 'test=document_fix' #### PR-CI-Windows -该CI是为了检测当前PR在Windows系统下MKL版本的GPU编译与单测是否通过,以及做develop与当前PR的单测增量检测,如有不同,提示需要approval。 +该 CI 是为了检测当前 PR 在 Windows 系统下 MKL 版本的 GPU 编译与单测是否通过,以及做 develop 与当前 PR 的单测增量检测,如有不同,提示需要 approval。 - 执行脚本:`paddle/scripts/paddle_build.bat wincheck_mkl` #### PR-CI-Windows-OPENBLAS -该CI是为了检测当前PR在Windows系统下OPENBLAS版本的CPU编译与单测是否通过。 +该 CI 是为了检测当前 PR 在 Windows 系统下 OPENBLAS 版本的 CPU 编译与单测是否通过。 - 执行脚本:`paddle/scripts/paddle_build.bat wincheck_openblas` #### PR-CI-Windows-Inference -该CI是为了检测当前PR在Windows系统下预测模块的编译与单测是否通过。 +该 CI 是为了检测当前 PR 在 Windows 系统下预测模块的编译与单测是否通过。 - 执行脚本:`paddle/scripts/paddle_build.bat wincheck_inference` -### XPU机器 +### XPU 机器 #### PR-CI-Kunlun -该CI主要的功能是检测PR中的修改能否在昆仑芯片上编译与单测通过。 +该 CI 主要的功能是检测 PR 中的修改能否在昆仑芯片上编译与单测通过。 - 执行脚本:`paddle/scripts/paddle_build.sh check_xpu_coverage` -### NPU机器 +### NPU 机器 #### PR-CI-NPU -该CI主要是为了检测当前PR对NPU代码编译跟测试是否通过。 +该 CI 主要是为了检测当前 PR 对 NPU 代码编译跟测试是否通过。 - 编译脚本:`paddle/scripts/paddle_build.sh build_only` - 测试脚本:`paddle/scripts/paddle_build.sh gpu_cicheck_py35` -### Sugon-DCU机器 +### Sugon-DCU 机器 #### PR-CI-ROCM-Compile -该CI主要的功能是检测PR中的修改能否在曙光芯片上编译通过。 +该 CI 主要的功能是检测 PR 中的修改能否在曙光芯片上编译通过。 - 执行脚本:`paddle/scripts/musl_build/build_paddle.sh build_only` @@ -190,47 +190,47 @@ git commit -m 'test=document_fix' #### PR-CI-iScan-C -该CI是为了检测当前PR的C++代码是否可以通过静态代码扫描。 +该 CI 是为了检测当前 PR 的 C++代码是否可以通过静态代码扫描。 #### PR-CI-iScan- Python -该CI是为了检测当前PR的Python代码是否可以通过静态代码扫描。 +该 CI 是为了检测当前 PR 的 Python 代码是否可以通过静态代码扫描。 -## CI失败如何处理 -### CLA失败 +## CI 失败如何处理 +### CLA 失败 -- 如果你的cla一直是pending状态,那么需要等其他CI都通过后,点击 Close pull request ,再点击 Reopen pull request ,并等待几分钟(建立在你已经签署cla协议的前提下);如果上述操作重复2次仍未生效,请重新提一个PR或评论区留言。 -- 
如果你的cla是失败状态,可能原因是你提交PR的账号并非你签署cla协议的账号,如下图所示: +- 如果你的 cla 一直是 pending 状态,那么需要等其他 CI 都通过后,点击 Close pull request ,再点击 Reopen pull request ,并等待几分钟(建立在你已经签署 cla 协议的前提下);如果上述操作重复 2 次仍未生效,请重新提一个 PR 或评论区留言。 +- 如果你的 cla 是失败状态,可能原因是你提交 PR 的账号并非你签署 cla 协议的账号,如下图所示: ![cla.png](./images/cla.png) -- 建议你在提交PR前设置: +- 建议你在提交 PR 前设置: ``` git config –local user.email 你的邮箱 git config –local user.name 你的名字 ``` -### CheckPRTemplate失败 +### CheckPRTemplate 失败 -如果你的`CheckPRTemplate`状态一直未变化,这是由于通信原因状态未返回到GitHub。你只需要重新编辑一下PR描述保存后就可以重新触发该条CI,步骤如下: +如果你的`CheckPRTemplate`状态一直未变化,这是由于通信原因状态未返回到 GitHub。你只需要重新编辑一下 PR 描述保存后就可以重新触发该条 CI,步骤如下: ![checkPRtemplate1.png](../images/checkPRtemplate1.png) ![checkPRTemplate2.png](../images/checkPRTemplate2.png) -### 其他CI失败 +### 其他 CI 失败 -当你的`PR`的CI失败时,`paddle-bot`会在你的`PR`页面发出一条评论,同时此评论GitHub会同步到你的邮箱,让你第一时间感知到`PR`的状态变化(注意:只有第一条CI失败的时候会发邮件,之后失败的CI只会更新`PR`页面的评论。) +当你的`PR`的 CI 失败时,`paddle-bot`会在你的`PR`页面发出一条评论,同时此评论 GitHub 会同步到你的邮箱,让你第一时间感知到`PR`的状态变化(注意:只有第一条 CI 失败的时候会发邮件,之后失败的 CI 只会更新`PR`页面的评论。) ![paddle-bot-comment.png](../images/paddle-bot-comment.png) ![ci-details.png](../images/ci-details.png) -你可以通过点击`paddle-bot`评论中的CI名字,也可通过点击CI列表中的`Details`来查看CI的运行日志,如上图。通常运行日志的末尾会告诉你CI失败的原因。 +你可以通过点击`paddle-bot`评论中的 CI 名字,也可通过点击 CI 列表中的`Details`来查看 CI 的运行日志,如上图。通常运行日志的末尾会告诉你 CI 失败的原因。 -由于网络代理、机器不稳定等原因,有时候CI的失败也并不是你的`PR`自身的原因,这时候你只需要rerun此CI即可(你需要将你的GitHub授权于效率云CI平台)。 +由于网络代理、机器不稳定等原因,有时候 CI 的失败也并不是你的`PR`自身的原因,这时候你只需要 rerun 此 CI 即可(你需要将你的 GitHub 授权于效率云 CI 平台)。 ![rerun.png](../images/rerun.png) -如果CI失败你无法判断原因,请联系 @[lelelelelez](https://github.com/lelelelelez)。 +如果 CI 失败你无法判断原因,请联系 @[lelelelelez](https://github.com/lelelelelez)。 若遇到其他问题,请联系 @[lelelelelez](https://github.com/lelelelelez)。 diff --git a/docs/dev_guides/git_guides/submit_pr_guide_cn.md b/docs/dev_guides/git_guides/submit_pr_guide_cn.md index b8ca2107780..05ae3de9c47 100644 --- a/docs/dev_guides/git_guides/submit_pr_guide_cn.md +++ b/docs/dev_guides/git_guides/submit_pr_guide_cn.md @@ -1,20 +1,20 @@ -# 
提交PR注意事项 +# 提交 PR 注意事项 ## 提交 Pull Request -- 请注意commit的数量: +- 请注意 commit 的数量: -原因:如果仅仅修改一个文件但提交了十几个commit,每个commit只做了少量的修改,这会给评审人带来很大困扰。评审人需要逐一查看每个commit才能知道做了哪些修改,且不排除commit之间的修改存在相互覆盖的情况。 +原因:如果仅仅修改一个文件但提交了十几个 commit,每个 commit 只做了少量的修改,这会给评审人带来很大困扰。评审人需要逐一查看每个 commit 才能知道做了哪些修改,且不排除 commit 之间的修改存在相互覆盖的情况。 -建议:每次提交时,保持尽量少的commit。可以通过`git rebase -i HEAD~3`将最新的3个commit合并成一个(你可以根据实际情况修改该数值),再Push到远程仓库,可以参考[rebase 用法](https://www.jianshu.com/p/4a8f4af4e803)。 +建议:每次提交时,保持尽量少的 commit。可以通过`git rebase -i HEAD~3`将最新的 3 个 commit 合并成一个(你可以根据实际情况修改该数值),再 Push 到远程仓库,可以参考[rebase 用法](https://www.jianshu.com/p/4a8f4af4e803)。 -- 请注意每个commit的名称:应能反映当前commit的内容,不能太随意。 +- 请注意每个 commit 的名称:应能反映当前 commit 的内容,不能太随意。 -- 请不要频繁Merge develop 分支(在过CI时,会自动Merge develop),这样会使CI重跑,更加延长CI通过时间。 +- 请不要频繁 Merge develop 分支(在过 CI 时,会自动 Merge develop),这样会使 CI 重跑,更加延长 CI 通过时间。 -- 评审人review过后,不允许使用git push -f 强行提交代码,这样评审人无法看到修改前后的diff,使评审变得困难。 +- 评审人 review 过后,不允许使用 git push -f 强行提交代码,这样评审人无法看到修改前后的 diff,使评审变得困难。 -## 完成 Pull Request PR创建 +## 完成 Pull Request PR 创建 切换到所建分支,然后点击 `Compare & pull request`。 @@ -24,17 +24,17 @@ ![change_base](../images/change_base.png) -如果解决了某个Issue的问题,请在该PUll Request的第一个评论框中加上:fix #issue_number,这样当该PUll Request被合并后,会自动关闭对应的Issue。关键词包括:close, closes, closed, fix, fixes, fixed, resolve, resolves, resolved,请选择合适的词汇。详细可参考[Closing issues via commit messages](https://help.github.com/articles/closing-issues-via-commit-messages/) +如果解决了某个 Issue 的问题,请在该 PUll Request 的第一个评论框中加上:fix #issue_number,这样当该 PUll Request 被合并后,会自动关闭对应的 Issue。关键词包括:close, closes, closed, fix, fixes, fixed, resolve, resolves, resolved,请选择合适的词汇。详细可参考[Closing issues via commit messages](https://help.github.com/articles/closing-issues-via-commit-messages/) 接下来等待 review,如果有需要修改的地方,参照上述步骤更新 origin 中的对应分支即可。 -## 签署CLA协议和通过单元测试 +## 签署 CLA 协议和通过单元测试 -### 签署CLA +### 签署 CLA -在首次向PaddlePaddle提交Pull Request时,你需要签署一次CLA(Contributor License Agreement)协议,以保证你的代码可以被合入,具体签署方式如下: +在首次向 PaddlePaddle 提交 Pull Request 时,你需要签署一次 
CLA(Contributor License Agreement)协议,以保证你的代码可以被合入,具体签署方式如下: -- 请你查看PR中的Check部分,找到license/cla,并点击右侧detail,进入CLA网站 +- 请你查看 PR 中的 Check 部分,找到 license/cla,并点击右侧 detail,进入 CLA 网站
@@ -42,7 +42,7 @@
-- 请你点击CLA网站中的“Sign in with GitHub to agree”,点击完成后将会跳转回你的Pull Request页面 +- 请你点击 CLA 网站中的“Sign in with GitHub to agree”,点击完成后将会跳转回你的 Pull Request 页面
@@ -53,13 +53,13 @@ ### 通过单元测试 -你在Pull Request中每提交一次新的commit后,会触发CI单元测试,请确认你的commit message中已加入必要的说明,请见[提交(commit)](./local_dev_guide_cn.html#commit) +你在 Pull Request 中每提交一次新的 commit 后,会触发 CI 单元测试,请确认你的 commit message 中已加入必要的说明,请见[提交(commit)](./local_dev_guide_cn.html#commit) -请你关注你Pull Request中的CI单元测试进程,它将会在几个小时内完成 +请你关注你 Pull Request 中的 CI 单元测试进程,它将会在几个小时内完成 -当所需的测试后都出现了绿色的对勾,表示你本次commit通过了各项单元测试,你只需要关注显示Required任务,不显示的可能是我们正在测试的任务 +当所需的测试后都出现了绿色的对勾,表示你本次 commit 通过了各项单元测试,你只需要关注显示 Required 任务,不显示的可能是我们正在测试的任务 -如果所需的测试后出现了红色叉号,代表你本次的commit未通过某项单元测试,在这种情况下,请你点击detail查看报错详情,优先自行解决报错问题,无法解决的情况,以评论的方式添加到评论区中,我们的工作人员将和你一起查看 +如果所需的测试后出现了红色叉号,代表你本次的 commit 未通过某项单元测试,在这种情况下,请你点击 detail 查看报错详情,优先自行解决报错问题,无法解决的情况,以评论的方式添加到评论区中,我们的工作人员将和你一起查看 ## 删除远程分支 diff --git a/docs/dev_guides/index_cn.rst b/docs/dev_guides/index_cn.rst index a0d83c519ef..97ed7ae2ec8 100644 --- a/docs/dev_guides/index_cn.rst +++ b/docs/dev_guides/index_cn.rst @@ -8,9 +8,9 @@ - `概述 <./Overview_cn.html>`_ : 贡献指南概述。 - `代码规范 <./style_guides_cn.html>`_ : 代码规范说明。 -- `Git 操作指南 <./git_guides/index_cn.html>`_ : Git 操作相关说明与Paddle CI 手册。 -- `编译安装 `_ : 如何从源码编译安装Paddle。 -- `API开发指南 <./api_contributing_guides/api_contributing_guides_cn.html>`_ : API开发相关说明。 +- `Git 操作指南 <./git_guides/index_cn.html>`_ : Git 操作相关说明与 Paddle CI 手册。 +- `编译安装 `_ : 如何从源码编译安装 Paddle。 +- `API 开发指南 <./api_contributing_guides/api_contributing_guides_cn.html>`_ : API 开发相关说明。 - `算子性能优化贡献指南 <./op_optimization/op_optimization_contributing_guides_cn.html>`_ : 飞桨算子性能优化相关说明。 - `算子数据类型扩展贡献指南 <./op_dtype_extension/op_dtype_extension_contributing_guides_cn.html>`_ : 飞桨算子数据类型扩展相关说明。 - `曙光开发指南 <./sugon/index_cn.html>`_ : 曙光开发相关说明。 diff --git a/docs/dev_guides/op_dtype_extension/op_dtype_extension_acceptance_criteria_cn.md b/docs/dev_guides/op_dtype_extension/op_dtype_extension_acceptance_criteria_cn.md index 4acad719101..26260938083 100644 --- a/docs/dev_guides/op_dtype_extension/op_dtype_extension_acceptance_criteria_cn.md +++ 
b/docs/dev_guides/op_dtype_extension/op_dtype_extension_acceptance_criteria_cn.md @@ -1,23 +1,23 @@ # 算子数据类型扩展 验收规范 -## 通过CI验证 +## 通过 CI 验证 -提交至 Paddle repo 的 Pull Request(简称 PR),涉及到的相关检测CI必须全部 Pass。用来验证对之前功能点的兼容和影响,保障新合入代码对历史代码不产生影响。 +提交至 Paddle repo 的 Pull Request(简称 PR),涉及到的相关检测 CI 必须全部 Pass。用来验证对之前功能点的兼容和影响,保障新合入代码对历史代码不产生影响。 -新增代码必须要有相应的单测保障测试覆盖率达到准入要求(行覆盖率达到90%)。 +新增代码必须要有相应的单测保障测试覆盖率达到准入要求(行覆盖率达到 90%)。 ## 通过精度验证 扩展数据类型后需要添加对应数据类型的单元测试,并通过算子的精度检查。单元测试需要注意以下规范: -- [OP单测必须使用大尺寸输入](https://github.com/PaddlePaddle/Paddle/wiki/OP-test-input-shape-requirements) -- [反向Op必须调用check_grad](https://github.com/PaddlePaddle/Paddle/wiki/Gradient-Check-Is-Required-for-Op-Test) -- [单测精度中atol, rtol, eps, max_relative_error, 不允许自行放大阈值](https://github.com/PaddlePaddle/Paddle/wiki/OP-test-accuracy-requirements) +- [OP 单测必须使用大尺寸输入](https://github.com/PaddlePaddle/Paddle/wiki/OP-test-input-shape-requirements) +- [反向 Op 必须调用 check_grad](https://github.com/PaddlePaddle/Paddle/wiki/Gradient-Check-Is-Required-for-Op-Test) +- [单测精度中 atol, rtol, eps, max_relative_error, 不允许自行放大阈值](https://github.com/PaddlePaddle/Paddle/wiki/OP-test-accuracy-requirements) ## 通过性能验证 -深度学习框架通常支持多种数据类型的输入,其中低精度运算不仅能够减少显存占用,还可以加快计算的效率。在一些特定硬件上,使用半精度浮点数FP16的峰值计算能力最高可达单精度浮点数FP32的数倍,基于此原理实现的混合精度训练策略对模型也可以实现数倍加速。在完成数据类型的扩展后,可以使用飞桨的[OP Benchmark](https://github.com/PaddlePaddle/benchmark/tree/master/api)算子性能测试专业工具对算子性能进行测试对比,例如对于FP16数据类型,验收的基本要求是算子性能要明显优于使用FP32数据类型,同时我们也鼓励开发者针对FP16类型实现极致的加速。如下图所示,OP Benchmark 测试不同数据类型输入下的OP性能真实状态。 +深度学习框架通常支持多种数据类型的输入,其中低精度运算不仅能够减少显存占用,还可以加快计算的效率。在一些特定硬件上,使用半精度浮点数 FP16 的峰值计算能力最高可达单精度浮点数 FP32 的数倍,基于此原理实现的混合精度训练策略对模型也可以实现数倍加速。在完成数据类型的扩展后,可以使用飞桨的[OP Benchmark](https://github.com/PaddlePaddle/benchmark/tree/master/api)算子性能测试专业工具对算子性能进行测试对比,例如对于 FP16 数据类型,验收的基本要求是算子性能要明显优于使用 FP32 数据类型,同时我们也鼓励开发者针对 FP16 类型实现极致的加速。如下图所示,OP Benchmark 测试不同数据类型输入下的 OP 性能真实状态。 -- Conv2d算子,使用FP32数据类型: +- Conv2d 算子,使用 FP32 数据类型: ``` =========================================================================== -- 
paddle version : 0.0.0 @@ -53,7 +53,7 @@ W0706 08:37:25.901605 20400 gpu_context.cc:306] device: 0, cuDNN Version: 8.2. } ``` -- Conv2d算子,使用FP16数据类型: +- Conv2d 算子,使用 FP16 数据类型: ``` =========================================================================== -- paddle version : 0.0.0 @@ -89,20 +89,20 @@ W0706 08:37:25.901605 20400 gpu_context.cc:306] device: 0, cuDNN Version: 8.2. } ``` -## PR内容描述要求 +## PR 内容描述要求 -单元测试内容需要和开发代码放在同一个PR提交,后续修改也需要基于此PR。PR内容描述测试部分需要明确描述下列内容: +单元测试内容需要和开发代码放在同一个 PR 提交,后续修改也需要基于此 PR。PR 内容描述测试部分需要明确描述下列内容: -1. 针对低精度数据类型的支持方法描述:概要说明计算精度是否对不同数据类型敏感,如Transpose算子的计算精度与数据类型无关 +1. 针对低精度数据类型的支持方法描述:概要说明计算精度是否对不同数据类型敏感,如 Transpose 算子的计算精度与数据类型无关 -2. 扩展数据类型后算子的性能状况:给出不同数据类型下算子性能,如FP32和FP16的性能对比 +2. 扩展数据类型后算子的性能状况:给出不同数据类型下算子性能,如 FP32 和 FP16 的性能对比 -3. PR性能优化方案概述:如果扩展数据类型时,还对算子进行了性能优化,则需要描述优化方案 +3. PR 性能优化方案概述:如果扩展数据类型时,还对算子进行了性能优化,则需要描述优化方案 ## 交流与改进 -PR的单测部分必须通过 Paddle 测试人员 review,确保完整覆盖了待测功能点后,会给予 approved。如果 review 过程中发现测试缺失和遗漏的测试点,会通过 GitHub 代码行 Comment 的和 Request Changes 的方式交流改进,待PR修改完毕后给予 approved。 +PR 的单测部分必须通过 Paddle 测试人员 review,确保完整覆盖了待测功能点后,会给予 approved。如果 review 过程中发现测试缺失和遗漏的测试点,会通过 GitHub 代码行 Comment 的和 Request Changes 的方式交流改进,待 PR 修改完毕后给予 approved。 ## 后续维护 -代码成功合入后,如果发现对框架造成了精度和性能下降影响,或者和部分功能存在严重冲突导致Bug,会对代码进行 Revert 并通过 ISSUE 告知相关的开发者,请提交 PR 修复问题,并重新合入。 +代码成功合入后,如果发现对框架造成了精度和性能下降影响,或者和部分功能存在严重冲突导致 Bug,会对代码进行 Revert 并通过 ISSUE 告知相关的开发者,请提交 PR 修复问题,并重新合入。 diff --git a/docs/dev_guides/op_dtype_extension/op_dtype_extension_contributing_guides_cn.rst b/docs/dev_guides/op_dtype_extension/op_dtype_extension_contributing_guides_cn.rst index b5ad1fdca7d..2af6968ce26 100644 --- a/docs/dev_guides/op_dtype_extension/op_dtype_extension_contributing_guides_cn.rst +++ b/docs/dev_guides/op_dtype_extension/op_dtype_extension_contributing_guides_cn.rst @@ -2,7 +2,7 @@ 算子数据类型扩展 提交流程 ##################### -飞桨作为一个开源项目,我们鼓励生态开发者为PaddlePaddle完善和优化各类算子,当你想为飞桨扩展数据类型的支持时,请遵守下述贡献流程,在 GitHub 上完成文档设计和代码设计并提交至相应的 GitHub 仓库。 +飞桨作为一个开源项目,我们鼓励生态开发者为 PaddlePaddle 
完善和优化各类算子,当你想为飞桨扩展数据类型的支持时,请遵守下述贡献流程,在 GitHub 上完成文档设计和代码设计并提交至相应的 GitHub 仓库。 算子数据类型扩展贡献流程 @@ -25,28 +25,28 @@ 对于你贡献的源代码,你将拥有合法的知识产权,为了保护你的权益,你需要先签署一份 `贡献者许可协议 `_ 。 -**注意**:只有当你签署完CLA后,我们才能继续对你提交的设计方案和实现代码进行评审及合入。 +**注意**:只有当你签署完 CLA 后,我们才能继续对你提交的设计方案和实现代码进行评审及合入。 **3. 根据开发难点提交必要的设计文档** 算子数据类型扩展是针对飞桨已有的算子,因此可以只在必要情况下提供设计文档: -- 不需要提供设计文档:如果算子性能符合预期,比如算子使用FP16数据类型的计算性能明显优于使用FP32数据类型,代码实现中仅涉及了算子的功能增强,则不需要撰写设计文档。我们鼓励开发者在进行算子数据类型扩展时首先进行方案设计,你可以将自己的设计思路提交在PR中。 -- 需要提供设计文档:如果算子在扩展了数据类型后性能不符合预期,例如为算子扩展了FP16数据类型,但是性能却差于使用FP32数据类型计算,则需要针对FP16数据类型实现性能的优化,这种情况下需要添加算子性能优化设计文档,这也是为了促进社区开发者更容易的参与开源项目共建,开发者通过与飞桨专家和社区其他用户进行广泛的交流,完善设计方案和PR请求,在提交实现代码之前确保方案符合飞桨的设计理念,同时也便于后续的代码评审及合入工作。你需要将设计文档提交至 GitHub 开发者社区仓库,并根据本地开发指南提交PR。此过程请参考相应的开发规范,并提交以下内容: +- 不需要提供设计文档:如果算子性能符合预期,比如算子使用 FP16 数据类型的计算性能明显优于使用 FP32 数据类型,代码实现中仅涉及了算子的功能增强,则不需要撰写设计文档。我们鼓励开发者在进行算子数据类型扩展时首先进行方案设计,你可以将自己的设计思路提交在 PR 中。 +- 需要提供设计文档:如果算子在扩展了数据类型后性能不符合预期,例如为算子扩展了 FP16 数据类型,但是性能却差于使用 FP32 数据类型计算,则需要针对 FP16 数据类型实现性能的优化,这种情况下需要添加算子性能优化设计文档,这也是为了促进社区开发者更容易的参与开源项目共建,开发者通过与飞桨专家和社区其他用户进行广泛的交流,完善设计方案和 PR 请求,在提交实现代码之前确保方案符合飞桨的设计理念,同时也便于后续的代码评审及合入工作。你需要将设计文档提交至 GitHub 开发者社区仓库,并根据本地开发指南提交 PR。此过程请参考相应的开发规范,并提交以下内容: .. csv-table:: :header: "提交内容", "参考文档", "提交位置" :widths: 10, 30, 30 "算子性能优化设计文档", "- `算子性能优化设计文档 模版 `_ - - `算子性能优化设计文档 示例 `_ ", "`Github开发者社区仓库 `_" + - `算子性能优化设计文档 示例 `_ ", "`Github 开发者社区仓库 `_" **4. 设计文档评审&公示** 针对需要提供设计文档的算子,飞桨专家会对你提交的设计文档进行审核,同时此文档也将接受来自开发者社区的评估,所有开发者都可以在 PR 评论区进行广泛的交流。开发者根据飞桨专家和其他开发者的反馈意见进行讨论并做出修改,最终评审通过后会合入。 -如果你的设计方案比较复杂,会在社区中针对算子的设计文档发起评审会议。飞桨会提前在 PR 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注pr中最新动态,你也可以在评论区自行申请评审会。会议结束后,会在 PR 中发出会议结论。 +如果你的设计方案比较复杂,会在社区中针对算子的设计文档发起评审会议。飞桨会提前在 PR 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注 pr 中最新动态,你也可以在评论区自行申请评审会。会议结束后,会在 PR 中发出会议结论。 **5. 
公布评审结果&合入文档** @@ -60,35 +60,35 @@ :header: "提交内容", "参考文档", "提交位置" :widths: 10, 30,30 - "1、算子数据类型扩展实现代码", "- `Paddle代码规范 `_ - - `C++ OP开发指南 <../api_contributing_guides/new_cpp_op_cn.html>`_ - ", "`Github飞桨训练框架仓库 `_" - "2、API英文文档", "- `API文档书写规范 `_ - ", "`Github飞桨训练框架仓库 `_" - "3、API中文文档", "- `API文档书写规范 `_ + "1、算子数据类型扩展实现代码", "- `Paddle 代码规范 `_ + - `C++ OP 开发指南 <../api_contributing_guides/new_cpp_op_cn.html>`_ + ", "`Github 飞桨训练框架仓库 `_" + "2、API 英文文档", "- `API 文档书写规范 `_ + ", "`Github 飞桨训练框架仓库 `_" + "3、API 中文文档", "- `API 文档书写规范 `_ - `中文文档贡献指南 <../docs_contributing_guides_cn.html>`_ - ", "`Github飞桨文档仓库 `_" - "4、算子不同数据类型性能对比", "- `OP Benchmark使用指南 `_ + ", "`Github 飞桨文档仓库 `_" + "4、算子不同数据类型性能对比", "- `OP Benchmark 使用指南 `_ - `算子性能优化 优化方法 <../op_optimization/op_optimization_method_introduction_cn.html>`_ - `算子数据类型扩展 验收规范 <./op_dtype_extension_acceptance_criteria_cn.html>`_ - ", "`Github飞桨训练框架仓库 `_" + ", "`Github 飞桨训练框架仓库 `_" -当你完成以上代码设计后,需要将代码提交至 `Github飞桨训练框架仓库 `_ 和 `Github飞桨中文文档仓库 `_ ,并根据 `本地开发指南 `_ 提交PR、准备接受社区的评审。 +当你完成以上代码设计后,需要将代码提交至 `Github 飞桨训练框架仓库 `_ 和 `Github 飞桨中文文档仓库 `_ ,并根据 `本地开发指南 `_ 提交 PR、准备接受社区的评审。 **7. 实现代码评审&公示** 飞桨官方会及时安排专家进行算子优化代码审核,代码也将接受来自开发者社区的评审,所有开发者可以在 PR 评论区进行交流。请你对飞桨专家和其他开发者的反馈意见进行讨论并做出修改,最终评审通过后会在开源社区中同步。 -如果你的代码实现逻辑比较复杂,会在社区中针对算子的设计文档发起评审会议。飞桨会提前在 PR 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注pr中最新动态,你也可以在评论区自行申请评审会。会议结束后,会在 PR 中发出会议结论。 +如果你的代码实现逻辑比较复杂,会在社区中针对算子的设计文档发起评审会议。飞桨会提前在 PR 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注 pr 中最新动态,你也可以在评论区自行申请评审会。会议结束后,会在 PR 中发出会议结论。 **8. 公布评审结果&合入代码** -当算子优化代码评审&公示通过,官方会在开源社区中同步,你所实现的优化代码将会合入至 `Github飞桨训练框架仓库 `_ 。 +当算子优化代码评审&公示通过,官方会在开源社区中同步,你所实现的优化代码将会合入至 `Github 飞桨训练框架仓库 `_ 。 **9. 
通过集成测试、精度和性能验收** -当你的代码合入 `Github飞桨训练框架仓库 `_ 后,飞桨会对你的优化代码进行模型级别的集成测试、精度和性能的验收,并告知你测试结果。如果测试通过,恭喜你贡献流程已经全部完成。如果测试不通过,会通过 ISSUE 联系你进行代码修复,请及时关注 GitHub上的最新动态。 +当你的代码合入 `Github 飞桨训练框架仓库 `_ 后,飞桨会对你的优化代码进行模型级别的集成测试、精度和性能的验收,并告知你测试结果。如果测试通过,恭喜你贡献流程已经全部完成。如果测试不通过,会通过 ISSUE 联系你进行代码修复,请及时关注 GitHub 上的最新动态。 **注意**:代码合入 develop 分支之后的第二天,你可以从官网下载 develop 安装包体验此功能。飞桨后续也会将此功能纳入下一个正式版的发版计划。 diff --git a/docs/dev_guides/op_optimization/kernel_primitive_api/example_cn.rst b/docs/dev_guides/op_optimization/kernel_primitive_api/example_cn.rst index 9bd4fe31d27..202b1762fdc 100644 --- a/docs/dev_guides/op_optimization/kernel_primitive_api/example_cn.rst +++ b/docs/dev_guides/op_optimization/kernel_primitive_api/example_cn.rst @@ -3,7 +3,7 @@ API 示例 - `ElementwiseAdd <./add_example_cn.html>`_ : 加法操作,输入和输出具有相同 Shape。 - `Reduce <./reduce_example_cn.html>`_ : 针对最高维进行规约操作。 -- `Model <./model_example_cn.html>`_ : Resnet50执行流程。 +- `Model <./model_example_cn.html>`_ : Resnet50 执行流程。 .. toctree:: :hidden: diff --git a/docs/dev_guides/op_optimization/kernel_primitive_api/index_cn.rst b/docs/dev_guides/op_optimization/kernel_primitive_api/index_cn.rst index 13268ef9ec4..7b1c74916a6 100644 --- a/docs/dev_guides/op_optimization/kernel_primitive_api/index_cn.rst +++ b/docs/dev_guides/op_optimization/kernel_primitive_api/index_cn.rst @@ -4,7 +4,7 @@ Kernel Primitive API Kernel Primitive API 对算子 Kernel 实现中的底层代码进行了抽象与功能封装,提供高性能的 Block 级 IO 运算和 Compute 运算。使用 Kernel Primitive API 进行 Kernel 开发可以更加专注计算逻辑的实现,在保证性能的同时大幅减少代码量,同时实现了算子计算与硬件解耦。 -本部分为 PaddlePaddle 高级开发人员提供了用于 Kernel 开发的 CUDA Kernel Primitive API,该类 API 能够帮助开发者在提升开发效率的同时收获较好的性能。Kernel Primitive API 主要包括 IO API、Compute API 以及 OpFunc,IO API 能够高效的完成全局内存与寄存器间的数据读写操作。Compute API 为通用计算函数,如ElementwiseBinary、ElementwiseUnary 等;OpFunc 用于定义 Compute API 中的计算规则,例如实现 Add 操作则需要定义 AddFunctor 用于 ElementwiseBinary 调用,开发者可以直接使用默认的 OpFunc 也可以根据需要进行自定义,具体的实现规则将在 OpFunc 小节中进行详细介绍。当前 API 均是 Block 级别的多线程 API,开发者可以直接传入当前 Block 的数据指针以及操作类型完成相应的计算,目前仅支持全局数据指针和寄存器指针。 +本部分为 
PaddlePaddle 高级开发人员提供了用于 Kernel 开发的 CUDA Kernel Primitive API,该类 API 能够帮助开发者在提升开发效率的同时收获较好的性能。Kernel Primitive API 主要包括 IO API、Compute API 以及 OpFunc,IO API 能够高效的完成全局内存与寄存器间的数据读写操作。Compute API 为通用计算函数,如 ElementwiseBinary、ElementwiseUnary 等;OpFunc 用于定义 Compute API 中的计算规则,例如实现 Add 操作则需要定义 AddFunctor 用于 ElementwiseBinary 调用,开发者可以直接使用默认的 OpFunc 也可以根据需要进行自定义,具体的实现规则将在 OpFunc 小节中进行详细介绍。当前 API 均是 Block 级别的多线程 API,开发者可以直接传入当前 Block 的数据指针以及操作类型完成相应的计算,目前仅支持全局数据指针和寄存器指针。 API 列表 ############ diff --git a/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_cn.md b/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_cn.md index 7165e4d4df6..501a8358ca9 100644 --- a/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_cn.md +++ b/docs/dev_guides/op_optimization/kernel_primitive_api/io_api_cn.md @@ -26,7 +26,7 @@ __device__ void ReadData(Ty* dst, const Tx* src, int size_nx, int size_ny, int s ### 函数参数 -> dst :输出寄存器指针,数据类型为Ty,大小为 NX * NY。
+> dst :输出寄存器指针,数据类型为 Ty,大小为 NX * NY。
> src :当前 Block 的输入数据指针,数据类型为 Tx。
> size_nx :当前 Block 在最低维最多偏移 size_nx 个元素,参数仅在 IsBoundary = true 时参与计算。
> size_ny :当前 Block 在最高维最多偏移 size_ny 个元素,参数仅在 IsBoundary = true 时参与计算。&#13;
@@ -193,7 +193,7 @@ __device__ void ReadDataReduce(Tx* dst, > stride_nx :最低维每读取 1 个元素需要跳转 stride_nx 列。
> stride_ny :最高维每读取 1 个元素需要跳转 stride_ny 行。
> func : 输入数据存储到寄存器前做的数据变换,如:dst[i] = SquareFunctor(src[i])。 -> reduce_last_dim:原始输入数据的最低维是否进行reduce,当reduce_last_dim = true 按照 threadIdx.x 进行索引,否则使用 threadIdx.y。
+> reduce_last_dim:原始输入数据的最低维是否进行 reduce,当 reduce_last_dim = true 按照 threadIdx.x 进行索引,否则使用 threadIdx.y。
## [WriteData](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/primitive/datamover_primitives.h#L495) @@ -215,7 +215,7 @@ __device__ void WriteData(T* dst, T* src, int num); > T :元素类型。
> NX :每个线程连续读取 NX 列数据。
-> NY :每个线程读取 NY 行数据,当前仅支持为NY = 1。
+> NY :每个线程读取 NY 行数据,当前仅支持 NY = 1。&#13;
> BlockSize :设备属性,标识当前设备线程索引方式。对于 GPU,threadIdx.x 用作线程索引,当前该参数暂不支持。
> IsBoundary :标识是否进行访存边界判断。当 Block 处理的数据总数小于 NX * NY * blockDim.x 时,需要进行边界判断以避免访存越界。
diff --git a/docs/dev_guides/op_optimization/kernel_primitive_api/model_example_cn.md b/docs/dev_guides/op_optimization/kernel_primitive_api/model_example_cn.md index 06a5e1176af..33e33684b0b 100644 --- a/docs/dev_guides/op_optimization/kernel_primitive_api/model_example_cn.md +++ b/docs/dev_guides/op_optimization/kernel_primitive_api/model_example_cn.md @@ -1,9 +1,9 @@ # 示例 - Model ## 模型运行说明 + 在 GPU 平台上默认使用 Kernel Primitive API 编写的算子。 -+ 在昆仑芯 2 代(XPU2)平台上使用 Kernel Primitive API 编写的算子需要开启FLAGS\_run\_kp\_kernel环境变量。 ++ 在昆仑芯 2 代(XPU2)平台上使用 Kernel Primitive API 编写的算子需要开启 FLAGS\_run\_kp\_kernel 环境变量。 -### XPU Kernel Primitive API Paddle模型运行 +### XPU Kernel Primitive API Paddle 模型运行 以 resnet50 为例展示昆仑芯 2 代(XPU2)平台 KP 模型运行的基本流程。
+ 1. 安装 PaddlePaddle XPU2 KP 安装包,当前仅支持 python3.7
diff --git a/docs/dev_guides/op_optimization/kernel_primitive_api/reduce_example_cn.md b/docs/dev_guides/op_optimization/kernel_primitive_api/reduce_example_cn.md index 6a6f15d74af..1e32bb0517b 100644 --- a/docs/dev_guides/op_optimization/kernel_primitive_api/reduce_example_cn.md +++ b/docs/dev_guides/op_optimization/kernel_primitive_api/reduce_example_cn.md @@ -2,7 +2,7 @@ ## 功能说明 + 根据 ReduceOp 中定义的计算规则对最高维度进行规约操作,例如输入为 x[N, H, W, C], axis 取值为 0, 规约后为 out[1, H, W, C],此处以 ReduceSum 为例进行介绍。 -### ReduceOp定义 +### ReduceOp 定义 ``` template struct IdentityFunctor { diff --git a/docs/dev_guides/op_optimization/op_optimization_accpetance_criteria_cn.md b/docs/dev_guides/op_optimization/op_optimization_accpetance_criteria_cn.md index 7902acd0d59..4433e399d77 100644 --- a/docs/dev_guides/op_optimization/op_optimization_accpetance_criteria_cn.md +++ b/docs/dev_guides/op_optimization/op_optimization_accpetance_criteria_cn.md @@ -1,14 +1,14 @@ # 算子性能优化 验收规范 -## CI通过性 +## CI 通过性 -提交至 Paddle repo 的 Pull Request(简称 PR),涉及到的相关检测CI必须全部 Pass。用来验证对之前功能点的兼容和影响,保障新合入代码对历史代码不产生影响。 +提交至 Paddle repo 的 Pull Request(简称 PR),涉及到的相关检测 CI 必须全部 Pass。用来验证对之前功能点的兼容和影响,保障新合入代码对历史代码不产生影响。 新增代码必须要有相应的单测保障测试覆盖率达到准入要求(测试覆盖率(行覆盖率)90%)。 ## 性能测试 -[OP Benchmark](https://github.com/PaddlePaddle/benchmark/tree/master/api)作为一套测试飞桨内算子性能的专业工具, 如下图所示能够输出各类case下的OP性能真实状态, 建议用其进行算子性能测试。经过性能优化,OP Benchmark中全部case不能出现性能下降,需要通过列表,对比性能优化前后的OP性能情况。 +[OP Benchmark](https://github.com/PaddlePaddle/benchmark/tree/master/api)作为一套测试飞桨内算子性能的专业工具, 如下图所示能够输出各类 case 下的 OP 性能真实状态, 建议用其进行算子性能测试。经过性能优化,OP Benchmark 中全部 case 不能出现性能下降,需要通过列表,对比性能优化前后的 OP 性能情况。 ``` =========================================================================== @@ -37,26 +37,26 @@ W0615 14:55:43.819144 28877 gpu_resources.cc:61] Please NOTE: device: 0, GPU Com {"framework": "paddle", "version": "0.0.0", "name": "adaptive_avg_pool2d", "device": "GPU", "backward": false, "speed": {"repeat": 1000, "begin": 10, "end": 990, "total": 0.6467142883612185, 
"wall_time": 0, "total_include_wall_time": 0.6467142883612185, "gpu_time": 0.43744}, "parameters": "x (Variable) - dtype: float32, shape: [4, 2048, 64, 128]\ndata_format (string): NCHW\noutput_size (list): [32, 32]\n"} ``` -## PR内容描述要求 +## PR 内容描述要求 -单元测试内容需要和开发代码放在同一个PR提交,后续修改也需要基于此PR。PR内容描述测试部分需要明确描述下列内容: +单元测试内容需要和开发代码放在同一个 PR 提交,后续修改也需要基于此 PR。PR 内容描述测试部分需要明确描述下列内容: - 1. 合入前Paddle中算子的性能现状 + 1. 合入前 Paddle 中算子的性能现状 2. 业内最优方案的算子性能现状 - 3. PR性能优化方案概述 + 3. PR 性能优化方案概述 4. 优化前后算子性能对比表格 -## OP测试内容及单元测试要求 +## OP 测试内容及单元测试要求 -性能测试至少覆盖[OP Benchmark](https://github.com/PaddlePaddle/benchmark/tree/master/api)中全部的case场景。OP性能优化后,需要在 Paddle 单元测试中对GPU Kernel进行有效性和边界值测试。 +性能测试至少覆盖[OP Benchmark](https://github.com/PaddlePaddle/benchmark/tree/master/api)中全部的 case 场景。OP 性能优化后,需要在 Paddle 单元测试中对 GPU Kernel 进行有效性和边界值测试。 ## 交流与改进 -PR的单测部分必须 Paddle 测试人员 review,确保完整覆盖了待测功能点后,会给予 approved。如果 review 过程中发现测试缺失和遗漏的测试点,会通过 GitHub 代码行 Comment 的和 Request Changes 的方式交流改进,待PR修改完毕后给予 approved。 +PR 的单测部分必须 Paddle 测试人员 review,确保完整覆盖了待测功能点后,会给予 approved。如果 review 过程中发现测试缺失和遗漏的测试点,会通过 GitHub 代码行 Comment 的和 Request Changes 的方式交流改进,待 PR 修改完毕后给予 approved。 ## 后续维护 -代码成功合入后,如果发现对框架造成了性能下降影响,或者和部分功能存在严重冲突导致Bug,会对代码进行 Revert 并通过 ISSUE 告知相关的开发者,请提交 PR 修复问题,并重新合入。 +代码成功合入后,如果发现对框架造成了性能下降影响,或者和部分功能存在严重冲突导致 Bug,会对代码进行 Revert 并通过 ISSUE 告知相关的开发者,请提交 PR 修复问题,并重新合入。 diff --git a/docs/dev_guides/op_optimization/op_optimization_contributing_guides_cn.rst b/docs/dev_guides/op_optimization/op_optimization_contributing_guides_cn.rst index eefd814cdd5..314f8e6a8a6 100644 --- a/docs/dev_guides/op_optimization/op_optimization_contributing_guides_cn.rst +++ b/docs/dev_guides/op_optimization/op_optimization_contributing_guides_cn.rst @@ -20,19 +20,19 @@ **1. 任务认领** -如果你想参与飞桨 OP 开源贡献,可以在 GitHub Paddle 项目上的 Issue 区域进行任务认领,飞桨官方会发布一些 OP性能优化任务,你可以认领其中的算子优化任务,并按照此贡献流程提交算子性能优化设计文档。 +如果你想参与飞桨 OP 开源贡献,可以在 GitHub Paddle 项目上的 Issue 区域进行任务认领,飞桨官方会发布一些 OP 性能优化任务,你可以认领其中的算子优化任务,并按照此贡献流程提交算子性能优化设计文档。 **2. 
签订贡献者许可协议(CLA)** 对于你贡献的源代码,你将拥有合法的知识产权,为了保护你的权益,你需要先签署一份 `贡献者许可协议 `_ 。 -**注意**:只有当你签署完CLA后,我们才能继续对你提交的设计方案和实现代码进行评审及合入。 +**注意**:只有当你签署完 CLA 后,我们才能继续对你提交的设计方案和实现代码进行评审及合入。 **3. 提交算子性能优化设计文档** -算子性能优化设计文档的目的是促进社区开发者更容易的参与开源项目共建,开发者通过与飞桨专家和社区其他用户进行广泛的交流,完善设计方案和PR请求,在提交实现代码之前确保OP性能优化方案设计方案符合飞桨的设计理念,同时也便于后续的代码评审及合入工作。 +算子性能优化设计文档的目的是促进社区开发者更容易的参与开源项目共建,开发者通过与飞桨专家和社区其他用户进行广泛的交流,完善设计方案和 PR 请求,在提交实现代码之前确保 OP 性能优化方案设计方案符合飞桨的设计理念,同时也便于后续的代码评审及合入工作。 -当你想要发起一个算子性能优化的贡献时,需要首先进行算子优化方案设计并设计文档。飞桨提供了 算子性能优化设计文档模版 ,你可以使用这份模版编写设计文档。完成后,你需要将设计文档提交至 GitHub 开发者社区仓库 ,并根据本地开发指南提交PR。 +当你想要发起一个算子性能优化的贡献时,需要首先进行算子优化方案设计并设计文档。飞桨提供了 算子性能优化设计文档模版 ,你可以使用这份模版编写设计文档。完成后,你需要将设计文档提交至 GitHub 开发者社区仓库 ,并根据本地开发指南提交 PR。 此过程请参考相应的开发规范,并提交以下内容: @@ -41,20 +41,20 @@ :widths: 10, 30, 30 "算子性能优化设计文档", "- `算子性能优化设计文档 模版 `_ - - `算子性能优化设计文档 示例 `_ ", "`Github开发者社区仓库 `_" + - `算子性能优化设计文档 示例 `_ ", "`Github 开发者社区仓库 `_" **4. 设计文档评审&公示** 飞桨专家对你提交的设计文档进行审核,同时此文档也将接受来自开发者社区的评估,所有开发者都可以在 PR 评论区进行广泛的交流。开发者根据飞桨专家和其他开发者的反馈意见进行讨论并做出修改,最终评审通过后会合入。 -如果你的设计方案比较复杂,会在社区中针对算子的设计文档发起评审会议。飞桨会提前在 PR 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注pr中最新动态,你也可以在评论区自行申请评审会。会议结束后,会在 PR 中发出会议结论。 +如果你的设计方案比较复杂,会在社区中针对算子的设计文档发起评审会议。飞桨会提前在 PR 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注 pr 中最新动态,你也可以在评论区自行申请评审会。会议结束后,会在 PR 中发出会议结论。 **5. 公布评审结果&合入文档** 当设计文档评审&公示通过后,你的算子性能优化设计文档将会合入至 `飞桨开发者社区仓库 `_ ,并在开源社区中同步。 -**6. 提交API实现代码** +**6. 
提交 API 实现代码** 随后,你可以根据评审通过的设计内容进行代码开发。此过程请参考相应的开发规范,并提交以下内容: @@ -62,29 +62,29 @@ :header: "提交内容", "参考文档", "提交位置" :widths: 10, 30, 30 - "算子性能优化实现代码", "- `Paddle代码规范 `_ - - `C++ OP开发指南 <../api_contributing_guides/new_cpp_op_cn.html>`_ - - `OP Benchmark使用指南 `_ + "算子性能优化实现代码", "- `Paddle 代码规范 `_ + - `C++ OP 开发指南 <../api_contributing_guides/new_cpp_op_cn.html>`_ + - `OP Benchmark 使用指南 `_ - `算子性能优化 优化方法 <./op_optimization_method_introduction_cn.html>`_ - `算子性能优化 验收规范 <./op_optimization_accpetance_criteria_cn.html>`_ - ", "`Github飞桨训练框架仓库 `_" + ", "`Github 飞桨训练框架仓库 `_" -当你完成以上代码设计后,需要将代码提交至 `Github飞桨训练框架仓库 `_ ,并根据 `本地开发指南 `_ 提交PR、准备接受社区的评审。 +当你完成以上代码设计后,需要将代码提交至 `Github 飞桨训练框架仓库 `_ ,并根据 `本地开发指南 `_ 提交 PR、准备接受社区的评审。 **7. 实现代码评审&公示** 飞桨官方会及时安排专家进行算子性能优化代码审核,代码也将接受来自开发者社区的评审,所有开发者可以在 PR 评论区进行交流。请你对飞桨专家和其他开发者的反馈意见进行讨论并做出修改,最终评审通过后会在开源社区中同步。 -如果你的代码实现逻辑比较复杂,会在社区中针对算子的设计文档发起评审会议。飞桨会提前在 PR 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注pr中最新动态,你也可以在评论区自行申请评审会。会议结束后,会在 PR 中发出会议结论。 +如果你的代码实现逻辑比较复杂,会在社区中针对算子的设计文档发起评审会议。飞桨会提前在 PR 评论区公布会议时间、会议地址、参会人、议题等内容,请及时关注 pr 中最新动态,你也可以在评论区自行申请评审会。会议结束后,会在 PR 中发出会议结论。 **8. 公布评审结果&合入代码** -当算子优化代码评审&公示通过后,官方会在开源社区中同步,你所实现的优化代码将会合入至 `Github飞桨训练框架仓库 `_ 。 +当算子优化代码评审&公示通过后,官方会在开源社区中同步,你所实现的优化代码将会合入至 `Github 飞桨训练框架仓库 `_ 。 **9. 
通过模型集成及验收** -当你的代码合入 `Github飞桨训练框架仓库 `_ 后,飞桨会对你的性能优化代码进行模型级集成测试,并告知你测试结果。如果测试通过,恭喜你贡献流程已经全部完成;如果测试不通过,会通过 ISSUE 联系你进行代码修复,请及时关注 GitHub上的最新动态。 +当你的代码合入 `Github 飞桨训练框架仓库 `_ 后,飞桨会对你的性能优化代码进行模型级集成测试,并告知你测试结果。如果测试通过,恭喜你贡献流程已经全部完成;如果测试不通过,会通过 ISSUE 联系你进行代码修复,请及时关注 GitHub 上的最新动态。 **注意**:代码合入 develop 分支之后的第二天,你可以从官网下载 develop 安装包体验此功能。飞桨后续也会将此功能纳入下一个正式版的发版计划。 diff --git a/docs/dev_guides/op_optimization/op_optimization_method_introduction_cn.md b/docs/dev_guides/op_optimization/op_optimization_method_introduction_cn.md index f322b06d9c3..44f049c906a 100644 --- a/docs/dev_guides/op_optimization/op_optimization_method_introduction_cn.md +++ b/docs/dev_guides/op_optimization/op_optimization_method_introduction_cn.md @@ -8,21 +8,21 @@ - 算子性能优化工作的基本目标是获得明显的算子性能提升, 力争达到业界一流的性能水平, 同时保证精度不会下降. -- 飞桨内算子性能优化主要围绕GPU计算开展, 因此需要用户掌握基本的[GPU编程模型](https://developer.nvidia.com/zh-cn/blog/cuda-model-intro-cn/). +- 飞桨内算子性能优化主要围绕 GPU 计算开展, 因此需要用户掌握基本的[GPU 编程模型](https://developer.nvidia.com/zh-cn/blog/cuda-model-intro-cn/). # 优化技巧 ## 1.通用优化技巧 -GPU Kernel直接影响了算子性能, 我们推荐采用以下等通用优化策略提升GPU Kernel的性能, 从而削减算子的计算开销. +GPU Kernel 直接影响了算子性能, 我们推荐采用以下等通用优化策略提升 GPU Kernel 的性能, 从而削减算子的计算开销. | 通用技巧 | | -- | | [向量化读写](https://developer.nvidia.com/blog/cuda-pro-tip-increase-performance-with-vectorized-memory-access>)| | [协线程操作](https://developer.nvidia.com/blog/cooperative-groups/>) | -| [Warp级操作](https://developer.nvidia.com/blog/using-cuda-warp-level-primitives>) | -| [共享内存操作]() ([注意Bank Conflicts](https://developer.nvidia.com/blog/using-shared-memory-cuda-cc/)) | +| [Warp 级操作](https://developer.nvidia.com/blog/using-cuda-warp-level-primitives>) | +| [共享内存操作]() ([注意 Bank Conflicts](https://developer.nvidia.com/blog/using-shared-memory-cuda-cc/)) | ## 2. 
飞桨内置优化技巧 @@ -31,36 +31,36 @@ GPU Kernel直接影响了算子性能, 我们推荐采用以下等通用优化 ### 2.1 [线程配置优化](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/backends/gpu/gpu_launch_config.h) -我们推荐结合OP的使用场景设计对于的线程配置策略,如下图所示[IndexSample OP](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/index_sample_cn.html#index-sample)常用于处理2维数据, 因此使用[2维的线程配置策略](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/gpu/index_sample_kernel.cu#L82-L91)相对比1维配置策略,性能可提升20%左右。 +我们推荐结合 OP 的使用场景设计对于的线程配置策略,如下图所示[IndexSample OP](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/index_sample_cn.html#index-sample)常用于处理 2 维数据, 因此使用[2 维的线程配置策略](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/gpu/index_sample_kernel.cu#L82-L91)相对比 1 维配置策略,性能可提升 20%左右。 -优化GPU Kernel中的线程配置策略, 涵盖一维、二维、三维线程配置策略, 目前已经在`Elementwise`, `Stack`, `IndexSample`等OP中使用. +优化 GPU Kernel 中的线程配置策略, 涵盖一维、二维、三维线程配置策略, 目前已经在`Elementwise`, `Stack`, `IndexSample`等 OP 中使用. -### 2.2 [Warp计算优化](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/funcs/math_cuda_utils.h) +### 2.2 [Warp 计算优化](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/funcs/math_cuda_utils.h) -飞桨内对上文中提到的**Warp级操作**进行了封装, 提供了简易的调用接口, 开发者可调用接口快速获得Warp内或者Block内的全部数据的求和、最大值、最小值. +飞桨内对上文中提到的**Warp 级操作**进行了封装, 提供了简易的调用接口, 开发者可调用接口快速获得 Warp 内或者 Block 内的全部数据的求和、最大值、最小值. ### 2.3 [索引计算优化](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/fast_divmod.h): -当GPU Kernel的索引计算中存在除法或取模操作, 将在导致汇编层面计算开销变大, 我们建议采用快速除法优化这部分的计算开销。飞桨内[Pooling OP](https://github.com/PaddlePaddle/Paddle/blob/890c73158f663b327be7664ed6c4d08fb2c236a9/paddle/phi/kernels/funcs/pooling.cu#L41-L101) 采用索引优化计算后, 性能提升1倍. 
+当 GPU Kernel 的索引计算中存在除法或取模操作, 将在导致汇编层面计算开销变大, 我们建议采用快速除法优化这部分的计算开销。飞桨内[Pooling OP](https://github.com/PaddlePaddle/Paddle/blob/890c73158f663b327be7664ed6c4d08fb2c236a9/paddle/phi/kernels/funcs/pooling.cu#L41-L101) 采用索引优化计算后, 性能提升 1 倍. -### 2.4 [Kps优化工具库](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/kernel_primitive_api/index_cn.html) +### 2.4 [Kps 优化工具库](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/kernel_primitive_api/index_cn.html) -飞桨综合了一系列GPU Kernel通用性能优化技巧推出了Kernel Primitive API,提供高性能的 Block 级 IO 运算和 Compute 运算。使用 Kernel Primitive API 进行 Kernel 开发可以更加专注计算逻辑的实现,在保证性能的同时大幅减少代码量,同时实现了算子计算与硬件解耦,详情见官网[Kernel Primitive API](https://www.paddlepaddle.org.cn/documentation/docs/zh/dev_guides/kernel_primitive_api/index_cn.html), 建议参考案例[ElementwiseAdd](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/kernel_primitive_api/add_example_cn.html)和[Reduce](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/kernel_primitive_api/reduce_example_cn.html) 使用。 +飞桨综合了一系列 GPU Kernel 通用性能优化技巧推出了 Kernel Primitive API,提供高性能的 Block 级 IO 运算和 Compute 运算。使用 Kernel Primitive API 进行 Kernel 开发可以更加专注计算逻辑的实现,在保证性能的同时大幅减少代码量,同时实现了算子计算与硬件解耦,详情见官网[Kernel Primitive API](https://www.paddlepaddle.org.cn/documentation/docs/zh/dev_guides/kernel_primitive_api/index_cn.html), 建议参考案例[ElementwiseAdd](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/kernel_primitive_api/add_example_cn.html)和[Reduce](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/dev_guides/kernel_primitive_api/reduce_example_cn.html) 使用。 ### 3. C++模板特性 -我们也鼓励充分挖掘C++侧的可用优化点, 如使用`#pragma unroll`编译阶段加速指令,编译期自动展循环, 加速运行时循环的执行效率. +我们也鼓励充分挖掘 C++侧的可用优化点, 如使用`#pragma unroll`编译阶段加速指令,编译期自动展循环, 加速运行时循环的执行效率. 
-- 案例: [Elementwise_add OP](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/funcs/elementwise_base.h#L658-L661) 采用模板参数加速循环展开, 性能提升约5% +- 案例: [Elementwise_add OP](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/funcs/elementwise_base.h#L658-L661) 采用模板参数加速循环展开, 性能提升约 5% ``` struct SameDimsElementwisePrimitiveCaller { @@ -75,4 +75,4 @@ struct SameDimsElementwisePrimitiveCaller { ### 4. 内置第三方库 -飞桨内置了cuBLAS, cuDNN, cuSOLVER, Thrust等一系列第三方库, 若采用这些第三方等高性能计算库能获得显著的性能收益,也欢迎使用。cuBLAS使用示例见[matmul_kernel_impl.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/impl/matmul_kernel_impl.h), cuDNN的使用示例见[conv_kernel.cu](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/gpudnn/conv_kernel.cu#L366-L379), cuSOLVER使用示例见[values_vectors_functor.h](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/funcs/values_vectors_functor.h#L219-L260), Thrust使用示例见[coalesced_kernel.cu](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/sparse/gpu/coalesced_kernel.cu#L93-L106). +飞桨内置了 cuBLAS, cuDNN, cuSOLVER, Thrust 等一系列第三方库, 若采用这些第三方等高性能计算库能获得显著的性能收益,也欢迎使用。cuBLAS 使用示例见[matmul_kernel_impl.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/impl/matmul_kernel_impl.h), cuDNN 的使用示例见[conv_kernel.cu](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/gpudnn/conv_kernel.cu#L366-L379), cuSOLVER 使用示例见[values_vectors_functor.h](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/funcs/values_vectors_functor.h#L219-L260), Thrust 使用示例见[coalesced_kernel.cu](https://github.com/PaddlePaddle/Paddle/blob/30838aa698d6f3f3b0860b052f6a50ef53ac6784/paddle/phi/kernels/sparse/gpu/coalesced_kernel.cu#L93-L106). 
diff --git a/docs/dev_guides/sugon/complie_and_test_cn.md b/docs/dev_guides/sugon/complie_and_test_cn.md index 005daae35c0..8f99ff8471b 100644 --- a/docs/dev_guides/sugon/complie_and_test_cn.md +++ b/docs/dev_guides/sugon/complie_and_test_cn.md @@ -1,11 +1,11 @@ -# 曙光智算平台-Paddle源码编译和单测执行 +# 曙光智算平台-Paddle 源码编译和单测执行 由于[曙光智算](https://www.hpccube.com/ac/home/index.html)环境下网路受限,直接编译飞桨源码会遇到第三方依赖库下载失败从而导致编译失败的问题。因此在曙光智算环境下进行飞桨源码编译与单测需要以下几个步骤: -## 第一章节:本地容器编译Paddle源码,并进行Paddle目录打包 +## 第一章节:本地容器编译 Paddle 源码,并进行 Paddle 目录打包 -**第一步**:本地启动编译容器 (推荐使用Paddle镜像) +**第一步**:本地启动编译容器 (推荐使用 Paddle 镜像) ```bash # 并拉取开发镜像 @@ -18,47 +18,47 @@ docker run -it --name paddle-dev --shm-size=128G --network=host \ paddlepaddle/paddle:latest-dev-rocm4.0-miopen2.11 /bin/bash ``` -**第二步**:在容器内进行源码编译,CMAKE编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) +**第二步**:在容器内进行源码编译,CMAKE 编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) ```bash -# 拉取Paddle最新源码到本地目录,默认为develop分支 +# 拉取 Paddle 最新源码到本地目录,默认为 develop 分支 git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle # 创建编译目录 mkdir build && cd build -# 执行cmake +# 执行 cmake cmake .. 
-DPY_VERSION=3.7 -DWITH_ROCM=ON -DWITH_TESTING=ON \ -DWITH_DISTRIBUTE=ON -DWITH_MKL=ON # 使用以下命令来编译 make -j8 -# 编译完成之后将整个Paddle目录打包 +# 编译完成之后将整个 Paddle 目录打包 cd /workspace tar -zcvf Paddle.tar.gz ./Paddle ``` -## 第二章节:将打包的Paddle源码包上传曙光智算平台个人目录 +## 第二章节:将打包的 Paddle 源码包上传曙光智算平台个人目录 **第一步**:登录[曙光智算](https://www.hpccube.com/ac/home/index.html) 平台后进入菜单顶部的「文件 -> E-File」环境 ![图片](../images/sugon_login.png) -**第二步**:在E-File页面点击文件上传,将压缩的Paddle包上传到智算平台的个人目录下 +**第二步**:在 E-File 页面点击文件上传,将压缩的 Paddle 包上传到智算平台的个人目录下 -> **注意**:Paddle.tar.gz 包大约6G左右大小,建议使用“快传“节省传输时间。 +> **注意**:Paddle.tar.gz 包大约 6G 左右大小,建议使用“快传“节省传输时间。 ![图片](../images/sugon_upload.png) -## 第三章节:智算平台下启动Paddle开发容器,执行编译和单测 +## 第三章节:智算平台下启动 Paddle 开发容器,执行编译和单测 **第一步**:登录[曙光智算](https://www.hpccube.com/ac/home/index.html) 平台后进入页面中部的「我的服务 -> 智能计算服务」 ![图片](../images/sugon_my_service.png) -**第二步**:在AI服务页面,点击页面底部的 「SSH | Jupyter」图标 +**第二步**:在 AI 服务页面,点击页面底部的 「SSH | Jupyter」图标 ![图片](../images/sugon_ssh.png) @@ -68,25 +68,25 @@ tar -zcvf Paddle.tar.gz ./Paddle **第四步**:在「创建实例」页面中选择和填入如下信息之后,点击「运行」按钮 -> **注意**:这里必须选择框架版本为 paddle:latest-dev-rocm4.0-miopen2.11,建议选择CPU数量8,C86加速卡数量1,内存64.0 +> **注意**:这里必须选择框架版本为 paddle:latest-dev-rocm4.0-miopen2.11,建议选择 CPU 数量 8,C86 加速卡数量 1,内存 64.0 ![图片](../images/sugon_framework_version.png) **第五步**:在「实例」页面等待容器状态从「等待」转为「运行」后,点击右侧「SSH」按钮 -如下图所示,第一行为容器刚创建时状态为「等待」,右侧「SSH」按钮为灰色不可点击;预计10分钟左右容器转为第二行的「运行」状态,右侧红框中的「SSH」按钮转为蓝色可点击状态。点击「SSH」按钮后弹出的「WebShell」页面即为刚创建的容器环境。 +如下图所示,第一行为容器刚创建时状态为「等待」,右侧「SSH」按钮为灰色不可点击;预计 10 分钟左右容器转为第二行的「运行」状态,右侧红框中的「SSH」按钮转为蓝色可点击状态。点击「SSH」按钮后弹出的「WebShell」页面即为刚创建的容器环境。 ![图片](../images/sugon_test.png) -**第六步**:在「WebShell」页面中,链接Paddle源码目录并重新编译后,即可执行相关算子单测任务 +**第六步**:在「WebShell」页面中,链接 Paddle 源码目录并重新编译后,即可执行相关算子单测任务 -> **注意**:以下步骤目的是为了在容器内创建和本文第一章节中的本地容器一样的编译路径,否则CMAKE编译会出错 +> **注意**:以下步骤目的是为了在容器内创建和本文第一章节中的本地容器一样的编译路径,否则 CMAKE 编译会出错 ```bash -# 先将个人目录下的Paddle源码包进行解压 +# 先将个人目录下的 Paddle 源码包进行解压 tar -zxvf Paddle.tar.gz -# 将解压后的源码目录软链接到/workspace/Paddle目录 +# 将解压后的源码目录软链接到/workspace/Paddle 目录 sudo 
mkdir /workspace sudo ln -s ~/Paddle /workspace/Paddle sudo chown -R $USER:$USER /workspace @@ -94,7 +94,7 @@ sudo chown -R $USER:$USER /workspace # 进入源码编译目录,重新执行编译命令 cd /workspace/Paddle/build && make -j8 -# 编译成功之后,在build目录下执行单测 +# 编译成功之后,在 build 目录下执行单测 ctest -R test_atan2_op -VV ``` diff --git a/docs/dev_guides/sugon/index_cn.rst b/docs/dev_guides/sugon/index_cn.rst index 4fffc17ab7c..84d57649217 100644 --- a/docs/dev_guides/sugon/index_cn.rst +++ b/docs/dev_guides/sugon/index_cn.rst @@ -4,9 +4,9 @@ 以下将说明 Paddle 适配曙光相关的开发指南: -- `曙光智算平台-Paddle源码编译和单测执行 <./complie_and_test_cn.html>`_ : 如何曙光曙光智算平台编译Paddle源码编译并执行单测。 -- `Paddle适配C86加速卡详解 <./paddle_c86_cn.html>`_ : 详解Paddle适配C86加速卡。 -- `Paddle框架下ROCm(HIP)算子单测修复指导 <./paddle_c86_fix_guides_cn.html>`_ : 指导Paddle框架下ROCm(HIP)算子单测修复。 +- `曙光智算平台-Paddle 源码编译和单测执行 <./complie_and_test_cn.html>`_ : 如何在曙光智算平台编译 Paddle 源码并执行单测。 +- `Paddle 适配 C86 加速卡详解 <./paddle_c86_cn.html>`_ : 详解 Paddle 适配 C86 加速卡。 +- `Paddle 框架下 ROCm(HIP)算子单测修复指导 <./paddle_c86_fix_guides_cn.html>`_ : 指导 Paddle 框架下 ROCm(HIP)算子单测修复。 ..
toctree:: diff --git a/docs/dev_guides/sugon/paddle_c86_cn.md b/docs/dev_guides/sugon/paddle_c86_cn.md index bd02b23b7d2..cc93ac6d665 100644 --- a/docs/dev_guides/sugon/paddle_c86_cn.md +++ b/docs/dev_guides/sugon/paddle_c86_cn.md @@ -1,19 +1,19 @@ -# Paddle适配C86加速卡详解 +# Paddle 适配 C86 加速卡详解 -当前百度飞桨 PaddlePaddle 已经适配了支持C86加速卡 的 ROCm 软件栈,并提供了官方 [Paddle ROCm 安装包](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/guides/09_hardware_support/rocm_docs/paddle_install_cn.html) 的下载,以及官方 [开发与运行镜像](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/09_hardware_support/rocm_docs/paddle_install_cn.html) 的下载,并完成了C86加速卡 上 [70个模型](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/09_hardware_support/rocm_docs/paddle_rocm_cn.html) 的训练与推理任务的支持。 +当前百度飞桨 PaddlePaddle 已经适配了支持 C86 加速卡 的 ROCm 软件栈,并提供了官方 [Paddle ROCm 安装包](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/guides/09_hardware_support/rocm_docs/paddle_install_cn.html) 的下载,以及官方 [开发与运行镜像](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/09_hardware_support/rocm_docs/paddle_install_cn.html) 的下载,并完成了 C86 加速卡 上 [70 个模型](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/09_hardware_support/rocm_docs/paddle_rocm_cn.html) 的训练与推理任务的支持。 ## Paddle 适配 ROCm 软件栈 ### ROCm 软件栈简介 -ROCm 软件栈整体架构如下,其中除了支持C86加速卡 的 Driver/Firemware 之外,对上层应用还提供了一整套较为完整的开发套件,其中与深度学习框架适配相关的最为主要的几个部分包括: +ROCm 软件栈整体架构如下,其中除了支持 C86 加速卡 的 Driver/Firemware 之外,对上层应用还提供了一整套较为完整的开发套件,其中与深度学习框架适配相关的最为主要的几个部分包括: -- HIP: 支持异构计算的 C++ Driver/Runtime API,兼容 CUDA 并提供了与 CUDA 多个版本的 [API对照表](https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/CUDA_Runtime_API_functions_supported_by_HIP.md) +- HIP: 支持异构计算的 C++ Driver/Runtime API,兼容 CUDA 并提供了与 CUDA 多个版本的 [API 对照表](https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/CUDA_Runtime_API_functions_supported_by_HIP.md) - HIP Kernel: 支持自定义 Kernel 编程,编程语法同 CUDA Kernel 一致,且支持直接编译 CUDA Kernel 源码为 HIP Kernel - 
HIPCC:HIP Kernel 编译器,类同于 NVCC 编译器,支持将 CUDA Kernel 或者 HIP Kernel 编译为 ROCm 上的可执行程序 - 加速库、数学库及通讯库:包括 MIOpen,rocBLAS,RCCL 等,分别对标 CUDA 的 cuDNN,cuBLAS, NCCL 等 ([对照表](https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_porting_guide.md#library-equivalents)) -ROCm 软件栈本身具备较高的成熟度与完备性,用户根据 ROCm 提供的 CUDA到HIP的[代码移植手册](https://rocmdocs.amd.com/en/latest/Programming_Guides/HIP-porting-guide.html) ,可以较为方便的将 CUDA 上的工作移植到 HIP 上,使得用户程序可以支持跨 ROCm 与 CUDA 的异构计算。 +ROCm 软件栈本身具备较高的成熟度与完备性,用户根据 ROCm 提供的 CUDA 到 HIP 的[代码移植手册](https://rocmdocs.amd.com/en/latest/Programming_Guides/HIP-porting-guide.html) ,可以较为方便的将 CUDA 上的工作移植到 HIP 上,使得用户程序可以支持跨 ROCm 与 CUDA 的异构计算。 ![图片](../images/sugon_rocm.png) @@ -30,8 +30,8 @@ ROCm 软件栈本身具备较高的成熟度与完备性,用户根据 ROCm 提 - [operators.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/operators.cmake#L98) 配置 HIP Kernel 算子注册方式,自动映射 CUDA Kernel 算子文件为 HIP Kernel 算子文件 - 其他相关 cmake 配置,包括依赖的第三方库如 [eigen.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/external/eigen.cmake) 和 [warpctc.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/external/warpctc.cmake)等 2. 
设备接入:主要包括设备相关的 Driver/Runtime API 的接入,以及通讯库等底层加速库的接入工作 - - 动态库加载: 在 [paddle/fluid/platform/dynload](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/platform/dynload) 目录下动态加载 ROCm 加速库及所需API,如 [hiprand.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/dynload/hiprand.h) [miopen.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/dynload/miopen.h) [rocblas.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/dynload/rocblas.h)等 - - Driver/Runtime适配:主要在 [paddle/fluid/platform/device/gpu](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/platform/device/gpu) 目录下对HIP和CUDA进行了相关API的封装,其中在 [gpu_types.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/device/gpu/gpu_types.h) 少量封装了部分与CUDA 差异较小的数据类型定义,部分ROCm独有代码位于[paddle/fluid/platform/device/gpu/rocm](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/platform/device/gpu/rocm)目录 + - 动态库加载: 在 [paddle/fluid/platform/dynload](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/platform/dynload) 目录下动态加载 ROCm 加速库及所需 API,如 [hiprand.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/dynload/hiprand.h) [miopen.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/dynload/miopen.h) [rocblas.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/dynload/rocblas.h)等 + - Driver/Runtime 适配:主要在 [paddle/fluid/platform/device/gpu](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/platform/device/gpu) 目录下对 HIP 和 CUDA 进行了相关 API 的封装,其中在 [gpu_types.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/device/gpu/gpu_types.h) 少量封装了部分与 CUDA 差异较小的数据类型定义,部分 ROCm 独有代码位于[paddle/fluid/platform/device/gpu/rocm](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/platform/device/gpu/rocm)目录 - Memory 管理:利用上一步封装好的 Driver/Runtime API 对 
[memcpy.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/memory/memcpy.cc#L574) 与 [paddle/fluid/memory/allocation](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/memory/allocation) 目录下的多种 Memory Allocator 进行实现 - Device Context 管理:利用封装好的 API 实现对设备上下文的管理及设备池的初始化,位于 [device_contxt.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/platform/device_context.h) - 其他设备管理相关的适配接入,如 Profiler, Tracer, Error Message, NCCL 等,代码主要位于 [Paddle/platform](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/platform) 目录下 @@ -40,11 +40,11 @@ ROCm 软件栈本身具备较高的成熟度与完备性,用户根据 ROCm 提 - 数学库支持:通过 ROCm 的 rocBLAS 库,实现 Paddle 在 [blas.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/funcs/blas/blas.h) 中定义的 BLAS 函数,代码位于 [blas_impl.hip.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/phi/kernels/funcs/blas/blas_impl.hip.h) - Kernel 算子注册:根据 [operators.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/operators.cmake#L98) 的修改,可以大部分复用 Paddle 框架下的 CUDA 已有算子 Kernel 文件,存在部分 Kernel 实现在 CUDA 与 ROCm 平台下有所区别,例如线程数、WarpSize 以及 thurst 库等;此类区别需要针对具体的算子实现进行相应的调整,通过 Paddle 自身的算子单测用例以及模型验证测试可以对此类问题进行定位并修复 - MIOpen 算子注册:MIOpen 与 cuDNN 的接口与类型设计较为类似,但在实际执行中还是存在一定区别,因为对于此类算子需根据 MIOpen API 进行适配,甚至对于差异较大的算子例如 [rnn_op.cu.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/rnn_op.cu.cc#L506) 需要进行 weight 数据重排 -4. Python API 兼容适配:当前 Paddle ROCm 兼容所有 Paddle CUDA 相关的 Python API,这意味着对于所有目前 Paddle 可以支持 CUDA 的模型,无需修改任意代码可以直接运行在C86加速卡 上 +4. 
Python API 兼容适配:当前 Paddle ROCm 兼容所有 Paddle CUDA 相关的 Python API,这意味着对于所有目前 Paddle 可以支持 CUDA 的模型,无需修改任意代码可以直接运行在 C86 加速卡 上 -经过以上几个步骤的适配工作,用户可以无需修改任意代码就将之前在 Paddle CUDA 平台上的程序运行在C86加速卡 的环境下,在保持用户已有的基于 Paddle CUDA 的编程习惯的同时,也减少了 Paddle 已有的模型套件在 CUDA 平台与 ROCm 平台之间的迁移工作。 +经过以上几个步骤的适配工作,用户可以无需修改任意代码就将之前在 Paddle CUDA 平台上的程序运行在 C86 加速卡 的环境下,在保持用户已有的基于 Paddle CUDA 的编程习惯的同时,也减少了 Paddle 已有的模型套件在 CUDA 平台与 ROCm 平台之间的迁移工作。 -例如以下这份代码可以同时运行于 Paddle 的 CUDA 环境和C86加速卡环境,且输出结果一致: +例如以下这份代码可以同时运行于 Paddle 的 CUDA 环境和 C86 加速卡环境,且输出结果一致: ```python import paddle @@ -69,7 +69,7 @@ Tensor(shape=[2, 3, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True, [0.03205860, 0.08714432, 0.23688284, 0.64391428], [0.03205860, 0.08714432, 0.23688284, 0.64391428]]]) -# C86加速卡 输出如下 +# C86 加速卡 输出如下 W1102 10:06:45.729085 875 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 90.0, Driver API Version: 321.0, Runtime API Version: 3.1 W1102 10:06:45.733167 875 device_context.cc:460] device: 0, MIOpen Version: 2.11.0 Tensor(shape=[2, 3, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True, diff --git a/docs/dev_guides/sugon/paddle_c86_fix_guides_cn.md b/docs/dev_guides/sugon/paddle_c86_fix_guides_cn.md index 286afda2768..a779e4033a2 100644 --- a/docs/dev_guides/sugon/paddle_c86_fix_guides_cn.md +++ b/docs/dev_guides/sugon/paddle_c86_fix_guides_cn.md @@ -1,31 +1,31 @@ -# Paddle框架下ROCm(HIP)算子单测修复指导 +# Paddle 框架下 ROCm(HIP)算子单测修复指导 -进行ROCm(HIP)算子修复之前,请先仔细阅读 [曙光智算平台-Paddle源码编译和单测执行](./compile_and_test_cn.html) 并按照其中步骤准备好编译和单测环境。并阅读 [Paddle适配C86加速卡详解](./paddle_c86_cn.html) 文档了解当前Paddle与ROCm(HIP)的适配方案和具体的代码修改。 +进行 ROCm(HIP)算子修复之前,请先仔细阅读 [曙光智算平台-Paddle 源码编译和单测执行](./compile_and_test_cn.html) 并按照其中步骤准备好编译和单测环境。并阅读 [Paddle 适配 C86 加速卡详解](./paddle_c86_cn.html) 文档了解当前 Paddle 与 ROCm(HIP)的适配方案和具体的代码修改。 -常见的HIP算子问题已经修复办法如下,也可以在[PaddlePR](https://github.com/PaddlePaddle/Paddle/pulls?q=is%3Apr+%5BROCm%5D)中搜索`[ROCm]`关键字查看ROCm(HIP)相关的代码修改,更多问题请自行探索解决方法。 +常见的 HIP 
算子问题已经修复办法如下,也可以在[PaddlePR](https://github.com/PaddlePaddle/Paddle/pulls?q=is%3Apr+%5BROCm%5D)中搜索`[ROCm]`关键字查看 ROCm(HIP)相关的代码修改,更多问题请自行探索解决方法。 -> 注:打开 Paddle Debug Level日志可以参考 [Contribute Code](https://github.com/PaddlePaddle/Paddle/blob/develop/CONTRIBUTING.md#writing-logs) 中的 Writing Logs章节。 +> 注:打开 Paddle Debug Level 日志可以参考 [Contribute Code](https://github.com/PaddlePaddle/Paddle/blob/develop/CONTRIBUTING.md#writing-logs) 中的 Writing Logs 章节。 -## 算子修复举例1:无法找到对应算子的GPU Kernel +## 算子修复举例 1:无法找到对应算子的 GPU Kernel -这类问题常由相应的算子没有在HIP下注册成功引起,首先根据报错信息中提示的算子名确认该算子的Kernel情况。执行以下步骤,并观察输出结果中的`data_type`以及`place`的结果: +这类问题常由相应的算子没有在 HIP 下注册成功引起,首先根据报错信息中提示的算子名确认该算子的 Kernel 情况。执行以下步骤,并观察输出结果中的`data_type`以及`place`的结果: ```bash -# 将以下路径输出到PYTHONPATH环境变量 +# 将以下路径输出到 PYTHONPATH 环境变量 export PYTHONPATH=/workspace/Paddle/build/python:$PYTHONPATH -# 执行如下命令打印算子的Kernel列表 - 将其中的XXX改为真正的算子名即可 +# 执行如下命令打印算子的 Kernel 列表 - 将其中的 XXX 改为真正的算子名即可 python -c $'import paddle\nfor k in paddle.fluid.core._get_all_register_op_kernels()["XXX"]:print(k)' -# 例如如下输出,表示存在数据类型为float,算子硬件类型为GPU的Kernel -# 通常会有多行类似以下结果的输出,请根据输出仔细分析算子Kernel结果 +# 例如如下输出,表示存在数据类型为 float,算子硬件类型为 GPU 的 Kernel +# 通常会有多行类似以下结果的输出,请根据输出仔细分析算子 Kernel 结果 data_type[float]:data_layout[Undefined(AnyLayout)]:place[Place(gpu:0)]:library_type[PLAIN] ``` -### 情况1:整个算子只有CPU Kernel,没有任何 GPU Kernel +### 情况 1:整个算子只有 CPU Kernel,没有任何 GPU Kernel -例如如下输出表示:算子只存在Place为CPU的Kernel,不存在任何Place为GPU的Kernel +例如如下输出表示:算子只存在 Place 为 CPU 的 Kernel,不存在任何 Place 为 GPU 的 Kernel ```bash # 错误提示信息如下: @@ -33,12 +33,12 @@ data_type[float]:data_layout[Undefined(AnyLayout)]:place[Place(gpu:0)]:library_t 928: [Hint: Expected kernel_iter != kernels.end(), but received kernel_iter == kernels.end().] 
(at /workspace/Paddle/paddle/fluid/framework/operator.cc:1503) 928: [operator < lu > error] -# 打印算子名对应的Kernel列表如下 +# 打印算子名对应的 Kernel 列表如下 data_type[double]:data_layout[Undefined(AnyLayout)]:place[Place(cpu)]:library_type[PLAIN] data_type[float]:data_layout[Undefined(AnyLayout)]:place[Place(cpu)]:library_type[PLAIN] ``` -这个原因通常是由于算子的GPU Kernel源码文件没有加入编译目标引起的。修复办法是先在 [operators.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/operators.cmake#L245) 中查看改算子的源码是否被移除,如下图代码所示: +这个原因通常是由于算子的 GPU Kernel 源码文件没有加入编译目标引起的。修复办法是先在 [operators.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/operators.cmake#L245) 中查看该算子的源码是否被移除,如下图代码所示: ![图片](../images/sugon_find_source_code.png) @@ -46,13 +46,13 @@ data_type[float]:data_layout[Undefined(AnyLayout)]:place[Place(cpu)]:library_typ ![图片](../images/sugon_paddle_with_hip.png) -根据注释,是由于初始适配时ROCm下的rocSolver库未曾适配导致的,需参考cuSovler代码以及 [hipSOLVER](https://github.com/ROCmSoftwarePlatform/hipSOLVER) 中rocSovler和cuSolver的API封装示例修改代码使改算子可以在HIP环境下正确运行。 +根据注释,是由于初始适配时 ROCm 下的 rocSolver 库未曾适配导致的,需参考 cuSovler 代码以及 [hipSOLVER](https://github.com/ROCmSoftwarePlatform/hipSOLVER) 中 rocSovler 和 cuSolver 的 API 封装示例修改代码使该算子可以在 HIP 环境下正确运行。 -### 情况2:算子GPU Kernel存在,少了某个LibraryType下的Kernel +### 情况 2:算子 GPU Kernel 存在,少了某个 LibraryType 下的 Kernel 这类问题的常见报错信息如下: -例如如下输出表示:算子只存在GPU Kernel,但是只有PLAIN没有CUDNN类型实现 +例如如下输出表示:算子只存在 GPU Kernel,但是只有 PLAIN 没有 CUDNN 类型实现 ```bash # 错误提示信息如下: @@ -60,7 +60,7 @@ data_type[float]:data_layout[Undefined(AnyLayout)]:place[Place(cpu)]:library_typ
(at /workspace/Paddle/paddle/fluid/framework/operator.cc:1503) 1000: [operator < pool2d_grad_grad > error] -# 打印算子名对应的Kernel列表如下 +# 打印算子名对应的 Kernel 列表如下 data_type[double]:data_layout[Undefined(AnyLayout)]:place[Place(gpu:0)]:library_type[PLAIN] data_type[::paddle::platform::float16]:data_layout[Undefined(AnyLayout)]:place[Place(gpu:0)]:library_type[PLAIN] data_type[float]:data_layout[Undefined(AnyLayout)]:place[Place(cpu)]:library_type[PLAIN] @@ -68,40 +68,40 @@ data_type[float]:data_layout[Undefined(AnyLayout)]:place[Place(gpu:0)]:library_t data_type[double]:data_layout[Undefined(AnyLayout)]:place[Place(cpu)]:library_type[PLAIN] ``` -查看对应算子源码文件 [pool_cudnn_op.cu.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/pool_cudnn_op.cu.cc#L555) 可知对应的`pool2d_grad_grad`只在CUDA平台下注册了CUDNN的`pool2d_grad_grad`算子,但是没有在HIP平台下注册,因此修改代码在HIP平台下进行注册即可。 +查看对应算子源码文件 [pool_cudnn_op.cu.cc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/pool_cudnn_op.cu.cc#L555) 可知对应的`pool2d_grad_grad`只在 CUDA 平台下注册了 CUDNN 的`pool2d_grad_grad`算子,但是没有在 HIP 平台下注册,因此修改代码在 HIP 平台下进行注册即可。 ![图片](../images/sugon_register_op_kernel.png) -### 情况3:存在算子的GPU Kernel,只是少了某几个数据类型 +### 情况 3:存在算子的 GPU Kernel,只是少了某几个数据类型 -例如如下输出表示:存在Place为GPU的GPU Kernel,只是少了数据类型bfloat16 +例如如下输出表示:存在 Place 为 GPU 的 GPU Kernel,只是少了数据类型 bfloat16 ![图片](../images/sugon_data_type.png) -这个原因通常是由于算子的GPU Kernel源码文件中没有注册对应数据类型引起的,修复办法是先在以下两个算子Kernel源码目录中通过查找`REGISTER_OP_CUDA_KERNEL`关键字找到对应算子的源码文件中注册算子Kernel的代码。 +这个原因通常是由于算子的 GPU Kernel 源码文件中没有注册对应数据类型引起的,修复办法是先在以下两个算子 Kernel 源码目录中通过查找`REGISTER_OP_CUDA_KERNEL`关键字找到对应算子的源码文件中注册算子 Kernel 的代码。 ```bash -Paddle当前算子Kernel了目录主要位于如下两个目录中 +Paddle 当前算子 Kernel 了目录主要位于如下两个目录中 cd /workspace/Paddle/paddle/fluid/operators cd /workspace/Paddle/paddle/phi/kernels ``` -例如查找得到的relu算子的注册代码如下: +例如查找得到的 relu 算子的注册代码如下: ![图片](../images/sugon_register_activation.png) 
-注意观察其中用`PADDLE_WITH_HIP`的宏定义包围的代码才是C86加速卡相关算子,其中`REGISTER_ACTIVATION_CUDA_KERNEL`的定义如下,只为算子的前反向定义了float/double/float16三种数据类型,缺少错误提示中所说的bfloat16数据类型 +注意观察其中用`PADDLE_WITH_HIP`的宏定义包围的代码才是 C86 加速卡相关算子,其中`REGISTER_ACTIVATION_CUDA_KERNEL`的定义如下,只为算子的前反向定义了 float/double/float16 三种数据类型,缺少错误提示中所说的 bfloat16 数据类型 ![图片](../images/sugon_cuda_kernel.png) -因此可以参考非`PADDLE_WITH_HIP`的宏定义包围的英伟达GPU相关代码,为 HIP算子注册bfloat16数据类型。之后再在HIP环境下验证该算子的正确输出结果。 +因此可以参考非`PADDLE_WITH_HIP`的宏定义包围的英伟达 GPU 相关代码,为 HIP 算子注册 bfloat16 数据类型。之后再在 HIP 环境下验证该算子的正确输出结果。 -## 算子修复举例2:单测的输出结果无法达到精度对齐 +## 算子修复举例 2:单测的输出结果无法达到精度对齐 这类问题造成的原因较为多样,请先仔细阅读单测的报错信息,可能存在如下几种情况 -### 情况1:输出误差较小,则相应修改单测文件的误差阈值即可 +### 情况 1:输出误差较小,则相应修改单测文件的误差阈值即可 这类问题的常见报错信息如下: @@ -115,7 +115,7 @@ cd /workspace/Paddle/paddle/phi/kernels 从输出中可以观察到,`actual`和`expected`结果较为接近,可以通过修改误差阈值来解决: ```bash -# 例如原有误差阈值为0.01 +# 例如原有误差阈值为 0.01 self.assertTrue(np.allclose(hist, prob, rtol=0.01), "actual: {}, expected: {}".format(hist, prob)) @@ -124,12 +124,12 @@ self.assertTrue(np.allclose(hist, prob, rtol=0.05), "actual: {}, expected: {}".format(hist, prob)) ``` -### 情况2:输出误差较大,需要定位误差是代码实现导致还是硬件本身原因 +### 情况 2:输出误差较大,需要定位误差是代码实现导致还是硬件本身原因 这类问题的常见报错信息如下: ```bash -# 示例2 +# 示例 2 373: AssertionError: 373: Not equal to tolerance rtol=1e-06, atol=0 373: @@ -140,17 +140,17 @@ self.assertTrue(np.allclose(hist, prob, rtol=0.05), 373: y: array(-1.72674) ``` -从输出中观察到,此类算子误差非常大,可能是算子本身计算代码在HIP平台下存在问题。建议仔细调试该算子的GPU Kernel,定位算子计算问题并进行修复。 +从输出中观察到,此类算子误差非常大,可能是算子本身计算代码在 HIP 平台下存在问题。建议仔细调试该算子的 GPU Kernel,定位算子计算问题并进行修复。 -### 情况3:输出结果中出Nan,需要定位算子内核函数的实现问题 +### 情况 3:输出结果中出 Nan,需要定位算子内核函数的实现问题 这类问题的常见报错信息如下: ```bash -# 示例3 +# 示例 3 356: AssertionError: False is not true : Output (Out) has diff at Place(gpu:0) 356: Expect [[[ 0.3687 -0.0764 0.1682 0.3389 -0.4622 ] ... 356: But Got[[[[nan nan nan nan nan] ... 
``` -从输出中观察到,算子输出直接出nan了,可能是算子本身计算代码在HIP平台下存在问题。同上个问题一样,需要仔细调试该算子的GPU Kernel,定位算子计算问题并进行修复。可能的解决办法是请先检查对应算子Kernel的线程数,可以参考 [ROCm-Developer-Tools/HIP#2235](https://github.com/ROCm-Developer-Tools/HIP/issues/2235) 中的回复,将HIP平台下的算子线程数控制在256及以内。 +从输出中观察到,算子输出直接出 nan 了,可能是算子本身计算代码在 HIP 平台下存在问题。同上个问题一样,需要仔细调试该算子的 GPU Kernel,定位算子计算问题并进行修复。可能的解决办法是请先检查对应算子 Kernel 的线程数,可以参考 [ROCm-Developer-Tools/HIP#2235](https://github.com/ROCm-Developer-Tools/HIP/issues/2235) 中的回复,将 HIP 平台下的算子线程数控制在 256 及以内。 diff --git a/docs/eval/evaluation_of_docs_system.md b/docs/eval/evaluation_of_docs_system.md index 7e0ee875a7e..5098c22428a 100644 --- a/docs/eval/evaluation_of_docs_system.md +++ b/docs/eval/evaluation_of_docs_system.md @@ -336,7 +336,7 @@ MindSpore 的有自己独立的文档分类标准和风格,所以硬套本文 - 模型推理 - 模型推理总览 - - GPU/CPU推理 + - GPU/CPU 推理 - Ascend 910 AI 处理器上推理 - Ascend 310 AI 处理器上使用 MindIR 模型进行推理 - Ascend 310 AI 处理器上使用 AIR 模型进行推理 @@ -357,8 +357,8 @@ MindSpore 的有自己独立的文档分类标准和风格,所以硬套本文 分布式并行训练基础样例(Ascend) 分布式并行训练基础样例(GPU) 分布式推理 - 保存和加载模型(HyBrid Parallel模式) - 分布式并行训练Transformer模型 + 保存和加载模型(HyBrid Parallel 模式) + 分布式并行训练 Transformer 模型 鹏程·盘古模型网络多维度混合并行解析 分布式故障恢复 @@ -511,9 +511,9 @@ MindSpore 的有自己独立的文档分类标准和风格,所以硬套本文 - 使用 LeNet 在 MNIST 数据集实现图像分类 - 使用卷积神经网络进行图像分类 - 基于图片相似度的图片搜索 - - 基于U-Net卷积神经网络实现宠物图像分割 - - 通过OCR实现验证码识别 - - 通过Sub-Pixel实现图像超分辨率 + - 基于 U-Net 卷积神经网络实现宠物图像分割 + - 通过 OCR 实现验证码识别 + - 通过 Sub-Pixel 实现图像超分辨率 - 人脸关键点检测 - 点云处理:实现 PointNet 点云分类 - 自然语言处理 @@ -529,7 +529,7 @@ MindSpore 的有自己独立的文档分类标准和风格,所以硬套本文 - 强化学习——Advantage Actor-Critic(A2C) - 强化学习——Deep Deterministic Policy Gradient (DDPG) - 时序数据 - - 通过AutoEncoder实现时序数据异常检测 + - 通过 AutoEncoder 实现时序数据异常检测 - 动转静 - 使用动转静完成以图搜图 @@ -545,7 +545,7 @@ MindSpore 的有自己独立的文档分类标准和风格,所以硬套本文 | | TensorFlow | 数量 | PyTorch | 数量 | MindSpore | 数量 | PaddlePaddle | 数量 | | ---------------------------- | ------------------------------------------------------------ | ---- | ------------------------------------------------------------ | ---- | 
------------------------------------------------------------ | ---- | ------------------------------------------------------------ | ------ | -| 基本数据(Tensor)和基本算子 | Tensors Variables Tensor slicing Ragged tensor Sparse tensor DTensor concepts | 6 | Tensors Transforms Introduction to PyTorch Tensors | 3 | 张量 Tensor | 1 | Tensor概念介绍 | 1 | +| 基本数据(Tensor)和基本算子 | Tensors Variables Tensor slicing Ragged tensor Sparse tensor DTensor concepts | 6 | Tensors Transforms Introduction to PyTorch Tensors | 3 | 张量 Tensor | 1 | Tensor 概念介绍 | 1 | | 数据加载与预处理 | Images CSV Numpy pandas.DataFrame TFRecord and tf.Example Additional formats with tf.io Text More text loading Classifying structured data with preprocessing layers Classfication on imbalanced data Time series forecasting Decision forest models | 13 | Datasets & Dataloaders | 1 | 数据处理 数据处理(进阶) 自动数据增强 轻量化数据处理 单节点数据缓存 优化数据处理 | 6 | 数据集的定义和加载 数据预处理 | 2 | | 如何组网 | Modules, layers, and models | 1 | Build the Neural Network Building Models with PyTorch What is torch.nn really? 
Learing PyTorch with Examples | 4 | 创建网络 网络构建 | 2 | 模型组网 飞桨高层 API 使用指南 层与模型 | 3 | | 如何训练 | Training loops NumPy API Checkpoint SavedModel | 4 | Optimization Model Parameters Traning wiht PyTorch | 2 | 模型训练 训练与评估 | 2 | 训练与预测验证 自定义指标 | 2 | @@ -553,8 +553,8 @@ MindSpore 的有自己独立的文档分类标准和风格,所以硬套本文 | 可视化、调优技巧 | Overfit and underfit Tune hyperprameters with Keras Tuner Better performance with tf.function Profile TensorFlow performance Graph optimizaition Optimize GPU Performance Mixed precision | 7 | PyTorch TensorBoard Support Model Understanding with Captum Visualizing Models, Data, and Training with TensorBoard Profiling your PyTorch Module PyTorch Profiler with TensorBoard Hyperparameter tuning with Ray Tune Optimizing Vision Transformer Model for Deployment Parametrization Tutorial Pruning Tutorial Grokking PyTorch Intel CPU performance from first principles | 11 | 查看中间文件 Dump 功能调试 自定义调试信息 调用自定义类 算子增量编译 算子调优工具 自动数据加速 固定随机性以复现脚本运行结果 | 8 | VisualDL 工具简介 VisualDL 使用指南 飞桨模型量化 | 3 | | 自动微分 | Automatic differentiation Advanced autodiff | 2 | Automatic Differentiation with torch.autograd The Fundamentals of Autograd | 2 | 自动微分 | 1 | 自动微分 | 1 | | 动态图与静态图 | Graphs and functions | 1 | (torchscript 其实是静态图,不过归类到部署中了) | 0 | 动态图与静态图 | 1 | 使用样例 转换原理 支持语法 案例解析 报错调试 动态图 使用动转静完成以图搜图 | 7 | -| 部署相关 | https://www.tensorflow.org/tfx/tutorials 下的21篇文章 https://www.tensorflow.org/tfx/guide 下的30+文章 | 50+ | Deploying PyTorch in Python via a REST API with Flask Introduction to TorchScript Loading a TorchScript Model in C++ (optional) Exporting a Model from PyTorch to ONNX and Running it using ONNX Runtime Real Time Inference on Raspberry Pi 4 | 6 | 推理与部署 模型推理总览 GPU/CPU推理 Ascend 910 AI 处理器上推理 Ascend 310 AI 处理器上使用 MindIR 模型进行推理 Ascend 310 AI 处理器上使用 AIR 模型进行推理 | 7 | 服务器部署 移动端/嵌入式部署 模型压缩 https://www.paddlepaddle.org.cn/lite/v2.10/guide/introduction.html 下 50+ 篇文章 | 50+ | -| CV 领域相关 | Basic image classfication Convolutional Neural Network Image classification Transfer learning and fine-tuning Transfer 
learning with TF Hub Data Augmentaion Image segmentation Object detection with TF Hub Neural style transfer DeepDream DCGAN Pix2Pix CycleGAN Adversarial FGSM Intro to Autoencoders Variational Autoencoder | 16 | TorchVision Object Detection Finetuning Tutorial Transfer Learning for Computer Vision Tutorial Adversarial Example Generation DCGAN Tutorial Spatial Tansformer Networks Tutorial Optimizaing Vision Transformer Model for Deployment Quantized Transfer Learning for Computer Vision Tutorial | 7 | ResNet50 网络进行图像分类 图像分类迁移学习 模型对抗攻击 生成式对抗网络 | 4 | 使用 LeNet 在 MNIST 数据集实现图像分类 使用卷积神经网络进行图像分类 基于图片相似度的图片搜索 基于U-Net卷积神经网络实现宠物图像分割 通过OCR实现验证码识别 通过Sub-Pixel实现图像超分辨率 人脸关键点检测 点云处理:实现 PointNet 点云分类 | 7 | +| 部署相关 | https://www.tensorflow.org/tfx/tutorials 下的 21 篇文章 https://www.tensorflow.org/tfx/guide 下的 30+文章 | 50+ | Deploying PyTorch in Python via a REST API with Flask Introduction to TorchScript Loading a TorchScript Model in C++ (optional) Exporting a Model from PyTorch to ONNX and Running it using ONNX Runtime Real Time Inference on Raspberry Pi 4 | 6 | 推理与部署 模型推理总览 GPU/CPU 推理 Ascend 910 AI 处理器上推理 Ascend 310 AI 处理器上使用 MindIR 模型进行推理 Ascend 310 AI 处理器上使用 AIR 模型进行推理 | 7 | 服务器部署 移动端/嵌入式部署 模型压缩 https://www.paddlepaddle.org.cn/lite/v2.10/guide/introduction.html 下 50+ 篇文章 | 50+ | +| CV 领域相关 | Basic image classfication Convolutional Neural Network Image classification Transfer learning and fine-tuning Transfer learning with TF Hub Data Augmentaion Image segmentation Object detection with TF Hub Neural style transfer DeepDream DCGAN Pix2Pix CycleGAN Adversarial FGSM Intro to Autoencoders Variational Autoencoder | 16 | TorchVision Object Detection Finetuning Tutorial Transfer Learning for Computer Vision Tutorial Adversarial Example Generation DCGAN Tutorial Spatial Tansformer Networks Tutorial Optimizaing Vision Transformer Model for Deployment Quantized Transfer Learning for Computer Vision Tutorial | 7 | ResNet50 网络进行图像分类 图像分类迁移学习 模型对抗攻击 生成式对抗网络 | 4 | 使用 LeNet 在 MNIST 数据集实现图像分类 
使用卷积神经网络进行图像分类 基于图片相似度的图片搜索 基于 U-Net 卷积神经网络实现宠物图像分割 通过 OCR 实现验证码识别 通过 Sub-Pixel 实现图像超分辨率 人脸关键点检测 点云处理:实现 PointNet 点云分类 | 7 | | NLP 领域相关 | Basic text classification Text classification with TF Hub Word embeddings Word2Vec Text classification with an RNN classify Text with BERT Solve GLUE tasks using BERT on TPU Neural machine translation with attention Image captioning | 9 | Language Modeling with nn.Transformer and TorchText NLP From Scratch: Classifying Names with a Character-Level RNN NLP From Scratch: Generating Names with a Character-Level RNN NLP From Scratch: Translation with a Sequence to Sequence Network and Attention Text classification with the torchtext library Language Translation with nn.Transformer and torchtext Dynamic Quantization on an LSTM Word Language Model Dynamic Quantization on BERT | 8 | 使用 RNN 实现情感分类 LSTM+CRF 实现序列标注 | 2 | 用 N-Gram 模型在莎士比亚文集中训练 word embedding IMDB 数据集使用 BOW 网络的文本分类 使用预训练的词向量完成文本分类任务 使用注意力机制的 LSTM 的机器翻译 使用序列到序列模型完成数字加法 | 5 | | 语音领域相关 | | | Audio I/O Audio Resampling Audio Data Augmentation Audio Feature Extractions Audio Feature Augmentation Audio Datasets Speech Recognition with Wav2Vec2 Speech Command Classification with torchaudio Text-to-speech with torchaudio Forced Alignment with Wav2Vec2 | 10 | | 0 | | 0 | | 推荐领域相关 | Recommenders | 1 | Introduction to TorchRec | 1 | | 0 | 使用协同过滤实现电影推荐 | 1 | @@ -562,9 +562,9 @@ MindSpore 的有自己独立的文档分类标准和风格,所以硬套本文 | 移动端相关 | 独立的栏目 https://www.tensorflow.org/lite | 10+ | Image Segmentation DeepLabV3 on iOS Image Segmentation DeepLabV3 on Android | 2 | | 0 | Paddle Lite 中独立存在 | 未统计 | | 框架之间的迁移相关 | | | | 0 | 概述 准备工作 网络脚本分析 网络脚本开发 网络调试 精度调试 性能调试 推理执行 网络迁移调试实例 常见问题 | 10 | Paddle 1.8 与 Paddle 2.0 API 映射表 PyTorch-PaddlePaddle API 映射表 版本迁移工具 | 3 | | 自定义算子 | Tensors and operations Custom layers Custom traning: walkthrough Create an op Extension types | 5 | Double Backward with Custom Functions Fusing Convolution and Batch Norm using Custom Function Custom C++ and CUDA Extensions Extending 
TorchScript with Custom C++ Operators Extending TorchScript with Custom C++ Classes Registering a Dispatched Operator in C++ Extending dispatcher for a new backend in C++ | 7 | 算子分类 运算重载 自定义算子(CPU) 自定义算子(GPU) 自定义算子(Ascend) 自定义算子(基于 Custom 表达) | 6 | 自定义原生算子 原生算子开发注意事项 自定义外部算子 自定义 Python 算子 API 介绍 API 示例 本地开发指南 提交 PR 注意事项 FAQ | 9 | -| 分布式训练 | Distributed training with Kereas Distributed training with DTensors Using DTensors with Keras Custom training loops Multi-worker training with Keras Multi-worker training with CTL Parameter Server Training Distributed input Distributed training | 9 | PyTorch Distributed Overview Single-Machine Model Parallel Best PracticesGetting Started with Distributed Data Parallel Writing Distributed Applications with PyTorch Getting Started with Fully Sharded Data Prallel Customize Process Group Backends Using Cpp Extension Getting Started with Distributed RPC Framework Implementing a Parameter Server Using Distributed RPC Framework Distributed Pipeline Parallelsim using RPC Implementing Batch RPC Processing Using Asynchronous Executions Combining Distributed DataPrallel with Distributed RPC Framework Training Transformer models using Pipeline Parallelism Training Transformer models using Distributed Data Parallel and Pipeline Parallelism Distributed Training with Uneven Inputs Using the Join Context Manager | 16 | 分布式并行总览 分布式集合通信原语 分布式并行训练基础样例(Ascend) 分布式并行训练基础样例(GPU) 分布式推理 保存和加载模型(HyBrid Parallel模式) 分布式并行训练Transformer模型 鹏程·盘古模型网络多维度混合并行解析 分布式故障恢复 | 9 | 单机多卡训练 分布式训练开始 使用 FleetAPI 进行分布式训练 | 3 | +| 分布式训练 | Distributed training with Kereas Distributed training with DTensors Using DTensors with Keras Custom training loops Multi-worker training with Keras Multi-worker training with CTL Parameter Server Training Distributed input Distributed training | 9 | PyTorch Distributed Overview Single-Machine Model Parallel Best PracticesGetting Started with Distributed Data Parallel Writing Distributed Applications with PyTorch Getting Started with Fully 
Sharded Data Prallel Customize Process Group Backends Using Cpp Extension Getting Started with Distributed RPC Framework Implementing a Parameter Server Using Distributed RPC Framework Distributed Pipeline Parallelsim using RPC Implementing Batch RPC Processing Using Asynchronous Executions Combining Distributed DataPrallel with Distributed RPC Framework Training Transformer models using Pipeline Parallelism Training Transformer models using Distributed Data Parallel and Pipeline Parallelism Distributed Training with Uneven Inputs Using the Join Context Manager | 16 | 分布式并行总览 分布式集合通信原语 分布式并行训练基础样例(Ascend) 分布式并行训练基础样例(GPU) 分布式推理 保存和加载模型(HyBrid Parallel 模式) 分布式并行训练 Transformer 模型 鹏程·盘古模型网络多维度混合并行解析 分布式故障恢复 | 9 | 单机多卡训练 分布式训练开始 使用 FleetAPI 进行分布式训练 | 3 | | 框架设计文档 | Random number generation | 1 | 分散在 API 文档、源码中,其实比较丰富。30+ | 30+ | 设计白皮书 全场景统一 函数式微分编程 动静态图结合 异构并行训练 分布式并行 中间表达 MindIR 高性能数据处理引擎 图算融合加速引擎 二阶优化 可视化调试调优 安全可信 术语 | 13 | | 0 | -| 其它 | Integrated gradients Uncertainty quantification with SNGP Probabilistic regression Keras一级标题下的13篇文章 Thinking in TensorFlow 2 Data input pipelines 一级标题下的3篇 GPU TPU | 20 | Learn the Basics Quickstart Deep Learning with PyTorch: A 60 Minute Blitz Building a Convolution/Batch Norm fuser in FX Building a Simple CPU Performance Profiler with FX Channels Last Memory Format in PyTorch Forward-mode Automatic Differentiation Using the PyTorch C++ Frontend Dynamic Parallelism in TorchScript Autograd in C++ Frontend Static Quantization with Eager Model in PyTorch | 11 | 基本介绍 快速入门 进阶案例:线性拟合 混合精度 梯度累积算法 自适应梯度求和算法 降维训练算法 | 7 | 10 分钟快速上手飞桨 使用线性回归预测波士顿房价 模型导出 ONNX 协议 飞桨产品硬件支持表 昆仑 XPU 芯片运行飞桨 海光 DCU 芯片运行飞桨 昇腾 NPU 芯片运行飞桨 环境变量 FLAGS 下9篇 hello paddle:从普通程序走向机器学习程序 通过AutoEncoder实现时序数据异常检测 广播介绍 自动混合精度训练 梯度裁剪 升级指南 | 20+ | +| 其它 | Integrated gradients Uncertainty quantification with SNGP Probabilistic regression Keras 一级标题下的 13 篇文章 Thinking in TensorFlow 2 Data input pipelines 一级标题下的 3 篇 GPU TPU | 20 | Learn the Basics Quickstart Deep Learning with PyTorch: A 
60 Minute Blitz Building a Convolution/Batch Norm fuser in FX Building a Simple CPU Performance Profiler with FX Channels Last Memory Format in PyTorch Forward-mode Automatic Differentiation Using the PyTorch C++ Frontend Dynamic Parallelism in TorchScript Autograd in C++ Frontend Static Quantization with Eager Model in PyTorch | 11 | 基本介绍 快速入门 进阶案例:线性拟合 混合精度 梯度累积算法 自适应梯度求和算法 降维训练算法 | 7 | 10 分钟快速上手飞桨 使用线性回归预测波士顿房价 模型导出 ONNX 协议 飞桨产品硬件支持表 昆仑 XPU 芯片运行飞桨 海光 DCU 芯片运行飞桨 昇腾 NPU 芯片运行飞桨 环境变量 FLAGS 下 9 篇 hello paddle:从普通程序走向机器学习程序 通过 AutoEncoder 实现时序数据异常检测 广播介绍 自动混合精度训练 梯度裁剪 升级指南 | 20+ | 可以看除,PaddlePaddle 在文档上是比较完备的,在本文划分的 19 个具体领域中的 17 个领域中都已有文档,包括: diff --git "a/docs/eval/\343\200\220Hackathon No.111\343\200\221 PR.md" "b/docs/eval/\343\200\220Hackathon No.111\343\200\221 PR.md" index 2b690185337..85d95753dab 100644 --- "a/docs/eval/\343\200\220Hackathon No.111\343\200\221 PR.md" +++ "b/docs/eval/\343\200\220Hackathon No.111\343\200\221 PR.md" @@ -1,9 +1,9 @@ -- 一个完整的使用动静转换@to_static导出、可部署的模型完整代码(参考以图搜图),提供 AI Studio 任务链接 +- 一个完整的使用动静转换@to_static 导出、可部署的模型完整代码(参考以图搜图),提供 AI Studio 任务链接 AI Studio 任务链接:https://aistudio.baidu.com/aistudio/projectdetail/3910079 - 接口层面: -接口层面相对来说比较全面,指出了模型静态图导出的方法。同时InputSpec也比较好用,可以通过三种方式来构造所需要的InputSpec:直接构造、由Tensor构造以及由numpy.ndarray构造,但是并没有指出这三种方式构造的InputSpec的优缺点。在动态图转静态图--使用样例--2.2.2基本用法的方式四:指定非Tensor参数类型中代码有问题,to_static函数中没有输入net参数,修改代码如下: +接口层面相对来说比较全面,指出了模型静态图导出的方法。同时 InputSpec 也比较好用,可以通过三种方式来构造所需要的 InputSpec:直接构造、由 Tensor 构造以及由 numpy.ndarray 构造,但是并没有指出这三种方式构造的 InputSpec 的优缺点。在动态图转静态图--使用样例--2.2.2 基本用法的方式四:指定非 Tensor 参数类型中代码有问题,to_static 函数中没有输入 net 参数,修改代码如下: ```python class SimpleNet(Layer): @@ -32,12 +32,12 @@ paddle.jit.save(net, path='./simple_net') ``` - 语法层面: -支持语法相对来说是比较全面的,介绍的也比较细致。控制流语法等用起来也比较流畅。但是在第三方相关库numpy中只是简单的说了部分支持,并没有具体的例子解释numpy操作中哪部分是支持的,哪部分是不支持的。并且在案例解析--三、内嵌Numpy操作中直接写到动态图模型代码中numpy相关的操作不能转换为静态图,虽然提供了一个好的方法来解决这个问题(转换为tensor),虽然能理解下来但是感觉这两部分写的不具体且有点矛盾。 +支持语法相对来说是比较全面的,介绍的也比较细致。控制流语法等用起来也比较流畅。但是在第三方相关库 numpy 
中只是简单的说了部分支持,并没有具体的例子解释 numpy 操作中哪部分是支持的,哪部分是不支持的。并且在案例解析--三、内嵌 Numpy 操作中直接写到动态图模型代码中 numpy 相关的操作不能转换为静态图,虽然提供了一个好的方法来解决这个问题(转换为 tensor),虽然能理解下来但是感觉这两部分写的不具体且有点矛盾。 ![4df67d8440d0fc20490cbd09cbd5498](https://user-images.githubusercontent.com/102226413/165878773-640e73c2-d343-4fb2-8d6b-af3947d9c6bb.png) ![5c43735dfac00b3290cf2b0b5c58b3d](https://user-images.githubusercontent.com/102226413/165878786-ed404b8c-ab03-43a7-9b15-9dc56dc44635.png) -另外,在案例解析6.1默认参数的部分,给出了forward函数一个不错的建议,但是当我在分析它的原因的时候,我测试了一下下面的代码: +另外,在案例解析 6.1 默认参数的部分,给出了 forward 函数一个不错的建议,但是当我在分析它的原因的时候,我测试了一下下面的代码: ```python @@ -68,7 +68,7 @@ net = SimpleNet() paddle.jit.save(net, path='./simple_net', input_spec=[InputSpec(shape=[None, 10], name='x'), True]) ``` -它他并没有报错,但是paddle.jit.save时在input_spec时我指定了非tensor的数据,而且程序运行并没有报错,这会不会与原因有点冲突?文档原因截图如下: +它他并没有报错,但是 paddle.jit.save 时在 input_spec 时我指定了非 tensor 的数据,而且程序运行并没有报错,这会不会与原因有点冲突?文档原因截图如下: ![f4e2808997a5556bfd5f6c580245b3f](https://user-images.githubusercontent.com/102226413/165878738-61ed378a-67cb-4d0e-93b8-aba8e7b6fe13.png) @@ -76,7 +76,7 @@ paddle.jit.save(net, path='./simple_net', input_spec=[InputSpec(shape=[None, 10] - 报错层面 文档总体来说写的比较全面。 -文档中1.1错误日志怎么看,报错调试的文档代码如下: +文档中 1.1 错误日志怎么看,报错调试的文档代码如下: ```python import paddle import numpy as np @@ -95,7 +95,7 @@ if __name__ == '__main__': train() ``` -报错日志如下图,在paddle内置的方法中有点难以快速定位到问题所在。该报错问题应该是第7行paddle.reshape的维度设置不对。但是在使用排错日志的时候,没有报错信息直接定位到第7行。个人觉得对错误代码位置的直接定位才是最重要的。而且报错的内容提示太多,对新手来说不会很友好。建议直接在报错的时候,报错的最后位置,重复一遍,最重要的报错信息,并提示报错代码所在位置。这样对新手比较友好。对于这种简单问题的报错提示更加明确一点会让使用者觉得更加方便。 +报错日志如下图,在 paddle 内置的方法中有点难以快速定位到问题所在。该报错问题应该是第 7 行 paddle.reshape 的维度设置不对。但是在使用排错日志的时候,没有报错信息直接定位到第 7 行。个人觉得对错误代码位置的直接定位才是最重要的。而且报错的内容提示太多,对新手来说不会很友好。建议直接在报错的时候,报错的最后位置,重复一遍,最重要的报错信息,并提示报错代码所在位置。这样对新手比较友好。对于这种简单问题的报错提示更加明确一点会让使用者觉得更加方便。 ![@6U(A`{~$P$`XD1I{YGYOLT](https://user-images.githubusercontent.com/102226413/165878813-ec7a90b6-518b-4a2c-ae68-8a92572ff96a.png) 
![)PH6WOHJZ{UJ}~YIKADQ)$4](https://user-images.githubusercontent.com/102226413/165878824-4d3dfe4f-3dea-447d-86fe-5d57c0937246.png) @@ -103,12 +103,12 @@ if __name__ == '__main__': - 文档层面 -文档整体比较完善,但是在使用指南->动态图转静态图->案例解析 中全部都是动静转化机制的各种API的分章节介绍,建议在案例解析最后增加一个完整的实例代码,比如cifar10图像分类的动态图转静态图案例,或者把应用实践中的案例链接附在最后,方便读者找寻。有些读者可能想找一个案例,然后找了使用指南的案例解析,发现没有一个完整的案例,正巧这个读者对整个文档不熟悉,没看过应用实践,然后就找不到案例。 +文档整体比较完善,但是在使用指南->动态图转静态图->案例解析 中全部都是动静转化机制的各种 API 的分章节介绍,建议在案例解析最后增加一个完整的实例代码,比如 cifar10 图像分类的动态图转静态图案例,或者把应用实践中的案例链接附在最后,方便读者找寻。有些读者可能想找一个案例,然后找了使用指南的案例解析,发现没有一个完整的案例,正巧这个读者对整个文档不熟悉,没看过应用实践,然后就找不到案例。 - 意见建议(问题汇总) -1、接口层面,使用指南->动态图转静态图->使用样例 2.2.1 构造inputSpec 并没有指出这三种方式构造的InputSpec的优缺点。 -2、语法层面,对numpy的支持性存在一些问题。 +1、接口层面,使用指南->动态图转静态图->使用样例 2.2.1 构造 inputSpec 并没有指出这三种方式构造的 InputSpec 的优缺点。 +2、语法层面,对 numpy 的支持性存在一些问题。 3、报错调试,在使用排错日志的时候,没有报错信息直接定位到错误代码的位置,且报错内容提示太多,对新手不友好。建议直接在报错的时候,报错的最后位置,重复一遍,最重要的报错信息,并提示报错代码所在位置。 -4、文档层面,在使用指南->动态图转静态图->案例解析 中全部都是动静转化机制的各种API的分章节介绍,建议在案例解析最后增加一个完整的实例代码。 +4、文档层面,在使用指南->动态图转静态图->案例解析 中全部都是动静转化机制的各种 API 的分章节介绍,建议在案例解析最后增加一个完整的实例代码。 diff --git "a/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" "b/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" index af215315003..08078b1afa7 100644 --- "a/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" +++ "b/docs/eval/\343\200\220Hackathon No.113\343\200\221 PR.md" @@ -1,7 +1,7 @@ # 1、任务描述: - 飞桨框架于 2.0 正式版全面支持了动态图训练,并在2.1、2.2 两个大版本中不断完善分布式能力,同时大幅增强了训练功能。在本任务中,我们希望能收到你对于飞桨动态图分布式训练功能的使用感受,可以与其他深度学习框架做功能对比,并产出一份对应的评估报告。 + 飞桨框架于 2.0 正式版全面支持了动态图训练,并在 2.1、2.2 两个大版本中不断完善分布式能力,同时大幅增强了训练功能。在本任务中,我们希望能收到你对于飞桨动态图分布式训练功能的使用感受,可以与其他深度学习框架做功能对比,并产出一份对应的评估报告。 # 2、环境配置: @@ -9,7 +9,7 @@ ## 2.1、PyTorch 环境配置: -- 1、首先安装anaconda +- 1、首先安装 anaconda ```bash bash Anaconda3-2020.07-Linux-x86_64.sh –u ``` @@ -18,7 +18,7 @@ bash Anaconda3-2020.07-Linux-x86_64.sh –u conda create --name pytorch_1.9 python=3.7 conda activate pytorch-1.9 ``` -- 3、安装 pytorch-1.9(适配rocm-4.0.1及以上)PyTorch1.8 和 PyTorch1.9 安装 wheel 包在公共目录: +- 
3、安装 pytorch-1.9(适配 rocm-4.0.1 及以上)PyTorch1.8 和 PyTorch1.9 安装 wheel 包在公共目录: ```bash /public/software/apps/DeepLearning/whl/rocm-4.0.1/ ``` @@ -28,23 +28,23 @@ module rm compiler/rocm/2.9 module load compiler/rocm/4.0.1 pip install /public/software/apps/DeepLearning/whl/rocm-4.0.1/torch-1.9.0+rocm4.0.1-cp36-cp36m-linux_x86_64.whl -i https://pypi.tuna.tsinghua.edu.cn/simple/ ``` -- 对于torchverion的安装不能按照曙光官方帮助文档给定的方法来,否则torchvision在运行自定义算子时会出现错误,所以需要使用源码安装的方式,安装方法如下: +- 对于 torchverion 的安装不能按照曙光官方帮助文档给定的方法来,否则 torchvision 在运行自定义算子时会出现错误,所以需要使用源码安装的方式,安装方法如下: ```text -1、本地下载对应的torchvision分支源码包:https://github.com/pytorch/vision上传集群, -2、进入对应的conda环境,加载对应的rocm(这里rocm4.0.1)版本; +1、本地下载对应的 torchvision 分支源码包:https://github.com/pytorch/vision 上传集群, +2、进入对应的 conda 环境,加载对应的 rocm(这里 rocm4.0.1)版本; 3、conda install libpng -y 4、conda install jpeg -y 5、pip3 install numpy pillow matplotlib ninja -i https://pypi.tuna.tsinghua.edu.cn/simple/ -6、使用salloc申请计算结点,使用ssh登录至计算节点,并进入对应的conda环境加载rocm(这里rocm4.0.1),执行编译:CC=clang CXX=clang++ python setup.py install +6、使用 salloc 申请计算结点,使用 ssh 登录至计算节点,并进入对应的 conda 环境加载 rocm(这里 rocm4.0.1),执行编译:CC=clang CXX=clang++ python setup.py install ``` ## 2.2、PaddlePaddle 环境配置: -- PaddlePaddle 的环境在曙光超算上需要使用镜像的方式进行安装,镜像添加,源镜像名称填:paddlepaddle/paddle,源镜像标签填:latest-dev-rocm4.0-miopen2.11。然后创建实例打开容器。因为Docker容器中不能连接网络,使用paddle官网给出的安装方式会出现网络连接的错误。 +- PaddlePaddle 的环境在曙光超算上需要使用镜像的方式进行安装,镜像添加,源镜像名称填:paddlepaddle/paddle,源镜像标签填:latest-dev-rocm4.0-miopen2.11。然后创建实例打开容器。因为 Docker 容器中不能连接网络,使用 paddle 官网给出的安装方式会出现网络连接的错误。 ```bash python -m pip install paddlepaddle-rocm==2.2.2.rocm401.miopen211 -f https://www.paddlepaddle.org.cn/whl/rocm/stable.whl ``` -- 故需要提前下载whl文件,下载链接如下,下载的版本为paddlepaddle_rocm-2.1.1.rocm401.miopen211-cp37-cp37m-linux_x86_64.whl。下载链接: +- 故需要提前下载 whl 文件,下载链接如下,下载的版本为 paddlepaddle_rocm-2.1.1.rocm401.miopen211-cp37-cp37m-linux_x86_64.whl。下载链接: https://www.paddlepaddle.org.cn/whl/rocm/stable.whl @@ -52,7 +52,7 @@ https://www.paddlepaddle.org.cn/whl/rocm/stable.whl 
```bash pip install paddlepaddle_rocm-2.1.1.rocm401.miopen211-cp37-cp37m-linux_x86_64.whl ``` -- 期间所需要的其他库都需要通过在曙光超算上通过EShell进行安装,需要设定清华镜像源,例如 +- 期间所需要的其他库都需要通过在曙光超算上通过 EShell 进行安装,需要设定清华镜像源,例如 ```bash pip install six -i https://pypi.tuna.tsinghua.edu.cn/simple ``` @@ -82,25 +82,25 @@ https://www.paddlepaddle.org.cn/documentation/docs/zh/practices/cv/landmark_dete - 6、预测 具体分析如下: -- 第一步:导入相关库,即导入与本次任务相关的库,例如import paddle -- 第二步:构建数据集,首先自定义一个处理人脸的datasets,其继承于Dataset类,然后实现__init__,__getitem__与__len__函数,__init__函数实现一些数据集的初始化,即完成了对csv文件的读取与数据的清洗以及训练集、验证集、测试集的划分,__getitem__函数实现了通过index参数获取对应的image与label,同时对图像进行预处理操作,即transform操作,__len__函数即返回数据集的数量(长度)。 -- 第三步:定义模型,构建一个模型的类,继承于paddle.nn.Layer,然后实现__init__与forward函数,__init__函数定义网络模型的一些层结构,forward函数实现网络模型的前向传播。同时,示例中给出的代码中网络模型经过了paddle.Model类的封装,paddle.Model类相对来说会更方便一些,其内部定义了一些fit等函数可以直接使用。 -- 第四步:定义损失函数与优化器,优化器采用了Adam算法,同时初始学习率指定为0.001,损失函数采用均方误差函数,因为网络模型经过了paddle.Model类的封装,所以此处对于损失函数与优化器需要使用prepare函数进行指定。 -- 第五步:构建训练代码,网络模型经过了paddle.Model类的封装,训练只需要调用其内部的fit函数,对于fit函数,其内部会把传入的数据集进行DataLoader封装,同时通过tain_batch函数进行其训练过程。 -- 第六步:测试,通过调用paddle.Model类的predict函数可以直接得到其预测的结果。 -- 总结:上述单机的过程中,其采用了paddle.Model类的封装,会在使用方面方便很多,并且paddle.Model类的内部会在模型训练的过程中加入一些回调函数callback进行使用,相对来说比较好用一些。 +- 第一步:导入相关库,即导入与本次任务相关的库,例如 import paddle +- 第二步:构建数据集,首先自定义一个处理人脸的 datasets,其继承于 Dataset 类,然后实现__init__,__getitem__与__len__函数,__init__函数实现一些数据集的初始化,即完成了对 csv 文件的读取与数据的清洗以及训练集、验证集、测试集的划分,__getitem__函数实现了通过 index 参数获取对应的 image 与 label,同时对图像进行预处理操作,即 transform 操作,__len__函数即返回数据集的数量(长度)。 +- 第三步:定义模型,构建一个模型的类,继承于 paddle.nn.Layer,然后实现__init__与 forward 函数,__init__函数定义网络模型的一些层结构,forward 函数实现网络模型的前向传播。同时,示例中给出的代码中网络模型经过了 paddle.Model 类的封装,paddle.Model 类相对来说会更方便一些,其内部定义了一些 fit 等函数可以直接使用。 +- 第四步:定义损失函数与优化器,优化器采用了 Adam 算法,同时初始学习率指定为 0.001,损失函数采用均方误差函数,因为网络模型经过了 paddle.Model 类的封装,所以此处对于损失函数与优化器需要使用 prepare 函数进行指定。 +- 第五步:构建训练代码,网络模型经过了 paddle.Model 类的封装,训练只需要调用其内部的 fit 函数,对于 fit 函数,其内部会把传入的数据集进行 DataLoader 封装,同时通过 tain_batch 函数进行其训练过程。 +- 第六步:测试,通过调用 paddle.Model 
类的 predict 函数可以直接得到其预测的结果。 +- 总结:上述单机的过程中,其采用了 paddle.Model 类的封装,会在使用方面方便很多,并且 paddle.Model 类的内部会在模型训练的过程中加入一些回调函数 callback 进行使用,相对来说比较好用一些。 -## 3.2、paddle分布式的实现 +## 3.2、paddle 分布式的实现 ### 方式一: -首先对于上述代码进行分析,在paddle.Model的底层可以发现其在对模型进行初始化的时候会判断是动态图的模式还是静态图的模式,即 +首先对于上述代码进行分析,在 paddle.Model 的底层可以发现其在对模型进行初始化的时候会判断是动态图的模式还是静态图的模式,即 ```python if fluid.in_dygraph_mode(): self._adapter = DynamicGraphAdapter(self) else: self._adapter = StaticGraphAdapter(self) ``` -因为我们采用的是动态图,所以其会调用第一个函数,即初始化DynamicGraphAdapter(self),观察其底层内部我们可以发现其会进行单机还是分布式的判断,如果是分布式,即tasks>1,会初始化分布式的环境同时构建分布式的模型,代码分析如下: +因为我们采用的是动态图,所以其会调用第一个函数,即初始化 DynamicGraphAdapter(self),观察其底层内部我们可以发现其会进行单机还是分布式的判断,如果是分布式,即 tasks>1,会初始化分布式的环境同时构建分布式的模型,代码分析如下: ```python if self._nranks > 1: @@ -114,15 +114,15 @@ if self._nranks > 1: self.model.network, stradegy) ``` -这样的话,分布式的环境与分布式的模型就已经在paddle.Model内的内部构建完成了。 -分析model.prepare即设置训练过程中的优化器与损失函数中我们发现其内部并没有设置分布式优化器,故这一部分需要自己在代码中添加,即 +这样的话,分布式的环境与分布式的模型就已经在 paddle.Model 内的内部构建完成了。 +分析 model.prepare 即设置训练过程中的优化器与损失函数中我们发现其内部并没有设置分布式优化器,故这一部分需要自己在代码中添加,即 ```python optim = fleet.distributed_optimizer(optim) model.prepare(optim, paddle.nn.MSELoss()) ``` -分析训练的过程,即model.fit的底层代码,我们可以发现其底层构建数据集是就采用了分布式的数据集划分,即其sampler采用的是DistributedBatchSampler,所以其训练时也就采用的是分布式的训练过程,即把数据划分到各个设备上进行训练。 +分析训练的过程,即 model.fit 的底层代码,我们可以发现其底层构建数据集是就采用了分布式的数据集划分,即其 sampler 采用的是 DistributedBatchSampler,所以其训练时也就采用的是分布式的训练过程,即把数据划分到各个设备上进行训练。 -总结:相对来说,示例中的代码使用了paddle.Model的封装,而其paddle.Model底层的一些函数中也添加了对分布式的处理,所以从示例代码中改成分布式的代码只需要进行分布式优化器的构建,即将 +总结:相对来说,示例中的代码使用了 paddle.Model 的封装,而其 paddle.Model 底层的一些函数中也添加了对分布式的处理,所以从示例代码中改成分布式的代码只需要进行分布式优化器的构建,即将 ```python model.prepare(optim, paddle.nn.MSELoss()) ``` @@ -132,7 +132,7 @@ optim = fleet.distributed_optimizer(optim) model.prepare(optim, paddle.nn.MSELoss()) ``` ### 方式二: -paddle.Model类内部封装的东西使用起来比较方便,但是不利于用户了解单机转成分布式的具体流程,所以我没有使用paddle.Model类,而是重新编写了分布式的代码。其流程如下: +paddle.Model 类内部封装的东西使用起来比较方便,但是不利于用户了解单机转成分布式的具体流程,所以我没有使用 paddle.Model 
类,而是重新编写了分布式的代码。其流程如下: - 1、导入分布式所需要的依赖包 - 2、初始化分布式环境 @@ -143,11 +143,11 @@ paddle.Model类内部封装的东西使用起来比较方便,但是不利于 - 7、启动分布式任务 下面具体流程如下: -- 第一步:导入分布式所需要的依赖包,即导入任务相关的API函数,例如 +- 第一步:导入分布式所需要的依赖包,即导入任务相关的 API 函数,例如 ```python from paddle.distributed import fleet ``` -- 第二步:初始化分布式环境,采用了collective通信,代码如下: +- 第二步:初始化分布式环境,采用了 collective 通信,代码如下: ```python fleet.init(is_collective=True) ``` @@ -156,15 +156,15 @@ fleet.init(is_collective=True) model = FaceNet(num_keypoints=15) model = fleet.distributed_model(model) ``` -- 第四步:设置分布式所需要的优化器,优化器采用了Adam优化器,初始学习率为0.001,代码如下: +- 第四步:设置分布式所需要的优化器,优化器采用了 Adam 优化器,初始学习率为 0.001,代码如下: ```python optim = paddle.optimizer.Adam(learning_rate=1e-3, parameters=model.parameters()) optim = fleet.distributed_optimizer(optim) ``` - 第五步:数据集的拆分 -对于分布式的数据拆分,需要先构建其数据集的采样器,这里需要使用DistributedBatchSampler,其中参数为数据集dataset、batch_size、num_replicas、rank、shuffle、drop_last,这里我指定了dataset、batch_size、shuffle,设置了shuffle为True,即对数据进行打乱,其中的参数num_replicas如果不指定,其默认会获取当前环境中的ntasks,然后按照ntasks分配数据集。drop_last参数如果不指定会默认为False,也就是不会丢失最后一个batch的数据。构建完分布式采样器之后,使用DataLoader进行封装一下,这里指定一下batch_sampler为刚才构建的采样器,注意指定batch_sampler参数之后不需要再指定batch_size、shuffle以及drop_last参数。 +对于分布式的数据拆分,需要先构建其数据集的采样器,这里需要使用 DistributedBatchSampler,其中参数为数据集 dataset、batch_size、num_replicas、rank、shuffle、drop_last,这里我指定了 dataset、batch_size、shuffle,设置了 shuffle 为 True,即对数据进行打乱,其中的参数 num_replicas 如果不指定,其默认会获取当前环境中的 ntasks,然后按照 ntasks 分配数据集。drop_last 参数如果不指定会默认为 False,也就是不会丢失最后一个 batch 的数据。构建完分布式采样器之后,使用 DataLoader 进行封装一下,这里指定一下 batch_sampler 为刚才构建的采样器,注意指定 batch_sampler 参数之后不需要再指定 batch_size、shuffle 以及 drop_last 参数。 - 第六步:构建训练代码 -这里不采用paddle.Model进行封装,所以需要自己编写for循环获取数据进行前向传播以及反向传播的过程,代码如下: +这里不采用 paddle.Model 进行封装,所以需要自己编写 for 循环获取数据进行前向传播以及反向传播的过程,代码如下: ```python for eop in range(epoch): # train_sampler.set_epoch(eop) @@ -186,7 +186,7 @@ for eop in range(epoch): print("[Epoch %d, batch %d] loss: %.5f" % (eop, batch_id, loss_data)) ``` * 第七步: 
-飞桨通过paddle.distributed.launch组件启动分布式任务。该组件可用于启动单机多卡分布式任务,也可以用于启动多机多卡分布式任务。该组件为每张参与分布式任务的训练卡启动一个训练进程。默认情形下,该组件将在每个节点上启动N个进程,这里N等于训练节点的卡数,即使用所有的训练卡。用户也可以通过gpus参数指定训练节点上使用的训练卡列表,该列表以逗号分隔。需要注意的是,所有节点需要使用相同数量的训练卡数。为了启动多机分布式任务,需要通过ips参数指定所有节点的IP地址列表,该列表以逗号分隔。需要注意的是,该列表在所有节点上需要保持一致,即各节点IP地址出现的顺序需要保持一致。这里我进行了单机多卡与多机多卡的实验,实验启动方式如下: +飞桨通过 paddle.distributed.launch 组件启动分布式任务。该组件可用于启动单机多卡分布式任务,也可以用于启动多机多卡分布式任务。该组件为每张参与分布式任务的训练卡启动一个训练进程。默认情形下,该组件将在每个节点上启动 N 个进程,这里 N 等于训练节点的卡数,即使用所有的训练卡。用户也可以通过 gpus 参数指定训练节点上使用的训练卡列表,该列表以逗号分隔。需要注意的是,所有节点需要使用相同数量的训练卡数。为了启动多机分布式任务,需要通过 ips 参数指定所有节点的 IP 地址列表,该列表以逗号分隔。需要注意的是,该列表在所有节点上需要保持一致,即各节点 IP 地址出现的顺序需要保持一致。这里我进行了单机多卡与多机多卡的实验,实验启动方式如下: 单机多卡分布式任务:这里我采用的是四个卡,启动方式如下: ```python @@ -426,7 +426,7 @@ step 0 / 4 - loss: 0.00083 Eval samples: 428 INFO 2022-04-02 23:34:34,617 launch.py:268] Local processes completed. ``` -多机多卡分布式任务:相对于单机多卡的分布式任务,多机多卡下不需要对代码有任何的更改,故不需要更改程序,只需要改变一下启动方式。这里对于多机的情况,我在曙光超算上开启了两个镜像,每个镜像申请了2个加速卡,开启之后首先使用ifconfig查看两个镜像下的ip地址,然后使用ping指令查看一下两个镜像能否相互ping通,然后分别在两个镜像下使用下面指令运行:(其中ips中的两个ip地址换成镜像中的ip地址) +多机多卡分布式任务:相对于单机多卡的分布式任务,多机多卡下不需要对代码有任何的更改,故不需要更改程序,只需要改变一下启动方式。这里对于多机的情况,我在曙光超算上开启了两个镜像,每个镜像申请了 2 个加速卡,开启之后首先使用 ifconfig 查看两个镜像下的 ip 地址,然后使用 ping 指令查看一下两个镜像能否相互 ping 通,然后分别在两个镜像下使用下面指令运行:(其中 ips 中的两个 ip 地址换成镜像中的 ip 地址) ```python python -m paddle.distributed.launch --ips="192.168.0.1,192.168.0.2" --gpus 0,1 train_fleet_dygraph.py ``` @@ -861,14 +861,14 @@ Eval samples: 428 | :------------ | ---------- | --------------- | ------ | ------| | 序号 | 核心步骤 | 完成情况(成功/不成功) | 遇到问题 |解决方法(无法解决请注明)| | 1 | 导入分布式训练所需要的依赖包 | 完成 | 无 | 无 | -| 2 | 初始化分布式环境 | 完成 | PaddlePaddle 安装有时候会有一些问题、NCCL初始化有问题![图片](https://user-images.githubusercontent.com/35827074/165877509-b84f5846-b175-4ab9-8ae3-eef66ed09047.png) | 使用export设置一些安装的库的环境变量,上述问题是rocm版本问题,需要使用rocm-4.0.1版本。 修改rocm版本的方法为. 
module switch compiler/rocm/4.0.1,再就是导入超算上的一些环境变量 export NCCL_IB_HCA=mlx5_0 export NCCL_SOCKET_IFNAME=eno1 export NCCL_IB_DISABLE=0 | +| 2 | 初始化分布式环境 | 完成 | PaddlePaddle 安装有时候会有一些问题、NCCL 初始化有问题![图片](https://user-images.githubusercontent.com/35827074/165877509-b84f5846-b175-4ab9-8ae3-eef66ed09047.png) | 使用 export 设置一些安装的库的环境变量,上述问题是 rocm 版本问题,需要使用 rocm-4.0.1 版本。 修改 rocm 版本的方法为. module switch compiler/rocm/4.0.1,再就是导入超算上的一些环境变量 export NCCL_IB_HCA=mlx5_0 export NCCL_SOCKET_IFNAME=eno1 export NCCL_IB_DISABLE=0 | | 3 | 设置分布式训练需要的优化器 | 完成 | 无 | 无 | -| 4 | 数据集拆分 | 完成 | 示例里面没有数据集的拆分案例,不会使用数据集的拆分;使用DistributedBatchSampler采样器之后DataLoader中无法指定batchsize以及shuffle参数 | 分析paddle的分布式API底层以及结合其他深度学习框架分析,发现了DistributedBatchSampler API,然后分析其底层实现,发现可以应用;分析DataLoader底层的源码,发现在指定batch_sampler参数之后不能指定batchsize、shuffle以及drop_last参数,然后在DistributedBatchSampler构建采样器的过程中指定。分布式数据集拆分使用DistributedBatchSampler,通过使用DistributedBatchSampler构建一个分布式的采样器,其会将数据平均划分到多个设备中,然后将其输入到Dataloader函数中,参数为batch_sampler,案例的全部代码已经在附录中给出。关于拆分部分如下:train_sampler = DistributedBatchSampler(train_dataset, 32, shuffle=True) train_loader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=2) val_sampler = DistributedBatchSampler(val_dataset, 32) val_loader = DataLoader(val_dataset, batch_sampler=val_sampler, num_workers=2) | +| 4 | 数据集拆分 | 完成 | 示例里面没有数据集的拆分案例,不会使用数据集的拆分;使用 DistributedBatchSampler 采样器之后 DataLoader 中无法指定 batchsize 以及 shuffle 参数 | 分析 paddle 的分布式 API 底层以及结合其他深度学习框架分析,发现了 DistributedBatchSampler API,然后分析其底层实现,发现可以应用;分析 DataLoader 底层的源码,发现在指定 batch_sampler 参数之后不能指定 batchsize、shuffle 以及 drop_last 参数,然后在 DistributedBatchSampler 构建采样器的过程中指定。分布式数据集拆分使用 DistributedBatchSampler,通过使用 DistributedBatchSampler 构建一个分布式的采样器,其会将数据平均划分到多个设备中,然后将其输入到 Dataloader 函数中,参数为 batch_sampler,案例的全部代码已经在附录中给出。关于拆分部分如下:train_sampler = DistributedBatchSampler(train_dataset, 32, shuffle=True) train_loader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=2) val_sampler = DistributedBatchSampler(val_dataset, 
32) val_loader = DataLoader(val_dataset, batch_sampler=val_sampler, num_workers=2) | | 5 | 构建训练代码 | 完成 | 无 | 无 | -| 6 | 单机多卡分布式训练 | 完成 | 在曙光超算上使用SBATCH作业提交方式时有环境的问题 | 申请4个DCU,使用镜像的方式进行实现 | -| 7 | 多机多卡分布式训练 | 完成 | 无 | 注意再进行多机多卡时先要两个机器之间互相ping一下 | +| 6 | 单机多卡分布式训练 | 完成 | 在曙光超算上使用 SBATCH 作业提交方式时有环境的问题 | 申请 4 个 DCU,使用镜像的方式进行实现 | +| 7 | 多机多卡分布式训练 | 完成 | 无 | 注意再进行多机多卡时先要两个机器之间互相 ping 一下 | -* 总结:上述单机转为分布式的过程中,总体来说感觉还是可以的,动态图下paddle单机转为分布式的代码还是比较方便的,也有一些官网上的参考文档用于学习,但是有一些是在其参考文档中没有介绍的,例如数据集的拆分等这些需要自己去思考。 +* 总结:上述单机转为分布式的过程中,总体来说感觉还是可以的,动态图下 paddle 单机转为分布式的代码还是比较方便的,也有一些官网上的参考文档用于学习,但是有一些是在其参考文档中没有介绍的,例如数据集的拆分等这些需要自己去思考。 # 4、PyTorch 单机与分布式: ## 4.1、PyTorch 单机 @@ -888,7 +888,7 @@ PyTorch 单机转为分布式的具体流程如下: ```python import torch.distributed as dist ``` -- 2、初始化分布式环境,需要指定一下通信后端(我采用的是NCCL),初始化方法(我采用的是env初始化),当前进程号以及总的进程数量。 +- 2、初始化分布式环境,需要指定一下通信后端(我采用的是 NCCL),初始化方法(我采用的是 env 初始化),当前进程号以及总的进程数量。 ```python rank = int(os.environ["RANK"]) world_size = int(os.environ['WORLD_SIZE']) @@ -897,13 +897,13 @@ device = gpu torch.cuda.set_device(gpu) dist.init_process_group(backend="nccl", world_size=world_size, rank=rank) ``` -3、构建模型,这里需要使用torch.nn.parallel.DistributedDataParallel构建其分布式的模型,这里没有采用Dataparallel API,是因为其效率相对于DistributedDataParallel比较低。 +3、构建模型,这里需要使用 torch.nn.parallel.DistributedDataParallel 构建其分布式的模型,这里没有采用 Dataparallel API,是因为其效率相对于 DistributedDataParallel 比较低。 ```python model = FaceNet(num_keypoints=15).to(device) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu]) ``` 4、构建优化器与损失函数 -优化器采用SGD,指定学习率与动量等参数。 +优化器采用 SGD,指定学习率与动量等参数。 ```python optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.0001, momentum=0.9) ``` @@ -911,14 +911,14 @@ optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters() ```python criterion = torch.nn.MSELoss() ``` -5、数据集的拆分,首先使用DistributedSampler构建分布式数据集的拆分,这里可以指定一下是否需要进行shuffle以及drop_last等参数,然后使用DataLoader进行封装。 +5、数据集的拆分,首先使用 DistributedSampler 构建分布式数据集的拆分,这里可以指定一下是否需要进行 
shuffle 以及 drop_last 等参数,然后使用 DataLoader 进行封装。 ```python train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True) train_loader = torch.utils.data.DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False) val_loader = torch.utils.data.DataLoader(val_dataset, sampler=val_sampler, batch_size=batch_size) ``` -6、构建训练代码,这里需要使用train_sampler.set_epoch(epoch)设置一下,因为在train_sampler中使用了shuffle这个参数,而看其源码可以发现其依靠self.epoch这个参数进行随机种子的设置,所以需要在每个epoch训练时设置一下采样器的self.epoch这个参数,即通过train_sampler.set_epoch(epoch)进行设置。训练代码如下,每一个epoch训练完进行val的验证评估: +6、构建训练代码,这里需要使用 train_sampler.set_epoch(epoch)设置一下,因为在 train_sampler 中使用了 shuffle 这个参数,而看其源码可以发现其依靠 self.epoch 这个参数进行随机种子的设置,所以需要在每个 epoch 训练时设置一下采样器的 self.epoch 这个参数,即通过 train_sampler.set_epoch(epoch)进行设置。训练代码如下,每一个 epoch 训练完进行 val 的验证评估: ```python for epoch in range(total_epoch): model.train() @@ -948,7 +948,7 @@ for epoch in range(total_epoch): print("Eval samples: ", len(val_dataset)) ``` 7、启动分布式任务 -PyTorch 分布式下两种启动方式,我选择的是和paddle类似的一种方式即通过torch.distributed.launch进行启动,启动方式如下: +PyTorch 分布式下两种启动方式,我选择的是和 paddle 类似的一种方式即通过 torch.distributed.launch 进行启动,启动方式如下: ```bash python -m torch.distributed.launch --nproc_per_node=4 train_multi.py ``` @@ -958,10 +958,10 @@ python -m torch.distributed.launch --nproc_per_node=4 train_multi.py ## 相似点: PaddlePaddle 与 PyTorch 单机转为分布式的流程基本上是相似的,基本上遵循如下流程:导入分布式相关的库、初始化分布式环境、构建分布式的模型、构建优化器与损失函数、同时进行分布式数据集的拆分,最后构建训练代码,其整个流程都比较相似,相对来说 PaddlePaddle 与 PyTorch 单机转为分布式都是比较方便的。 ## 不同点: -- 1、PaddlePaddle 内部有许多封装好的类,例如 paddle.Model 类,其内部封装了好多函数,例如train_batch/fit等函数,还加入了一些回调函数例如EarlyStopping等,可以比较方便地进行训练、测试的过程,比较容易使用。 -- 2、对于单机转为分布式的过程,如果对数据集进行shuffle打乱时,PyTorch 需要在每个epoch训练开始时调用train_sampler.set_epoch函数即设置一下shuffle打乱的种子,但是 PaddlePaddle 如果对数据集进行shuffle打乱时,可以选择并不需要设置,因为其内部在每次打乱时会将self.epoch进行加一的操作,即自动改变了其数据打乱的种子,使用起来更加方便。 +- 1、PaddlePaddle 内部有许多封装好的类,例如 paddle.Model 类,其内部封装了好多函数,例如 
train_batch/fit 等函数,还加入了一些回调函数例如 EarlyStopping 等,可以比较方便地进行训练、测试的过程,比较容易使用。 +- 2、对于单机转为分布式的过程,如果对数据集进行 shuffle 打乱时,PyTorch 需要在每个 epoch 训练开始时调用 train_sampler.set_epoch 函数即设置一下 shuffle 打乱的种子,但是 PaddlePaddle 如果对数据集进行 shuffle 打乱时,可以选择并不需要设置,因为其内部在每次打乱时会将 self.epoch 进行加一的操作,即自动改变了其数据打乱的种子,使用起来更加方便。 - 3、从使用方面来说,PaddlePaddle 的分布式初始化有时候会报错有时候能使用,其环境用起来感觉不太稳定,PyTorch 的分布式使用起来相对比较稳定,其初始化环境等功能实现都比较稳定。 -- 4、从官方文档来说,PaddlePaddle 的分布式示例文档中感觉不太完善,例如DistributedSampler等的API没有在分布式示例文档中展现,paddle.Model等API没有找到相关API文档的介绍;PyTorch 的分布式示例文档相对来说比较完善,包括其示例以及API的使用以及分布式通信的相关API都有其文档介绍。 +- 4、从官方文档来说,PaddlePaddle 的分布式示例文档中感觉不太完善,例如 DistributedSampler 等的 API 没有在分布式示例文档中展现,paddle.Model 等 API 没有找到相关 API 文档的介绍;PyTorch 的分布式示例文档相对来说比较完善,包括其示例以及 API 的使用以及分布式通信的相关 API 都有其文档介绍。 # 5、附录 ## 单机示例转为分布式的代码 @@ -989,7 +989,7 @@ lookid_dir = './data/data60/IdLookupTable.csv' class ImgTransforms(object): """ 图像预处理工具,用于将图像进行升维(96, 96) => (96, 96, 3), - 并对图像的维度进行转换从HWC变为CHW + 并对图像的维度进行转换从 HWC 变为 CHW """ def __init__(self, fmt): @@ -1060,7 +1060,7 @@ class FaceDataset(Dataset): return len(self.data_img) # 模型的定义 -# 对应30维度 +# 对应 30 维度 class FaceNet(paddle.nn.Layer): def __init__(self, num_keypoints, pretrained=False): super(FaceNet, self).__init__() @@ -1079,7 +1079,7 @@ class FaceNet(paddle.nn.Layer): def main(): - # 初始化Fleet环境 + # 初始化 Fleet 环境 fleet.init(is_collective=True) # 训练数据集和验证数据集 train_dataset = FaceDataset(Train_Dir, mode='train') @@ -1092,7 +1092,7 @@ def main(): optim = paddle.optimizer.Adam(learning_rate=1e-3, parameters=model.parameters()) # 构建分布式优化器 optim = fleet.distributed_optimizer(optim) - # 通过Fleet API获取分布式model,用于支持分布式训练 + # 通过 Fleet API 获取分布式 model,用于支持分布式训练 model = fleet.distributed_model(model) # 数据集的拆分 构建分布式数据集 train_sampler = DistributedBatchSampler(train_dataset, 32, shuffle=True) diff --git a/docs/faq/2.0.md b/docs/faq/2.0.md index 8fcb03da613..d9a3043feaf 100644 --- a/docs/faq/2.0.md +++ b/docs/faq/2.0.md @@ -4,67 +4,67 @@ ##### 问题:paddle 2.0 是否支持 python2,python3.5 ? 
-+ 答复:paddle 2.0依然提供了python2,python3.5的官方安装包,但未来的某个版本将不再支持python2,python3.5。(python 官方已停止对python2,python3.5的更新和维护) ++ 答复:paddle 2.0 依然提供了 python2,python3.5 的官方安装包,但未来的某个版本将不再支持 python2,python3.5。(python 官方已停止对 python2,python3.5 的更新和维护) ---------- ##### 问题:paddle 2.0 是否支持 CUDA9 ? -+ 答复:本版本依然提供了CUDA9的官方安装包,但从未来的某个版本起,将不再支持CUDA9。 ++ 答复:本版本依然提供了 CUDA9 的官方安装包,但从未来的某个版本起,将不再支持 CUDA9。 ---------- ##### 问题:paddle 2.0 是否支持 CentOS6.0 ? -+ 答复:对于CentOS6.0,本版本仅提供了有限度的支持(仅发布了CentOS6下,Python3.7 CUDA10.2/CUDNN7 和CPU的安装包)。(CentOS6官方已宣布了停止更新和维护) ++ 答复:对于 CentOS6.0,本版本仅提供了有限度的支持(仅发布了 CentOS6 下,Python3.7 CUDA10.2/CUDNN7 和 CPU 的安装包)。(CentOS6 官方已宣布了停止更新和维护) ---------- -##### 问题:paddle 2.0 是否支持 AVX指令的x86机器 ? +##### 问题:paddle 2.0 是否支持 AVX 指令的 x86 机器 ? -+ 答复:2.0版本依然提供了对不支持AVX指令的x86机器上运行飞桨的支持,但未来的某个版本将会废弃对不支持AVX指令的x86机器的支持。 ++ 答复:2.0 版本依然提供了对不支持 AVX 指令的 x86 机器上运行飞桨的支持,但未来的某个版本将会废弃对不支持 AVX 指令的 x86 机器的支持。 ---------- ##### 问题:2.0 版本移除了哪些第三方库 ? -+ 答复:2.0 版本移除的第三方依赖库为:nltk、opencv、scipy、rarfile、prettytable、pathlib、matplotlib、graphviz、objgraph。由于某些功能依然会有对opencv的依赖,在使用到时,会提示用户进行安装。 ++ 答复:2.0 版本移除的第三方依赖库为:nltk、opencv、scipy、rarfile、prettytable、pathlib、matplotlib、graphviz、objgraph。由于某些功能依然会有对 opencv 的依赖,在使用到时,会提示用户进行安装。 具体表现为: -1. 删除依赖这些库的API,原来Paddle基于这些第三方库提供了一些API,在删除这些依赖库的同时也删除了这些API,如:移除matplotlib的同时,paddle.utils.plot也被删除了。 +1. 删除依赖这些库的 API,原来 Paddle 基于这些第三方库提供了一些 API,在删除这些依赖库的同时也删除了这些 API,如:移除 matplotlib 的同时,paddle.utils.plot 也被删除了。 -2. 删除依赖第三方库的数据集,如:移除nltk的同时,依赖nltk的数据集sentiment(nltk.movie_reivew)也被删除了。 +2. 删除依赖第三方库的数据集,如:移除 nltk 的同时,依赖 nltk 的数据集 sentiment(nltk.movie_reivew)也被删除了。 -3. 删除依赖的第三方库后,如果需要使用这些库,需要重新安装`pip install objgraph`,直接import objgraph可能会报错。 +3. 删除依赖的第三方库后,如果需要使用这些库,需要重新安装`pip install objgraph`,直接 import objgraph 可能会报错。 ---------- -##### 问题:从1.x版本升级为2.0版本,哪些API有变动 ? +##### 问题:从 1.x 版本升级为 2.0 版本,哪些 API 有变动 ? 
-+ 答复:飞桨框架2.0.0版本推荐用户使用位于paddle根目录下的API,同时在`paddle.fluid`目录下保留了所有的1.x版本的API,保留对之前版本API体系的支持。 ++ 答复:飞桨框架 2.0.0 版本推荐用户使用位于 paddle 根目录下的 API,同时在`paddle.fluid`目录下保留了所有的 1.x 版本的 API,保留对之前版本 API 体系的支持。 -查看API变动的两种方法: +查看 API 变动的两种方法: -1. 依据1.8版本API到2.0版本API的对应关系表对API进行升级,请参考文档 [飞桨框架API映射表](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/08_api_mapping/paddle_api_mapping_cn.html) +1. 依据 1.8 版本 API 到 2.0 版本 API 的对应关系表对 API 进行升级,请参考文档 [飞桨框架 API 映射表](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/08_api_mapping/paddle_api_mapping_cn.html) -2. 飞桨提供了迁移工具,来方便用户将旧版本的代码迁移为2.0.1版本的代码,详情请见:[版本迁移工具](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/01_paddle2.0_introduction/migration_cn.html) +2. 飞桨提供了迁移工具,来方便用户将旧版本的代码迁移为 2.0.1 版本的代码,详情请见:[版本迁移工具](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/01_paddle2.0_introduction/migration_cn.html) ---------- -##### 问题:2.0 版本中是否有LoDTensor ? +##### 问题:2.0 版本中是否有 LoDTensor ? -+ 答复:2.0 版本没有LoDTensor的概念,统一使用Tensor来表示数据。 ++ 答复:2.0 版本没有 LoDTensor 的概念,统一使用 Tensor 来表示数据。 + 解决方案: -1. 使用padding/bucketing的方式对数据进行处理后,使用Tensor来表示数据,进行模型训练,具体示例请参考:[IMDB 数据集使用BOW网络的文本分类](https://www.paddlepaddle.org.cn/documentation/docs/zh/tutorial/nlp_case/imdb_bow_classification/imdb_bow_classification.html),[使用注意力机制的LSTM的机器翻译](https://www.paddlepaddle.org.cn/documentation/docs/zh/tutorial/nlp_case/seq2seq_with_attention/seq2seq_with_attention.html)。 +1. 使用 padding/bucketing 的方式对数据进行处理后,使用 Tensor 来表示数据,进行模型训练,具体示例请参考:[IMDB 数据集使用 BOW 网络的文本分类](https://www.paddlepaddle.org.cn/documentation/docs/zh/tutorial/nlp_case/imdb_bow_classification/imdb_bow_classification.html),[使用注意力机制的 LSTM 的机器翻译](https://www.paddlepaddle.org.cn/documentation/docs/zh/tutorial/nlp_case/seq2seq_with_attention/seq2seq_with_attention.html)。 -2. 在使用padding/bucketing方案对性能影响极大的场景下,请谨慎升级,并请期待未来的paddle对该功能更加易用和高效的实现。 +2. 
在使用 padding/bucketing 方案对性能影响极大的场景下,请谨慎升级,并请期待未来的 paddle 对该功能更加易用和高效的实现。 ---------- @@ -73,42 +73,42 @@ - 答复:在 2.0 之前的版本的 paddle 中,向用户暴露了以下的数据表示的概念: - [Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.8/beginners_guide/basic_concept/tensor.html): 类似于 numpy ndarray 的多维数组。 - [Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.8/beginners_guide/basic_concept/variable.html):可以简单理解为,在构建静态的计算图时的数据节点。 - - [LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.8/beginners_guide/basic_concept/lod_tensor.html):用来表示嵌套的、每条数据长度不一的一组数据。(例:一个batch中包含了长度为3,10,7,50的四个句子) + - [LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/1.8/beginners_guide/basic_concept/lod_tensor.html):用来表示嵌套的、每条数据长度不一的一组数据。(例:一个 batch 中包含了长度为 3,10,7,50 的四个句子) -这三类不同类型的概念的同时存在,让使用 paddle 的开发者容易感到混淆,需要构建 LoDTensor 类型的数据的情况在具体的实践中,通常也可以使用 padding/bucketing 的最佳实践来达到同样的目的,因此 paddle 2.0 版本起,我们把这些概念统一为 [Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/01_paddle2.0_introduction/basic_concept/tensor_introduction_cn.html) 的概念。在 paddle 2.0 版本起,对于每条数据长度不一的一组数据的处理,您可以参看这篇 Tutorial: [使用注意力机制的LSTM的机器翻译](https://www.paddlepaddle.org.cn/documentation/docs/zh/practices/nlp/seq2seq_with_attention.html)。 +这三类不同类型的概念的同时存在,让使用 paddle 的开发者容易感到混淆,需要构建 LoDTensor 类型的数据的情况在具体的实践中,通常也可以使用 padding/bucketing 的最佳实践来达到同样的目的,因此 paddle 2.0 版本起,我们把这些概念统一为 [Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/01_paddle2.0_introduction/basic_concept/tensor_introduction_cn.html) 的概念。在 paddle 2.0 版本起,对于每条数据长度不一的一组数据的处理,您可以参看这篇 Tutorial: [使用注意力机制的 LSTM 的机器翻译](https://www.paddlepaddle.org.cn/documentation/docs/zh/practices/nlp/seq2seq_with_attention.html)。 ---------- -##### 问题:1.8开发的静态图代码能在2.0版本中运行吗 ? +##### 问题:1.8 开发的静态图代码能在 2.0 版本中运行吗 ? 
+ 答复: -所有1.8的静态图模型在2.0版本中都会报错。 +所有 1.8 的静态图模型在 2.0 版本中都会报错。 + 问题分析: -2.0版本默认开启了动态图模式。即当调用`import paddle`后,此时Paddle已经运行在动态图模式下。基于1.8开发的静态图代码,在2.0版本下直接执行会出错。为此,在静态图的一些入口API 中加入了报错检查,例如直接调用`fluid.data`会遇到如下错误: +2.0 版本默认开启了动态图模式。即当调用`import paddle`后,此时 Paddle 已经运行在动态图模式下。基于 1.8 开发的静态图代码,在 2.0 版本下直接执行会出错。为此,在静态图的一些入口 API 中加入了报错检查,例如直接调用`fluid.data`会遇到如下错误: ![图片](https://paddlepaddleimage.cdn.bcebos.com/faqimage%2Fbj-fcd837654fa8c5bb15b071ecaad6b92ef632d872.png) + 解决方案: -1. 旧版本(1.8及之前版本)静态图下的代码,需要在`import paddle`后的头部位置加入`paddle.enable_static()`来开启静态图模式,这样才能正常运行。 +1. 旧版本(1.8 及之前版本)静态图下的代码,需要在`import paddle`后的头部位置加入`paddle.enable_static()`来开启静态图模式,这样才能正常运行。 -2. 原来通过`dygraph guard`写的动态图代码仍然可以正常执行。但在2.0下可以不需要像以前写`dygraph guard`,直接按照动态图模式编写代码。 +2. 原来通过`dygraph guard`写的动态图代码仍然可以正常执行。但在 2.0 下可以不需要像以前写`dygraph guard`,直接按照动态图模式编写代码。 -3. 同时,请注意对于GPU版本的paddle,在`import paddle`时默认开启动态图会选择`CUDAPlace`作为默认place。如果要修改place,可以通过`paddle.set_device()`来完成。 +3. 同时,请注意对于 GPU 版本的 paddle,在`import paddle`时默认开启动态图会选择`CUDAPlace`作为默认 place。如果要修改 place,可以通过`paddle.set_device()`来完成。 ---------- -##### 问题:2.0版本中`loss.backward()` 是否默认清空上个step 的梯度? +##### 问题:2.0 版本中`loss.backward()` 是否默认清空上个 step 的梯度? 
+ 答复: -2.0版本新增动态图梯度累加功能,起到变相“扩大BatchSize”的作用,`backward()`接口默认不清空上个step梯度。 +2.0 版本新增动态图梯度累加功能,起到变相“扩大 BatchSize”的作用,`backward()`接口默认不清空上个 step 梯度。 + 解决方案: diff --git a/docs/faq/data_cn.md b/docs/faq/data_cn.md index 89301160cac..e151b00afd6 100644 --- a/docs/faq/data_cn.md +++ b/docs/faq/data_cn.md @@ -5,40 +5,40 @@ + 答复:当训练时使用的数据集数据量较大或者预处理逻辑复杂时,如果串行地进行数据读取,数据读取往往会成为训练效率的瓶颈。这种情况下通常需要利用多线程或者多进程的方法异步地进行数据载入,从而提高数据读取和整体训练效率。 -paddle中推荐使用 `DataLoader`,这是一种灵活的异步加载方式。 +paddle 中推荐使用 `DataLoader`,这是一种灵活的异步加载方式。 -该API提供了多进程的异步加载支持,可以配置`num_workers`指定异步加载数据的进程数目从而满足不同规模数据集的读取需求。 +该 API 提供了多进程的异步加载支持,可以配置`num_workers`指定异步加载数据的进程数目从而满足不同规模数据集的读取需求。 -具体使用方法及示例请参考API文档:[paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader) +具体使用方法及示例请参考 API 文档:[paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader) ---------- -##### 问题:使用多卡进行并行训练时,如何配置DataLoader进行异步数据读取? +##### 问题:使用多卡进行并行训练时,如何配置 DataLoader 进行异步数据读取? -+ 答复:paddle中多卡训练时设置异步读取和单卡场景并无太大差别,动态图模式下,由于目前仅支持多进程多卡,每个进程将仅使用一个设备,比如一张GPU卡,这种情况下,与单卡训练无异,只需要确保每个进程使用的是正确的卡即可。 ++ 答复:paddle 中多卡训练时设置异步读取和单卡场景并无太大差别,动态图模式下,由于目前仅支持多进程多卡,每个进程将仅使用一个设备,比如一张 GPU 卡,这种情况下,与单卡训练无异,只需要确保每个进程使用的是正确的卡即可。 -具体示例请参考飞桨API [paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader)中的示例。 +具体示例请参考飞桨 API [paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader)中的示例。 ---------- -##### 问题:有拓展Tensor维度的Op吗? +##### 问题:有拓展 Tensor 维度的 Op 吗? -+ 答复:请参考API [paddle.unsqueeze](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/manipulation/unsqueeze_cn.html#unsqueeze)。 ++ 答复:请参考 API [paddle.unsqueeze](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/manipulation/unsqueeze_cn.html#unsqueeze)。 ---------- ##### 问题:如何给图片添加一个通道数,并进行训练? 
-+ 答复:如果是在进入paddle计算流程之前,数据仍然是numpy.array的形式,使用numpy接口`numpy.expand_dims`为图片数据增加维度后,再通过`numpy.reshape`进行操作即可,具体使用方法可查阅numpy的官方文档。 ++ 答复:如果是在进入 paddle 计算流程之前,数据仍然是 numpy.array 的形式,使用 numpy 接口`numpy.expand_dims`为图片数据增加维度后,再通过`numpy.reshape`进行操作即可,具体使用方法可查阅 numpy 的官方文档。 -如果是希望在模型训练或预测流程中完成通道的操作,可以使用paddle对应的API [paddle.unsqueeze](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/manipulation/unsqueeze_cn.html#unsqueeze) 和 [paddle.reshape](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/manipulation/reshape_cn.html#reshape)。 +如果是希望在模型训练或预测流程中完成通道的操作,可以使用 paddle 对应的 API [paddle.unsqueeze](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/manipulation/unsqueeze_cn.html#unsqueeze) 和 [paddle.reshape](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/manipulation/reshape_cn.html#reshape)。 ---------- -##### 问题:如何从numpy.array生成一个具有shape和dtype的Tensor? +##### 问题:如何从 numpy.array 生成一个具有 shape 和 dtype 的 Tensor? + 答复:在动态图模式下,可以参考如下示例: @@ -49,13 +49,13 @@ import numpy as np x = np.ones([2, 2], np.float32) y = paddle.to_tensor(x) -# 或者直接使用paddle生成tensor +# 或者直接使用 paddle 生成 tensor z = paddle.ones([2, 2], 'float32') ``` ---------- -##### 问题:如何初始化一个随机数的Tensor? +##### 问题:如何初始化一个随机数的 Tensor? -+ 答复:使用`paddle.rand` 或 `paddle.randn` 等API。具体请参考: ++ 答复:使用`paddle.rand` 或 `paddle.randn` 等 API。具体请参考: [paddle.rand](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/random/rand_cn.html#rand) 和[paddle.randn](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/tensor/random/randn_cn.html#randn) diff --git a/docs/faq/distributed_cn.md b/docs/faq/distributed_cn.md index 690d64ae41b..eae42e5a39d 100644 --- a/docs/faq/distributed_cn.md +++ b/docs/faq/distributed_cn.md @@ -2,89 +2,89 @@ ## 综合问题 -##### 问题:怎样了解飞桨分布式Fleet API用法? +##### 问题:怎样了解飞桨分布式 Fleet API 用法? 
-+ 答复:可查看覆盖高低阶应用的[分布式用户文档](https://github.com/PaddlePaddle/fleetx)和[分布式API文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/distributed/Overview_cn.html) ++ 答复:可查看覆盖高低阶应用的[分布式用户文档](https://github.com/PaddlePaddle/fleetx)和[分布式 API 文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/distributed/Overview_cn.html) ---------- ##### 问题:机房训练模型的分布式环境用什么比较合适? -+ 答复: 推荐使用K8S部署,K8S的环境搭建可[参考文档](https://fleet-x.readthedocs.io/en/latest/paddle_fleet_rst/paddle_on_k8s.html) ++ 答复: 推荐使用 K8S 部署,K8S 的环境搭建可[参考文档](https://fleet-x.readthedocs.io/en/latest/paddle_fleet_rst/paddle_on_k8s.html) ---------- ##### 问题:目前飞桨分布式对哪些模型套件/工具支持? + 答复: -1. 多机多卡支持paddlerec,PGL,paddleHelix,paddleclas,paddlenlp,paddledetection。 -2. 单机多卡支持全部飞桨的模型套件和高层API写法,无需修改单卡训练代码,默认启用全部可见的卡。 +1. 多机多卡支持 paddlerec,PGL,paddleHelix,paddleclas,paddlenlp,paddledetection。 +2. 单机多卡支持全部飞桨的模型套件和高层 API 写法,无需修改单卡训练代码,默认启用全部可见的卡。 ---------- ##### 问题:怎样自定义单机多卡训练的卡数量? -+ 答复:如果直接使用飞桨模型套件(paddleclas,paddleseg等)或高层API写的代码,可以直接用这条命令指定显卡启动程序,文档源代码不用改(文档内不要用set_device指定卡): ++ 答复:如果直接使用飞桨模型套件(paddleclas,paddleseg 等)或高层 API 写的代码,可以直接用这条命令指定显卡启动程序,文档源代码不用改(文档内不要用 set_device 指定卡): `python3 -m paddle.distributed.launch --gpus="1, 3" train.py` - 使用基础API的场景下,在程序中修改三处: - * 第1处改动,import库`import paddle.distributed as dist` - * 第2处改动,初始化并行环境`dist.init_parallel_env()` - * 第3处改动,对模型增加paddle.DataParallel封装 `net = paddle.DataParallel(paddle.vision.models.LeNet())` + 使用基础 API 的场景下,在程序中修改三处: + * 第 1 处改动,import 库`import paddle.distributed as dist` + * 第 2 处改动,初始化并行环境`dist.init_parallel_env()` + * 第 3 处改动,对模型增加 paddle.DataParallel 封装 `net = paddle.DataParallel(paddle.vision.models.LeNet())` 修改完毕就可以使用 `python3 -m paddle.distributed.launch --gpus="1, 3" xxx `来启动了。可参考[AI Studio 项目示例](https://aistudio.baidu.com/aistudio/projectdetail/1222066) ---------- -## Fleet API的使用 +## Fleet API 的使用 -##### 问题:飞桨2.0版本分布式Fleet API的目录在哪? +##### 问题:飞桨 2.0 版本分布式 Fleet API 的目录在哪? 
-+ 答复:2.0版本分布式API从paddle.fluid.incubate.fleet目录挪至paddle.distributed.fleet目录下,且对部分API接口做了兼容升级。import方式如下: ++ 答复:2.0 版本分布式 API 从 paddle.fluid.incubate.fleet 目录挪至 paddle.distributed.fleet 目录下,且对部分 API 接口做了兼容升级。import 方式如下: ```python import paddle.distributed.fleet as fleet fleet.init() ``` - 不再支持老版本paddle.fluid.incubate.fleet API,2.0版本会在分布式计算图拆分的阶段报语法相关错误。未来的某个版本会直接移除废弃paddle.fluid目录下的API。 + 不再支持老版本 paddle.fluid.incubate.fleet API,2.0 版本会在分布式计算图拆分的阶段报语法相关错误。未来的某个版本会直接移除废弃 paddle.fluid 目录下的 API。 ---------- -##### 问题:飞桨2.0版本的fleet配置初始化接口init和init_server用法有什么变化? +##### 问题:飞桨 2.0 版本的 fleet 配置初始化接口 init 和 init_server 用法有什么变化? + 答复: -1. `fleet.init`接口,2.0版本支持`role_maker`,`is_collective`,`strategy`等参数,且均有缺省值,老版本仅支持`role_maker`,且无缺省配置。[点击这里](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/distributed/fleet/Fleet_cn.html) 参考2.0 Fleet API的使用方式。 -2. `fleet.init_server`接口,除支持传入`model_dir`之外,2.0版本还支持传入`var_names`,加载指定的变量。 +1. `fleet.init`接口,2.0 版本支持`role_maker`,`is_collective`,`strategy`等参数,且均有缺省值,老版本仅支持`role_maker`,且无缺省配置。[点击这里](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/distributed/fleet/Fleet_cn.html) 参考 2.0 Fleet API 的使用方式。 +2. `fleet.init_server`接口,除支持传入`model_dir`之外,2.0 版本还支持传入`var_names`,加载指定的变量。 ---------- -##### 问题: 飞桨2.0版本的分布式paddle.static.nn.sparse_embedding和paddle.nn.embedding有什么差别? +##### 问题: 飞桨 2.0 版本的分布式 paddle.static.nn.sparse_embedding 和 paddle.nn.embedding 有什么差别? 
-+ 答复:`paddle.nn.embedding`和`paddle.static.nn.sparse_embedding`的稀疏参数将会在每个PServer段都用文本的一部分保存,最终整体拼接起来是完整的embedding。推荐使用`paddle.static.nn.sparse_embedding`直接采用分布式预估的方案。虽然 `nn.embedding`目前依旧可以正常使用,但后续的某个版本会变成与使用`paddle.static.nn.sparse_embedding`一样的保存方案。老版本中使用的0号节点的本地预测功能在加载模型的时候会报模型加载错误。 ++ 答复:`paddle.nn.embedding`和`paddle.static.nn.sparse_embedding`的稀疏参数将会在每个 PServer 段都用文本的一部分保存,最终整体拼接起来是完整的 embedding。推荐使用`paddle.static.nn.sparse_embedding`直接采用分布式预估的方案。虽然 `nn.embedding`目前依旧可以正常使用,但后续的某个版本会变成与使用`paddle.static.nn.sparse_embedding`一样的保存方案。老版本中使用的 0 号节点的本地预测功能在加载模型的时候会报模型加载错误。 ---------- -##### 问题:飞桨2.0分布式可以用哪些配置类? +##### 问题:飞桨 2.0 分布式可以用哪些配置类? -+ 答复:2.0之后统一为`paddle.distributed.fleet.DistributedStrategy()`,与下述老版本配置类不兼容。2.0之前的版本参数服务器配置类:`paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy.DistributedStrategy`,2.0之前的版本collective模式配置类:`paddle.fluid.incubate.fleet.collective.DistributedStrategy` ++ 答复:2.0 之后统一为`paddle.distributed.fleet.DistributedStrategy()`,与下述老版本配置类不兼容。2.0 之前的版本参数服务器配置类:`paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy.DistributedStrategy`,2.0 之前的版本 collective 模式配置类:`paddle.fluid.incubate.fleet.collective.DistributedStrategy` ---------- -##### 问题:飞桨2.0分布式配置项统一到DistributedStrategy后有哪些具体变化? +##### 问题:飞桨 2.0 分布式配置项统一到 DistributedStrategy 后有哪些具体变化? 
+ 答复: -2.0版本之后,建议根据 [DistributedStrategy文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/distributed/fleet/DistributedStrategy_cn.html) 和 [BuildStrategy文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/compiler/BuildStrategy_cn.html#buildstrategy) 修改配置选项。 +2.0 版本之后,建议根据 [DistributedStrategy 文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/distributed/fleet/DistributedStrategy_cn.html) 和 [BuildStrategy 文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/compiler/BuildStrategy_cn.html#buildstrategy) 修改配置选项。 -2.0版本将3个环境变量配置变为`DistributedStrategy`配置项,3个环境变量将不生效,包括 +2.0 版本将 3 个环境变量配置变为`DistributedStrategy`配置项,3 个环境变量将不生效,包括 * `FLAGS_sync_nccl_allreduce`→ `strategy.sync_nccl_allreduce` * `FLAGS_fuse_parameter_memory_size` → `strategy.fuse_grad_size_in_MB` * `FLAGS_fuse_parameter_groups_size` → `strategy.fuse_grad_size_in_TFLOPS` - DistributedStrategy中`exec_strategy`配置项不兼容升级为`execution_strategy`。 + DistributedStrategy 中`exec_strategy`配置项不兼容升级为`execution_strategy`。 - DistributedStrategy中`forward_recompute`配置项不兼容升级为`recompute`。 + DistributedStrategy 中`forward_recompute`配置项不兼容升级为`recompute`。 - DistributedStrategy中`recompute_checkpoints`配置项不兼容升级为`recompute_configs`字典下的字段,如下: + DistributedStrategy 中`recompute_checkpoints`配置项不兼容升级为`recompute_configs`字典下的字段,如下: ```python import paddle.distributed.fleet a fleet @@ -96,39 +96,39 @@ "checkpoint_shape": [100, 512, 1024]} ``` - DistributedStrategy中`use_local_sgd`配置项变为不兼容升级为localsgd。 + DistributedStrategy 中`use_local_sgd`配置项变为不兼容升级为 localsgd。 ---------- -##### 问题:飞桨2.0分布式Fleet的program接口是否还能继续用? -+ 答复:2.0版本后,fleet接口下main_program和_origin_program均已废弃,会报错没有这个变量,替换使用`paddle.static.default_main_program`即可。 +##### 问题:飞桨 2.0 分布式 Fleet 的 program 接口是否还能继续用? ++ 答复:2.0 版本后,fleet 接口下 main_program 和_origin_program 均已废弃,会报错没有这个变量,替换使用`paddle.static.default_main_program`即可。 ---------- -##### 问题:怎样在本地测试Fleet API实现的分布式训练代码是否正确? 
+##### 问题:怎样在本地测试 Fleet API 实现的分布式训练代码是否正确? -+ 答复:首先写好分布式train.py文件 ++ 答复:首先写好分布式 train.py 文件 - * 在PServer模式下,命令行模拟启动分布式:`python -m paddle.distributed.launch_ps --worker_num 2 --server_num 2 train.py` - * 在Collective模式下,命令改为`python -m paddle.distributed.launch --gpus=0,1 train.py` + * 在 PServer 模式下,命令行模拟启动分布式:`python -m paddle.distributed.launch_ps --worker_num 2 --server_num 2 train.py` + * 在 Collective 模式下,命令改为`python -m paddle.distributed.launch --gpus=0,1 train.py` ---------- -##### 问题:Paddle Fleet怎样做增量训练,有没有文档支持? +##### 问题:Paddle Fleet 怎样做增量训练,有没有文档支持? + 答复:增量训练可参考[文档示例](https://fleet-x.readthedocs.io/en/latest/paddle_fleet_rst/parameter_server/ps_incremental_learning.html) ---------- -##### 问题:飞桨2.0分布式distributed_optimizer如何使用自动混合精度amp的optimizer? +##### 问题:飞桨 2.0 分布式 distributed_optimizer 如何使用自动混合精度 amp 的 optimizer? -+ 答复:`amp_init`接口支持pure_fp16,可以直接调用`optimizer.amp_init`。 ++ 答复:`amp_init`接口支持 pure_fp16,可以直接调用`optimizer.amp_init`。 ---------- -##### 问题:Paddle Fleet可以在K8S GPU集群上利用CPU资源跑pserver模式的MPI程序吗? +##### 问题:Paddle Fleet 可以在 K8S GPU 集群上利用 CPU 资源跑 pserver 模式的 MPI 程序吗? -+ 答复:可以,GPU可设置为trainer。 ++ 答复:可以,GPU 可设置为 trainer。 ---------- @@ -140,54 +140,54 @@ ## 环境配置和训练初始化 -##### 问题:分布式环境变量FLAGS参数定义可以在哪查看,比如communicator相关的? +##### 问题:分布式环境变量 FLAGS 参数定义可以在哪查看,比如 communicator 相关的? + 答复:参考使用[DistributedStrategy](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/distributed/fleet/DistributedStrategy_cn.html#distributedstrategy)配置分布式策略。 ---------- -##### 问题:2.0分布式训练的启动命令有什么变化? +##### 问题:2.0 分布式训练的启动命令有什么变化? 
-+ 答复:为了统一启动分布式Collective/PS模式任务方式以及易用性考虑,2.0版本中launch/fleetrun启动分布式任务时参数产生不兼容升级,`--cluster_node_ips`改为`--ips`,`--selected_gpus`改为`--gpus`、`--node_ip`、`--use_paddlecloud`、`--started_port`、`--log_level`、`--print_config` 5个参数已废弃,使用旧参数会直接报错没有此参数。代码迁移至python/paddle/distributed/fleet/launch.py。 ++ 答复:为了统一启动分布式 Collective/PS 模式任务方式以及易用性考虑,2.0 版本中 launch/fleetrun 启动分布式任务时参数产生不兼容升级,`--cluster_node_ips`改为`--ips`,`--selected_gpus`改为`--gpus`、`--node_ip`、`--use_paddlecloud`、`--started_port`、`--log_level`、`--print_config` 5 个参数已废弃,使用旧参数会直接报错没有此参数。代码迁移至 python/paddle/distributed/fleet/launch.py。 ---------- -##### 问题:分布式环境依赖为什么出现第三方libssl库的依赖? +##### 问题:分布式环境依赖为什么出现第三方 libssl 库的依赖? -+ 答复:分布式RPC从GRPC迁移至BRPC, 会导致在运行时依赖libssl库。使用docker的情况下,基础镜像拉一下官方最新的docker镜像,或自行安装libssl相关的依赖也可以。未安装libssl的情况下,import paddle的时候,出现找不到libssl.so的库文件相关报错。使用MPI的情况下,需要将编译包时用到的libssl.so、libcrypto.so等依赖手动通过`LD_LIBRARY_PATH`进行指定。 ++ 答复:分布式 RPC 从 GRPC 迁移至 BRPC, 会导致在运行时依赖 libssl 库。使用 docker 的情况下,基础镜像拉一下官方最新的 docker 镜像,或自行安装 libssl 相关的依赖也可以。未安装 libssl 的情况下,import paddle 的时候,出现找不到 libssl.so 的库文件相关报错。使用 MPI 的情况下,需要将编译包时用到的 libssl.so、libcrypto.so 等依赖手动通过`LD_LIBRARY_PATH`进行指定。 ---------- ## 分布式的动态图模式 -##### 问题:飞桨2.0版本动态图DataParallel用法有哪些简化? +##### 问题:飞桨 2.0 版本动态图 DataParallel 用法有哪些简化? -+答复:老版本用法依然兼容,建议使用以下新用法:`apply_collective_grads`、`scale_loss`可以删除不使用。loss会根据环境除以相应的卡数,`scale_loss`不再进行任何处理。 ++答复:老版本用法依然兼容,建议使用以下新用法:`apply_collective_grads`、`scale_loss`可以删除不使用。loss 会根据环境除以相应的卡数,`scale_loss`不再进行任何处理。 ---------- -##### 问题:飞桨2.0版本调用model.eval之后不再自动关闭反向计算图的构建,引入显存的消耗增加,可能会引入OOM,怎么解决? +##### 问题:飞桨 2.0 版本调用 model.eval 之后不再自动关闭反向计算图的构建,引入显存的消耗增加,可能会引入 OOM,怎么解决? + 答复:动态图`no_grad`和 `model.eval` 解绑,应使用`with paddle.no_grad():` 命令,显示关闭反向计算图的构建。 ---------- -##### 问题:飞桨2.0版本动态图环境初始化新接口怎样用? +##### 问题:飞桨 2.0 版本动态图环境初始化新接口怎样用? 
-+ 答复:建议调用新接口`paddle.distributed.init_parallel_env`,不需要输入参数。1.8的`fluid.dygraph.prepare_context`依然兼容。 ++ 答复:建议调用新接口`paddle.distributed.init_parallel_env`,不需要输入参数。1.8 的`fluid.dygraph.prepare_context`依然兼容。 ---------- -##### 问题:分布式支持哪些飞桨2.0版本的模型保存和加载接口? +##### 问题:分布式支持哪些飞桨 2.0 版本的模型保存和加载接口? -+ 答复: 与单机相同,分布式动态图推荐使用`paddle.jit.save`保存,使用`paddle.jit.load`加载,无需切换静态图,存储格式与推理模型存储一致。对比1.8动态图使用不含控制流的模型保存接口`TracedLayer.save_inference_model`,含控制流的模型保存接口`ProgramTranslator.save_inference_model`,加载模型需要使用静态图接口`fluid.io.load_inference_model`。 ++ 答复: 与单机相同,分布式动态图推荐使用`paddle.jit.save`保存,使用`paddle.jit.load`加载,无需切换静态图,存储格式与推理模型存储一致。对比 1.8 动态图使用不含控制流的模型保存接口`TracedLayer.save_inference_model`,含控制流的模型保存接口`ProgramTranslator.save_inference_model`,加载模型需要使用静态图接口`fluid.io.load_inference_model`。 `fluid.save_dygraph`和`fluid.load_dygraph`升级为`paddle.save`和`paddle.load`,推荐使用新接口。`paddle.save`不再默认添加后缀,建议用户指定使用标椎后缀(模型参数:.pdparams,优化器参数:.pdopt)。 -##### 问题:飞桨2.0版本为什么不能使用minimize 和 clear_gradient? +##### 问题:飞桨 2.0 版本为什么不能使用 minimize 和 clear_gradient? -+ 答复:2.0版本中重新实现optimizer,放在`paddle.optimizer`,建议使用新接口和参数。老版本的`paddle.fluid.optimizer`仍然可用。 ++ 答复:2.0 版本中重新实现 optimizer,放在`paddle.optimizer`,建议使用新接口和参数。老版本的`paddle.fluid.optimizer`仍然可用。 - 新版增加接口`step`替换`minimize`。老版动态图需要调用`loss.backward()`,用minimize来表示梯度的更新行为,词语意思不太一致。 + 新版增加接口`step`替换`minimize`。老版动态图需要调用`loss.backward()`,用 minimize 来表示梯度的更新行为,词语意思不太一致。 新版使用简化的`clear_grad`接口替换`clear_gradient`。 @@ -197,20 +197,20 @@ ## 报错查错 -##### 问题:集合通信Collective模式报参数未初始化的错误是什么原因? +##### 问题:集合通信 Collective 模式报参数未初始化的错误是什么原因? 
-+ 答复:2.0版本需要严格先`run(startup_program)`,然后再调用`fleet.init_worker()`启动worker端通信相关,并将0号worker的参数广播出去完成其他节点的初始化。先`init_worker`,再`run(startup_program)`,会报参数未初始化的错误 ++ 答复:2.0 版本需要严格先`run(startup_program)`,然后再调用`fleet.init_worker()`启动 worker 端通信相关,并将 0 号 worker 的参数广播出去完成其他节点的初始化。先`init_worker`,再`run(startup_program)`,会报参数未初始化的错误 - 2.0之前的版本是在server端做初始化,无需0号节点广播,所以`init_worker()`可以在`run(startup_program)`执行。 + 2.0 之前的版本是在 server 端做初始化,无需 0 号节点广播,所以`init_worker()`可以在`run(startup_program)`执行。 ---------- -##### 问题:分布式任务跑很久loss突然变成nan的原因? +##### 问题:分布式任务跑很久 loss 突然变成 nan 的原因? -+ 答复:可设置环境变量`export FLAGS_check_nan_inf=1`定位出错的地方,可以从checkpoint开始训练,参数服务器和集合通信模式均可使用这种方式查错。 ++ 答复:可设置环境变量`export FLAGS_check_nan_inf=1`定位出错的地方,可以从 checkpoint 开始训练,参数服务器和集合通信模式均可使用这种方式查错。 ---------- -##### 问题:任务卡在role init怎么解决? +##### 问题:任务卡在 role init 怎么解决? -+ 答复:通常可能是gloo的初始化问题,需要检查是否有节点任务挂了。建议调小`train_data`配置的数据量,由于启动trainer前要下载数据,大量数据会导致拖慢。 ++ 答复:通常可能是 gloo 的初始化问题,需要检查是否有节点任务挂了。建议调小`train_data`配置的数据量,由于启动 trainer 前要下载数据,大量数据会导致拖慢。 diff --git a/docs/faq/index_cn.rst b/docs/faq/index_cn.rst index 5bc8364048d..5ca36d0f7c2 100644 --- a/docs/faq/index_cn.rst +++ b/docs/faq/index_cn.rst @@ -2,9 +2,9 @@ 常见问题与解答 ############## -本栏目以问答对的形式收录了用户开发过程中遇到的高频咨询类问题,包含了2.0版本的变化、安装、数据与数据处理、模型组网、训练、预测、模型保存与加载、参数调整、分布式等几类常见问题。 +本栏目以问答对的形式收录了用户开发过程中遇到的高频咨询类问题,包含了 2.0 版本的变化、安装、数据与数据处理、模型组网、训练、预测、模型保存与加载、参数调整、分布式等几类常见问题。 -除此之外,你也可以查看 `官网API文档 `_ 、 `历史Issues `_ 、 `飞桨论坛 `_ 来寻求解答。 +除此之外,你也可以查看 `官网 API 文档 `_ 、 `历史 Issues `_ 、 `飞桨论坛 `_ 来寻求解答。 同时,你也可以在 `Github Issues `_ 中进行提问,飞桨会有专门的技术人员解答。 diff --git a/docs/faq/install_cn.md b/docs/faq/install_cn.md index cfe51ffce96..439a9b22dc2 100644 --- a/docs/faq/install_cn.md +++ b/docs/faq/install_cn.md @@ -1,7 +1,7 @@ # 安装常见问题 -##### 问题:使用过程中报找不到tensorrt库的日志 +##### 问题:使用过程中报找不到 tensorrt 库的日志 + 问题描述: @@ -13,35 +13,35 @@ > Windows: set PATH by `set PATH=XXX; + 问题分析: -遇到该问题是因为使用的paddle默认开始了TensorRT,但是本地环境中没有找到TensorRT的库,该问题只影响使用[Paddle 
Inference](https://paddleinference.paddlepaddle.org.cn/master/product_introduction/inference_intro.html)开启TensorRT预测的场景,对其它方面均不造成影响。 +遇到该问题是因为使用的 paddle 默认开始了 TensorRT,但是本地环境中没有找到 TensorRT 的库,该问题只影响使用[Paddle Inference](https://paddleinference.paddlepaddle.org.cn/master/product_introduction/inference_intro.html)开启 TensorRT 预测的场景,对其它方面均不造成影响。 + 解决办法: -根据提示信息,在环境变量中加入TensorRT的库路径。 +根据提示信息,在环境变量中加入 TensorRT 的库路径。 ----- -##### 问题:Windows环境下,使用pip install时速度慢,如何解决? +##### 问题:Windows 环境下,使用 pip install 时速度慢,如何解决? + 解决方案: -在pip后面加上参数`-i`指定pip源,使用国内源获取安装包。 +在 pip 后面加上参数`-i`指定 pip 源,使用国内源获取安装包。 + 操作步骤: -1. Python2情况下,使用如下命令安装PaddlePaddle。 +1. Python2 情况下,使用如下命令安装 PaddlePaddle。 ```bash pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple/ ``` -2. Python3情况下,使用如下命令安装PaddlePaddle。 +2. Python3 情况下,使用如下命令安装 PaddlePaddle。 ```bash pip3 install paddlepaddle -i https://mirror.baidu.com/pypi/simple/ ``` -你还可以通过如下三个地址获取pip安装包,只需修改 `-i` 后网址即可: +你还可以通过如下三个地址获取 pip 安装包,只需修改 `-i` 后网址即可: 1. https://pypi.tuna.tsinghua.edu.cn/simple 2. https://mirrors.aliyun.com/pypi/simple/ @@ -49,29 +49,29 @@ ------ -##### 问题:使用pip install时报错,`PermissionError: [WinError 5]` ,如何解决? +##### 问题:使用 pip install 时报错,`PermissionError: [WinError 5]` ,如何解决? + 问题描述: -使用pip install时报错,`PermissionError: [WinError 5]` , +使用 pip install 时报错,`PermissionError: [WinError 5]` , `C:\\Program Files\\python35\\Lib\\site-packages\\graphviz`。 + 报错分析: -用户权限问题导致,由于用户的Python安装到系统文件内(如`Program Files/`),任何的操作都需要管理员权限。 +用户权限问题导致,由于用户的 Python 安装到系统文件内(如`Program Files/`),任何的操作都需要管理员权限。 + 解决方法: -选择“以管理员身份运行”运行CMD,重新执行安装过程, 使用命令`pip install paddlepaddle`。 +选择“以管理员身份运行”运行 CMD,重新执行安装过程, 使用命令`pip install paddlepaddle`。 ------ -##### 问题: 使用pip install时报错,`ERROR: No matching distribution found for paddlepaddle` ,如何解决? +##### 问题: 使用 pip install 时报错,`ERROR: No matching distribution found for paddlepaddle` ,如何解决? 
+ 问题描述: -使用pip install时报错,`ERROR: Could not find a version that satisfies the requirement paddlepaddle (from versions: none)` +使用 pip install 时报错,`ERROR: Could not find a version that satisfies the requirement paddlepaddle (from versions: none)` `ERROR: No matching distribution found for paddlepaddle` @@ -79,53 +79,53 @@ + 报错分析: -Python版本不匹配导致。用户使用的是32位Python,但是对应的32位pip没有PaddlePaddle源。 +Python 版本不匹配导致。用户使用的是 32 位 Python,但是对应的 32 位 pip 没有 PaddlePaddle 源。 + 解决方法: -请用户使用64位的Python进行PaddlePaddle安装。 +请用户使用 64 位的 Python 进行 PaddlePaddle 安装。 ------ -##### 问题: 本地使用import paddle时报错,`ModuleNotFoundError:No module named ‘paddle’`,如何解决? +##### 问题: 本地使用 import paddle 时报错,`ModuleNotFoundError:No module named ‘paddle’`,如何解决? + 报错分析: -原因在于用户的计算机上可能安装了多个版本的Python,而安装PaddlePaddle时的Python和`import paddle`时的Python版本不一致导致报错。如果用户熟悉PyCharm等常见的IDE配置包安装的方法,配置运行的方法,则可以避免此类问题。 +原因在于用户的计算机上可能安装了多个版本的 Python,而安装 PaddlePaddle 时的 Python 和`import paddle`时的 Python 版本不一致导致报错。如果用户熟悉 PyCharm 等常见的 IDE 配置包安装的方法,配置运行的方法,则可以避免此类问题。 + 解决方法: -用户明确安装Paddle的python位置,并切换到该python进行安装。可能需要使用`python -m pip install paddlepaddle`命令确保paddle是安装到该python中。 +用户明确安装 Paddle 的 python 位置,并切换到该 python 进行安装。可能需要使用`python -m pip install paddlepaddle`命令确保 paddle 是安装到该 python 中。 ------ -##### 问题: 使用PaddlePaddle GPU的Docker镜像时报错, `Cuda Error: CUDA driver version is insufficient for CUDA runtime version`,如何解决? +##### 问题: 使用 PaddlePaddle GPU 的 Docker 镜像时报错, `Cuda Error: CUDA driver version is insufficient for CUDA runtime version`,如何解决? + 报错分析: -机器上的CUDA驱动偏低导致。 +机器上的 CUDA 驱动偏低导致。 + 解决方法: -需要升级CUDA驱动解决。 +需要升级 CUDA 驱动解决。 -1. Ubuntu和CentOS环境,需要把相关的驱动和库映射到容器内部。如果使用GPU的docker环境,需要用nvidia-docker来运行,更多请参考[nvidia-docker](https://github.com/NVIDIA/nvidia-docker)。 +1. Ubuntu 和 CentOS 环境,需要把相关的驱动和库映射到容器内部。如果使用 GPU 的 docker 环境,需要用 nvidia-docker 来运行,更多请参考[nvidia-docker](https://github.com/NVIDIA/nvidia-docker)。 -2. Windows环境,需要升级CUDA驱动。 +2. 
Windows 环境,需要升级 CUDA 驱动。 ------ -##### 问题: 使用PaddlePaddle时报错,`Error: no CUDA-capable device is detected`,如何解决? +##### 问题: 使用 PaddlePaddle 时报错,`Error: no CUDA-capable device is detected`,如何解决? + 报错分析: -CUDA安装错误导致。 +CUDA 安装错误导致。 + 解决方法: 查找“libcudart.so”所在目录,并将其添加到`LD_LIBRARY_PATH`中。 -例如:执行`find / -name libcudart.so`, 发现libcudart.so在`/usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart.so`路径下, 使用如下命令添加即可。 +例如:执行`find / -name libcudart.so`, 发现 libcudart.so 在`/usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart.so`路径下, 使用如下命令添加即可。 ```bash export LD_LIBRARY_PATH=/usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart.so:${LD_LIBRARY_PATH} @@ -133,11 +133,11 @@ export LD_LIBRARY_PATH=/usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart.s ------ -##### 问题: 如何升级PaddlePaddle? +##### 问题: 如何升级 PaddlePaddle? + 答复: -1. GPU环境: +1. GPU 环境: ```bash pip install -U paddlepaddle-gpu @@ -146,10 +146,10 @@ export LD_LIBRARY_PATH=/usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart.s 或者 ```bash - pip install paddlepaddle-gpu==需要安装的版本号(如2.0) + pip install paddlepaddle-gpu==需要安装的版本号(如 2.0) ``` -2. CPU环境: +2. CPU 环境: ```bash pip install -U paddlepaddle @@ -157,29 +157,29 @@ export LD_LIBRARY_PATH=/usr/local/cuda-10.0/targets/x86_64-linux/lib/libcudart.s 或者 ```bash - pip install paddlepaddle==需要安装的版本号(如2.0) + pip install paddlepaddle==需要安装的版本号(如 2.0) ``` ------ -##### 问题: 在GPU上如何选择PaddlePaddle版本? +##### 问题: 在 GPU 上如何选择 PaddlePaddle 版本? 
+ 答复: -首先请确定你本机的CUDA、cuDNN版本,飞桨目前pip安装适配CUDA版本9.0/10.0/10.1/10.2/11.0,CUDA9.0/10.0/10.1/10.2 配合 cuDNN v7.6.5+,CUDA 工具包11.0配合cuDNN v8.0.4。请确定你安装的是适合的版本。更多安装信息见[官网安装文档](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/2.0/install/pip/windows-pip.html) +首先请确定你本机的 CUDA、cuDNN 版本,飞桨目前 pip 安装适配 CUDA 版本 9.0/10.0/10.1/10.2/11.0,CUDA9.0/10.0/10.1/10.2 配合 cuDNN v7.6.5+,CUDA 工具包 11.0 配合 cuDNN v8.0.4。请确定你安装的是适合的版本。更多安装信息见[官网安装文档](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/2.0/install/pip/windows-pip.html) ------ -##### 问题: import paddle报错, dlopen: cannot load any more object with static TLS, 如何解决? +##### 问题: import paddle 报错, dlopen: cannot load any more object with static TLS, 如何解决? + 答复: -glibc版本过低,建议使用官方提供的docker镜像或者将glibc升级到2.23+。 +glibc 版本过低,建议使用官方提供的 docker 镜像或者将 glibc 升级到 2.23+。 ------ -##### 问题: python2.7中,如果使用Paddle1.8.5之前的版本,import paddle时,报错,提示`/xxxx/rarfile.py, line820, print(f.filename, file=file), SyntaxError: invalid syntax`,如何解决? +##### 问题: python2.7 中,如果使用 Paddle1.8.5 之前的版本,import paddle 时,报错,提示`/xxxx/rarfile.py, line820, print(f.filename, file=file), SyntaxError: invalid syntax`,如何解决? + 答复: -rarfile版本太高,它的最新版本已经不支持python2.x了,可以通过`pip install rarfile==3.0`安装3.0版本的rarfile即可。 +rarfile 版本太高,它的最新版本已经不支持 python2.x 了,可以通过`pip install rarfile==3.0`安装 3.0 版本的 rarfile 即可。 diff --git a/docs/faq/others_cn.md b/docs/faq/others_cn.md index e9d8791a0ae..0b850b45de6 100644 --- a/docs/faq/others_cn.md +++ b/docs/faq/others_cn.md @@ -1,14 +1,14 @@ # 其他常见问题 -##### 问题:使用X2paddle 从Caffe 转Paddle model时,报错 `TypeError: __new__() got an unexpected keyword argument 'serialized_options'` ,如何处理? +##### 问题:使用 X2paddle 从 Caffe 转 Paddle model 时,报错 `TypeError: __new__() got an unexpected keyword argument 'serialized_options'` ,如何处理? -+ 答复:这是由于ProtoBuf版本较低导致,将protobuf升级到3.6.0即可解决。 ++ 答复:这是由于 ProtoBuf 版本较低导致,将 protobuf 升级到 3.6.0 即可解决。 ---------- -##### 问题:Windows环境下,出现"Windows not support stack backtrace yet",如何处理? 
+##### 问题:Windows 环境下,出现"Windows not support stack backtrace yet",如何处理? -+ 答复:Windows环境下,遇到程序报错不会详细跟踪内存报错内容。这些信息对底层开发者更有帮助,普通开发者不必关心这类警告。如果想得到完整内存追踪错误信息,可以尝试更换至Linux系统。 ++ 答复:Windows 环境下,遇到程序报错不会详细跟踪内存报错内容。这些信息对底层开发者更有帮助,普通开发者不必关心这类警告。如果想得到完整内存追踪错误信息,可以尝试更换至 Linux 系统。 ---------- diff --git a/docs/faq/params_cn.md b/docs/faq/params_cn.md index a04b32df906..e2f945f258e 100644 --- a/docs/faq/params_cn.md +++ b/docs/faq/params_cn.md @@ -2,20 +2,20 @@ ##### 问题:如何将本地数据传入`paddle.nn.embedding`的参数矩阵中? -+ 答复:需将本地词典向量读取为NumPy数据格式,然后使用`paddle.nn.initializer.Assign`这个API初始化`paddle.nn.embedding`里的`param_attr`参数,即可实现加载用户自定义(或预训练)的Embedding向量。 ++ 答复:需将本地词典向量读取为 NumPy 数据格式,然后使用`paddle.nn.initializer.Assign`这个 API 初始化`paddle.nn.embedding`里的`param_attr`参数,即可实现加载用户自定义(或预训练)的 Embedding 向量。 ------ -##### 问题:如何实现网络层中多个feature间共享该层的向量权重? +##### 问题:如何实现网络层中多个 feature 间共享该层的向量权重? -+ 答复:你可以使用`paddle.ParamAttr`并设定一个name参数,然后再将这个类的对象传入网络层的`param_attr`参数中,即将所有网络层中`param_attr`参数里的`name`设置为同一个,即可实现共享向量权重。如使用embedding层时,可以设置`param_attr=paddle.ParamAttr(name="word_embedding")`,然后把`param_attr`传入embedding层中。 ++ 答复:你可以使用`paddle.ParamAttr`并设定一个 name 参数,然后再将这个类的对象传入网络层的`param_attr`参数中,即将所有网络层中`param_attr`参数里的`name`设置为同一个,即可实现共享向量权重。如使用 embedding 层时,可以设置`param_attr=paddle.ParamAttr(name="word_embedding")`,然后把`param_attr`传入 embedding 层中。 ---------- -##### 问题:使用optimizer或ParamAttr设置的正则化和学习率,二者什么差异? +##### 问题:使用 optimizer 或 ParamAttr 设置的正则化和学习率,二者什么差异? -+ 答复:ParamAttr中定义的`regularizer`优先级更高。若ParamAttr中定义了`regularizer`,则忽略Optimizer中的`regularizer`;否则,则使用Optimizer中的`regularizer`。ParamAttr中的学习率默认为1.0,在对参数优化时,最终的学习率等于optimizer的学习率乘以ParamAttr的学习率。 ++ 答复:ParamAttr 中定义的`regularizer`优先级更高。若 ParamAttr 中定义了`regularizer`,则忽略 Optimizer 中的`regularizer`;否则,则使用 Optimizer 中的`regularizer`。ParamAttr 中的学习率默认为 1.0,在对参数优化时,最终的学习率等于 optimizer 的学习率乘以 ParamAttr 的学习率。 ---------- @@ -23,44 +23,44 @@ + 答复: -1. 
在动态图中,使用`paddle.save` API, 并将最后一层的`layer.state_dict()` 传入至save方法的obj 参数即可, 然后使用`paddle.load` 方法加载对应层的参数值。详细可参考API文档[save](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/framework/io/save_cn.html#save) 和[load](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/framework/io/load_cn.html#load)。 -2. 在静态图中,使用`paddle.static.save_vars`保存指定的vars,然后使用`paddle.static.load_vars`加载对应层的参数值。具体示例请见API文档:[load_vars](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/io/load_vars_cn.html) 和 [save_vars](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/io/save_vars_cn.html) 。 +1. 在动态图中,使用`paddle.save` API, 并将最后一层的`layer.state_dict()` 传入至 save 方法的 obj 参数即可, 然后使用`paddle.load` 方法加载对应层的参数值。详细可参考 API 文档[save](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/framework/io/save_cn.html#save) 和[load](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/framework/io/load_cn.html#load)。 +2. 在静态图中,使用`paddle.static.save_vars`保存指定的 vars,然后使用`paddle.static.load_vars`加载对应层的参数值。具体示例请见 API 文档:[load_vars](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/io/load_vars_cn.html) 和 [save_vars](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/io/save_vars_cn.html) 。 ---------- -##### 问题:训练过程中如何固定网络和Batch Normalization(BN)? +##### 问题:训练过程中如何固定网络和 Batch Normalization(BN)? + 答复: -1. 对于固定BN:设置 `use_global_stats=True`,使用已加载的全局均值和方差:`global mean/variance`,具体内容可查看官网API文档[batch_norm](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/layers/batch_norm_cn.html#batch-norm)。 +1. 对于固定 BN:设置 `use_global_stats=True`,使用已加载的全局均值和方差:`global mean/variance`,具体内容可查看官网 API 文档[batch_norm](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/layers/batch_norm_cn.html#batch-norm)。 -2. 对于固定网络层:如: stage1→ stage2 → stage3 ,设置stage2的输出,假设为*y*,设置 `y.stop_gradient=True`,那么, stage1→ stage2整体都固定了,不再更新。 +2. 
对于固定网络层:如: stage1→ stage2 → stage3 ,设置 stage2 的输出,假设为*y*,设置 `y.stop_gradient=True`,那么, stage1→ stage2 整体都固定了,不再更新。 ---------- -##### 问题:训练的step在参数优化器中是如何变化的? +##### 问题:训练的 step 在参数优化器中是如何变化的? 图片名称 * 答复: - `step`表示的是经历了多少组mini_batch,其统计方法为`exe.run`(对应Program)运行的当前次数,即每运行一次`exe.run`,step加1。举例代码如下: + `step`表示的是经历了多少组 mini_batch,其统计方法为`exe.run`(对应 Program)运行的当前次数,即每运行一次`exe.run`,step 加 1。举例代码如下: ```python -# 执行下方代码后相当于step增加了N x Epoch总数 +# 执行下方代码后相当于 step 增加了 N x Epoch 总数 for epoch in range(epochs): - # 执行下方代码后step相当于自增了N + # 执行下方代码后 step 相当于自增了 N for data in [mini_batch_1,2,3...N]: - # 执行下方代码后step += 1 + # 执行下方代码后 step += 1 exe.run(data) ``` ----- -##### 问题:如何修改全连接层参数,比如weight,bias? +##### 问题:如何修改全连接层参数,比如 weight,bias? -+ 答复:可以通过`param_attr`设置参数的属性,`paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0.0, 0.02), learning_rate=2.0)`,如果`learning_rate`设置为0,该层就不参与训练。也可以构造一个numpy数据,使用`paddle.nn.initializer.Assign`来给权重设置想要的值。 ++ 答复:可以通过`param_attr`设置参数的属性,`paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0.0, 0.02), learning_rate=2.0)`,如果`learning_rate`设置为 0,该层就不参与训练。也可以构造一个 numpy 数据,使用`paddle.nn.initializer.Assign`来给权重设置想要的值。 ----- @@ -68,8 +68,8 @@ for epoch in range(epochs): ##### 问题:如何进行梯度裁剪? 
-+ 答复:Paddle的梯度裁剪方式需要在[Optimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/Overview_cn.html#api)中进行设置,目前提供三种梯度裁剪方式,分别是[paddle.nn.ClipGradByValue](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByValue_cn.html)`(设定范围值裁剪)`、[paddle.nn.ClipGradByNorm](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByNorm_cn.html)`(设定L2范数裁剪)` -、[paddle.nn.ClipGradByGlobalNorm](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByGlobalNorm_cn.html)`(通过全局L2范数裁剪)`,需要先创建一个该类的实例对象,然后将其传入到优化器中,优化器会在更新参数前,对梯度进行裁剪。 ++ 答复:Paddle 的梯度裁剪方式需要在[Optimizer](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/Overview_cn.html#api)中进行设置,目前提供三种梯度裁剪方式,分别是[paddle.nn.ClipGradByValue](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByValue_cn.html)`(设定范围值裁剪)`、[paddle.nn.ClipGradByNorm](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByNorm_cn.html)`(设定 L2 范数裁剪)` +、[paddle.nn.ClipGradByGlobalNorm](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/nn/ClipGradByGlobalNorm_cn.html)`(通过全局 L2 范数裁剪)`,需要先创建一个该类的实例对象,然后将其传入到优化器中,优化器会在更新参数前,对梯度进行裁剪。 注:该类接口在动态图、静态图下均会生效,是动静统一的。目前不支持其他方式的梯度裁剪。 @@ -84,18 +84,18 @@ sdg.step() # 更新参数前,会先对参 ---------- -##### 问题:如何在同一个优化器中定义不同参数的优化策略,比如bias的参数weight_decay的值为0.0,非bias的参数weight_decay的值为0.01? +##### 问题:如何在同一个优化器中定义不同参数的优化策略,比如 bias 的参数 weight_decay 的值为 0.0,非 bias 的参数 weight_decay 的值为 0.01? + 答复: - 1. [AdamW](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/AdamW_cn.html#adamw)的参数`apply_decay_param_fun`可以用来选择哪些参数使用decay_weight策略。 + 1. [AdamW](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/optimizer/AdamW_cn.html#adamw)的参数`apply_decay_param_fun`可以用来选择哪些参数使用 decay_weight 策略。 2. 
在创建`Param`的时候,可以通过设置[ParamAttr](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/ParamAttr_cn.html#paramattr)的属性来控制参数的属性。 ---------- -##### 问题:paddle fluid如何自定义优化器,自定义更新模型参数的规则? +##### 问题:paddle fluid 如何自定义优化器,自定义更新模型参数的规则? + 答复: - 1. 要定义全新优化器,自定义优化器中参数的更新规则,可以通过继承fluid.Optimizer,重写_append_optimize_op方法实现。不同优化器实现原理各不相同,一般流程是先获取learning_rate,gradients参数,可训练参数,以及该优化器自身特别需要的参数,然后实现更新参数的代码,最后返回更新后的参数。 - 在实现更新参数代码时,可以选择直接调用[paddle的API](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/index_cn.html)或者使用[自定义原生算子](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/07_new_op/index_cn.html)。在使用自定义原生算子时,要注意动态图与静态图调用方式有所区别: + 1. 要定义全新优化器,自定义优化器中参数的更新规则,可以通过继承 fluid.Optimizer,重写_append_optimize_op 方法实现。不同优化器实现原理各不相同,一般流程是先获取 learning_rate,gradients 参数,可训练参数,以及该优化器自身特别需要的参数,然后实现更新参数的代码,最后返回更新后的参数。 + 在实现更新参数代码时,可以选择直接调用[paddle 的 API](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/index_cn.html)或者使用[自定义原生算子](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/07_new_op/index_cn.html)。在使用自定义原生算子时,要注意动态图与静态图调用方式有所区别: 需要首先使用`framework.in_dygraph_mode()`判断是否为动态图模式,如果是动态图模式,则需要调用`paddle._C_ops`中相应的优化器算子;如果不是动态图模式,则需要调用`block.append_op` 来添加优化器算子。 - 代码样例可参考[paddle源码](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/optimizer.py)中AdamOptimizer等优化器的实现。 + 代码样例可参考[paddle 源码](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/optimizer.py)中 AdamOptimizer 等优化器的实现。 2. 使用现有的常用优化器,可以在创建`Param`的时候,可以通过设置[ParamAttr](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/ParamAttr_cn.html#paramattr)的属性来控制参数的属性,可以通过设置`regularizer`,`learning_rate`等参数简单设置参数的更新规则。 diff --git a/docs/faq/save_cn.md b/docs/faq/save_cn.md index 3c6bd623856..9790c66c7c3 100644 --- a/docs/faq/save_cn.md +++ b/docs/faq/save_cn.md @@ -1,19 +1,19 @@ # 模型保存常见问题 -##### 问题:静态图的save接口与save_inference_model接口存储的结果有什么区别? +##### 问题:静态图的 save 接口与 save_inference_model 接口存储的结果有什么区别? + 答复:主要差别在于保存结果的应用场景: - 1. 
save接口(2.0的`paddle.static.save`或者1.8的`fluid.io.save`) + 1. save 接口(2.0 的`paddle.static.save`或者 1.8 的`fluid.io.save`) - 该接口用于保存训练过程中的模型和参数,一般包括`*.pdmodel`,`*.pdparams`,`*.pdopt`三个文件。其中`*.pdmodel`是训练使用的完整模型program描述,区别于推理模型,训练模型program包含完整的网络,包括前向网络,反向网络和优化器,而推理模型program仅包含前向网络,`*.pdparams`是训练网络的参数dict,key为变量名,value为Tensor array数值,`*.pdopt`是训练优化器的参数,结构与*.pdparams一致。 + 该接口用于保存训练过程中的模型和参数,一般包括`*.pdmodel`,`*.pdparams`,`*.pdopt`三个文件。其中`*.pdmodel`是训练使用的完整模型 program 描述,区别于推理模型,训练模型 program 包含完整的网络,包括前向网络,反向网络和优化器,而推理模型 program 仅包含前向网络,`*.pdparams`是训练网络的参数 dict,key 为变量名,value 为 Tensor array 数值,`*.pdopt`是训练优化器的参数,结构与*.pdparams 一致。 - 2. save_inference_model接口(2.0的`paddle.static.save_inference_model`或者1.8的`fluid.io.save_inference_model`) + 2. save_inference_model 接口(2.0 的`paddle.static.save_inference_model`或者 1.8 的`fluid.io.save_inference_model`) - 该接口用于保存推理模型和参数,2.0的`paddle.static.save_inference_model`保存结果为`*.pdmodel`和`*.pdiparams`两个文件,其中`*.pdmodel`为推理使用的模型program描述,`*.pdiparams`为推理用的参数,这里存储格式与`*.pdparams`不同(注意两者后缀差个`i`),`*.pdiparams`为二进制Tensor存储格式,不含变量名。1.8的`fluid.io.save_inference_model`默认保存结果为`__model__`文件,和以参数名为文件名的多个分散参数文件,格式与2.0一致。 + 该接口用于保存推理模型和参数,2.0 的`paddle.static.save_inference_model`保存结果为`*.pdmodel`和`*.pdiparams`两个文件,其中`*.pdmodel`为推理使用的模型 program 描述,`*.pdiparams`为推理用的参数,这里存储格式与`*.pdparams`不同(注意两者后缀差个`i`),`*.pdiparams`为二进制 Tensor 存储格式,不含变量名。1.8 的`fluid.io.save_inference_model`默认保存结果为`__model__`文件,和以参数名为文件名的多个分散参数文件,格式与 2.0 一致。 - 3. 关于更多2.0动态图模型保存和加载的介绍可以参考教程:[模型存储与载入](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/02_paddle2.0_develop/08_model_save_load_cn.html) + 3. 
关于更多 2.0 动态图模型保存和加载的介绍可以参考教程:[模型存储与载入](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/02_paddle2.0_develop/08_model_save_load_cn.html) ---------- @@ -22,7 +22,7 @@ + 答复:在增量训练过程中,不仅需要保存模型的参数,也需要保存优化器的参数。 -具体地,在2.0版本中需要使用Layer和Optimizer的`state_dict`和`set_state_dict`方法配合`paddle.save/load`使用。简要示例如下: +具体地,在 2.0 版本中需要使用 Layer 和 Optimizer 的`state_dict`和`set_state_dict`方法配合`paddle.save/load`使用。简要示例如下: ``` import paddle @@ -46,35 +46,35 @@ emb.set_state_dict(para_state_dict) adam.set_state_dict(opti_state_dict) ``` -##### 问题:paddle.load可以加载哪些API产生的结果呢? +##### 问题:paddle.load 可以加载哪些 API 产生的结果呢? + 答复: - 为了更高效地使用paddle存储的模型参数,`paddle.load`支持从除`paddle.save`之外的其他save相关API的存储结果中载入`state_dict`,但是在不同场景中,参数`path`的形式有所不同: + 为了更高效地使用 paddle 存储的模型参数,`paddle.load`支持从除`paddle.save`之外的其他 save 相关 API 的存储结果中载入`state_dict`,但是在不同场景中,参数`path`的形式有所不同: 1. 从`paddle.static.save`或者`paddle.Model().save(training=True)`的保存结果载入:`path`需要是完整的文件名,例如`model.pdparams`或者`model.opt`; 2. 从`paddle.jit.save`或者`paddle.static.save_inference_model`或者`paddle.Model().save(training=False)`的保存结果载入:`path`需要是路径前缀, 例如`model/mnist`,`paddle.load`会从`mnist.pdmodel`和`mnist.pdiparams`中解析`state_dict`的信息并返回。 - 3. 从paddle 1.x API`paddle.fluid.io.save_inference_model`或者`paddle.fluid.io.save_params/save_persistables`的保存结果载入:`path`需要是目录,例如`model`,此处model是一个文件夹路径。 + 3. 从 paddle 1.x API`paddle.fluid.io.save_inference_model`或者`paddle.fluid.io.save_params/save_persistables`的保存结果载入:`path`需要是目录,例如`model`,此处 model 是一个文件夹路径。 - 需要注意的是,如果从`paddle.static.save`或者`paddle.static.save_inference_model`等静态图API的存储结果中载入`state_dict`,动态图模式下参数的结构性变量名将无法被恢复。在将载入的`state_dict`配置到当前Layer中时,需要配置`Layer.set_state_dict`的参数`use_structured_name=False`。 + 需要注意的是,如果从`paddle.static.save`或者`paddle.static.save_inference_model`等静态图 API 的存储结果中载入`state_dict`,动态图模式下参数的结构性变量名将无法被恢复。在将载入的`state_dict`配置到当前 Layer 中时,需要配置`Layer.set_state_dict`的参数`use_structured_name=False`。 -##### 问题:paddle.save 是如何保存state_dict,Layer对象,Tensor以及包含Tensor的嵌套list、tuple、dict的呢? 
+##### 问题:paddle.save 是如何保存 state_dict,Layer 对象,Tensor 以及包含 Tensor 的嵌套 list、tuple、dict 的呢? + 答复: - 1. 对于``state_dict``保存方式与paddle2.0完全相同,我们将``Tensor``转化为``numpy.ndarray``保存。 + 1. 对于``state_dict``保存方式与 paddle2.0 完全相同,我们将``Tensor``转化为``numpy.ndarray``保存。 - 2. 对于其他形式的包含``Tensor``的对象(``Layer``对象,单个``Tensor``以及包含``Tensor``的嵌套``list``、``tuple``、``dict``),在动态图中,将``Tensor``转化为``tuple(Tensor.name, Tensor.numpy())``;在静态图中,将``Tensor``直接转化为``numpy.ndarray``。之所以这样做,是因为当在静态图中使用动态保存的模型时,有时需要``Tensor``的名字因此将名字保存下来,同时,在``load``时区分这个``numpy.ndarray``是由Tenosr转化而来还是本来就是``numpy.ndarray``;保存静态图的``Tensor``时,通常通过``Variable.get_value``得到``Tensor``再使用``paddle.save``保存``Tensor``,此时,``Variable``是有名字的,这个``Tensor``是没有名字的,因此将静态图``Tensor``直接转化为``numpy.ndarray``保存。 - > 此处动态图Tensor和静态图Tensor是不相同的,动态图Tensor有name、stop_gradient等属性;而静态图的Tensor是比动态图Tensor轻量级的,只包含place等基本信息,不包含名字等。 + 2. 对于其他形式的包含``Tensor``的对象(``Layer``对象,单个``Tensor``以及包含``Tensor``的嵌套``list``、``tuple``、``dict``),在动态图中,将``Tensor``转化为``tuple(Tensor.name, Tensor.numpy())``;在静态图中,将``Tensor``直接转化为``numpy.ndarray``。之所以这样做,是因为当在静态图中使用动态保存的模型时,有时需要``Tensor``的名字因此将名字保存下来,同时,在``load``时区分这个``numpy.ndarray``是由 Tenosr 转化而来还是本来就是``numpy.ndarray``;保存静态图的``Tensor``时,通常通过``Variable.get_value``得到``Tensor``再使用``paddle.save``保存``Tensor``,此时,``Variable``是有名字的,这个``Tensor``是没有名字的,因此将静态图``Tensor``直接转化为``numpy.ndarray``保存。 + > 此处动态图 Tensor 和静态图 Tensor 是不相同的,动态图 Tensor 有 name、stop_gradient 等属性;而静态图的 Tensor 是比动态图 Tensor 轻量级的,只包含 place 等基本信息,不包含名字等。 -##### 问题:将Tensor转换为numpy.ndarray或者tuple(Tensor.name, Tensor.numpy())不是惟一可译编码,为什么还要做这样的转换呢? +##### 问题:将 Tensor 转换为 numpy.ndarray 或者 tuple(Tensor.name, Tensor.numpy())不是惟一可译编码,为什么还要做这样的转换呢? + 答复: - 1. 我们希望``paddle.save``保存的模型能够不依赖paddle框架就能够被用户解析(pickle格式模型),这样用户可以方便的做调试,轻松的看到保存的参数的数值。其他框架的模型与paddle模型做转化也会容易很多。 + 1. 我们希望``paddle.save``保存的模型能够不依赖 paddle 框架就能够被用户解析(pickle 格式模型),这样用户可以方便的做调试,轻松的看到保存的参数的数值。其他框架的模型与 paddle 模型做转化也会容易很多。 2. 
我们希望保存的模型尽量小,只保留了能够满足大多场景的信息(动态图保存名字和数值,静态图只保存数值),如果需要``Tensor``的其他信息(例如``stop_gradient``),可以向被保存的对象中添加这些信息,``load``之后再还原这些信息。这样的转换方式可以覆盖绝大多数场景,一些特殊场景也是可以通过一些方法解决的,如下面的问题。 -##### 问题:什么情况下save与load的结果不一致呢,应该如何避免这种情况发生呢? +##### 问题:什么情况下 save 与 load 的结果不一致呢,应该如何避免这种情况发生呢? + 答复: - 以下情况会造成save与load的结果不一致: + 以下情况会造成 save 与 load 的结果不一致: 1. 被保存的对象包含动态图``Tensor``同时包含``tuple(string, numpy.ndarray)``; 2. 被保存的对象包含静态图``Tensor``,同时包含``numpy.ndarray``或者``tuple(string, numpy.ndarray)``; 3. 被保存的对象只包含``numpy.ndarray``,但是包含``tuple(string, numpy.ndarray)``。 @@ -84,17 +84,17 @@ adam.set_state_dict(opti_state_dict) 2. 如果被保存的对象包含``numpy.ndarray``,尽量在``load``时设置``return_numpy = True``。 3. 对于``Layer``对象,只保存参数的值和名字,如果需要其他信息(例如``stop_gradient``),请将手将这些信息打包成`dict`等,一并保存。 -##### 问题:paddle 2.x 如何保存模型文件?如何保存paddle 1.x 中的 model 文件? +##### 问题:paddle 2.x 如何保存模型文件?如何保存 paddle 1.x 中的 model 文件? + 答复: - 1. 在paddle2.x可使用``paddle.jit.save``接口以及``paddle.static.save_inference_model``,通过指定``path``来保存成为``path.pdmodel``和``path.pdiparams``,可对应paddle1.x中使用``save_inference_model``指定dirname和params_filename生成``dirname/__model__``和``dirname/params文件``。paddle2.x保存模型文件详情可参考: + 1. 在 paddle2.x 可使用``paddle.jit.save``接口以及``paddle.static.save_inference_model``,通过指定``path``来保存成为``path.pdmodel``和``path.pdiparams``,可对应 paddle1.x 中使用``save_inference_model``指定 dirname 和 params_filename 生成``dirname/__model__``和``dirname/params 文件``。paddle2.x 保存模型文件详情可参考: - [paddle.jit.save/load](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/02_paddle2.0_develop/08_model_save_load_cn.html#dongtaitumoxing-canshubaocunzairu-xunliantuili) - [paddle.static.save/load_inference_model](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/02_paddle2.0_develop/08_model_save_load_cn.html#jingtaitumoxing-canshubaocunzairu-tuilibushu) - 2. 如果想要在paddle2.x中读取paddle 1.x中的model文件,可参考: + 2. 
如果想要在 paddle2.x 中读取 paddle 1.x 中的 model 文件,可参考: - [兼容载入旧格式模型](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.2rc/guides/01_paddle2.0_introduction/load_old_format_model.html#cn-guides-load-old-format-model) -##### 问题:paddle如何单独load存下来所有模型变量中某一个变量,然后修改变量中的值? +##### 问题:paddle 如何单独 load 存下来所有模型变量中某一个变量,然后修改变量中的值? + 答复: 1. 如果目的是修改存储变量的值,可以使用``paddle.save``保存下来所有变量,然后再使用``paddle.load``将所有变量载入后,查找目标变量进行修改,示例代码如下: @@ -106,7 +106,7 @@ layer = paddle.nn.Linear(3, 4) path = 'example/model.pdparams' paddle.save(layer.state_dict(), path) layer_param = paddle.load(path) -# 修改fc_0.b_0的值 +# 修改 fc_0.b_0 的值 layer_param["fc_0.b_0"] = 10 ``` @@ -124,7 +124,7 @@ tensor_bias = paddle.load(path_b) tensor_bias[0] = 10 ``` -更多介绍请参考以下API文档: +更多介绍请参考以下 API 文档: - [paddle.save](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/framework/io/save_cn.html) - [paddle.load](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/framework/io/load_cn.html) diff --git a/docs/faq/train_cn.md b/docs/faq/train_cn.md index e7cb958c4bd..fac22f20a3c 100644 --- a/docs/faq/train_cn.md +++ b/docs/faq/train_cn.md @@ -8,7 +8,7 @@ ##### 问题:请问`paddle.matmul`和`paddle.multiply`有什么区别? 
-+ 答复:`matmul`支持的两个tensor的矩阵乘操作。`muliply`是支持两个tensor进行逐元素相乘。 ++ 答复:`matmul`支持的两个 tensor 的矩阵乘操作。`multiply`是支持两个 tensor 进行逐元素相乘。 ---------- @@ -27,23 +27,23 @@ torch.gather(input, dim, index, *, sparse_grad=False, out=None) - `paddle.gather` - - 输入形状:`x`可以是任意的`N`维Tensor。但`index`必须是形状为`[M]`的一维Tensor,或形状为`[M, 1]`的二维Tensor。 + - 输入形状:`x`可以是任意的`N`维 Tensor。但`index`必须是形状为`[M]`的一维 Tensor,或形状为`[M, 1]`的二维 Tensor。 - - 输出形状:输出Tensor `out`的形状`shape_out`和`x`的形状`shape_x`的关系为:`shape_out[i] = shape_x[i] if i != axis else M`。 + - 输出形状:输出 Tensor `out`的形状`shape_out`和`x`的形状`shape_x`的关系为:`shape_out[i] = shape_x[i] if i != axis else M`。 - 计算公式:`out[i_1][i_2]...[i_axis]...[i_N] = x[i_1][i_2]...[index[i_axis]]...[i_N]` 。 - - 举例说明:假设`x`的形状为`[N1, N2, N3]`,`index`的形状为`[M]`,`axis`的值为1,那么输出`out`的形状为`[N1, M, N3]`,且`out[i_1][i_2][i_3] = x[i_1][index[i_2]][i_3]`。 + - 举例说明:假设`x`的形状为`[N1, N2, N3]`,`index`的形状为`[M]`,`axis`的值为 1,那么输出`out`的形状为`[N1, M, N3]`,且`out[i_1][i_2][i_3] = x[i_1][index[i_2]][i_3]`。 - `torch.gather` - - 输入形状:`input`可以是任意的`N`维Tensor,且`index.rank`必须等于`input.rank`。 + - 输入形状:`input`可以是任意的`N`维 Tensor,且`index.rank`必须等于`input.rank`。 - - 输出形状:输出Tensor `out`的形状与`index`相同。 + - 输出形状:输出 Tensor `out`的形状与`index`相同。 - 计算公式:`out[i_1][i_2]...[i_dim]...[i_N] = input[i_1][i_2]...[index[i_1][i_2]...[i_N]]...[i_N]`。 - - 举例说明:假设`x`的形状为`[N1, N2, N3]`,`index`的形状为`[M1, M2, M3]`,`dim`的值为1,那么输出`out`的形状为`[M1, M2, M3]`,且`out[i_1][i_2][i_3] = input[i_1][index[i_1][i_2][i_3]][i_3]`。 + - 举例说明:假设`x`的形状为`[N1, N2, N3]`,`index`的形状为`[M1, M2, M3]`,`dim`的值为 1,那么输出`out`的形状为`[M1, M2, M3]`,且`out[i_1][i_2][i_3] = input[i_1][index[i_1][i_2][i_3]][i_3]`。 - 异同比较 @@ -53,7 +53,7 @@ torch.gather(input, dim, index, *, sparse_grad=False, out=None) ---------- -##### 问题:在模型组网时,inplace参数的设置会影响梯度回传吗?经过不带参数的op之后,梯度是否会保留下来? +##### 问题:在模型组网时,inplace 参数的设置会影响梯度回传吗?经过不带参数的 op 之后,梯度是否会保留下来? 
+ 答复:inplace 参数不会影响梯度回传。只要用户没有手动设置`stop_gradient=True`,梯度都会保留下来。 @@ -65,19 +65,19 @@ torch.gather(input, dim, index, *, sparse_grad=False, out=None) ---------- -##### 问题:使用CPU进行模型训练,如何利用多处理器进行加速? +##### 问题:使用 CPU 进行模型训练,如何利用多处理器进行加速? -+ 答复:在2.0版本动态图模式下,CPU训练加速可以从以下两点进行配置: ++ 答复:在 2.0 版本动态图模式下,CPU 训练加速可以从以下两点进行配置: -1. 使用多进程DataLoader加速数据读取:训练数据较多时,数据处理往往会成为训练速度的瓶颈,paddle提供了异步数据读取接口DataLoader,可以使用多进程进行数据加载,充分利用多处理的优势,具体使用方法及示例请参考API文档:[paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader)。 +1. 使用多进程 DataLoader 加速数据读取:训练数据较多时,数据处理往往会成为训练速度的瓶颈,paddle 提供了异步数据读取接口 DataLoader,可以使用多进程进行数据加载,充分利用多处理的优势,具体使用方法及示例请参考 API 文档:[paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader)。 -2. 推荐使用支持[MKL(英特尔数学核心函数库)](https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/onemkl.html)的paddle安装包,MKL相比Openblas等通用计算库在计算速度上有显著的优势,能够提升您的训练效率。 +2. 推荐使用支持[MKL(英特尔数学核心函数库)](https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/onemkl.html)的 paddle 安装包,MKL 相比 Openblas 等通用计算库在计算速度上有显著的优势,能够提升您的训练效率。 ---------- -##### 问题:使用NVIDIA多卡运行Paddle时报错 Nccl error,如何解决? +##### 问题:使用 NVIDIA 多卡运行 Paddle 时报错 Nccl error,如何解决? -+ 答复:这个错误大概率是环境配置不正确导致的,建议您使用NVIDIA官方提供的方法参考检测自己的环境是否配置正确。具体地,可以使用[ NCCL Tests ](https://github.com/NVIDIA/nccl-tests) 检测您的环境;如果检测不通过,请登录[ NCCL官网 ](https://developer.nvidia.com/zh-cn)下载NCCl,安装后重新检测。 ++ 答复:这个错误大概率是环境配置不正确导致的,建议您使用 NVIDIA 官方提供的方法参考检测自己的环境是否配置正确。具体地,可以使用[ NCCL Tests ](https://github.com/NVIDIA/nccl-tests) 检测您的环境;如果检测不通过,请登录[ NCCL 官网 ](https://developer.nvidia.com/zh-cn)下载 NCCL,安装后重新检测。 ---------- @@ -102,38 +102,38 @@ torch.gather(input, dim, index, *, sparse_grad=False, out=None) 2. 
开启以下三个选项: ```bash -#一旦不再使用即释放内存垃圾,=1.0 垃圾占用内存大小达到10G时,释放内存垃圾 +#一旦不再使用即释放内存垃圾,=1.0 垃圾占用内存大小达到 10G 时,释放内存垃圾 export FLAGS_eager_delete_tensor_gb=0.0 -#启用快速垃圾回收策略,不等待cuda kernel 结束,直接释放显存 +#启用快速垃圾回收策略,不等待 cuda kernel 结束,直接释放显存 export FLAGS_fast_eager_deletion_mode=1 -#该环境变量设置只占用0%的显存 +#该环境变量设置只占用 0%的显存 export FLAGS_fraction_of_gpu_memory_to_use=0 ``` 详细请参考官方文档[存储分配与优化](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.html) 调整相关配置。 -此外,建议您使用[AI Studio 学习与 实训社区训练](https://aistudio.baidu.com/aistudio/index),获取免费GPU算力,提升您的训练效率。 +此外,建议您使用[AI Studio 学习与 实训社区训练](https://aistudio.baidu.com/aistudio/index),获取免费 GPU 算力,提升您的训练效率。 ---------- -##### 问题:如何提升模型训练时的GPU利用率? +##### 问题:如何提升模型训练时的 GPU 利用率? + 答复:有如下两点建议: - 1. 如果数据预处理耗时较长,可使用DataLoader加速数据读取过程,具体请参考API文档:[paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader)。 + 1. 如果数据预处理耗时较长,可使用 DataLoader 加速数据读取过程,具体请参考 API 文档:[paddle.io.DataLoader](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/io/DataLoader_cn.html#dataloader)。 - 2. 如果提高GPU计算量,可以增大`batch_size`,但是注意同时调节其他超参数以确保训练配置的正确性。 + 2. 如果提高 GPU 计算量,可以增大`batch_size`,但是注意同时调节其他超参数以确保训练配置的正确性。 以上两点均为比较通用的方案,其他的优化方案和模型相关,可参考官方模型库 [models](https://github.com/PaddlePaddle/models) 中的具体示例。 ---------- -##### 问题:如何处理变长ID导致程序内存占用过大的问题? +##### 问题:如何处理变长 ID 导致程序内存占用过大的问题? -+ 答复:请先参考[显存分配与优化文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.html) 开启存储优化开关,包括显存垃圾及时回收和Op内部的输出复用输入等。若存储空间仍然不够,建议: ++ 答复:请先参考[显存分配与优化文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/advanced_guide/performance_improving/singlenode_training_improving/memory_optimize.html) 开启存储优化开关,包括显存垃圾及时回收和 Op 内部的输出复用输入等。若存储空间仍然不够,建议: 1. 降低 `batch_size` ; - 2. 对index进行排序,减少padding的数量。 + 2. 
对 index 进行排序,减少 padding 的数量。 ---------- @@ -142,25 +142,25 @@ export FLAGS_fraction_of_gpu_memory_to_use=0 + 答复:不收敛的原因有很多,可以参考如下方式排查: 1. 检查数据集中训练数据的准确率,数据是否有错误,特征是否归一化; - 2. 简化网络结构,先基于benchmark实验,确保在baseline网络结构和数据集上的收敛结果正确; + 2. 简化网络结构,先基于 benchmark 实验,确保在 baseline 网络结构和数据集上的收敛结果正确; 3. 对于复杂的网络,每次只增加一个改动,确保改动后的网络正确; - 4. 检查网络在训练数据上的Loss是否下降; + 4. 检查网络在训练数据上的 Loss 是否下降; 5. 检查学习率、优化算法是否合适,学习率过大会导致不收敛; 6. 检查`batch_size`设置是否合适,`batch_size`过小会导致不收敛; 7. 检查梯度计算是否正确,是否有梯度过大的情况,是否为`NaN`。 ---------- -##### 问题:Loss为NaN,如何处理? +##### 问题:Loss 为 NaN,如何处理? -+ 答复:可能由于网络的设计问题,Loss过大(Loss为NaN)会导致梯度爆炸。如果没有改网络结构,但是出现了NaN,可能是数据读取导致,比如标签对应关系错误。还可以检查下网络中是否会出现除0,log0的操作等。 ++ 答复:可能由于网络的设计问题,Loss 过大(Loss 为 NaN)会导致梯度爆炸。如果没有改网络结构,但是出现了 NaN,可能是数据读取导致,比如标签对应关系错误。还可以检查下网络中是否会出现除 0,log0 的操作等。 ---------- ##### 问题:训练后的模型很大,如何压缩? -+ 答复:建议您使用飞桨模型压缩工具[PaddleSlim](https://www.paddlepaddle.org.cn/tutorials/projectdetail/489539)。PaddleSlim是飞桨开源的模型压缩工具库,包含模型剪裁、定点量化、知识蒸馏、超参搜索和模型结构搜索等一系列模型压缩策略,专注于**模型小型化技术**。 ++ 答复:建议您使用飞桨模型压缩工具[PaddleSlim](https://www.paddlepaddle.org.cn/tutorials/projectdetail/489539)。PaddleSlim 是飞桨开源的模型压缩工具库,包含模型剪裁、定点量化、知识蒸馏、超参搜索和模型结构搜索等一系列模型压缩策略,专注于**模型小型化技术**。 ---------- @@ -172,9 +172,9 @@ export FLAGS_fraction_of_gpu_memory_to_use=0 ##### 问题:预测时如何打印模型中每一步的耗时? -+ 答复:可以在设置config时使用`config.enable_profile()`统计预测时每个算子和数据搬运的耗时。对于推理api的使用,可以参考官网文档[Python预测API介绍](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/python_infer_cn.html)。示例代码: ++ 答复:可以在设置 config 时使用`config.enable_profile()`统计预测时每个算子和数据搬运的耗时。对于推理 api 的使用,可以参考官网文档[Python 预测 API 介绍](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/05_inference_deployment/inference/python_infer_cn.html)。示例代码: ```python -# 设置config: +# 设置 config: def set_config(args): config = Config(args.model_file, args.params_file) config.disable_gpu() @@ -189,17 +189,17 @@ def set_config(args): ##### 问题:模型训练时如何进行梯度裁剪? 
-+ 答复:设置Optimizer中的`grad_clip`参数值。 ++ 答复:设置 Optimizer 中的`grad_clip`参数值。 ---------- -##### 问题:静态图模型如何拿到某个variable的梯度? +##### 问题:静态图模型如何拿到某个 variable 的梯度? + 答复:飞桨提供以下三种方式,用户可根据需求选择合适的方法: 1. 使用[paddle.static.Print()](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/static/Print_cn.html#print)接口,可以打印中间变量及其梯度。 - 2. 将变量梯度名放到fetch_list里,通过[Executor.run()](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/static/Executor_cn.html#run)获取,一般variable的梯度名是variable的名字加上 "@GRAD"。 - 3. 对于参数(不适用于中间变量和梯度),还可以通过[Scope.find_var()](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/static/global_scope_cn.html#global-scope)接口,通过变量名字查找对应的tensor。 + 2. 将变量梯度名放到 fetch_list 里,通过[Executor.run()](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/static/Executor_cn.html#run)获取,一般 variable 的梯度名是 variable 的名字加上 "@GRAD"。 + 3. 对于参数(不适用于中间变量和梯度),还可以通过[Scope.find_var()](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/static/global_scope_cn.html#global-scope)接口,通过变量名字查找对应的 tensor。 后两个方法需要使用变量名,飞桨中变量的命名规则请参见[Name](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_guides/low_level/program.html#api-guide-name) 。 @@ -223,15 +223,15 @@ exe.run(paddle.static.default_startup_program()) loss, loss_g, fc_bias_g = exe.run( paddle.static.default_main_program(), feed={'data': np.random.rand(4, 2).astype('float32')}, - fetch_list=[loss, loss.name + '@GRAD', 'fc.b_0@GRAD']) # 通过将变量名加入到fetch_list获取变量 + fetch_list=[loss, loss.name + '@GRAD', 'fc.b_0@GRAD']) # 通过将变量名加入到 fetch_list 获取变量 print(loss, loss_g, fc_bias_g) -print(paddle.static.global_scope().find_var('fc.b_0').get_tensor()) # 通过scope.find_var 获取变量 +print(paddle.static.global_scope().find_var('fc.b_0').get_tensor()) # 通过 scope.find_var 获取变量 ``` ---------- -##### 问题:paddle有对应torch.masked_fill函数api吗,还是需要自己实现? +##### 问题:paddle 有对应 torch.masked_fill 函数 api 吗,还是需要自己实现? 
+ 答复:由于框架设计上的区别,没有对应的 api,但是可以使用 [paddle.where](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/where_cn.html) 实现相同的功能。 @@ -265,17 +265,17 @@ out = masked_fill(x, mask, 2) ---------- -##### 问题:在paddle中如何实现`torch.nn.utils.rnn.pack_padded_sequence`和`torch.nn.utils.rnn.pad_packed_sequence`这两个API? +##### 问题:在 paddle 中如何实现`torch.nn.utils.rnn.pack_padded_sequence`和`torch.nn.utils.rnn.pad_packed_sequence`这两个 API? -+ 答复:目前paddle中没有和上述两个API完全对应的实现。关于torch中这两个API的详细介绍可以参考知乎上的文章 [pack_padded_sequence 和 pad_packed_sequence](https://zhuanlan.zhihu.com/p/342685890) : -`pack_padded_sequence`的功能是将mini-batch数据进行压缩,压缩掉无效的填充值,然后输入RNN网络中;`pad_packed_sequence`则是把RNN网络输出的压紧的序列再填充回来,便于进行后续的处理。 -在paddle中,大家可以在GRU、LSTM等RNN网络中输入含有填充值的mini-batch数据的同时传入对应的`sequence_length`参数实现上述等价功能,具体用法可以参考 [RNN](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/RNN_cn.html#rnn) 。 ++ 答复:目前 paddle 中没有和上述两个 API 完全对应的实现。关于 torch 中这两个 API 的详细介绍可以参考知乎上的文章 [pack_padded_sequence 和 pad_packed_sequence](https://zhuanlan.zhihu.com/p/342685890) : +`pack_padded_sequence`的功能是将 mini-batch 数据进行压缩,压缩掉无效的填充值,然后输入 RNN 网络中;`pad_packed_sequence`则是把 RNN 网络输出的压紧的序列再填充回来,便于进行后续的处理。 +在 paddle 中,大家可以在 GRU、LSTM 等 RNN 网络中输入含有填充值的 mini-batch 数据的同时传入对应的`sequence_length`参数实现上述等价功能,具体用法可以参考 [RNN](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/RNN_cn.html#rnn) 。 ---------- -##### 问题:paddle是否有爱因斯坦求和(einsum)这个api? +##### 问题:paddle 是否有爱因斯坦求和(einsum)这个 api? 
-+ 答复:paddle在2.2rc 版本之后,新增了[paddle.einsum](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/einsum_cn.html#einsum),在 develop 和2.2rc 之后的版本中都可以正常使用。 ++ 答复:paddle 在 2.2rc 版本之后,新增了[paddle.einsum](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/einsum_cn.html#einsum),在 develop 和 2.2rc 之后的版本中都可以正常使用。 ---------- @@ -284,7 +284,7 @@ out = masked_fill(x, mask, 2) ##### 问题:[BatchNorm](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/BatchNorm_cn.html#batchnorm)在训练时加载预测时保存的模型参数时报错 AssertionError: Optimizer set error, batch_norm_1.w_0_moment_0 should in state dict. -+ 答复:BatchNorm在train模式和eval模式下需要的变量有差别,在train模式下要求传入优化器相关的变量,在eval模式下不管是保存参数还是加载参数都是不需要优化器相关变量的,因此如果在train模式下加载eval模式下保存的checkpoint,没有优化器相关的变量则会报错。如果想在train模式下加载eval模式下保存的checkpoint的话,用 ```paddle.load``` 加载进来参数之后,通过 ```set_state_dict``` 接口把参数赋值给模型,参考以下示例: ++ 答复:BatchNorm 在 train 模式和 eval 模式下需要的变量有差别,在 train 模式下要求传入优化器相关的变量,在 eval 模式下不管是保存参数还是加载参数都是不需要优化器相关变量的,因此如果在 train 模式下加载 eval 模式下保存的 checkpoint,没有优化器相关的变量则会报错。如果想在 train 模式下加载 eval 模式下保存的 checkpoint 的话,用 ```paddle.load``` 加载进来参数之后,通过 ```set_state_dict``` 接口把参数赋值给模型,参考以下示例: ```python import paddle diff --git a/docs/guides/06_distributed_training/cluster_overview_ps_cn.rst b/docs/guides/06_distributed_training/cluster_overview_ps_cn.rst index a48aa16f580..a1308048bf4 100644 --- a/docs/guides/06_distributed_training/cluster_overview_ps_cn.rst +++ b/docs/guides/06_distributed_training/cluster_overview_ps_cn.rst @@ -13,13 +13,13 @@ :width: 800 :alt: whole_process :align: center -图1 点击率预估模型的训练、推理全流程 +图 1 点击率预估模型的训练、推理全流程 图中的分布式训练方式称为流式训练(也称在线学习),即模型训练数据集并非固定,而是随时间流式地加入到训练过程中,实时更新模型并配送到线上推理服务中,因此对训练时间和模型保存、配送时间有严格要求。 除此之外,该场景下的模型训练还有以下两个特点: -1. 稀疏参数量大:模型特征中包含大量的id类特征(例如userid、itemid),这些id类特征会对应大量的embedding(称为稀疏参数),通常参数量在百亿级别及以上,且随训练过程不断增加。 +1. 稀疏参数量大:模型特征中包含大量的 id 类特征(例如 userid、itemid),这些 id 类特征会对应大量的 embedding(称为稀疏参数),通常参数量在百亿级别及以上,且随训练过程不断增加。 2. 
训练数据量大:线上服务会源源不断产生训练数据进入分布式训练中,训练数据量级巨大,单机训练速度过慢。 @@ -36,18 +36,18 @@ :width: 600 :alt: ps :align: center -图2 经典参数服务器架构 +图 2 经典参数服务器架构 -飞桨为了应对各种严苛的实际业务挑战,早在 2018 年,飞桨的纯 CPU 参数服务器模式就可以支持万亿规模稀疏参数的模型训练。之后随着模型中网络结构更加复杂,以及对训练效率和性价比的进一步追求,飞桨参数服务器技术也在更新换代:从早期 Worker 节点的硬件型号必须一致的纯 CPU 参数服务器到纯 GPU 参数服务器、纯 XPU 参数服务器,再到 CPU、GPU、XPU等其它 AI 硬件混布调度训练的异构参数服务器,始终引领参数服务器技术的发展;同时也得到了更多的应用落地,如视频推荐、搜索推荐等等。 +飞桨为了应对各种严苛的实际业务挑战,早在 2018 年,飞桨的纯 CPU 参数服务器模式就可以支持万亿规模稀疏参数的模型训练。之后随着模型中网络结构更加复杂,以及对训练效率和性价比的进一步追求,飞桨参数服务器技术也在更新换代:从早期 Worker 节点的硬件型号必须一致的纯 CPU 参数服务器到纯 GPU 参数服务器、纯 XPU 参数服务器,再到 CPU、GPU、XPU 等其它 AI 硬件混布调度训练的异构参数服务器,始终引领参数服务器技术的发展;同时也得到了更多的应用落地,如视频推荐、搜索推荐等等。 -1 纯CPU参数服务器 +1 纯 CPU 参数服务器 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 纯 CPU 参数服务器,由高性能异步训练 Worker、高效通信策略和高性能 Server 组成。 -其中Worker侧为数据并行,每个worker独立进行样本读取、参数拉取、前向后向计算和梯度回传; +其中 Worker 侧为数据并行,每个 worker 独立进行样本读取、参数拉取、前向后向计算和梯度回传; -Server端为模型并行,采用双层哈希实现了大规模参数的存储和更新。为了进一步降低成本,飞桨还支持SSD和AEP等硬件进行参数存储,即AEP+内存或者SSD+内存两级存储; +Server 端为模型并行,采用双层哈希实现了大规模参数的存储和更新。为了进一步降低成本,飞桨还支持 SSD 和 AEP 等硬件进行参数存储,即 AEP+内存或者 SSD+内存两级存储; 在通信策略上,使用了消息队列控制梯度的聚合,实现了自适应梯度聚合以降频通信; @@ -59,10 +59,10 @@ Server端为模型并行,采用双层哈希实现了大规模参数的存储 :width: 600 :alt: ps :align: center -图3 纯CPU参数服务器 +图 3 纯 CPU 参数服务器 -2 纯GPU参数服务器 +2 纯 GPU 参数服务器 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 随着模型网络越来越复杂,对算力要求越来越高,在数据量不变的情况下,CPU 计算性能差的弱势就会显现,虽然可以通过增加 CPU 机器数量来解决,甚至可以增加上百台,但是这种方法不仅成本大幅提高,而且集群的稳定性和扩展性也存在较大的问题。因此飞桨引入了纯 GPU 参数服务器来提升计算性能,之前 100 台 CPU 机器才能训练的模型,仅需 1 台多卡 GPU 机器即可完成训练。当然,同时也要解决因为硬件更替所带来的问题。 @@ -77,7 +77,7 @@ RPC&NCCL 混合通信可以将部分稀疏参数采用 RPC 协议跨节点通信 :width: 600 :alt: ps :align: center -图4 纯 GPU 参数服务器 +图 4 纯 GPU 参数服务器 3 异构参数服务器 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -92,7 +92,7 @@ RPC&NCCL 混合通信可以将部分稀疏参数采用 RPC 协议跨节点通信 :width: 600 :alt: ps :align: center -图5 异构参数服务器 +图 5 异构参数服务器 4 使用方法 @@ -105,12 +105,12 @@ RPC&NCCL 混合通信可以将部分稀疏参数采用 RPC 协议跨节点通信 .. 
code-block:: python import paddle - # 导入分布式训练需要的依赖fleet + # 导入分布式训练需要的依赖 fleet import paddle.distributed.fleet as fleet # 导入模型 from model import WideDeepModel - # 参数服务器目前只支持静态图,需要使用enable_static() + # 参数服务器目前只支持静态图,需要使用 enable_static() paddle.enable_static() # 加载模型并构造优化器 @@ -118,7 +118,7 @@ RPC&NCCL 混合通信可以将部分稀疏参数采用 RPC 协议跨节点通信 model.net(is_train=True) optimizer = paddle.optimizer.SGD(learning_rate=0.0001) - # 初始化fleet + # 初始化 fleet fleet.init(is_collective=False) # 设置分布式策略(异步更新方式) strategy = fleet.DistributedStrategy() @@ -143,29 +143,29 @@ RPC&NCCL 混合通信可以将部分稀疏参数采用 RPC 协议跨节点通信 其中示例代码中省略的,训练节点的一个完整的训练过程应该包含以下几个部分: 1. 获取之前训练已经保存好的模型,并加载模型(如果之前没有保存模型,则跳过加载模型这一步)。 - 2. 分Pass训练,在每一个Pass的训练过程中,分为如下几步: + 2. 分 Pass 训练,在每一个 Pass 的训练过程中,分为如下几步: a. 加载数据。 - b. 分布式训练并获取训练指标(AUC等)。 + b. 分布式训练并获取训练指标(AUC 等)。 c. 分布式预测:主要用于召回模块的离线建库部分。 3. 保存模型: a. Checkpoint Model:用于下次训练开始时的模型加载部分。 b. Inference Model:用于线上推理部署。 -完整训练示例代码请参考:\ `CPUPS示例 `_\、\ `GPUPS示例 `_\,本节只介绍飞桨参数服务器在训练过程中需要使用到的与单机不同的API。 +完整训练示例代码请参考:\ `CPUPS 示例 `_\、\ `GPUPS 示例 `_\,本节只介绍飞桨参数服务器在训练过程中需要使用到的与单机不同的 API。 4.1 大规模稀疏参数 """""""""""" -为存储海量的稀疏参数,参数服务器使用 ``paddle.static.nn.sparse_embedding()`` 取代 ``paddle.static.nn.embedding()`` 作为embedding lookup层的算子。 +为存储海量的稀疏参数,参数服务器使用 ``paddle.static.nn.sparse_embedding()`` 取代 ``paddle.static.nn.embedding()`` 作为 embedding lookup 层的算子。 -``paddle.static.nn.sparse_embedding()`` 采用稀疏模式进行梯度的计算和更新,输入接受[0, UINT64]范围内的特征ID,支持稀疏参数各种高阶配置(特征准入、退场等),更加符合流式训练的功能需求。 +``paddle.static.nn.sparse_embedding()`` 采用稀疏模式进行梯度的计算和更新,输入接受[0, UINT64]范围内的特征 ID,支持稀疏参数各种高阶配置(特征准入、退场等),更加符合流式训练的功能需求。 .. 
code-block:: python import paddle - # sparse_embedding输入接受[0, UINT64]范围内的特征ID,参数size的第一维词表大小无用,可指定任意整数 - # 大规模稀疏场景下,参数规模初始为0,会随着训练的进行逐步扩展 + # sparse_embedding 输入接受[0, UINT64]范围内的特征 ID,参数 size 的第一维词表大小无用,可指定任意整数 + # 大规模稀疏场景下,参数规模初始为 0,会随着训练的进行逐步扩展 sparse_feature_num = 10 embedding_size = 64 @@ -180,16 +180,16 @@ RPC&NCCL 混合通信可以将部分稀疏参数采用 RPC 协议跨节点通信 4.2 数据加载 """""""""""" -由于搜索推荐场景涉及到的训练数据通常较大,为提升训练中的数据读取效率,参数服务器采用Dataset进行高性能的IO。 +由于搜索推荐场景涉及到的训练数据通常较大,为提升训练中的数据读取效率,参数服务器采用 Dataset 进行高性能的 IO。 -Dataset是为多线程及全异步方式量身打造的数据读取方式,每个数据读取线程会与一个训练线程耦合,形成了多生产者-多消费者的模式,会极大的加速模型训练过程。 +Dataset 是为多线程及全异步方式量身打造的数据读取方式,每个数据读取线程会与一个训练线程耦合,形成了多生产者-多消费者的模式,会极大的加速模型训练过程。 .. image:: ./images/dataset.JPG :width: 600 :alt: dataset :align: center -Dataset有两种不同的类型: +Dataset 有两种不同的类型: 1. QueueDataset:随训练流式读取数据。 2. InmemoryDataset:训练数据全部读入训练节点内存,然后分配至各个训练线程,支持全局秒级打散数据(global_shuffle)。 @@ -199,8 +199,8 @@ Dataset有两种不同的类型: dataset = paddle.distributed.QueueDataset() thread_num = 1 - # use_var指定网络中的输入数据,pipe_command指定数据处理脚本 - # 要求use_var中输入数据的顺序与数据处理脚本输出的特征顺序一一对应 + # use_var 指定网络中的输入数据,pipe_command 指定数据处理脚本 + # 要求 use_var 中输入数据的顺序与数据处理脚本输出的特征顺序一一对应 dataset.init(use_var=model.inputs, pipe_command="python reader.py", batch_size=batch_size, @@ -209,15 +209,15 @@ Dataset有两种不同的类型: train_files_list = [os.path.join(train_data_path, x) for x in os.listdir(train_data_path)] - # set_filelist指定dataset读取的训练文件的列表 + # set_filelist 指定 dataset 读取的训练文件的列表 dataset.set_filelist(train_files_list) -更多dataset用法参见\ `使用InMemoryDataset/QueueDataset进行训练 `_\。 +更多 dataset 用法参见\ `使用 InMemoryDataset/QueueDataset 进行训练 `_\。 4.3 分布式训练及预测 """""""""""" -与数据加载dataset相对应的,使用 ``exe.train_from_dataset()`` 接口进行分布式训练。 +与数据加载 dataset 相对应的,使用 ``exe.train_from_dataset()`` 接口进行分布式训练。 .. 
code-block:: python @@ -247,17 +247,17 @@ Dataset有两种不同的类型: 分布式指标是指在分布式训练任务中用以评测模型效果的指标。 由于参数服务器存在多个训练节点,传统的指标计算只能评测当前节点的数据,而分布式指标需要汇总所有节点的全量数据,进行全局指标计算。 -分布式指标计算的接口位于 ``paddle.distributed.fleet.metrics`` ,其中封装了包括AUC、Accuracy、MSE等常见指标计算。 +分布式指标计算的接口位于 ``paddle.distributed.fleet.metrics`` ,其中封装了包括 AUC、Accuracy、MSE 等常见指标计算。 -以AUC指标为例,全局AUC指标计算示例如下: +以 AUC 指标为例,全局 AUC 指标计算示例如下: .. code-block:: python - # 组网阶段,AUC算子在计算auc指标同时,返回正负样例中间统计结果(stat_pos, stat_neg) + # 组网阶段,AUC 算子在计算 auc 指标同时,返回正负样例中间统计结果(stat_pos, stat_neg) auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg] = \ paddle.static.auc(input=pred, label=label) - # 利用AUC算子返回的中间计算结果,以及fleet提供的分布式指标计算接口,完成全局AUC计算。 + # 利用 AUC 算子返回的中间计算结果,以及 fleet 提供的分布式指标计算接口,完成全局 AUC 计算。 global_auc = fleet.metrics.auc(stat_pos, stat_neg) 更多分布式指标用法参见\ `分布式指标计算 `_\。 @@ -269,33 +269,33 @@ Dataset有两种不同的类型: 参数服务器的模型一般分为两种类型: 1. 明文模型(checkpoint model):主要用于增量训练,由服务节点以明文形式保存模型全量的稀疏参数和稠密参数以及优化器状态。 -2. 推理模型(inference model):主要用于线上推理部署,其中稠密参数由某个训练节点(一般是0号训练节点)以二进制方式保存,稀疏参数由服务节点以明文形式保存,为节省线上推理所需的存储空间,inference model中的稀疏参数可能并非全量,有一定的过滤逻辑。 +2. 推理模型(inference model):主要用于线上推理部署,其中稠密参数由某个训练节点(一般是 0 号训练节点)以二进制方式保存,稀疏参数由服务节点以明文形式保存,为节省线上推理所需的存储空间,inference model 中的稀疏参数可能并非全量,有一定的过滤逻辑。 .. code-block:: python exe = paddle.static.Executor(paddle.CPUPlace()) dirname = "/you/path/to/model" - # 保存checkpoint model + # 保存 checkpoint model fleet.save_persistables(exe, dirname) - # 保存inference model - # feed_var_names和target_vars用于指定需要裁剪网络的输入和输出 + # 保存 inference model + # feed_var_names 和 target_vars 用于指定需要裁剪网络的输入和输出 fleet.save_inference_model(exe, dirname, feed_var_names, target_vars) -在checkpoint model保存成功之后,可以在训练开始时加载已经保存好的模型,用于之后的增量训练 +在 checkpoint model 保存成功之后,可以在训练开始时加载已经保存好的模型,用于之后的增量训练 .. code-block:: python dirname = "/you/path/to/model" - # 加载checkpoint model + # 加载 checkpoint model fleet.load_model(dirname) 5 进阶教程 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1. GPUPS示例 -2. HeterPS示例 +1. GPUPS 示例 +2. HeterPS 示例 3. 稀疏参数配置(accessor) 4. 
二次开发 diff --git a/docs/guides/06_distributed_training/cluster_quick_start_cn.rst b/docs/guides/06_distributed_training/cluster_quick_start_cn.rst index 77d505cd330..d95471fb4dd 100644 --- a/docs/guides/06_distributed_training/cluster_quick_start_cn.rst +++ b/docs/guides/06_distributed_training/cluster_quick_start_cn.rst @@ -8,5 +8,5 @@ -二、ParameterServer训练快速开始 +二、ParameterServer 训练快速开始 ------------------------------- diff --git a/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst b/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst index 8d1e47a2290..53edb905fa4 100644 --- a/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst +++ b/docs/guides/06_distributed_training/cluster_quick_start_collective_cn.rst @@ -7,20 +7,20 @@ 但在每个进程上处理不同的数据。因此,数据并行非常适合单卡已经能够放得下完整的模型和参数,但希望通过并行来增大 全局数据(global batch)大小来提升训练的吞吐量。 -本节将采用自定义卷积网络和Paddle内置的CIFAR-10数据集来介绍如何使用 `Fleet API `_ (paddle.distributed.fleet) 进行数据并行训练。 +本节将采用自定义卷积网络和 Paddle 内置的 CIFAR-10 数据集来介绍如何使用 `Fleet API `_ (paddle.distributed.fleet) 进行数据并行训练。 1.1 版本要求 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -在编写分布式训练程序之前,用户需要确保已经安装GPU版的PaddlePaddle 2.3.0及以上版本。 +在编写分布式训练程序之前,用户需要确保已经安装 GPU 版的 PaddlePaddle 2.3.0 及以上版本。 1.2 具体步骤 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -与单机单卡的普通模型训练相比,数据并行训练只需要按照如下5个步骤对代码进行简单调整即可: +与单机单卡的普通模型训练相比,数据并行训练只需要按照如下 5 个步骤对代码进行简单调整即可: 1. 导入分布式训练依赖包 - 2. 初始化Fleet环境 + 2. 初始化 Fleet 环境 3. 构建分布式训练使用的网络模型 4. 构建分布式训练使用的优化器 5. 构建分布式训练使用的数据加载器 @@ -30,23 +30,23 @@ 1.2.1 导入分布式训练依赖包 """""""""""""""""""""""""""" -导入飞桨分布式训练专用包Fleet。 +导入飞桨分布式训练专用包 Fleet。 .. code-block:: python - # 导入分布式专用Fleet API + # 导入分布式专用 Fleet API from paddle.distributed import fleet - # 导入分布式训练数据所需API + # 导入分布式训练数据所需 API from paddle.io import DataLoader, DistributedBatchSampler - # 设置GPU环境 + # 设置 GPU 环境 paddle.set_device('gpu') -1.2.2 初始化Fleet环境 +1.2.2 初始化 Fleet 环境 """""""""""""""""""""""""" 分布式初始化需要: - 1. 设置is_collective为True,表示分布式训练采用Collective模式。 + 1. 
设置 is_collective 为 True,表示分布式训练采用 Collective 模式。 2. [可选] 设置分布式策略 `DistributedStrategy `_,跳过将使用缺省配置。 .. code-block:: python @@ -64,7 +64,7 @@ .. code-block:: python - # 等号右边model为原始串行网络模型 + # 等号右边 model 为原始串行网络模型 model = fleet.distributed_model(model) 1.2.4 构建分布式训练使用的优化器 @@ -73,7 +73,7 @@ .. code-block:: python - # 等号右边optimizer为原始串行网络模型 + # 等号右边 optimizer 为原始串行网络模型 optimizer = fleet.distributed_optimizer(optimizer) 1.2.5 构建分布式训练使用的数据加载器 @@ -84,7 +84,7 @@ .. code-block:: python # 构建分布式数据采样器 - # 注意:需要保证batch中每个样本数据shape相同,若原尺寸不一,需进行预处理 + # 注意:需要保证 batch 中每个样本数据 shape 相同,若原尺寸不一,需进行预处理 train_sampler = DistributedBatchSampler(train_dataset, 32, shuffle=True, drop_last=True) val_sampler = DistributedBatchSampler(val_dataset, 32) @@ -103,11 +103,11 @@ import paddle import paddle.nn.functional as F from paddle.vision.transforms import ToTensor - # 一、导入分布式专用Fleet API + # 一、导入分布式专用 Fleet API from paddle.distributed import fleet - # 构建分布式数据加载器所需API + # 构建分布式数据加载器所需 API from paddle.io import DataLoader, DistributedBatchSampler - # 设置GPU环境 + # 设置 GPU 环境 paddle.set_device('gpu') class MyNet(paddle.nn.Layer): @@ -152,7 +152,7 @@ val_loss_history = [] def train(): - # 二、初始化Fleet环境 + # 二、初始化 Fleet 环境 fleet.init(is_collective=True) model = MyNet(num_classes=10) @@ -219,19 +219,19 @@ 1.4 分布式启动 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -准备好分布式训练脚本后,就可以通过paddle.distributed.launch在集群上启动分布式训练: +准备好分布式训练脚本后,就可以通过 paddle.distributed.launch 在集群上启动分布式训练: - 单机多卡训练 - 假设只使用集群的一个节点,节点上可使用的GPU卡数为4,那么只需要在节点终端运行如下命令: + 假设只使用集群的一个节点,节点上可使用的 GPU 卡数为 4,那么只需要在节点终端运行如下命令: .. code-block:: bash python -m paddle.distributed.launch --gpus=0,1,2,3 train_with_fleet.py - 多机多卡训练 - 假设集群包含两个节点,每个节点上可使用的GPU卡数为4,IP地址分别为192.168.1.2和192.168.1.3,那么需要在两个节点的终端上分别运行如下命令: + 假设集群包含两个节点,每个节点上可使用的 GPU 卡数为 4,IP 地址分别为 192.168.1.2 和 192.168.1.3,那么需要在两个节点的终端上分别运行如下命令: - 在192.168.1.2节点运行: + 在 192.168.1.2 节点运行: .. 
code-block:: bash @@ -240,7 +240,7 @@ --ips=192.168.1.2,192.168.1.3 \ train_with_fleet.py - 在192.168.1.3节点运行相同命令: + 在 192.168.1.3 节点运行相同命令: .. code-block:: bash diff --git a/docs/guides/06_distributed_training/cluster_quick_start_ps_cn.rst b/docs/guides/06_distributed_training/cluster_quick_start_ps_cn.rst index 24db20de448..5fba3561e69 100644 --- a/docs/guides/06_distributed_training/cluster_quick_start_ps_cn.rst +++ b/docs/guides/06_distributed_training/cluster_quick_start_ps_cn.rst @@ -19,15 +19,15 @@ 1.1 任务介绍 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -本节将采用推荐领域非常经典的模型wide_and_deep为例,介绍如何使用飞桨分布式完成参数服务器训练任务。 +本节将采用推荐领域非常经典的模型 wide_and_deep 为例,介绍如何使用飞桨分布式完成参数服务器训练任务。 -参数服务器训练基于飞桨静态图,为方便用户理解,我们准备了一个wide_and_deep模型的单机静态图示例:\ `单机静态图示例 `_\。 +参数服务器训练基于飞桨静态图,为方便用户理解,我们准备了一个 wide_and_deep 模型的单机静态图示例:\ `单机静态图示例 `_\。 -在单机静态图示例基础上,通过1.2章节的操作方法,可以将其修改为参数服务器训练示例,本次快速开始的完整示例代码参考:\ `参数服务器完整示例 `_\。 +在单机静态图示例基础上,通过 1.2 章节的操作方法,可以将其修改为参数服务器训练示例,本次快速开始的完整示例代码参考:\ `参数服务器完整示例 `_\。 -同时,我们在AIStudio上建立了一个参数服务器快速开始的项目:\ `参数服务器快速开始 `_\,用户可以跳转到AIStudio上直接运行参数服务器的训练代码。 +同时,我们在 AIStudio 上建立了一个参数服务器快速开始的项目:\ `参数服务器快速开始 `_\,用户可以跳转到 AIStudio 上直接运行参数服务器的训练代码。 -在编写分布式训练程序之前,用户需要确保已经安装PaddlePaddle2.3及以上版本的飞桨开源框架。 +在编写分布式训练程序之前,用户需要确保已经安装 PaddlePaddle2.3 及以上版本的飞桨开源框架。 1.2 操作方法 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -37,7 +37,7 @@ 1. 导入分布式训练需要的依赖包。 2. 定义分布式模式并初始化分布式训练环境。 3. 加载模型。 - 4. 构建dataset加载数据 + 4. 构建 dataset 加载数据 5. 定义参数更新策略及优化器。 6. 开始训练。 @@ -47,7 +47,7 @@ 1.2.1 导入依赖 """""""""""" -导入必要的依赖,例如分布式训练专用的Fleet API(paddle.distributed.fleet)。 +导入必要的依赖,例如分布式训练专用的 Fleet API(paddle.distributed.fleet)。 .. code-block:: python @@ -57,7 +57,7 @@ 1.2.2 定义分布式模式并初始化分布式训练环境 """""""""""" -通过 ``fleet.init()`` 接口,用户可以定义训练相关的环境,注意此环境是用户预先在环境变量中配置好的,包括:训练节点个数,服务节点个数,当前节点的序号,服务节点完整的IP:PORT列表等。 +通过 ``fleet.init()`` 接口,用户可以定义训练相关的环境,注意此环境是用户预先在环境变量中配置好的,包括:训练节点个数,服务节点个数,当前节点的序号,服务节点完整的 IP:PORT 列表等。 .. 
code-block:: python @@ -75,71 +75,71 @@ model = WideDeepModel() model.net(is_train=True) -1.2.4 构建dataset加载数据 +1.2.4 构建 dataset 加载数据 """""""""""" -由于搜索推荐场景涉及到的训练数据通常较大,为提升训练中的数据读取效率,参数服务器采用InMemoryDataset/QueueDataset进行高性能的IO。 +由于搜索推荐场景涉及到的训练数据通常较大,为提升训练中的数据读取效率,参数服务器采用 InMemoryDataset/QueueDataset 进行高性能的 IO。 -InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_and_deep_dataset/reader.py,与单机DataLoader相比,存在如下区别: +InMemoryDataset/QueueDataset 所对应的数据处理脚本参考 examples/wide_and_deep_dataset/reader.py,与单机 DataLoader 相比,存在如下区别: 1. 继承自 ``fleet.MultiSlotDataGenerator`` 基类。 - 2. 复用单机reader中的 ``line_process()`` 方法,该方法将数据文件中一行的数据处理后生成特征数组并返回,特征数组不需要转成np.array格式。 - 3. 实现基类中的 ``generate_sample()`` 函数,调用 ``line_process()`` 方法逐行读取数据进行处理,并返回一个可以迭代的reader方法。 - 4. reader方法需返回一个list,其中的每个元素都是一个元组,具体形式为 ``(特征名,[特征值列表])`` ,元组的第一个元素为特征名(string类型,需要与模型中对应输入input的name对应),第二个元素为特征值列表(list类型)。 + 2. 复用单机 reader 中的 ``line_process()`` 方法,该方法将数据文件中一行的数据处理后生成特征数组并返回,特征数组不需要转成 np.array 格式。 + 3. 实现基类中的 ``generate_sample()`` 函数,调用 ``line_process()`` 方法逐行读取数据进行处理,并返回一个可以迭代的 reader 方法。 + 4. reader 方法需返回一个 list,其中的每个元素都是一个元组,具体形式为 ``(特征名,[特征值列表])`` ,元组的第一个元素为特征名(string 类型,需要与模型中对应输入 input 的 name 对应),第二个元素为特征值列表(list 类型)。 5. 在__main__作用域中调用 ``run_from_stdin()`` 方法,直接从标准输入流获取待处理数据,而不需要对数据文件进行操作。 -一个完整的reader.py伪代码如下: +一个完整的 reader.py 伪代码如下: .. 
code-block:: python import paddle - # 导入所需要的fleet依赖 + # 导入所需要的 fleet 依赖 import paddle.distributed.fleet as fleet - # 需要继承fleet.MultiSlotDataGenerator + # 需要继承 fleet.MultiSlotDataGenerator class WideDeepDatasetReader(fleet.MultiSlotDataGenerator): def line_process(self, line): features = line.rstrip('\n').split('\t') - # 省略数据处理过程,具体实现可参考单机reader的line_process()方法 - # 返回值为一个list,其中的每个元素均为一个list,不需要转成np.array格式 + # 省略数据处理过程,具体实现可参考单机 reader 的 line_process()方法 + # 返回值为一个 list,其中的每个元素均为一个 list,不需要转成 np.array 格式 # 具体格式:[[dense_value1, dense_value2, ...], [sparse_value1], [sparse_value2], ..., [label]] return [dense_feature] + sparse_feature + [label] - # 实现generate_sample()函数 - # 该方法有一个名为line的参数,只需要逐行处理数据,不需要对数据文件进行操作 + # 实现 generate_sample()函数 + # 该方法有一个名为 line 的参数,只需要逐行处理数据,不需要对数据文件进行操作 def generate_sample(self, line): def wd_reader(): # 按行处理数据 input_data = self.line_process(line) - # 构造特征名数组feature_name + # 构造特征名数组 feature_name feature_name = ["dense_input"] for idx in categorical_range_: feature_name.append("C" + str(idx - 13)) feature_name.append("label") - # 返回一个list,其中的每个元素都是一个元组 - # 元组的第一个元素为特征名(string类型),第二个元素为特征值(list类型) + # 返回一个 list,其中的每个元素都是一个元组 + # 元组的第一个元素为特征名(string 类型),第二个元素为特征值(list 类型) # 具体格式:[('dense_input', [dense_value1, dense_value2, ...]), ('C1', [sparse_value1]), ('C2', [sparse_value2]), ..., ('label', [label])] yield zip(feature_name, input_data) - # generate_sample()函数需要返回一个可以迭代的reader方法 + # generate_sample()函数需要返回一个可以迭代的 reader 方法 return wd_reader if __name__ == "__main__": - # 调用run_from_stdin()方法,直接从标准输入流获取待处理数据 + # 调用 run_from_stdin()方法,直接从标准输入流获取待处理数据 my_data_generator = WideDeepDatasetReader() my_data_generator.run_from_stdin() -在训练脚本中,构建dataset加载数据: +在训练脚本中,构建 dataset 加载数据: .. 
code-block:: python dataset = paddle.distributed.QueueDataset() thread_num = 1 - # use_var指定网络中的输入数据,pipe_command指定数据处理脚本 - # 要求use_var中输入数据的顺序与数据处理脚本输出的特征顺序一一对应 + # use_var 指定网络中的输入数据,pipe_command 指定数据处理脚本 + # 要求 use_var 中输入数据的顺序与数据处理脚本输出的特征顺序一一对应 dataset.init(use_var=model.inputs, pipe_command="python reader.py", batch_size=batch_size, @@ -148,16 +148,16 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an train_files_list = [os.path.join(train_data_path, x) for x in os.listdir(train_data_path)] - # set_filelist指定dataset读取的训练文件的列表 + # set_filelist 指定 dataset 读取的训练文件的列表 dataset.set_filelist(train_files_list) -备注:dataset更详细用法参见\ `使用InMemoryDataset/QueueDataset进行训练 `_\。 +备注:dataset 更详细用法参见\ `使用 InMemoryDataset/QueueDataset 进行训练 `_\。 1.2.5 定义参数更新策略及优化器 """""""""""" -在Fleet API中,用户可以使用 ``fleet.DistributedStrategy()`` 接口定义自己想要使用的分布式策略。 +在 Fleet API 中,用户可以使用 ``fleet.DistributedStrategy()`` 接口定义自己想要使用的分布式策略。 其中 ``a_sync`` 选项用于定义参数服务器相关的策略,当其被设定为 ``False`` 时,分布式训练将在同步的模式下进行。反之,当其被设定成 ``True`` 时,分布式训练将在异步的模式下进行。 @@ -175,7 +175,7 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an optimizer = paddle.optimizer.SGD(learning_rate=0.0001) # 单机优化器转换成分布式优化器 optimizer = fleet.distributed_optimizer(optimizer, dist_strategy) - # 使用分布式优化器最小化模型损失值model.loss,model.loss定义参见model.py + # 使用分布式优化器最小化模型损失值 model.loss,model.loss 定义参见 model.py optimizer.minimize(model.loss) 1.2.6 开始训练 @@ -209,13 +209,13 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an fleet.stop_worker() -备注:Paddle2.3版本及以后,ParameterServer训练将废弃掉dataloader + exe.run()方式,请切换到dataset + exe.train_from_dataset()方式。 +备注:Paddle2.3 版本及以后,ParameterServer 训练将废弃掉 dataloader + exe.run()方式,请切换到 dataset + exe.train_from_dataset()方式。 1.3 运行训练脚本 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -定义完训练脚本后,我们就可以用 ``fleetrun`` 指令运行分布式任务了。 ``fleetrun`` 是飞桨封装的分布式启动命令,命令参数 ``server_num`` , ``worker_num`` 分别为服务节点和训练节点的数量。在本例中,服务节点有1个,训练节点有2个。 +定义完训练脚本后,我们就可以用 ``fleetrun`` 指令运行分布式任务了。 ``fleetrun`` 是飞桨封装的分布式启动命令,命令参数 ``server_num`` , 
``worker_num`` 分别为服务节点和训练节点的数量。在本例中,服务节点有 1 个,训练节点有 2 个。 .. code-block:: bash @@ -253,7 +253,7 @@ InMemoryDataset/QueueDataset所对应的数据处理脚本参考examples/wide_an LAUNCH INFO 2022-05-18 11:27:17,775 Run Pod: evjsyn, replicas 3, status ready LAUNCH INFO 2022-05-18 11:27:17,795 Watching Pod: evjsyn, replicas 3, status running -同时,在log目录下,会生成服务节点和训练节点的日志文件。 +同时,在 log 目录下,会生成服务节点和训练节点的日志文件。 服务节点日志:default.evjsyn.ps.0.log,日志中须包含以下内容,证明服务节点启动成功,可以提供服务。 .. code-block:: bash diff --git a/docs/guides/06_distributed_training/data_parallel/amp_cn.rst b/docs/guides/06_distributed_training/data_parallel/amp_cn.rst index 08682f345b0..277057b1e80 100644 --- a/docs/guides/06_distributed_training/data_parallel/amp_cn.rst +++ b/docs/guides/06_distributed_training/data_parallel/amp_cn.rst @@ -3,50 +3,50 @@ 自动混合精度 ========================== -传统上,深度学习训练通常使用32比特双精度浮点数\ ``FP32`` \ 作为参数、梯度和中间Activation等的数据存储格式。使用\ ``FP32``\ 作为数据存储格式,每个数据需要4个字节的存储空间。为了节约显存消耗,业界提出使用16比特单精度浮点数\ ``FP16``\ 作为数据存储格式。使用\ ``FP16``\ 作为数据存储格式,每个数据仅需要2个字节的存储空间,相比于\ ``FP32``\ 可以节省一半的存储空间。除了降低显存消耗,\ ``FP16``\ 格式下,计算速度通常也更快,因此可以加速训练。 +传统上,深度学习训练通常使用 32 比特双精度浮点数\ ``FP32`` \ 作为参数、梯度和中间 Activation 等的数据存储格式。使用\ ``FP32``\ 作为数据存储格式,每个数据需要 4 个字节的存储空间。为了节约显存消耗,业界提出使用 16 比特单精度浮点数\ ``FP16``\ 作为数据存储格式。使用\ ``FP16``\ 作为数据存储格式,每个数据仅需要 2 个字节的存储空间,相比于\ ``FP32``\ 可以节省一半的存储空间。除了降低显存消耗,\ ``FP16``\ 格式下,计算速度通常也更快,因此可以加速训练。 单精度浮点训练可以带来以下好处: -1. 减少对GPU显存的需求,或者在GPU显存保持不变的情况下,可以支持更大模型和更大的batch size; +1. 减少对 GPU 显存的需求,或者在 GPU 显存保持不变的情况下,可以支持更大模型和更大的 batch size; 2. 降低显存读写的带宽压力; -3. 加速GPU数学运算速度 (需要GPU支持\ `[1] `__);按照NVIDA数据,GPU上\ ``FP16``\ 计算吞吐量是\ ``FP32``\ 的2~8倍\ `[2] `__\ 。 +3. 
加速 GPU 数学运算速度 (需要 GPU 支持\ `[1] `__);按照 NVIDA 数据,GPU 上\ ``FP16``\ 计算吞吐量是\ ``FP32``\ 的 2~8 倍\ `[2] `__\ 。 一、原理介绍 ----------------------- -我们首先介绍半精度(FP16)浮点数的表示,如下图所示。半精度浮点数是一种相对较新的浮点类型,在计算机中使用2字节(16比特)存储。在IEEE 754-2008标准中,它亦被称作binary16。与计算中常用的单精度(FP32)和双精度(FP64)浮点类型相比,因为FP16表示范围和表示精度更低,因此FP16更适于在精度要求不高的场景中使用。 +我们首先介绍半精度(FP16)浮点数的表示,如下图所示。半精度浮点数是一种相对较新的浮点类型,在计算机中使用 2 字节(16 比特)存储。在 IEEE 754-2008 标准中,它亦被称作 binary16。与计算中常用的单精度(FP32)和双精度(FP64)浮点类型相比,因为 FP16 表示范围和表示精度更低,因此 FP16 更适于在精度要求不高的场景中使用。 .. image:: ./img/amp.png :width: 400 :alt: amp :align: center -在使用相同的超参数下,混合精度训练使用半精度浮点(FP16)和单精度(FP32)浮点即可达到与使用纯单精度训练相同的准确率,并可加速模型的训练速度。这主要得益于英伟达推出的Volta及Turing架构GPU在使用FP16计算时具有如下特点: +在使用相同的超参数下,混合精度训练使用半精度浮点(FP16)和单精度(FP32)浮点即可达到与使用纯单精度训练相同的准确率,并可加速模型的训练速度。这主要得益于英伟达推出的 Volta 及 Turing 架构 GPU 在使用 FP16 计算时具有如下特点: -- FP16可降低一半的内存带宽和存储需求,这使得在相同的硬件条件下研究人员可使用更大更复杂的模型以及更大的batch size大小。 +- FP16 可降低一半的内存带宽和存储需求,这使得在相同的硬件条件下研究人员可使用更大更复杂的模型以及更大的 batch size 大小。 -- FP16可以充分利用英伟达Volta及Turing架构GPU提供的Tensor Cores技术。在相同的GPU硬件上,Tensor Cores的FP16计算吞吐量是FP32的8倍。 +- FP16 可以充分利用英伟达 Volta 及 Turing 架构 GPU 提供的 Tensor Cores 技术。在相同的 GPU 硬件上,Tensor Cores 的 FP16 计算吞吐量是 FP32 的 8 倍。 -使用自动混合精度训练时,主要训练过程如下:模型参数使用单精度浮点格式存储,在实际计算时,模型参数从单精度浮点数转换为半精度浮点数参与前向计算,并得到半精度浮点数表示中间状态和模型的loss值,然后使用半精度浮点数计算梯度,并将参数对应的梯度转换为单精度浮点数格式后,更新模型参数。计算过程如下图所示。 +使用自动混合精度训练时,主要训练过程如下:模型参数使用单精度浮点格式存储,在实际计算时,模型参数从单精度浮点数转换为半精度浮点数参与前向计算,并得到半精度浮点数表示中间状态和模型的 loss 值,然后使用半精度浮点数计算梯度,并将参数对应的梯度转换为单精度浮点数格式后,更新模型参数。计算过程如下图所示。 .. 
image:: ./img/amp_arch.png :width: 600 :alt: AMP Architecture :align: center -如前所述,通常半精度浮点数的表示范围远小于单精度浮点数的表示范围,在深度学习领域,参数、中间状态和梯度的值通常很小,因此以半精度浮点数参与计算时容易出现数值下溢,即接近零的值下溢为零值。为了避免这个问题,通常采用\ ``loss scaling``\ 机制。具体地讲,对loss乘以一个称为\ ``loss_scaling``\ 的值,根据链式法则,在反向传播过程中,梯度也等价于相应的乘以了\ ``loss_scaling``\ 的值,因此在参数更新时需要将梯度值相应地除以\ ``loss_scaling``\ 的值。 +如前所述,通常半精度浮点数的表示范围远小于单精度浮点数的表示范围,在深度学习领域,参数、中间状态和梯度的值通常很小,因此以半精度浮点数参与计算时容易出现数值下溢,即接近零的值下溢为零值。为了避免这个问题,通常采用\ ``loss scaling``\ 机制。具体地讲,对 loss 乘以一个称为\ ``loss_scaling``\ 的值,根据链式法则,在反向传播过程中,梯度也等价于相应的乘以了\ ``loss_scaling``\ 的值,因此在参数更新时需要将梯度值相应地除以\ ``loss_scaling``\ 的值。 -然而,在模型训练过程中,选择合适的\ ``loss_scaling``\ 的值是个较大的挑战。因此,需要采用一种称为\ ``动态loss scaling``\ 的机制。用户只需要为\ ``loss_scaling``\ 设置一个初始值:\ ``init_loss_scaling``\ 。在训练过程中,会检查梯度值是否出现nan或inf值,当连续\ ``incr_every_n_steps``\ 次迭代均未出现nan和inf值时,将\ ``init_loss_scaling``\ 的值乘以一个因子:\ ``incr_ratio``\ ;当连续\ ``decr_every_n_steps``\ 次迭代均出现nan和inf值时,将\ ``init_loss_scaling``\ 的值除以一个因子:\ ``decr_ratio``\ 。 +然而,在模型训练过程中,选择合适的\ ``loss_scaling``\ 的值是个较大的挑战。因此,需要采用一种称为\ ``动态 loss scaling``\ 的机制。用户只需要为\ ``loss_scaling``\ 设置一个初始值:\ ``init_loss_scaling``\ 。在训练过程中,会检查梯度值是否出现 nan 或 inf 值,当连续\ ``incr_every_n_steps``\ 次迭代均未出现 nan 和 inf 值时,将\ ``init_loss_scaling``\ 的值乘以一个因子:\ ``incr_ratio``\ ;当连续\ ``decr_every_n_steps``\ 次迭代均出现 nan 和 inf 值时,将\ ``init_loss_scaling``\ 的值除以一个因子:\ ``decr_ratio``\ 。 -同时,我们知道某些算子不适合采用半精度浮点数参与计算,因为这类算子采用半精度浮点数进行计算容易出现nan或者inf值。为了解决这个问题,通常采用黑名单和白名单机制。其中,黑名单中放置不宜采用半精度浮点数进行计算的算子,白名单中放置适合采用半精度浮点数进行计算的算子。 +同时,我们知道某些算子不适合采用半精度浮点数参与计算,因为这类算子采用半精度浮点数进行计算容易出现 nan 或者 inf 值。为了解决这个问题,通常采用黑名单和白名单机制。其中,黑名单中放置不宜采用半精度浮点数进行计算的算子,白名单中放置适合采用半精度浮点数进行计算的算子。 -飞桨中,我们引入自动混合精度(Auto Mixed Precision, AMP),混合使用\ ``FP32``\ 和\ ``FP16``\ ,在保持训练精度的同时,进一步提升训练的速度。实现了 ``自动维护FP32 、FP16参数副本``,\ ``动态loss scaling``, ``op黑白名单`` 等策略来避免因\ ``FP16``\ 动态范围较小而带来的模型最终精度损失。Fleet作为飞桨通用的分布式训练API提供了简单易用的接口, 用户只需要添加几行代码就可将自动混合精度应用到原有的分布式训练中进一步提升训练速度。 +飞桨中,我们引入自动混合精度(Auto Mixed Precision, AMP),混合使用\ ``FP32``\ 和\ ``FP16``\ 
,在保持训练精度的同时,进一步提升训练的速度。实现了 ``自动维护 FP32 、FP16 参数副本``,\ ``动态 loss scaling``, ``op 黑白名单`` 等策略来避免因\ ``FP16``\ 动态范围较小而带来的模型最终精度损失。Fleet 作为飞桨通用的分布式训练 API 提供了简单易用的接口, 用户只需要添加几行代码就可将自动混合精度应用到原有的分布式训练中进一步提升训练速度。 二、动态图操作实践 --------------------------- -使用飞桨框架提供的API:\ ``paddle.amp.auto_cast``\ 和\ ``paddle.amp.GradScaler``\ 能够实现动态图的自动混合精度训练,即在相关OP的计算中,自动选择FP16或FP32格式计算。开启AMP模式后,使用FP16与FP32进行计算的OP列表可以参见\ `AMP概览 `_\ 。 +使用飞桨框架提供的 API:\ ``paddle.amp.auto_cast``\ 和\ ``paddle.amp.GradScaler``\ 能够实现动态图的自动混合精度训练,即在相关 OP 的计算中,自动选择 FP16 或 FP32 格式计算。开启 AMP 模式后,使用 FP16 与 FP32 进行计算的 OP 列表可以参见\ `AMP 概览 `_\ 。 2.1 具体示例 ^^^^^^^^^^^^^^^^^^ @@ -73,7 +73,7 @@ print("\n" + msg) print("共计耗时 = {:.3f} sec".format(end_time - start_time)) -接着构建一个简单的网络,用于对比使用单精度浮点数进行训练与使用自动混合精度训练的速度。该网络由三层Linear组成,其中前两层Linear后接ReLU激活函数。 +接着构建一个简单的网络,用于对比使用单精度浮点数进行训练与使用自动混合精度训练的速度。该网络由三层 Linear 组成,其中前两层 Linear 后接 ReLU 激活函数。 .. code-block:: python @@ -100,14 +100,14 @@ return x -这里为了能有效的对比自动混合精度训练在速度方面的提升,我们将input_size与output_size的值设为较大的值,为了充分利用NVIDIA GPU提供的Tensor Core能力,我们将batch_size设置为8的倍数。 +这里为了能有效的对比自动混合精度训练在速度方面的提升,我们将 input_size 与 output_size 的值设为较大的值,为了充分利用 NVIDIA GPU 提供的 Tensor Core 能力,我们将 batch_size 设置为 8 的倍数。 .. code-block:: python epochs = 5 input_size = 4096 # 设为较大的值 output_size = 4096 # 设为较大的值 - batch_size = 512 # batch_size 为8的倍数 + batch_size = 512 # batch_size 为 8 的倍数 nums_batch = 50 train_data = [paddle.randn((batch_size, input_size)) for _ in range(nums_batch)] @@ -155,13 +155,13 @@ 2.2 模型训练 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -下面,我们介绍在动态图中如何使用AMP训练模型。在飞桨框架中,使用自动混合精度训练,需要以下三个步骤: +下面,我们介绍在动态图中如何使用 AMP 训练模型。在飞桨框架中,使用自动混合精度训练,需要以下三个步骤: -1. 定义 GradScaler,用于缩放loss比例,避免浮点数下溢,即进行\ ``loss scaling``\ 。 +1. 定义 GradScaler,用于缩放 loss 比例,避免浮点数下溢,即进行\ ``loss scaling``\ 。 -2. 使用auto_cast创建AMP上下文环境,该上下文中自动会确定每个OP的输入数据类型(FP16或FP32)。 +2. 使用 auto_cast 创建 AMP 上下文环境,该上下文中自动会确定每个 OP 的输入数据类型(FP16 或 FP32)。 -3. 使用步骤1中定义的GradScaler完成loss的缩放,并用缩放后的loss进行反向传播,完成训练。 +3. 
使用步骤 1 中定义的 GradScaler 完成 loss 的缩放,并用缩放后的 loss 进行反向传播,完成训练。 实现代码如下所示: @@ -171,7 +171,7 @@ optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义优化器 - # Step1:定义 GradScaler,用于缩放loss比例,避免浮点数溢出 + # Step1:定义 GradScaler,用于缩放 loss 比例,避免浮点数溢出 scaler = paddle.amp.GradScaler(init_loss_scaling=1024) start_timer() # 获取训练开始时间 @@ -180,12 +180,12 @@ datas = zip(train_data, labels) for i, (data, label) in enumerate(datas): - # Step2:创建AMP上下文环境,开启自动混合精度训练 + # Step2:创建 AMP 上下文环境,开启自动混合精度训练 with paddle.amp.auto_cast(): output = model(data) loss = mse(output, label) - # Step3:使用 Step1中定义的 GradScaler 完成 loss 的缩放,用缩放后的 loss 进行反向传播 + # Step3:使用 Step1 中定义的 GradScaler 完成 loss 的缩放,用缩放后的 loss 进行反向传播 scaled = scaler.scale(loss) scaled.backward() @@ -194,7 +194,7 @@ optimizer.clear_grad() print(loss) - end_timer_and_print("使用AMP模式耗时:") + end_timer_and_print("使用 AMP 模式耗时:") 程序的输出如下: @@ -203,7 +203,7 @@ Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False, [1.23644269]) - 使用AMP模式耗时: + 使用 AMP 模式耗时: 共计耗时 = 1.222 sec 上述例子存放在:\ `example/amp/amp_dygraph.py `_\ 。 diff --git a/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst b/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst index 12163ac9c24..8d1473d8e0a 100755 --- a/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst +++ b/docs/guides/06_distributed_training/data_parallel/gradient_merge_cn.rst @@ -6,35 +6,35 @@ Gradient Merge 一、简介 ---------------------- -为了提升模型的性能,人们开始追求:更大规模的数据集、更深的网络层、更庞大的参数规模。但是随之而来的就是给模型训练带来了巨大的压力,因此分布式技术及定制化AI 芯片应运而生。但在分布式训练中,经常会遇到显存或者内存不足的情况,通常是以下几点原因导致的: +为了提升模型的性能,人们开始追求:更大规模的数据集、更深的网络层、更庞大的参数规模。但是随之而来的就是给模型训练带来了巨大的压力,因此分布式技术及定制化 AI 芯片应运而生。但在分布式训练中,经常会遇到显存或者内存不足的情况,通常是以下几点原因导致的: - 输入的数据过大,例如视频类训练数据。 - 深度模型的参数过多或过大,所需的存储空间超出了内存/显存的大小。 -- AI芯片的内存有限。 +- AI 芯片的内存有限。 -为了能正常完成训练,我们通常只能使用较小的batch -size 以降低模型训练中的所需要的存储空间,这将导致很多模型无法通过提高训练时的batch +为了能正常完成训练,我们通常只能使用较小的 batch +size 
以降低模型训练中的所需要的存储空间,这将导致很多模型无法通过提高训练时的 batch size 来提高模型的精度。 -Gradient Merge (GM) 策略的主要思想是将连续多个batch 数据训练得到的参数梯度合并做一次更新。 -在该训练策略下,虽然从形式上看依然是小batch 规模的数据在训练,但是效果上可以达到多个小batch 数据合并成大batch 后训练的效果。 +Gradient Merge (GM) 策略的主要思想是将连续多个 batch 数据训练得到的参数梯度合并做一次更新。 +在该训练策略下,虽然从形式上看依然是小 batch 规模的数据在训练,但是效果上可以达到多个小 batch 数据合并成大 batch 后训练的效果。 二、原理介绍 ------------------------- -Gradient Merge 只是在训练流程上做了一些微调,达到模拟出大batch -size 训练效果的目的。具体来说,就是使用若干原有大小的batch 数据进行训练,即通过“前向+反向” -网络计算得到梯度。其间会有一部分显存/内存用于存放梯度,然后对每个batch计算出的梯度进行叠加,当累加的次数达到某个预设值后,使用累加的梯度对模型进行参数更新,从而达到使用大batch 数据训练的效果。 +Gradient Merge 只是在训练流程上做了一些微调,达到模拟出大 batch +size 训练效果的目的。具体来说,就是使用若干原有大小的 batch 数据进行训练,即通过“前向+反向” +网络计算得到梯度。其间会有一部分显存/内存用于存放梯度,然后对每个 batch 计算出的梯度进行叠加,当累加的次数达到某个预设值后,使用累加的梯度对模型进行参数更新,从而达到使用大 batch 数据训练的效果。 -在较大的粒度上看, GM 是将训练一个step 的过程由原来的 “前向 + 反向 + 更新” 改变成 “(前向 + 反向 + 梯度累加)x k + 更新”, 通过在最终更新前进行 k 次梯度的累加模拟出 batch size 扩大 k 倍的效果。 +在较大的粒度上看, GM 是将训练一个 step 的过程由原来的 “前向 + 反向 + 更新” 改变成 “(前向 + 反向 + 梯度累加)x k + 更新”, 通过在最终更新前进行 k 次梯度的累加模拟出 batch size 扩大 k 倍的效果。 更具体细节可以参考 `《MG-WFBP: Efficient Data Communication for Distributed Synchronous SGD Algorithms》 `__ 。 三、动态图使用方法 -------------------------------- -需要说明的是,动态图是天然支持Gradient Merge。即,只要不调用 ``clear_gradient`` 方法,动态图的梯度会一直累积。 -动态图下使用Gradient Merge的代码片段如下: +需要说明的是,动态图是天然支持 Gradient Merge。即,只要不调用 ``clear_gradient`` 方法,动态图的梯度会一直累积。 +动态图下使用 Gradient Merge 的代码片段如下: .. 
code-block:: diff --git a/docs/guides/06_distributed_training/data_parallel/principle_and_demo_cn.rst b/docs/guides/06_distributed_training/data_parallel/principle_and_demo_cn.rst index d8f20f65f8d..6ed60604922 100644 --- a/docs/guides/06_distributed_training/data_parallel/principle_and_demo_cn.rst +++ b/docs/guides/06_distributed_training/data_parallel/principle_and_demo_cn.rst @@ -8,7 +8,7 @@ 一、原理介绍 ----------------------- -深度学习模型训练过程计算通常分为前向计算、反向计算和梯度更新。由于各个计算设备的初始随机状态不同,各个计算设备上的初始模型参数也因此存在差异。数据并行方式下,为了保持各个计算设备上参数的一致性,在初始阶段需要通过广播的方式将第一张计算设备上的模型参数广播到其它所有计算设备。这样,各个计算设备上的模型参数在广播完成后是一致的。前向计算阶段,各个计算设备使用自己的数据计算模型损失值。由于各个计算设备读取的数据不同,因此各个计算设备上得到的模型损失值也往往是不同的。反向计算阶段,各个计算设备根据其前向计算得到的损失值计算梯度,使用AllReduce操作逐个累加每个参数在所有计算设备上的梯度值,并计算累积梯度的平均值,从而确保各个计算设备上用于更新参数的梯度值是相同的。参数更新阶段,使用梯度平均值更新参数。整个计算过程如下图所示。 +深度学习模型训练过程计算通常分为前向计算、反向计算和梯度更新。由于各个计算设备的初始随机状态不同,各个计算设备上的初始模型参数也因此存在差异。数据并行方式下,为了保持各个计算设备上参数的一致性,在初始阶段需要通过广播的方式将第一张计算设备上的模型参数广播到其它所有计算设备。这样,各个计算设备上的模型参数在广播完成后是一致的。前向计算阶段,各个计算设备使用自己的数据计算模型损失值。由于各个计算设备读取的数据不同,因此各个计算设备上得到的模型损失值也往往是不同的。反向计算阶段,各个计算设备根据其前向计算得到的损失值计算梯度,使用 AllReduce 操作逐个累加每个参数在所有计算设备上的梯度值,并计算累积梯度的平均值,从而确保各个计算设备上用于更新参数的梯度值是相同的。参数更新阶段,使用梯度平均值更新参数。整个计算过程如下图所示。 由于在训练起始阶段,通过广播操作确保了各个计算设备上的参数一致性;反向阶段,各个计算设备上使用相同的梯度均值更新参数;因此,可以保证训练过程中各个计算设备上的参数值始终是一致的。 @@ -38,7 +38,7 @@ 2.1 导入依赖包 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -导入必要的依赖,例如分布式训练专用的Fleet API(``paddle.distributed.fleet``)。 +导入必要的依赖,例如分布式训练专用的 Fleet API(``paddle.distributed.fleet``)。 .. code-block:: @@ -48,7 +48,7 @@ 2.2 初始化分布式环境 ^^^^^^^^^^^^^^^^^^^^^ -包括定义缺省的分布式策略,然后通过将参数 ``is_collective`` 设置为True,使训练架构设定为Collective架构。 +包括定义缺省的分布式策略,然后通过将参数 ``is_collective`` 设置为 True,使训练架构设定为 Collective 架构。 .. code-block:: @@ -58,7 +58,7 @@ 2.3 设置模型、优化器 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -使用Fleet API设置分布式训练需要的模型和优化器。 +使用 Fleet API 设置分布式训练需要的模型和优化器。 .. 
code-block:: @@ -110,7 +110,7 @@ l2_decay = 1e-4 # 权重衰减 epoch = 10 #训练迭代次数 - batch_num = 100 #每次迭代的batch数 + batch_num = 100 #每次迭代的 batch 数 batch_size = 32 #训练批次大小 class_dim = 102 @@ -138,13 +138,13 @@ # 设置训练函数 def train_model(): - # 初始化Fleet环境 + # 初始化 Fleet 环境 fleet.init(is_collective=True) model = ResNet(BottleneckBlock, 50, num_classes=class_dim) optimizer = optimizer_setting(parameter_list=model.parameters()) - # 通过Fleet API获取分布式model,用于支持分布式训练 + # 通过 Fleet API 获取分布式 model,用于支持分布式训练 model = fleet.distributed_model(model) optimizer = fleet.distributed_optimizer(optimizer) @@ -182,13 +182,13 @@ 3.2 运行方式 ^^^^^^^^^^^^^^^^^^ -飞桨分布式任务可以通过 ``paddle.distributed.launch`` 组件启动。假设要运行2卡任务,只需在命令行中执行: +飞桨分布式任务可以通过 ``paddle.distributed.launch`` 组件启动。假设要运行 2 卡任务,只需在命令行中执行: .. code-block:: python -m paddle.distributed.launch --gpus=0,1 --log_dir logs train.py -您将在logs路径下看到2份日志文件,即workerlog.0和workerlog.1,分别记录着 ``gpu0`` 和 ``gpu1`` 的运行信息。 +您将在 logs 路径下看到 2 份日志文件,即 workerlog.0 和 workerlog.1,分别记录着 ``gpu0`` 和 ``gpu1`` 的运行信息。 四、数据并行使用技巧 @@ -199,18 +199,18 @@ 4.1 学习率设置 ^^^^^^^^^^^^^^^^^^ -首先,阐述数据并行模式下学习率的设置技巧,其基本原则是学习率正比于global batch size。 +首先,阐述数据并行模式下学习率的设置技巧,其基本原则是学习率正比于 global batch size。 与单卡训练相比,数据并行训练通常有两种配置: -1. 一种是保持保持所有计算设备的batch size的总和(我们称为global batch size)与单卡训练的batch size保持一致。这中情形下,由于数据并行训练和单卡训练的global batch size是一致的,通常保持数据并行模式下各个计算设备上的学习率与单卡训练一致。 -2. 另一种情形是,保持数据并行模式下每个计算设备的batch size和单卡训练的batch size一致。这种情形下,数据并行模式的global batch size是单卡训练的 ``N`` 倍。这里, ``N`` 指的是数据并行计算的设备数。因此,通常需要将数据并行模式下每个计算设备的学习率相应的设置为单卡训练的 ``N`` 倍。这样,数据并行模式下的初始学习率通常较大,不利于模型的收敛。因此,通常需要使用warm-up机制。即,在初始训练时使用较小的学习率,并逐步缓慢增加学习率,经过一定迭代次数后,学习率增长到期望的学习率。 +1. 一种是保持保持所有计算设备的 batch size 的总和(我们称为 global batch size)与单卡训练的 batch size 保持一致。这中情形下,由于数据并行训练和单卡训练的 global batch size 是一致的,通常保持数据并行模式下各个计算设备上的学习率与单卡训练一致。 +2. 
另一种情形是,保持数据并行模式下每个计算设备的 batch size 和单卡训练的 batch size 一致。这种情形下,数据并行模式的 global batch size 是单卡训练的 ``N`` 倍。这里, ``N`` 指的是数据并行计算的设备数。因此,通常需要将数据并行模式下每个计算设备的学习率相应的设置为单卡训练的 ``N`` 倍。这样,数据并行模式下的初始学习率通常较大,不利于模型的收敛。因此,通常需要使用 warm-up 机制。即,在初始训练时使用较小的学习率,并逐步缓慢增加学习率,经过一定迭代次数后,学习率增长到期望的学习率。 4.2 数据集切分 ^^^^^^^^^^^^^^^^^^ 接着,介绍数据集切分问题。数据并行中,我们通常将数据集切分为 ``N`` 份,每个训练卡负责训练其中的一份数据。这里, ``N`` 是数据并行的并行度。如我们前面介绍的,每一个迭代中,各个训练卡均需要做一次梯度同步。因此,我们需要确保对于每个 ``epoch`` ,各个训练卡经历相同的迭代数,否则,运行迭代数多的训练卡会一直等待通信完成。实践中,我们通常通过数据补齐或者丢弃的方式保证各个训练卡经历相同的迭代数。数据补齐的方式指的是,为某些迭代数少训练数据补充部分数据,从而保证切分后的各份数据集的迭代次数相同;丢弃的方式则是丢弃部分迭代次数较多的数据,从而保证各份数据集的迭代次数相同。 -通常,在每个 ``epoch`` 需要对数据做shuffle处理。因此,根据shuffle时机的不同,有两种数据切分的方法。一种是在数据切分前做shuffle;即,首先对完整的数据做shuffle处理,做相应的数据补充或丢弃,然后做数据的切分。另一种是在数据切分后做shuffle;即,首先做数据的补充或丢弃和数据切分,然后对切分后的每一份数据分别做shuffle处理。 +通常,在每个 ``epoch`` 需要对数据做 shuffle 处理。因此,根据 shuffle 时机的不同,有两种数据切分的方法。一种是在数据切分前做 shuffle;即,首先对完整的数据做 shuffle 处理,做相应的数据补充或丢弃,然后做数据的切分。另一种是在数据切分后做 shuffle;即,首先做数据的补充或丢弃和数据切分,然后对切分后的每一份数据分别做 shuffle 处理。 diff --git a/docs/guides/06_distributed_training/data_parallel/recompute_cn.rst b/docs/guides/06_distributed_training/data_parallel/recompute_cn.rst index bf311bdec39..013e25c60a1 100644 --- a/docs/guides/06_distributed_training/data_parallel/recompute_cn.rst +++ b/docs/guides/06_distributed_training/data_parallel/recompute_cn.rst @@ -9,9 +9,9 @@ - **反向计算:** 运行反向算子来计算参数(Parameter)的梯度。 - **优化:** 应用优化算法以更新参数值 。 -在前向计算过程中,前向算子会计算出大量的中间结果,由于这些中间结果是训练数据和算子计算得到的,所以训练数据的batch bize 越大,中间结果占用的内存也就越大。飞桨核心框架会使用张量来存储这些隐层的中间结果。当模型层数加深时,其中间结果的数量可达数千甚至数万,占据大量的内存。飞桨核心框架的显存回收机制会及时清除无用的中间结果以节省显存,但是有些中间结果是反向计算过程中算子的输入,这些中间结果必须存储在内存中,直到相应的反向算子计算完毕。 +在前向计算过程中,前向算子会计算出大量的中间结果,由于这些中间结果是训练数据和算子计算得到的,所以训练数据的 batch bize 越大,中间结果占用的内存也就越大。飞桨核心框架会使用张量来存储这些隐层的中间结果。当模型层数加深时,其中间结果的数量可达数千甚至数万,占据大量的内存。飞桨核心框架的显存回收机制会及时清除无用的中间结果以节省显存,但是有些中间结果是反向计算过程中算子的输入,这些中间结果必须存储在内存中,直到相应的反向算子计算完毕。 -对于大小固定的内存来说,如果用户希望使用大batch bize 的数据进行训练,则将导致单个中间结果占用内存增大,那么就需要减少中间结果的存储数量,FRB就是基于这种思想设计的。FRB是将深度学习网络切分为k个部分(segments)。对每个segment 
而言:前向计算时,除了小部分必须存储在内存中的张量外,其他中间结果都将被删除;在反向计算中,首先重新计算一遍前向算子,以获得中间结果,再运行反向算子。简而言之,FRB 和普通的网络迭代相比,多计算了一遍前向算子。 +对于大小固定的内存来说,如果用户希望使用大 batch bize 的数据进行训练,则将导致单个中间结果占用内存增大,那么就需要减少中间结果的存储数量,FRB 就是基于这种思想设计的。FRB 是将深度学习网络切分为 k 个部分(segments)。对每个 segment 而言:前向计算时,除了小部分必须存储在内存中的张量外,其他中间结果都将被删除;在反向计算中,首先重新计算一遍前向算子,以获得中间结果,再运行反向算子。简而言之,FRB 和普通的网络迭代相比,多计算了一遍前向算子。 具体过程如下图所示: @@ -19,15 +19,15 @@ :width: 600 :alt: forward_backward :align: center -* Recompute-Offload 支持多卡并行训练, 当多卡并行时开启Offload,训练中同一节点上所有GPU 上的checkpoints 都将卸载到Host 内存中,会存在以下风险: +* Recompute-Offload 支持多卡并行训练, 当多卡并行时开启 Offload,训练中同一节点上所有 GPU 上的 checkpoints 都将卸载到 Host 内存中,会存在以下风险: - - PCIe 带宽瓶颈: 同一节点上的所有GPU 和Host 内存间共享一根PCIe 带宽,如同一节点上GPU 数量较多(单机8卡)容易因为PCIe 带宽限制让训练速度进一步减慢。 - - Host 内存溢出: 当同一节点上GPU 数量较多,且每张GPU checkpoints size 较大时,需要注意卸载量是否超出Host 内存大小。 + - PCIe 带宽瓶颈: 同一节点上的所有 GPU 和 Host 内存间共享一根 PCIe 带宽,如同一节点上 GPU 数量较多(单机 8 卡)容易因为 PCIe 带宽限制让训练速度进一步减慢。 + - Host 内存溢出: 当同一节点上 GPU 数量较多,且每张 GPU checkpoints size 较大时,需要注意卸载量是否超出 Host 内存大小。 二、功能效果 ----------------------- -我们在BERT-Large模型上对Recompute 的效果进行了测试,Recompute 可以让batch size 扩大 10倍, Offload 可以在Recompute 的基础上再扩大1.43 倍。 +我们在 BERT-Large 模型上对 Recompute 的效果进行了测试,Recompute 可以让 batch size 扩大 10 倍, Offload 可以在 Recompute 的基础上再扩大 1.43 倍。 batch size = seq * seq_max_len 硬件: 单卡 V100 32GB @@ -44,8 +44,8 @@ batch size = seq * seq_max_len 三、动态图使用方法 ------------------------- -动态图recompute功能在Paddle2.1以上加入,建议将Paddle版本升级到最新版。动态图使用recompute功能步骤如下: -**注:当recompute中存在随机性算子比如dropout时,需要在最开始指定paddle.seed,保证反向的重计算随机性。 +动态图 recompute 功能在 Paddle2.1 以上加入,建议将 Paddle 版本升级到最新版。动态图使用 recompute 功能步骤如下: +**注:当 recompute 中存在随机性算子比如 dropout 时,需要在最开始指定 paddle.seed,保证反向的重计算随机性。 3.1 导入需要的包 @@ -58,10 +58,10 @@ batch size = seq * seq_max_len from paddle.distributed.fleet.utils import recompute import random -3.2 定义组网,添加recompute调用 +3.2 定义组网,添加 recompute 调用 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -在需要使用recompute的地方直接调用函数:recompute(function, checkpoint),paddle就会自动进行recompute相关操作。recompute函数的第一个参数\ ``function``\ 
是前向计算函数,第二参数\ ``checkpoint``\ 是选择的checkpoint点。 +在需要使用 recompute 的地方直接调用函数:recompute(function, checkpoint),paddle 就会自动进行 recompute 相关操作。recompute 函数的第一个参数\ ``function``\ 是前向计算函数,第二参数\ ``checkpoint``\ 是选择的 checkpoint 点。 .. code:: python @@ -152,7 +152,7 @@ batch size = seq * seq_max_len 3.4 执行运行程序,打印结果 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -将正常的没有recompute的loss与recompute的loss进行比较,结果应该是相等的。 +将正常的没有 recompute 的 loss 与 recompute 的 loss 进行比较,结果应该是相等的。 .. code:: python @@ -171,7 +171,7 @@ batch size = seq * seq_max_len python recompute_dygraph.py -recompute动态图代码:`代码示例 `__。 +recompute 动态图代码:`代码示例 `__。 输出: @@ -215,7 +215,7 @@ recompute动态图代码:`代码示例 `_ 来使用,详细的使用方式请参考 `BML文档 `_ 。 +更为方便的是使用百度提供的全功能 AI 开发平台 `BML `_ 来使用,详细的使用方式请参考 `BML 文档 `_ 。 FAQ ^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/guides/06_distributed_training/distributed_overview.rst b/docs/guides/06_distributed_training/distributed_overview.rst index fd11211e1bd..4bda31880c0 100644 --- a/docs/guides/06_distributed_training/distributed_overview.rst +++ b/docs/guides/06_distributed_training/distributed_overview.rst @@ -1,6 +1,6 @@ .. _distributed_overview: -Paddle分布式整体介绍 +Paddle 分布式整体介绍 ==================================== 1.概述 @@ -28,28 +28,28 @@ Paddle分布式整体介绍 搜索推荐场景经常面临数据量大、特征维度高且稀疏化的问题。而分布式训练的参数服务器模式采用了一种将模型参数中心化管理的方式来实现模型参数的分布式存储和更新,该模式有两个角色 Server 与 Worker:Worker 用于执行模型的前向与反向计算;Server 负责从各个 Worker 收集汇总梯度并更新参数,因此对于存储超大规模模型参数的训练场景十分友好,常被用于训练拥有海量稀疏参数的搜索推荐领域模型。 -Paddle提供了传统纯 CPU 参数服务器、纯 GPU 参数服务器以及异构参数服务器等不同方案,您可以根据自己的模型特点和资源情况进行选择。详细内容可以参考 `搜索推荐场景 <./cluster_quick_start_ps_cn.html>`__ +Paddle 提供了传统纯 CPU 参数服务器、纯 GPU 参数服务器以及异构参数服务器等不同方案,您可以根据自己的模型特点和资源情况进行选择。详细内容可以参考 `搜索推荐场景 <./cluster_quick_start_ps_cn.html>`__ .. 
image:: ./images/parameter_server.png :width: 600 :alt: parameter_server :align: center -2.2 稠密参数collective训练场景 +2.2 稠密参数 collective 训练场景 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 对于 NLP 和 CV 等这类拥有复杂网络、稠密参数特点的模型,飞桨分布式训练技术的集合通信模式可以很好的支持这类模型的训练。该模式没有管理模型参数的中心节点,每个节点都是 Worker,每个 Worker 负责模型训练的同时还需要掌握当前最新的全局梯度信息。集合通信模式对计算芯片的算力和芯片之间的网络互联要求较高,如高性能计算的 GPU、芯片之间的高速网络互联 NVLINK 和 InfiniBand 等,因此非常适合 CV 和 NLP 领域计算密集型训练任务。 在这类场景下,选择合适的分布式并行策略尤为重要,主要解决要面对的显存、通信和负载均衡的问题。下面我们以常见的模型训练场景为例作说明。 -当我们的模型比较小或者单卡能放下时,我们可以采用 `数据并行训练 <./data_parallel/index_cn.html>`__ 的方式通过多卡间复制模型、同步梯度、增加minibatch的方法提高训练的效率,比如ERNIE large或者Bert Large模型单卡能够放大下,但是优于计算量偏大,在V100上训练这样的模型经常需要扩展到4机甚至8机上进行训练。 +当我们的模型比较小或者单卡能放下时,我们可以采用 `数据并行训练 <./data_parallel/index_cn.html>`__ 的方式通过多卡间复制模型、同步梯度、增加 minibatch 的方法提高训练的效率,比如 ERNIE large 或者 Bert Large 模型单卡能够放大下,但是优于计算量偏大,在 V100 上训练这样的模型经常需要扩展到 4 机甚至 8 机上进行训练。 -当模型变大到10亿到百亿这个范围时,比如NLP Bert百亿模型,V100机器单卡放不下但是单机能够放下,我们可以采用 `GroupSharded并行 <./group_sharded_parallel_cn.html>`__ 切分优化器状态、参数方式减少显存使用,用通信换显存占用; 或者使用 `张量并行 <./model_parallel_cn.html>`__ 把 占比例高的参数比如矩阵进行按照行列的维度进行切分,减少显存使用切分计算同时切分计算量。 +当模型变大到 10 亿到百亿这个范围时,比如 NLP Bert 百亿模型,V100 机器单卡放不下但是单机能够放下,我们可以采用 `GroupSharded 并行 <./group_sharded_parallel_cn.html>`__ 切分优化器状态、参数方式减少显存使用,用通信换显存占用; 或者使用 `张量并行 <./model_parallel_cn.html>`__ 把 占比例高的参数比如矩阵进行按照行列的维度进行切分,减少显存使用切分计算同时切分计算量。 -.. 样例可以参考`ERNIE 百亿 `__ 或者Bert。 +.. 
样例可以参考`ERNIE 百亿 `__ 或者 Bert。 -当模型进一步增加,到达100亿以上、甚至千亿模型,单机可能就放不下了。需要进一步的对模型进行切分,比如`流水线并行 <./pipeline_parallel_cn.html>`__;同时,我们针对这个场景提出了 `4D混合并行的策略 `__ 以充分利用各个并行策略的显存、通信、负载均衡等各个维度的特点,对模型进行合理的切分,充分利用机器的计算能力。 +当模型进一步增加,到达 100 亿以上、甚至千亿模型,单机可能就放不下了。需要进一步的对模型进行切分,比如`流水线并行 <./pipeline_parallel_cn.html>`__;同时,我们针对这个场景提出了 `4D 混合并行的策略 `__ 以充分利用各个并行策略的显存、通信、负载均衡等各个维度的特点,对模型进行合理的切分,充分利用机器的计算能力。 -当模型增加到万亿甚至10W亿,Dense参数的训练模式由于计算量太大以至于比较难实际实施。这个时候需要稀疏模型训练,例如MoE(Mixture-Of-Experts),与Dense大模型不同,`MoE训练 `__ 过程中只会激活部分的Expert参数从而大幅减少了计算量。目前MoE成为了通往万亿以及更大的模型的主要方式。 +当模型增加到万亿甚至 10W 亿,Dense 参数的训练模式由于计算量太大以至于比较难实际实施。这个时候需要稀疏模型训练,例如 MoE(Mixture-Of-Experts),与 Dense 大模型不同,`MoE 训练 `__ 过程中只会激活部分的 Expert 参数从而大幅减少了计算量。目前 MoE 成为了通往万亿以及更大的模型的主要方式。 .. note::需要注意的是,我们使用任何一个并行策略都是有性能代价的,而且常常随着并行策略所应用的范围变大而上升。所以,把并行策略限定到尽量少的范围中会对保证训练性能有益。 diff --git a/docs/guides/06_distributed_training/fleet_api_howto_cn.rst b/docs/guides/06_distributed_training/fleet_api_howto_cn.rst index 1b222b30070..b3d5be7a094 100644 --- a/docs/guides/06_distributed_training/fleet_api_howto_cn.rst +++ b/docs/guides/06_distributed_training/fleet_api_howto_cn.rst @@ -1,22 +1,22 @@ -使用FleetAPI进行分布式训练 +使用 FleetAPI 进行分布式训练 ========================== FleetAPI 设计说明 ----------------- -Fleet是PaddlePaddle分布式训练的高级API。Fleet的命名出自于PaddlePaddle,象征一个舰队中的多只双桨船协同工作。Fleet的设计在易用性和算法可扩展性方面做出了权衡。用户可以很容易从单机版的训练程序,通过添加几行代码切换到分布式训练程序。此外,分布式训练的算法也可以通过Fleet -API接口灵活定义。 +Fleet 是 PaddlePaddle 分布式训练的高级 API。Fleet 的命名出自于 PaddlePaddle,象征一个舰队中的多只双桨船协同工作。Fleet 的设计在易用性和算法可扩展性方面做出了权衡。用户可以很容易从单机版的训练程序,通过添加几行代码切换到分布式训练程序。此外,分布式训练的算法也可以通过 Fleet +API 接口灵活定义。 -Fleet API快速上手示例 +Fleet API 快速上手示例 --------------------- -下面会针对Fleet -API最常见的两种使用场景,用一个模型做示例,目的是让用户有快速上手体验的模板。 +下面会针对 Fleet +API 最常见的两种使用场景,用一个模型做示例,目的是让用户有快速上手体验的模板。 * - 假设我们定义MLP网络如下: + 假设我们定义 MLP 网络如下: .. code-block:: python @@ -31,7 +31,7 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用 return avg_cost * - 定义一个在内存生成数据的Reader如下: + 定义一个在内存生成数据的 Reader 如下: .. 
code-block:: python @@ -42,7 +42,7 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用 "y": np.random.randint(2, size=(128, 1)).astype('int64')} * - 单机Trainer定义 + 单机 Trainer 定义 .. code-block:: python @@ -66,9 +66,9 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用 print("step%d cost=%f" % (i, cost_val[0])) * - Parameter Server训练方法 + Parameter Server 训练方法 - 参数服务器方法对于大规模数据,简单模型的并行训练非常适用,我们基于单机模型的定义给出使用Parameter Server进行训练的示例如下: + 参数服务器方法对于大规模数据,简单模型的并行训练非常适用,我们基于单机模型的定义给出使用 Parameter Server 进行训练的示例如下: .. code-block:: python @@ -115,9 +115,9 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用 (fleet.worker_index(), i, cost_val[0])) * - Collective训练方法 + Collective 训练方法 - Collective Training通常在GPU多机多卡训练中使用,一般在复杂模型的训练中比较常见,我们基于上面的单机模型定义给出使用Collective方法进行分布式训练的示例如下: + Collective Training 通常在 GPU 多机多卡训练中使用,一般在复杂模型的训练中比较常见,我们基于上面的单机模型定义给出使用 Collective 方法进行分布式训练的示例如下: .. code-block:: python @@ -155,45 +155,45 @@ API最常见的两种使用场景,用一个模型做示例,目的是让用 (fleet.worker_index(), i, cost_val[0])) -Fleet API相关的接口说明 +Fleet API 相关的接口说明 ----------------------- -Fleet API接口 +Fleet API 接口 ^^^^^^^^^^^^^ * init(role_maker=None) - * fleet初始化,需要在使用fleet其他接口前先调用,用于定义多机的环境配置 + * fleet 初始化,需要在使用 fleet 其他接口前先调用,用于定义多机的环境配置 * is_worker() - * Parameter Server训练中使用,判断当前节点是否是Worker节点,是则返回True,否则返回False + * Parameter Server 训练中使用,判断当前节点是否是 Worker 节点,是则返回 True,否则返回 False * is_server(model_dir=None) - * Parameter Server训练中使用,判断当前节点是否是Server节点,是则返回True,否则返回False + * Parameter Server 训练中使用,判断当前节点是否是 Server 节点,是则返回 True,否则返回 False * init_server() - * Parameter Server训练中,fleet加载model_dir中保存的模型相关参数进行parameter - server的初始化 + * Parameter Server 训练中,fleet 加载 model_dir 中保存的模型相关参数进行 parameter + server 的初始化 * run_server() - * Parameter Server训练中使用,用来启动server端服务 + * Parameter Server 训练中使用,用来启动 server 端服务 * init_worker() - * Parameter Server训练中使用,用来启动worker端服务 + * Parameter Server 训练中使用,用来启动 worker 端服务 * stop_worker() - * 训练结束后,停止worker + * 训练结束后,停止 worker * distributed_optimizer(optimizer, strategy=None) - * 分布式优化算法装饰器,用户可带入单机optimizer,并配置分布式训练策略,返回一个分布式的optimizer + * 
分布式优化算法装饰器,用户可带入单机 optimizer,并配置分布式训练策略,返回一个分布式的 optimizer RoleMaker ^^^^^^^^^ @@ -204,10 +204,10 @@ RoleMaker * - 描述:PaddleCloudRoleMaker是一个高级封装,支持使用paddle.distributed.launch或者paddle.distributed.launch_ps启动脚本 + 描述:PaddleCloudRoleMaker 是一个高级封装,支持使用 paddle.distributed.launch 或者 paddle.distributed.launch_ps 启动脚本 * - Parameter Server训练示例: + Parameter Server 训练示例: .. code-block:: python @@ -228,7 +228,7 @@ RoleMaker python -m paddle.distributed.launch_ps --worker_num 2 --server_num 2 trainer.py * - Collective训练示例: + Collective 训练示例: .. code-block:: python @@ -253,7 +253,7 @@ RoleMaker * - 描述:用户自定义节点的角色信息,IP和端口信息 + 描述:用户自定义节点的角色信息,IP 和端口信息 * 示例: diff --git a/docs/guides/06_distributed_training/group_sharded_parallel_cn.rst b/docs/guides/06_distributed_training/group_sharded_parallel_cn.rst index c3fa12e8156..1fc1d6fe73e 100644 --- a/docs/guides/06_distributed_training/group_sharded_parallel_cn.rst +++ b/docs/guides/06_distributed_training/group_sharded_parallel_cn.rst @@ -4,10 +4,10 @@ ======================== 当模型参数达到百亿或者千亿时, 传统的数据并行训练可能会遇到显存瓶颈。 -在数据并行训练中,每个gpu worker 都有一份完整模型参数和优化器状态副本。 +在数据并行训练中,每个 gpu worker 都有一份完整模型参数和优化器状态副本。 `《ZeRO: Memory Optimizations Toward Training Trillion Parameter Models》 `__ -指出在每个GPU 上都保存一份模型参数和优化器状态副本是冗余的。 我们可以通过将上述参数和副本划分到不同GPU 中, -在每个GPU 只保存部分副本,来减少每张GPU上显存的占用,从而可以支持更大模型的训练。 +指出在每个 GPU 上都保存一份模型参数和优化器状态副本是冗余的。 我们可以通过将上述参数和副本划分到不同 GPU 中, +在每个 GPU 只保存部分副本,来减少每张 GPU 上显存的占用,从而可以支持更大模型的训练。 一、原理介绍 @@ -16,41 +16,41 @@ 1.1 GroupSharded ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -GroupSharded 实现了类似ZeRO-DP 的训练策略,将模型状态包括:模型参数(parameter)、参数梯度(gradient)、参数对应的优化器状态(以Adam为例moment和varience)切分到每一张GPU 上。让模型参数部分所占的显存随并行卡数的增加而减少。 +GroupSharded 实现了类似 ZeRO-DP 的训练策略,将模型状态包括:模型参数(parameter)、参数梯度(gradient)、参数对应的优化器状态(以 Adam 为例 moment 和 varience)切分到每一张 GPU 上。让模型参数部分所占的显存随并行卡数的增加而减少。 通过 paddle.distributed.sharding.group_sharded_parallel 提供的简单易用接口, 用户只需要添加几行代码就可将策略加入到原有的训练中。 模型训练过程中的显存消耗主要由两大部分组成:模型参数及优化器状态、训练产生的中间变量(activations)。 -GroupSharded 
策略可以根据用户配置支持,分别切分模型参数、对应参数梯度和优化器状态,因此模型状态所消耗的显存可以随着并行GPU数量增加而线性减少; -但是每张GPU上仍然维护着模型完整的前向和反向,所以每张GPU依然需要存放模型的训练过程中的产生的全部的中间变量,这部分显存消耗 -不会随着GPU 数量的增加而减少。 用户可以通过结合 recompute 策略来减少 activation这部分的显存消耗。 +GroupSharded 策略可以根据用户配置支持,分别切分模型参数、对应参数梯度和优化器状态,因此模型状态所消耗的显存可以随着并行 GPU 数量增加而线性减少; +但是每张 GPU 上仍然维护着模型完整的前向和反向,所以每张 GPU 依然需要存放模型的训练过程中的产生的全部的中间变量,这部分显存消耗 +不会随着 GPU 数量的增加而减少。 用户可以通过结合 recompute 策略来减少 activation 这部分的显存消耗。 -通过GroupSharded 和增加并行GPU 数量,用户可以在A100-40G设备下8卡训练16.25B参量的模型 (需要结合 recompute, amp 策略)。 +通过 GroupSharded 和增加并行 GPU 数量,用户可以在 A100-40G 设备下 8 卡训练 16.25B 参量的模型 (需要结合 recompute, amp 策略)。 1.2 GroupSharded-hybrid-dp ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -GroupSharded hybrid数据并行策略,在GroupSharded 并行的基础上再增加一层数据并行逻辑。 -该策略的目的是通过 ``限制GroupSharded 通信的节点数`` 和 ``增加多路数据并行`` 来提高训练吞吐。 如果一个模型在普通GroupSharded 训练时需要M 张GPU,则则开启hybrid-dp 至少需要 N*M GPU (N>= 2)。 +GroupSharded hybrid 数据并行策略,在 GroupSharded 并行的基础上再增加一层数据并行逻辑。 +该策略的目的是通过 ``限制 GroupSharded 通信的节点数`` 和 ``增加多路数据并行`` 来提高训练吞吐。 如果一个模型在普通 GroupSharded 训练时需要 M 张 GPU,则则开启 hybrid-dp 至少需要 N*M GPU (N>= 2)。 GroupSharded-hybrid-dp 适用的场景如下: - * 当前有 4个 8 卡A100 节点 - * 目标模型A 在GroupSharded 训练时至少需要 8卡 A100 (一个完整的8 卡A100节点) + * 当前有 4 个 8 卡 A100 节点 + * 目标模型 A 在 GroupSharded 训练时至少需要 8 卡 A100 (一个完整的 8 卡 A100 节点) * 希望利用全部的 4 个节点来加速训练 -上述情况如果直接使用全部的 4 个节点 进行普通的GroupSharded 训练, 那么全部的 32 gpus 之间组成一个完整 GroupSharded parallelism。这样会因为通信瓶颈造成训练速度非常慢: +上述情况如果直接使用全部的 4 个节点 进行普通的 GroupSharded 训练, 那么全部的 32 gpus 之间组成一个完整 GroupSharded parallelism。这样会因为通信瓶颈造成训练速度非常慢: - * GroupSharded 中的allgather 通信 会涉及全部的32 张卡,且为跨节点通信。 - * GroupSharded 中的allreduce 通信 会涉及全部的32 张卡,且为跨节点通信。 + * GroupSharded 中的 allgather 通信 会涉及全部的 32 张卡,且为跨节点通信。 + * GroupSharded 中的 allreduce 通信 会涉及全部的 32 张卡,且为跨节点通信。 -开启 hybrid-dp 并设置 ``GroupSharded_group_size = 8`` 后, 每个节点内的 8 张卡组成一个完整的 GroupSharded parallelism,4 个节点构成4路hybrid data parallelism: +开启 hybrid-dp 并设置 ``GroupSharded_group_size = 8`` 后, 每个节点内的 8 张卡组成一个完整的 GroupSharded parallelism,4 个节点构成 4 路 hybrid data parallelism: - * GroupSharded 
中的allgather 通信被限制在每个节点内的 8 张GPU 之间, 没有跨节点通信。 - * GroupSharded 中的allreduce 可以先进行机内通信,再跨节点通信,且每张GPU每次仅需要allreduce 通信 1/8 的模型参数。 + * GroupSharded 中的 allgather 通信被限制在每个节点内的 8 张 GPU 之间, 没有跨节点通信。 + * GroupSharded 中的 allreduce 可以先进行机内通信,再跨节点通信,且每张 GPU 每次仅需要 allreduce 通信 1/8 的模型参数。 -GroupSharded-hybrid-dp 通过上述措施,可以较大程度 减少 GroupSharded 训练 从1节点扩展到4 节点时的(跨节点)通信量。提高节点增加时的加速比,提高训练吞吐。 +GroupSharded-hybrid-dp 通过上述措施,可以较大程度 减少 GroupSharded 训练 从 1 节点扩展到 4 节点时的(跨节点)通信量。提高节点增加时的加速比,提高训练吞吐。 -P.S. hybrid dp 是因为 GroupSharded parallelism 本身内含一层 data parallelism 逻辑, hybrid dp 是在 GroupSharded parallelism之上再增加新的一层 data parallelism 逻辑。 +P.S. hybrid dp 是因为 GroupSharded parallelism 本身内含一层 data parallelism 逻辑, hybrid dp 是在 GroupSharded parallelism 之上再增加新的一层 data parallelism 逻辑。 二、功能效果 @@ -59,7 +59,7 @@ P.S. hybrid dp 是因为 GroupSharded parallelism 本身内含一层 data parall 下面表格将对比 GroupSharded 策略对显存的影响。 模型为 GPT(11.375B),试验环境为 A100 (40GB), recompute = ON, amp(O2) = ON, hybrid-dp = OFF。 -模型不变,单卡batch size 不变,当并行GPU数量增加时,显存的消耗将减小。 省下的显存可以用来增大模型。 +模型不变,单卡 batch size 不变,当并行 GPU 数量增加时,显存的消耗将减小。 省下的显存可以用来增大模型。 +------------------------------+----------+----------------+ | setting | GPU Mem | Speed | @@ -77,12 +77,12 @@ GroupSharded 结合 amp (O2) + recompute,可以在 8 张 40GB A100 并行的 三、使用方法 ---------------------- -首先简单总结 GroupSharded stage1、stage2、stage3 分别实现减小参数规模的原理。stage1、stage2、stage3 分别在训练过程中对模型优化器状态、梯度+优化器状态、参数+梯度+优化器状态进行切分,通过减小训练的相关Tensor(参数、梯度、优化器状态)达到同样计算资源下能够训练更大模型的效果。 +首先简单总结 GroupSharded stage1、stage2、stage3 分别实现减小参数规模的原理。stage1、stage2、stage3 分别在训练过程中对模型优化器状态、梯度+优化器状态、参数+梯度+优化器状态进行切分,通过减小训练的相关 Tensor(参数、梯度、优化器状态)达到同样计算资源下能够训练更大模型的效果。 以下是分别从 GroupSharded 的三种实现阶段的实现方式: * 使用 group_sharded_parallel 和 save_group_sharded_model 两个 API 可以进行训练和保存。使用 group_sharded_parallel 提供 stage1 的选项,内部使用 stage2 完成优化实现。参考 `group_sharded_parallel `__, `save_group_sharded_model `__。 * 此处需要注意,使用 save_group_sharded_model 保存模型,再次 load 时需要在调用 group_sharded_parallel 前对 model 和 optimizer 进行 set_state_dict。 - * 
目前stage2、3已经适配GPT模型,可以参考请参考 `示例代码 `__。 + * 目前 stage2、3 已经适配 GPT 模型,可以参考请参考 `示例代码 `__。 * 其次解决组网中共享参数训练问题,stage3 需要额外在组网中加入外置参数注册逻辑,在组网中需要注册 ``self.extra_parameters = [self.gpt.embeddings.word_embeddings.weight]``,这部分可以参考 PaddleNLP 中 GPT-3 的组网。`示例代码 `__。 .. code-block:: @@ -134,7 +134,7 @@ GroupSharded 结合 amp (O2) + recompute,可以在 8 张 40GB A100 并行的 save_group_sharded_model(model, output_dir, optimizer) -运行方式(需要保证当前机器有两张GPU): +运行方式(需要保证当前机器有两张 GPU): .. code-block:: bash @@ -162,7 +162,7 @@ GroupSharded 结合 amp (O2) + recompute,可以在 8 张 40GB A100 并行的 | FLAGS_selected_accelerators 6 | +=======================================================================================+ -日志信息位于log目录下: +日志信息位于 log 目录下: .. code-block:: bash diff --git a/docs/guides/06_distributed_training/index_cn.rst b/docs/guides/06_distributed_training/index_cn.rst index 0ff1984a6eb..0510358b7a6 100644 --- a/docs/guides/06_distributed_training/index_cn.rst +++ b/docs/guides/06_distributed_training/index_cn.rst @@ -12,7 +12,7 @@ - `张量模型并行 <./model_parallel_cn.html>`_ : 使用飞桨框架完成张量模型并行训练。 - `流水线并行 <./pipeline_parallel_cn.html>`_ : 使用飞桨框架完成流水线并行训练。 - `分组切分并行 <./group_sharded_parallel_cn.html>`_ : 使用飞桨框架完成分组切分并行训练。 -- `MoE <./moe_cn.html>`_ : 使用飞桨框架完成MoE模型训练。 +- `MoE <./moe_cn.html>`_ : 使用飞桨框架完成 MoE 模型训练。 .. 
toctree:: :hidden: diff --git a/docs/guides/06_distributed_training/model_parallel_cn.rst b/docs/guides/06_distributed_training/model_parallel_cn.rst index 83083ea85ae..f4ccaec8acd 100644 --- a/docs/guides/06_distributed_training/model_parallel_cn.rst +++ b/docs/guides/06_distributed_training/model_parallel_cn.rst @@ -8,14 +8,14 @@ 一、原理介绍 ----------------------- -张量模型并行需要解决两个问题:参数如何切分到不同设备(切分方式);以及切分后,如何保证数学一致性(数学等价)。本文以NLP中的Transformer结构为例,介绍张量模型并行的切分方式和随机性控制。 +张量模型并行需要解决两个问题:参数如何切分到不同设备(切分方式);以及切分后,如何保证数学一致性(数学等价)。本文以 NLP 中的 Transformer 结构为例,介绍张量模型并行的切分方式和随机性控制。 1.1 切分方法 ^^^^^^^^^^^^^^^^^^^^^^^^^^ -自2017年提出以来, `Transformer `__ 及其变种模型成为自然语言类任务的常用模型,并于近年来被应用到图像视觉领域。Transformer模型的基础结构是由Attention和MLP组成的Encoder和Decoder,以及Embedding,如下图所示[1]。其中Attention和MLP的底层实现均为矩阵乘法运算,而Embedding是一种查找表实现。本文以NLP中的Transformer结构为例,介绍张量模型并行的切分方式和随机性控制。但总体上看核心思想都是利用分块矩阵的计算原理,实现其参数切分到不同的设备2 。下面详细介绍这三种层的切分方式。 +自 2017 年提出以来, `Transformer `__ 及其变种模型成为自然语言类任务的常用模型,并于近年来被应用到图像视觉领域。Transformer 模型的基础结构是由 Attention 和 MLP 组成的 Encoder 和 Decoder,以及 Embedding,如下图所示[1]。其中 Attention 和 MLP 的底层实现均为矩阵乘法运算,而 Embedding 是一种查找表实现。本文以 NLP 中的 Transformer 结构为例,介绍张量模型并行的切分方式和随机性控制。但总体上看核心思想都是利用分块矩阵的计算原理,实现其参数切分到不同的设备 2 。下面详细介绍这三种层的切分方式。 .. 
image:: ./images/transformer_overview.png :width: 200 @@ -27,9 +27,9 @@ ::::::::::::::::::::::::: -对于Embedding操作,可以将其理解为一种查找表操作。即,将输入看做索引,将Embedding参数看做查找表,根据该索引查表得到相应的输出,如下图(a)所示。当采用模型并行时,Embedding的参数被均匀切分到多个卡上。假设Embedding参数的维度为N*D,并采用K张卡执行模型并行,那么模型并行模式下每张卡上的Embedding参数的维度为N//K*D。当参数的维度N不能被卡数K整除时,最后一张卡的参数维度值为(N//K+N%K)*D。以下图(b)为例,Embedding参数的维度为8*D,采用2张卡执行模型并行,那么每张卡上Embedding参数的维度为4*D。 +对于 Embedding 操作,可以将其理解为一种查找表操作。即,将输入看做索引,将 Embedding 参数看做查找表,根据该索引查表得到相应的输出,如下图(a)所示。当采用模型并行时,Embedding 的参数被均匀切分到多个卡上。假设 Embedding 参数的维度为 N*D,并采用 K 张卡执行模型并行,那么模型并行模式下每张卡上的 Embedding 参数的维度为 N//K*D。当参数的维度 N 不能被卡数 K 整除时,最后一张卡的参数维度值为(N//K+N%K)*D。以下图(b)为例,Embedding 参数的维度为 8*D,采用 2 张卡执行模型并行,那么每张卡上 Embedding 参数的维度为 4*D。 -为了便于说明,以下我们均假设Embedding的参数维度值D可以被模型并行的卡数D整除。此时,每张卡上Embeeding参数的索引值为[0, N/K),逻辑索引值为[k*N/K, (k+1)*N/K),其中k表示卡序号,0<=k`_。 -运行方式(需要保证当前机器有两张gpu): +运行方式(需要保证当前机器有两张 gpu): .. code-block:: bash @@ -351,7 +351,7 @@ LAUNCH INFO 2022-05-31 02:35:16,957 Run Pod: jbvsbv, replicas 2, status ready LAUNCH INFO 2022-05-31 02:35:16,984 Watching Pod: jbvsbv, replicas 2, status running -日志信息位于log目录下, loss的输出信息: +日志信息位于 log 目录下, loss 的输出信息: .. 
code-block:: bash diff --git a/docs/guides/06_distributed_training/moe_cn.rst b/docs/guides/06_distributed_training/moe_cn.rst index 106b42964ff..a6ffe21cd02 100644 --- a/docs/guides/06_distributed_training/moe_cn.rst +++ b/docs/guides/06_distributed_training/moe_cn.rst @@ -5,7 +5,7 @@ MoE 通常来讲,模型规模的扩展会导致训练成本显著增加,计算资源的限制成为了大规模密集模型训练的瓶颈。为了解决这个问题, `《Outrageously large neural networks: The sparsely-gated mixture-of-experts layer》 `__ -提出了一种基于稀疏MoE层的深度学习模型架构,即将大模型拆分成多个小模型(专家, ``expert`` ), 每轮迭代根据样本决定激活一部分专家用于计算,达到了节省计算资源的效果; +提出了一种基于稀疏 MoE 层的深度学习模型架构,即将大模型拆分成多个小模型(专家, ``expert`` ), 每轮迭代根据样本决定激活一部分专家用于计算,达到了节省计算资源的效果; 并引入可训练并确保稀疏性的门( ``gate`` )机制,以保证计算能力的优化。 一、原理介绍 @@ -16,28 +16,28 @@ MoE :alt: moe_layer :align: center -与密集模型不同,MoE将模型的某一层扩展为多个具有相同结构的专家网络( ``expert`` ),并由门( ``gate`` )网络决定激活哪些 ``expert`` 用于计算,从而实现超大规模稀疏模型的训练。 -以上图为例,示例模型包含3个模型层;如(a)到(b),将中间层扩展为具有 ``n`` 个 ``expert`` 的MoE结构,并引入 ``Gating network`` 和 ``Top_k`` 机制,MoE细节见图(c),计算过程如下述公式。 +与密集模型不同,MoE 将模型的某一层扩展为多个具有相同结构的专家网络( ``expert`` ),并由门( ``gate`` )网络决定激活哪些 ``expert`` 用于计算,从而实现超大规模稀疏模型的训练。 +以上图为例,示例模型包含 3 个模型层;如(a)到(b),将中间层扩展为具有 ``n`` 个 ``expert`` 的 MoE 结构,并引入 ``Gating network`` 和 ``Top_k`` 机制,MoE 细节见图(c),计算过程如下述公式。 .. math:: MoE\left ( {x} \right )=\sum ^{n}_{i=1} \left ( {{G\left ( {x} \right )}_{i}{E}_{i}\left ( {x} \right )} \right ) .. 
math:: G\left ( {x} \right )=TopK\left ( {softmax\left ( {{W}_{g}\left ( {x} \right )+ϵ} \right )} \right ) -上述第1个公式表示了包含 ``n`` 个专家的MoE层的计算过程。具体来讲,首先对样本 ``x`` 进行门控计算, ``W`` 表示权重矩阵;然后由 ``Softmax`` 处理后获得样本 ``x`` 被分配到各个 ``expert`` 的权重; +上述第 1 个公式表示了包含 ``n`` 个专家的 MoE 层的计算过程。具体来讲,首先对样本 ``x`` 进行门控计算, ``W`` 表示权重矩阵;然后由 ``Softmax`` 处理后获得样本 ``x`` 被分配到各个 ``expert`` 的权重; 然后只取前 ``k`` (通常取 1 或者 2)个最大权重,最终整个 ``MoE Layer`` 的计算结果就是选中的 ``k`` 个专家网络输出的加权和。 二、功能效果 ------------------------- -使用MoE结构,可以在计算成本次线性增加的同时实现超大规模模型训练,为恒定的计算资源预算带来巨大增益。 +使用 MoE 结构,可以在计算成本次线性增加的同时实现超大规模模型训练,为恒定的计算资源预算带来巨大增益。 三、动态图使用方法 ------------------------ -下面我们将分别介绍如何在动态图模式下使用飞桨框架进行MoE架构的适配和训练。以下代码(train_moe.py)在Paddle2.3以上可以运行,建议将Paddle版本升级到最新版. +下面我们将分别介绍如何在动态图模式下使用飞桨框架进行 MoE 架构的适配和训练。以下代码(train_moe.py)在 Paddle2.3 以上可以运行,建议将 Paddle 版本升级到最新版. 首先导入需要的包 @@ -69,7 +69,7 @@ MoE x = self.h4toh(x) return x -然后初始化分布式环境,并构建expert通信组moe_group +然后初始化分布式环境,并构建 expert 通信组 moe_group .. code-block:: python @@ -90,7 +90,7 @@ MoE exp_layer = ExpertLayer(d_model, d_hidden) experts_list.append(exp_layer) -接着调用 ``MoELayer`` API 封装并创建出MoE模型 +接着调用 ``MoELayer`` API 封装并创建出 MoE 模型 .. 
code-block:: python diff --git a/docs/guides/06_distributed_training/pipeline_parallel_cn.rst b/docs/guides/06_distributed_training/pipeline_parallel_cn.rst index 040499be557..2cc80b0ae26 100644 --- a/docs/guides/06_distributed_training/pipeline_parallel_cn.rst +++ b/docs/guides/06_distributed_training/pipeline_parallel_cn.rst @@ -3,7 +3,7 @@ 流水线并行 ======================= -通常来讲,训练更大规模的网络模型可以在多种任务上取得更好的效果,如提升图像分类任务的准确率。然而,随着参数规模的扩大,AI加速卡存储(如GPU显存)容量问题和卡的协同计算问题成为了训练超大模型的瓶颈。流水线并行从模型切分和调度执行两个角度解决了这些问题,下面将以飞桨流水线并行为例,介绍下基本原理和使用方法。 +通常来讲,训练更大规模的网络模型可以在多种任务上取得更好的效果,如提升图像分类任务的准确率。然而,随着参数规模的扩大,AI 加速卡存储(如 GPU 显存)容量问题和卡的协同计算问题成为了训练超大模型的瓶颈。流水线并行从模型切分和调度执行两个角度解决了这些问题,下面将以飞桨流水线并行为例,介绍下基本原理和使用方法。 一、原理介绍 ------------------- @@ -13,7 +13,7 @@ :alt: pipeline :align: center -与数据并行不同,流水线并行将模型的不同层放置到不同的计算设备,降低单个计算设备的显存消耗,从而实现超大规模模型训练。以上图为例,示例模型包含四个模型层。该模型被切分为三个部分,并分别放置到三个不同的计算设备。即,第1层放置到设备0,第2层和第三3层放置到设备1,第4层放置到设备2。相邻设备间通过通信链路传输数据。具体地讲,前向计算过程中,输入数据首先在设备0上通过第1层的计算得到中间结果,并将中间结果传输到设备1,然后在设备1上计算得到第2层和第3层的输出,并将模型第3层的输出结果传输到设备2,在设备2上经由最后一层的计算得到前向计算结果。反向传播过程类似。最后,各个设备上的网络层会使用反向传播过程计算得到的梯度更新参数。由于各个设备间传输的仅是相邻设备间的输出张量,而不是梯度信息,因此通信量较小。 +与数据并行不同,流水线并行将模型的不同层放置到不同的计算设备,降低单个计算设备的显存消耗,从而实现超大规模模型训练。以上图为例,示例模型包含四个模型层。该模型被切分为三个部分,并分别放置到三个不同的计算设备。即,第 1 层放置到设备 0,第 2 层和第三 3 层放置到设备 1,第 4 层放置到设备 2。相邻设备间通过通信链路传输数据。具体地讲,前向计算过程中,输入数据首先在设备 0 上通过第 1 层的计算得到中间结果,并将中间结果传输到设备 1,然后在设备 1 上计算得到第 2 层和第 3 层的输出,并将模型第 3 层的输出结果传输到设备 2,在设备 2 上经由最后一层的计算得到前向计算结果。反向传播过程类似。最后,各个设备上的网络层会使用反向传播过程计算得到的梯度更新参数。由于各个设备间传输的仅是相邻设备间的输出张量,而不是梯度信息,因此通信量较小。 下图给出流水线并行的时序图。最简配置流水线并行模型下,任意时刻只有单个计算设备处于计算状态,其它计算设备则处于空闲状态,因此设备利用率和计算效率较差。 @@ -22,14 +22,14 @@ :alt: pipeline_timeline1 :align: center -为了优化流水线并行中设备的计算效率,可以进一步将 mini-batch 切分成若干更小粒度的 micro-batch,以提升流水线并行的并发度,进而达到提升设备利用率和计算效率的目的。如下图所示,一个 mini-batch 被切分为4个micro-batch;前向阶段,每个设备依次计算单个 micro-batch 的结果;从而增加了设备间的并发度,降低了流水线并行 bubble 空间比例,提高了计算效率。 +为了优化流水线并行中设备的计算效率,可以进一步将 mini-batch 切分成若干更小粒度的 micro-batch,以提升流水线并行的并发度,进而达到提升设备利用率和计算效率的目的。如下图所示,一个 mini-batch 被切分为 4 个 micro-batch;前向阶段,每个设备依次计算单个 
micro-batch 的结果;从而增加了设备间的并发度,降低了流水线并行 bubble 空间比例,提高了计算效率。 .. image:: ./images/pipeline-3.png :width: 600 :alt: pipeline_timeline2 :align: center -如上图所示先进行前向计算,再进行反向计算,这种方式我们称之为 F-the-B 模式。不难看出这种 F-then-B 模式由于缓存了多个 micro-batch 的中间变量和梯度,显存的实际利用率并不高。接下来我们介绍一种前向计算和反向计算交叉进行的方式,即 1F1B 模型。在 1F1B 模式下,前向计算和反向计算交叉进行,可以及时释放不必要的中间变量。我们以下图1F1B中 stage4 的 F42(stage4的第2个 micro-batch 的前向计算)为例,F42 在计算前,F41 的反向 B41(stage4的第1个 micro-batch 的反向计算)已经计算结束,即可释放 F41 的中间变量,从而 F42 可以复用 F41 中间变量的显存。1F1B 方式相比 F-then-B 方式峰值显存可以节省37.5%,对比朴素流水线并行峰值显存明显下降,设备资源利用率显著提升。 +如上图所示先进行前向计算,再进行反向计算,这种方式我们称之为 F-the-B 模式。不难看出这种 F-then-B 模式由于缓存了多个 micro-batch 的中间变量和梯度,显存的实际利用率并不高。接下来我们介绍一种前向计算和反向计算交叉进行的方式,即 1F1B 模型。在 1F1B 模式下,前向计算和反向计算交叉进行,可以及时释放不必要的中间变量。我们以下图 1F1B 中 stage4 的 F42(stage4 的第 2 个 micro-batch 的前向计算)为例,F42 在计算前,F41 的反向 B41(stage4 的第 1 个 micro-batch 的反向计算)已经计算结束,即可释放 F41 的中间变量,从而 F42 可以复用 F41 中间变量的显存。1F1B 方式相比 F-then-B 方式峰值显存可以节省 37.5%,对比朴素流水线并行峰值显存明显下降,设备资源利用率显著提升。 .. image:: ./images/pipeline-4.png :width: 600 @@ -45,9 +45,9 @@ 三、动态图使用方法 ------------------------ -流水线并行根据执行的策略,可以分为 F-then-B 和 1F1B 两种模式,目前Paddle动态图流水线只支持 1F1B 模式。 +流水线并行根据执行的策略,可以分为 F-then-B 和 1F1B 两种模式,目前 Paddle 动态图流水线只支持 1F1B 模式。 -下面代码在Paddle2.0以上可以运行,建议将Paddle版本升级到最新版 +下面代码在 Paddle2.0 以上可以运行,建议将 Paddle 版本升级到最新版 首先导入需要的包 @@ -98,7 +98,7 @@ num_workers=2) -构建一个可以运行流水线的模型,模型的layer需要被LayerDesc或者继承了LayerDesc的SharedLayerDesc包裹,这里因为不需要共享参数,所以就使用LayerDesc +构建一个可以运行流水线的模型,模型的 layer 需要被 LayerDesc 或者继承了 LayerDesc 的 SharedLayerDesc 包裹,这里因为不需要共享参数,所以就使用 LayerDesc .. code-block:: python class ReshapeHelp(Layer): @@ -162,7 +162,7 @@ fleet.init(is_collective=True, strategy=strategy) -为了保证流水线并行参数初始化和普通模型初始化一致,需要在不同卡间设置不同的seed。 +为了保证流水线并行参数初始化和普通模型初始化一致,需要在不同卡间设置不同的 seed。 .. 
code-block:: python @@ -183,11 +183,11 @@ 然后创建出流水线并行的模型, -AlexNetPipeDesc(....):这一步主要是在切分普通模型的layer,将属于当前卡的layer添加到模型里面 +AlexNetPipeDesc(....):这一步主要是在切分普通模型的 layer,将属于当前卡的 layer 添加到模型里面 -fleet.distributed_model(....):这一步则是真正进行流水线模型并行的初始化,会得到之前构建拓扑组已经组建好的流水线通信组,并且如果流水线并行混合了数据并行,模型并行,会对数据并行和模型并行相关参数进行broadcast +fleet.distributed_model(....):这一步则是真正进行流水线模型并行的初始化,会得到之前构建拓扑组已经组建好的流水线通信组,并且如果流水线并行混合了数据并行,模型并行,会对数据并行和模型并行相关参数进行 broadcast -fleet.distributed_optimizer(...):这一步则是为优化器添加分布式属性,如果流水线并行混合了数据并行,group_sharded,就会对相应梯度进行all reduce +fleet.distributed_optimizer(...):这一步则是为优化器添加分布式属性,如果流水线并行混合了数据并行,group_sharded,就会对相应梯度进行 all reduce .. code-block:: python @@ -244,7 +244,7 @@ fleet.distributed_optimizer(...):这一步则是为优化器添加分布式属 开始训练 -model.train_batch(...):这一步主要就是执行1F1B的流水线并行方式 +model.train_batch(...):这一步主要就是执行 1F1B 的流水线并行方式 .. code-block:: python @@ -254,14 +254,14 @@ model.train_batch(...):这一步主要就是执行1F1B的流水线并行方式 loss = model.train_batch([image, label], optimizer, scheduler) print("pp_loss: ", loss.numpy()) -运行方式(需要保证当前机器有两张GPU): +运行方式(需要保证当前机器有两张 GPU): .. code-block:: bash export CUDA_VISIBLE_DEVICES=0,1 - python -m paddle.distributed.launch alexnet_dygraph_pipeline.py # alexnet_dygraph_pipeline.py是用户运行动态图流水线的python文件 + python -m paddle.distributed.launch alexnet_dygraph_pipeline.py # alexnet_dygraph_pipeline.py 是用户运行动态图流水线的 python 文件 -基于AlexNet的完整的流水线并行动态图代码:`alex `_。 +基于 AlexNet 的完整的流水线并行动态图代码:`alex `_。 控制台输出信息如下: @@ -295,7 +295,7 @@ model.train_batch(...):这一步主要就是执行1F1B的流水线并行方式 LAUNCH INFO 2022-05-31 02:47:23,605 Run Pod: ldmpbt, replicas 2, status ready LAUNCH INFO 2022-05-31 02:47:23,629 Watching Pod: ldmpbt, replicas 2, status running -日志信息位于log目录下: +日志信息位于 log 目录下: .. 
code-block:: bash diff --git a/docs/guides/10_contribution/community_contribution_cn.md b/docs/guides/10_contribution/community_contribution_cn.md index b7fcf595663..dea8119b37b 100644 --- a/docs/guides/10_contribution/community_contribution_cn.md +++ b/docs/guides/10_contribution/community_contribution_cn.md @@ -4,12 +4,12 @@ ## 社区支持 -### QQ群/微信群 +### QQ 群/微信群 飞桨官方组织了官方交流群,加入社群即可与其他开发者共同交流在深度学习中的所见所得。除此之外,飞桨还有各个垂直方向技术交流群,在这里你将能更快遇到志同道合的开发者,与他们一起交流学习。 我们鼓励开发者们在社群中发挥所长,发表自己的技术见解,与社群开发者互帮互助,共同成长。 -[点击加入PaddlePaddle开发者技术交流群](https://www.paddlepaddle.org.cn/support/news?action=detail&id=2439) +[点击加入 PaddlePaddle 开发者技术交流群](https://www.paddlepaddle.org.cn/support/news?action=detail&id=2439) ## 飞桨论坛 @@ -21,14 +21,14 @@ ### GitHub/Gitee -在这里,开发者可以在开源项目中以PR的形式贡献代码,也可以在遇到问题时提交issue与相对应的开源贡献者进行交流沟通,飞桨的开源项目都会在这里向大家展示。 -当然,你也可以选择自己喜欢的开源项目,并为它投上一颗宝贵的Star,让项目开发者感受到来自你的鼓励与支持。 +在这里,开发者可以在开源项目中以 PR 的形式贡献代码,也可以在遇到问题时提交 issue 与相对应的开源贡献者进行交流沟通,飞桨的开源项目都会在这里向大家展示。 +当然,你也可以选择自己喜欢的开源项目,并为它投上一颗宝贵的 Star,让项目开发者感受到来自你的鼓励与支持。 GitHub: [PaddlePaddle/Paddle](https://github.com/PaddlePaddle/Paddle)、Gitee:[PaddlePaddle/Paddle](https://gitee.com/paddlepaddle/Paddle) ### 飞桨特殊兴趣小组(PPSIG) -由飞桨社区开发者负责和参与的PPSIG,是飞桨开源社区的重要组成部分。目前有 12个活跃的 PPSIG,围绕特定领域开展协作。还有更多PPSIG正在组建中,欢迎更多开发者们踊跃加入,参与飞桨生态共建。 +由飞桨社区开发者负责和参与的 PPSIG,是飞桨开源社区的重要组成部分。目前有 12 个活跃的 PPSIG,围绕特定领域开展协作。还有更多 PPSIG 正在组建中,欢迎更多开发者们踊跃加入,参与飞桨生态共建。 [飞桨特殊兴趣小组(SIG)](https://www.paddlepaddle.org.cn/sig) @@ -53,14 +53,14 @@ GitHub: [PaddlePaddle/Paddle](https://github.com/PaddlePaddle/Paddle)、Gitee: - 先说明当下遇到的行业痛点,从而引出阐述该项目的价值,包括但不局限于为生活提供便利、促进某个行业发展、趣味十足的小案例等 - 项目是什么(What) - 数据集:如用到了数据集,需说明数据集来源,测试集、训练集的比例或数量 - - 模型或算法:如使用了飞桨的模型或算法,请说明飞桨模型的名称和相关 GitHub链接 + - 模型或算法:如使用了飞桨的模型或算法,请说明飞桨模型的名称和相关 GitHub 链接 - 项目的效果:文章中最好能有图表来显示效果 - 怎么做(How) - - 做事情要有始有终,文章也一样需要有一个好的ending,结尾要包括项目的效果 + - 做事情要有始有终,文章也一样需要有一个好的 ending,结尾要包括项目的效果 [前往飞桨开发者说文章](https://mp.weixin.qq.com/mp/homepage?__biz=Mzg2OTEzODA5MA==&hid=16&sn=0561fc80d64fc079892454aafeb47bc4&scene=18) -### 飞桨开发者说Live直播 
+### 飞桨开发者说 Live 直播 通过飞桨开发者说直播的形式,分享在项目开发过程中的实践经验和踩坑破解的方法。我们相信,观看分享的观众也是你的同行者,会带给你无尽的力量。 @@ -68,16 +68,16 @@ GitHub: [PaddlePaddle/Paddle](https://github.com/PaddlePaddle/Paddle)、Gitee: ### 布道分享 -以代码和项目为基础,在飞桨官方或开发者自组织的Meetup/j技术沙龙中向开发者宣讲技术,让飞桨PaddlePaddle提供的各种技术工具,变得更容易被理解和应用,而每一场分享布道的Meetup,也会带给你更大的行业和技术影响力。 +以代码和项目为基础,在飞桨官方或开发者自组织的 Meetup/j 技术沙龙中向开发者宣讲技术,让飞桨 PaddlePaddle 提供的各种技术工具,变得更容易被理解和应用,而每一场分享布道的 Meetup,也会带给你更大的行业和技术影响力。 请联系飞桨小助手(paddle-help)咨询投稿和参与方式。 ## 全国各地开源社区 -飞桨领航团是飞桨开发者的兴趣社区,在各个城市/高校领航团团长及成员的热情支持下,飞桨领航团已建立150个社群,覆盖29个省级行政区,133个高校,并且在持续增长中。开发者们可以参与丰富的本地技术沙龙、Meetup、及线上交流。 +飞桨领航团是飞桨开发者的兴趣社区,在各个城市/高校领航团团长及成员的热情支持下,飞桨领航团已建立 150 个社群,覆盖 29 个省级行政区,133 个高校,并且在持续增长中。开发者们可以参与丰富的本地技术沙龙、Meetup、及线上交流。 飞桨领航团面向所有对人工智能及深度学习领域感兴趣的开发者开放,欢迎广大开发者们加入领航团,结识更多本地技术同好,共享开源成果与快乐。 [查看我所在的本地开源社区](https://www.paddlepaddle.org.cn/ppdenavigategroup) -加入飞桨领航团QQ群:484908840 +加入飞桨领航团 QQ 群:484908840 diff --git a/docs/guides/10_contribution/docs_contribution.md b/docs/guides/10_contribution/docs_contribution.md index 4a538974763..b1166573e07 100644 --- a/docs/guides/10_contribution/docs_contribution.md +++ b/docs/guides/10_contribution/docs_contribution.md @@ -1,7 +1,7 @@ -# 中文API文档贡献指南 +# 中文 API 文档贡献指南 -PaddlePaddle 的中文API文档以 rst 文件的格式,存储于 [PaddlePaddle/docs](https://github.com/PaddlePaddle/docs) 中,通过技术手段,将rst文件转为 HTML文件后呈现至[官网API文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/index_cn.html) 。如果想要修改中文API文档,需要按以下流程完成修改。 +PaddlePaddle 的中文 API 文档以 rst 文件的格式,存储于 [PaddlePaddle/docs](https://github.com/PaddlePaddle/docs) 中,通过技术手段,将 rst 文件转为 HTML 文件后呈现至[官网 API 文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/index_cn.html) 。如果想要修改中文 API 文档,需要按以下流程完成修改。 ## 一、修改前的准备工作 @@ -10,7 +10,7 @@ PaddlePaddle 的中文API文档以 rst 文件的格式,存储于 [PaddlePaddle 先跳转到 [PaddlePaddle/docs](https://github.com/PaddlePaddle/docs) GitHub 首页,然后单击 Fork 按钮,生成自己仓库下的目录,比如你的 GitHub 用户名为 USERNAME,则生成: https://github.com/USERNAME/docs。 ### 1.2 Clone -将你目录下的远程仓库clone到本地。 +将你目录下的远程仓库 clone 到本地。 ``` ➜ git clone 
https://github.com/USERNAME/docs ➜ cd docs @@ -18,7 +18,7 @@ PaddlePaddle 的中文API文档以 rst 文件的格式,存储于 [PaddlePaddle ### 1.3 创建本地分支 -docs 目前使用 [Git流分支模型](https://nvie.com/posts/a-successful-git-branching-model/)进行开发,测试,发行和维护。 +docs 目前使用 [Git 流分支模型](https://nvie.com/posts/a-successful-git-branching-model/)进行开发,测试,发行和维护。 所有的 feature 和 bug fix 的开发工作都应该在一个新的分支上完成,一般从 develop 分支上创建新分支。 @@ -33,7 +33,7 @@ docs 目前使用 [Git流分支模型](https://nvie.com/posts/a-successful-git-b Paddle 开发人员使用 [pre-commit](https://pre-commit.com/) 工具来管理 Git 预提交钩子。 它可以帮助你格式化源代码(C++,Python),在提交(commit)前自动检查一些基本事宜(如每个文件只有一个 EOL,Git 中不要添加大文件等)。 -pre-commit测试是 Travis-CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 Paddle,首先安装并在当前目录运行它: +pre-commit 测试是 Travis-CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 Paddle,首先安装并在当前目录运行它: ``` ➜ pip install pre-commit @@ -42,46 +42,46 @@ pre-commit测试是 Travis-CI 中单元测试的一部分,不满足钩子的 P Paddle 使用 clang-format 来调整 C/C++ 源代码格式,请确保 clang-format 版本在 3.8 以上。 -**注**:通过``pip install pre-commit``和 ``conda install -c conda-forge pre-commit``安装的yapf稍有不同,Paddle 开发人员使用的是 ``pip install pre-commit``。 +**注**:通过``pip install pre-commit``和 ``conda install -c conda-forge pre-commit``安装的 yapf 稍有不同,Paddle 开发人员使用的是 ``pip install pre-commit``。 -## 二、正式修改API文档 +## 二、正式修改 API 文档 -目前,[docs](https://github.com/PaddlePaddle/docs) 的 `docs/api/` 下存放了与 Paddle 中文API文档所有相关的文件。说明如下: +目前,[docs](https://github.com/PaddlePaddle/docs) 的 `docs/api/` 下存放了与 Paddle 中文 API 文档所有相关的文件。说明如下: ``` docs/api -|--paddle # 存放中文API文档,文件名为api_name_cn.rst,路径为暴露的路径 +|--paddle # 存放中文 API 文档,文件名为 api_name_cn.rst,路径为暴露的路径 | |--amp | |--compat | |--device ... 
| |--utils | |--vision -|-- api_label # 英文API文档的标签,用于API文档的相互引用 +|-- api_label # 英文 API 文档的标签,用于 API 文档的相互引用 |-- display_doc_list -|-- gen_alias_api.py # 生成全量的API别名关系 +|-- gen_alias_api.py # 生成全量的 API 别名关系 |-- gen_alias_mapping.sh # 已废弃 -|-- gen_doc.py # 生成英文API文档目录树程序 -|-- gen_doc.sh # 生成英文API文档目录树脚本 -|-- index_cn.rst # 官网中文API文档首页 -|-- index_en_rst # 官网英文API文档首页 -|-- not_display_doc_list # 官网不展示的API列表 +|-- gen_doc.py # 生成英文 API 文档目录树程序 +|-- gen_doc.sh # 生成英文 API 文档目录树脚本 +|-- index_cn.rst # 官网中文 API 文档首页 +|-- index_en_rst # 官网英文 API 文档首页 +|-- not_display_doc_list # 官网不展示的 API 列表 ``` ### 2.1 新增 API 文档 -当你新增了一个API时,需要同时新增该API的中文文档。你需要在该API文档的暴露路径下,新建一个 api_name_cn.rst 文件,文件内容需要按照 [飞桨API文档书写规范](https://github.com/PaddlePaddle/docs/wiki/%E9%A3%9E%E6%A1%A8API%E6%96%87%E6%A1%A3%E4%B9%A6%E5%86%99%E8%A7%84%E8%8C%83)进行书写。 +当你新增了一个 API 时,需要同时新增该 API 的中文文档。你需要在该 API 文档的暴露路径下,新建一个 api_name_cn.rst 文件,文件内容需要按照 [飞桨 API 文档书写规范](https://github.com/PaddlePaddle/docs/wiki/%E9%A3%9E%E6%A1%A8API%E6%96%87%E6%A1%A3%E4%B9%A6%E5%86%99%E8%A7%84%E8%8C%83)进行书写。 -**注意:** 暴露路径是指,在开发API时,确认的API路径,如 `paddle.add`、`paddle.nn.Conv2D` 等。 +**注意:** 暴露路径是指,在开发 API 时,确认的 API 路径,如 `paddle.add`、`paddle.nn.Conv2D` 等。 ### 2.2 修改 API 文档 -修改中文API文档,可以通过API的URL,确定API文档的源文件。如 ``paddle.all`` 的中文API文档URL为:https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/all_cn.html,URL路径中,``api/paddle/all_cn.html`` 即对应 ``(docs/docs/)api/paddle/all_cn.rst`` , 因此,可以很快的确定中文API文档的源文件,然后直接修改即可。 +修改中文 API 文档,可以通过 API 的 URL,确定 API 文档的源文件。如 ``paddle.all`` 的中文 API 文档 URL 为:https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/all_cn.html,URL 路径中,``api/paddle/all_cn.html`` 即对应 ``(docs/docs/)api/paddle/all_cn.rst`` , 因此,可以很快的确定中文 API 文档的源文件,然后直接修改即可。 ## 三、提交&push -### 3.1 提交&触发CI单测 +### 3.1 提交&触发 CI 单测 - 修改 ``paddle/all_cn.rst`` 这个文件,并提交这个文件 @@ -121,7 +121,7 @@ Tabs remover.........................................(no files to check)Skipped convert jinja2 into html.............................(no files to 
check)Skipped convert-markdown-into-html...........................(no files to check)Skipped ``` - 全部Passed 或 Skipped后,即可进入下一步。如果有 Failed 文件,则需要按照规范,修改出现Failed 的文件后,重新 ``git add -> pre-commit`` ,直至没有 Failed 文件。 + 全部 Passed 或 Skipped 后,即可进入下一步。如果有 Failed 文件,则需要按照规范,修改出现 Failed 的文件后,重新 ``git add -> pre-commit`` ,直至没有 Failed 文件。 ``` ➜ pre-commit yapf.................................................(no files to check)Skipped @@ -185,22 +185,22 @@ upstream ➜ git push origin my-cool-stuff ``` -## 四、提交PR +## 四、提交 PR -在你push后在对应仓库会提醒你进行PR操作,按格式填写PR内容,即可。 +在你 push 后在对应仓库会提醒你进行 PR 操作,按格式填写 PR 内容,即可。 ## 五、review&merge -提交PR后,可以指定 Paddle 的同学进行 Review。 目前,Paddle 负责API文档的同学是 @TCChenLong、@jzhang533、@saxon-zh、@Heeenrrry、@dingjiaweiww等 。 +提交 PR 后,可以指定 Paddle 的同学进行 Review。 目前,Paddle 负责 API 文档的同学是 @TCChenLong、@jzhang533、@saxon-zh、@Heeenrrry、@dingjiaweiww 等 。 ## CI -Paddle 中与文档相关的CI 流水线是 `Docs-NEW`等,主要对以下几个方面进行检查: +Paddle 中与文档相关的 CI 流水线是 `Docs-NEW`等,主要对以下几个方面进行检查: -- 检查PR CLA -- 检查增量修改的API是否需要相关人员审核 +- 检查 PR CLA +- 检查增量修改的 API 是否需要相关人员审核 - 若需要执行示例代码则执行看能否正常运行 -如果无法通过该CI,请点击对应CI的details,查看CI运行的的log,并根据log修改你的PR,直至通过CI。 +如果无法通过该 CI,请点击对应 CI 的 details,查看 CI 运行的的 log,并根据 log 修改你的 PR,直至通过 CI。 diff --git a/docs/guides/10_contribution/faq_cn.rst b/docs/guides/10_contribution/faq_cn.rst index 31a66d0df0a..6d1a7590991 100644 --- a/docs/guides/10_contribution/faq_cn.rst +++ b/docs/guides/10_contribution/faq_cn.rst @@ -6,37 +6,37 @@ FAQ .. contents:: -1. CLA签署不成功,怎么办? +1. CLA 签署不成功,怎么办? --------------------------- -由于 `CLA `_ 是第三方开源库,有时候会不稳定。如果确定自己已签署CLA,但CLA没触发成功,可尝试: +由于 `CLA `_ 是第三方开源库,有时候会不稳定。如果确定自己已签署 CLA,但 CLA 没触发成功,可尝试: -* 关闭并重新开启本PR,来重新触发CLA。点击 :code:`Close pull request` ,再点击 :code:`Reopen pull request` ,并等待几分钟。 -* 如果上述操作重复2次仍未生效,请重新提一个PR或评论区留言。 +* 关闭并重新开启本 PR,来重新触发 CLA。点击 :code:`Close pull request` ,再点击 :code:`Reopen pull request` ,并等待几分钟。 +* 如果上述操作重复 2 次仍未生效,请重新提一个 PR 或评论区留言。 -2. CI没有触发,怎么办? +2. CI 没有触发,怎么办? 
------------------------ -* 请在commit信息中添加正确的CI触发规则: +* 请在 commit 信息中添加正确的 CI 触发规则: - * develop分支请添加 :code:`test=develop` - * release分支请添加如 :code:`test=release/1.4` 来触发release/1.4分支 + * develop 分支请添加 :code:`test=develop` + * release 分支请添加如 :code:`test=release/1.4` 来触发 release/1.4 分支 * 文档预览请添加 :code:`test=document_preview` -* 该CI触发规则以commit为单位,即对同一个PR来说,不管前面的commit是否已经添加,如果新commit想继续触发CI,那么仍然需要添加。 -* 添加CI触发规则后,仍有部分CI没有触发:请关闭并重新开启本PR,来重新触发CI。 +* 该 CI 触发规则以 commit 为单位,即对同一个 PR 来说,不管前面的 commit 是否已经添加,如果新 commit 想继续触发 CI,那么仍然需要添加。 +* 添加 CI 触发规则后,仍有部分 CI 没有触发:请关闭并重新开启本 PR,来重新触发 CI。 -3. CI随机挂,即错误信息与本PR无关,怎么办? +3. CI 随机挂,即错误信息与本 PR 无关,怎么办? -------------------------------------- -由于develop分支代码的不稳定性,CI可能会随机挂。 -如果确定CI错误和本PR无关,请在评论区贴上错误截图和错误链接。 +由于 develop 分支代码的不稳定性,CI 可能会随机挂。 +如果确定 CI 错误和本 PR 无关,请在评论区贴上错误截图和错误链接。 -4. 如何修改API.spec? +4. 如何修改 API.spec? ----------------------- -为了保证API接口/文档的稳定性,我们对API进行了监控,即API.spec文件。 +为了保证 API 接口/文档的稳定性,我们对 API 进行了监控,即 API.spec 文件。 修改方法请参考 `diff_api.py `_ 。 -**注意**:提交PR后请查看下diff,不要改到非本PR修改的API上。 +**注意**:提交 PR 后请查看下 diff,不要改到非本 PR 修改的 API 上。 diff --git a/docs/guides/10_contribution/hackathon_cn.md b/docs/guides/10_contribution/hackathon_cn.md index 5a97c001202..a2ded79d5a6 100644 --- a/docs/guides/10_contribution/hackathon_cn.md +++ b/docs/guides/10_contribution/hackathon_cn.md @@ -15,10 +15,10 @@ ### 1、本次活动整体流程,具体说明如下: - 1、活动报名:点击**[此处](https://www.wjx.top/vj/t8yHphe.aspx?udsid=268437)** 填写相关信息,完成活动报名; -- 2、报名成功后,你可以进入百度飞桨黑客松QQ交流群(群号:343734965),所有活动相关信息都会在群中及时同步; +- 2、报名成功后,你可以进入百度飞桨黑客松 QQ 交流群(群号:343734965),所有活动相关信息都会在群中及时同步; - 3、**任务认领及开放性任务提交** - **任务认领**:首先 fork 你想要认领的任务 ISSUE 所在 repo,然后在 GitHub **[Pinned ISSUE](https://github.com/PaddlePaddle/Paddle/issues/35940)** 按格式相应回复,**完成任务认领**;鼓励大家在 ISSUE 下自由组队,完成任务~ - - **开放性任务提交**:如果你对飞桨框架以及飞桨家族相关项目有更好的功能建议,也可以通过在相应仓库下提交 ISSUE 的形式,提交开放性任务,并在 GitHub 的 **[Pinned ISSUE](https://github.com/PaddlePaddle/Paddle/issues/35940)**,按格式回复你的信息以及你提交的开放性任务,黑客松评审组将在提交后3个工作日内反馈该任务是否通过审核以及对应难度; + - 
**开放性任务提交**:如果你对飞桨框架以及飞桨家族相关项目有更好的功能建议,也可以通过在相应仓库下提交 ISSUE 的形式,提交开放性任务,并在 GitHub 的 **[Pinned ISSUE](https://github.com/PaddlePaddle/Paddle/issues/35940)**,按格式回复你的信息以及你提交的开放性任务,黑客松评审组将在提交后 3 个工作日内反馈该任务是否通过审核以及对应难度; - 4、完成任务认领或开放性任务通过审核后, @@ -26,7 +26,7 @@ - **困难任务**: - 按照任务要求在认领任务时 fork 的代码仓库(repo)中新建 RFC(Request for Comments),并对任务进行技术方案设计; - - **10月20日** 前将 RFC 以 Pull Request(PR) 形式提交至任务代码仓库(repo),并按模板发送邮件至 paddle-hack@baidu.com 提交; + - **10 月 20 日** 前将 RFC 以 Pull Request(PR) 形式提交至任务代码仓库(repo),并按模板发送邮件至 paddle-hack@baidu.com 提交; - 黑客松评审组将在每个任务提交的所有有效方案中选择一个最佳方案,确认为本任务的 Leading Developer,并在开发方向上进行一定的指导,完善后的技术方案将会共享给所有任务提交小组,推荐大家基于最佳方案进行相应开发; - **开放性任务**:按照任务要求进行开发即可; @@ -58,8 +58,8 @@ PS: -1. 若一个任务出现两个及以上作品提交,将根据提交 Pull Request的时间顺序进行验收,第一个通过验收的作品可获得奖金。 -2. 若该任务仅有一个作品提交,未100%完成但完成部分足够优质,经评审组评审后,可根据完成进度按照对应比例进行现金奖励发放。 +1. 若一个任务出现两个及以上作品提交,将根据提交 Pull Request 的时间顺序进行验收,第一个通过验收的作品可获得奖金。 +2. 若该任务仅有一个作品提交,未 100%完成但完成部分足够优质,经评审组评审后,可根据完成进度按照对应比例进行现金奖励发放。 | **难度** | **标志** | | **金额** | | ------- | --------- | ----------- | @@ -108,19 +108,19 @@ PS: - 3、活动报名 -(1)2021年9月23日至2021年11月12日,首先在活动官网完成报名。若组队报名,则由队长统一填写报名信息,完成报名即可; +(1)2021 年 9 月 23 日至 2021 年 11 月 12 日,首先在活动官网完成报名。若组队报名,则由队长统一填写报名信息,完成报名即可; -(2)在GitHub 的 [Pinned ISSUE](https://github.com/PaddlePaddle/Paddle/issues/35940) 中,可以查看到本次活动的所有任务,选取感兴趣的任务,按格式回复 队名、任务序号、任务对应 fork 链接,即为完成任务认领。 +(2)在 GitHub 的 [Pinned ISSUE](https://github.com/PaddlePaddle/Paddle/issues/35940) 中,可以查看到本次活动的所有任务,选取感兴趣的任务,按格式回复 队名、任务序号、任务对应 fork 链接,即为完成任务认领。 **补充信息**: (1)**组队报名方法**:由队长统一填写报名信息(包括队员基本信息)即可。也可以个人形式完成报名后,联系工作人员进行组队; -(2)官方交流群 【飞桨黑客松QQ交流群】:343734965。 +(2)官方交流群 【飞桨黑客松 QQ 交流群】:343734965。 - 4、任务开发 -当你完成相应开发后,相应的代码需要以 Pull Request 的形式上传到任务 ISSUE 所在仓库,并在 [Pinned ISSUE](https://github.com/PaddlePaddle/Paddle/issues/35940) 中,按格式回复相关的提交信息。黑客松评审组一般会在3个工作日内给出作品反馈,如果符合任务要求,则实时通报该任务已经被完成。 +当你完成相应开发后,相应的代码需要以 Pull Request 的形式上传到任务 ISSUE 所在仓库,并在 [Pinned ISSUE](https://github.com/PaddlePaddle/Paddle/issues/35940) 中,按格式回复相关的提交信息。黑客松评审组一般会在 3 
个工作日内给出作品反馈,如果符合任务要求,则实时通报该任务已经被完成。 **注意**: @@ -134,7 +134,7 @@ PS: ## 四、重磅福利 -为了帮助大家更快更好的完成任务内容开发,本次活动预计会有10+系列直播同步推出,针对通用开发流程、PaddlePaddle框架、Paddle Lite、Paddle Inference、PaddleCV、PaddleNLP、Paddle2ONNX、Paddle Quantum、PaddleHelix、OpenVINO 等相关项目技术讲解、开源社区贡献等内容,都有来自百度以及Intel等深耕该方向的工程师深入讲解,系列直播时间安排,敬请期待。 +为了帮助大家更快更好的完成任务内容开发,本次活动预计会有 10+系列直播同步推出,针对通用开发流程、PaddlePaddle 框架、Paddle Lite、Paddle Inference、PaddleCV、PaddleNLP、Paddle2ONNX、Paddle Quantum、PaddleHelix、OpenVINO 等相关项目技术讲解、开源社区贡献等内容,都有来自百度以及 Intel 等深耕该方向的工程师深入讲解,系列直播时间安排,敬请期待。 ## 五、反作弊声明 diff --git a/docs/guides/10_contribution/rfcs.md b/docs/guides/10_contribution/rfcs.md index afa93bfb689..b7d84ee1099 100644 --- a/docs/guides/10_contribution/rfcs.md +++ b/docs/guides/10_contribution/rfcs.md @@ -13,21 +13,21 @@ GitHub Issue:[PaddlePaddle#35993](https://github.com/PaddlePaddle/Paddle/issue 我们为什么要这样做?它支持哪些用例?预期的结果是什么? # 使用指南级别的说明 -解释这个rfc,就好像它已经包含在 PaddlePaddle 中,而且你正在教一个 PaddlePaddle 用户。 +解释这个 rfc,就好像它已经包含在 PaddlePaddle 中,而且你正在教一个 PaddlePaddle 用户。 这通常意味着。 - 引入新的命名概念。 - 解释该功能能够实现什么(提示:用例子来思考)。 - 如果适用,提供错误信息样本、废弃警告或迁移指导。 -- 对于新功能的RFC,这部分应该提供一个以实例为导向的介绍,并具体解释其影响。 +- 对于新功能的 RFC,这部分应该提供一个以实例为导向的介绍,并具体解释其影响。 # 参考文献级别的说明 -这是RFC的技术部分。对这个设计进行足够详细的解释,即 +这是 RFC 的技术部分。对这个设计进行足够详细的解释,即 - 它与其他功能的交互是清楚的。 - 合理地清楚该功能将如何实现。 -- 通过实例剖析corner cases。 +- 通过实例剖析 corner cases。 - 该部分应该回到上一节给出的例子,并更充分地解释详细的建议如何使这些例子发挥作用。 # 缺点 @@ -39,7 +39,7 @@ GitHub Issue:[PaddlePaddle#35993](https://github.com/PaddlePaddle/Paddle/issue 不这样做的影响是什么? # 现有技术 -讨论现有技术,包括好的和坏的,与本rfc有关的。可以包括以下几个例子。 +讨论现有技术,包括好的和坏的,与本 rfc 有关的。可以包括以下几个例子。 - 这个功能在其他深度学习框架中是否存在,讨论他们的社区所做的实验? - 对于社区建议。这个功能是否由其他社区完成,他们的经验是什么? 
@@ -47,21 +47,21 @@ GitHub Issue:[PaddlePaddle#35993](https://github.com/PaddlePaddle/Paddle/issue - 论文。是否有任何已发表的论文或帖子来讨论这个问题?如果你有一些相关的论文可以参考,这可以作为一个更详细的理论背景。 - 如果没有先例,那也没关系--无论你的未来的可能性想法是全新的还是从其他语言改编的,对我们来说都是有趣的。 -请注意,虽然其他深度学习框架的先例是某种动力,但它本身并不能成为RFC的动力。也请考虑到 PaddlePaddle 有意与其他深度学习框架有所区别。 +请注意,虽然其他深度学习框架的先例是某种动力,但它本身并不能成为 RFC 的动力。也请考虑到 PaddlePaddle 有意与其他深度学习框架有所区别。 # 未解决的问题 -- 在合并前,你希望通过RFC程序解决设计中的哪些部分? +- 在合并前,你希望通过 RFC 程序解决设计中的哪些部分? - 在功能稳定前,你希望通过实现这个功能来解决哪些部分的设计问题? -- 你认为哪些相关问题超出了本RFC的范围,可以在未来独立于本RFC的解决方案来解决? +- 你认为哪些相关问题超出了本 RFC 的范围,可以在未来独立于本 RFC 的解决方案来解决? # 未来的可能性 -想一想你的RFC的自然延伸和演变,以及它将如何全面影响项目。试着把这部分作为一个工具,在你的RFC中更全面地考虑与框架所有可能的互动。还要考虑这一切如何与项目和相关的子团队的路线图相适应。 +想一想你的 RFC 的自然延伸和演变,以及它将如何全面影响项目。试着把这部分作为一个工具,在你的 RFC 中更全面地考虑与框架所有可能的互动。还要考虑这一切如何与项目和相关的子团队的路线图相适应。 -这也是一个 "倾倒想法 "的好地方,如果这些想法超出了你所写的RFC的范围,但又与之相关。 +这也是一个 "倾倒想法 "的好地方,如果这些想法超出了你所写的 RFC 的范围,但又与之相关。 如果你已经尝试过,但想不出任何未来的可能性,你可以简单地说,你什么都想不出来。 -请注意,在未来可能性部分写下一些东西并不是接受当前或未来RFC的理由;这种说明应该放在本篇或后续RFC的动机或理由部分。该部分只是提供额外的信息。 +请注意,在未来可能性部分写下一些东西并不是接受当前或未来 RFC 的理由;这种说明应该放在本篇或后续 RFC 的动机或理由部分。该部分只是提供额外的信息。 参考:[tvm/rfc-template](https://github.com/apache/tvm-rfcs/blob/main/0000-template.md) diff --git a/docs/guides/advanced/autograd_cn.rst b/docs/guides/advanced/autograd_cn.rst index 1bf81598f33..01326d4e6e0 100644 --- a/docs/guides/advanced/autograd_cn.rst +++ b/docs/guides/advanced/autograd_cn.rst @@ -1,7 +1,7 @@ 自动微分机制介绍 ================ -PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介绍如何使用飞桨的自动微分,以及飞桨的自动微分机制,帮助你更好的使用飞桨进行训练。 +PaddlePaddle 的神经网络核心是自动微分,本篇文章主要为你介绍如何使用飞桨的自动微分,以及飞桨的自动微分机制,帮助你更好的使用飞桨进行训练。 一、背景 -------- @@ -10,9 +10,9 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 为了让神经网络的判断更加准确,首先需要有衡量效果的工具,于是损失函数应运而生。如果你想要神经网络的效果好,那么就要让损失函数尽可能的小,于是深度学习引入了能够有效计算函数最小值的算法–梯度下降等优化算法,以及参数优化更新过程–反向传播。 -- 前向传播是输入通过每一层节点计算后得到每层输出,上层输出又作为下一层的输入,最终达到输出层。然后通过损失函数计算得到loss值。 +- 前向传播是输入通过每一层节点计算后得到每层输出,上层输出又作为下一层的输入,最终达到输出层。然后通过损失函数计算得到 loss 值。 -- 反向传播是通过loss值来指导前向节点中的函数参数如何改变,并更新每层中每个节点的参数,来让整个神经网络达到更小的loss值。 +- 反向传播是通过 loss 
值来指导前向节点中的函数参数如何改变,并更新每层中每个节点的参数,来让整个神经网络达到更小的 loss 值。 自动微分机制就是让你只关注组网中的前向传播过程,然后飞桨框架来自动完成反向传播过程,从而来让你从繁琐的求导、求梯度的过程中解放出来。 @@ -20,7 +20,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 ------------------------------ 本文通过一个比较简单的模型来还原飞桨的自动微分过程。 -本示例基于Paddle2.0编写。 +本示例基于 Paddle2.0 编写。 .. code:: ipython3 @@ -38,7 +38,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 2.2.0 -本案例首先定义网络。因为本示例着重展示如何使用飞桨进行自动微分,故组网部分不过多展开,直接使用高层API中封装好的模型\ ``vgg11``\ 。 +本案例首先定义网络。因为本示例着重展示如何使用飞桨进行自动微分,故组网部分不过多展开,直接使用高层 API 中封装好的模型\ ``vgg11``\ 。 然后随机初始化一个输入\ ``x``\ ,和对应标签\ ``label``\ 。 @@ -56,7 +56,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 # 前向传播 predicts = model(x) -前向传播结束后,你就得到模型的预测结果\ ``predicts``\ ,这时可以使用飞桨中的对应损失函数API进行损失函数的计算。该例子中使用\ ``cross_entropy``\ 来计算损失函数,来衡量模型的预测情况。 +前向传播结束后,你就得到模型的预测结果\ ``predicts``\ ,这时可以使用飞桨中的对应损失函数 API 进行损失函数的计算。该例子中使用\ ``cross_entropy``\ 来计算损失函数,来衡量模型的预测情况。 .. code:: ipython3 @@ -93,7 +93,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 1、飞桨中的\ ``Tensor``\ 有\ ``stop_gradient``\ 属性,这个属性可以查看一个\ ``Tensor``\ 是否计算并传播梯度。 -- 如果为\ ``True``\ ,则该\ ``Tensor``\ 不会计算梯度,并会阻绝Autograd的梯度传播。 +- 如果为\ ``True``\ ,则该\ ``Tensor``\ 不会计算梯度,并会阻绝 Autograd 的梯度传播。 - 反之,则会计算梯度并传播梯度。用户自行创建的的\ ``Tensor``\ ,默认\ ``stop_gradient``\ 为\ ``True``\ ,即默认不计算梯度;模型参数的\ ``stop_gradient``\ 默认都为\ ``False``\ ,即默认计算梯度。 @@ -102,7 +102,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 import paddle a = paddle.to_tensor([1.0, 2.0, 3.0]) - b = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False) # 将b设置为需要计算梯度的属性 + b = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False) # 将 b 设置为需要计算梯度的属性 print(a.stop_gradient) print(b.stop_gradient) @@ -138,7 +138,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 .. image:: images/autograd_image_3-1.png -对z调用\ ``backward()``\ ,飞桨即可以自动计算\ ``x``\ 和\ ``y``\ 的梯度,并且将他们存进\ ``grad``\ 属性中。 +对 z 调用\ ``backward()``\ ,飞桨即可以自动计算\ ``x``\ 和\ ``y``\ 的梯度,并且将他们存进\ ``grad``\ 属性中。 .. code:: ipython3 @@ -153,7 +153,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 Tensor y's grad is: [4. 4. 4.] 
-此外,飞桨默认会释放反向计算图。如果在\ ``backward()``\ 之后继续添加OP,需要将\ ``backward()``\ 中的\ ``retain_graph``\ 参数设置为\ ``True``\ ,此时之前的反向计算图会保留。 +此外,飞桨默认会释放反向计算图。如果在\ ``backward()``\ 之后继续添加 OP,需要将\ ``backward()``\ 中的\ ``retain_graph``\ 参数设置为\ ``True``\ ,此时之前的反向计算图会保留。 温馨小提示:将其设置为\ ``False``\ 会更加节省内存。因为他的默认值是\ ``False``\ ,所以也可以直接不设置此参数。 @@ -163,7 +163,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False) y = x + 3 - y.backward(retain_graph=True) # 设置retain_graph为True,保留反向计算图 + y.backward(retain_graph=True) # 设置 retain_graph 为 True,保留反向计算图 print("Tensor x's grad is: {}".format(x.grad)) @@ -209,7 +209,7 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 本章主要介绍飞桨在实现反向传播进行自动微分计算时,内部是如何运行工作的。此部分为选读部分,更多是介绍飞桨内部实现机制,可以选择跳过,跳过不会影响你的正常使用。 -飞桨的自动微分是通过\ ``trace``\ 的方式,记录\ ``前向OP``\ 的执行,并自动创建\ ``反向var``\ 和添加相应的\ ``反向OP``\ ,然后来实现反向梯度计算的。 +飞桨的自动微分是通过\ ``trace``\ 的方式,记录\ ``前向 OP``\ 的执行,并自动创建\ ``反向 var``\ 和添加相应的\ ``反向 OP``\ ,然后来实现反向梯度计算的。 .. image:: images/autograd_image_4-1.png @@ -239,17 +239,17 @@ PaddlePaddle的神经网络核心是自动微分,本篇文章主要为你介 .. 
image:: images/autograd_image_4-2.png -当创建\ ``Tensor``\ ,\ ``Tensor``\ 的\ ``stop_grad=False``\ 时,会自动为此\ ``Tensor``\ 创建一个\ ``反向Tensor``\ 。在此例子中,a的反向Tensor就是\ ``a_grad``\ 。在\ ``a_grad``\ 中,会记录他的反向OP,因为a没有作为任何反向op的输入,所以它的\ ``grad_op``\ 为\ ``None``\ 。 +当创建\ ``Tensor``\ ,\ ``Tensor``\ 的\ ``stop_grad=False``\ 时,会自动为此\ ``Tensor``\ 创建一个\ ``反向 Tensor``\ 。在此例子中,a 的反向 Tensor 就是\ ``a_grad``\ 。在\ ``a_grad``\ 中,会记录他的反向 OP,因为 a 没有作为任何反向 op 的输入,所以它的\ ``grad_op``\ 为\ ``None``\ 。 -当执行OP时,会自动创建反向OP,不同的OP创建反向OP的方法不同,传的内容也不同。本文以这个乘法OP为例: +当执行 OP 时,会自动创建反向 OP,不同的 OP 创建反向 OP 的方法不同,传的内容也不同。本文以这个乘法 OP 为例: --乘法OP的反向OP,即\ ``MulBackward``\ 的输入是,正向OP的两个输入,以及正向OP的输出Tensor的反向Tensor。在此例子中就是,\ ``a``\ 、\ ``b``\ 、\ ``c_grad`` +-乘法 OP 的反向 OP,即\ ``MulBackward``\ 的输入是,正向 OP 的两个输入,以及正向 OP 的输出 Tensor 的反向 Tensor。在此例子中就是,\ ``a``\ 、\ ``b``\ 、\ ``c_grad`` --乘法OP的反向OP,即\ ``MulBackward``\ 的输出是,正向OP的两个输入的反向Tensor(如果输入是stop_gradient=True,则即为None)。在此例子中就是,\ ``a_grad``\ 、\ ``None(b_grad)`` +-乘法 OP 的反向 OP,即\ ``MulBackward``\ 的输出是,正向 OP 的两个输入的反向 Tensor(如果输入是 stop_gradient=True,则即为 None)。在此例子中就是,\ ``a_grad``\ 、\ ``None(b_grad)`` --乘法OP的反向OP,即\ ``MulBackward``\ 的\ ``grad_pending_ops``\ 是自动构建反向网络的时候,让这个反向op知道它下一个可以执行的反向op是哪一个,可以理解为反向网络中,一个反向op指向下一个反向op的边。 +-乘法 OP 的反向 OP,即\ ``MulBackward``\ 的\ ``grad_pending_ops``\ 是自动构建反向网络的时候,让这个反向 op 知道它下一个可以执行的反向 op 是哪一个,可以理解为反向网络中,一个反向 op 指向下一个反向 op 的边。 -当c通过乘法OP被创建后,c会创建一个反向Tensor:\ ``c_grad``,他的\ ``grad_op``\ 为该乘法OP的反向OP,即\ ``MulBackward``\ 。 +当 c 通过乘法 OP 被创建后,c 会创建一个反向 Tensor:\ ``c_grad``,他的\ ``grad_op``\ 为该乘法 OP 的反向 OP,即\ ``MulBackward``\ 。 调用\ ``backward()``\ 后,正式开始进行反向传播过程,开始自动计算微分。 diff --git a/docs/guides/advanced/gradient_clip_cn.rst b/docs/guides/advanced/gradient_clip_cn.rst index a93d44b5141..07329c9ee8f 100644 --- a/docs/guides/advanced/gradient_clip_cn.rst +++ b/docs/guides/advanced/gradient_clip_cn.rst @@ -11,7 +11,7 @@ .. 
math:: O^k = f(W O^{k-1} + b) -在计算出网络的估计值后,使用类似均方误差的方法,计算由目标值与估计值的差距定义的损失函数。其中 :math:`y_i` 为label,:math:`y_i'` 为预测值。 +在计算出网络的估计值后,使用类似均方误差的方法,计算由目标值与估计值的差距定义的损失函数。其中 :math:`y_i` 为 label,:math:`y_i'` 为预测值。 .. math:: loss = \frac{1}{n} \sum_{i=1}^n(y_i-y_i')^2 @@ -28,11 +28,11 @@ .. math:: \nabla w_1 = \alpha \frac{\partial loss}{\partial W_2} = \alpha \frac{\partial loss}{\partial f_4} \frac{\partial f_4}{\partial f_3} \frac{\partial f_3}{\partial f_2} \frac{\partial f_2}{\partial w_2} -当出现下列情形时,可以认为发生了梯度爆炸:两次迭代间的参数变化剧烈,或者模型参数和损失值变为NaN。 +当出现下列情形时,可以认为发生了梯度爆炸:两次迭代间的参数变化剧烈,或者模型参数和损失值变为 NaN。 -如果发生了 "梯度爆炸",在网络学习过程中会直接跳过最优解,所以有必要进行梯度裁剪,防止网络在学习过程中越过最优解。Paddle提供了三种梯度裁剪方式:设置范围值裁剪、通过L2范数裁剪、通过全局L2范数裁剪。设置范围值裁剪方法简单,但是很难确定一个合适的阈值。通过L2范数裁剪和通过全局L2范数裁剪方法,都是用阈值限制梯度向量的L2范数,前者只对特定梯度进行裁剪,后者会对优化器的所有梯度进行裁剪。 +如果发生了 "梯度爆炸",在网络学习过程中会直接跳过最优解,所以有必要进行梯度裁剪,防止网络在学习过程中越过最优解。Paddle 提供了三种梯度裁剪方式:设置范围值裁剪、通过 L2 范数裁剪、通过全局 L2 范数裁剪。设置范围值裁剪方法简单,但是很难确定一个合适的阈值。通过 L2 范数裁剪和通过全局 L2 范数裁剪方法,都是用阈值限制梯度向量的 L2 范数,前者只对特定梯度进行裁剪,后者会对优化器的所有梯度进行裁剪。 -二、Paddle梯度裁剪使用方法 +二、Paddle 梯度裁剪使用方法 --------------------------- 1. 设定范围值裁剪 @@ -60,7 +60,7 @@ - **部分参数裁剪** -部分参数裁剪需要设置参数的 :ref:`paddle.ParamAttr ` ,其中的 ``need_clip`` 默认为True,表示需要裁剪,如果设置为False,则不会裁剪。 +部分参数裁剪需要设置参数的 :ref:`paddle.ParamAttr ` ,其中的 ``need_clip`` 默认为 True,表示需要裁剪,如果设置为 False,则不会裁剪。 例如:仅裁剪 `linear` 中 `weight` 的梯度,则需要在创建 `linear` 层时设置 `bias_attr` 如下: @@ -68,10 +68,10 @@ linear = paddle.nn.Linear(10, 10,bias_attr=paddle.ParamAttr(need_clip=False)) -2. 通过L2范数裁剪 +2. 通过 L2 范数裁剪 ################### -通过L2范数裁剪:梯度作为一个多维Tensor,计算其L2范数,如果超过最大值则按比例进行裁剪,否则不裁剪。 +通过 L2 范数裁剪:梯度作为一个多维 Tensor,计算其 L2 范数,如果超过最大值则按比例进行裁剪,否则不裁剪。 使用方式: @@ -90,7 +90,7 @@ \right. -其中 :math:`X` 为梯度向量,:math:`clip\_norm` 为设置的L2范数阈值, :math:`norm(X)` 代表 :math:`X` 的L2范数 +其中 :math:`X` 为梯度向量,:math:`clip\_norm` 为设置的 L2 范数阈值, :math:`norm(X)` 代表 :math:`X` 的 L2 范数 .. 
math:: \\norm(X) = (\sum_{i=1}^{n}|x_i|^2)^{\frac{1}{2}}\\ @@ -109,7 +109,7 @@ - **部分参数裁剪** -部分参数裁剪的设置方式与上面一致,也是通过设置参数的 :ref:`paddle.ParamAttr ` ,其中的 ``need_clip`` 默认为True,表示需要裁剪,如果设置为False,则不会裁剪。 +部分参数裁剪的设置方式与上面一致,也是通过设置参数的 :ref:`paddle.ParamAttr ` ,其中的 ``need_clip`` 默认为 True,表示需要裁剪,如果设置为 False,则不会裁剪。 例如:仅裁剪 `linear` 中 `bias` 的梯度,则需要在创建 `linear` 层时设置 `weight_attr` 如下: @@ -117,10 +117,10 @@ linear = paddle.nn.Linear(10, 10, weight_attr=paddle.ParamAttr(need_clip=False)) -3. 通过全局L2范数裁剪 +3. 通过全局 L2 范数裁剪 ####################### -将优化器中全部参数的梯度组成向量,对该向量求解L2范数,如果超过最大值则按比例进行裁剪,否则不裁剪。 +将优化器中全部参数的梯度组成向量,对该向量求解 L2 范数,如果超过最大值则按比例进行裁剪,否则不裁剪。 使用方式: @@ -145,7 +145,7 @@ \\global\_norm=\sqrt{\sum_{i=0}^{n-1}(norm(X[i]))^2}\\ -:math:`X[i]` 为梯度向量,:math:`clip\_norm` 为设置的L2范数阈值, :math:`norm(X[i])` 代表 :math:`X[i]` 的L2范数,:math:`global\_norm` 为所有梯度向量的L2范数的均方根值。 +:math:`X[i]` 为梯度向量,:math:`clip\_norm` 为设置的 L2 范数阈值, :math:`norm(X[i])` 代表 :math:`X[i]` 的 L2 范数,:math:`global\_norm` 为所有梯度向量的 L2 范数的均方根值。 - **全部参数裁剪(默认)** @@ -161,14 +161,14 @@ - **部分参数裁剪** -部分参数裁剪的设置方式与上面一致,也是通过设置参数的 :ref:`paddle.ParamAttr ` ,其中的 ``need_clip`` 默认为True,表示需要裁剪,如果设置为False,则不会裁剪。可参考上面的示例代码进行设置。 +部分参数裁剪的设置方式与上面一致,也是通过设置参数的 :ref:`paddle.ParamAttr ` ,其中的 ``need_clip`` 默认为 True,表示需要裁剪,如果设置为 False,则不会裁剪。可参考上面的示例代码进行设置。 -由上面的介绍可以知道,设置范围值裁剪可能会改变梯度向量的方向。例如,阈值为1.0,原梯度向量为[0.8, 89.0],裁剪后的梯度向量变为[0,8, 1.0],方向发生了很大的改变。而对于通过L2范数裁剪的两种方式,阈值为1.0,则裁剪后的梯度向量为[0.00899, 0.99996],能够保证原梯度向量的方向,但是由于分量2的值较大,导致分量1的值变得接近0。在实际的训练过程中,如果遇到梯度爆炸情况,可以试着用不同的裁剪方式对比在验证集上的效果。 +由上面的介绍可以知道,设置范围值裁剪可能会改变梯度向量的方向。例如,阈值为 1.0,原梯度向量为[0.8, 89.0],裁剪后的梯度向量变为[0.8, 1.0],方向发生了很大的改变。而对于通过 L2 范数裁剪的两种方式,阈值为 1.0,则裁剪后的梯度向量为[0.00899, 0.99996],能够保证原梯度向量的方向,但是由于分量 2 的值较大,导致分量 1 的值变得接近 0。在实际的训练过程中,如果遇到梯度爆炸情况,可以试着用不同的裁剪方式对比在验证集上的效果。 三、 实例 -------------------- -为了说明梯度裁剪的作用,以一个简单的3层无激活函数的神经网络为例,说明梯度裁剪的作用。其第一层的权重全部加上2,表示初始化权重过大。通过is_clip控制是否开启梯度裁剪,若开启,则使用L2范数裁剪方式对所有隐藏层的权重梯度进行裁剪,所允许的L2范数为1.0。该例子仅是为了阐释梯度裁剪的作用,并不是真正意义上的深度学习模型!
+为了说明梯度裁剪的作用,以一个简单的 3 层无激活函数的神经网络为例,说明梯度裁剪的作用。其第一层的权重全部加上 2,表示初始化权重过大。通过 is_clip 控制是否开启梯度裁剪,若开启,则使用 L2 范数裁剪方式对所有隐藏层的权重梯度进行裁剪,所允许的 L2 范数为 1.0。该例子仅是为了阐释梯度裁剪的作用,并不是真正意义上的深度学习模型! .. code:: ipython3 @@ -226,15 +226,15 @@ x_data = np.random.randn(total_data, input_size).astype(np.float32) - y_data = x_data + 3 # y和x是线性关系 + y_data = x_data + 3 # y 和 x 是线性关系 model = Net(input_size, hidden_size) - clip = paddle.nn.ClipGradByNorm(clip_norm=1.0) # 创建ClipGradByNorm类的实例,指定L2范数阈值 + clip = paddle.nn.ClipGradByNorm(clip_norm=1.0) # 创建 ClipGradByNorm 类的实例,指定 L2 范数阈值 loss_fn = paddle.nn.MSELoss(reduction='mean') optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters(), - grad_clip=clip) # 将创建的ClipGradByNorm类的实例传入优化器SGD中 + grad_clip=clip) # 将创建的 ClipGradByNorm 类的实例传入优化器 SGD 中 def train(): for t in range(100): @@ -251,8 +251,8 @@ train() -未开启梯度裁剪时的部分日志如下,由于linear1层权重加上了一个正值,导致计算出的loss和相应梯度特别大,并且随着迭代进行,放大效应逐渐累积, -loss和模型的linear1层权重的梯度最终达到正无穷大,变为nan。事实上,网络各个隐藏层的权重都在增大。 +未开启梯度裁剪时的部分日志如下,由于 linear1 层权重加上了一个正值,导致计算出的 loss 和相应梯度特别大,并且随着迭代进行,放大效应逐渐累积, +loss 和模型的 linear1 层权重的梯度最终达到正无穷大,变为 nan。事实上,网络各个隐藏层的权重都在增大。 :: @@ -273,7 +273,7 @@ loss和模型的linear1层权重的梯度最终达到正无穷大,变为nan。 [[nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, ...]]) -开启梯度裁剪后,loss和梯度先是在较大值波动,随后在第50个迭代步开始逐渐减小,最终收敛到0.5左右。由于步数较多,这里仅展示部分迭代步的loss。 +开启梯度裁剪后,loss 和梯度先是在较大值波动,随后在第 50 个迭代步开始逐渐减小,最终收敛到 0.5 左右。由于步数较多,这里仅展示部分迭代步的 loss。 :: diff --git a/docs/guides/advanced/index_cn.rst b/docs/guides/advanced/index_cn.rst index 1a87d1ef677..550242c30fe 100644 --- a/docs/guides/advanced/index_cn.rst +++ b/docs/guides/advanced/index_cn.rst @@ -6,9 +6,9 @@ - `模型可视化 <./visualdl_usage_cn.html>`_ - `自动微分 <./autograd_cn.html>`_ - `层与模型 <./layer_and_model_cn.html>`_ -- `自定义Loss、Metric 及 Callback <./customize_cn.html>`_ +- `自定义 Loss、Metric 及 Callback <./customize_cn.html>`_ - `梯度裁剪 <./gradient_clip_cn.html>`_ -- `模型导出ONNX协议 <./model_to_onnx_cn.html>`_ +- `模型导出 ONNX 协议 <./model_to_onnx_cn.html>`_ .. 
toctree:: :hidden: diff --git a/docs/guides/advanced/layer_and_model_cn.md b/docs/guides/advanced/layer_and_model_cn.md index 0f89e8308c4..e988653f1f2 100644 --- a/docs/guides/advanced/layer_and_model_cn.md +++ b/docs/guides/advanced/layer_and_model_cn.md @@ -1,4 +1,4 @@ -# Paddle中的模型与层 +# Paddle 中的模型与层 模型是深度学习中的重要概念之一。模型的核心功能是将一组输入变量经过一系列计算,映射到另一组输出变量,该映射函数即代表一种深度学习算法。在**Paddle**框架中,模型包括以下两方面内容: @@ -260,7 +260,7 @@ Tensor(shape=[10, 1], dtype=float32, place=CPUPlace, stop_gradient=True, ### 执行函数 -模式设置完成后可以直接调用执行函数。可以直接调用forward()方法进行前向执行,也可以调用 ``__call__()`` ,从而执行在 ``forward()`` 当中定义的前向计算逻辑。 +模式设置完成后可以直接调用执行函数。可以直接调用 forward()方法进行前向执行,也可以调用 ``__call__()`` ,从而执行在 ``forward()`` 当中定义的前向计算逻辑。 ```python model = Model() diff --git a/docs/guides/advanced/model_to_onnx_cn.rst b/docs/guides/advanced/model_to_onnx_cn.rst index 29d769f3903..170100ba11f 100755 --- a/docs/guides/advanced/model_to_onnx_cn.rst +++ b/docs/guides/advanced/model_to_onnx_cn.rst @@ -1,29 +1,29 @@ .. _cn_model_to_onnx: ################ -模型导出ONNX协议 +模型导出 ONNX 协议 ################ 一、简介 ################## -ONNX (Open Neural Network Exchange) 是针对机器学习所设计的开源文件格式,用于存储训练好的模型。它使得不同的人工智能框架可以采用相同格式存储模型并交互。通过ONNX格式,Paddle模型可以使用OpenVINO、ONNX Runtime等框架进行推理。 +ONNX (Open Neural Network Exchange) 是针对机器学习所设计的开源文件格式,用于存储训练好的模型。它使得不同的人工智能框架可以采用相同格式存储模型并交互。通过 ONNX 格式,Paddle 模型可以使用 OpenVINO、ONNX Runtime 等框架进行推理。 -Paddle转ONNX协议由 `paddle2onnx `_ 实现,下面介绍如何将Paddle模型转换为ONNX模型并验证正确性。 +Paddle 转 ONNX 协议由 `paddle2onnx `_ 实现,下面介绍如何将 Paddle 模型转换为 ONNX 模型并验证正确性。 -本教程涉及的示例代码,可点击 `IPython `_ 获取, 除Paddle以外,还需安装以下依赖: +本教程涉及的示例代码,可点击 `IPython `_ 获取, 除 Paddle 以外,还需安装以下依赖: .. 
code-block:: bash pip install paddle2onnx onnx onnxruntime // -i https://mirror.baidu.com/pypi/simple 如果网速不好,可以使用其他源下载 -二、模型导出为ONNX协议 +二、模型导出为 ONNX 协议 ################## -2.1 动态图导出ONNX协议 +2.1 动态图导出 ONNX 协议 ------------ -Paddle动态图模型转换为ONNX协议,首先会将Paddle的动态图 ``paddle.nn.Layer`` 转换为静态图, 详细原理可以参考 `动态图转静态图 <../04_dygraph_to_static/index_cn.html>`_ 。然后依照ONNX的算子协议,将Paddle的算子一一映射为ONNX的算子。动态图转换ONNX调用 ``paddle.onnx.export()`` 接口即可实现,该接口通过 ``input_spec`` 参数为模型指定输入的形状和数据类型,支持 ``Tensor`` 或 ``InputSpec`` ,其中 ``InputSpec`` 支持动态的shape。 +Paddle 动态图模型转换为 ONNX 协议,首先会将 Paddle 的动态图 ``paddle.nn.Layer`` 转换为静态图, 详细原理可以参考 `动态图转静态图 <../04_dygraph_to_static/index_cn.html>`_ 。然后依照 ONNX 的算子协议,将 Paddle 的算子一一映射为 ONNX 的算子。动态图转换 ONNX 调用 ``paddle.onnx.export()`` 接口即可实现,该接口通过 ``input_spec`` 参数为模型指定输入的形状和数据类型,支持 ``Tensor`` 或 ``InputSpec`` ,其中 ``InputSpec`` 支持动态的 shape。 关于 ``paddle.onnx.export`` 接口更详细的使用方法,请参考 `API `_ 。 @@ -47,15 +47,15 @@ Paddle动态图模型转换为ONNX协议,首先会将Paddle的动态图 ``padd x_spec = InputSpec([None, 784], 'float32', 'x') paddle.onnx.export(layer, save_path, input_spec=[x_spec]) -2.2 静态图导出ONNX协议 +2.2 静态图导出 ONNX 协议 ------------ -Paddle 2.0以后将主推动态图组网方式,如果您的模型来自于旧版本的Paddle,使用静态图组网,请参考paddle2onnx的 `使用文档 `_ 和 `示例 `_ 。 +Paddle 2.0 以后将主推动态图组网方式,如果您的模型来自于旧版本的 Paddle,使用静态图组网,请参考 paddle2onnx 的 `使用文档 `_ 和 `示例 `_ 。 -三、ONNX模型的验证 +三、ONNX 模型的验证 ################## -ONNX官方工具包提供了API可验证模型的正确性,主要包括两个方面,一是算子是否符合对应版本的协议,二是网络结构是否完整。 +ONNX 官方工具包提供了 API 可验证模型的正确性,主要包括两个方面,一是算子是否符合对应版本的协议,二是网络结构是否完整。 .. code-block:: python @@ -67,11 +67,11 @@ ONNX官方工具包提供了API可验证模型的正确性,主要包括两个 onnx.checker.check_model(onnx_model) print('The model is checked!') -如果模型检查失败,请到 `Paddle `_ 或 `paddle2onnx `_ 提出Issue,我们会跟进相应的问题。 +如果模型检查失败,请到 `Paddle `_ 或 `paddle2onnx `_ 提出 Issue,我们会跟进相应的问题。 -四、ONNXRuntime推理 +四、ONNXRuntime 推理 ################## -本节介绍使用ONNXRuntime对已转换的Paddle模型进行推理,并与使用Paddle进行推理的结果进行对比。 +本节介绍使用 ONNXRuntime 对已转换的 Paddle 模型进行推理,并与使用 Paddle 进行推理的结果进行对比。 .. 
code-block:: python diff --git a/docs/guides/advanced/visualdl_cn.md b/docs/guides/advanced/visualdl_cn.md index bca20b9d972..3de1f8ded86 100644 --- a/docs/guides/advanced/visualdl_cn.md +++ b/docs/guides/advanced/visualdl_cn.md @@ -7,13 +7,13 @@ -VisualDL是飞桨可视化分析工具,以丰富的图表呈现训练参数变化趋势、模型结构、数据样本、直方图、PR曲线及高维数据分布。可帮助用户更清晰直观地理解深度学习模型训练过程及模型结构,进而实现高效的模型优化。 +VisualDL 是飞桨可视化分析工具,以丰富的图表呈现训练参数变化趋势、模型结构、数据样本、直方图、PR 曲线及高维数据分布。可帮助用户更清晰直观地理解深度学习模型训练过程及模型结构,进而实现高效的模型优化。 -具体功能使用方式请参见**VisualDL使用指南**。项目正处于高速迭代中,敬请期待新组件的加入。 +具体功能使用方式请参见**VisualDL 使用指南**。项目正处于高速迭代中,敬请期待新组件的加入。 -VisualDL支持浏览器种类:Chrome(81和83)、Safari 13、Firefox(77和78)、Edge(Chromium版)。 +VisualDL 支持浏览器种类:Chrome(81 和 83)、Safari 13、Firefox(77 和 78)、Edge(Chromium 版)。 -VisualDL原生支持python的使用, 通过在模型的Python配置中添加几行代码,便可为训练过程提供丰富的可视化支持。 +VisualDL 原生支持 python 的使用, 通过在模型的 Python 配置中添加几行代码,便可为训练过程提供丰富的可视化支持。 @@ -33,15 +33,15 @@ VisualDL原生支持python的使用, 通过在模型的Python配置中添加 ### 简单易用 -API设计简洁易懂,使用简单。模型结构一键实现可视化。 +API 设计简洁易懂,使用简单。模型结构一键实现可视化。 ### 功能丰富 -功能覆盖标量、数据样本、图结构、直方图、PR曲线及数据降维可视化。 +功能覆盖标量、数据样本、图结构、直方图、PR 曲线及数据降维可视化。 ### 高兼容性 -全面支持Paddle、ONNX、Caffe等市面主流模型结构可视化,广泛支持各类用户进行可视化分析。 +全面支持 Paddle、ONNX、Caffe 等市面主流模型结构可视化,广泛支持各类用户进行可视化分析。 ### 全面支持 @@ -51,7 +51,7 @@ API设计简洁易懂,使用简单。模型结构一键实现可视化。 ## 安装方式 -### 使用pip安装 +### 使用 pip 安装 ```shell pip install --upgrade --pre visualdl @@ -67,15 +67,15 @@ python setup.py bdist_wheel pip install --upgrade dist/visualdl-*.whl ``` -需要注意,官方自2020年1月1日起不再维护Python2,为了保障代码可用性,VisualDL现仅支持Python3 +需要注意,官方自 2020 年 1 月 1 日起不再维护 Python2,为了保障代码可用性,VisualDL 现仅支持 Python3 ## 使用方式 -VisualDL将训练过程中的数据、参数等信息储存至日志文件中后,启动面板即可查看可视化结果。 +VisualDL 将训练过程中的数据、参数等信息储存至日志文件中后,启动面板即可查看可视化结果。 ### 1. 
记录日志 -VisualDL的后端提供了Python SDK,可通过LogWriter定制一个日志记录器,接口如下: +VisualDL 的后端提供了 Python SDK,可通过 LogWriter 定制一个日志记录器,接口如下: ```python class LogWriter(logdir=None, @@ -91,8 +91,8 @@ class LogWriter(logdir=None, | 参数 | 格式 | 含义 | | --------------- | ------- | ------------------------------------------------------------ | -| logdir | string | 日志文件所在的路径,VisualDL将在此路径下建立日志文件并进行记录,如果不填则默认为`runs/${CURRENT_TIME}` | -| comment | string | 为日志文件夹名添加后缀,如果制定了logdir则此项无效 | +| logdir | string | 日志文件所在的路径,VisualDL 将在此路径下建立日志文件并进行记录,如果不填则默认为`runs/${CURRENT_TIME}` | +| comment | string | 为日志文件夹名添加后缀,如果指定了 logdir 则此项无效 | | max_queue | int | 日志记录消息队列的最大容量,达到此容量则立即写入到日志文件 | | flush_secs | int | 日志记录消息队列的最大缓存时间,达到此时间则立即写入到日志文件 | | filename_suffix | string | 为默认的日志文件名添加后缀 | @@ -107,7 +107,7 @@ from visualdl import LogWriter # 在`./log/scalar_test/train`路径下建立日志文件 with LogWriter(logdir="./log/scalar_test/train") as writer: - # 使用scalar组件记录一个标量数据 + # 使用 scalar 组件记录一个标量数据 writer.add_scalar(tag="acc", step=1, value=0.5678) writer.add_scalar(tag="acc", step=2, value=0.6878) writer.add_scalar(tag="acc", step=3, value=0.9878) @@ -115,11 +115,11 @@ ### 2.
启动面板 -在上述示例中,日志已记录三组标量数据,现可启动VisualDL面板查看日志的可视化结果,共有两种启动方式: +在上述示例中,日志已记录三组标量数据,现可启动 VisualDL 面板查看日志的可视化结果,共有两种启动方式: #### 在命令行启动 -使用命令行启动VisualDL面板,命令格式如下: +使用命令行启动 VisualDL 面板,命令格式如下: ```python visualdl --logdir --host --port --cache-timeout --language --public-path --api-only @@ -129,14 +129,14 @@ visualdl --logdir --host --port --cach | 参数 | 意义 | | --------------- | ------------------------------------------------------------ | -| --logdir | 设定日志所在目录,可以指定多个目录,VisualDL将遍历并且迭代寻找指定目录的子目录,将所有实验结果进行可视化 | -| --model | 设定模型文件路径(非文件夹路径),VisualDL将在此路径指定的模型文件进行可视化,目前可支持PaddlePaddle、ONNX、Keras、Core ML、Caffe等多种模型结构,详情可查看[graph支持模型种类]([https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--%E7%BD%91%E7%BB%9C%E7%BB%93%E6%9E%84%E7%BB%84%E4%BB%B6](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--网络结构组件)) | -| --host | 设定IP,默认为`127.0.0.1` | +| --logdir | 设定日志所在目录,可以指定多个目录,VisualDL 将遍历并且迭代寻找指定目录的子目录,将所有实验结果进行可视化 | +| --model | 设定模型文件路径(非文件夹路径),VisualDL 将在此路径指定的模型文件进行可视化,目前可支持 PaddlePaddle、ONNX、Keras、Core ML、Caffe 等多种模型结构,详情可查看[graph 支持模型种类]([https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--%E7%BD%91%E7%BB%9C%E7%BB%93%E6%9E%84%E7%BB%84%E4%BB%B6](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/components/README.md#Graph--网络结构组件)) | +| --host | 设定 IP,默认为`127.0.0.1` | | --port | 设定端口,默认为`8040` | -| --cache-timeout | 后端缓存时间,在缓存时间内前端多次请求同一url,返回的数据从缓存中获取,默认为20秒 | -| --language | VisualDL面板语言,可指定为'EN'或'ZH',默认为浏览器使用语言 | -| --public-path | VisualDL面板URL路径,默认是'/app',即访问地址为'http://<host>:<port>/app' | -| --api-only | 是否只提供API,如果设置此参数,则VisualDL不提供页面展示,只提供API服务,此时API地址为'http://<host>:<port>/<public_path>/api';若没有设置public_path参数,则默认为'http://<host>:<port>/api' | +| --cache-timeout | 后端缓存时间,在缓存时间内前端多次请求同一 url,返回的数据从缓存中获取,默认为 20 秒 | +| --language | VisualDL 面板语言,可指定为'EN'或'ZH',默认为浏览器使用语言 | +| --public-path | VisualDL 面板 URL 路径,默认是'/app',即访问地址为'http://<host>:<port>/app' | +| 
--api-only | 是否只提供 API,如果设置此参数,则 VisualDL 不提供页面展示,只提供 API 服务,此时 API 地址为'http://<host>:<port>/<public_path>/api';若没有设置 public_path 参数,则默认为'http://<host>:<port>/api' | 针对上一步生成的日志,启动命令为: @@ -144,9 +144,9 @@ visualdl --logdir --host --port --cach visualdl --logdir ./log ``` -#### 在Python脚本中启动 +#### 在 Python 脚本中启动 -支持在Python脚本中启动VisualDL面板,接口如下: +支持在 Python 脚本中启动 VisualDL 面板,接口如下: ```python visualdl.server.app.run(logdir, @@ -165,15 +165,15 @@ visualdl.server.app.run(logdir, | 参数 | 格式 | 含义 | | ------------- | ------------------------------------------------ | ------------------------------------------------------------ | -| logdir | string或list[string_1, string_2, ... , string_n] | 日志文件所在的路径,VisualDL将在此路径下递归搜索日志文件并进行可视化,可指定单个或多个路径 | -| model | string | 模型文件路径(非文件夹路径),VisualDL将在此路径指定的模型文件进行可视化 | -| host | string | 指定启动服务的ip,默认为`127.0.0.1` | +| logdir | string 或 list[string_1, string_2, ... , string_n] | 日志文件所在的路径,VisualDL 将在此路径下递归搜索日志文件并进行可视化,可指定单个或多个路径 | +| model | string | 模型文件路径(非文件夹路径),VisualDL 将在此路径指定的模型文件进行可视化 | +| host | string | 指定启动服务的 ip,默认为`127.0.0.1` | | port | int | 启动服务端口,默认为`8040` | -| cache_timeout | int | 后端缓存时间,在缓存时间内前端多次请求同一url,返回的数据从缓存中获取,默认为20秒 | -| language | string | VisualDL面板语言,可指定为'en'或'zh',默认为浏览器使用语言 | -| public_path | string | VisualDL面板URL路径,默认是'/app',即访问地址为'http://:/app' | -| api_only | boolean | 是否只提供API,如果设置此参数,则VisualDL不提供页面展示,只提供API服务,此时API地址为'http://://api';若没有设置public_path参数,则默认为http://:/api' | -| open_browser | boolean | 是否打开浏览器,设置为True则在启动后自动打开浏览器并访问VisualDL面板,若设置api_only,则忽略此参数 | +| cache_timeout | int | 后端缓存时间,在缓存时间内前端多次请求同一 url,返回的数据从缓存中获取,默认为 20 秒 | +| language | string | VisualDL 面板语言,可指定为'en'或'zh',默认为浏览器使用语言 | +| public_path | string | VisualDL 面板 URL 路径,默认是'/app',即访问地址为'http://:/app' | +| api_only | boolean | 是否只提供 API,如果设置此参数,则 VisualDL 不提供页面展示,只提供 API 服务,此时 API 地址为'http://://api';若没有设置 public_path 参数,则默认为 http://:/api' | +| open_browser | boolean | 是否打开浏览器,设置为 True 则在启动后自动打开浏览器并访问 VisualDL 面板,若设置 api_only,则忽略此参数 | 
针对上一步生成的日志,我们的启动脚本为: @@ -183,7 +183,7 @@ from visualdl.server import app app.run(logdir="./log") ``` -在使用任意一种方式启动VisualDL面板后,打开浏览器访问VisualDL面板,即可查看日志的可视化结果,如图: +在使用任意一种方式启动 VisualDL 面板后,打开浏览器访问 VisualDL 面板,即可查看日志的可视化结果,如图:

@@ -195,11 +195,11 @@ app.run(logdir="./log") ### Scalar -以图表形式实时展示训练过程参数,如loss、accuracy。让用户通过观察单组或多组训练参数变化,了解训练过程,加速模型调优。具有两大特点: +以图表形式实时展示训练过程参数,如 loss、accuracy。让用户通过观察单组或多组训练参数变化,了解训练过程,加速模型调优。具有两大特点: #### 动态展示 -在启动VisualDL后,LogReader将不断增量的读取日志中数据并供前端调用展示,因此能够在训练中同步观测指标变化,如下图: +在启动 VisualDL 后,LogReader 将不断增量的读取日志中数据并供前端调用展示,因此能够在训练中同步观测指标变化,如下图:

@@ -209,7 +209,7 @@ app.run(logdir="./log") #### 多实验对比 -只需在启动VisualDL时将每个实验日志所在路径同时传入即可,每个实验中相同tag的指标将绘制在一张图中同步呈现,如下图: +只需在启动 VisualDL 时将每个实验日志所在路径同时传入即可,每个实验中相同 tag 的指标将绘制在一张图中同步呈现,如下图:

@@ -249,9 +249,9 @@ app.run(logdir="./log") ### Histogram -以直方图形式展示Tensor(weight、bias、gradient等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。 +以直方图形式展示 Tensor(weight、bias、gradient 等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。 -- Offset模式 +- Offset 模式

@@ -259,7 +259,7 @@ app.run(logdir="./log") -- Overlay模式 +- Overlay 模式

@@ -278,7 +278,7 @@ app.run(logdir="./log") ### High Dimensional -将高维数据进行降维展示,目前支持T-SNE、PCA两种降维方式,用于深入分析高维数据间的关系,方便用户根据数据特征进行算法优化。 +将高维数据进行降维展示,目前支持 T-SNE、PCA 两种降维方式,用于深入分析高维数据间的关系,方便用户根据数据特征进行算法优化。

@@ -293,8 +293,8 @@ Graph 相关功能由 [Netron](https://github.com/lutzroeder/netron) 提供技 ## 更多细节 -想了解更多关于VisualDL可视化功能的使用详情介绍,请查看**VisualDL使用指南**。 +想了解更多关于 VisualDL 可视化功能的使用详情介绍,请查看**VisualDL 使用指南**。 ## 技术交流 -欢迎您加入VisualDL官方QQ群:1045783368 与飞桨团队以及其他用户共同针对VisualDL进行讨论与交流。 +欢迎您加入 VisualDL 官方 QQ 群:1045783368 与飞桨团队以及其他用户共同针对 VisualDL 进行讨论与交流。 diff --git a/docs/guides/advanced/visualdl_usage_cn.md b/docs/guides/advanced/visualdl_usage_cn.md index 3e536dbd5e6..4103301fb22 100644 --- a/docs/guides/advanced/visualdl_usage_cn.md +++ b/docs/guides/advanced/visualdl_usage_cn.md @@ -13,7 +13,7 @@ VisualDL 是一个面向深度学习任务设计的可视化工具。VisualDL | [Audio](#Audio--音频播放组件) | 音频播放 | 播放训练过程中的音频数据,监控语音识别与合成等任务的训练过程 | | [Graph](#Graph--网络结构组件) | 网络结构 | 展示网络结构、节点属性及数据流向,辅助学习、优化网络结构 | | [Histogram](#Histogram--直方图组件) | 直方图 | 展示训练过程中权重、梯度等张量的分布 | -| [PR Curve](#PR-Curve--PR曲线组件) | 折线图 | 权衡精度与召回率之间的平衡关系,便于选择最佳阈值 | +| [PR Curve](#PR-Curve--PR 曲线组件) | 折线图 | 权衡精度与召回率之间的平衡关系,便于选择最佳阈值 | | [High Dimensional](#High-Dimensional--数据降维组件) | 数据降维 | 将高维数据映射到 2D/3D 空间来可视化嵌入,便于观察不同数据的相关性 | ## Scalar -- 折线图组件 @@ -43,7 +43,7 @@ add_scalar(tag, value, step, walltime=None) - 基础使用 -下面展示了使用 Scalar 组件记录数据的示例,代码文件请见[Scalar组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/scalar_test.py) +下面展示了使用 Scalar 组件记录数据的示例,代码文件请见[Scalar 组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/scalar_test.py) ```python from visualdl import LogWriter @@ -53,9 +53,9 @@ if __name__ == '__main__': # 初始化一个记录器 with LogWriter(logdir="./log/scalar_test/train") as writer: for step in range(1000): - # 向记录器添加一个tag为`acc`的数据 + # 向记录器添加一个 tag 为`acc`的数据 writer.add_scalar(tag="acc", step=step, value=value[step]) - # 向记录器添加一个tag为`loss`的数据 + # 向记录器添加一个 tag 为`loss`的数据 writer.add_scalar(tag="loss", step=step, value=1/(value[step] + 1)) ``` @@ -75,32 +75,32 @@ visualdl --logdir ./log --port 8080 - 多组实验对比 -下面展示了使用Scalar组件实现多组实验对比 +下面展示了使用 Scalar 组件实现多组实验对比 多组实验对比的实现分为两步: 1. 创建子日志文件储存每组实验的参数数据 -2. 
将数据写入scalar组件时,**使用相同的tag**,即可实现对比**不同实验**的**同一类型参数** +2. 将数据写入 scalar 组件时,**使用相同的 tag**,即可实现对比**不同实验**的**同一类型参数** ```python from visualdl import LogWriter if __name__ == '__main__': value = [i/1000.0 for i in range(1000)] - # 步骤一:创建父文件夹:log与子文件夹:scalar_test + # 步骤一:创建父文件夹:log 与子文件夹:scalar_test with LogWriter(logdir="./log/scalar_test") as writer: for step in range(1000): - # 步骤二:向记录器添加一个tag为`train/acc`的数据 + # 步骤二:向记录器添加一个 tag 为`train/acc`的数据 writer.add_scalar(tag="train/acc", step=step, value=value[step]) - # 步骤二:向记录器添加一个tag为`train/loss`的数据 + # 步骤二:向记录器添加一个 tag 为`train/loss`的数据 writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1)) - # 步骤一:创建第二个子文件夹scalar_test2 + # 步骤一:创建第二个子文件夹 scalar_test2 value = [i/500.0 for i in range(1000)] with LogWriter(logdir="./log/scalar_test2") as writer: for step in range(1000): - # 步骤二:在同样名为`train/acc`下添加scalar_test2的accuracy的数据 + # 步骤二:在同样名为`train/acc`下添加 scalar_test2 的 accuracy 的数据 writer.add_scalar(tag="train/acc", step=step, value=value[step]) - # 步骤二:在同样名为`train/loss`下添加scalar_test2的loss的数据 + # 步骤二:在同样名为`train/loss`下添加 scalar_test2 的 loss 的数据 writer.add_scalar(tag="train/loss", step=step, value=1/(value[step] + 1)) ``` @@ -110,19 +110,19 @@ if __name__ == '__main__': visualdl --logdir ./log --port 8080 ``` -接着在浏览器打开`http://127.0.0.1:8080`,即可查看以下折线图,对比「scalar_test」和「scalar_test2」的Accuracy和Loss。 +接着在浏览器打开`http://127.0.0.1:8080`,即可查看以下折线图,对比「scalar_test」和「scalar_test2」的 Accuracy 和 Loss。

-*多组实验对比的应用案例可参考AI Studio项目:[VisualDL 2.0--眼疾识别训练可视化](https://aistudio.baidu.com/aistudio/projectdetail/502834) +*多组实验对比的应用案例可参考 AI Studio 项目:[VisualDL 2.0--眼疾识别训练可视化](https://aistudio.baidu.com/aistudio/projectdetail/502834) ### 功能操作说明 -* 支持数据卡片「最大化」、「还原」、「坐标系转化」(y轴对数坐标)、「下载」折线图 +* 支持数据卡片「最大化」、「还原」、「坐标系转化」(y 轴对数坐标)、「下载」折线图

@@ -132,7 +132,7 @@ visualdl --logdir ./log --port 8080 -* 数据点Hover展示详细信息 +* 数据点 Hover 展示详细信息

@@ -161,7 +161,7 @@ visualdl --logdir ./log --port 8080 -* X轴有三种衡量尺度 +* X 轴有三种衡量尺度 1. Step:迭代次数 2. Walltime:训练绝对时间 @@ -200,13 +200,13 @@ add_image(tag, img, step, walltime=None) | 参数 | 格式 | 含义 | | -------- | ------------- | ------------------------------------------- | | tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | -| img | numpy.ndarray | 以ndarray格式表示的图片 | +| img | numpy.ndarray | 以 ndarray 格式表示的图片 | | step | int | 记录的步数 | | walltime | int | 记录数据的时间戳,默认为当前时间戳 | ### Demo -下面展示了使用 Image 组件记录数据的示例,代码文件请见[Image组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/image_test.py) +下面展示了使用 Image 组件记录数据的示例,代码文件请见[Image 组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/image_test.py) ```python import numpy as np @@ -261,7 +261,7 @@ visualdl --logdir ./log --port 8080 -支持滑动Step/迭代次数查看不同迭代次数下的图片数据 +支持滑动 Step/迭代次数查看不同迭代次数下的图片数据

@@ -274,7 +274,7 @@ visualdl --logdir ./log --port 8080 ### 介绍 -Audio组件实时查看训练过程中的音频数据,监控语音识别与合成等任务的训练过程。 +Audio 组件实时查看训练过程中的音频数据,监控语音识别与合成等任务的训练过程。 ### 记录接口 @@ -289,13 +289,13 @@ add_audio(tag, audio_array, step, sample_rate) | 参数 | 格式 | 含义 | | ----------- | ------------- | ------------------------------------------ | | tag | string | 记录指标的标志,如`audio_tag`,不能含有`%` | -| audio_arry | numpy.ndarray | 以ndarray格式表示的音频 | +| audio_arry | numpy.ndarray | 以 ndarray 格式表示的音频 | | step | int | 记录的步数 | | sample_rate | int | 采样率,**注意正确填写对应音频的原采样率** | ### Demo -下面展示了使用 Audio 组件记录数据的示例,代码文件请见[Audio组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/audio_test.py) +下面展示了使用 Audio 组件记录数据的示例,代码文件请见[Audio 组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/audio_test.py) ```python from visualdl import LogWriter @@ -354,7 +354,7 @@ visualdl --logdir ./log --port 8080 -- 支持滑动Step/迭代次数试听不同迭代次数下的音频数据 +- 支持滑动 Step/迭代次数试听不同迭代次数下的音频数据

@@ -391,7 +391,7 @@ visualdl --logdir ./log --port 8080 ### 介绍 -Graph组件一键可视化模型的网络结构。用于查看模型属性、节点信息、节点输入输出等,并进行节点搜索,协助开发者们快速分析模型结构与了解数据流向。 +Graph 组件一键可视化模型的网络结构。用于查看模型属性、节点信息、节点输入输出等,并进行节点搜索,协助开发者们快速分析模型结构与了解数据流向。 ### Demo @@ -399,7 +399,7 @@ Graph组件一键可视化模型的网络结构。用于查看模型属性、节 - 前端模型文件拖拽上传: - - 如只需使用Graph组件,则无需添加任何参数,在命令行执行`visualdl`后即可启动面板进行上传。 + - 如只需使用 Graph 组件,则无需添加任何参数,在命令行执行`visualdl`后即可启动面板进行上传。 - 如果同时需使用其他功能,在命令行指定日志文件路径(以`./log`为例)即可启动面板进行上传: ```shell @@ -412,7 +412,7 @@ Graph组件一键可视化模型的网络结构。用于查看模型属性、节 -- 后端启动Graph: +- 后端启动 Graph: - 在命令行加入参数`--model`并指定**模型文件**路径(非文件夹路径),即可启动并查看网络结构可视化: @@ -476,7 +476,7 @@ Graph组件一键可视化模型的网络结构。用于查看模型属性、节 -- 支持以PNG、SVG格式导出模型结构图 +- 支持以 PNG、SVG 格式导出模型结构图

@@ -504,7 +504,7 @@ Graph组件一键可视化模型的网络结构。用于查看模型属性、节 ### 介绍 -Histogram组件以直方图形式展示Tensor(weight、bias、gradient等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。 +Histogram 组件以直方图形式展示 Tensor(weight、bias、gradient 等)数据在训练过程中的变化趋势。深入了解模型各层效果,帮助开发者精准调整模型结构。 ### 记录接口 @@ -519,14 +519,14 @@ add_histogram(tag, values, step, walltime=None, buckets=10) | 参数 | 格式 | 含义 | | -------- | --------------------- | ------------------------------------------- | | tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | -| values | numpy.ndarray or list | 以ndarray或list格式表示的数据 | +| values | numpy.ndarray or list | 以 ndarray 或 list 格式表示的数据 | | step | int | 记录的步数 | | walltime | int | 记录数据的时间戳,默认为当前时间戳 | -| buckets | int | 生成直方图的分段数,默认为10 | +| buckets | int | 生成直方图的分段数,默认为 10 | ### Demo -下面展示了使用 Histogram组件记录数据的示例,代码文件请见[Histogram组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/histogram_test.py) +下面展示了使用 Histogram 组件记录数据的示例,代码文件请见[Histogram 组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/histogram_test.py) ```python from visualdl import LogWriter @@ -562,14 +562,14 @@ visualdl --logdir ./log --port 8080

-- 可选择Offset或Overlay模式 +- 可选择 Offset 或 Overlay 模式

- - Offset模式 + - Offset 模式

@@ -577,16 +577,16 @@ visualdl --logdir ./log --port 8080 - - Overlay模式 + - Overlay 模式

-- 数据点Hover展示参数值、训练步数、频次 +- 数据点 Hover 展示参数值、训练步数、频次 - - 在第240次训练步数时,权重为-0.0031,且出现的频次是2734次 + - 在第 240 次训练步数时,权重为-0.0031,且出现的频次是 2734 次

@@ -604,15 +604,15 @@ visualdl --logdir ./log --port 8080

-## PR Curve--PR曲线组件 +## PR Curve--PR 曲线组件 ### 介绍 -PR Curve以折线图形式呈现精度与召回率的权衡分析,清晰直观了解模型训练效果,便于分析模型是否达到理想标准。 +PR Curve 以折线图形式呈现精度与召回率的权衡分析,清晰直观了解模型训练效果,便于分析模型是否达到理想标准。 ### 记录接口 -PR Curve组件的记录接口如下: +PR Curve 组件的记录接口如下: ```python add_pr_curve(tag, labels, predictions, step=None, num_thresholds=10) @@ -623,14 +623,14 @@ add_pr_curve(tag, labels, predictions, step=None, num_thresholds=10) | 参数 | 格式 | 含义 | | -------------- | --------------------- | ------------------------------------------- | | tag | string | 记录指标的标志,如`train/loss`,不能含有`%` | -| labels | numpy.ndarray or list | 以ndarray或list格式表示的实际类别 | -| predictions | numpy.ndarray or list | 以ndarray或list格式表示的预测类别 | +| labels | numpy.ndarray or list | 以 ndarray 或 list 格式表示的实际类别 | +| predictions | numpy.ndarray or list | 以 ndarray 或 list 格式表示的预测类别 | | step | int | 记录的步数 | -| num_thresholds | int | 阈值设置的个数,默认为10,最大值为127 | +| num_thresholds | int | 阈值设置的个数,默认为 10,最大值为 127 | ### Demo -下面展示了使用 PR Curve 组件记录数据的示例,代码文件请见[PR Curve组件](#https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/pr_curve_test.py) +下面展示了使用 PR Curve 组件记录数据的示例,代码文件请见[PR Curve 组件](#https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/pr_curve_test.py) ```python from visualdl import LogWriter @@ -653,7 +653,7 @@ with LogWriter("./log/pr_curve_test/train") as writer: visualdl --logdir ./log --port 8080 ``` -接着在浏览器打开`http://127.0.0.1:8080`,即可查看PR Curve +接着在浏览器打开`http://127.0.0.1:8080`,即可查看 PR Curve

@@ -663,13 +663,13 @@ visualdl --logdir ./log --port 8080 ### 功能操作说明 -- 支持数据卡片「最大化」,「还原」、「下载」PR曲线 +- 支持数据卡片「最大化」,「还原」、「下载」PR 曲线

-- 数据点Hover展示详细信息:阈值对应的TP、TN、FP、FN +- 数据点 Hover 展示详细信息:阈值对应的 TP、TN、FP、FN

@@ -688,13 +688,13 @@ visualdl --logdir ./log --port 8080

-- 支持查看不同训练步数下的PR曲线 +- 支持查看不同训练步数下的 PR 曲线

-- X轴-时间显示类型有三种衡量尺度 +- X 轴-时间显示类型有三种衡量尺度 - Step:迭代次数 - Walltime:训练绝对时间 @@ -726,13 +726,13 @@ add_embeddings(tag, labels, hot_vectors, walltime=None) | 参数 | 格式 | 含义 | | ----------- | ------------------- | ---------------------------------------------------- | | tag | string | 记录指标的标志,如`default`,不能含有`%` | -| labels | numpy.array 或 list | 一维数组表示的标签,每个元素是一个string类型的字符串 | -| hot_vectors | numpy.array or list | 与labels一一对应,每个元素可以看作是某个标签的特征 | +| labels | numpy.array 或 list | 一维数组表示的标签,每个元素是一个 string 类型的字符串 | +| hot_vectors | numpy.array or list | 与 labels 一一对应,每个元素可以看作是某个标签的特征 | | walltime | int | 记录数据的时间戳,默认为当前时间戳 | ### Demo -下面展示了使用 High Dimensional 组件记录数据的示例,代码文件请见[High Dimensional组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/high_dimensional_test.py) +下面展示了使用 High Dimensional 组件记录数据的示例,代码文件请见[High Dimensional 组件](https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/components/high_dimensional_test.py) ```python from visualdl import LogWriter @@ -749,7 +749,7 @@ if __name__ == '__main__': labels = ["label_1", "label_2", "label_3", "label_4", "label_5"] # 初始化一个记录器 with LogWriter(logdir="./log/high_dimensional_test/train") as writer: - # 将一组labels和对应的hot_vectors传入记录器进行记录 + # 将一组 labels 和对应的 hot_vectors 传入记录器进行记录 writer.add_embeddings(tag='default', labels=labels, hot_vectors=hot_vectors) diff --git a/docs/guides/beginner/index_cn.rst b/docs/guides/beginner/index_cn.rst index 3142d7db4c2..75c7ec38e20 100644 --- a/docs/guides/beginner/index_cn.rst +++ b/docs/guides/beginner/index_cn.rst @@ -2,17 +2,17 @@ 模型开发入门 ################### -本部分将介绍飞桨框架2.0的开发流程。 +本部分将介绍飞桨框架 2.0 的开发流程。 -为了快速上手飞桨框架2.0,你可以参考 `10分钟快速上手飞桨 <./quick_start_cn.html>`_ ; +为了快速上手飞桨框架 2.0,你可以参考 `10 分钟快速上手飞桨 <./quick_start_cn.html>`_ ; -当完成了快速上手的任务后,下面这些模块会阐述如何用飞桨框架2.0,实现深度学习过程中的每一步。具体包括: +当完成了快速上手的任务后,下面这些模块会阐述如何用飞桨框架 2.0,实现深度学习过程中的每一步。具体包括: - `Tensor 介绍 <./tensor_cn.html>`_ : 介绍飞桨基本数据类型 `Tensor` 的概念与常见用法。 - `数据集定义与加载 <./data_load_cn.html>`_ : 飞桨框架数据加载的方式,主要为\ 
``paddle.io.Dataset + paddle.io.DataLoader``\ ,以及飞桨内置数据集的介绍。 - `数据预处理 <./data_preprocessing_cn.html>`_ : 飞桨框架数据预处理的方法,主要是\ ``paddle.vision.transform.*``\ 。 -- `模型组网 <./model_cn.html>`_ : 飞桨框架组网API的介绍,主要是\ ``paddle.nn.*``\ ,然后是飞桨框架组网方式的介绍,即 Sequential 的组网与 SubClass 的组网。 -- `训练与预测 <./train_eval_predict_cn.html>`_ : 飞桨框架训练与预测的方法,有两种方式,一种是使用高层API\ ``paddle.Model``\ 封装模型,然后调用\ ``model.fit()、model.evaluate()、model.predict()``\ 完成模型的训练与预测;另一种是用基础API完成模型的训练与预测,也就是对高层API的拆解。 +- `模型组网 <./model_cn.html>`_ : 飞桨框架组网 API 的介绍,主要是\ ``paddle.nn.*``\ ,然后是飞桨框架组网方式的介绍,即 Sequential 的组网与 SubClass 的组网。 +- `训练与预测 <./train_eval_predict_cn.html>`_ : 飞桨框架训练与预测的方法,有两种方式,一种是使用高层 API\ ``paddle.Model``\ 封装模型,然后调用\ ``model.fit()、model.evaluate()、model.predict()``\ 完成模型的训练与预测;另一种是用基础 API 完成模型的训练与预测,也就是对高层 API 的拆解。 - `模型的加载与保存 <./model_save_load_cn.html>`_ : 飞桨框架模型的加载与保存体系介绍。 .. toctree:: diff --git a/docs/guides/beginner/model_save_load_cn.rst b/docs/guides/beginner/model_save_load_cn.rst index 205bf4f7127..39830214f2e 100644 --- a/docs/guides/beginner/model_save_load_cn.rst +++ b/docs/guides/beginner/model_save_load_cn.rst @@ -7,10 +7,10 @@ 一、保存载入体系简介 ################## -1.1 基础API保存载入体系 +1.1 基础 API 保存载入体系 -------------------- -飞桨框架2.1对模型与参数的保存与载入相关接口进行了梳理:对于训练调优场景,我们推荐使用paddle.save/load保存和载入模型;对于推理部署场景,我们推荐使用paddle.jit.save/load(动态图)和paddle.static.save/load_inference_model(静态图)保存载入模型。 +飞桨框架 2.1 对模型与参数的保存与载入相关接口进行了梳理:对于训练调优场景,我们推荐使用 paddle.save/load 保存和载入模型;对于推理部署场景,我们推荐使用 paddle.jit.save/load(动态图)和 paddle.static.save/load_inference_model(静态图)保存载入模型。 飞桨保存载入相关接口包括: @@ -34,17 +34,17 @@ .. image:: images/paddle_jit_save_load_2.1.png -1.2 高阶API保存载入体系 +1.2 高阶 API 保存载入体系 -------------------- - paddle.Model.fit (训练接口,同时带有参数保存的功能) - paddle.Model.save - paddle.Model.load -飞桨框架2.0高阶API仅有一套Save/Load接口,表意直观,体系清晰,若有需要,建议直接阅读相关API文档,此处不再赘述。 +飞桨框架 2.0 高阶 API 仅有一套 Save/Load 接口,表意直观,体系清晰,若有需要,建议直接阅读相关 API 文档,此处不再赘述。 .. 
note:: - 本教程着重介绍飞桨框架2.1的各个保存载入接口的关系及各种使用场景,不对接口参数进行详细介绍,如果需要了解具体接口参数的含义,请直接阅读对应API文档。 + 本教程着重介绍飞桨框架 2.1 的各个保存载入接口的关系及各种使用场景,不对接口参数进行详细介绍,如果需要了解具体接口参数的含义,请直接阅读对应 API 文档。 `模型保存常见问题 <./../../faq/save_cn.html>`_ @@ -56,7 +56,7 @@ 2.1 动态图参数保存载入 ------------------- -若仅需要保存/载入模型的参数,可以使用 ``paddle.save/load`` 结合Layer和Optimizer的state_dict达成目的,此处state_dict是对象的持久参数的载体,dict的key为参数名,value为参数真实的numpy array值。 +若仅需要保存/载入模型的参数,可以使用 ``paddle.save/load`` 结合 Layer 和 Optimizer 的 state_dict 达成目的,此处 state_dict 是对象的持久参数的载体,dict 的 key 为参数名,value 为参数真实的 numpy array 值。 结合以下简单示例,介绍参数保存和载入的方法,以下示例完成了一个简单网络的训练过程: @@ -126,7 +126,7 @@ 2.1.1 参数保存 ------------- -参数保存时,先获取目标对象(Layer或者Optimzier)的state_dict,然后将state_dict保存至磁盘,示例如下(接前述示例): +参数保存时,先获取目标对象(Layer 或者 Optimzier)的 state_dict,然后将 state_dict 保存至磁盘,示例如下(接前述示例): .. code-block:: python @@ -138,7 +138,7 @@ 2.1.2 参数载入 ------------- -参数载入时,先从磁盘载入保存的state_dict,然后通过set_state_dict方法配置到目标对象中,示例如下(接前述示例): +参数载入时,先从磁盘载入保存的 state_dict,然后通过 set_state_dict 方法配置到目标对象中,示例如下(接前述示例): .. code-block:: python @@ -152,7 +152,7 @@ 2.2 静态图模型&参数保存载入 ----------------------- -若仅需要保存/载入模型的参数,可以使用 ``paddle.save/load`` 结合Program的state_dict达成目的,此处state_dict与动态图state_dict概念类似,dict的key为参数名,value为参数真实的值。若想保存整个模型,需要使用``paddle.save``将Program和state_dict都保存下来。 +若仅需要保存/载入模型的参数,可以使用 ``paddle.save/load`` 结合 Program 的 state_dict 达成目的,此处 state_dict 与动态图 state_dict 概念类似,dict 的 key 为参数名,value 为参数真实的值。若想保存整个模型,需要使用``paddle.save``将 Program 和 state_dict 都保存下来。 结合以下简单示例,介绍参数保存和载入的方法: @@ -176,14 +176,14 @@ 2.2.1 静态图模型&参数保存 --------------------- -参数保存时,先获取Program的state_dict,然后将state_dict保存至磁盘,示例如下(接前述示例): +参数保存时,先获取 Program 的 state_dict,然后将 state_dict 保存至磁盘,示例如下(接前述示例): .. code-block:: python paddle.save(prog.state_dict(), "temp/model.pdparams") -如果想要保存整个静态图模型,除了state_dict还需要保存Program +如果想要保存整个静态图模型,除了 state_dict 还需要保存 Program .. 
code-block:: python @@ -193,14 +193,14 @@ 2.2.2 静态图模型&参数载入 --------------------- -如果只保存了state_dict,可以跳过此段代码,直接载入state_dict。如果模型文件中包含Program和state_dict,请先载入Program,示例如下(接前述示例): +如果只保存了 state_dict,可以跳过此段代码,直接载入 state_dict。如果模型文件中包含 Program 和 state_dict,请先载入 Program,示例如下(接前述示例): .. code-block:: python prog = paddle.load("temp/model.pdmodel") -参数载入时,先从磁盘载入保存的state_dict,然后通过set_state_dict方法配置到Program中,示例如下(接前述示例): +参数载入时,先从磁盘载入保存的 state_dict,然后通过 set_state_dict 方法配置到 Program 中,示例如下(接前述示例): .. code-block:: python @@ -227,7 +227,7 @@ 3.1.1.1 动转静训练 + 模型&参数保存 `````````````````````````````` -动转静训练相比直接使用动态图训练具有更好的执行性能,训练完成后,直接将目标Layer传入 ``paddle.jit.save`` 保存即可。: +动转静训练相比直接使用动态图训练具有更好的执行性能,训练完成后,直接将目标 Layer 传入 ``paddle.jit.save`` 保存即可。: 一个简单的网络训练示例如下: @@ -306,9 +306,9 @@ 通过动转静训练后保存模型&参数,有以下三项注意点: -(1) Layer对象的forward方法需要经由 ``paddle.jit.to_static`` 装饰 +(1) Layer 对象的 forward 方法需要经由 ``paddle.jit.to_static`` 装饰 -经过 ``paddle.jit.to_static`` 装饰forward方法后,相应Layer在执行时,会先生成描述模型的Program,然后通过执行Program获取计算结果,示例如下: +经过 ``paddle.jit.to_static`` 装饰 forward 方法后,相应 Layer 在执行时,会先生成描述模型的 Program,然后通过执行 Program 获取计算结果,示例如下: .. code-block:: python @@ -327,7 +327,7 @@ def forward(self, x): return self._linear(x) -若最终需要生成的描述模型的Program支持动态输入,可以同时指明模型的 ``InputSepc`` ,示例如下: +若最终需要生成的描述模型的 Program 支持动态输入,可以同时指明模型的 ``InputSepc`` ,示例如下: .. 
code-block:: python @@ -348,9 +348,9 @@ return self._linear(x) -(2) 请确保Layer.forward方法中仅实现预测功能,避免将训练所需的loss计算逻辑写入forward方法 +(2) 请确保 Layer.forward 方法中仅实现预测功能,避免将训练所需的 loss 计算逻辑写入 forward 方法 -Layer更准确的语义是描述一个具有预测功能的模型对象,接收输入的样本数据,输出预测的结果,而loss计算是仅属于模型训练中的概念。将loss计算的实现放到Layer.forward方法中,会使Layer在不同场景下概念有所差别,并且增大Layer使用的复杂性,这不是良好的编码行为,同时也会在最终保存预测模型时引入剪枝的复杂性,因此建议保持Layer实现的简洁性,下面通过两个示例对比说明: +Layer 更准确的语义是描述一个具有预测功能的模型对象,接收输入的样本数据,输出预测的结果,而 loss 计算是仅属于模型训练中的概念。将 loss 计算的实现放到 Layer.forward 方法中,会使 Layer 在不同场景下概念有所差别,并且增大 Layer 使用的复杂性,这不是良好的编码行为,同时也会在最终保存预测模型时引入剪枝的复杂性,因此建议保持 Layer 实现的简洁性,下面通过两个示例对比说明: 错误示例如下: @@ -401,7 +401,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 (3) 如果你需要保存多个方法,需要用 ``paddle.jit.to_static`` 装饰每一个需要被保存的方法。 .. note:: - 只有在forward之外还需要保存其他方法时才用这个特性,如果仅装饰非forward的方法,而forward没有被装饰,是不符合规范的。此时 ``paddle.jit.save`` 的 ``input_spec`` 参数必须为None。 + 只有在 forward 之外还需要保存其他方法时才用这个特性,如果仅装饰非 forward 的方法,而 forward 没有被装饰,是不符合规范的。此时 ``paddle.jit.save`` 的 ``input_spec`` 参数必须为 None。 示例代码如下: @@ -436,10 +436,10 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 path = "example.model/linear" paddle.jit.save(layer, path) -保存的模型命名规则:forward的模型名字为:模型名+后缀,其他函数的模型名字为:模型名+函数名+后缀。每个函数有各自的pdmodel和pdiparams的文件,所有函数共用pdiparams.info。上述代码将在 ``example.model`` 文件夹下产生5个文件: +保存的模型命名规则:forward 的模型名字为:模型名+后缀,其他函数的模型名字为:模型名+函数名+后缀。每个函数有各自的 pdmodel 和 pdiparams 的文件,所有函数共用 pdiparams.info。上述代码将在 ``example.model`` 文件夹下产生 5 个文件: ``linear.another_forward.pdiparams、 linear.pdiparams、 linear.pdmodel、 linear.another_forward.pdmodel、 linear.pdiparams.info`` -(4) 当使用 ``jit.save`` 保存函数时,``jit.save`` 只保存这个函数对应的静态图 `Program` ,不会保存和这个函数相关的参数。如果你必须保存参数,请使用Layer封装这个函数。 +(4) 当使用 ``jit.save`` 保存函数时,``jit.save`` 只保存这个函数对应的静态图 `Program` ,不会保存和这个函数相关的参数。如果你必须保存参数,请使用 Layer 封装这个函数。 示例代码如下: @@ -547,15 +547,15 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 动态图训练后使用 ``paddle.jit.save`` 保存模型和参数注意点如下: -(1) 相比动转静训练,Layer对象的forward方法不需要额外装饰,保持原实现即可 +(1) 相比动转静训练,Layer 对象的 forward 方法不需要额外装饰,保持原实现即可 -(2) 与动转静训练相同,请确保Layer.forward方法中仅实现预测功能,避免将训练所需的loss计算逻辑写入forward方法 
+(2) 与动转静训练相同,请确保 Layer.forward 方法中仅实现预测功能,避免将训练所需的 loss 计算逻辑写入 forward 方法 -(3) 在最后使用 ``paddle.jit.save`` 时,需要指定Layer的 ``InputSpec`` ,Layer对象forward方法的每一个参数均需要对应的 ``InputSpec`` 进行描述,不能省略。这里的 ``input_spec`` 参数支持两种类型的输入: +(3) 在最后使用 ``paddle.jit.save`` 时,需要指定 Layer 的 ``InputSpec`` ,Layer 对象 forward 方法的每一个参数均需要对应的 ``InputSpec`` 进行描述,不能省略。这里的 ``input_spec`` 参数支持两种类型的输入: - ``InputSpec`` 列表 -使用InputSpec描述forward输入参数的shape,dtype和name,如前述示例(此处示例中name省略,name省略的情况下会使用forward的对应参数名作为name,所以这里的name为 ``x`` ): +使用 InputSpec 描述 forward 输入参数的 shape,dtype 和 name,如前述示例(此处示例中 name 省略,name 省略的情况下会使用 forward 的对应参数名作为 name,所以这里的 name 为 ``x`` ): .. code-block:: python @@ -566,7 +566,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 - Example Tensor 列表 -除使用InputSpec之外,也可以直接使用forward训练时的示例输入,此处可以使用前述示例中迭代DataLoader得到的 ``image`` ,示例如下: +除使用 InputSpec 之外,也可以直接使用 forward 训练时的示例输入,此处可以使用前述示例中迭代 DataLoader 得到的 ``image`` ,示例如下: .. code-block:: python @@ -578,7 +578,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 3.1.2 动态图模型&参数载入 ---------------------- -载入模型参数,使用 ``paddle.jit.load`` 载入即可,载入后得到的是一个Layer的派生类对象 ``TranslatedLayer`` , ``TranslatedLayer`` 具有Layer具有的通用特征,支持切换 ``train`` 或者 ``eval`` 模式,可以进行模型调优或者预测。 +载入模型参数,使用 ``paddle.jit.load`` 载入即可,载入后得到的是一个 Layer 的派生类对象 ``TranslatedLayer`` , ``TranslatedLayer`` 具有 Layer 具有的通用特征,支持切换 ``train`` 或者 ``eval`` 模式,可以进行模型调优或者预测。 .. note:: 为了规避变量名字冲突,载入之后会重命名变量。 @@ -655,7 +655,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 paddle.jit.save(loaded_layer, "fine-tune.model/linear", input_spec=[x]) -此外, ``paddle.jit.save`` 同时保存了模型和参数,如果你只需要从存储结果中载入模型的参数,可以使用 ``paddle.load`` 接口载入,返回所存储模型的state_dict,示例如下: +此外, ``paddle.jit.save`` 同时保存了模型和参数,如果你只需要从存储结果中载入模型的参数,可以使用 ``paddle.load`` 接口载入,返回所存储模型的 state_dict,示例如下: .. 
code-block:: python @@ -714,7 +714,7 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 3.2.1 静态图推理模型&参数保存 ------------------------- -静态图导出推理模型需要指定导出路径、输入、输出变量以及执行器。 ``save_inference_model`` 会裁剪Program的冗余部分,并导出两个文件: ``path_prefix.pdmodel`` 、 ``path_prefix.pdiparams`` 。示例如下(接前述示例): +静态图导出推理模型需要指定导出路径、输入、输出变量以及执行器。 ``save_inference_model`` 会裁剪 Program 的冗余部分,并导出两个文件: ``path_prefix.pdmodel`` 、 ``path_prefix.pdiparams`` 。示例如下(接前述示例): .. code-block:: python @@ -740,4 +740,4 @@ Layer更准确的语义是描述一个具有预测功能的模型对象,接收 四、旧保存格式兼容载入 ################### -如果你是从飞桨框架1.x切换到2.1,曾经使用飞桨框架1.x的fluid相关接口保存模型或者参数,飞桨框架2.1也对这种情况进行了兼容性支持,请参考 :ref:`兼容载入旧格式模型 ` +如果你是从飞桨框架 1.x 切换到 2.1,曾经使用飞桨框架 1.x 的 fluid 相关接口保存模型或者参数,飞桨框架 2.1 也对这种情况进行了兼容性支持,请参考 :ref:`兼容载入旧格式模型 ` diff --git a/docs/guides/beginner/tensor_cn.md b/docs/guides/beginner/tensor_cn.md index 5cd8bfc4523..c640c94e1b8 100644 --- a/docs/guides/beginner/tensor_cn.md +++ b/docs/guides/beginner/tensor_cn.md @@ -1,4 +1,4 @@ -# Tensor介绍 +# Tensor 介绍 ## 一、Tensor 的概念介绍 @@ -19,7 +19,7 @@ def train(model): print("x_data: ", x_data[0][0][0][0]) # 打印神经网络的输入:批数据中的第一个数据的第一个元素 predicts = model(x_data) print("predicts: ", predicts[0]) # 打印神经网络的输出:批数据中的第一个数据的第一个元素 - print("weight: ", model.linear1.weight[0][0]) # 打印神经网络的权重:linear1层的weight中的第一个元素 + print("weight: ", model.linear1.weight[0][0]) # 打印神经网络的权重:linear1 层的 weight 中的第一个元素 loss = F.cross_entropy(predicts, y_data) acc = paddle.metric.accuracy(predicts, y_data) loss.backward() @@ -39,7 +39,7 @@ predicts: Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=Fa weight: Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=False, [0.02227839]) ``` -以上示例代码来源 [使用LeNet在MNIST数据集实现图像分类](../../../practices/cv/image_classification) 任务 5.1 小节(篇幅原因仅截取部分),分别打印了神经网络的输入、输出数据和网络中的参数,可以看到均采用了 Tensor 数据结构。 +以上示例代码来源 [使用 LeNet 在 MNIST 数据集实现图像分类](../../../practices/cv/image_classification) 任务 5.1 小节(篇幅原因仅截取部分),分别打印了神经网络的输入、输出数据和网络中的参数,可以看到均采用了 Tensor 数据结构。 ## 二、Tensor 的创建 @@ -121,7 +121,7 @@ Tensor(shape=[2, 2, 5], 
dtype=int64, place=Place(gpu:0), stop_gradient=True,
-
图1 不同维度的Tensor可视化表示
+
图 1 不同维度的 Tensor 可视化表示
需要注意的是,Tensor 必须形如矩形,即在任何一个维度上,元素的数量必须相等,否则会抛出异常,示例如下: @@ -160,8 +160,8 @@ Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True, 如果要在指定区间内创建 Tensor,可以使用[paddle.arrange](../../../api/paddle/arrange_cn.html)、 [paddle.linspace](../../../api/paddle/linspace_cn.html) 实现。 ```python -paddle.arange(start, end, step) # 创建以步长step均匀分隔区间[start, end)的Tensor -paddle.linspace(start, end, num) # 创建以元素个数num均匀分隔区间[start, end)的Tensor +paddle.arange(start, end, step) # 创建以步长 step 均匀分隔区间[start, end)的 Tensor +paddle.linspace(start, end, num) # 创建以元素个数 num 均匀分隔区间[start, end)的 Tensor ``` 示例如下: @@ -178,7 +178,7 @@ Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True, > * **创建一个空 Tensor**,即根据 shape 和 dtype 创建尚未初始化元素值的 Tensor,可通过 [paddle.empty](../../../api/paddle/empty_cn.html) 实现。 > * **创建一个与其他 Tensor 具有相同 shape 与 dtype 的 Tensor**,可通过 [paddle.ones_like](../../../api/paddle/ones_like_cn.html) 、 [paddle.zeros_like](../../../api/paddle/zeros_like_cn.html) 、 [paddle.full_like](../../../api/paddle/full_like_cn.html) 、[paddle.empty_like](../../../api/paddle/empty_like_cn.html) 实现。 > * **拷贝并创建一个与其他 Tensor 完全相同的 Tensor**,可通过 [paddle.clone](../../../api/paddle/clone_cn.html) 实现。 -> * **创建一个满足特定分布的Tensor**,如 [paddle.rand](../../../api/paddle/rand_cn.html), [paddle.randn](../../../api/paddle/randn_cn.html) , [paddle.randint](../../../api/paddle/randint_cn.html) 等。 +> * **创建一个满足特定分布的 Tensor**,如 [paddle.rand](../../../api/paddle/rand_cn.html), [paddle.randn](../../../api/paddle/randn_cn.html) , [paddle.randint](../../../api/paddle/randint_cn.html) 等。 > * **通过设置随机种子创建 Tensor**,可每次生成相同元素值的随机数 Tensor,可通过 [paddle.seed](../../../api/paddle/seed_cn.html) 和 [paddle.rand](../../../api/paddle/rand_cn.html) 组合实现。 @@ -187,7 +187,7 @@ Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True, 在常见深度学习任务中,数据样本可能是图片(image)、文本(text)、语音(audio)等多种类型,在送入神经网络训练或推理前,这些数据和对应的标签均需要创建为 Tensor。以下是图像场景和 NLP 场景中手动转换 Tensor 方法的介绍。 -* 对于图像场景,可使用 
[paddle.vision.transforms.ToTensor](../../../api/paddle/vision/transforms/ToTensor_cn.html) 直接将 PIL.Image 格式的数据转为 Tensor,使用 [paddle.to_tensor](../../../api/paddle/to_tensor_cn.html) 将图像的标签(Label,通常是Python 或 Numpy 格式的数据)转为 Tensor。 +* 对于图像场景,可使用 [paddle.vision.transforms.ToTensor](../../../api/paddle/vision/transforms/ToTensor_cn.html) 直接将 PIL.Image 格式的数据转为 Tensor,使用 [paddle.to_tensor](../../../api/paddle/to_tensor_cn.html) 将图像的标签(Label,通常是 Python 或 Numpy 格式的数据)转为 Tensor。 * 对于文本场景,需将文本数据解码为数字后,再通过 [paddle.to_tensor](../../../api/paddle/to_tensor_cn.html) 转为 Tensor。不同文本任务标签形式不一样,有的任务标签也是文本,有的则是数字,均需最终通过 paddle.to_tensor 转为 Tensor。 下面以图像场景为例介绍,以下示例代码中将随机生成的图片转换为 Tensor。 @@ -200,7 +200,7 @@ import paddle.vision.transforms.functional as F fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) # 创建随机图片 transform = T.ToTensor() -tensor = transform(fake_img) # 使用ToTensor()将图片转换为Tensor +tensor = transform(fake_img) # 使用 ToTensor()将图片转换为 Tensor print(tensor) ``` @@ -216,7 +216,7 @@ Tensor(shape=[3, 224, 224], dtype=float32, place=Place(gpu:0), stop_gradient=Tru ### 2.5 自动创建 Tensor 的功能介绍 -除了手动创建 Tensor 外,实际在飞桨框架中有一些 API 封装了 Tensor 创建的操作,从而无需用户手动创建 Tensor。例如 [paddle.io.DataLoader](../../../api/paddle/io/DataLoader_cn.html) 能够基于原始 Dataset,返回读取 Dataset 数据的迭代器,迭代器返回的数据中的每个元素都是一个 Tensor。另外在一些高层API,如 [paddle.Model.fit](../../../api/paddle/Model_cn.html) 、[paddle.Model.predict](../../../api/paddle/Model_cn.html) ,如果传入的数据不是 Tensor,会自动转为 Tensor 再进行模型训练或推理。 +除了手动创建 Tensor 外,实际在飞桨框架中有一些 API 封装了 Tensor 创建的操作,从而无需用户手动创建 Tensor。例如 [paddle.io.DataLoader](../../../api/paddle/io/DataLoader_cn.html) 能够基于原始 Dataset,返回读取 Dataset 数据的迭代器,迭代器返回的数据中的每个元素都是一个 Tensor。另外在一些高层 API,如 [paddle.Model.fit](../../../api/paddle/Model_cn.html) 、[paddle.Model.predict](../../../api/paddle/Model_cn.html) ,如果传入的数据不是 Tensor,会自动转为 Tensor 再进行模型训练或推理。 > **说明:** > > paddle.Model.fit、paddle.Model.predict 等高层 API 支持传入 Dataset 或 DataLoader,如果传入的是 Dataset,那么会用 DataLoader 封装转为 Tensor 数据;如果传入的是 
DataLoader,则直接从 DataLoader 迭代读取 Tensor 数据送入模型训练或推理。因此即使没有写将数据转为 Tensor 的代码,也能正常执行,提升了编程效率和容错性。 @@ -232,17 +232,17 @@ transform = Compose([Normalize(mean=[127.5], data_format='CHW')]) test_dataset = paddle.vision.datasets.MNIST(mode='test', transform=transform) -print(test_dataset[0][1]) # 打印原始数据集的第一个数据的label +print(test_dataset[0][1]) # 打印原始数据集的第一个数据的 label loader = paddle.io.DataLoader(test_dataset) for data in enumerate(loader): x, label = data[1] - print(label) # 打印由DataLoader返回的迭代器中的第一个数据的label + print(label) # 打印由 DataLoader 返回的迭代器中的第一个数据的 label break ``` ```text -[7] # 原始数据中label为Python list +[7] # 原始数据中 label 为 Python list Tensor(shape=[1, 1], dtype=int64, place=Place(gpu_pinned), stop_gradient=True, - [[7]]) # 由DataLoader转换后,label为Tensor + [[7]]) # 由 DataLoader 转换后,label 为 Tensor ``` ## 三、Tensor 的属性 @@ -253,14 +253,14 @@ Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True, [2., 3., 4.]) ``` -### 3.1 Tensor的形状(shape) +### 3.1 Tensor 的形状(shape) **(1)形状的介绍** 形状是 Tensor 的一个重要的基础属性,可以通过 [Tensor.shape](../../../api/paddle/Tensor_cn.html#shape) 查看一个 Tensor 的形状,以下为相关概念: * shape:描述了 Tensor 每个维度上元素的数量。 - * ndim: Tensor 的维度数量,例如向量的维度为 1,矩阵的维度为2,Tensor 可以有任意数量的维度。 + * ndim: Tensor 的维度数量,例如向量的维度为 1,矩阵的维度为 2,Tensor 可以有任意数量的维度。 * axis 或者 dimension:Tensor 的轴,即某个特定的维度。 * size:Tensor 中全部元素的个数。 @@ -271,7 +271,7 @@ ndim_4_Tensor = paddle.ones([2, 3, 4, 5])
-
图2 Tensor的shape、axis、dimension、ndim之间的关系
+
图 2 Tensor 的 shape、axis、dimension、ndim 之间的关系
```python print("Data Type of every element:", ndim_4_Tensor.dtype) @@ -312,10 +312,10 @@ After reshape: [1, 3] 通过几个例子来详细了解: ```text origin:[3, 2, 5] reshape:[3, 10] actual: [3, 10] # 直接指定目标 shape -origin:[3, 2, 5] reshape:[-1] actual: [30] # 转换为1维,维度根据元素总数推断出来是3*2*5=30 -origin:[3, 2, 5] reshape:[-1, 5] actual: [6, 5] # 转换为2维,固定一个维度5,另一个维度根据元素总数推断出来是30÷5=6 -origin:[3, 2, 5] reshape:[0, -1] actual: [3, 6] # reshape:[0, -1]中0的索引值为0,按照规则,转换后第0维的元素数量与原始Tensor第0维的元素数量相同,为3;第1维的元素数量根据元素总值计算得出为30÷3=10。 -origin:[3, 2] reshape:[3, 1, 0] error: # reshape:[3, 1, 0]中0的索引值为2,但原Tensor只有2维,无法找到与第3维对应的元素数量,因此出错。 +origin:[3, 2, 5] reshape:[-1] actual: [30] # 转换为 1 维,维度根据元素总数推断出来是 3*2*5=30 +origin:[3, 2, 5] reshape:[-1, 5] actual: [6, 5] # 转换为 2 维,固定一个维度 5,另一个维度根据元素总数推断出来是 30÷5=6 +origin:[3, 2, 5] reshape:[0, -1] actual: [3, 6] # reshape:[0, -1]中 0 的索引值为 0,按照规则,转换后第 0 维的元素数量与原始 Tensor 第 0 维的元素数量相同,为 3;第 1 维的元素数量根据元素总值计算得出为 30÷3=10。 +origin:[3, 2] reshape:[3, 1, 0] error: # reshape:[3, 1, 0]中 0 的索引值为 2,但原 Tensor 只有 2 维,无法找到与第 3 维对应的元素数量,因此出错。 ``` 从上面的例子可以看到,通过 reshape:[-1] ,可以很方便地将 Tensor 按其在计算机上的内存分布展平为一维。 @@ -335,9 +335,9 @@ Tensor flattened to Vector: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 1 **(3)原位(Inplace)操作和非原位操作的区别** -飞桨框架的 API 有原位(Inplace)操作和非原位操作之分,原位操作即在原 Tensor 上保存操作结果,输出 Tensor 将与输入Tensor 共享数据,并且没有 Tensor 数据拷贝的过程。非原位操作则不会修改原 Tensor,而是返回一个新的 Tensor。通过 API 名称区分两者,如 [paddle.reshape](../../../api/paddle/reshape_cn.html) 是非原位操作,[paddle.reshape_](../../../api/paddle/reshape__cn.html) 是原位操作。 +飞桨框架的 API 有原位(Inplace)操作和非原位操作之分,原位操作即在原 Tensor 上保存操作结果,输出 Tensor 将与输入 Tensor 共享数据,并且没有 Tensor 数据拷贝的过程。非原位操作则不会修改原 Tensor,而是返回一个新的 Tensor。通过 API 名称区分两者,如 [paddle.reshape](../../../api/paddle/reshape_cn.html) 是非原位操作,[paddle.reshape_](../../../api/paddle/reshape__cn.html) 是原位操作。 -下面以 reshape 为例说明,通过对比Tensor的 name (每个 Tensor 创建时都会有一个独一无二的 name),判断是否为同一个Tensor。 +下面以 reshape 为例说明,通过对比 Tensor 的 name (每个 Tensor 创建时都会有一个独一无二的 name),判断是否为同一个 Tensor。 ```python origin_tensor = paddle.to_tensor([1, 
2, 3]) new_tensor = paddle.reshape(origin_tensor, [1, 3]) # 非原位操作 @@ -348,11 +348,11 @@ print("same_tensor name: ", same_tensor.name) ``` ```text origin_tensor name: generated_tensor_0 -new_tensor name: auto_0_ # 非原位操作后产生的Tensor与原始Tensor的名称不同 -same_tensor name: generated_tensor_0 # 原位操作后产生的Tensor与原始Tensor的名称相同 +new_tensor name: auto_0_ # 非原位操作后产生的 Tensor 与原始 Tensor 的名称不同 +same_tensor name: generated_tensor_0 # 原位操作后产生的 Tensor 与原始 Tensor 的名称相同 ``` -### 3.2 Tensor的数据类型(dtype) +### 3.2 Tensor 的数据类型(dtype) **(1)指定数据类型的介绍** Tensor 的数据类型 dtype 可以通过 [Tensor.dtype](../../../api/paddle/Tensor_cn.html#dtype) 查看,支持类型包括:`bool`、`float16`、`float32`、`float64`、`uint8`、`int8`、`int16`、`int32`、`int64`、`complex64`、`complex128`。 @@ -364,10 +364,10 @@ Tensor 的数据类型 dtype 可以通过 [Tensor.dtype](../../../api/paddle/Te * 对于 Python 整型数据,默认会创建 `int64` 型 Tensor; * 对于 Python 浮点型数据,默认会创建 `float32` 型 Tensor,并且可以通过 [paddle.set_default_dtype](../../../api/paddle/set_default_dtype_cn.html) 来调整浮点型数据的默认类型。 ```python -# 创建Tensor时指定dtype +# 创建 Tensor 时指定 dtype ndim_1_tensor = paddle.to_tensor([2.0, 3.0, 4.0], dtype='float64') print("Tensor dtype of ndim_1_tensor:", ndim_1_tensor.dtype) -# 创建Tensor时不指定dtype,自动选择对应的默认类型 +# 创建 Tensor 时不指定 dtype,自动选择对应的默认类型 print("Tensor dtype from Python integers:", paddle.to_tensor(1).dtype) print("Tensor dtype from Python floating point:", paddle.to_tensor(1.0).dtype) ``` @@ -406,16 +406,16 @@ print("Tensor after cast to int64:", int64_Tensor.dtype) Tensor after cast to float64: paddle.float64 Tensor after cast to int64: paddle.int64 ``` -### 3.3 Tensor的设备位置(place) +### 3.3 Tensor 的设备位置(place) 初始化 Tensor 时可以通过 [Tensor.place](../../../api/paddle/Tensor_cn.html#place) 来指定其分配的设备位置,可支持的设备位置有:CPU、GPU、固定内存、XPU(Baidu Kunlun)、NPU(Huawei)、MLU(寒武纪)、IPU(Graphcore)等。其中固定内存也称为不可分页内存或锁页内存,其与 GPU 之间具有更高的读写效率,并且支持异步传输,这对网络整体性能会有进一步提升,但其缺点是分配空间过多时可能会降低主机系统的性能,因为其减少了用于存储虚拟内存数据的可分页内存。 > **说明:** > > * 当未指定 place 时,Tensor 默认设备位置和安装的飞桨框架版本一致。如安装了 GPU 版本的飞桨,则设备位置默认为 GPU,即 Tensor 的`place` 默认为 
[paddle.CUDAPlace](../../../api/paddle/CUDAPlace_cn.html)。 > * 使用 [paddle.device.set_device](../../../api/paddle/device/set_device_cn.html) 可设置全局默认的设备位置。Tensor.place 的指定值优先级高于全局默认值。 -以下示例分别创建了CPU、GPU和固定内存上的 Tensor,并通过 `Tensor.place` 查看 Tensor 所在的设备位置: +以下示例分别创建了 CPU、GPU 和固定内存上的 Tensor,并通过 `Tensor.place` 查看 Tensor 所在的设备位置: -* **创建CPU上的Tensor** +* **创建 CPU 上的 Tensor** ```python cpu_Tensor = paddle.to_tensor(1, place=paddle.CPUPlace()) print(cpu_Tensor.place) @@ -425,17 +425,17 @@ print(cpu_Tensor.place) Place(cpu) ``` -* **创建GPU上的Tensor** +* **创建 GPU 上的 Tensor** ```python gpu_Tensor = paddle.to_tensor(1, place=paddle.CUDAPlace(0)) -print(gpu_Tensor.place) # 显示Tensor位于GPU设备的第 0 张显卡上 +print(gpu_Tensor.place) # 显示 Tensor 位于 GPU 设备的第 0 张显卡上 ``` ```text Place(gpu:0) ``` -* **创建固定内存上的Tensor** +* **创建固定内存上的 Tensor** ```python pin_memory_Tensor = paddle.to_tensor(1, place=paddle.CUDAPinnedPlace()) print(pin_memory_Tensor.place) @@ -445,9 +445,9 @@ print(pin_memory_Tensor.place) Place(gpu_pinned) ``` -### 3.4 Tensor的名称(name) +### 3.4 Tensor 的名称(name) -Tensor 的名称是其唯一的标识符,为 Python 字符串类型,查看一个 Tensor 的名称可以通过 Tensor.name 属性。默认地,在每个Tensor 创建时,会自定义一个独一无二的名称。 +Tensor 的名称是其唯一的标识符,为 Python 字符串类型,查看一个 Tensor 的名称可以通过 Tensor.name 属性。默认地,在每个 Tensor 创建时,会自定义一个独一无二的名称。 ```python print("Tensor name:", paddle.to_tensor(1).name) @@ -456,7 +456,7 @@ print("Tensor name:", paddle.to_tensor(1).name) Tensor name: generated_tensor_0 ``` ### 3.5 Tensor 的 stop_gradient 属性 -stop_gradient 表示是否停止计算梯度,默认值为 True,表示停止计算梯度,梯度不再回传。在设计网络时,如不需要对某些参数进行训练更新,可以将参数的stop_gradient设置为True。可参考以下代码直接设置 stop_gradient 的值。 +stop_gradient 表示是否停止计算梯度,默认值为 True,表示停止计算梯度,梯度不再回传。在设计网络时,如不需要对某些参数进行训练更新,可以将参数的 stop_gradient 设置为 True。可参考以下代码直接设置 stop_gradient 的值。 ```python eg = paddle.to_tensor(1) @@ -469,7 +469,7 @@ Tensor stop_gradient: True Tensor stop_gradient: False ``` -## 四、Tensor的操作 +## 四、Tensor 的操作 ### 4.1 索引和切片 通过索引或切片方式可访问或修改 Tensor。飞桨框架使用标准的 Python 索引规则与 Numpy 索引规则,与 [Indexing a list or a string in 
Python](https://docs.python.org/3/tutorial/introduction.html#strings) 类似。具有以下特点: @@ -481,8 +481,8 @@ Tensor stop_gradient: False * 针对一维 Tensor,仅有单个维度上的索引或切片: ```python ndim_1_Tensor = paddle.to_tensor([0, 1, 2, 3, 4, 5, 6, 7, 8]) -print("Origin Tensor:", ndim_1_Tensor.numpy()) # 原始1维Tensor -print("First element:", ndim_1_Tensor[0].numpy()) # 取Tensor第一个元素的值? +print("Origin Tensor:", ndim_1_Tensor.numpy()) # 原始 1 维 Tensor +print("First element:", ndim_1_Tensor[0].numpy()) # 取 Tensor 第一个元素的值? print("Last element:", ndim_1_Tensor[-1].numpy()) print("All element:", ndim_1_Tensor[:].numpy()) print("Before 3:", ndim_1_Tensor[:3].numpy()) @@ -566,7 +566,7 @@ x[1] = paddle.ones([3]) # x : [[1., 2., 3.], [1., 1., 1.]] --- -同时,飞桨还提供了丰富的 Tensor 操作的 API,包括数学运算、逻辑运算、线性代数等100余种 API,这些 API 调用有两种方法: +同时,飞桨还提供了丰富的 Tensor 操作的 API,包括数学运算、逻辑运算、线性代数等 100 余种 API,这些 API 调用有两种方法: ```python x = paddle.to_tensor([[1.1, 2.2], [3.3, 4.4]], dtype="float64") y = paddle.to_tensor([[5.5, 6.6], [7.7, 8.8]], dtype="float64") @@ -594,7 +594,7 @@ x.ceil() #逐元素向上取整 x.floor() #逐元素向下取整 x.round() #逐元素四舍五入 x.exp() #逐元素计算自然常数为底的指数 -x.log() #逐元素计算x的自然对数 +x.log() #逐元素计算 x 的自然对数 x.reciprocal() #逐元素求倒数 x.square() #逐元素计算平方 x.sqrt() #逐元素计算平方根 @@ -624,33 +624,33 @@ x ** y -> x.pow(y) #逐元素幂运算 ### 4.3 逻辑运算 ```python -x.isfinite() #判断Tensor中元素是否是有限的数字,即不包括inf与nan -x.equal_all(y) #判断两个Tensor的全部元素是否相等,并返回形状为[1]的布尔类Tensor -x.equal(y) #判断两个Tensor的每个元素是否相等,并返回形状相同的布尔类Tensor -x.not_equal(y) #判断两个Tensor的每个元素是否不相等 -x.less_than(y) #判断Tensor x的元素是否小于Tensor y的对应元素 -x.less_equal(y) #判断Tensor x的元素是否小于或等于Tensor y的对应元素 -x.greater_than(y) #判断Tensor x的元素是否大于Tensor y的对应元素 -x.greater_equal(y) #判断Tensor x的元素是否大于或等于Tensor y的对应元素 -x.allclose(y) #判断Tensor x的全部元素是否与Tensor y的全部元素接近,并返回形状为[1]的布尔类Tensor +x.isfinite() #判断 Tensor 中元素是否是有限的数字,即不包括 inf 与 nan +x.equal_all(y) #判断两个 Tensor 的全部元素是否相等,并返回形状为[1]的布尔类 Tensor +x.equal(y) #判断两个 Tensor 的每个元素是否相等,并返回形状相同的布尔类 Tensor +x.not_equal(y) #判断两个 Tensor 的每个元素是否不相等 +x.less_than(y) #判断 Tensor x 的元素是否小于 
Tensor y 的对应元素 +x.less_equal(y) #判断 Tensor x 的元素是否小于或等于 Tensor y 的对应元素 +x.greater_than(y) #判断 Tensor x 的元素是否大于 Tensor y 的对应元素 +x.greater_equal(y) #判断 Tensor x 的元素是否大于或等于 Tensor y 的对应元素 +x.allclose(y) #判断 Tensor x 的全部元素是否与 Tensor y 的全部元素接近,并返回形状为[1]的布尔类 Tensor ``` 同样地,飞桨框架对 Python 逻辑比较相关的魔法函数进行了重写,以下操作与上述结果相同。 ```text -x == y -> x.equal(y) #判断两个Tensor的每个元素是否相等 -x != y -> x.not_equal(y) #判断两个Tensor的每个元素是否不相等 -x < y -> x.less_than(y) #判断Tensor x的元素是否小于Tensor y的对应元素 -x <= y -> x.less_equal(y) #判断Tensor x的元素是否小于或等于Tensor y的对应元素 -x > y -> x.greater_than(y) #判断Tensor x的元素是否大于Tensor y的对应元素 -x >= y -> x.greater_equal(y) #判断Tensor x的元素是否大于或等于Tensor y的对应元素 +x == y -> x.equal(y) #判断两个 Tensor 的每个元素是否相等 +x != y -> x.not_equal(y) #判断两个 Tensor 的每个元素是否不相等 +x < y -> x.less_than(y) #判断 Tensor x 的元素是否小于 Tensor y 的对应元素 +x <= y -> x.less_equal(y) #判断 Tensor x 的元素是否小于或等于 Tensor y 的对应元素 +x > y -> x.greater_than(y) #判断 Tensor x 的元素是否大于 Tensor y 的对应元素 +x >= y -> x.greater_equal(y) #判断 Tensor x 的元素是否大于或等于 Tensor y 的对应元素 ``` -以下操作仅针对 bool 型Tensor: +以下操作仅针对 bool 型 Tensor: ```python -x.logical_and(y) #对两个布尔类型Tensor逐元素进行逻辑与操作 -x.logical_or(y) #对两个布尔类型Tensor逐元素进行逻辑或操作 -x.logical_xor(y) #对两个布尔类型Tensor逐元素进行逻辑亦或操作 -x.logical_not(y) #对两个布尔类型Tensor逐元素进行逻辑非操作 +x.logical_and(y) #对两个布尔类型 Tensor 逐元素进行逻辑与操作 +x.logical_or(y) #对两个布尔类型 Tensor 逐元素进行逻辑或操作 +x.logical_xor(y) #对两个布尔类型 Tensor 逐元素进行逻辑亦或操作 +x.logical_not(y) #对两个布尔类型 Tensor 逐元素进行逻辑非操作 ``` ### 4.4 线性代数 @@ -658,7 +658,7 @@ x.logical_not(y) #对两个布尔类型Tensor逐元素进行逻辑 x.t() #矩阵转置 x.transpose([1, 0]) #交换第 0 维与第 1 维的顺序 x.norm('fro') #矩阵的弗罗贝尼乌斯范数 -x.dist(y, p=2) #矩阵(x-y)的2范数 +x.dist(y, p=2) #矩阵(x-y)的 2 范数 x.matmul(y) #矩阵乘法 ``` @@ -670,7 +670,7 @@ x.matmul(y) #矩阵乘法 ## 五、Tensor 的广播机制 在深度学习任务中,有时需要使用较小形状的 Tensor 与较大形状的 Tensor 执行计算,广播机制就是将较小形状的 Tensor 扩展到与较大形状的 Tensor 一样的形状,便于匹配计算,同时又没有对较小形状 Tensor 进行数据拷贝操作,从而提升算法实现的运算效率。 -飞桨框架提供的一些API支持广播(broadcasting)机制,允许在一些运算时使用不同形状的 Tensor。 +飞桨框架提供的一些 API 支持广播(broadcasting)机制,允许在一些运算时使用不同形状的 Tensor。 飞桨 Tensor 的广播机制主要遵循如下规则(参考 
[Numpy 广播机制](https://numpy.org/doc/stable/user/basics.broadcasting.html#module-numpy.doc.broadcasting)): * 每个 Tensor 至少为一维 Tensor @@ -679,24 +679,24 @@ x.matmul(y) #矩阵乘法 举例如下: ```python -# 可以广播的例子1 +# 可以广播的例子 1 x = paddle.ones((2, 3, 4)) y = paddle.ones((2, 3, 4)) -# 两个Tensor 形状一致,可以广播 +# 两个 Tensor 形状一致,可以广播 z = x + y print(z.shape) # [2, 3, 4] ``` ```python -# 可以广播的例子2 +# 可以广播的例子 2 x = paddle.ones((2, 3, 1, 5)) y = paddle.ones((3, 4, 1)) # 从最后一个维度向前依次比较: -# 第一次:y的维度大小是1 -# 第二次:x的维度大小是1 -# 第三次:x和y的维度大小相等 -# 第四次:y的维度不存在 -# 所以 x和y是可以广播的 +# 第一次:y 的维度大小是 1 +# 第二次:x 的维度大小是 1 +# 第三次:x 和 y 的维度大小相等 +# 第四次:y 的维度不存在 +# 所以 x 和 y 是可以广播的 z = x + y print(z.shape) # [2, 3, 4, 5] @@ -705,29 +705,29 @@ print(z.shape) # 不可广播的例子 x = paddle.ones((2, 3, 4)) y = paddle.ones((2, 3, 6)) -# 此时x和y是不可广播的,因为第一次比较:4不等于6 +# 此时 x 和 y 是不可广播的,因为第一次比较:4 不等于 6 # z = x + y # ValueError: (InvalidArgument) Broadcast dimension mismatch. ``` -在了解两个 Tensor 在什么情况下可以广播的规则后,两个Tensor进行广播语义后的结果Tensor的形状计算规则如下: +在了解两个 Tensor 在什么情况下可以广播的规则后,两个 Tensor 进行广播语义后的结果 Tensor 的形状计算规则如下: -* 如果两个Tensor的形状的长度不一致,会在较小长度的形状矩阵前部添加1,直到两个Tensor的形状长度相等。 -* 保证两个Tensor形状相等之后,每个维度上的结果维度就是当前维度上的较大值。 +* 如果两个 Tensor 的形状的长度不一致,会在较小长度的形状矩阵前部添加 1,直到两个 Tensor 的形状长度相等。 +* 保证两个 Tensor 形状相等之后,每个维度上的结果维度就是当前维度上的较大值。 举例如下: ```python x = paddle.ones((2, 1, 4)) -y = paddle.ones((3, 1)) # y的形状长度为2,小于x的形状长度3,因此会在y的形状前部添加1,结果就是y的形状变为[1, 3, 1] +y = paddle.ones((3, 1)) # y 的形状长度为 2,小于 x 的形状长度 3,因此会在 y 的形状前部添加 1,结果就是 y 的形状变为[1, 3, 1] z = x + y print(z.shape) -# z的形状: [2,3,4],z的每一维度上的尺寸,将取x和y对应维度上尺寸的较大值,如第0维x的尺寸为2,y的尺寸为1,则z的第0维尺寸为2 +# z 的形状: [2,3,4],z 的每一维度上的尺寸,将取 x 和 y 对应维度上尺寸的较大值,如第 0 维 x 的尺寸为 2,y 的尺寸为 1,则 z 的第 0 维尺寸为 2 ```
-
图3 Tensor 广播示例
+
图 3 Tensor 广播示例
## 六、Tensor 与 Numpy 数组相互转换 diff --git a/docs/guides/custom_op/index_cn.rst b/docs/guides/custom_op/index_cn.rst index 92dc6f027e5..98c6a280d6e 100644 --- a/docs/guides/custom_op/index_cn.rst +++ b/docs/guides/custom_op/index_cn.rst @@ -2,14 +2,14 @@ 自定义算子 ############# -介绍如何使用飞桨的自定义算子(Operator,简称Op)机制,包括以下两类: +介绍如何使用飞桨的自定义算子(Operator,简称 Op)机制,包括以下两类: 1. C++算子:编写方法较为简洁,不涉及框架内部概念,无需重新编译飞桨框架,以外接模块的方式使用的算子 -2. Python算子:使用Python编写实现前向(forward)和反向(backward)方法,在模型组网中使用的自定义API +2. Python 算子:使用 Python 编写实现前向(forward)和反向(backward)方法,在模型组网中使用的自定义 API -- `自定义C++算子 <./new_cpp_op_cn.html>`_ +- `自定义 C++算子 <./new_cpp_op_cn.html>`_ -- `自定义Python算子 <./new_python_op_cn.html>`_ +- `自定义 Python 算子 <./new_python_op_cn.html>`_ .. toctree:: diff --git a/docs/guides/custom_op/new_cpp_op_cn.md b/docs/guides/custom_op/new_cpp_op_cn.md index 0ed20bd46ee..476dc256d07 100644 --- a/docs/guides/custom_op/new_cpp_op_cn.md +++ b/docs/guides/custom_op/new_cpp_op_cn.md @@ -1,8 +1,8 @@ -# 自定义C++算子 +# 自定义 C++算子 ## 概述 -算子(Operator,简称Op)是构建神经网络的基础组件,飞桨框架提供了丰富的算子库,能够满足绝大多数场景的使用需求。但是出于以下几点原因,您可能希望定制化算子的C++实现,从而满足特定需求: +算子(Operator,简称 Op)是构建神经网络的基础组件,飞桨框架提供了丰富的算子库,能够满足绝大多数场景的使用需求。但是出于以下几点原因,您可能希望定制化算子的 C++实现,从而满足特定需求: 1. 已有的算子无法组合出您需要的运算逻辑; 2. 使用已有算子组合得到的运算逻辑无法满足您的性能需求。 @@ -11,7 +11,7 @@ 使用自定义算子机制,仅需要以下两个步骤: -1. 实现算子的C++运算逻辑,完成算子构建 +1. 实现算子的 C++运算逻辑,完成算子构建 2. 调用 `python` 接口完成算子编译与注册 随后即可在模型中使用,下面通过实现一个 `relu` 运算,介绍具体的实现、编译与应用流程。 @@ -19,11 +19,11 @@ > 注意事项: > - 在使用本机制实现自定义算子之前,请确保已经正确安装了 `PaddlePaddle 2.3` 及以上版本 > - 该机制已支持 `Linux` 、 `Mac` 和 `Windows` 平台。 -> - 本自定义外部算子机制仅保证源码级别的兼容,不保证二进制级别的兼容,例如,基于飞桨2.3版本编写的自定义算子源码实现,在飞桨2.3或者后续版本中编译链接使用没有问题,但基于飞桨2.3之前的版本编译得到的自定义算子动态库文件(*.so, *.dylib, *.dll),在2.3或者后续发布的版本中可能会加载失败。 +> - 本自定义外部算子机制仅保证源码级别的兼容,不保证二进制级别的兼容,例如,基于飞桨 2.3 版本编写的自定义算子源码实现,在飞桨 2.3 或者后续版本中编译链接使用没有问题,但基于飞桨 2.3 之前的版本编译得到的自定义算子动态库文件(*.so, *.dylib, *.dll),在 2.3 或者后续发布的版本中可能会加载失败。 -## 自定义算子C++实现 +## 自定义算子 C++实现 -使用自定义算子机制,需要编写以下组件的C++实现,包括: +使用自定义算子机制,需要编写以下组件的 C++实现,包括: 1. 
**算子的运算函数**:算子核心的计算逻辑实现,主要是对输入 `Tensor` 进行处理,得到输出 `Tensor` 的过程 2. **算子的维度与类型推导函数**:用于在组网编译和运行时,正确推导出输出 `Tensor` 的 `shape` 和 `data type` @@ -31,7 +31,7 @@ 下面结合示例进行介绍。 -### 运算函数与基础API +### 运算函数与基础 API #### 基本写法要求 @@ -68,7 +68,7 @@ std::vector OpFucntion(const paddle::Tensor& x, ..., int attr, . #### 设备类型 -设备类型使用 `Place` 表示,`Place` 含有内存类型AllocationType与设备ID信息,是 `Tensor` 的基础描述信息之一。 +设备类型使用 `Place` 表示,`Place` 含有内存类型 AllocationType 与设备 ID 信息,是 `Tensor` 的基础描述信息之一。 其中设备类型是枚举类型: @@ -82,20 +82,20 @@ enum class AllocationType : int8_t { }; ``` -设备ID是一个int8_t的数值,用于表示当前使用的设备卡号。 +设备 ID 是一个 int8_t 的数值,用于表示当前使用的设备卡号。 -一些Place使用示例如下: +一些 Place 使用示例如下: ```c++ auto cpu_place = paddle::CPUPlace(); -auto gpu_place = paddle::GPUPlace(); // 默认设备ID为0,一般在自定义算子内使用默认的构造方式即可 -auto gpu_place = paddle::GPUPlace(1); // GPU 1号卡 +auto gpu_place = paddle::GPUPlace(); // 默认设备 ID 为 0,一般在自定义算子内使用默认的构造方式即可 +auto gpu_place = paddle::GPUPlace(1); // GPU 1 号卡 ``` -此外,Place还有两个常用的方法: +此外,Place 还有两个常用的方法: -- GetType():获取Place的内存类型AllocationType -- GetDeviceId():获取Place的设备ID +- GetType():获取 Place 的内存类型 AllocationType +- GetDeviceId():获取 Place 的设备 ID 使用示例如下: @@ -105,9 +105,9 @@ auto alloc_type = gpu_place.GetType(); // paddle::AllocationType::GPU auto dev_id = gpu_place.GetDeviceId(); // 0 ``` -详细的Place定义请参考 [paddle/phi/common/place.h](https://github.com/PaddlePaddle/Paddle/blob/release/2.3/paddle/phi/common/place.h)。 +详细的 Place 定义请参考 [paddle/phi/common/place.h](https://github.com/PaddlePaddle/Paddle/blob/release/2.3/paddle/phi/common/place.h)。 -> 注:目前自定义算子仅在CPU与GPU上进行了验证,其他类型会视需求在后续版本支持 +> 注:目前自定义算子仅在 CPU 与 GPU 上进行了验证,其他类型会视需求在后续版本支持 #### 数据类型 @@ -135,13 +135,13 @@ enum class DataType { } ``` -详细的DataType定义请参考 [paddle/phi/common/data_type.h](https://github.com/PaddlePaddle/Paddle/blob/release/2.3/paddle/phi/common/data_type.h)。 +详细的 DataType 定义请参考 [paddle/phi/common/data_type.h](https://github.com/PaddlePaddle/Paddle/blob/release/2.3/paddle/phi/common/data_type.h)。 #### Tensor API (1) `Tensor` 构造 -对于 
`paddle::Tensor` 的构造,我们推荐使用相应的初始化paddle API,包括: +对于 `paddle::Tensor` 的构造,我们推荐使用相应的初始化 paddle API,包括: ```c++ PADDLE_API Tensor empty(const IntArray& shape, DataType dtype=DataType::FLOAT32, const Place& place=CPUPlace()); @@ -162,23 +162,23 @@ auto gpu_tensor = paddle::full({3, 4}, 1.0, paddle::DataType::FLOAT64, paddle::G (2) `Tensor` 成员方法 -此外 `paddle::Tensor` 自身目前提供了一些基础的功能API,在定义算子最后那个常用的包括: +此外 `paddle::Tensor` 自身目前提供了一些基础的功能 API,在定义算子最后那个常用的包括: -- 设备、数据类型获取API: +- 设备、数据类型获取 API: - `const Place& place() const`:获取 `Tensor` 所在的设备 - `DataType dtype() const`:获取 `Tensor` 的数据类型 -- 长度与维度获取API: +- 长度与维度获取 API: - `int64_t numel() const`:获取 `Tensor` 的数据长度 - `std::vector shape() const`:获取 `Tensor` 的维度信息 -- 数据访问API: +- 数据访问 API: - `template const T* data() const`:模板类方法,获取数据内存的起始地址(只读) - `template T* data()`:模板类方法,获取数据内存的起始地址(读写) -- 状态或属性判断API: +- 状态或属性判断 API: - `bool defined() const`: 确认 `Tensor` 是否有效 - `bool initialized() const`: 确认 `Tensor` 是否已被初始化 - - `bool is_cpu() const`:确认 `Tensor` 是否在CPU上 - - `bool is_gpu() const`:确认 `Tensor` 是否在GPU上 -- 工具类API: + - `bool is_cpu() const`:确认 `Tensor` 是否在 CPU 上 + - `bool is_gpu() const`:确认 `Tensor` 是否在 GPU 上 +- 工具类 API: - `Tensor copy_to(const Place& place, bool blocking) const`: - 模板类方法,输入参数 `place`,将当前 `Tensor` 拷贝到指定设备上并返回 - `Tensor cast(DataType target_type) const`: @@ -187,14 +187,14 @@ auto gpu_tensor = paddle::full({3, 4}, 1.0, paddle::DataType::FLOAT64, paddle::G - 输入参数起始行 begin_idx 和终止行 end_idx,返回当前 Tensor 从起始行(含)到终止行(不含)的一个视图 - 目前仅支持对当前 Tensor 的第一个维度(即 axis = 0)进行切分 - `cudaStream_t stream() const`: - - 用于获取当前 `Tensor` 所处的CUDA Stream(仅在GPU编译版本中生效) - - 仅能够获取函数输入 `Tensor` 的stream + - 用于获取当前 `Tensor` 所处的 CUDA Stream(仅在 GPU 编译版本中生效) + - 仅能够获取函数输入 `Tensor` 的 stream -后续我们会继续扩展其他Tensor API,详细的Tensor定义请参考 [paddle/phi/api/include/tensor.h](https://github.com/PaddlePaddle/Paddle/blob/release/2.3/paddle/phi/api/include/tensor.h) 。 +后续我们会继续扩展其他 Tensor API,详细的 Tensor 定义请参考 
[paddle/phi/api/include/tensor.h](https://github.com/PaddlePaddle/Paddle/blob/release/2.3/paddle/phi/api/include/tensor.h) 。 #### Exception API -- `PD_CHECK(COND, ...)`:输入bool条件表达式进行检查,如果值为 `false` ,则抛出异常,支持变长参数输入,伪代码示例如下: +- `PD_CHECK(COND, ...)`:输入 bool 条件表达式进行检查,如果值为 `false` ,则抛出异常,支持变长参数输入,伪代码示例如下: ```c++ // case 1: No error message specified @@ -226,11 +226,11 @@ PD_THROW("PD_THROW returns ", false) // [/User/custom_op/custom_relu_op.cc:82] ``` -#### 类Python的C++运算API +#### 类 Python 的 C++运算 API -自paddle 2.3版本开始,我们提供定义与用法与相应Python API类似的C++ API,其API命名、参数顺序及类型均和相应的paddle Python API对齐,可以通过查找相应Python API的官方文档了解其用法,并在自定义算子开发时使用。通过调用这些接口,可以省去封装基础运算的时间,从而提高开发效率。 +自 paddle 2.3 版本开始,我们提供定义与用法与相应 Python API 类似的 C++ API,其 API 命名、参数顺序及类型均和相应的 paddle Python API 对齐,可以通过查找相应 Python API 的官方文档了解其用法,并在自定义算子开发时使用。通过调用这些接口,可以省去封装基础运算的时间,从而提高开发效率。 -在2.3版本支持的C++ API列表如下,可以通过 `paddle::xxx` 进行调用: +在 2.3 版本支持的 C++ API 列表如下,可以通过 `paddle::xxx` 进行调用: ```c++ PADDLE_API Tensor abs(const Tensor& x); @@ -344,20 +344,20 @@ PADDLE_API std::tuple unsqueeze(const Tensor& x, const IntArray& PADDLE_API Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y); ``` -> 注:后续我们会提供更方便的查阅C++ API文档的入口。 +> 注:后续我们会提供更方便的查阅 C++ API 文档的入口。 -在2.3版本,我们共支持了大约250个类似的C++ API,能够覆盖大部分的基础运算,但是除前述的109个C++ API之外,剩余的C++ API由于一些历史原因,其参数列表尚未和相应的Python API对齐,因此目前剩余这些API只能作为experimental的API使用,需要通过 `paddle::experimental::xxx` 进行调用,且这些experimental API在下个版本可能会有不兼容的升级,如果不介意随下一版本升级的话,可以使用,追求稳定的话则不建议使用。 +在 2.3 版本,我们共支持了大约 250 个类似的 C++ API,能够覆盖大部分的基础运算,但是除前述的 109 个 C++ API 之外,剩余的 C++ API 由于一些历史原因,其参数列表尚未和相应的 Python API 对齐,因此目前剩余这些 API 只能作为 experimental 的 API 使用,需要通过 `paddle::experimental::xxx` 进行调用,且这些 experimental API 在下个版本可能会有不兼容的升级,如果不介意随下一版本升级的话,可以使用,追求稳定的话则不建议使用。 -如有需要,目前支持的全量API列表(包含experimental API)请参考paddle安装路径下的api.h头文件,以Python3.7为例,其路径是 `python3.7/site-packages/paddle/include/paddle/phi/api/include/api.h`。 +如有需要,目前支持的全量 API 列表(包含 experimental API)请参考 paddle 安装路径下的 api.h 头文件,以 Python3.7 为例,其路径是 
`python3.7/site-packages/paddle/include/paddle/phi/api/include/api.h`。 ### 运算函数实现 -对函数写法以及基础API的定义有了初步认识后,下面结合具体的示例进行介绍。 +对函数写法以及基础 API 的定义有了初步认识后,下面结合具体的示例进行介绍。 -#### CPU实现 +#### CPU 实现 -以 `relu` 算子为例,一个支持 `float32` 类型的CPU `relu` 算子运算函数可以实现如下: +以 `relu` 算子为例,一个支持 `float32` 类型的 CPU `relu` 算子运算函数可以实现如下: - relu_cpu_fp32.cc @@ -415,7 +415,7 @@ std::vector ReluCPUBackward(const paddle::Tensor& x, 前述 `relu` 示例实现仅支持 `float32` 类型的计算,如果仅有一种数据类型的支持需求,用以上写法即可。 -如果需要同时支持多种数据类型,例如同时支持 `float32` 与 `float64` 的计算,可以使用相应的DIAPATCH宏进行声明,示例如下: +如果需要同时支持多种数据类型,例如同时支持 `float32` 与 `float64` 的计算,可以使用相应的 DISPATCH 宏进行声明,示例如下: - relu_cpu.cc @@ -483,7 +483,7 @@ std::vector ReluCPUBackward(const paddle::Tensor& x, > 注:编写模板计算函数时,模板参数名 `data_t` 用于适配不同的数据类型,不可更改为其他命名,否则会编译失败 -示例中的 `PD_DISPATCH_FLOATING_TYPES` 会展开得到 `float32` 与 `float64` 的switch-case实现,从而在运行时根据输入的数据类型,选择实际需要执行的分支。 +示例中的 `PD_DISPATCH_FLOATING_TYPES` 会展开得到 `float32` 与 `float64` 的 switch-case 实现,从而在运行时根据输入的数据类型,选择实际需要执行的分支。 例如,`ReluCPUForward` 中的 `PD_DISPATCH_FLOATING_TYPES` 实际代码展开如下: @@ -504,21 +504,21 @@ switch(x.type()) { } ``` -目前定义的dispatch宏包括: +目前定义的 dispatch 宏包括: - `PD_DISPATCH_FLOATING_TYPES` :dispatch 生成 `float` 和 `double` 对应的实现 - `PD_DISPATCH_FLOATING_AND_HALF_TYPES` :dispatch 生成 `float` , `double` 和 `paddle::float16` 对应的实现 -- `PD_DISPATCH_INTEGRAL_TYPES` :dispatch生成 `int8_t`, `uint8_t`, `int16_t`, `int`的`int64_t` 对应的实现 -- `PD_DISPATCH_COMPLEX_TYPES`:dispatch生成 `paddle::complex64` 和 `paddle::complex128` 对应的实现 -- `PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES` :dispatch生成前述 `PD_DISPATCH_FLOATING_TYPES` 和 `PD_DISPATCH_INTEGRAL_TYPES` 两个宏全部数据类型对应的实现 -- `PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES`:dispatch生成前述 `PD_DISPATCH_FLOATING_TYPES` 和 `PD_DISPATCH_COMPLEX_TYPES` 两个宏全部数据类型对应的实现 -- `PD_DISPATCH_FLOATING_AND_INTEGRAL_AND_COMPLEX_TYPES`:dispatch生成前述 `PD_DISPATCH_FLOATING_TYPES` , `PD_DISPATCH_INTEGRAL_TYPES` 和 `PD_DISPATCH_COMPLEX_TYPES` 三个宏全部数据类型对应的实现 +- `PD_DISPATCH_INTEGRAL_TYPES` :dispatch 生成 `int8_t`, `uint8_t`, `int16_t`, 
`int`的`int64_t` 对应的实现 +- `PD_DISPATCH_COMPLEX_TYPES`:dispatch 生成 `paddle::complex64` 和 `paddle::complex128` 对应的实现 +- `PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES` :dispatch 生成前述 `PD_DISPATCH_FLOATING_TYPES` 和 `PD_DISPATCH_INTEGRAL_TYPES` 两个宏全部数据类型对应的实现 +- `PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES`:dispatch 生成前述 `PD_DISPATCH_FLOATING_TYPES` 和 `PD_DISPATCH_COMPLEX_TYPES` 两个宏全部数据类型对应的实现 +- `PD_DISPATCH_FLOATING_AND_INTEGRAL_AND_COMPLEX_TYPES`:dispatch 生成前述 `PD_DISPATCH_FLOATING_TYPES` , `PD_DISPATCH_INTEGRAL_TYPES` 和 `PD_DISPATCH_COMPLEX_TYPES` 三个宏全部数据类型对应的实现 当然,如果这几个宏无法满足您实际使用的需求,您可以直接通过 `switch-case` 语句实现,将来视需求我们也会添加更多的宏。 -#### CPU&CUDA混合实现 +#### CPU&CUDA 混合实现 -通常只有CPU的算子实现是不够的,实际生产环境中一般需要使用GPU算子。此处将前述 `relu_cpu.cc` 中算子的CPU实现改为GPU示例如下: +通常只有 CPU 的算子实现是不够的,实际生产环境中一般需要使用 GPU 算子。此处将前述 `relu_cpu.cc` 中算子的 CPU 实现改为 GPU 示例如下: - relu_cuda.cu ```c++ @@ -611,9 +611,9 @@ std::vector ReluCUDABackward(const paddle::Tensor& x, } ``` -在 `.cu` 文件中实现对应的CUDA kernel和计算函数,在 `.cc` 文件中声明调用即可。 +在 `.cu` 文件中实现对应的 CUDA kernel 和计算函数,在 `.cc` 文件中声明调用即可。 -注意这里的 `CHECK_INPUT` 也改为检查输入 `Tensor` 是否在GPU上,如果后续仍然在CPU上执行,将会报错如下,可以看到报错提示与 `CHECK_INPUT` 缩写提示一致。至于错误类型,`PaddlePaddle` 将外部扩展自定义算子视为第三方模块,错误类型统一为 `OSError: (External)` ,与其他第三方库报错类型一致。报错示例如下: +注意这里的 `CHECK_INPUT` 也改为检查输入 `Tensor` 是否在 GPU 上,如果后续仍然在 CPU 上执行,将会报错如下,可以看到报错提示与 `CHECK_INPUT` 缩写提示一致。至于错误类型,`PaddlePaddle` 将外部扩展自定义算子视为第三方模块,错误类型统一为 `OSError: (External)` ,与其他第三方库报错类型一致。报错示例如下: ``` Traceback (most recent call last): @@ -636,7 +636,7 @@ OSError: (External) x must be a GPU Tensor. 
[operator < custom_relu > error] ``` -实际使用时,一般您只需要根据您实际使用的设备,编写对应设备的算子实现即可,例如您使用GPU训练,仅需要实现算子的CUDA版本即可使用,如果您需要您的自定义算子同时支持多种设备,例如同时支持CPU与GPU,只需要将CPU和GPU的实现整合到一起,并在前反向函数中实现对应的分支即可,示例如下: +实际使用时,一般您只需要根据您实际使用的设备,编写对应设备的算子实现即可,例如您使用 GPU 训练,仅需要实现算子的 CUDA 版本即可使用,如果您需要您的自定义算子同时支持多种设备,例如同时支持 CPU 与 GPU,只需要将 CPU 和 GPU 的实现整合到一起,并在前反向函数中实现对应的分支即可,示例如下: - relu.cc ```c++ @@ -809,7 +809,7 @@ std::vector relu_cuda_backward(const paddle::Tensor& x, ### 维度与类型推导函数实现 -`PaddlePaddle` 框架同时支持动态图与静态图的执行模式,在静态图模式下,组网阶段需要完成 `Tensor shape` 和 `dtype` 的推导,从而生成正确的模型描述,用于后续Graph优化与执行。因此,除了算子的运算函数之外,还需要实现前向运算的维度和类型的推导函数。 +`PaddlePaddle` 框架同时支持动态图与静态图的执行模式,在静态图模式下,组网阶段需要完成 `Tensor shape` 和 `dtype` 的推导,从而生成正确的模型描述,用于后续 Graph 优化与执行。因此,除了算子的运算函数之外,还需要实现前向运算的维度和类型的推导函数。 维度推导(InferShape)和类型推导(InferDtype)的函数写法也是有要求的,形式如下: @@ -828,8 +828,8 @@ std::vector OpInferDtype(paddle::DataType x_dtype, ...) { - 函数输入参数与前述运算函数的输入 `Tensor` 按顺序一一对应,依次为输入参数的 `shape` 和 `dtype`,这里的对应规则为: - `paddle::Tensor` -> `std::vector` - `std::vector` -> `std::vector>` -- 函数返回值vector中的 `shape` 或 `dtype` 信息也需要与返回 `Tensor` 按顺序一一对应 -- 维度推导函数支持 `Attribute` 的输入,在实现维度推导函数时,可以不使用 `Attribute` 的输入参数,也可以使用,但如果要使用的话,需要和Forward函数的 `Attribute` 参数保持一致 +- 函数返回值 vector 中的 `shape` 或 `dtype` 信息也需要与返回 `Tensor` 按顺序一一对应 +- 维度推导函数支持 `Attribute` 的输入,在实现维度推导函数时,可以不使用 `Attribute` 的输入参数,也可以使用,但如果要使用的话,需要和 Forward 函数的 `Attribute` 参数保持一致 - 类型推导函数不支持 `Attribute` 的输入 以 `relu` 为例,其维度与类型推导函数如下: @@ -848,7 +848,7 @@ std::vector ReluInferDtype(paddle::DataType x_dtype) { } ``` -> 注:如果是CUDA算子,ReluInferShape和ReluInferDtype仅需要在.cc文件中实现,不需要在.cu中重复实现 +> 注:如果是 CUDA 算子,ReluInferShape 和 ReluInferDtype 仅需要在.cc 文件中实现,不需要在.cu 中重复实现 对于仅有一个输入 `Tensor` 和一个输出 `Tensor` 的自定义算子,如果输出 `Tensor` 和输入 `Tensor` 的 `shape` 和 `dtype` 一致,可以省略 `InferShape` 和 `InferDtype` 函数的实现,其他场景下均需要实现这两个函数。因此,对于这里的 `relu` 算子来说,这两个函数可以不写。 @@ -873,7 +873,7 @@ std::vector ConcatInferDtypeStaticAxis( 最后,需要调用 `PD_BUILD_OP` 系列宏,构建算子的描述信息,并关联前述算子运算函数和维度、类型推导函数。 -我们提供了3个构建算子的宏: +我们提供了 3 个构建算子的宏: - `PD_BUILD_OP` 
:用于构建前向算子 - `PD_BUILD_GRAD_OP` :用于构建前向算子对应的反向算子 @@ -881,7 +881,7 @@ std::vector ConcatInferDtypeStaticAxis( > 注:二阶以上的反向算子构建暂不支持。 -对于 `relu` CPU示例来说,构建算子描述如下: +对于 `relu` CPU 示例来说,构建算子描述如下: - relu_cpu_fp32.cc / relu_cpu.cc (需将以下代码追加到前述文件中) @@ -900,13 +900,13 @@ PD_BUILD_GRAD_OP(custom_relu) ``` 这里写法上需要注意以下几点: -- `PD_BUILD_OP` 系列宏后面的括号内为算子名,也是后面在python端使用的接口名,注意前后不需要引号,注意该算子名不能与 `PaddlePaddle` 内已有算子名重名,比如 `relu` 为 `PaddlePaddle` 内已有算子,如果直接使用relu作为算子名将无法注册成功,所以此处增加了前缀 `custom_` +- `PD_BUILD_OP` 系列宏后面的括号内为算子名,也是后面在 python 端使用的接口名,注意前后不需要引号,注意该算子名不能与 `PaddlePaddle` 内已有算子名重名,比如 `relu` 为 `PaddlePaddle` 内已有算子,如果直接使用 relu 作为算子名将无法注册成功,所以此处增加了前缀 `custom_` - `PD_BUILD_OP`、 `PD_BUILD_GRAD_OP` 和 `PD_BUILD_DOUBLE_GRAD_OP` 构建同一个算子的前向、反向、二阶反向实现,宏后面使用的算子名需要保持一致,比如该示例中均使用 `custom_relu` - `PD_BUILD_OP`、 `PD_BUILD_GRAD_OP` 和 `PD_BUILD_DOUBLE_GRAD_OP` 必须顺次调用,不允许在未调用 `PD_BUILD_OP` 构建前向算子的情况下,直接调用 `PD_BUILD_GRAD_OP` 构建反向算子 -- Inputs与Outputs的输入参数为 `std::vector` ,依次是前面算子运算函数的输入输出 `Tensor` 的name,需要按顺序一一对应,此处的name与函数输入参数的变量名没有强关联,比如函数输入参数是 `const paddle::Tensor& x` ,Inputs中的name可以是 `Input, x, X, In` 等等 -- `PD_BUILD_OP` 与 `PD_BUILD_GRAD_OP` 中的Inputs与Outputs的name有强关联,对于前向算子的某个输入,如果反向算子仍然要复用,那么其name一定要保持一致,因为内部执行时,会以name作为key去查找对应的变量,比如这里前向算子的 `X, Out` 与反向算子的 `X, Out` 指代同一个 `Tensor` -- 在声明反向算子的Inputs与Outputs时,前向 `Tensor` 对应的梯度 `Tensor` 名需要由 `paddle::Grad` 处理前向 `Tensor` 名得到,不能够随意声明,例如这里 `"X"` 对应的梯度 `Tensor` 名为 `paddle::Grad("X")` -- 如果算子的Inputs与Outputs中包含变长的 `Tensor` 输入和输出,其 `Tensor` 名需要由 `paddle::Vec` 方法处理得到,例如对于前述 `concat` 算子的前向输入 `const std::vector& inputs` ,其 `Tensor` 名可以为 `paddle::Vec("X")` ,对应的梯度 `Tensor` 名为 `paddle::Grad(paddle::Vec("X"))` ,此处 `paddle::Grad` 需要在 `paddle::Vec` 的外面 +- Inputs 与 Outputs 的输入参数为 `std::vector` ,依次是前面算子运算函数的输入输出 `Tensor` 的 name,需要按顺序一一对应,此处的 name 与函数输入参数的变量名没有强关联,比如函数输入参数是 `const paddle::Tensor& x` ,Inputs 中的 name 可以是 `Input, x, X, In` 等等 +- `PD_BUILD_OP` 与 `PD_BUILD_GRAD_OP` 中的 Inputs 与 Outputs 的 name 有强关联,对于前向算子的某个输入,如果反向算子仍然要复用,那么其 name 一定要保持一致,因为内部执行时,会以 name 
作为 key 去查找对应的变量,比如这里前向算子的 `X, Out` 与反向算子的 `X, Out` 指代同一个 `Tensor` +- 在声明反向算子的 Inputs 与 Outputs 时,前向 `Tensor` 对应的梯度 `Tensor` 名需要由 `paddle::Grad` 处理前向 `Tensor` 名得到,不能够随意声明,例如这里 `"X"` 对应的梯度 `Tensor` 名为 `paddle::Grad("X")` +- 如果算子的 Inputs 与 Outputs 中包含变长的 `Tensor` 输入和输出,其 `Tensor` 名需要由 `paddle::Vec` 方法处理得到,例如对于前述 `concat` 算子的前向输入 `const std::vector& inputs` ,其 `Tensor` 名可以为 `paddle::Vec("X")` ,对应的梯度 `Tensor` 名为 `paddle::Grad(paddle::Vec("X"))` ,此处 `paddle::Grad` 需要在 `paddle::Vec` 的外面 - 此处 `SetKernelFn` 、`SetInferShapeFn` 与 `SetInferDtypeFn` 中的 `PD_KERNEL` 、`PD_INFER_SHAPE` 、`PD_INFER_DTYPE` 宏用于自动转换并统一函数的签名,不可以省略 - 反向算子构建暂时不支持调用 `SetInferShapeFn` 和 `SetInferDtypeFn` 自定义维度与类型推导函数,框架会根据前向 `Tensor` 的 `shape` 和 `dtype` ,设定其对应梯度 `Tensor` 的 `shape` 和 `dtype` @@ -924,7 +924,7 @@ PD_BUILD_GRAD_OP(custom_relu) .SetKernelFn(PD_KERNEL(ReluCPUBackward)); ``` -类似地,GPU示例构建算子描述如下,替换 `KernelFn` 即可: +类似地,GPU 示例构建算子描述如下,替换 `KernelFn` 即可: - relu_cuda.cc (需将以下代码追加到前述文件中) @@ -960,11 +960,11 @@ PD_BUILD_GRAD_OP(custom_concat_with_attr) #### Attribute 声明 -对于 `Attribute` 的声明,和Inputs、Outputs的声明有所不同,需要按照如下格式声明字符串: +对于 `Attribute` 的声明,和 Inputs、Outputs 的声明有所不同,需要按照如下格式声明字符串: `: ` -其中,`name` 为 `Attribute` 变量的name,`` 为 `Attribute` 变量的类型,类型字符串需要与C++类型严格一致。通过如下示例说明: +其中,`name` 为 `Attribute` 变量的 name,`` 为 `Attribute` 变量的类型,类型字符串需要与 C++类型严格一致。通过如下示例说明: 假如有前向运算函数形式如下: @@ -1068,7 +1068,7 @@ setup( ) ``` -其中 `paddle.utils.cpp_extension.setup` 能够自动搜索和检查本地的 `cc(Linux)` 、 `cl.exe(Windows)` 和 `nvcc` 编译命令和版本环境,根据用户指定的 `Extension` 类型,完成CPU或CPU设备的算子编译安装。 +其中 `paddle.utils.cpp_extension.setup` 能够自动搜索和检查本地的 `cc(Linux)` 、 `cl.exe(Windows)` 和 `nvcc` 编译命令和版本环境,根据用户指定的 `Extension` 类型,完成 CPU 或 GPU 设备的算子编译安装。 执行 `python setup_cpu.py install` 或者 `python setup_cuda.py install` 即可一键完成自定义算子的编译和安装。 @@ -1196,9 +1196,9 @@ x = paddle.randn([4, 10], dtype='float32') relu_out = custom_relu(x) ``` -> 
注:`setuptools` 的封装是为了简化自定义算子编译和使用流程,即使不依赖于 `setuptools` ,也可以自行编译生成动态库,并封装相应的 python API,然后在基于 `PaddlePaddle` 实现的模型中使用 -如果需要详细了解相关接口,或需要配置其他编译选项,请参考以下API文档: +如果需要详细了解相关接口,或需要配置其他编译选项,请参考以下 API 文档: - [paddle.utils.cpp_extension.setup](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/utils/cpp_extension/setup_cn.html) - [paddle.utils.cpp_extension.setupCppExtension](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/utils/cpp_extension/CppExtension_cn.html) @@ -1206,7 +1206,7 @@ relu_out = custom_relu(x) ### 即时编译(`JIT Compile`) -即时编译将 `setuptools.setup` 编译方式做了进一步的封装,通过将自定义算子对应的 `.cc` 和 `.cu` 文件传入API `paddle.utils.cpp_extension.load`,在后台生成 `setup.py` 文件,并通过子进程的方式,隐式地执行源码文件编译、符号链接、动态库生成、组网 API 接口生成等一系列过程。不需要本地预装 CMake 或者 Ninja 等工具命令,仅需必要的编译器命令环境。 Linux 下需安装版本不低于 5.4 的 GCC,并软链到 `/usr/bin/cc` ,Windows下需安装版本不低于2017的Visual Studio;若编译支持 GPU 设备的算子,则需要提前安装CUDA,其中自带 `nvcc` 编译环境。 +即时编译将 `setuptools.setup` 编译方式做了进一步的封装,通过将自定义算子对应的 `.cc` 和 `.cu` 文件传入 API `paddle.utils.cpp_extension.load`,在后台生成 `setup.py` 文件,并通过子进程的方式,隐式地执行源码文件编译、符号链接、动态库生成、组网 API 接口生成等一系列过程。不需要本地预装 CMake 或者 Ninja 等工具命令,仅需必要的编译器命令环境。 Linux 下需安装版本不低于 5.4 的 GCC,并软链到 `/usr/bin/cc` ,Windows 下需安装版本不低于 2017 的 Visual Studio;若编译支持 GPU 设备的算子,则需要提前安装 CUDA,其中自带 `nvcc` 编译环境。 对于前述 `relu` 示例,使用方式如下: @@ -1223,9 +1223,9 @@ x = paddle.randn([4, 10], dtype='float32') out = custom_ops.custom_relu(x) ``` -`load` 返回一个包含自定义算子API的 `Module` 对象,可以直接使用自定义算子name调用API。 +`load` 返回一个包含自定义算子 API 的 `Module` 对象,可以直接使用自定义算子 name 调用 API。 -以Linux平台为例,`load` 接口调用过程中,如果不指定 `build_directory` 参数,Linux 会默认在 `~/.cache/paddle_extensions` 目录下生成一个 `{name}_setup.py`(Windows 默认目录为 `C:\\Users\\xxx\\.cache\\paddle_extensions` 用户目录),然后通过subprocess执行 `python {name}_setup.py build`,然后载入动态库,生成 Python API 之后返回。 +以 Linux 平台为例,`load` 接口调用过程中,如果不指定 `build_directory` 参数,Linux 会默认在 `~/.cache/paddle_extensions` 目录下生成一个 `{name}_setup.py`(Windows 默认目录为 `C:\\Users\\xxx\\.cache\\paddle_extensions` 用户目录),然后通过 subprocess 执行 `python {name}_setup.py 
build`,然后载入动态库,生成 Python API 之后返回。 对于本示例,默认生成路径内容如下: @@ -1234,13 +1234,13 @@ out = custom_ops.custom_relu(x) custom_jit_ops/ custom_jit_ops_setup.py ``` -其中,`custom_jit_ops_setup.py` 是生成的setup编译文件,`custom_jit_ops` 目录是编译生成的内容。 +其中,`custom_jit_ops_setup.py` 是生成的 setup 编译文件,`custom_jit_ops` 目录是编译生成的内容。 -如果需要详细了解load接口,或需要配置其他编译选项,请参考API文档 [paddle.utils.cpp_extension.load](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/utils/cpp_extension/load_cn.html) 。 +如果需要详细了解 load 接口,或需要配置其他编译选项,请参考 API 文档 [paddle.utils.cpp_extension.load](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/utils/cpp_extension/load_cn.html) 。 ### 同时编译多个算子 -以上两种方式均支持同时编译多个自定义算子,只需要将多个算子对应的源文件均传入对应的参数,编译生成的动态库中会包含多个算子的实现,导入 Module 之后,同样以算子名作为API名进行调用,示例如下: +以上两种方式均支持同时编译多个自定义算子,只需要将多个算子对应的源文件均传入对应的参数,编译生成的动态库中会包含多个算子的实现,导入 Module 之后,同样以算子名作为 API 名进行调用,示例如下: - setuptools 编译 ```python @@ -1282,15 +1282,15 @@ relu_out = custom_ops.custom_relu(x) tanh_out = custom_ops.custom_tanh(x) ``` -### ABI兼容性检查 +### ABI 兼容性检查 以上两种方式,编译前均会执行 ABI 兼容性检查 。对于 Linux,会检查 cc 命令对应的 GCC 版本是否与所安装的 `PaddlePaddle` 的 GCC 版本一致。例如对于 CUDA 10.1 以上的 `PaddlePaddle` 默认使用 GCC 8.2 编译,则本地 cc 对应的编译器版本也需为 8.2。对于 Windows,则会检查本地的 Visual Studio 版本是否与所安装的 `PaddlePaddle` 的 Visual Studio 版本一致(>=2017)。如果上述版本不一致,则会打印出相应 warning,且可能由于引发自定义 OP 编译执行报错。 ## 在模型中使用自定义算子 -经过前述过程,自定义算子的编写、编译安装及API生成均已完成,现在您可以在网络模型中使用您自定义生成的算子了,本方案生成的自定义算子在动态图和静态图模式下均能够使用。 +经过前述过程,自定义算子的编写、编译安装及 API 生成均已完成,现在您可以在网络模型中使用您自定义生成的算子了,本方案生成的自定义算子在动态图和静态图模式下均能够使用。 -以下验证用例均基于前述源文件 `relu_cuda.cc` 和 `relu_cuda.cu` 测试 `custom_relu` 在GPU环境中的使用,均采用JIT Compile的方式编译自定义算子。 +以下验证用例均基于前述源文件 `relu_cuda.cc` 和 `relu_cuda.cu` 测试 `custom_relu` 在 GPU 环境中的使用,均采用 JIT Compile 的方式编译自定义算子。 通过定义一个简单的网络模型,完成训练迭代和存储推理模型的基本过程。 @@ -1500,7 +1500,7 @@ static.save_inference_model(path, [image], [out], exe) ### 算子与推理库联合编译 -编写推理的测试程序,其中需要使用前述验证过程中存储的inference model,目录为 `custom_relu_dynamic/net` 或者 `custom_relu_static/net` ,下面通过示例介绍使用流程,该示例需要准备的文件包括: +编写推理的测试程序,其中需要使用前述验证过程中存储的 
inference model,目录为 `custom_relu_dynamic/net` 或者 `custom_relu_static/net` ,下面通过示例介绍使用流程,该示例需要准备的文件包括: ``` - cmake @@ -1517,7 +1517,7 @@ static.save_inference_model(path, [image], [out], exe) #### 编写推理程序 -下面是一个简单的推理Demo,导入前述 `custom_relu_dynamic/net` 中存储的模型和参数,进行预测: +下面是一个简单的推理 Demo,导入前述 `custom_relu_dynamic/net` 中存储的模型和参数,进行预测: ```c++ #include @@ -1566,7 +1566,7 @@ int main() { } ``` -#### 编写CMake文件 +#### 编写 CMake 文件 编写 `CMakeList` 编译构建文件,示例如下: @@ -1819,7 +1819,7 @@ make -j 此处要根据实际情况对执行脚本中的几处配置进行调整: ```sh -# 根据预编译库中的version.txt信息判断是否将以下三个标记打开 +# 根据预编译库中的 version.txt 信息判断是否将以下三个标记打开 WITH_MKL=ON WITH_GPU=ON USE_TENSORRT=OFF @@ -1827,18 +1827,18 @@ USE_TENSORRT=OFF # 配置预测库的根目录 LIB_DIR=${YOUR_LIB_DIR}/paddle_inference_install_dir -# 如果上述的WITH_GPU 或 USE_TENSORRT设为ON,请设置对应的CUDA, CUDNN, TENSORRT的路径。 +# 如果上述的 WITH_GPU 或 USE_TENSORRT 设为 ON,请设置对应的 CUDA, CUDNN, TENSORRT 的路径。 CUDNN_LIB=/paddle/nvidia-downloads/cudnn_v7.5_cuda10.1/lib64 CUDA_LIB=/paddle/nvidia-downloads/cuda-10.1/lib64 # TENSORRT_ROOT=/paddle/nvidia-downloads/TensorRT-6.0.1.5 ``` -然后,运行 `sh run.sh` ,完成编译,会在目录下产生build目录。 +然后,运行 `sh run.sh` ,完成编译,会在目录下产生 build 目录。 ### 运行推理程序 ``` -# 进入build目录 +# 进入 build 目录 cd build # 运行样例 ./custom_op_test @@ -1849,4 +1849,4 @@ cd build ### 更多推理使用文档 - [Paddle Inference 快速开始](https://paddleinference.paddlepaddle.org.cn/quick_start/workflow.html) -- [Paddle Inference API文档](https://paddleinference.paddlepaddle.org.cn/api_reference/cxx_api_index.html) +- [Paddle Inference API 文档](https://paddleinference.paddlepaddle.org.cn/api_reference/cxx_api_index.html) diff --git a/docs/guides/custom_op/new_python_op_cn.md b/docs/guides/custom_op/new_python_op_cn.md index 1466354e301..c07454499a9 100644 --- a/docs/guides/custom_op/new_python_op_cn.md +++ b/docs/guides/custom_op/new_python_op_cn.md @@ -1,6 +1,6 @@ -# 自定义Python算子 -## 动态图自定义Python算子 -Paddle 通过 `PyLayer` 接口和`PyLayerContext`接口支持动态图的Python端自定义OP。 +# 自定义 Python 算子 +## 动态图自定义 Python 算子 +Paddle 通过 `PyLayer` 接口和`PyLayerContext`接口支持动态图的 
Python 端自定义 OP。 ### 相关接口概述 @@ -24,9 +24,9 @@ class PyLayer: 其中, -- `forward` 是自定义Op的前向函数,必须被子类重写,它的第一个参数是 `PyLayerContext` 对象,其他输入参数的类型和数量任意。 -- `backward` 是自定义Op的反向函数,必须被子类重写,其第一个参数为 `PyLayerContext` 对象,其他输入参数为`forward`输出`Tensor`的梯度。它的输出``Tensor``为``forward``输入`Tensor`的梯度。 -- `apply` 是自定义Op的执行方法,构建完自定义Op后,通过apply运行Op。 +- `forward` 是自定义 Op 的前向函数,必须被子类重写,它的第一个参数是 `PyLayerContext` 对象,其他输入参数的类型和数量任意。 +- `backward` 是自定义 Op 的反向函数,必须被子类重写,其第一个参数为 `PyLayerContext` 对象,其他输入参数为`forward`输出`Tensor`的梯度。它的输出``Tensor``为``forward``输入`Tensor`的梯度。 +- `apply` 是自定义 Op 的执行方法,构建完自定义 Op 后,通过 apply 运行 Op。 `PyLayerContext` 接口描述如下: @@ -42,43 +42,43 @@ class PyLayerContext: 其中, -- `save_for_backward` 用于暂存`backward`需要的`Tensor`,这个API只能被调用一次,且只能在``forward``中调用。 +- `save_for_backward` 用于暂存`backward`需要的`Tensor`,这个 API 只能被调用一次,且只能在``forward``中调用。 - `saved_tensor` 获取被`save_for_backward`暂存的`Tensor`。 -### 如何编写动态图Python Op +### 如何编写动态图 Python Op -以下以tanh为例,介绍如何利用 `PyLayer` 编写Python Op。 +以下以 tanh 为例,介绍如何利用 `PyLayer` 编写 Python Op。 - 第一步:创建`PyLayer`子类并定义前向函数和反向函数 -前向函数和反向函数均由Python编写,可以方便地使用Paddle相关API来实现一个自定义的OP。需要遵守以下规则: +前向函数和反向函数均由 Python 编写,可以方便地使用 Paddle 相关 API 来实现一个自定义的 OP。需要遵守以下规则: 1. `forward`和`backward`都是静态函数,它们的第一个参数是`PyLayerContext`对象。 2. `backward` 除了第一个参数以外,其他参数都是`forward`函数的输出`Tensor`的梯度,因此,`backward`输入的`Tensor`的数量必须等于`forward`输出`Tensor`的数量。如果您需在`backward`中使用`forward`中的`Tensor`,您可以利用`save_for_backward`和`saved_tensor`这两个方法传递`Tensor`。 - 3. `backward`的输出可以是`Tensor`或者`list/tuple(Tensor)`,这些`Tensor`是`forward`输入`Tensor`的梯度。因此,`backward`的输出`Tensor`的个数等于forward输入`Tensor`的个数。如果`backward`的某个返回值(梯度)在`forward`中对应的`Tensor`的`stop_gradient`属性为`False`,这个返回值必须是`Tensor`类型。 + 3. 
`backward`的输出可以是`Tensor`或者`list/tuple(Tensor)`,这些`Tensor`是`forward`输入`Tensor`的梯度。因此,`backward`的输出`Tensor`的个数等于 forward 输入`Tensor`的个数。如果`backward`的某个返回值(梯度)在`forward`中对应的`Tensor`的`stop_gradient`属性为`False`,这个返回值必须是`Tensor`类型。 ```Python import paddle from paddle.autograd import PyLayer -# 通过创建`PyLayer`子类的方式实现动态图Python Op +# 通过创建`PyLayer`子类的方式实现动态图 Python Op class cus_tanh(PyLayer): @staticmethod def forward(ctx, x): y = paddle.tanh(x) - # ctx 为PyLayerContext对象,可以把y从forward传递到backward。 + # ctx 为 PyLayerContext 对象,可以把 y 从 forward 传递到 backward。 ctx.save_for_backward(y) return y @staticmethod - # 因为forward只有一个输出,因此除了ctx外,backward只有一个输入。 + # 因为 forward 只有一个输出,因此除了 ctx 外,backward 只有一个输入。 def backward(ctx, dy): - # ctx 为PyLayerContext对象,saved_tensor获取在forward时暂存的y。 + # ctx 为 PyLayerContext 对象,saved_tensor 获取在 forward 时暂存的 y。 y, = ctx.saved_tensor() - # 调用Paddle API自定义反向计算 + # 调用 Paddle API 自定义反向计算 grad = dy * (1 - paddle.square(y)) - # forward只有一个Tensor输入,因此,backward只有一个输出。 + # forward 只有一个 Tensor 输入,因此,backward 只有一个输出。 return grad ``` - 第二步:通过`apply`方法组建网络。 @@ -86,13 +86,13 @@ class cus_tanh(PyLayer): ```Python data = paddle.randn([2, 3], dtype="float32") data.stop_gradient = False -# 通过 apply运行这个Python算子 +# 通过 apply 运行这个 Python 算子 z = cus_tanh.apply(data) z.mean().backward() print(data.grad) ``` -### 动态图自定义Python算子的注意事项 +### 动态图自定义 Python 算子的注意事项 - 为了从`forward`到`backward`传递信息,您可以在`forward`中给`PyLayerContext`添加临时属性,在`backward`中读取这个属性。如果传递`Tensor`推荐使用`save_for_backward`和`saved_tensor`,如果传递非`Tensor`推荐使用添加临时属性的方式。 ```Python import paddle @@ -102,17 +102,17 @@ import numpy as np class tanh(PyLayer): @staticmethod def forward(ctx, x1, func1, func2=paddle.square): - # 添加临时属性的方式传递func2 + # 添加临时属性的方式传递 func2 ctx.func = func2 y1 = func1(x1) - # 使用save_for_backward传递y1 + # 使用 save_for_backward 传递 y1 ctx.save_for_backward(y1) return y1 @staticmethod def backward(ctx, dy1): y1, = ctx.saved_tensor() - # 获取func2 + # 获取 func2 re1 = dy1 * (1 - ctx.func(y1)) return re1 @@ -123,14 +123,14 @@ 
input2.stop_gradient = False z = tanh.apply(x1=input1, func1=paddle.tanh) ``` -- forward的输入和输出的类型任意,但是至少有一个输入和输出为`Tensor`类型。 +- forward 的输入和输出的类型任意,但是至少有一个输入和输出为`Tensor`类型。 ```Python # 错误示例 class cus_tanh(PyLayer): @staticmethod def forward(ctx, x1, x2): y = x1+x2 - # y.shape: 列表类型,非Tensor,输出至少包含一个Tensor + # y.shape: 列表类型,非 Tensor,输出至少包含一个 Tensor return y.shape @staticmethod @@ -139,7 +139,7 @@ class cus_tanh(PyLayer): data = paddle.randn([2, 3], dtype="float32") data.stop_gradient = False -# 由于forward输出没有Tensor引发报错 +# 由于 forward 输出没有 Tensor 引发报错 z, y_shape = cus_tanh.apply(data, data) @@ -148,12 +148,12 @@ class cus_tanh(PyLayer): @staticmethod def forward(ctx, x1, x2): y = x1+x2 - # y.shape: 列表类型,非Tensor + # y.shape: 列表类型,非 Tensor return y, y.shape @staticmethod def backward(ctx, dy): - # forward两个Tensor输入,因此,backward有两个输出。 + # forward 两个 Tensor 输入,因此,backward 有两个输出。 return dy, dy data = paddle.randn([2, 3], dtype="float32") @@ -164,7 +164,7 @@ z.mean().backward() print(data.grad) ``` -- 如果forward的某个输入为`Tensor`且`stop_gredient = True`,则在`backward`中与其对应的返回值应为`None`。 +- 如果 forward 的某个输入为`Tensor`且`stop_gredient = True`,则在`backward`中与其对应的返回值应为`None`。 ```Python class cus_tanh(PyLayer): @staticmethod @@ -174,7 +174,7 @@ class cus_tanh(PyLayer): @staticmethod def backward(ctx, dy): - # x2.stop_gradient=True,其对应梯度需要返回None + # x2.stop_gradient=True,其对应梯度需要返回 None return dy, None @@ -187,7 +187,7 @@ fake_loss.backward() print(data1.grad) ``` -- 如果forward的所有输入`Tensor`都是`stop_gredient = True`的,则`backward`不会被执行。 +- 如果 forward 的所有输入`Tensor`都是`stop_gredient = True`的,则`backward`不会被执行。 ```Python class cus_tanh(PyLayer): @staticmethod @@ -205,16 +205,16 @@ data2 = paddle.randn([2, 3], dtype="float32") z = cus_tanh.apply(data1, data2) fake_loss = z.mean() fake_loss.backward() -# 因为data1.stop_gradient = True、data2.stop_gradient = True,所以backward不会被执行。 +# 因为 data1.stop_gradient = True、data2.stop_gradient = True,所以 backward 不会被执行。 print(data1.grad is None) ``` -## 静态图自定义Python算子 
-Paddle 通过 `py_func` 接口支持静态图的Python端自定义OP。 py_func的设计原理在于Paddle中的Tensor可以与numpy数组可以方便的互相转换,从而可以使用Python中的numpy API来自定义一个Python OP。 +## 静态图自定义 Python 算子 +Paddle 通过 `py_func` 接口支持静态图的 Python 端自定义 OP。 py_func 的设计原理在于 Paddle 中的 Tensor 可以与 numpy 数组可以方便的互相转换,从而可以使用 Python 中的 numpy API 来自定义一个 Python OP。 -### py_func接口概述 +### py_func 接口概述 `py_func` 具体接口为: @@ -225,21 +225,21 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None): 其中, -- `x` 是Python Op的输入变量,可以是单个 `Tensor` | `tuple[Tensor]` | `list[Tensor]` 。多个Tensor以tuple[Tensor]或list[Tensor]的形式传入。 -- `out` 是Python Op的输出变量,可以是单个 `Tensor` | `tuple[Tensor]` | `list[Tensor]`,也可以是`Numpy Array `。 -- `func` 是Python Op的前向函数。在运行网络前向时,框架会调用 `out = func(*x)` ,根据前向输入 `x` 和前向函数 `func` 计算前向输出 `out`。在 ``func`` 建议先主动将Tensor转换为numpy数组,方便灵活的使用numpy相关的操作,如果未转换成numpy,则可能某些操作无法兼容。 -- `backward_func` 是Python Op的反向函数。若 `backward_func` 为 `None` ,则该Python Op没有反向计算逻辑; +- `x` 是 Python Op 的输入变量,可以是单个 `Tensor` | `tuple[Tensor]` | `list[Tensor]` 。多个 Tensor 以 tuple[Tensor]或 list[Tensor]的形式传入。 +- `out` 是 Python Op 的输出变量,可以是单个 `Tensor` | `tuple[Tensor]` | `list[Tensor]`,也可以是`Numpy Array `。 +- `func` 是 Python Op 的前向函数。在运行网络前向时,框架会调用 `out = func(*x)` ,根据前向输入 `x` 和前向函数 `func` 计算前向输出 `out`。在 ``func`` 建议先主动将 Tensor 转换为 numpy 数组,方便灵活的使用 numpy 相关的操作,如果未转换成 numpy,则可能某些操作无法兼容。 +- `backward_func` 是 Python Op 的反向函数。若 `backward_func` 为 `None` ,则该 Python Op 没有反向计算逻辑; 若 `backward_func` 不为 `None`,则框架会在运行网路反向时调用 `backward_func` 计算前向输入 `x` 的梯度。 - `skip_vars_in_backward_input` 为反向函数 `backward_func` 中不需要的输入,可以是单个 `Tensor` | `tuple[Tensor]` | `list[Tensor]` 。 -### 如何使用py_func编写Python Op +### 如何使用 py_func 编写 Python Op -以下以tanh为例,介绍如何利用 `py_func` 编写Python Op。 +以下以 tanh 为例,介绍如何利用 `py_func` 编写 Python Op。 - 第一步:定义前向函数和反向函数 -前向函数和反向函数均由Python编写,可以方便地使用Python与numpy中的相关API来实现一个自定义的OP。 +前向函数和反向函数均由 Python 编写,可以方便地使用 Python 与 numpy 中的相关 API 来实现一个自定义的 OP。 若前向函数的输入为 `x_1`, `x_2`, ..., `x_n` ,输出为`y_1`, `y_2`, ..., `y_m`,则前向函数的定义格式为: ```Python @@ -257,21 +257,21 
@@ def backward_func(x_1, x_2, ..., x_n, y_1, y_2, ..., y_m, dy_1, dy_2, ..., dy_m) 若反向函数不需要某些前向输入变量或前向输出变量,可设置 `skip_vars_in_backward_input` 进行排除(步骤三中会叙述具体的排除方法)。 -注:,x_1, ..., x_n为输入的多个Tensor,请以tuple(Tensor)或list[Tensor]的形式在py_func中传入。建议先主动将Tensor通过numpy.array转换为数组,否则Python与numpy中的某些操作可能无法兼容使用在Tensor上。 +注:,x_1, ..., x_n 为输入的多个 Tensor,请以 tuple(Tensor)或 list[Tensor]的形式在 py_func 中传入。建议先主动将 Tensor 通过 numpy.array 转换为数组,否则 Python 与 numpy 中的某些操作可能无法兼容使用在 Tensor 上。 -此处我们利用numpy的相关API完成tanh的前向函数和反向函数编写。下面给出多个前向与反向函数定义的示例: +此处我们利用 numpy 的相关 API 完成 tanh 的前向函数和反向函数编写。下面给出多个前向与反向函数定义的示例: ```Python import numpy as np -# 前向函数1:模拟tanh激活函数 +# 前向函数 1:模拟 tanh 激活函数 def tanh(x): - # 可以直接将Tensor作为np.tanh的输入参数 + # 可以直接将 Tensor 作为 np.tanh 的输入参数 return np.tanh(x) -# 前向函数2:将两个2-D Tenosr相加,输入多个Tensor以list[Tensor]或tuple(Tensor)形式 +# 前向函数 2:将两个 2-D Tenosr 相加,输入多个 Tensor 以 list[Tensor]或 tuple(Tensor)形式 def element_wise_add(x, y): - # 必须先手动将Tensor转换为numpy数组,否则无法支持numpy的shape操作 + # 必须先手动将 Tensor 转换为 numpy 数组,否则无法支持 numpy 的 shape 操作 x = np.array(x) y = np.array(y) @@ -285,21 +285,21 @@ def element_wise_add(x, y): return result -# 前向函数3:可用于调试正在运行的网络(打印值) +# 前向函数 3:可用于调试正在运行的网络(打印值) def debug_func(x): - # 可以直接将Tensor作为print的输入参数 + # 可以直接将 Tensor 作为 print 的输入参数 print(x) -# 前向函数1对应的反向函数,默认的输入顺序为:x、out、out的梯度 +# 前向函数 1 对应的反向函数,默认的输入顺序为:x、out、out 的梯度 def tanh_grad(x, y, dy): - # 必须先手动将Tensor转换为numpy数组,否则"+/-"等操作无法使用 + # 必须先手动将 Tensor 转换为 numpy 数组,否则"+/-"等操作无法使用 return np.array(dy) * (1 - np.square(np.array(y))) ``` -注意,前向函数和反向函数的输入均是 `Tensor` 类型,输出可以是Numpy Array或 `Tensor`。 -由于 `Tensor` 实现了Python的buffer protocol协议,因此即可通过 `numpy.array` 直接将 `Tensor` 转换为numpy Array来进行操作,也可直接将 `Tensor` 作为numpy函数的输入参数。但建议先主动转换为numpy Array,则可以任意的使用python与numpy中的所有操作(例如"numpy array的+/-/shape")。 +注意,前向函数和反向函数的输入均是 `Tensor` 类型,输出可以是 Numpy Array 或 `Tensor`。 +由于 `Tensor` 实现了 Python 的 buffer protocol 协议,因此即可通过 `numpy.array` 直接将 `Tensor` 转换为 numpy Array 来进行操作,也可直接将 `Tensor` 作为 numpy 函数的输入参数。但建议先主动转换为 numpy Array,则可以任意的使用 python 与 
numpy 中的所有操作(例如"numpy array 的+/-/shape")。 -tanh的反向函数不需要前向输入x,因此我们可定义一个不需要前向输入x的反向函数,并在后续通过 `skip_vars_in_backward_input` 进行排除 : +tanh 的反向函数不需要前向输入 x,因此我们可定义一个不需要前向输入 x 的反向函数,并在后续通过 `skip_vars_in_backward_input` 进行排除 : ```Python def tanh_grad_without_x(y, dy): @@ -308,7 +308,7 @@ def tanh_grad_without_x(y, dy): - 第二步:创建前向输出变量 -我们需调用 `Program.current_block().create_var` 创建前向输出变量。在创建前向输出变量时,必须指明变量的名称name、数据类型dtype和维度shape。 +我们需调用 `Program.current_block().create_var` 创建前向输出变量。在创建前向输出变量时,必须指明变量的名称 name、数据类型 dtype 和维度 shape。 ```Python import paddle @@ -339,10 +339,10 @@ paddle.static.nn.py_func(func=tanh, x=in_var, out=out_var, backward_func=tanh_gr skip_vars_in_backward_input=in_var) ``` -至此,使用 `py_func` 编写Python Op的步骤结束。我们可以与使用其他Op一样进行网路训练/预测。 +至此,使用 `py_func` 编写 Python Op 的步骤结束。我们可以与使用其他 Op 一样进行网路训练/预测。 -### 静态图自定义Python算子注意事项 +### 静态图自定义 Python 算子注意事项 - `py_func` 的前向函数和反向函数内部不应调用 `paddle.xx`组网接口 ,因为前向函数和反向函数是在网络运行时调用的,而 `paddle.xx` 是在组建网络的阶段调用 。 diff --git a/docs/guides/flags/check_nan_inf_cn.md b/docs/guides/flags/check_nan_inf_cn.md index 96d35852b3f..427e0fc3b32 100644 --- a/docs/guides/flags/check_nan_inf_cn.md +++ b/docs/guides/flags/check_nan_inf_cn.md @@ -1,74 +1,74 @@ -# check nan inf工具 +# check nan inf 工具 -check nan inf工具用于检查Operator的结果是否含有nan(not a number,非有效数)或inf(infinite,无穷大数)。支持float32、double、float16三类浮点型,整型由于不存在nan、inf不作检查。 +check nan inf 工具用于检查 Operator 的结果是否含有 nan(not a number,非有效数)或 inf(infinite,无穷大数)。支持 float32、double、float16 三类浮点型,整型由于不存在 nan、inf 不作检查。 ## 使用 #### 1. 使用方法 -设置环境变量为FLAGS_check_nan_inf为True或者1即可。 +设置环境变量为 FLAGS_check_nan_inf 为 True 或者 1 即可。 ``` export FLAGS_check_nan_inf=1 # 或者=True ``` #### 2. 
进阶使用 -添加上述环境变量后,可以通过设置环境变量跳过op、op类型及op变量的检查。设置的格式如下: +添加上述环境变量后,可以通过设置环境变量跳过 op、op 类型及 op 变量的检查。设置的格式如下: ``` PADDLE_INF_NAN_SKIP_OP="op0,op1,op2" PADDLE_INF_NAN_SKIP_ROLE="role1,role2,role3" PADDLE_INF_NAN_SKIP_VAR="op0:var0,op0:var1,op1:var0" ``` -其中上面三个环境变量分别表示跳过op、op类型和op里变量的检查。 -##### 2.1 跳过op检查 -如下设置中前一个只跳过mul op的nan inf检查,后一个设置则跳过mul、softmax_with_cross_entropy这两个op的检查。 -`注意`:op跳过只接受精准匹配,要跳过softmax_with_cross_entropy的检查,不能设置环境变量为softmax_with或者with_cross进行模糊匹配,必须设置softmax_with_cross_entropy全名。 +其中上面三个环境变量分别表示跳过 op、op 类型和 op 里变量的检查。 +##### 2.1 跳过 op 检查 +如下设置中前一个只跳过 mul op 的 nan inf 检查,后一个设置则跳过 mul、softmax_with_cross_entropy 这两个 op 的检查。 +`注意`:op 跳过只接受精准匹配,要跳过 softmax_with_cross_entropy 的检查,不能设置环境变量为 softmax_with 或者 with_cross 进行模糊匹配,必须设置 softmax_with_cross_entropy 全名。 ``` export PADDLE_INF_NAN_SKIP_OP="mul" export PADDLE_INF_NAN_SKIP_OP="mul,softmax_with_cross_entropy" ``` -##### 2.2 跳过op类型检查 -目前接受的类型有: forward、backward、optimize、rpc、dist、lrsched、loss、default。正常fp32训练中,不需要跳过op类型进行nan inf检查。但在`fp16`中,在反向过程出现inf会对其进行修正,所以一般需要跳过backward的检查,这也是添加该功能的缘由。 -如下设置中前一个只跳过backward的检查,后一个设置跳过backward、optimize两种类型的检查。同上,op类型跳过也只支持精准匹配。 +##### 2.2 跳过 op 类型检查 +目前接受的类型有: forward、backward、optimize、rpc、dist、lrsched、loss、default。正常 fp32 训练中,不需要跳过 op 类型进行 nan inf 检查。但在`fp16`中,在反向过程出现 inf 会对其进行修正,所以一般需要跳过 backward 的检查,这也是添加该功能的缘由。 +如下设置中前一个只跳过 backward 的检查,后一个设置跳过 backward、optimize 两种类型的检查。同上,op 类型跳过也只支持精准匹配。 ``` export PADDLE_INF_NAN_SKIP_ROLE="backward" export PADDLE_INF_NAN_SKIP_ROLE="backward,optimize" ``` -##### 2.3 跳过指定op中变量的检查 -如下设置中前一个跳过mul op中fc_0.tmp_0变量,后一个设置则跳过mul op中fc_0.tmp_0和fc_0.tmp_1变量及 dropout op的new_relative变量。 +##### 2.3 跳过指定 op 中变量的检查 +如下设置中前一个跳过 mul op 中 fc_0.tmp_0 变量,后一个设置则跳过 mul op 中 fc_0.tmp_0 和 fc_0.tmp_1 变量及 dropout op 的 new_relative 变量。 ``` export PADDLE_INF_NAN_SKIP_VAR="mul:fc_0.tmp_0" export PADDLE_INF_NAN_SKIP_VAR="mul:fc_0.tmp_0,mul:fc_0.tmp_1,dropout:new_relative" ``` -`注意`:指定op变量检查中,对于op只接受精准匹配,对于变量则为模糊匹配,如上述的mlu 
op中的fc_0.tmp_0和fc_0.tmp_1变量可用c_0.tmp进行匹配。 +`注意`:指定 op 变量检查中,对于 op 只接受精准匹配,对于变量则为模糊匹配,如上述的 mlu op 中的 fc_0.tmp_0 和 fc_0.tmp_1 变量可用 c_0.tmp 进行匹配。 ## 试用 -可以使用单测中的[check_nan_inf_base.py](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/check_nan_inf_base.py)文件进行试用。该脚本已设置FLAGS_check_nan_inf=1打开check nan inf功能。直接python check_nan_inf_base.py执行即可。 -#### 1. GPU日志信息 -其中GPU的check nan信息由于在GPU中打印,所以nan inf信息会出现在出错信息栈前面。工具中会打印出现inf、nan的op及tensor名称,每个block会打印nan、inf、num中的3个值,并打印各自block中nan、inf、num的数量。 +可以使用单测中的[check_nan_inf_base.py](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/check_nan_inf_base.py)文件进行试用。该脚本已设置 FLAGS_check_nan_inf=1 打开 check nan inf 功能。直接 python check_nan_inf_base.py 执行即可。 +#### 1. GPU 日志信息 +其中 GPU 的 check nan 信息由于在 GPU 中打印,所以 nan inf 信息会出现在出错信息栈前面。工具中会打印出现 inf、nan 的 op 及 tensor 名称,每个 block 会打印 nan、inf、num 中的 3 个值,并打印各自 block 中 nan、inf、num 的数量。 ![gpu_nan_inf.png](check_nan_inf_files/gpu_nan_inf.png) -#### 2. CPU日志信息 -CPU中打印的nan、inf、num会在出错信息栈前面显示,同样打印了nan、inf、num中的三个值,并打印nan、inf、num的数量。check nan信息中op及tensor的名称会在最后显示。 +#### 2. CPU 日志信息 +CPU 中打印的 nan、inf、num 会在出错信息栈前面显示,同样打印了 nan、inf、num 中的三个值,并打印 nan、inf、num 的数量。check nan 信息中 op 及 tensor 的名称会在最后显示。 ![cpu_nan_inf.png](check_nan_inf_files/cpu_nan_inf.png) ![cpu_nan_inf_op_var.png](check_nan_inf_files/cpu_nan_inf_op_var.png) ## 速度 -测试环境:v100 32G单卡测试,Resnet50模型,imagenet数据集。`不同环境模型数据集下速度可能不同,以下速度仅供参考` ->不检查nan inf速度,每张卡307.7 images/s。 -检查nan inf速度,每张卡250.2 images/s。 +测试环境:v100 32G 单卡测试,Resnet50 模型,imagenet 数据集。`不同环境模型数据集下速度可能不同,以下速度仅供参考` +>不检查 nan inf 速度,每张卡 307.7 images/s。 +检查 nan inf 速度,每张卡 250.2 images/s。 ## 原理 #### 1. 
工具原理 -对于浮点类型操作,正常数值num,无穷大inf,非数值nan有如下运行关系。更详细可查看[INF, NAN, and NULL](https://wiki.analytica.com/index.php?title=INF,_NAN,_and_NULL_-_Exception_values&title=INF,_NAN,_and_NULL_-_Exception_values) +对于浮点类型操作,正常数值 num,无穷大 inf,非数值 nan 有如下运行关系。更详细可查看[INF, NAN, and NULL](https://wiki.analytica.com/index.php?title=INF,_NAN,_and_NULL_-_Exception_values&title=INF,_NAN,_and_NULL_-_Exception_values) ``` nan - nan = nan, inf - inf = nan, num - num = 0, nan + nan = nan, inf + inf = inf, nan + 0 = nan, inf + 0 = inf, nan + inf = nan, 0 + 0 = 0 ``` -基于此使用如下操作仅需最后检查sum是否为nan或者inf就行了。 +基于此使用如下操作仅需最后检查 sum 是否为 nan 或者 inf 就行了。 ``` for(value:values): sum += (value-value) ``` -***`注意`:本文档的进阶使用、速度、原理目前仅在develop版本的paddle生效,并将随1.7版本的paddle发布。 -此前版本的check nan inf工具在GPU上不推荐使用,旧工具速度为0.25 images/s,测试会拖慢1000多倍的训练速度。*** +***`注意`:本文档的进阶使用、速度、原理目前仅在 develop 版本的 paddle 生效,并将随 1.7 版本的 paddle 发布。 +此前版本的 check nan inf 工具在 GPU 上不推荐使用,旧工具速度为 0.25 images/s,测试会拖慢 1000 多倍的训练速度。*** diff --git a/docs/guides/flags/cudnn_cn.rst b/docs/guides/flags/cudnn_cn.rst index 8151c80d429..cde0b90cb78 100644 --- a/docs/guides/flags/cudnn_cn.rst +++ b/docs/guides/flags/cudnn_cn.rst @@ -5,66 +5,66 @@ cudnn FLAGS_conv_workspace_size_limit ******************************************* -(始于0.13.0) +(始于 0.13.0) -用于选择cuDNN卷积算法的工作区限制大小(单位为MB)。cuDNN的内部函数在这个内存限制范围内获得速度最快的匹配算法。通常,在较大的工作区内可以选择更快的算法,但同时也会显著增加内存空间。用户需要在内存和速度之间进行权衡。 +用于选择 cuDNN 卷积算法的工作区限制大小(单位为 MB)。cuDNN 的内部函数在这个内存限制范围内获得速度最快的匹配算法。通常,在较大的工作区内可以选择更快的算法,但同时也会显著增加内存空间。用户需要在内存和速度之间进行权衡。 取值范围 --------------- -Uint64型,缺省值为512。即512MB显存工作区。 +Uint64 型,缺省值为 512。即 512MB 显存工作区。 示例 ------- -FLAGS_conv_workspace_size_limit=1024 - 将用于选择cuDNN卷积算法的工作区限制大小设置为1024MB。 +FLAGS_conv_workspace_size_limit=1024 - 将用于选择 cuDNN 卷积算法的工作区限制大小设置为 1024MB。 FLAGS_cudnn_batchnorm_spatial_persistent ******************************************* -(始于1.4.0) +(始于 1.4.0) -表示是否在batchnorm中使用新的批量标准化模式CUDNN_BATCHNORM_SPATIAL_PERSISTENT函数。 +表示是否在 batchnorm 中使用新的批量标准化模式 CUDNN_BATCHNORM_SPATIAL_PERSISTENT 函数。 取值范围 
--------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_cudnn_batchnorm_spatial_persistent=True - 开启CUDNN_BATCHNORM_SPATIAL_PERSISTENT模式。 +FLAGS_cudnn_batchnorm_spatial_persistent=True - 开启 CUDNN_BATCHNORM_SPATIAL_PERSISTENT 模式。 注意 ------- -此模式在某些任务中可以更快,因为将为CUDNN_DATA_FLOAT和CUDNN_DATA_HALF数据类型选择优化路径。我们默认将其设置为False的原因是此模式可能使用原子整数缩减(scaled atomic integer reduction)而导致某些输入数据范围的数字溢出。 +此模式在某些任务中可以更快,因为将为 CUDNN_DATA_FLOAT 和 CUDNN_DATA_HALF 数据类型选择优化路径。我们默认将其设置为 False 的原因是此模式可能使用原子整数缩减(scaled atomic integer reduction)而导致某些输入数据范围的数字溢出。 FLAGS_cudnn_deterministic ******************************************* -(始于0.13.0) +(始于 0.13.0) -cuDNN对于同一操作有几种算法,一些算法结果是非确定性的,如卷积算法。该flag用于调试。它表示是否选择cuDNN中的确定性函数。 +cuDNN 对于同一操作有几种算法,一些算法结果是非确定性的,如卷积算法。该 flag 用于调试。它表示是否选择 cuDNN 中的确定性函数。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_cudnn_deterministic=True - 选择cuDNN中的确定性函数。 +FLAGS_cudnn_deterministic=True - 选择 cuDNN 中的确定性函数。 注意 ------- -现在,在cuDNN卷积和池化Operator中启用此flag。确定性算法速度可能较慢,因此该flag通常用于调试。 +现在,在 cuDNN 卷积和池化 Operator 中启用此 flag。确定性算法速度可能较慢,因此该 flag 通常用于调试。 FLAGS_cudnn_exhaustive_search ******************************************* -(始于1.2.0) +(始于 1.2.0) -表示是否使用穷举搜索方法来选择卷积算法。在cuDNN中有两种搜索方法,启发式搜索和穷举搜索。穷举搜索尝试所有cuDNN算法以选择其中最快的算法。此方法非常耗时,所选择的算法将针对给定的层规格进行缓存。 一旦更改了图层规格(如batch大小,feature map大小),它将再次搜索。 +表示是否使用穷举搜索方法来选择卷积算法。在 cuDNN 中有两种搜索方法,启发式搜索和穷举搜索。穷举搜索尝试所有 cuDNN 算法以选择其中最快的算法。此方法非常耗时,所选择的算法将针对给定的层规格进行缓存。 一旦更改了图层规格(如 batch 大小,feature map 大小),它将再次搜索。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- diff --git a/docs/guides/flags/data_cn.rst b/docs/guides/flags/data_cn.rst index efc400902dc..e4d60e77922 100644 --- a/docs/guides/flags/data_cn.rst +++ b/docs/guides/flags/data_cn.rst @@ -5,42 +5,42 @@ FLAGS_enable_cublas_tensor_op_math ******************************************* -(始于1.2.0) +(始于 1.2.0) -该flag表示是否使用Tensor Core,但可能会因此降低部分精确度。 +该 flag 表示是否使用 Tensor Core,但可能会因此降低部分精确度。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 
False。 示例 ------- -FLAGS_enable_cublas_tensor_op_math=True - 使用Tensor Core。 +FLAGS_enable_cublas_tensor_op_math=True - 使用 Tensor Core。 FLAGS_use_mkldnn ******************************************* -(始于0.13.0) +(始于 0.13.0) -在预测或训练过程中,可以通过该选项选择使用Intel MKL-DNN(https://github.com/intel/mkl-dnn)库运行。 -“用于深度神经网络的英特尔(R)数学核心库(Intel(R) MKL-DNN)”是一个用于深度学习应用程序的开源性能库。该库加速了英特尔(R)架构上的深度学习应用程序和框架。Intel MKL-DNN包含矢量化和线程化构建建块,您可以使用它们来实现具有C和C ++接口的深度神经网络(DNN)。 +在预测或训练过程中,可以通过该选项选择使用 Intel MKL-DNN(https://github.com/intel/mkl-dnn)库运行。 +“用于深度神经网络的英特尔(R)数学核心库(Intel(R) MKL-DNN)”是一个用于深度学习应用程序的开源性能库。该库加速了英特尔(R)架构上的深度学习应用程序和框架。Intel MKL-DNN 包含矢量化和线程化构建建块,您可以使用它们来实现具有 C 和 C ++接口的深度神经网络(DNN)。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_use_mkldnn=True - 开启使用MKL-DNN运行。 +FLAGS_use_mkldnn=True - 开启使用 MKL-DNN 运行。 注意 ------- -FLAGS_use_mkldnn仅用于python训练和预测脚本。要在CAPI中启用MKL-DNN,请设置选项 -DWITH_MKLDNN=ON。 -英特尔MKL-DNN支持英特尔64架构和兼容架构。 +FLAGS_use_mkldnn 仅用于 python 训练和预测脚本。要在 CAPI 中启用 MKL-DNN,请设置选项 -DWITH_MKLDNN=ON。 +英特尔 MKL-DNN 支持英特尔 64 架构和兼容架构。 该库对基于以下设备的系统进行了优化: -英特尔SSE4.1支持的英特尔凌动(R)处理器; -第4代,第5代,第6代,第7代和第8代英特尔(R)Core(TM)处理器; -英特尔(R)Xeon(R)处理器E3,E5和E7系列(原Sandy Bridge,Ivy Bridge,Haswell和Broadwell); -英特尔(R)Xeon(R)可扩展处理器(原Skylake和Cascade Lake); -英特尔(R)Xeon Phi(TM)处理器(原Knights Landing and Knights Mill); +英特尔 SSE4.1 支持的英特尔凌动(R)处理器; +第 4 代,第 5 代,第 6 代,第 7 代和第 8 代英特尔(R)Core(TM)处理器; +英特尔(R)Xeon(R)处理器 E3,E5 和 E7 系列(原 Sandy Bridge,Ivy Bridge,Haswell 和 Broadwell); +英特尔(R)Xeon(R)可扩展处理器(原 Skylake 和 Cascade Lake); +英特尔(R)Xeon Phi(TM)处理器(原 Knights Landing and Knights Mill); 兼容处理器。 diff --git a/docs/guides/flags/debug_cn.rst b/docs/guides/flags/debug_cn.rst index 9504f314e14..706899e2210 100644 --- a/docs/guides/flags/debug_cn.rst +++ b/docs/guides/flags/debug_cn.rst @@ -5,81 +5,81 @@ FLAGS_check_nan_inf ******************** -(始于0.13.0) +(始于 0.13.0) -用于调试。它用于检查Operator的结果是否含有Nan或Inf。 +用于调试。它用于检查 Operator 的结果是否含有 Nan 或 Inf。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- 
-FLAGS_check_nan_inf=True - 检查Operator的结果是否含有Nan或Inf。 +FLAGS_check_nan_inf=True - 检查 Operator 的结果是否含有 Nan 或 Inf。 FLAGS_cpu_deterministic ******************************************* -(始于0.15.0) +(始于 0.15.0) -该flag用于调试。它表示是否在CPU侧确定计算结果。 在某些情况下,不同求和次序的结果可能不同,例如,`a+b+c+d` 的结果可能与 `c+a+b+d` 的结果不同。 +该 flag 用于调试。它表示是否在 CPU 侧确定计算结果。 在某些情况下,不同求和次序的结果可能不同,例如,`a+b+c+d` 的结果可能与 `c+a+b+d` 的结果不同。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_cpu_deterministic=True - 在CPU侧确定计算结果。 +FLAGS_cpu_deterministic=True - 在 CPU 侧确定计算结果。 FLAGS_enable_rpc_profiler ******************************************* -(始于1.0.0) +(始于 1.0.0) -是否启用RPC分析器。 +是否启用 RPC 分析器。 取值范围 ---------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_enable_rpc_profiler=True - 启用RPC分析器并在分析器文件中记录时间线。 +FLAGS_enable_rpc_profiler=True - 启用 RPC 分析器并在分析器文件中记录时间线。 FLAGS_multiple_of_cupti_buffer_size ******************************************* -(始于1.4.0) +(始于 1.4.0) -该flag用于分析。它表示CUPTI设备缓冲区大小的倍数。如果在profiler过程中程序挂掉或者在chrome://tracing中加载timeline文件时出现异常,请尝试增大此值。 +该 flag 用于分析。它表示 CUPTI 设备缓冲区大小的倍数。如果在 profiler 过程中程序挂掉或者在 chrome://tracing 中加载 timeline 文件时出现异常,请尝试增大此值。 取值范围 --------------- -Int32型,缺省值为1。 +Int32 型,缺省值为 1。 示例 ------- -FLAGS_multiple_of_cupti_buffer_size=1 - 将CUPTI设备缓冲区大小的倍数设为1。 +FLAGS_multiple_of_cupti_buffer_size=1 - 将 CUPTI 设备缓冲区大小的倍数设为 1。 FLAGS_reader_queue_speed_test_mode ******************************************* -(始于1.1.0) +(始于 1.1.0) -将pyreader数据队列设置为测试模式。在测试模式下,pyreader将缓存一些数据,然后执行器将读取缓存的数据,因此阅读器不会成为瓶颈。 +将 pyreader 数据队列设置为测试模式。在测试模式下,pyreader 将缓存一些数据,然后执行器将读取缓存的数据,因此阅读器不会成为瓶颈。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_reader_queue_speed_test_mode=True - 启用pyreader测试模式。 +FLAGS_reader_queue_speed_test_mode=True - 启用 pyreader 测试模式。 注意 ------- -仅当使用py_reader时该flag才有效。 +仅当使用 py_reader 时该 flag 才有效。 .. 
toctree:: :hidden: diff --git a/docs/guides/flags/device_cn.rst b/docs/guides/flags/device_cn.rst index 143518d5fac..ce58609dd10 100644 --- a/docs/guides/flags/device_cn.rst +++ b/docs/guides/flags/device_cn.rst @@ -5,33 +5,33 @@ FLAGS_paddle_num_threads ******************************************* -(始于0.15.0) +(始于 0.15.0) -控制每个paddle实例的线程数。 +控制每个 paddle 实例的线程数。 取值范围 --------------- -Int32型,缺省值为1。 +Int32 型,缺省值为 1。 示例 ------- -FLAGS_paddle_num_threads=2 - 将每个实例的最大线程数设为2。 +FLAGS_paddle_num_threads=2 - 将每个实例的最大线程数设为 2。 FLAGS_selected_gpus ******************************************* -(始于1.3) +(始于 1.3) -设置用于训练或预测的GPU设备。 +设置用于训练或预测的 GPU 设备。 取值范围 --------------- -以逗号分隔的设备ID列表,其中每个设备ID是一个非负整数,且应小于您的机器拥有的GPU设备总数。 +以逗号分隔的设备 ID 列表,其中每个设备 ID 是一个非负整数,且应小于您的机器拥有的 GPU 设备总数。 示例 ------- -FLAGS_selected_gpus=0,1,2,3,4,5,6,7 - 令0-7号GPU设备用于训练和预测。 +FLAGS_selected_gpus=0,1,2,3,4,5,6,7 - 令 0-7 号 GPU 设备用于训练和预测。 注意 ------- -使用该flag的原因是我们希望在GPU设备之间使用聚合通信,但通过CUDA_VISIBLE_DEVICES只能使用共享内存。 +使用该 flag 的原因是我们希望在 GPU 设备之间使用聚合通信,但通过 CUDA_VISIBLE_DEVICES 只能使用共享内存。 diff --git a/docs/guides/flags/distributed_cn.rst b/docs/guides/flags/distributed_cn.rst index f786f925e16..bca437c073b 100644 --- a/docs/guides/flags/distributed_cn.rst +++ b/docs/guides/flags/distributed_cn.rst @@ -5,32 +5,32 @@ FLAGS_communicator_fake_rpc ********************** -(始于1.5.0) +(始于 1.5.0) -当设为True时,通信器不会实际进行rpc调用,因此速度不会受到网络通信的影响。该flag用于调试。 +当设为 True 时,通信器不会实际进行 rpc 调用,因此速度不会受到网络通信的影响。该 flag 用于调试。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_communicator_fake_rpc=True - 启用通信器fake模式。 +FLAGS_communicator_fake_rpc=True - 启用通信器 fake 模式。 注意 ------- -该flag仅用于paddlepaddle的开发者,普通用户不应对其设置。 +该 flag 仅用于 paddlepaddle 的开发者,普通用户不应对其设置。 FLAGS_communicator_independent_recv_thread ************************************** -(始于1.5.0) +(始于 1.5.0) 使用独立线程以从参数服务器接收参数。 取值范围 --------------- -Bool型,缺省值为True。 +Bool 型,缺省值为 True。 示例 ------- @@ -38,37 +38,37 @@ FLAGS_communicator_independent_recv_thread=True - 使用独立线程以从参数 注意 
------- -开发者使用该flag进行框架的调试与优化,普通用户不应对其设置。 +开发者使用该 flag 进行框架的调试与优化,普通用户不应对其设置。 FLAGS_communicator_max_merge_var_num ************************************** -(始于1.5.0) +(始于 1.5.0) 要通过通信器合并为一个梯度并发送的最大梯度数。训练器将所有梯度放入队列,然后通信器将从队列中取出梯度并在合并后发送。 取值范围 --------------- -Int32型,缺省值为20。 +Int32 型,缺省值为 20。 示例 ------- -FLAGS_communicator_max_merge_var_num=16 - 将要通过通信器合并为一个梯度并发送的最大梯度数设为16。 +FLAGS_communicator_max_merge_var_num=16 - 将要通过通信器合并为一个梯度并发送的最大梯度数设为 16。 注意 ------- -该flag和训练器线程数有着密切关联,缺省值应和线程数一致。 +该 flag 和训练器线程数有着密切关联,缺省值应和线程数一致。 FLAGS_communicator_merge_sparse_grad ******************************************* -(始于1.5.0) +(始于 1.5.0) 在发送之前,合并稀疏梯度。 取值范围 --------------- -Bool型,缺省值true。 +Bool 型,缺省值 true。 示例 ------- @@ -76,165 +76,165 @@ FLAGS_communicator_merge_sparse_grad=true - 设置合并稀疏梯度。 注意 ------- -合并稀疏梯度会耗费时间。如果重复ID较多,内存占用会变少,通信会变快;如果重复ID较少,则并不会节约内存。 +合并稀疏梯度会耗费时间。如果重复 ID 较多,内存占用会变少,通信会变快;如果重复 ID 较少,则并不会节约内存。 FLAGS_communicator_min_send_grad_num_before_recv ******************************************* -(始于1.5.0) +(始于 1.5.0) -在通信器中,有一个发送线程向参数服务器发送梯度,一个接收线程从参数服务器接收参数,且它们之间彼此独立。该flag用于控制接收线程的频率。 仅当发送线程至少发送FLAGS_communicator_min_send_grad_num_before_recv数量的梯度时,接收线程才会从参数服务器接收参数。 +在通信器中,有一个发送线程向参数服务器发送梯度,一个接收线程从参数服务器接收参数,且它们之间彼此独立。该 flag 用于控制接收线程的频率。 仅当发送线程至少发送 FLAGS_communicator_min_send_grad_num_before_recv 数量的梯度时,接收线程才会从参数服务器接收参数。 取值范围 --------------- -Int32型,缺省值为20。 +Int32 型,缺省值为 20。 示例 ------- -FLAGS_communicator_min_send_grad_num_before_recv=10 - 在接收线程从参数服务器接收参数之前,发送线程发送的梯度数为10。 +FLAGS_communicator_min_send_grad_num_before_recv=10 - 在接收线程从参数服务器接收参数之前,发送线程发送的梯度数为 10。 注意 ------- -由于该flag和训练器的训练线程数强相关,而每个训练线程都会发送其梯度,所以缺省值应和线程数一致。 +由于该 flag 和训练器的训练线程数强相关,而每个训练线程都会发送其梯度,所以缺省值应和线程数一致。 FLAGS_communicator_send_queue_size ******************************************* -(始于1.5.0) +(始于 1.5.0) 每个梯度的队列大小。训练器将梯度放入队列,然后通信器将其从队列中取出并发送出去。 当通信器很慢时,队列可能会满,训练器在队列有空间之前被持续阻塞。它用于避免训练比通信快得多,以致太多的梯度没有及时发出的情况。 取值范围 --------------- -Int32型,缺省值为20。 +Int32 型,缺省值为 20。 示例 ------- 
-FLAGS_communicator_send_queue_size=10 - 设置每个梯度的队列大小为10。 +FLAGS_communicator_send_queue_size=10 - 设置每个梯度的队列大小为 10。 注意 ------- -该flag会影响训练速度,若队列大小过大,速度会变快但结果可能会变差。 +该 flag 会影响训练速度,若队列大小过大,速度会变快但结果可能会变差。 FLAGS_communicator_send_wait_times ******************************************* -(始于1.5.0) +(始于 1.5.0) -合并数没有达到max_merge_var_num的情况下发送线程等待的次数。 +合并数没有达到 max_merge_var_num 的情况下发送线程等待的次数。 取值范围 --------------- -Int32型,缺省值为5。 +Int32 型,缺省值为 5。 示例 ------- -FLAGS_communicator_send_wait_times=5 - 将合并数没有达到max_merge_var_num的情况下发送线程等待的次数设为5。 +FLAGS_communicator_send_wait_times=5 - 将合并数没有达到 max_merge_var_num 的情况下发送线程等待的次数设为 5。 FLAGS_communicator_thread_pool_size ******************************************* -(始于1.5.0) +(始于 1.5.0) 设置用于发送梯度和接收参数的线程池大小。 取值范围 --------------- -Int32型,缺省值为5。 +Int32 型,缺省值为 5。 示例 ------- -FLAGS_communicator_thread_pool_size=10 - 设置线程池大小为10。 +FLAGS_communicator_thread_pool_size=10 - 设置线程池大小为 10。 注意 ------- -大部分情况下,用户不需要设置该flag。 +大部分情况下,用户不需要设置该 flag。 FLAGS_dist_threadpool_size ******************************************* -(始于1.0.0) +(始于 1.0.0) 控制用于分布式模块的线程数。如果未设置,则将其设置为硬线程。 取值范围 --------------- -Int32型,缺省值为0。 +Int32 型,缺省值为 0。 示例 ------- -FLAGS_dist_threadpool_size=10 - 将用于分布式模块的最大线程数设为10。 +FLAGS_dist_threadpool_size=10 - 将用于分布式模块的最大线程数设为 10。 FLAGS_rpc_deadline ******************************************* -(始于1.0.0) +(始于 1.0.0) -它控制rpc通信的deadline超时。 +它控制 rpc 通信的 deadline 超时。 取值范围 --------------- -Int32型,缺省值为180000,单位为ms。 +Int32 型,缺省值为 180000,单位为 ms。 示例 ------- -FLAGS_rpc_deadline=180000 - 将deadline超时设为3分钟。 +FLAGS_rpc_deadline=180000 - 将 deadline 超时设为 3 分钟。 FLAGS_rpc_disable_reuse_port ******************************************* -(始于1.2.0) +(始于 1.2.0) -FLAGS_rpc_disable_reuse_port为True时,grpc的 GRPC_ARG_ALLOW_REUSEPORT会被设置为False以禁用SO_REUSEPORT。 +FLAGS_rpc_disable_reuse_port 为 True 时,grpc 的 GRPC_ARG_ALLOW_REUSEPORT 会被设置为 False 以禁用 SO_REUSEPORT。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_rpc_disable_reuse_port=True - 禁用SO_REUSEPORT。 
+FLAGS_rpc_disable_reuse_port=True - 禁用 SO_REUSEPORT。 FLAGS_rpc_get_thread_num ******************************************* -(始于1.0.0) +(始于 1.0.0) 它控制用于从参数服务器获取参数的线程数。 取值范围 --------------- -Int32型,缺省值为12。 +Int32 型,缺省值为 12。 示例 ------- -FLAGS_rpc_get_thread_num=6 - 将从参数服务器获取参数的线程数设为6。 +FLAGS_rpc_get_thread_num=6 - 将从参数服务器获取参数的线程数设为 6。 FLAGS_rpc_send_thread_num ******************************************* -(始于1.0.0) +(始于 1.0.0) -它控制用于发送rpc的线程数。 +它控制用于发送 rpc 的线程数。 取值范围 --------------- -Int32型,缺省值为12。 +Int32 型,缺省值为 12。 示例 ------- -FLAGS_rpc_send_thread_num=6 - 将用于发送的线程数设为6。 +FLAGS_rpc_send_thread_num=6 - 将用于发送的线程数设为 6。 FLAGS_rpc_server_profile_path ******************************************* since(v0.15.0) -设置分析器输出日志文件路径前缀。完整路径为FLAGS_rpc_server_profile_path_listener_id,其中listener_id为随机数。 +设置分析器输出日志文件路径前缀。完整路径为 FLAGS_rpc_server_profile_path_listener_id,其中 listener_id 为随机数。 取值范围 --------------- -String型,缺省值为"./profile_ps"。 +String 型,缺省值为"./profile_ps"。 示例 ------- @@ -245,27 +245,27 @@ FLAGS_apply_pass_to_program ******************************************* since(v2.2.0) -它控制当使用Fleet API时,是否在Program上使用IR Pass优化。 +它控制当使用 Fleet API 时,是否在 Program 上使用 IR Pass 优化。 取值范围 --------------- -Bool型,缺省值为false。 +Bool 型,缺省值为 false。 示例 ------- -FLAGS_apply_pass_to_program=true - 当使用Fleet API时,在Program上使用IR Pass优化。 +FLAGS_apply_pass_to_program=true - 当使用 Fleet API 时,在 Program 上使用 IR Pass 优化。 FLAGS_allreduce_record_one_event ******************************************* since(v2.2.0) -使allreduce操作只等待一个事件而不是多个事件。目前只适用于fuse allreduce的场景,否则精度会有误。 +使 allreduce 操作只等待一个事件而不是多个事件。目前只适用于 fuse allreduce 的场景,否则精度会有误。 取值范围 --------------- -Bool型,缺省值为false。 +Bool 型,缺省值为 false。 示例 ------- -FLAGS_allreduce_record_one_event=true - 使allreduce操作只等待一个事件而不是多个事件。 +FLAGS_allreduce_record_one_event=true - 使 allreduce 操作只等待一个事件而不是多个事件。 diff --git a/docs/guides/flags/executor_cn.rst b/docs/guides/flags/executor_cn.rst index 3882b185552..bcc3cf75758 100644 --- a/docs/guides/flags/executor_cn.rst +++ 
b/docs/guides/flags/executor_cn.rst @@ -5,43 +5,43 @@ FLAGS_enable_parallel_graph ******************************************* -(始于1.2.0) +(始于 1.2.0) -该flag用于ParallelExecutor以禁用并行图执行模式。 +该 flag 用于 ParallelExecutor 以禁用并行图执行模式。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_enable_parallel_graph=False - 通过ParallelExecutor强制禁用并行图执行模式。 +FLAGS_enable_parallel_graph=False - 通过 ParallelExecutor 强制禁用并行图执行模式。 FLAGS_pe_profile_fname ******************************************* -(始于1.3.0) +(始于 1.3.0) -该flag用于ParallelExecutor的调试。ParallelExecutor会通过gpertools生成配置文件结果,并将结果存储在FLAGS_pe_profile_fname指定的文件中。仅在编译选项选择 `WITH_PRIFILER=ON` 时有效。如果禁用则设为empty。 +该 flag 用于 ParallelExecutor 的调试。ParallelExecutor 会通过 gpertools 生成配置文件结果,并将结果存储在 FLAGS_pe_profile_fname 指定的文件中。仅在编译选项选择 `WITH_PRIFILER=ON` 时有效。如果禁用则设为 empty。 取值范围 --------------- -String型,缺省值为empty ("")。 +String 型,缺省值为 empty ("")。 示例 ------- -FLAGS_pe_profile_fname="./parallel_executor.perf" - 将配置文件结果存储在parallel_executor.perf中。 +FLAGS_pe_profile_fname="./parallel_executor.perf" - 将配置文件结果存储在 parallel_executor.perf 中。 FLAGS_print_sub_graph_dir ******************************************* -(始于1.2.0) +(始于 1.2.0) -该flag用于调试。如果程序中转换图的某些子图失去连接,则结果可能会出错。我们可以将这些断开连接的子图打印到该flag指定的文件中。如果禁用则设为empty。 +该 flag 用于调试。如果程序中转换图的某些子图失去连接,则结果可能会出错。我们可以将这些断开连接的子图打印到该 flag 指定的文件中。如果禁用则设为 empty。 取值范围 --------------- -String型,缺省值为empty ("")。 +String 型,缺省值为 empty ("")。 示例 ------- @@ -50,18 +50,18 @@ FLAGS_print_sub_graph_dir="./sub_graphs.txt" - 将断开连接的子图打印到 FLAGS_use_ngraph ******************************************* -(始于1.4.0) +(始于 1.4.0) -在预测或训练过程中,可以通过该选项选择使用英特尔nGraph(https://github.com/NervanaSystems/ngraph)引擎。它将在英特尔Xeon CPU上获得很大的性能提升。 +在预测或训练过程中,可以通过该选项选择使用英特尔 nGraph(https://github.com/NervanaSystems/ngraph)引擎。它将在英特尔 Xeon CPU 上获得很大的性能提升。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- -FLAGS_use_ngraph=True - 开启使用nGraph运行。 +FLAGS_use_ngraph=True - 开启使用 nGraph 运行。 注意 ------- 
-英特尔nGraph目前仅在少数模型中支持。我们只验证了[ResNet-50](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_ngraph.md)的训练和预测。 +英特尔 nGraph 目前仅在少数模型中支持。我们只验证了[ResNet-50](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/image_classification/README_ngraph.md)的训练和预测。 diff --git a/docs/guides/flags/flags_cn.rst b/docs/guides/flags/flags_cn.rst index 95bea66249c..3d67d3134e6 100644 --- a/docs/guides/flags/flags_cn.rst +++ b/docs/guides/flags/flags_cn.rst @@ -1,20 +1,20 @@ .. _cn_guides_flags_flags: -环境变量FLAGS +环境变量 FLAGS ================== 调用说明 ---------- -PaddlePaddle中的环境变量FLAGS支持两种设置方式。 +PaddlePaddle 中的环境变量 FLAGS 支持两种设置方式。 -- 通过export来设置环境变量,如 :code:`export FLAGS_eager_delete_tensor_gb = 1.0` 。 +- 通过 export 来设置环境变量,如 :code:`export FLAGS_eager_delete_tensor_gb = 1.0` 。 -- 通过API::code:`get_flag` 和 :code:`set_flags` 来打印和设置环境变量FLAGS。API使用详情请参考 :ref:`cn_api_paddle_get_flags` 与 :ref:`cn_api_paddle_set_flags` 。 +- 通过 API::code:`get_flag` 和 :code:`set_flags` 来打印和设置环境变量 FLAGS。API 使用详情请参考 :ref:`cn_api_paddle_get_flags` 与 :ref:`cn_api_paddle_set_flags` 。 -环境变量FLAGS功能分类 +环境变量 FLAGS 功能分类 ---------------------- .. 
toctree:: diff --git a/docs/guides/flags/memory_cn.rst b/docs/guides/flags/memory_cn.rst index 42ae8f41d5d..8c7a3d5bd91 100644 --- a/docs/guides/flags/memory_cn.rst +++ b/docs/guides/flags/memory_cn.rst @@ -5,31 +5,31 @@ FLAGS_allocator_strategy ******************** -(始于1.2) +(始于 1.2) -用于选择PaddlePaddle的分配器策略。 +用于选择 PaddlePaddle 的分配器策略。 取值范围 --------------- -String型,['naive_best_fit', 'auto_growth']中的一个。缺省值如果编译Paddle CMake时使用-DON_INFER=ON为'naive_best_fit'。 -其他默认情况为'auto_growth'。PaddlePaddle pip安装包的默认策略也是'auto_growth' +String 型,['naive_best_fit', 'auto_growth']中的一个。缺省值如果编译 Paddle CMake 时使用-DON_INFER=ON 为'naive_best_fit'。 +其他默认情况为'auto_growth'。PaddlePaddle pip 安装包的默认策略也是'auto_growth' 示例 -------- -FLAGS_allocator_strategy=naive_best_fit - 使用预分配best fit分配器,PaddlePaddle会先占用大多比例的可用内存/显存,在Paddle具体数据使用时分配,这种方式预占空间较大,但内存/显存碎片较少(比如能够支持模型的最大batch size会变大)。 +FLAGS_allocator_strategy=naive_best_fit - 使用预分配 best fit 分配器,PaddlePaddle 会先占用大多比例的可用内存/显存,在 Paddle 具体数据使用时分配,这种方式预占空间较大,但内存/显存碎片较少(比如能够支持模型的最大 batch size 会变大)。 -FLAGS_allocator_strategy=auto_growth - 使用auto growth分配器。PaddlePaddle会随着真实数据需要再占用内存/显存,但内存/显存可能会产生碎片(比如能够支持模型的最大batch size会变小)。 +FLAGS_allocator_strategy=auto_growth - 使用 auto growth 分配器。PaddlePaddle 会随着真实数据需要再占用内存/显存,但内存/显存可能会产生碎片(比如能够支持模型的最大 batch size 会变小)。 FLAGS_eager_delete_scope ******************************************* -(始于0.12.0) +(始于 0.12.0) -同步局域删除。设置后,它将降低GPU内存使用量,但同时也会减慢销毁变量的速度(性能损害约1%)。 +同步局域删除。设置后,它将降低 GPU 内存使用量,但同时也会减慢销毁变量的速度(性能损害约 1%)。 取值范围 --------------- -Bool型,缺省值为True。 +Bool 型,缺省值为 True。 示例 ------- @@ -38,36 +38,36 @@ FLAGS_eager_delete_scope=True - 同步局域删除。 FLAGS_eager_delete_tensor_gb ******************************************* -(始于1.0.0) +(始于 1.0.0) -表示是否使用垃圾回收策略来优化网络的内存使用。如果FLAGS_eager_delete_tensor_gb < 0,则禁用垃圾回收策略。如果FLAGS_eager_delete_tensor_gb >= 0,则启用垃圾回收策略,并在运行网络时回收内存垃圾,这有利于节省内存使用量。它仅在您使用Executor运行程序、编译程序或使用并行数据编译程序时才有用。垃圾回收器直到垃圾的内存大小达到FLAGS_eager_delete_tensor_gb GB时才会释放内存垃圾。 +表示是否使用垃圾回收策略来优化网络的内存使用。如果 FLAGS_eager_delete_tensor_gb < 
0,则禁用垃圾回收策略。如果 FLAGS_eager_delete_tensor_gb >= 0,则启用垃圾回收策略,并在运行网络时回收内存垃圾,这有利于节省内存使用量。它仅在您使用 Executor 运行程序、编译程序或使用并行数据编译程序时才有用。垃圾回收器直到垃圾的内存大小达到 FLAGS_eager_delete_tensor_gb GB 时才会释放内存垃圾。 取值范围 --------------- -Double型,单位为GB,缺省值为0.0。 +Double 型,单位为 GB,缺省值为 0.0。 示例 ------- -FLAGS_eager_delete_tensor_gb=0.0 - 垃圾占用大小达到0.0GB时释放内存垃圾,即一旦出现垃圾则马上释放。 +FLAGS_eager_delete_tensor_gb=0.0 - 垃圾占用大小达到 0.0GB 时释放内存垃圾,即一旦出现垃圾则马上释放。 -FLAGS_eager_delete_tensor_gb=1.0 - 垃圾占用内存大小达到1.0GB时释放内存垃圾。 +FLAGS_eager_delete_tensor_gb=1.0 - 垃圾占用内存大小达到 1.0GB 时释放内存垃圾。 FLAGS_eager_delete_tensor_gb=-1.0 - 禁用垃圾回收策略。 注意 ------- -建议用户在训练大型网络时设置FLAGS_eager_delete_tensor_gb=0.0以启用垃圾回收策略。 +建议用户在训练大型网络时设置 FLAGS_eager_delete_tensor_gb=0.0 以启用垃圾回收策略。 FLAGS_fast_eager_deletion_mode ******************************************* -(始于1.3) +(始于 1.3) -是否使用快速垃圾回收策略。如果未设置,则在CUDA内核结束时释放gpu内存。否则gpu内存将在CUDA内核尚未结束的情况下被释放,从而使垃圾回收策略更快。仅在启用垃圾回收策略时有效。 +是否使用快速垃圾回收策略。如果未设置,则在 CUDA 内核结束时释放 gpu 内存。否则 gpu 内存将在 CUDA 内核尚未结束的情况下被释放,从而使垃圾回收策略更快。仅在启用垃圾回收策略时有效。 取值范围 --------------- -Bool型,缺省值为True。 +Bool 型,缺省值为 True。 示例 ------- @@ -78,92 +78,92 @@ FLAGS_fast_eager_deletion_mode=False - 禁用快速垃圾回收策略。 FLAGS_fraction_of_cpu_memory_to_use ******************************************* -(始于1.2.0) +(始于 1.2.0) -表示分配的内存块占CPU总内存大小的比例。将来的内存使用将从该内存块分配。 如果内存块没有足够的cpu内存,将从cpu请求分配与内存块相同大小的新的内存块,直到cpu没有足够的内存为止。 +表示分配的内存块占 CPU 总内存大小的比例。将来的内存使用将从该内存块分配。 如果内存块没有足够的 cpu 内存,将从 cpu 请求分配与内存块相同大小的新的内存块,直到 cpu 没有足够的内存为止。 取值范围 --------------- -Double型,范围[0, 1],表示初始分配的内存块占CPU内存的比例。缺省值为1.0。 +Double 型,范围[0, 1],表示初始分配的内存块占 CPU 内存的比例。缺省值为 1.0。 示例 ------- -FLAGS_fraction_of_cpu_memory_to_use=0.1 - 分配总CPU内存大小的10%作为初始CPU内存块。 +FLAGS_fraction_of_cpu_memory_to_use=0.1 - 分配总 CPU 内存大小的 10%作为初始 CPU 内存块。 FLAGS_fraction_of_cuda_pinned_memory_to_use ******************************************* -(始于1.2.0) +(始于 1.2.0) -表示分配的CUDA Pinned内存块占CPU总内存大小的比例。将来的CUDA Pinned内存使用将从该内存块分配。 如果内存块没有足够的cpu内存,将从cpu请求分配与内存块相同大小的新的内存块,直到cpu没有足够的内存为止。 +表示分配的 CUDA Pinned 内存块占 CPU 总内存大小的比例。将来的 CUDA Pinned 
内存使用将从该内存块分配。 如果内存块没有足够的 cpu 内存,将从 cpu 请求分配与内存块相同大小的新的内存块,直到 cpu 没有足够的内存为止。 取值范围 --------------- -Double型,范围[0, 1],表示初始分配的内存块占CPU内存的比例。缺省值为0.5。 +Double 型,范围[0, 1],表示初始分配的内存块占 CPU 内存的比例。缺省值为 0.5。 示例 ------- -FLAGS_fraction_of_cuda_pinned_memory_to_use=0.1 - 分配总CPU内存大小的10%作为初始CUDA Pinned内存块。 +FLAGS_fraction_of_cuda_pinned_memory_to_use=0.1 - 分配总 CPU 内存大小的 10%作为初始 CUDA Pinned 内存块。 FLAGS_fraction_of_gpu_memory_to_use ******************************************* -(始于1.2.0) +(始于 1.2.0) -表示分配的显存块占GPU总可用显存大小的比例。将来的显存使用将从该显存块分配。 如果显存块没有足够的gpu显存,将从gpu请求分配与显存块同样大小的新的显存块,直到gpu没有足够的显存为止。 +表示分配的显存块占 GPU 总可用显存大小的比例。将来的显存使用将从该显存块分配。 如果显存块没有足够的 gpu 显存,将从 gpu 请求分配与显存块同样大小的新的显存块,直到 gpu 没有足够的显存为止。 取值范围 --------------- -Double型,范围[0, 1],表示初始分配的显存块占GPU可用显存的比例。 +Double 型,范围[0, 1],表示初始分配的显存块占 GPU 可用显存的比例。 示例 ------- -FLAGS_fraction_of_gpu_memory_to_use=0.1 - 分配GPU总可用显存大小的10%作为初始GPU显存块。 +FLAGS_fraction_of_gpu_memory_to_use=0.1 - 分配 GPU 总可用显存大小的 10%作为初始 GPU 显存块。 注意 ------- -Windows系列平台会将FLAGS_fraction_of_gpu_memory_to_use默认设为0.5,Linux则会默认设为0.92。 +Windows 系列平台会将 FLAGS_fraction_of_gpu_memory_to_use 默认设为 0.5,Linux 则会默认设为 0.92。 FLAGS_fuse_parameter_groups_size ******************************************* -(始于1.4.0) +(始于 1.4.0) -FLAGS_fuse_parameter_groups_size表示每一组中参数的个数。缺省值是一个经验性的结果。如果fuse_parameter_groups_size为1,则表示组的大小和参数梯度的数目一致。 如果fuse_parameter_groups_size为-1,则表示只有一个组。缺省值为3,这只是一个经验值。 +FLAGS_fuse_parameter_groups_size 表示每一组中参数的个数。缺省值是一个经验性的结果。如果 fuse_parameter_groups_size 为 1,则表示组的大小和参数梯度的数目一致。 如果 fuse_parameter_groups_size 为-1,则表示只有一个组。缺省值为 3,这只是一个经验值。 取值范围 --------------- -Int32型,缺省值为3。 +Int32 型,缺省值为 3。 示例 ------- -FLAGS_fuse_parameter_groups_size=3 - 将单组参数的梯度大小设为3。 +FLAGS_fuse_parameter_groups_size=3 - 将单组参数的梯度大小设为 3。 FLAGS_fuse_parameter_memory_size ******************************************* -(始于1.5.0) +(始于 1.5.0) -FLAGS_fuse_parameter_memory_size表示作为通信调用输入(例如NCCLAllReduce)的单组参数梯度的上限内存大小。默认值为-1.0,表示不根据memory_size设置组。单位是MB。 +FLAGS_fuse_parameter_memory_size 表示作为通信调用输入(例如 
NCCLAllReduce)的单组参数梯度的上限内存大小。默认值为-1.0,表示不根据 memory_size 设置组。单位是 MB。 取值范围 --------------- -Double型,缺省值为-1.0。 +Double 型,缺省值为-1.0。 示例 ------- -FLAGS_fuse_parameter_memory_size=16 - 将单组参数梯度的上限大小设为16MB。 +FLAGS_fuse_parameter_memory_size=16 - 将单组参数梯度的上限大小设为 16MB。 FLAGS_init_allocated_mem ******************************************* -(始于0.15.0) +(始于 0.15.0) -是否对分配的内存进行非零值初始化。该flag用于调试,以防止某些Ops假定已分配的内存都是初始化为零的。 +是否对分配的内存进行非零值初始化。该 flag 用于调试,以防止某些 Ops 假定已分配的内存都是初始化为零的。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- @@ -174,49 +174,49 @@ FLAGS_init_allocated_mem=False - 不会对分配的内存进行非零初始化 FLAGS_initial_cpu_memory_in_mb ******************************************* -(始于0.14.0) +(始于 0.14.0) -初始PaddlePaddle分配器的CPU内存块大小,单位为MB。分配器将FLAGS_initial_cpu_memory_in_mb和FLAGS_fraction_of_cpu_memory_to_use*(总物理内存)的最小值作为内存块大小。 +初始 PaddlePaddle 分配器的 CPU 内存块大小,单位为 MB。分配器将 FLAGS_initial_cpu_memory_in_mb 和 FLAGS_fraction_of_cpu_memory_to_use*(总物理内存)的最小值作为内存块大小。 取值范围 --------------- -Uint64型,缺省值为500,单位为MB。 +Uint64 型,缺省值为 500,单位为 MB。 示例 ------- -FLAGS_initial_cpu_memory_in_mb=100 - 在FLAGS_fraction_of_cpu_memory_to_use*(总物理内存)大于100MB的情况下,首次提出分配请求时,分配器预先分配100MB内存,并在预分配的内存耗尽时再次分配100MB。 +FLAGS_initial_cpu_memory_in_mb=100 - 在 FLAGS_fraction_of_cpu_memory_to_use*(总物理内存)大于 100MB 的情况下,首次提出分配请求时,分配器预先分配 100MB 内存,并在预分配的内存耗尽时再次分配 100MB。 FLAGS_initial_gpu_memory_in_mb ******************************************* -(始于1.4.0) +(始于 1.4.0) -预分配一块指定大小的GPU显存块。之后的显存使用将从该显存块分配。如果显存块没有足够的显存,将从GPU请求大小为FLAGS_reallocate_gpu_memory_in_mb的显存块,直到GPU没有剩余显存为止。 +预分配一块指定大小的 GPU 显存块。之后的显存使用将从该显存块分配。如果显存块没有足够的显存,将从 GPU 请求大小为 FLAGS_reallocate_gpu_memory_in_mb 的显存块,直到 GPU 没有剩余显存为止。 取值范围 --------------- -Uint64型,大于0,为初始GPU显存大小,单位为MB。 +Uint64 型,大于 0,为初始 GPU 显存大小,单位为 MB。 示例 ------- -FLAGS_initial_gpu_memory_in_mb=4096 - 分配4GB作为初始GPU显存块大小。 +FLAGS_initial_gpu_memory_in_mb=4096 - 分配 4GB 作为初始 GPU 显存块大小。 注意 ------- -如果设置该flag,则FLAGS_fraction_of_gpu_memory_to_use设置的显存大小将被该flag覆盖。PaddlePaddle将用该flag指定的值分配初始GPU显存。 
-如果未设置该flag,即flag默认值为0时,会关闭此显存策略。PaddlePaddle会使用FLAGS_fraction_of_gpu_memory_to_use的策略来分配初始显存块。 +如果设置该 flag,则 FLAGS_fraction_of_gpu_memory_to_use 设置的显存大小将被该 flag 覆盖。PaddlePaddle 将用该 flag 指定的值分配初始 GPU 显存。 +如果未设置该 flag,即 flag 默认值为 0 时,会关闭此显存策略。PaddlePaddle 会使用 FLAGS_fraction_of_gpu_memory_to_use 的策略来分配初始显存块。 FLAGS_memory_fraction_of_eager_deletion ******************************************* -(始于1.4) +(始于 1.4) -垃圾回收策略释放变量的内存大小百分比。如果FLAGS_memory_fraction_of_eager_deletion = 1.0,则将释放网络中的所有临时变量。如果FLAGS_memory_fraction_of_eager_deletion = 0.0,则不会释放网络中的任何临时变量。如果0.0`_ 。 diff --git a/docs/guides/flags/others_cn.rst b/docs/guides/flags/others_cn.rst index 7bec96956ab..10404143e8d 100644 --- a/docs/guides/flags/others_cn.rst +++ b/docs/guides/flags/others_cn.rst @@ -6,13 +6,13 @@ FLAGS_benchmark ******************** -(始于0.12.0) +(始于 0.12.0) -用于基准测试。设置后,它将使局域删除同步,添加一些内存使用日志,并在内核启动后同步所有cuda内核。 +用于基准测试。设置后,它将使局域删除同步,添加一些内存使用日志,并在内核启动后同步所有 cuda 内核。 取值范围 --------------- -Bool型,缺省值为False。 +Bool 型,缺省值为 False。 示例 ------- @@ -21,62 +21,62 @@ FLAGS_benchmark=True - 同步以测试基准。 FLAGS_inner_op_parallelism ******************************************* -(始于1.3.0) +(始于 1.3.0) -大多数Operators都在单线程模式下工作,但对于某些Operators,使用多线程更合适。 例如,优化稀疏梯度的优化Op使用多线程工作会更快。该flag用于设置Op内的线程数。 +大多数 Operators 都在单线程模式下工作,但对于某些 Operators,使用多线程更合适。 例如,优化稀疏梯度的优化 Op 使用多线程工作会更快。该 flag 用于设置 Op 内的线程数。 取值范围 --------------- -Int32型,缺省值为0,这意味着operator将不会在多线程模式下运行。 +Int32 型,缺省值为 0,这意味着 operator 将不会在多线程模式下运行。 示例 ------- -FLAGS_inner_op_parallelism=5 - 将operator内的线程数设为5。 +FLAGS_inner_op_parallelism=5 - 将 operator 内的线程数设为 5。 注意 ------- -目前只有稀疏的adam op支持inner_op_parallelism。 +目前只有稀疏的 adam op 支持 inner_op_parallelism。 FLAGS_max_body_size ******************************************* -(始于1.0.0) +(始于 1.0.0) -控制BRPC中的最大消息大小。 +控制 BRPC 中的最大消息大小。 取值范围 --------------- -Int32型,缺省值为2147483647。 +Int32 型,缺省值为 2147483647。 示例 ------- -FLAGS_max_body_size=2147483647 - 将BRPC消息大小设为2147483647。 +FLAGS_max_body_size=2147483647 - 将 BRPC 消息大小设为 2147483647。 
FLAGS_sync_nccl_allreduce ******************************************* -(始于1.3) +(始于 1.3) -如果FLAGS_sync_nccl_allreduce为True,则会在allreduce_op_handle中调用 `cudaStreamSynchronize(nccl_stream)` ,这种模式在某些情况下可以获得更好的性能。 +如果 FLAGS_sync_nccl_allreduce 为 True,则会在 allreduce_op_handle 中调用 `cudaStreamSynchronize(nccl_stream)` ,这种模式在某些情况下可以获得更好的性能。 取值范围 --------------- -Bool型,缺省值为True。 +Bool 型,缺省值为 True。 示例 ------- -FLAGS_sync_nccl_allreduce=True - 在allreduce_op_handle中调用 `cudaStreamSynchronize(nccl_stream)` 。 +FLAGS_sync_nccl_allreduce=True - 在 allreduce_op_handle 中调用 `cudaStreamSynchronize(nccl_stream)` 。 FLAGS_tracer_profile_fname ******************************************* -(始于1.4.0) +(始于 1.4.0) -FLAGS_tracer_profile_fname表示由gperftools生成的命令式跟踪器的分析器文件名。仅在编译选项选择`WITH_PROFILER = ON`时有效。如果禁用则设为empty。 +FLAGS_tracer_profile_fname 表示由 gperftools 生成的命令式跟踪器的分析器文件名。仅在编译选项选择`WITH_PROFILER = ON`时有效。如果禁用则设为 empty。 取值范围 --------------- -String型,缺省值为("gperf")。 +String 型,缺省值为("gperf")。 示例 ------- diff --git a/docs/guides/hardware_support/hardware_info_cn.md b/docs/guides/hardware_support/hardware_info_cn.md index 529d102d122..3e342f90de7 100644 --- a/docs/guides/hardware_support/hardware_info_cn.md +++ b/docs/guides/hardware_support/hardware_info_cn.md @@ -6,43 +6,43 @@ | 分类 | 架构 | 公司 | 型号 | 安装 | 源码编译 | 完全支持训练 | 支持部分模型 | | ---- | ---- | ---- | ---- |---- | ---- |---- | ---- | -| 服务端CPU | x86 | Intel | 常见CPU型号如Xeon、Core全系列 | [安装](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html) | [源码编译](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/compile/linux-compile.html) | ✔️ | | -| 服务端GPU | | NVIDIA | 常见GPU型号如V100、T4等| [安装](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html) | [源码编译](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/compile/linux-compile.html) | ✔️ | | -| AI加速芯片 | 达芬奇 | 华为 | 昇腾910 | 
[安装](./npu_docs/paddle_install_cn.html) | [源码编译](./npu_docs/paddle_install_cn.html#anzhuangfangshi-tongguoyuanmabianyianzhuang) | | ✔️ | -| AI加速芯片 | | 海光 | 海光DCU | [安装](./rocm_docs/paddle_install_cn.html#wheel) | [源码编译](./rocm_docs/paddle_install_cn.html#anzhuangfangshier-tongguoyuanmabianyianzhuang) | ✔️ | [支持模型](./rocm_docs/paddle_rocm_cn.html) | -| AI加速芯片 | XPU | 百度 | 昆仑K200、R200等 | [安装](./xpu_docs/paddle_install_xpu2_cn.html#wheel) | [源码编译](./xpu_docs/paddle_install_xpu2_cn.html#xpu) | | [支持模型](./xpu_docs/paddle_2.0_xpu2_cn.html) | -| AI加速芯片 | IPU | Graphcore | GC200 | | [源码编译](./ipu_docs/paddle_install_cn.html) | | | +| 服务端 CPU | x86 | Intel | 常见 CPU 型号如 Xeon、Core 全系列 | [安装](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html) | [源码编译](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/compile/linux-compile.html) | ✔️ | | +| 服务端 GPU | | NVIDIA | 常见 GPU 型号如 V100、T4 等| [安装](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html) | [源码编译](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/compile/linux-compile.html) | ✔️ | | +| AI 加速芯片 | 达芬奇 | 华为 | 昇腾 910 | [安装](./npu_docs/paddle_install_cn.html) | [源码编译](./npu_docs/paddle_install_cn.html#anzhuangfangshi-tongguoyuanmabianyianzhuang) | | ✔️ | +| AI 加速芯片 | | 海光 | 海光 DCU | [安装](./rocm_docs/paddle_install_cn.html#wheel) | [源码编译](./rocm_docs/paddle_install_cn.html#anzhuangfangshier-tongguoyuanmabianyianzhuang) | ✔️ | [支持模型](./rocm_docs/paddle_rocm_cn.html) | +| AI 加速芯片 | XPU | 百度 | 昆仑 K200、R200 等 | [安装](./xpu_docs/paddle_install_xpu2_cn.html#wheel) | [源码编译](./xpu_docs/paddle_install_xpu2_cn.html#xpu) | | [支持模型](./xpu_docs/paddle_2.0_xpu2_cn.html) | +| AI 加速芯片 | IPU | Graphcore | GC200 | | [源码编译](./ipu_docs/paddle_install_cn.html) | | | ## Paddle Inference | 分类 | 架构 | 公司 | 型号 | 预编译库 | 源码编译 | 完全支持推理 | 支持部分模型 | | ---- | ---- | ---- | ---- |---- | ---- |---- | 
---- | -| 服务端CPU | x86 | Intel | 常见CPU型号如Xeon、Core全系列等 | [预编译库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) | [源码编译](https://paddleinference.paddlepaddle.org.cn/user_guides/source_compile.html) | ✔️ | | -| 服务端GPU | | NVIDIA | 常见GPU型号如V100、T4等 | [预编译库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) | [源码编译](https://paddleinference.paddlepaddle.org.cn/user_guides/source_compile.html) | ✔️ | | -| 移动端GPU | | NVIDIA | Jetson系列 | [预编译库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) | [源码编译](https://paddleinference.paddlepaddle.org.cn/user_guides/source_compile.html) | ✔️ | | -| AI加速芯片 | 达芬奇 | 华为 | 昇腾910 | 即将提供 | | | | -| AI加速芯片 | | 海光 | 海光DCU | [预编译库](./rocm_docs/paddle_install_cn.html) | [源码编译](./rocm_docs/paddle_install_cn.html) | ✔️ | [支持模型](./rocm_docs/paddle_rocm_cn.html) | -| AI加速芯片 | XPU | 百度 | 昆仑K200、R200等 | [预编译库](./xpu_docs/inference_install_example_cn.html#wheel) | [源码编译](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/09_hardware_support/xpu_docs/paddle_install_cn.html#id2) | | [支持模型](./xpu_docs/paddle_2.0_xpu_cn.html#xunlianzhichi) | -| 服务端CPU | ARM | 飞腾 | FT-2000+/64、S2500 | |[源码编译](../../install/compile/arm-compile.html#anchor-1) | | | -| 服务端CPU | ARM | 华为 | 鲲鹏 920 2426SK | |[源码编译](../../install/compile/arm-compile.html) | | | -| 服务端CPU | MIPS | 龙芯 | 龙芯3A4000、3A5000、3C5000L | |[源码编译](../../install/compile/mips-compile.html#anchor-0) | | | -| 服务端CPU | x86 | 兆芯 | 全系列CPU | |[源码编译](../../install/compile/zhaoxin-compile.html#anchor-1) | | | +| 服务端 CPU | x86 | Intel | 常见 CPU 型号如 Xeon、Core 全系列等 | [预编译库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) | [源码编译](https://paddleinference.paddlepaddle.org.cn/user_guides/source_compile.html) | ✔️ | | +| 服务端 GPU | | NVIDIA | 常见 GPU 型号如 V100、T4 等 | [预编译库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) | 
[源码编译](https://paddleinference.paddlepaddle.org.cn/user_guides/source_compile.html) | ✔️ | | +| 移动端 GPU | | NVIDIA | Jetson 系列 | [预编译库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) | [源码编译](https://paddleinference.paddlepaddle.org.cn/user_guides/source_compile.html) | ✔️ | | +| AI 加速芯片 | 达芬奇 | 华为 | 昇腾 910 | 即将提供 | | | | +| AI 加速芯片 | | 海光 | 海光 DCU | [预编译库](./rocm_docs/paddle_install_cn.html) | [源码编译](./rocm_docs/paddle_install_cn.html) | ✔️ | [支持模型](./rocm_docs/paddle_rocm_cn.html) | +| AI 加速芯片 | XPU | 百度 | 昆仑 K200、R200 等 | [预编译库](./xpu_docs/inference_install_example_cn.html#wheel) | [源码编译](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/09_hardware_support/xpu_docs/paddle_install_cn.html#id2) | | [支持模型](./xpu_docs/paddle_2.0_xpu_cn.html#xunlianzhichi) | +| 服务端 CPU | ARM | 飞腾 | FT-2000+/64、S2500 | |[源码编译](../../install/compile/arm-compile.html#anchor-1) | | | +| 服务端 CPU | ARM | 华为 | 鲲鹏 920 2426SK | |[源码编译](../../install/compile/arm-compile.html) | | | +| 服务端 CPU | MIPS | 龙芯 | 龙芯 3A4000、3A5000、3C5000L | |[源码编译](../../install/compile/mips-compile.html#anchor-0) | | | +| 服务端 CPU | x86 | 兆芯 | 全系列 CPU | |[源码编译](../../install/compile/zhaoxin-compile.html#anchor-1) | | | ## Paddle Lite | 分类 | 架构 | 公司 | 型号 | 预编译库 | 源码编译 | 完全支持推理 | 支持部分模型 | | ---- | ---- | ---- | ---- |---- | ---- |---- | ---- | -| 移动端CPU | ARM | ARM | Cortex-A系列 | [预编译库](https://paddlelite.paddlepaddle.org.cn/quick_start/release_lib.html) | [源码编译](https://paddlelite.paddlepaddle.org.cn/source_compile/compile_env.html) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/introduction/support_model_list.html) | -| 移动端GPU | | ARM | Mali系列 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/opencl.html) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/introduction/support_model_list.html) | -| 移动端GPU | | 高通 | Adreno系列 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/opencl.html) | | 
[支持模型](https://paddlelite.paddlepaddle.org.cn/introduction/support_model_list.html) | -| AI加速芯片 | | 华为 | Kirin 810/990/9000 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/huawei_kirin_npu.html#id5) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/huawei_kirin_npu.html#id1) | -| AI加速芯片 | | 华为 | 昇腾310 | | 即将提供 | | | -| AI加速芯片 | | RockChip | RK1808 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/rockchip_npu.html#id5) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/rockchip_npu.html#id1) | -| AI加速芯片 | | MTK | NeuroPilot APU | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/mediatek_apu.html#id1) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/mediatek_apu.html#id1) | -| AI加速芯片 | | Imagination | PowerVR 2NX | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/huawei_kirin_npu.html#id5) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/huawei_kirin_npu.html#id1) | -| AI加速芯片 | | 百度 | 昆仑K200、R200等 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/baidu_xpu.html#id4) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/baidu_xpu.html#id1) | -| AI加速芯片 | | 寒武纪 | 思元270 | | 即将提供 | | | -| AI加速芯片 | | 比特大陆 | 算丰BM16系列芯片 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/bitmain.html#id5) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/bitmain.html#id1) | -| FPGA | | 百度 | 百度Edgeboard开发板 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/baidu_xpu.html#id4) | | [支持模型](https://ai.baidu.com/ai-doc/HWCE/Qkda68drw) | +| 移动端 CPU | ARM | ARM | Cortex-A 系列 | [预编译库](https://paddlelite.paddlepaddle.org.cn/quick_start/release_lib.html) | [源码编译](https://paddlelite.paddlepaddle.org.cn/source_compile/compile_env.html) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/introduction/support_model_list.html) | +| 移动端 GPU | | ARM | Mali 系列 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/opencl.html) | | 
[支持模型](https://paddlelite.paddlepaddle.org.cn/introduction/support_model_list.html) | +| 移动端 GPU | | 高通 | Adreno 系列 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/opencl.html) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/introduction/support_model_list.html) | +| AI 加速芯片 | | 华为 | Kirin 810/990/9000 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/huawei_kirin_npu.html#id5) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/huawei_kirin_npu.html#id1) | +| AI 加速芯片 | | 华为 | 昇腾 310 | | 即将提供 | | | +| AI 加速芯片 | | RockChip | RK1808 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/rockchip_npu.html#id5) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/rockchip_npu.html#id1) | +| AI 加速芯片 | | MTK | NeuroPilot APU | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/mediatek_apu.html#id1) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/mediatek_apu.html#id1) | +| AI 加速芯片 | | Imagination | PowerVR 2NX | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/huawei_kirin_npu.html#id5) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/huawei_kirin_npu.html#id1) | +| AI 加速芯片 | | 百度 | 昆仑 K200、R200 等 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/baidu_xpu.html#id4) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/baidu_xpu.html#id1) | +| AI 加速芯片 | | 寒武纪 | 思元 270 | | 即将提供 | | | +| AI 加速芯片 | | 比特大陆 | 算丰 BM16 系列芯片 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/bitmain.html#id5) | | [支持模型](https://paddlelite.paddlepaddle.org.cn/demo_guides/bitmain.html#id1) | +| FPGA | | 百度 | 百度 Edgeboard 开发板 | | [源码编译](https://paddlelite.paddlepaddle.org.cn/demo_guides/baidu_xpu.html#id4) | | [支持模型](https://ai.baidu.com/ai-doc/HWCE/Qkda68drw) | **注意:** 如果你想了解更多芯片支持的信息,请联系我们,邮箱为 Paddle-better@baidu.com。 diff --git a/docs/guides/hardware_support/index_cn.rst b/docs/guides/hardware_support/index_cn.rst index 12c1f6a1dc1..957806cb83e 100644 --- 
a/docs/guides/hardware_support/index_cn.rst +++ b/docs/guides/hardware_support/index_cn.rst @@ -7,10 +7,10 @@ 你可以通过以下内容,了解飞桨框架硬件支持相关的内容: - `飞桨硬件支持 <./hardware_info_cn.html>`_ : 说明飞桨产品支持的硬件。 -- `昆仑XPU芯片运行飞桨 <./xpu_docs/index_cn.html>`_ : 介绍如何在昆仑XPU芯片环境上安装和使用飞桨。 -- `海光DCU芯片运行飞桨 <./rocm_docs/index_cn.html>`_ : 介绍如何在海光DCU芯片环境上安装和使用飞桨。 -- `昇腾NPU芯片运行飞桨 <./npu_docs/index_cn.html>`_ : 介绍如何在昇腾环境上安装和使用飞桨。 -- `Graphcore IPU芯片运行飞桨 <./ipu_docs/index_cn.html>`_ : 介绍如何在IPU环境上安装和使用飞桨。 +- `昆仑 XPU 芯片运行飞桨 <./xpu_docs/index_cn.html>`_ : 介绍如何在昆仑 XPU 芯片环境上安装和使用飞桨。 +- `海光 DCU 芯片运行飞桨 <./rocm_docs/index_cn.html>`_ : 介绍如何在海光 DCU 芯片环境上安装和使用飞桨。 +- `昇腾 NPU 芯片运行飞桨 <./npu_docs/index_cn.html>`_ : 介绍如何在昇腾环境上安装和使用飞桨。 +- `Graphcore IPU 芯片运行飞桨 <./ipu_docs/index_cn.html>`_ : 介绍如何在 IPU 环境上安装和使用飞桨。 .. toctree:: :hidden: diff --git a/docs/guides/hardware_support/ipu_docs/index_cn.rst b/docs/guides/hardware_support/ipu_docs/index_cn.rst index 71966fc3e4c..8f7ac0f79d6 100644 --- a/docs/guides/hardware_support/ipu_docs/index_cn.rst +++ b/docs/guides/hardware_support/ipu_docs/index_cn.rst @@ -1,17 +1,17 @@ .. _cn_ipu_information: #################### -Graphcore IPU芯片运行飞桨 +Graphcore IPU 芯片运行飞桨 #################### -IPU是Graphcore推出的用于AI计算的专用芯片,Paddle IPU版可以支持在IPU上进行模型训练与预测。 +IPU 是 Graphcore 推出的用于 AI 计算的专用芯片,Paddle IPU 版可以支持在 IPU 上进行模型训练与预测。 -参考以下内容了解和体验在IPU芯片上运行飞桨: +参考以下内容了解和体验在 IPU 芯片上运行飞桨: -- `飞桨框架IPU版安装说明 <./paddle_install_cn.html>`_ : 飞桨框架IPU版安装说明 -- `飞桨框架IPU版训练示例 <./train_example_cn.html>`_ : 飞桨框架IPU版训练示例 -- `飞桨框架IPU版预测示例 <./infer_example_cn.html>`_ : 飞桨框架IPU版预测示例 +- `飞桨框架 IPU 版安装说明 <./paddle_install_cn.html>`_ : 飞桨框架 IPU 版安装说明 +- `飞桨框架 IPU 版训练示例 <./train_example_cn.html>`_ : 飞桨框架 IPU 版训练示例 +- `飞桨框架 IPU 版预测示例 <./infer_example_cn.html>`_ : 飞桨框架 IPU 版预测示例 .. 
toctree:: :hidden: diff --git a/docs/guides/hardware_support/ipu_docs/infer_example_cn.md b/docs/guides/hardware_support/ipu_docs/infer_example_cn.md index ccb312c2550..1055e382ee6 100644 --- a/docs/guides/hardware_support/ipu_docs/infer_example_cn.md +++ b/docs/guides/hardware_support/ipu_docs/infer_example_cn.md @@ -1,12 +1,12 @@ -# 飞桨框架IPU版预测示例 +# 飞桨框架 IPU 版预测示例 -飞桨框架IPU版支持飞桨原生推理库(Paddle Inference),适用于云端推理。 +飞桨框架 IPU 版支持飞桨原生推理库(Paddle Inference),适用于云端推理。 ## C++预测示例 -**第一步**:源码编译C++预测库 +**第一步**:源码编译 C++预测库 -当前Paddle IPU版只支持通过源码编译的方式提供C++预测库,编译环境准备请参考 [飞桨框架IPU版安装说明](./paddle_install_cn.html)。 +当前 Paddle IPU 版只支持通过源码编译的方式提供 C++预测库,编译环境准备请参考 [飞桨框架 IPU 版安装说明](./paddle_install_cn.html)。 ``` # 下载源码 @@ -33,7 +33,7 @@ make -j$(nproc) git clone https://github.com/PaddlePaddle/Paddle-Inference-Demo ``` -将获得的C++预测库拷贝并重命名一份到 `Paddle-Inference-Demo/c++/lib/paddle_inference`。 +将获得的 C++预测库拷贝并重命名一份到 `Paddle-Inference-Demo/c++/lib/paddle_inference`。 ``` cd Paddle-Inference-Demo/c++/paddle-ipu diff --git a/docs/guides/hardware_support/ipu_docs/paddle_install_cn.md b/docs/guides/hardware_support/ipu_docs/paddle_install_cn.md index db991bb2b12..c247a80c5f9 100644 --- a/docs/guides/hardware_support/ipu_docs/paddle_install_cn.md +++ b/docs/guides/hardware_support/ipu_docs/paddle_install_cn.md @@ -1,11 +1,11 @@ -# 飞桨框架IPU版安装说明 +# 飞桨框架 IPU 版安装说明 -飞桨框架IPU版支持基于 Graphcore IPU 的 Python 的训练和原生推理,当前支持的 Poplar 版本为 2.5.0, 提供两种安装方式: +飞桨框架 IPU 版支持基于 Graphcore IPU 的 Python 的训练和原生推理,当前支持的 Poplar 版本为 2.5.0, 提供两种安装方式: -- Docker镜像方式启动 +- Docker 镜像方式启动 - 通过源代码编译安装 -## Docker镜像方式启动 +## Docker 镜像方式启动 当前 Docker 镜像包含预编译的飞桨框架 IPU 版,镜像基于 Ubuntu18.04 基础镜像构建,内置的 Python 版本为 Python3.7。 @@ -84,7 +84,7 @@ export PATH=/opt/popart/bin:/opt/poplar/lib:${PATH} export LD_LIBRARY_PATH=/opt/popart/lib:/opt/poplar/lib:${LD_LIBRARY_PATH} # PATH 中存在 Python 3.7 -# 注意:镜像中的 python 3.7 通过 miniconda 安装,请通过 conda activate base 命令加载Python 3.7环境 +# 注意:镜像中的 python 3.7 通过 miniconda 安装,请通过 conda activate base 命令加载 Python 3.7 环境 export 
PATH=/opt/conda/bin:${PATH} ``` diff --git a/docs/guides/hardware_support/ipu_docs/train_example_cn.md b/docs/guides/hardware_support/ipu_docs/train_example_cn.md index 06e9e079b26..d28dcdbcd65 100644 --- a/docs/guides/hardware_support/ipu_docs/train_example_cn.md +++ b/docs/guides/hardware_support/ipu_docs/train_example_cn.md @@ -1,8 +1,8 @@ -# 飞桨框架IPU版训练示例 +# 飞桨框架 IPU 版训练示例 -## BERT-Base训练示例 +## BERT-Base 训练示例 -示例将默认用户已安装飞桨框架IPU版,并且已经配置运行时需要的环境(建议在Docker环境中使用飞桨框架IPU版)。 +示例将默认用户已安装飞桨框架 IPU 版,并且已经配置运行时需要的环境(建议在 Docker 环境中使用飞桨框架 IPU 版)。 示例代码位于 [Paddle-BERT with Graphcore IPUs](https://github.com/PaddlePaddle/PaddleNLP/tree/develop/model_zoo/bert/static_ipu) @@ -23,4 +23,4 @@ pip install -r requirements.txt **第三步**:执行模型训练 -按照 `README.md` 的描述开始BERT-Base模型的预训练和在SQuAD v1.1数据集上的模型微调。 +按照 `README.md` 的描述开始 BERT-Base 模型的预训练和在 SQuAD v1.1 数据集上的模型微调。 diff --git a/docs/guides/hardware_support/npu_docs/index_cn.rst b/docs/guides/hardware_support/npu_docs/index_cn.rst index 4cc0963b2ed..daf112d102e 100644 --- a/docs/guides/hardware_support/npu_docs/index_cn.rst +++ b/docs/guides/hardware_support/npu_docs/index_cn.rst @@ -1,15 +1,15 @@ .. _cn_npu_information: #################### -昇腾NPU芯片运行飞桨 +昇腾 NPU 芯片运行飞桨 #################### -华为昇腾910(Ascend 910)是一款具有超高算力的AI处理器。Paddle NPU 版当前可以支持在华为鲲鹏CPU与昇腾NPU上进行模型训练与推理。 +华为昇腾 910(Ascend 910)是一款具有超高算力的 AI 处理器。Paddle NPU 版当前可以支持在华为鲲鹏 CPU 与昇腾 NPU 上进行模型训练与推理。 参考以下内容可快速了解和体验在昇腾芯片上运行飞桨: -- `飞桨框架昇腾NPU版安装说明 <./paddle_install_cn.html>`_ : 飞桨框架昇腾NPU版安装说明 -- `飞桨框架昇腾NPU版训练示例 <./train_example_cn.html>`_ : 飞桨框架昇腾NPU版训练示例 +- `飞桨框架昇腾 NPU 版安装说明 <./paddle_install_cn.html>`_ : 飞桨框架昇腾 NPU 版安装说明 +- `飞桨框架昇腾 NPU 版训练示例 <./train_example_cn.html>`_ : 飞桨框架昇腾 NPU 版训练示例 .. 
toctree:: :hidden: diff --git a/docs/guides/hardware_support/npu_docs/paddle_install_cn.md b/docs/guides/hardware_support/npu_docs/paddle_install_cn.md index eb9565bd2c2..773d795df7f 100644 --- a/docs/guides/hardware_support/npu_docs/paddle_install_cn.md +++ b/docs/guides/hardware_support/npu_docs/paddle_install_cn.md @@ -1,31 +1,31 @@ -# 飞桨框架昇腾NPU版安装说明 +# 飞桨框架昇腾 NPU 版安装说明 -飞桨框架NPU版支持基于华为鲲鹏CPU与昇腾NPU的Python的训练和原生推理。 +飞桨框架 NPU 版支持基于华为鲲鹏 CPU 与昇腾 NPU 的 Python 的训练和原生推理。 ### 环境准备 -当前Paddle昇腾910 NPU版支持的华为CANN社区版5.0.2.alpha005,请先根据华为昇腾910 NPU的要求,进行相关NPU运行环境的部署和配置,参考华为官方文档 [CANN社区版安装指南](https://support.huaweicloud.com/instg-cli-cann502-alpha005/atlasdeploy_03_0002.html)。 +当前 Paddle 昇腾 910 NPU 版支持的华为 CANN 社区版 5.0.2.alpha005,请先根据华为昇腾 910 NPU 的要求,进行相关 NPU 运行环境的部署和配置,参考华为官方文档 [CANN 社区版安装指南](https://support.huaweicloud.com/instg-cli-cann502-alpha005/atlasdeploy_03_0002.html)。 -Paddle 昇腾910 NPU版目前仅支持源码编译安装,其中编译与运行相关的环境要求如下: +Paddle 昇腾 910 NPU 版目前仅支持源码编译安装,其中编译与运行相关的环境要求如下: -- **CPU处理器:** 鲲鹏920 +- **CPU 处理器:** 鲲鹏 920 - **操作系统:** Ubuntu 18.04 / CentOS 7.6 / KylinV10SP1 / EulerOS 2.8 -- **CANN社区版:** 5.0.2.alpha005 -- **Python版本:** 3.7 -- **CMake版本:** 3.15+ +- **CANN 社区版:** 5.0.2.alpha005 +- **Python 版本:** 3.7 +- **CMake 版本:** 3.15+ - **GCC/G++版本:** 8.2+ ## 安装方式:通过源码编译安装 -**第一步**:准备 CANN 社区版 5.0.2.alpha005 运行环境 (推荐使用Paddle镜像) +**第一步**:准备 CANN 社区版 5.0.2.alpha005 运行环境 (推荐使用 Paddle 镜像) -可以直接从Paddle的官方镜像库拉取预先装有 CANN 社区版 5.0.2.alpha005 的 docker 镜像,或者根据 [CANN社区版安装指南](https://support.huaweicloud.com/instg-cli-cann502-alpha005/atlasdeploy_03_0002.html) 来准备相应的开发与运行环境。 +可以直接从 Paddle 的官方镜像库拉取预先装有 CANN 社区版 5.0.2.alpha005 的 docker 镜像,或者根据 [CANN 社区版安装指南](https://support.huaweicloud.com/instg-cli-cann502-alpha005/atlasdeploy_03_0002.html) 来准备相应的开发与运行环境。 ```bash # 拉取镜像 docker pull paddlepaddle/paddle:latest-dev-cann5.0.2.alpha005-gcc82-aarch64 -# 启动容器,注意这里的参数 --device,容器仅映射设备ID为4到7的4张NPU卡,如需映射其他卡相应增改设备ID号即可 +# 启动容器,注意这里的参数 --device,容器仅映射设备 ID 为 4 到 7 的 4 张 NPU 卡,如需映射其他卡相应增改设备 ID 号即可 docker run -it 
--name paddle-npu-dev -v /home/:/workspace \ --pids-limit 409600 --network=host --shm-size=128G \ --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \ @@ -39,7 +39,7 @@ docker run -it --name paddle-npu-dev -v /home/:/workspace \ -v /usr/local/dcmi:/usr/local/dcmi \ paddlepaddle/paddle:latest-dev-cann5.0.2.alpha005-gcc82-aarch64 /bin/bash -# 检查容器中是否可以正确识别映射的昇腾DCU设备 +# 检查容器中是否可以正确识别映射的昇腾 DCU 设备 npu-smi info # 预期得到类似如下的结果 @@ -63,7 +63,7 @@ npu-smi info +======================+===============+=============================================+ ``` -**第二步**:下载Paddle源码并编译,CMAKE编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) +**第二步**:下载 Paddle 源码并编译,CMAKE 编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) ```bash # 下载源码 @@ -73,7 +73,7 @@ cd Paddle # 创建编译目录 mkdir build && cd build -# 执行cmake +# 执行 cmake cmake .. -DPY_VERSION=3.7 -DWITH_ASCEND=OFF -DWITH_ARM=ON -DWITH_ASCEND_CL=ON \ -DWITH_ASCEND_INT64=ON -DWITH_DISTRIBUTE=ON -DWITH_TESTING=ON -DON_INFER=ON \ -DCMAKE_BUILD_TYPE=Release -DCMAKE_EXPORT_COMPILE_COMMANDS=ON @@ -82,9 +82,9 @@ cmake .. -DPY_VERSION=3.7 -DWITH_ASCEND=OFF -DWITH_ARM=ON -DWITH_ASCEND_CL=ON \ make TARGET=ARMV8 -j$(nproc) ``` -**第三步**:安装与验证编译生成的wheel包 +**第三步**:安装与验证编译生成的 wheel 包 -编译完成之后进入`Paddle/build/python/dist`目录即可找到编译生成的.whl安装包,安装与验证命令如下: +编译完成之后进入`Paddle/build/python/dist`目录即可找到编译生成的.whl 安装包,安装与验证命令如下: ```bash # 安装命令 @@ -102,7 +102,7 @@ PaddlePaddle is installed successfully! 
Let's start deep learning with PaddlePad ## 如何卸载 -请使用以下命令卸载Paddle: +请使用以下命令卸载 Paddle: ```bash pip uninstall paddlepaddle-npu diff --git a/docs/guides/hardware_support/npu_docs/train_example_cn.md b/docs/guides/hardware_support/npu_docs/train_example_cn.md index 2781fab67c8..cc7b9f31753 100644 --- a/docs/guides/hardware_support/npu_docs/train_example_cn.md +++ b/docs/guides/hardware_support/npu_docs/train_example_cn.md @@ -1,6 +1,6 @@ -# 飞桨框架昇腾NPU版训练示例 +# 飞桨框架昇腾 NPU 版训练示例 -## YOLOv3训练示例 +## YOLOv3 训练示例 **第一步**:下载并安装 PaddleDetection 套件 @@ -17,7 +17,7 @@ python setup.py install pip install -r requirements.txt ``` -也可以访问PaddleDetection的 [GitHub Repo](https://github.com/PaddlePaddle/PaddleDetection) 下载 develop 分支的源码。 +也可以访问 PaddleDetection 的 [GitHub Repo](https://github.com/PaddlePaddle/PaddleDetection) 下载 develop 分支的源码。 **第二步**:准备 VOC 训练数据集 @@ -52,7 +52,7 @@ INFO:ppdet.utils.voc_eval:mAP(0.50, integral) = 76.78% **第四步**:运行多卡训练 -> 注意:多卡训练请参考本页下一章节进行 "NPU多卡训练配置" 的准备。 +> 注意:多卡训练请参考本页下一章节进行 "NPU 多卡训练配置" 的准备。 ```bash # NPU 多卡训练配置 @@ -75,15 +75,15 @@ python -u tools/eval.py -c configs/yolov3_darknet_roadsign.yml -o use_npu=True INFO:ppdet.utils.voc_eval:mAP(0.50, integral) = 83.00% ``` -## NPU多卡训练配置 +## NPU 多卡训练配置 -**预先要求**:请先根据华为昇腾910 NPU的文档 [配置device的网卡IP](https://support.huaweicloud.com/instg-cli-cann502-alpha005/atlasdeploy_03_0105.html) 进行相关NPU运行环境的部署和配置,配置完成后检查机器下存在 `/etc/hccn.conf` 文件。 +**预先要求**:请先根据华为昇腾 910 NPU 的文档 [配置 device 的网卡 IP](https://support.huaweicloud.com/instg-cli-cann502-alpha005/atlasdeploy_03_0105.html) 进行相关 NPU 运行环境的部署和配置,配置完成后检查机器下存在 `/etc/hccn.conf` 文件。 如果是物理机环境,请根据华为官网的 [hccl_tools 说明文档](https://github.com/mindspore-ai/mindspore/tree/v1.4.0/model_zoo/utils/hccl_tools) 进行操作。如果是根据 Paddle 官方镜像启动的容器环境,请根据以下步骤进行操作: -**第一步**:根据容器启动时映射的设备ID,创建容器内的 `/etc/hccn.conf` 文件 +**第一步**:根据容器启动时映射的设备 ID,创建容器内的 `/etc/hccn.conf` 文件 -例如物理机上的8卡的原始 `/etc/hccn.conf` 文件内容如下: +例如物理机上的 8 卡的原始 `/etc/hccn.conf` 文件内容如下: ``` address_0=192.168.10.21 @@ -104,7 +104,7 @@ 
address_7=192.168.40.22 netmask_7=255.255.255.0 ``` -容器启动命令中映射的设备ID为4到7的4张NPU卡,则创建创建容器内的 `/etc/hccn.conf` 文件内容如下: +容器启动命令中映射的设备 ID 为 4 到 7 的 4 张 NPU 卡,则创建创建容器内的 `/etc/hccn.conf` 文件内容如下: > 注意:这里的 address_4 和 netmask_4 需要相应的修改为 address_0 和 netmask_0,以此类推 @@ -167,12 +167,12 @@ python hccl_tools.py --device_num "[0,4)" --server_ip 127.0.0.1 } ``` -**第三步**:运行Paddle多卡训练之前,需要先配置名为 `RANK_TABLE_FILE` 的环境变量,指向上一步生成的json文件的绝对路径 +**第三步**:运行 Paddle 多卡训练之前,需要先配置名为 `RANK_TABLE_FILE` 的环境变量,指向上一步生成的 json 文件的绝对路径 ```bash # 1) 设置 ranktable 文件的环境变量 export RANK_TABLE_FILE=$(readlink -f hccl_4p_0123_127.0.0.1.json) -# 或者直接修改为json文件的绝对路径 +# 或者直接修改为 json 文件的绝对路径 export RANK_TABLE_FILE=/root/hccl_4p_0123_127.0.0.1.json # 2) 设置 HCCL 相关环境变量 diff --git a/docs/guides/hardware_support/rocm_docs/index_cn.rst b/docs/guides/hardware_support/rocm_docs/index_cn.rst index e050d3a980a..582746e2f15 100644 --- a/docs/guides/hardware_support/rocm_docs/index_cn.rst +++ b/docs/guides/hardware_support/rocm_docs/index_cn.rst @@ -1,17 +1,17 @@ .. _cn_rocm_information: #################### -海光DCU芯片运行飞桨 +海光 DCU 芯片运行飞桨 #################### -DCU(Deep Computing Unit 深度计算器)是 海光(HYGON)推出的一款专门用于AI人工智能和深度学习的加速卡。Paddle ROCm版当前可以支持在海光CPU与DCU上进行模型训练与预测。 +DCU(Deep Computing Unit 深度计算器)是 海光(HYGON)推出的一款专门用于 AI 人工智能和深度学习的加速卡。Paddle ROCm 版当前可以支持在海光 CPU 与 DCU 上进行模型训练与预测。 参考以下内容可快速了解和体验在海光芯片上运行飞桨: -- `飞桨框架ROCm版支持模型 <./paddle_rocm_cn.html>`_ : 飞桨框架ROCm版支持模型 -- `飞桨框架ROCm版安装说明 <./paddle_install_cn.html>`_ : 飞桨框架ROCm版安装说明 -- `飞桨框架ROCm版训练示例 <./train_example_cn.html>`_ : 飞桨框架ROCm版训练示例 -- `飞桨框架ROCm版预测示例 <./infer_example_cn.html>`_ : 飞桨框架ROCm版预测示例 +- `飞桨框架 ROCm 版支持模型 <./paddle_rocm_cn.html>`_ : 飞桨框架 ROCm 版支持模型 +- `飞桨框架 ROCm 版安装说明 <./paddle_install_cn.html>`_ : 飞桨框架 ROCm 版安装说明 +- `飞桨框架 ROCm 版训练示例 <./train_example_cn.html>`_ : 飞桨框架 ROCm 版训练示例 +- `飞桨框架 ROCm 版预测示例 <./infer_example_cn.html>`_ : 飞桨框架 ROCm 版预测示例 .. 
toctree:: :hidden: diff --git a/docs/guides/hardware_support/rocm_docs/infer_example_cn.md b/docs/guides/hardware_support/rocm_docs/infer_example_cn.md index cd034e40427..ccbebe3714d 100644 --- a/docs/guides/hardware_support/rocm_docs/infer_example_cn.md +++ b/docs/guides/hardware_support/rocm_docs/infer_example_cn.md @@ -1,14 +1,14 @@ -# 飞桨框架ROCm版预测示例 +# 飞桨框架 ROCm 版预测示例 -使用海光CPU/DCU进行预测与使用Intel CPU/Nvidia GPU预测相同,支持飞桨原生推理库(Paddle Inference),适用于高性能服务器端、云端推理。当前Paddle ROCm版本完全兼容Paddle CUDA版本的 C++/Python API,直接使用原有的GPU预测命令和参数即可。 +使用海光 CPU/DCU 进行预测与使用 Intel CPU/Nvidia GPU 预测相同,支持飞桨原生推理库(Paddle Inference),适用于高性能服务器端、云端推理。当前 Paddle ROCm 版本完全兼容 Paddle CUDA 版本的 C++/Python API,直接使用原有的 GPU 预测命令和参数即可。 ## C++预测部署 -**注意**:更多C++预测API使用说明请参考 [Paddle Inference - C++ API](https://paddleinference.paddlepaddle.org.cn/api_reference/cxx_api_index.html) +**注意**:更多 C++预测 API 使用说明请参考 [Paddle Inference - C++ API](https://paddleinference.paddlepaddle.org.cn/api_reference/cxx_api_index.html) -**第一步**:源码编译C++预测库 +**第一步**:源码编译 C++预测库 -当前 Paddle ROCm版只支持通过源码编译的方式提供C++预测库。编译环境准备请参考 [飞桨框架ROCm版安装说明:通过源码编译安装](./paddle_install_cn.html)。 +当前 Paddle ROCm 版只支持通过源码编译的方式提供 C++预测库。编译环境准备请参考 [飞桨框架 ROCm 版安装说明:通过源码编译安装](./paddle_install_cn.html)。 ```bash # 下载源码,切换到 release/2.1 分支 @@ -18,7 +18,7 @@ cd Paddle # 创建编译目录 mkdir build && cd build -# 执行cmake,注意这里需打开预测优化选项 ON_INFER +# 执行 cmake,注意这里需打开预测优化选项 ON_INFER cmake .. 
-DPY_VERSION=3.7 -DWITH_ROCM=ON -DWITH_TESTING=OFF -DON_INFER=ON \ -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_EXPORT_COMPILE_COMMANDS=ON @@ -60,7 +60,7 @@ build/paddle_inference_install_dir └── version.txt ``` -其中 `version.txt` 文件中记录了该预测库的版本信息,包括Git Commit ID、使用OpenBlas或MKL数学库、ROCm/MIOPEN版本号,如: +其中 `version.txt` 文件中记录了该预测库的版本信息,包括 Git Commit ID、使用 OpenBlas 或 MKL 数学库、ROCm/MIOPEN 版本号,如: ```bash GIT COMMIT ID: e75412099f97a49701324788b468d80391293ea9 @@ -143,13 +143,13 @@ I0602 04:12:04.106670 52627 resnet50_test.cc:88] 800 : 3.85254e-25 I0602 04:12:04.106683 52627 resnet50_test.cc:88] 900 : 1.52391e-30 ``` -## Python预测部署示例 +## Python 预测部署示例 -**注意**:更多Python预测API使用说明请参考 [Paddle Inference - Python API](https://paddleinference.paddlepaddle.org.cn/api_reference/python_api_index.html) +**注意**:更多 Python 预测 API 使用说明请参考 [Paddle Inference - Python API](https://paddleinference.paddlepaddle.org.cn/api_reference/python_api_index.html) **第一步**:安装 Python 预测库 -Paddle ROCm 版的 Python 预测库请参考 [飞桨框架ROCm版安装说明](./paddle_install_cn.html) 进行安装或编译。 +Paddle ROCm 版的 Python 预测库请参考 [飞桨框架 ROCm 版安装说明](./paddle_install_cn.html) 进行安装或编译。 **第二步**:准备预测部署模型 @@ -195,13 +195,13 @@ def main(): input_handle.reshape([args.batch_size, 3, 318, 318]) input_handle.copy_from_cpu(fake_input) - # 运行predictor + # 运行 predictor predictor.run() # 获取输出 output_names = predictor.get_output_names() output_handle = predictor.get_output_handle(output_names[0]) - output_data = output_handle.copy_to_cpu() # numpy.ndarray类型 + output_data = output_handle.copy_to_cpu() # numpy.ndarray 类型 print("Output data size is {}".format(output_data.size)) print("Output data shape is {}".format(output_data.shape)) @@ -219,7 +219,7 @@ if __name__ == "__main__": **第四步**:执行预测程序 ```bash -# 参数输入为本章节第2步中下载的 ResNet50 模型 +# 参数输入为本章节第 2 步中下载的 ResNet50 模型 python python_demo.py --model_file ./resnet50/inference.pdmodel \ --params_file ./resnet50/inference.pdiparams \ --batch_size 2 diff --git 
a/docs/guides/hardware_support/rocm_docs/paddle_install_cn.md b/docs/guides/hardware_support/rocm_docs/paddle_install_cn.md index e3712970a87..75bb4c4bac0 100644 --- a/docs/guides/hardware_support/rocm_docs/paddle_install_cn.md +++ b/docs/guides/hardware_support/rocm_docs/paddle_install_cn.md @@ -1,29 +1,29 @@ -# 飞桨框架ROCm版安装说明 +# 飞桨框架 ROCm 版安装说明 -飞桨框架ROCm版支持基于海光CPU和DCU的Python的训练和原生预测,当前支持的ROCm版本为4.0.1, 提供两种安装方式: +飞桨框架 ROCm 版支持基于海光 CPU 和 DCU 的 Python 的训练和原生预测,当前支持的 ROCm 版本为 4.0.1, 提供两种安装方式: -- 通过预编译的wheel包安装 +- 通过预编译的 wheel 包安装 - 通过源代码编译安装 -## 安装方式一:通过wheel包安装 +## 安装方式一:通过 wheel 包安装 **注意**:当前仅提供基于 CentOS 7.8 & ROCm 4.0.1 的 docker 镜像,与 Python 3.7 的 wheel 安装包。 -**第一步**:准备 ROCm 4.0.1 运行环境 (推荐使用Paddle镜像) +**第一步**:准备 ROCm 4.0.1 运行环境 (推荐使用 Paddle 镜像) -可以直接从Paddle的官方镜像库拉取预先装有 ROCm 4.0.1 的 docker 镜像,或者根据 [ROCm安装文档](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html#centos-rhel) 来准备相应的运行环境。 +可以直接从 Paddle 的官方镜像库拉取预先装有 ROCm 4.0.1 的 docker 镜像,或者根据 [ROCm 安装文档](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html#centos-rhel) 来准备相应的运行环境。 ```bash # 拉取镜像 docker pull paddlepaddle/paddle:latest-dev-rocm4.0-miopen2.11 -# 启动容器,注意这里的参数,例如shm-size, device等都需要配置 +# 启动容器,注意这里的参数,例如 shm-size, device 等都需要配置 docker run -it --name paddle-rocm-dev --shm-size=128G \ --device=/dev/kfd --device=/dev/dri --group-add video \ --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \ paddlepaddle/paddle:latest-dev-rocm4.0-miopen2.11 /bin/bash -# 检查容器是否可以正确识别海光DCU设备 +# 检查容器是否可以正确识别海光 DCU 设备 rocm-smi # 预期得到以下结果: @@ -56,21 +56,21 @@ python -c "import paddle; paddle.utils.run_check()" **注意**:当前 Paddle 只支持 CentOS 7.8 & ROCm 4.0.1 编译环境,且根据 ROCm 4.0.1 的需求,支持的编译器为 devtoolset-7。 -**第一步**:准备 ROCm 4.0.1 编译环境 (推荐使用Paddle镜像) +**第一步**:准备 ROCm 4.0.1 编译环境 (推荐使用 Paddle 镜像) -可以直接从Paddle的官方镜像库拉取预先装有 ROCm 4.0.1 的 docker 镜像,或者根据 [ROCm安装文档](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html#centos-rhel) 来准备相应的运行环境。 +可以直接从 Paddle 的官方镜像库拉取预先装有 ROCm 
4.0.1 的 docker 镜像,或者根据 [ROCm 安装文档](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html#centos-rhel) 来准备相应的运行环境。 ```bash # 拉取镜像 docker pull paddlepaddle/paddle:latest-dev-rocm4.0-miopen2.11 -# 启动容器,注意这里的参数,例如shm-size, device等都需要配置 +# 启动容器,注意这里的参数,例如 shm-size, device 等都需要配置 docker run -it --name paddle-rocm-dev --shm-size=128G \ --device=/dev/kfd --device=/dev/dri --group-add video \ --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \ paddlepaddle/paddle:latest-dev-rocm4.0-miopen2.11 /bin/bash -# 检查容器是否可以正确识别海光DCU设备 +# 检查容器是否可以正确识别海光 DCU 设备 rocm-smi # 预期得到以下结果: @@ -85,7 +85,7 @@ GPU Temp AvgPwr SCLK MCLK Fan Perf PwrCap VRAM% GPU% ============================= End of ROCm SMI Log ============================== ``` -请在编译之前,检查如下的环境变量是否正确,如果没有则需要安装相应的依赖库,并导出相应的环境变量。以Paddle官方的镜像举例,环境变量如下: +请在编译之前,检查如下的环境变量是否正确,如果没有则需要安装相应的依赖库,并导出相应的环境变量。以 Paddle 官方的镜像举例,环境变量如下: ```bash # PATH 与 LD_LIBRARY_PATH 中存在 devtoolset-7,如果没有运行以下命令 @@ -99,11 +99,11 @@ export PATH=/opt/rocm/opencl/bin:/opt/rocm/bin:${PATH} export LD_LIBRARY_PATH=/opt/rocm/lib:${LD_LIBRARY_PATH} # PATH 中存在 Python 3.7 -# 注意:镜像中的 python 3.7 通过 miniconda 安装,请通过 conda activate base 命令加载Python 3.7环境 +# 注意:镜像中的 python 3.7 通过 miniconda 安装,请通过 conda activate base 命令加载 Python 3.7 环境 export PATH=/opt/conda/bin:${PATH} ``` -**第二步**:下载Paddle源码并编译,CMAKE编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) +**第二步**:下载 Paddle 源码并编译,CMAKE 编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) ```bash # 下载源码,默认 develop 分支 @@ -113,7 +113,7 @@ cd Paddle # 创建编译目录 mkdir build && cd build -# 执行cmake +# 执行 cmake cmake .. -DPY_VERSION=3.7 -DWITH_ROCM=ON -DWITH_TESTING=ON -DWITH_DISTRIBUTE=ON \ -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release @@ -121,9 +121,9 @@ cmake .. 
-DPY_VERSION=3.7 -DWITH_ROCM=ON -DWITH_TESTING=ON -DWITH_DISTRIBUTE=ON make -j$(nproc) ``` -**第三步**:安装与验证编译生成的wheel包 +**第三步**:安装与验证编译生成的 wheel 包 -编译完成之后进入`Paddle/build/python/dist`目录即可找到编译生成的.whl安装包,安装与验证命令如下: +编译完成之后进入`Paddle/build/python/dist`目录即可找到编译生成的.whl 安装包,安装与验证命令如下: ```bash # 安装命令 @@ -135,7 +135,7 @@ python -c "import paddle; paddle.utils.run_check()" ## 如何卸载 -请使用以下命令卸载Paddle: +请使用以下命令卸载 Paddle: ```bash pip uninstall paddlepaddle-rocm diff --git a/docs/guides/hardware_support/rocm_docs/paddle_rocm_cn.md b/docs/guides/hardware_support/rocm_docs/paddle_rocm_cn.md index e1ad470fc4a..489354d7a67 100644 --- a/docs/guides/hardware_support/rocm_docs/paddle_rocm_cn.md +++ b/docs/guides/hardware_support/rocm_docs/paddle_rocm_cn.md @@ -1,7 +1,7 @@ -# 飞桨框架ROCm版支持模型 +# 飞桨框架 ROCm 版支持模型 -目前Paddle ROCm版基于海光CPU(X86)和DCU支持以下模型的单机单卡/单机多卡的训练与推理。 +目前 Paddle ROCm 版基于海光 CPU(X86)和 DCU 支持以下模型的单机单卡/单机多卡的训练与推理。 ## 图像分类 @@ -121,7 +121,7 @@ ## 模型套件 -模型放置在飞桨模型套件中,各领域套件是 github.com/PaddlePaddle 下的独立repo,git clone下载即可获取所需的模型文件: +模型放置在飞桨模型套件中,各领域套件是 github.com/PaddlePaddle 下的独立 repo,git clone 下载即可获取所需的模型文件: | 领域 | 套件名称 | 分支/版本 | | ----------- | --------------- | ---------------- | diff --git a/docs/guides/hardware_support/rocm_docs/train_example_cn.md b/docs/guides/hardware_support/rocm_docs/train_example_cn.md index 4663c8b0632..d4a5f4b6d5d 100644 --- a/docs/guides/hardware_support/rocm_docs/train_example_cn.md +++ b/docs/guides/hardware_support/rocm_docs/train_example_cn.md @@ -1,8 +1,8 @@ -# 飞桨框架ROCm版训练示例 +# 飞桨框架 ROCm 版训练示例 -使用海光CPU/DCU进行训练与使用Intel CPU/Nvidia GPU训练相同,当前Paddle ROCm版本完全兼容Paddle CUDA版本的API,直接使用原有的GPU训练命令和参数即可。 +使用海光 CPU/DCU 进行训练与使用 Intel CPU/Nvidia GPU 训练相同,当前 Paddle ROCm 版本完全兼容 Paddle CUDA 版本的 API,直接使用原有的 GPU 训练命令和参数即可。 -#### ResNet50训练示例 +#### ResNet50 训练示例 **第一步**:下载 ResNet50 代码,并准备 ImageNet1k 数据集 @@ -10,7 +10,7 @@ cd path_to_clone_PaddleClas git clone https://github.com/PaddlePaddle/PaddleClas.git ``` -也可以访问PaddleClas的 [GitHub 
Repo](https://github.com/PaddlePaddle/PaddleClas) 直接下载源码。请根据[数据说明](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.0/docs/zh_CN/tutorials/data.md)文档准备ImageNet1k数据集。 +也可以访问 PaddleClas 的 [GitHub Repo](https://github.com/PaddlePaddle/PaddleClas) 直接下载源码。请根据[数据说明](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.0/docs/zh_CN/tutorials/data.md)文档准备 ImageNet1k 数据集。 **第二步**:运行训练 @@ -21,17 +21,17 @@ cd PaddleClas/ python -m paddle.distributed.launch --gpus="0,1,2,3" tools/train.py -c ./ppcls/configs/ImageNet/ResNet/ResNet50.yaml ``` -**第三步**:获取4卡训练得到的 Best Top1 Accuracy 结果如下 +**第三步**:获取 4 卡训练得到的 Best Top1 Accuracy 结果如下 ```bash -# CUDA 结果为 CUDA 10.1 + 4卡V100 训练 +# CUDA 结果为 CUDA 10.1 + 4 卡 V100 训练 2021-03-24 01:16:08,548 - INFO - The best top1 acc 0.76332, in epoch: 118 -# ROCm 结果为 ROCm 4.0.1 + 4卡DCU 训练 +# ROCm 结果为 ROCm 4.0.1 + 4 卡 DCU 训练 2021-04-07 10:26:31,651 - INFO - The best top1 acc 0.76308, in epoch: 109 ``` -#### YoloV3训练示例 +#### YoloV3 训练示例 **第一步**:下载 YoloV3 代码 @@ -39,7 +39,7 @@ python -m paddle.distributed.launch --gpus="0,1,2,3" tools/train.py -c ./ppcls/c cd path_to_clone_PaddleDetection git clone https://github.com/PaddlePaddle/PaddleDetection.git ``` -也可以访问PaddleDetection的 [GitHub Repo](https://github.com/PaddlePaddle/PaddleDetection) 直接下载源码。 +也可以访问 PaddleDetection 的 [GitHub Repo](https://github.com/PaddlePaddle/PaddleDetection) 直接下载源码。 **第二步**:准备 VOC 数据集 @@ -49,9 +49,9 @@ python download_voc.py python create_list.py ``` -**第三步**:修改config文件的参数 +**第三步**:修改 config 文件的参数 -模型Config文件 `configs/yolov3/yolov3_darknet53_270e_voc.yml` 中的默认参数为8卡设计,使用DCU单机4卡训练需要修改参数如下: +模型 Config 文件 `configs/yolov3/yolov3_darknet53_270e_voc.yml` 中的默认参数为 8 卡设计,使用 DCU 单机 4 卡训练需要修改参数如下: ```bash # 修改 configs/yolov3/_base_/optimizer_270e.yml @@ -70,12 +70,12 @@ cd PaddleDetection/ python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/yolov3/yolov3_darknet53_270e_voc.yml --eval ``` -**第五步**:获取4卡训练得到的 mAP 结果如下 +**第五步**:获取 4 卡训练得到的 mAP 结果如下 ```bash -# CUDA 
结果为 CUDA 10.1 + 4卡V100 训练 +# CUDA 结果为 CUDA 10.1 + 4 卡 V100 训练 [03/23 05:26:17] ppdet.metrics.metrics INFO: mAP(0.50, 11point) = 82.59% -# ROCm 结果为 ROCm 4.0.1 + 4卡DCU 训练 +# ROCm 结果为 ROCm 4.0.1 + 4 卡 DCU 训练 [03/28 16:02:52] ppdet.metrics.metrics INFO: mAP(0.50, 11point) = 83.02% ``` diff --git a/docs/guides/hardware_support/xpu_docs/index_cn.rst b/docs/guides/hardware_support/xpu_docs/index_cn.rst index 15a2b0337f7..e23ebab6b4c 100644 --- a/docs/guides/hardware_support/xpu_docs/index_cn.rst +++ b/docs/guides/hardware_support/xpu_docs/index_cn.rst @@ -4,21 +4,21 @@ 昆仑芯片运行飞桨 #################### -百度昆仑AI计算处理器(Baidu KUNLUN AI Computing Processor)是百度集十年AI产业技术实践于2019年推出的全功能AI芯片。基于自主研发的先进XPU架构,为云端和边缘端的人工智能业务而设计。 百度昆仑与飞桨及其他国产软硬件强强组合,打造一个全面领先的国产化AI技术生态,部署和应用于诸多 “人工智能+“的行业领域,包括智能云和高性能计算,智慧制造、智慧城市和安防等。更多昆仑XPU芯片详情及技术指标请 `点击这里 `_ 。 -参考以下内容可快速了解和体验昆仑XPU芯片上运行飞桨: +百度昆仑 AI 计算处理器(Baidu KUNLUN AI Computing Processor)是百度集十年 AI 产业技术实践于 2019 年推出的全功能 AI 芯片。基于自主研发的先进 XPU 架构,为云端和边缘端的人工智能业务而设计。 百度昆仑与飞桨及其他国产软硬件强强组合,打造一个全面领先的国产化 AI 技术生态,部署和应用于诸多 “人工智能+“的行业领域,包括智能云和高性能计算,智慧制造、智慧城市和安防等。更多昆仑 XPU 芯片详情及技术指标请 `点击这里 `_ 。 +参考以下内容可快速了解和体验昆仑 XPU 芯片上运行飞桨: -昆仑芯2代芯片: +昆仑芯 2 代芯片: - - `飞桨对昆仑2代芯片的支持 <./paddle_2.0_xpu_cn.html>`_ : 飞桨支持昆仑2代芯片(R200、R300)运行 - - `飞桨框架昆仑2代芯片安装说明 <./paddle_install_cn.html>`_ : 飞桨框架昆仑2代芯片(R200、R300)安装说明 - - `飞桨框架昆仑2代芯片训练示例 <./train_example_cn.html>`_ : 飞桨框架昆仑2代芯片(R200、R300)训练示例 + - `飞桨对昆仑 2 代芯片的支持 <./paddle_2.0_xpu_cn.html>`_ : 飞桨支持昆仑 2 代芯片(R200、R300)运行 + - `飞桨框架昆仑 2 代芯片安装说明 <./paddle_install_cn.html>`_ : 飞桨框架昆仑 2 代芯片(R200、R300)安装说明 + - `飞桨框架昆仑 2 代芯片训练示例 <./train_example_cn.html>`_ : 飞桨框架昆仑 2 代芯片(R200、R300)训练示例 -昆仑芯1代芯片: +昆仑芯 1 代芯片: - - `飞桨对昆仑1代芯片的支持 <./paddle_2.0_xpu_cn.html>`_ : 飞桨支持昆仑1代芯片(K100、K200)运行 - - `飞桨框架昆仑1代芯片安装说明 <./paddle_install_cn.html>`_ : 飞桨框架昆仑1代芯片(K100、K200)安装说明 - - `飞桨框架昆仑1代芯片训练示例 <./train_example_cn.html>`_ : 飞桨框架昆仑1代芯片(K100、K200)训练示例 - - `飞桨预测库昆仑1代芯片安装及使用 <./inference_install_example_cn.html>`_ : 飞桨预测库昆仑1代芯片(K100、K200)版安装及使用示例 + - `飞桨对昆仑 1 代芯片的支持 
<./paddle_2.0_xpu_cn.html>`_ : 飞桨支持昆仑 1 代芯片(K100、K200)运行 + - `飞桨框架昆仑 1 代芯片安装说明 <./paddle_install_cn.html>`_ : 飞桨框架昆仑 1 代芯片(K100、K200)安装说明 + - `飞桨框架昆仑 1 代芯片训练示例 <./train_example_cn.html>`_ : 飞桨框架昆仑 1 代芯片(K100、K200)训练示例 + - `飞桨预测库昆仑 1 代芯片安装及使用 <./inference_install_example_cn.html>`_ : 飞桨预测库昆仑 1 代芯片(K100、K200)版安装及使用示例 .. toctree:: :hidden: diff --git a/docs/guides/hardware_support/xpu_docs/inference_install_example_cn.md b/docs/guides/hardware_support/xpu_docs/inference_install_example_cn.md index d00baa3306f..8d7196732a4 100644 --- a/docs/guides/hardware_support/xpu_docs/inference_install_example_cn.md +++ b/docs/guides/hardware_support/xpu_docs/inference_install_example_cn.md @@ -1,15 +1,15 @@ -# 飞桨预测库昆仑XPU版安装及使用示例 +# 飞桨预测库昆仑 XPU 版安装及使用示例 -在昆仑XPU硬件上常用的高性能预测库主要包括以下3个,分别适用不同的云边端场景: +在昆仑 XPU 硬件上常用的高性能预测库主要包括以下 3 个,分别适用不同的云边端场景: | 名称 | 英文表示 | 适用场景 | 语言支持 | 安装方式 | | ------------------ | ---------------- | ---------------------------- | ---------------------------- |---------------------------- | -| 飞桨原生推理库 | Paddle Inference | 高性能服务器端、云端推理 | Python、C++ |Python版whl包下载或源码编译,C++版源码编译 | +| 飞桨原生推理库 | Paddle Inference | 高性能服务器端、云端推理 | Python、C++ |Python 版 whl 包下载或源码编译,C++版源码编译 | | 飞桨服务化推理框架 | Paddle Serving | 自动服务、模型管理等高阶功能 | Python、C++ | 源码编译 | | 飞桨轻量化推理引擎 | Paddle Lite | 移动端、物联网等 | Python、C++ | 源码编译 | -Paddle Inference 2.2版本的安装及使用方式,请[点击查看](https://paddleinference.paddlepaddle.org.cn/demo_tutorial/paddle_xpu_infer_cn.html)。 +Paddle Inference 2.2 版本的安装及使用方式,请[点击查看](https://paddleinference.paddlepaddle.org.cn/demo_tutorial/paddle_xpu_infer_cn.html)。 -Paddle Serving 0.8.3版本的安装及使用方式,请[点击查看](https://github.com/PaddlePaddle/Serving/blob/v0.8.3/doc/Run_On_XPU_CN.md)。 +Paddle Serving 0.8.3 版本的安装及使用方式,请[点击查看](https://github.com/PaddlePaddle/Serving/blob/v0.8.3/doc/Run_On_XPU_CN.md)。 -Paddle Lite 2.10版本的安装及使用方式,请[点击查看](https://paddlelite.paddlepaddle.org.cn/demo_guides/baidu_xpu.html)。 +Paddle Lite 2.10 
版本的安装及使用方式,请[点击查看](https://paddlelite.paddlepaddle.org.cn/demo_guides/baidu_xpu.html)。 diff --git a/docs/guides/hardware_support/xpu_docs/paddle_2.0_xpu2_cn.md b/docs/guides/hardware_support/xpu_docs/paddle_2.0_xpu2_cn.md index 140ca61ffd1..8ffb3b5a91a 100644 --- a/docs/guides/hardware_support/xpu_docs/paddle_2.0_xpu2_cn.md +++ b/docs/guides/hardware_support/xpu_docs/paddle_2.0_xpu2_cn.md @@ -1,12 +1,12 @@ -# 飞桨对昆仑2代芯片的支持 +# 飞桨对昆仑 2 代芯片的支持 -飞桨自2.3rc版本起支持在昆仑2代芯片上(R200,R300)运行,经验证的模型训练的支持情况如下: +飞桨自 2.3rc 版本起支持在昆仑 2 代芯片上(R200,R300)运行,经验证的模型训练的支持情况如下: ## 训练支持 可进行单机单卡/单机多卡训练的模型,如下所示: -| 模型 | 领域 | 编程范式 | 可用的CPU类型 | 单机单卡支持 | 单机多卡支持 | +| 模型 | 领域 | 编程范式 | 可用的 CPU 类型 | 单机单卡支持 | 单机多卡支持 | | ------------------ | -------- |------------- | ----------------------- | -------------- | -------------- | | ResNet50 | 图像分类 | 动态图 | X86(Intel) | 支持 |- | | MobileNet_v3 | 图像分类 | 动态图 | X86(Intel) | 支持 |- | @@ -22,7 +22,7 @@ -模型放置在飞桨模型套件中,作为github.com/PaddlePaddle下的独立repo存在,git clone下载即可获取所需的模型文件: +模型放置在飞桨模型套件中,作为 github.com/PaddlePaddle 下的独立 repo 存在,git clone 下载即可获取所需的模型文件: | 领域 | 套件名称 | 分支/版本 | | -------- | --------------- | ----------- | @@ -33,4 +33,4 @@ | OCR | [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR) | [dygraph](https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph) | | 推荐 | [PaddleREC](https://github.com/PaddlePaddle/PaddleRec) | [master](https://github.com/PaddlePaddle/PaddleRec/tree/master) | -* 注:支持基于Kermel Primitive算子的昆仑2代芯片支持,[点击这里](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/07_new_op/kernel_primitive_api/index_cn.html)。 +* 注:支持基于 Kernel Primitive 算子的昆仑 2 代芯片支持,[点击这里](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/07_new_op/kernel_primitive_api/index_cn.html)。 diff --git a/docs/guides/hardware_support/xpu_docs/paddle_2.0_xpu_cn.md b/docs/guides/hardware_support/xpu_docs/paddle_2.0_xpu_cn.md index 4c52a675a24..cec86ff0855 100644 --- a/docs/guides/hardware_support/xpu_docs/paddle_2.0_xpu_cn.md +++ 
b/docs/guides/hardware_support/xpu_docs/paddle_2.0_xpu_cn.md @@ -1,12 +1,12 @@ -# 飞桨对昆仑XPU芯片的支持 +# 飞桨对昆仑 XPU 芯片的支持 -飞桨自2.0版本起支持在昆仑XPU上运行,经验证的模型训练和预测的支持情况如下: +飞桨自 2.0 版本起支持在昆仑 XPU 上运行,经验证的模型训练和预测的支持情况如下: ## 训练支持 可进行单机单卡/单机多卡训练的模型,如下所示: -| 模型 | 领域 | 模型readme | 编程范式 | 可用的CPU类型 | 单机多卡支持 | +| 模型 | 领域 | 模型 readme | 编程范式 | 可用的 CPU 类型 | 单机多卡支持 | | ------------------ | -------- | ------------------------------------------------------------ | ------------- | ----------------------- | -------------- | | VGG16/19 | 图像分类 | [模型链接](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.1/docs/zh_CN/extension/train_on_xpu.md) | 静态图 | X86(Intel) | 支持 | | ResNet50 | 图像分类 | [模型链接](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.1/docs/zh_CN/extension/train_on_xpu.md) | 静态图 | X86(Intel)ARM(飞腾) | 支持 | @@ -24,7 +24,7 @@ | NAML | 推荐 | [模型链接](https://github.com/PaddlePaddle/PaddleRec/blob/release/2.1.0/models/rank/naml/train_on_kunlun.md) | 静态图 | X86(Intel) | 支持 | | DQN | 强化学习 | [模型链接](https://github.com/PaddlePaddle/PARL/blob/r1.4.3/examples/DQN/train_on_xpu.md) | 静态图 | X86(Intel) | 支持 | -模型放置在飞桨模型套件中,作为github.com/PaddlePaddle下的独立repo存在,git clone下载即可获取所需的模型文件: +模型放置在飞桨模型套件中,作为 github.com/PaddlePaddle 下的独立 repo 存在,git clone 下载即可获取所需的模型文件: | 领域 | 套件名称 | 分支/版本 | | -------- | --------------- | ----------- | @@ -39,10 +39,10 @@ ## 预测支持 -飞桨框架集成了python原生预测功能,安装飞桨框架即可使用。 -在框架之外,飞桨提供多个高性能预测库,包括Paddle Inference、Paddle Serving、Paddle Lite等,支持云边端不同环境下的部署场景,适合相对应的多种硬件平台、操作系统、编程语言,同时提供服务化部署能力。当前预测库验证支持的模型包括: +飞桨框架集成了 python 原生预测功能,安装飞桨框架即可使用。 +在框架之外,飞桨提供多个高性能预测库,包括 Paddle Inference、Paddle Serving、Paddle Lite 等,支持云边端不同环境下的部署场景,适合相对应的多种硬件平台、操作系统、编程语言,同时提供服务化部署能力。当前预测库验证支持的模型包括: -| 模型 | 领域 | 编程范式 | 可用的CPU类型 | +| 模型 | 领域 | 编程范式 | 可用的 CPU 类型 | | ------------------------ | -------- | -------- | ----------------------- | | VGG16/19 | 图像分类 | 静态图 | X86(Intel) | | ResNet50 | 图像分类 | 静态图 | X86(Intel)ARM(飞腾) | @@ -58,4 +58,4 @@ | Ernie-Base | NLP | 静态图 | X86(Intel) | -随着ARM架构的高性能、低功耗、低成本的优势日益突显,ARM 
CPU更多地进入PC和服务器领域,众多新锐国产CPU也采用ARM架构。在这一趋势下,我们开始尝试在ARM CPU + 昆仑XPU的硬件环境上运行飞桨,当前已验证ResNet50、YOLOv3的训练和预测效果。后续版本将持续增加昆仑XPU在更多模型任务上的验证。 +随着 ARM 架构的高性能、低功耗、低成本的优势日益突显,ARM CPU 更多地进入 PC 和服务器领域,众多新锐国产 CPU 也采用 ARM 架构。在这一趋势下,我们开始尝试在 ARM CPU + 昆仑 XPU 的硬件环境上运行飞桨,当前已验证 ResNet50、YOLOv3 的训练和预测效果。后续版本将持续增加昆仑 XPU 在更多模型任务上的验证。 diff --git a/docs/guides/hardware_support/xpu_docs/paddle_install_cn.md b/docs/guides/hardware_support/xpu_docs/paddle_install_cn.md index a9ed817d9aa..b9f41c30ed3 100644 --- a/docs/guides/hardware_support/xpu_docs/paddle_install_cn.md +++ b/docs/guides/hardware_support/xpu_docs/paddle_install_cn.md @@ -1,26 +1,26 @@ -# 飞桨框架昆仑XPU版安装说明 +# 飞桨框架昆仑 XPU 版安装说明 -飞桨框架支持基于python的训练和原生预测,当前最新版本为2.1,提供两种安装方式: +飞桨框架支持基于 python 的训练和原生预测,当前最新版本为 2.1,提供两种安装方式: -**1. 预编译的支持昆仑XPU的wheel包** +**1. 预编译的支持昆仑 XPU 的 wheel 包** -目前此wheel包只支持两种环境: +目前此 wheel 包只支持两种环境: -英特尔CPU+昆仑XPU+CentOS系统 +英特尔 CPU+昆仑 XPU+CentOS 系统 -飞腾CPU+昆仑XPU+麒麟V10系统 +飞腾 CPU+昆仑 XPU+麒麟 V10 系统 **2. 源码编译安装** 其他环境请选择源码编译安装。 -## 安装方式一:通过Wheel包安装 +## 安装方式一:通过 Wheel 包安装 ### 下载安装包 -**环境1:英特尔CPU+昆仑XPU+CentOS系统** +**环境 1:英特尔 CPU+昆仑 XPU+CentOS 系统** -Linux发行版建议选择CentOS 7系统 +Linux 发行版建议选择 CentOS 7 系统 Python3.7 @@ -42,13 +42,13 @@ wget https://paddle-wheel.bj.bcebos.com/kunlun/paddlepaddle-2.1.0-cp36-cp36m-lin python3.6 -m pip install -U ``paddlepaddle-2.1.0-cp36-cp36m-linux_x86_64.whl ``` -**环境2:飞腾CPU+昆仑XPU+麒麟V10系统** +**环境 2:飞腾 CPU+昆仑 XPU+麒麟 V10 系统** -如果您想使用预编译的支持昆仑XPU的wheel包,请联系飞桨官方邮件组:Paddle-better@baidu.com +如果您想使用预编译的支持昆仑 XPU 的 wheel 包,请联系飞桨官方邮件组:Paddle-better@baidu.com ### 验证安装 -安装完成后您可以使用 python 或 python3 进入python解释器,输入 +安装完成后您可以使用 python 或 python3 进入 python 解释器,输入 ``` import paddle @@ -60,34 +60,34 @@ import paddle paddle.utils.run_check() ``` -如果出现PaddlePaddle is installed successfully!,说明您已成功安装。 +如果出现 PaddlePaddle is installed successfully!,说明您已成功安装。 -## 安装方式二:从源码编译支持昆仑XPU的包 +## 安装方式二:从源码编译支持昆仑 XPU 的包 ### 环境准备 -**英特尔CPU+昆仑XPU+CentOS系统** +**英特尔 CPU+昆仑 XPU+CentOS 系统** - **处理器:Intel(R) Xeon(R) Gold 6148 CPU @2.40GHz** -- 
**操作系统:CentOS 7.8.2003(建议使用CentOS 7)** -- **Python版本: 3.6/3.7 (64 bit)** -- **pip或pip3版本:9.0.1+ (64 bit)** -- **cmake版本:3.15+** +- **操作系统:CentOS 7.8.2003(建议使用 CentOS 7)** +- **Python 版本: 3.6/3.7 (64 bit)** +- **pip 或 pip3 版本:9.0.1+ (64 bit)** +- **cmake 版本:3.15+** - **gcc/g++版本:8.2+** -**飞腾CPU+昆仑XPU+麒麟V10系统** +**飞腾 CPU+昆仑 XPU+麒麟 V10 系统** - **处理器:Phytium,FT-2000+/64** - **操作系统:Kylin release V10 (SP1)/(Tercel)-aarch64-Build04/20200711** -- **Python版本:3.6/3.7 (64 bit)** -- **pip或pip3版本: 9.0.1+ (64 bit)** -- **cmake版本:3.15+** +- **Python 版本:3.6/3.7 (64 bit)** +- **pip 或 pip3 版本: 9.0.1+ (64 bit)** +- **cmake 版本:3.15+** - **gcc/g++版本:8.2+** ### 源码编译安装步骤: -(1)Paddle依赖cmake进行编译构建,需要cmake版本>=3.15,如果操作系统提供的源包括了合适版本的cmake,直接安装即可,否则需要 +(1)Paddle 依赖 cmake 进行编译构建,需要 cmake 版本>=3.15,如果操作系统提供的源包括了合适版本的 cmake,直接安装即可,否则需要 ``` wget https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8.tar.gz @@ -95,7 +95,7 @@ tar -xzf cmake-3.16.8.tar.gz && cd cmake-3.16.8 ./bootstrap && make && sudo make install ``` -(2)Paddle内部使用patchelf来修改动态库的rpath,如果操作系统提供的源包括了patchelf,直接安装即可,否则需要源码安装,请参考 +(2)Paddle 内部使用 patchelf 来修改动态库的 rpath,如果操作系统提供的源包括了 patchelf,直接安装即可,否则需要源码安装,请参考 ``` ./bootstrap.sh @@ -105,22 +105,22 @@ make check sudo make install ``` -(3)根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装Python依赖库 +(3)根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装 Python 依赖库 -(4)将Paddle的源代码克隆到当下目录下的Paddle文件夹中,并进入Paddle目录 +(4)将 Paddle 的源代码克隆到当下目录下的 Paddle 文件夹中,并进入 Paddle 目录 ``` git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle ``` -使用较稳定的版本编译,建议切换到release2.1分支下: +使用较稳定的版本编译,建议切换到 release2.1 分支下: ``` git checkout release/2.1 ``` -(5)进行Wheel包的编译,请创建并进入一个叫build的目录下 +(5)进行 Wheel 包的编译,请创建并进入一个叫 build 的目录下 ``` mkdir build && cd build @@ -128,7 +128,7 @@ mkdir build && cd build 具体编译选项含义可参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) 
-**英特尔CPU+昆仑XPU+CentOS系统** +**英特尔 CPU+昆仑 XPU+CentOS 系统** 链接过程中打开文件数较多,可能超过系统默认限制导致编译出错,设置进程允许打开的最大文件数: @@ -136,7 +136,7 @@ mkdir build && cd build ulimit -n 2048 ``` -执行cmake,完成编译 +执行 cmake,完成编译 Python3 @@ -176,9 +176,9 @@ cmake .. -DPY_VERSION=2.7 \ make -j20 ``` -**飞腾CPU+昆仑XPU+麒麟V10系统** +**飞腾 CPU+昆仑 XPU+麒麟 V10 系统** -在该环境下,编译前需要手动拉取XPU SDK,可使用以下命令: +在该环境下,编译前需要手动拉取 XPU SDK,可使用以下命令: ``` wget https://paddle-wheel.bj.bcebos.com/kunlun/xpu_sdk_v2.0.0.61.tar.gz @@ -186,7 +186,7 @@ tar xvf xpu_sdk_v2.0.0.61.tar.gz mv output xpu_sdk_v2.0.0.61 xpu_sdk ``` -执行cmake,完成编译 +执行 cmake,完成编译 ``` ulimit -n 4096 @@ -210,15 +210,15 @@ cmake .. -DPY_VERSION=3.7 \ make VERBOSE=1 TARGET=ARMV8 -j32 ``` -(6)编译成功后进入Paddle/build/python/dist目录下找到生成的.whl包 。 +(6)编译成功后进入 Paddle/build/python/dist 目录下找到生成的.whl 包 。 -(7)将生成的.whl包copy至带有昆仑XPU的目标机器上,并在目标机器上根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装Python依赖库。(如果编译机器同时为带有昆仑XPU的目标机器,略过此步) +(7)将生成的.whl 包 copy 至带有昆仑 XPU 的目标机器上,并在目标机器上根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装 Python 依赖库。(如果编译机器同时为带有昆仑 XPU 的目标机器,略过此步) -(8)在带有昆仑XPU的目标机器安装编译好的.whl包:pip install -U(whl包的名字)或pip3 install -U(whl包的名字)。恭喜,至此您已完成昆仑XPU机器上PaddlePaddle的编译安装。 +(8)在带有昆仑 XPU 的目标机器安装编译好的.whl 包:pip install -U(whl 包的名字)或 pip3 install -U(whl 包的名字)。恭喜,至此您已完成昆仑 XPU 机器上 PaddlePaddle 的编译安装。 **验证安装** -安装完成后您可以使用 python 或 python3 进入python解释器,输入 +安装完成后您可以使用 python 或 python3 进入 python 解释器,输入 ``` import paddle @@ -230,11 +230,11 @@ import paddle paddle.utils.run_check() ``` -如果出现PaddlePaddle is installed successfully!,说明您已成功安装。 +如果出现 PaddlePaddle is installed successfully!,说明您已成功安装。 ### 如何卸载 -使用以下命令卸载PaddlePaddle: +使用以下命令卸载 PaddlePaddle: ``` pip uninstall paddlepaddle diff --git a/docs/guides/hardware_support/xpu_docs/paddle_install_xpu2_cn.md b/docs/guides/hardware_support/xpu_docs/paddle_install_xpu2_cn.md index afb522d9cf1..1a3f2f2f74c 100644 --- 
a/docs/guides/hardware_support/xpu_docs/paddle_install_xpu2_cn.md +++ b/docs/guides/hardware_support/xpu_docs/paddle_install_xpu2_cn.md @@ -1,24 +1,24 @@ -# 飞桨框架昆仑2代芯片安装说明 +# 飞桨框架昆仑 2 代芯片安装说明 -在昆仑2代芯片上,飞桨框架支持基于python的训练和原生预测,当前最新版本为2.3rc,提供两种安装方式: +在昆仑 2 代芯片上,飞桨框架支持基于 python 的训练和原生预测,当前最新版本为 2.3rc,提供两种安装方式: -**1. 预编译的支持昆仑2代芯片的wheel包** +**1. 预编译的支持昆仑 2 代芯片的 wheel 包** -目前此wheel包只支持一种环境: +目前此 wheel 包只支持一种环境: -英特尔CPU+昆仑2代芯片+Linux操作系统 +英特尔 CPU+昆仑 2 代芯片+Linux 操作系统 **2. 源码编译安装** 其他环境请选择源码编译安装。 -## 安装方式一:通过Wheel包安装 +## 安装方式一:通过 Wheel 包安装 ### 下载安装包 -**环境1:英特尔CPU+昆仑2代芯片+Linux操作系统** +**环境 1:英特尔 CPU+昆仑 2 代芯片+Linux 操作系统** -Linux发行版建议选择CentOS 7系统 +Linux 发行版建议选择 CentOS 7 系统 Python3.7 @@ -33,7 +33,7 @@ python3.7 -m pip install -U paddlepaddle_xpu-2.3.0rc0-cp37-cp37m-linux_x86_64.wh ### 验证安装 -安装完成后您可以使用 python 或 python3 进入python解释器,输入 +安装完成后您可以使用 python 或 python3 进入 python 解释器,输入 ``` import paddle @@ -45,28 +45,28 @@ import paddle paddle.utils.run_check() ``` -如果出现PaddlePaddle is installed successfully!,说明您已成功安装。 +如果出现 PaddlePaddle is installed successfully!,说明您已成功安装。 -* 注:支持基于Kermel Primitive算子的昆仑2代芯片编译whl包,[点击这里查看](https://www.kunlunxin.com.cn)。 +* 注:支持基于 Kernel Primitive 算子的昆仑 2 代芯片编译 whl 包,[点击这里查看](https://www.kunlunxin.com.cn)。 -## 安装方式二:从源码编译支持昆仑XPU的包 +## 安装方式二:从源码编译支持昆仑 XPU 的包 ### 环境准备 -**英特尔CPU+昆仑2代芯片+CentOS系统** +**英特尔 CPU+昆仑 2 代芯片+CentOS 系统** - **处理器:Intel(R) Xeon(R) Gold 6148 CPU @2.40GHz** -- **操作系统:CentOS 7.8.2003(建议使用CentOS 7)** -- **Python版本: 3.7 (64 bit)** -- **pip或pip3版本:9.0.1+ (64 bit)** -- **cmake版本:3.15+** +- **操作系统:CentOS 7.8.2003(建议使用 CentOS 7)** +- **Python 版本: 3.7 (64 bit)** +- **pip 或 pip3 版本:9.0.1+ (64 bit)** +- **cmake 版本:3.15+** - **gcc/g++版本:8.2+** ### 源码编译安装步骤: -(1)Paddle依赖cmake进行编译构建,需要cmake版本>=3.15,如果操作系统提供的源包括了合适版本的cmake,直接安装即可,否则需要 +(1)Paddle 依赖 cmake 进行编译构建,需要 cmake 版本>=3.15,如果操作系统提供的源包括了合适版本的 cmake,直接安装即可,否则需要 ``` wget https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8.tar.gz @@ -74,7 +74,7 @@ tar -xzf cmake-3.16.8.tar.gz &&
cd cmake-3.16.8 ./bootstrap && make && sudo make install ``` -(2)Paddle内部使用patchelf来修改动态库的rpath,如果操作系统提供的源包括了patchelf,直接安装即可,否则需要源码安装,请参考 +(2)Paddle 内部使用 patchelf 来修改动态库的 rpath,如果操作系统提供的源包括了 patchelf,直接安装即可,否则需要源码安装,请参考 ``` ./bootstrap.sh @@ -84,22 +84,22 @@ make check sudo make install ``` -(3)根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装Python依赖库 +(3)根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装 Python 依赖库 -(4)将Paddle的源代码克隆到当下目录下的Paddle文件夹中,并进入Paddle目录 +(4)将 Paddle 的源代码克隆到当下目录下的 Paddle 文件夹中,并进入 Paddle 目录 ``` git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle ``` -使用较稳定的版本编译,建议切换到release2.3分支下: +使用较稳定的版本编译,建议切换到 release2.3 分支下: ``` git checkout release/2.3 ``` -(5)进行Wheel包的编译,请创建并进入一个叫build的目录下 +(5)进行 Wheel 包的编译,请创建并进入一个叫 build 的目录下 ``` mkdir build && cd build @@ -107,7 +107,7 @@ mkdir build && cd build 具体编译选项含义可参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) -**英特尔CPU+昆仑2代芯+CentOS系统** +**英特尔 CPU+昆仑 2 代芯+CentOS 系统** 链接过程中打开文件数较多,可能超过系统默认限制导致编译出错,设置进程允许打开的最大文件数: @@ -115,7 +115,7 @@ mkdir build && cd build ulimit -n 4096 ``` -执行cmake,完成编译 +执行 cmake,完成编译 Python3.7 @@ -136,15 +136,15 @@ cmake .. 
-DPY_VERSION=3.7 \ make -j$(nproc) ``` -(6)编译成功后进入Paddle/build/python/dist目录下找到生成的.whl包 。 +(6)编译成功后进入 Paddle/build/python/dist 目录下找到生成的.whl 包 。 -(7)将生成的.whl包copy至带有昆仑XPU的目标机器上,并在目标机器上根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装Python依赖库。(如果编译机器同时为带有昆仑XPU的目标机器,略过此步) +(7)将生成的.whl 包 copy 至带有昆仑 XPU 的目标机器上,并在目标机器上根据[requirements.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装 Python 依赖库。(如果编译机器同时为带有昆仑 XPU 的目标机器,略过此步) -(8)在带有昆仑XPU的目标机器安装编译好的.whl包:pip install -U(whl包的名字)或pip3 install -U(whl包的名字)。恭喜,至此您已完成昆仑XPU机器上PaddlePaddle的编译安装。 +(8)在带有昆仑 XPU 的目标机器安装编译好的.whl 包:pip install -U(whl 包的名字)或 pip3 install -U(whl 包的名字)。恭喜,至此您已完成昆仑 XPU 机器上 PaddlePaddle 的编译安装。 **验证安装** -安装完成后您可以使用 python 或 python3 进入python解释器,输入 +安装完成后您可以使用 python 或 python3 进入 python 解释器,输入 ``` import paddle ``` 再输入 ``` paddle.utils.run_check() ``` -如果出现PaddlePaddle is installed successfully!,说明您已成功安装。 +如果出现 PaddlePaddle is installed successfully!,说明您已成功安装。 ### 如何卸载 -使用以下命令卸载PaddlePaddle: +使用以下命令卸载 PaddlePaddle: ``` pip uninstall paddlepaddle @@ -172,4 +172,4 @@ pip uninstall paddlepaddle ``` 或 ``` pip3 uninstall paddlepaddle ``` -* 注:支持基于Kermel Primitive算子的昆仑2代芯片源码编译,[点击这里查看](https://www.kunlunxin.com.cn)。 +* 注:支持基于 Kernel Primitive 算子的昆仑 2 代芯片源码编译,[点击这里查看](https://www.kunlunxin.com.cn)。 diff --git a/docs/guides/hardware_support/xpu_docs/train_example_cn.md b/docs/guides/hardware_support/xpu_docs/train_example_cn.md index aaa56e41771..c5c241e7b12 100644 --- a/docs/guides/hardware_support/xpu_docs/train_example_cn.md +++ b/docs/guides/hardware_support/xpu_docs/train_example_cn.md @@ -1,8 +1,8 @@ -# 飞桨框架昆仑XPU版训练示例 +# 飞桨框架昆仑 XPU 版训练示例 -使用XPU训练与cpu/gpu相同,只需要加上-o use_xpu=True, 表示执行在昆仑设备上。 +使用 XPU 训练与 cpu/gpu 相同,只需要加上-o use_xpu=True, 表示执行在昆仑设备上。 -#### ResNet50下载并运行示例: +#### ResNet50 下载并运行示例: 模型文件下载命令: ``` cd path_to_clone_PaddleClas git clone -b release/static
https://github.com/PaddlePaddle/PaddleClas.git ``` -也可以访问PaddleClas的[github repo](https://github.com/PaddlePaddle/PaddleClas/tree/release/static)直接下载源码。 +也可以访问 PaddleClas 的[github repo](https://github.com/PaddlePaddle/PaddleClas/tree/release/static)直接下载源码。 -配置XPU进行训练的命令非常简单: +配置 XPU 进行训练的命令非常简单: ``` -#FLAGS指定单卡或多卡训练,此示例运行2个卡 +#FLAGS 指定单卡或多卡训练,此示例运行 2 个卡 export FLAGS_selected_xpus=0,1 #启动训练 python3.7 tools/static/train.py -c configs/quick_start/ResNet50_vd_finetune_kunlun.yaml -o use_gpu=False -o use_xpu=True -o is_distributed=False ``` -如果需要指定更多的卡(比如8卡),需要配置合适的训练参数,可使用如下命令自动修改: +如果需要指定更多的卡(比如 8 卡),需要配置合适的训练参数,可使用如下命令自动修改: ``` export FLAGS_selected_xpus=0,1,2,3,4,5,6,7 python3.7 -m paddle.distributed.launch \ @@ -37,4 +37,4 @@ python3.7 -m paddle.distributed.launch \ -o use_xpu=True ``` -其他模型的训练示例可在[飞桨对昆仑XPU芯片的支持](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/xpu_docs/paddle_2.0_xpu_cn.html)中支持模型列表下的模型链接中找到。 +其他模型的训练示例可在[飞桨对昆仑 XPU 芯片的支持](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/xpu_docs/paddle_2.0_xpu_cn.html)中支持模型列表下的模型链接中找到。 diff --git a/docs/guides/hardware_support/xpu_docs/train_example_xpu2_cn.md b/docs/guides/hardware_support/xpu_docs/train_example_xpu2_cn.md index 158450d9f3e..e6a1b59e07c 100644 --- a/docs/guides/hardware_support/xpu_docs/train_example_xpu2_cn.md +++ b/docs/guides/hardware_support/xpu_docs/train_example_xpu2_cn.md @@ -1,8 +1,8 @@ -# 飞桨框架昆仑2代芯片训练示例 +# 飞桨框架昆仑 2 代芯片训练示例 -使用XPU训练与CPU/GPU相同,只需要简单配置XPU,就可以执行在昆仑设备上。 +使用 XPU 训练与 CPU/GPU 相同,只需要简单配置 XPU,就可以执行在昆仑设备上。 -#### ResNet50下载并运行示例: +#### ResNet50 下载并运行示例: 1、 安装依赖: ``` @@ -12,7 +12,7 @@ python -m pip install -r requirements.txt ``` 2、下载数据集: -基于CIFAR100数据集的ResNet50训练任务 +基于 CIFAR100 数据集的 ResNet50 训练任务 ``` cd dataset rm -rf ILSVRC2012 @@ -24,10 +24,10 @@ mv train.txt train_list.txt mv test.txt val_list.txt ``` -3、配置XPU进行训练的命令非常简单: +3、配置 XPU 进行训练的命令非常简单: ``` cd ../.. 
-#FLAGS指定单卡或多卡训练,此示例运行1个卡 +#FLAGS 指定单卡或多卡训练,此示例运行 1 个卡 export FLAGS_selected_xpus=2 export XPUSIM_DEVICE_MODEL=KUNLUN2 #启动训练 @@ -36,7 +36,7 @@ python tools/train.py \ -o Global.device=xpu ``` -#### YOLOv3-DarkNet53下载并运行示例: +#### YOLOv3-DarkNet53 下载并运行示例: 1、安装依赖: ``` @@ -54,9 +54,9 @@ python create_list.py cd ../.. ``` -3、配置XPU进行训练的命令非常简单: +3、配置 XPU 进行训练的命令非常简单: ``` -#FLAGS指定单卡或多卡训练,此示例运行1个卡 +#FLAGS 指定单卡或多卡训练,此示例运行 1 个卡 export FLAGS_selected_xpus=2 export XPUSIM_DEVICE_MODEL=KUNLUN2 #启动训练 diff --git a/docs/guides/infer/inference/inference_cn.md b/docs/guides/infer/inference/inference_cn.md index 0ffe1efaa65..070fa7445df 100644 --- a/docs/guides/infer/inference/inference_cn.md +++ b/docs/guides/infer/inference/inference_cn.md @@ -2,19 +2,19 @@ Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力。 -由于能力直接基于飞桨的训练算子,因此Paddle Inference 可以通用支持飞桨训练出的所有模型。 +由于能力直接基于飞桨的训练算子,因此 Paddle Inference 可以通用支持飞桨训练出的所有模型。 Paddle Inference 功能特性丰富,性能优异,针对不同平台不同的应用场景进行了深度的适配优化,做到高吞吐、低时延,保证了飞桨模型在服务器端即训即用,快速部署。 一些常见的文档链接如下: -- 完整使用文档位于:[Paddle Inference文档](https://paddle-inference.readthedocs.io/en/latest/index.html) +- 完整使用文档位于:[Paddle Inference 文档](https://paddle-inference.readthedocs.io/en/latest/index.html) - 代码示例位于[inference demo](https://github.com/PaddlePaddle/Paddle-Inference-Demo) -- 点此 [安装与编译Linux预测库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) -- 点此 [安装与编译Windows预测库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html#windows) +- 点此 [安装与编译 Linux 预测库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) +- 点此 [安装与编译 Windows 预测库](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html#windows) -## 与主框架model.predict区别 +## 与主框架 model.predict 区别 -飞桨推理产品paddle inference和主框架的Model.predict均可实现推理预测,Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力,主框架的Model 对象是一个具备训练、测试、推理的神经网络。相比于Model.predict,inference可使用MKLDNN、CUDNN、TensorRT进行预测加速,同时支持用 X2Paddle 工具从第三方框架(TensorFlow、Pytorh 、 Caffe 
等)产出的模型,可联动PaddleSlim,支持加载量化、裁剪和蒸馏后的模型部署。Model.predict适用于训练好的模型直接进行预测,paddle inference适用于对推理性能、通用性有要求的用户,针对不同平台不同的应用场景进行了深度的适配优化,保证模型在服务器端即训即用,快速部署。 +飞桨推理产品 paddle inference 和主框架的 Model.predict 均可实现推理预测,Paddle Inference 是飞桨的原生推理库, 作用于服务器端和云端,提供高性能的推理能力,主框架的 Model 对象是一个具备训练、测试、推理的神经网络。相比于 Model.predict,inference 可使用 MKLDNN、CUDNN、TensorRT 进行预测加速,同时支持用 X2Paddle 工具从第三方框架(TensorFlow、Pytorh 、 Caffe 等)产出的模型,可联动 PaddleSlim,支持加载量化、裁剪和蒸馏后的模型部署。Model.predict 适用于训练好的模型直接进行预测,paddle inference 适用于对推理性能、通用性有要求的用户,针对不同平台不同的应用场景进行了深度的适配优化,保证模型在服务器端即训即用,快速部署。 ## 预测流程图 @@ -24,31 +24,31 @@ Paddle Inference 功能特性丰富,性能优异,针对不同平台不同的 ### 内存/显存复用提升服务吞吐量 -在推理初始化阶段,对模型中的OP输出Tensor 进行依赖分析,将两两互不依赖的Tensor在内存/显存空间上进行复用,进而增大计算并行量,提升服务吞吐量。 +在推理初始化阶段,对模型中的 OP 输出 Tensor 进行依赖分析,将两两互不依赖的 Tensor 在内存/显存空间上进行复用,进而增大计算并行量,提升服务吞吐量。 -### 细粒度OP横向纵向融合减少计算量 +### 细粒度 OP 横向纵向融合减少计算量 -在推理初始化阶段,按照已有的融合模式将模型中的多个OP融合成一个OP,减少了模型的计算量的同时,也减少了 Kernel Launch的次数,从而能提升推理性能。目前Paddle Inference支持的融合模式多达几十个。 +在推理初始化阶段,按照已有的融合模式将模型中的多个 OP 融合成一个 OP,减少了模型的计算量的同时,也减少了 Kernel Launch 的次数,从而能提升推理性能。目前 Paddle Inference 支持的融合模式多达几十个。 -### 内置高性能的CPU/GPU Kernel +### 内置高性能的 CPU/GPU Kernel -内置同Intel、Nvidia共同打造的高性能kernel,保证了模型推理高性能的执行。 +内置同 Intel、Nvidia 共同打造的高性能 kernel,保证了模型推理高性能的执行。 ## 多功能集成 -### 集成TensorRT加快GPU推理速度 +### 集成 TensorRT 加快 GPU 推理速度 -Paddle Inference采用子图的形式集成TensorRT,针对GPU推理场景,TensorRT可对一些子图进行优化,包括OP的横向和纵向融合,过滤冗余的OP,并为OP自动选择最优的kernel,加快推理速度。 +Paddle Inference 采用子图的形式集成 TensorRT,针对 GPU 推理场景,TensorRT 可对一些子图进行优化,包括 OP 的横向和纵向融合,过滤冗余的 OP,并为 OP 自动选择最优的 kernel,加快推理速度。 -### 集成oneDNN CPU推理加速引擎 +### 集成 oneDNN CPU 推理加速引擎 -一行代码开始oneDNN加速,快捷高效。 +一行代码开始 oneDNN 加速,快捷高效。 -### 支持PaddleSlim量化压缩模型的部署 +### 支持 PaddleSlim 量化压缩模型的部署 -PaddleSlim是飞桨深度学习模型压缩工具,Paddle Inference可联动PaddleSlim,支持加载量化、裁剪和蒸馏后的模型并部署,由此减小模型存储空间、减少计算占用内存、加快模型推理速度。其中在模型量化方面,Paddle Inference在X86 CPU上做了深度优化,常见分类模型的单线程性能可提升近3倍,ERNIE模型的单线程性能可提升2.68倍。 +PaddleSlim 是飞桨深度学习模型压缩工具,Paddle Inference 可联动 PaddleSlim,支持加载量化、裁剪和蒸馏后的模型并部署,由此减小模型存储空间、减少计算占用内存、加快模型推理速度。其中在模型量化方面,Paddle Inference 在 
X86 CPU 上做了深度优化,常见分类模型的单线程性能可提升近 3 倍,ERNIE 模型的单线程性能可提升 2.68 倍。 -### 支持X2Paddle转换得到的模型 +### 支持 X2Paddle 转换得到的模型 除支持飞桨训练的模型外,也支持用 X2Paddle 工具从第三方框架(比如 TensorFlow、PyTorch 或者 Caffe 等)产出的模型。 @@ -56,20 +56,20 @@ PaddleSlim是飞桨深度学习模型压缩工具,Paddle Inference可联动Pad ### 主流软硬件环境兼容适配 -支持服务器端X86 CPU、NVIDIA GPU芯片,兼容Linux/Mac/Windows系统,同时对飞腾、鲲鹏、曙光、昆仑等国产CPU/NPU进行适配。。支持所有飞桨训练产出的模型,完全做到即训即用。 +支持服务器端 X86 CPU、NVIDIA GPU 芯片,兼容 Linux/Mac/Windows 系统,同时对飞腾、鲲鹏、曙光、昆仑等国产 CPU/NPU 进行适配。。支持所有飞桨训练产出的模型,完全做到即训即用。 ### 主流、国产操作系统全适配 -适配主流操作系统Linux、Windows、macOS,同时适配麒麟OS、统信OS、普华OS、中科方德等国产操作系统 +适配主流操作系统 Linux、Windows、macOS,同时适配麒麟 OS、统信 OS、普华 OS、中科方德等国产操作系统 ### 多语言接口支持 -支持C++、Python、C、Go、Java和R语言API,对于其他语言,提供了ABI稳定的C API,提供配套的教程、API文档及示例。 +支持 C++、Python、C、Go、Java 和 R 语言 API,对于其他语言,提供了 ABI 稳定的 C API,提供配套的教程、API 文档及示例。 ## 交流与反馈 - 欢迎您通过 GitHub Issues 来提交问题、报告与建议 -- 微信公众号:飞桨PaddlePaddle +- 微信公众号:飞桨 PaddlePaddle - 微信群: 部署交流群

     

diff --git a/docs/guides/infer/mobile/mobile_index_cn.md b/docs/guides/infer/mobile/mobile_index_cn.md index 783e74910ff..7644d82a96a 100644 --- a/docs/guides/infer/mobile/mobile_index_cn.md +++ b/docs/guides/infer/mobile/mobile_index_cn.md @@ -1,6 +1,6 @@ # 移动端/嵌入式部署 — Paddle Lite -Paddle-Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在内更多场景的轻量化高效预测,支持更广泛的硬件和平台,是一个高性能、轻量级的深度学习预测引擎。在保持和PaddlePaddle无缝对接外,也兼容支持其他训练框架产出的模型。 +Paddle-Lite 为 Paddle-Mobile 的升级版,定位支持包括手机移动端在内更多场景的轻量化高效预测,支持更广泛的硬件和平台,是一个高性能、轻量级的深度学习预测引擎。在保持和 PaddlePaddle 无缝对接外,也兼容支持其他训练框架产出的模型。 完整使用文档位于 [Paddle-Lite 文档](https://paddle-lite.readthedocs.io/zh/latest/) 。 @@ -8,22 +8,22 @@ Paddle-Lite为Paddle-Mobile的升级版,定位支持包括手机移动端在 ### 轻量级 执行阶段和计算优化阶段实现良好解耦拆分,移动端可以直接部署执行阶段,无任何第三方依赖。 -包含完整的80个 Op+85个 Kernel 的动态库,对于ARMV7只有800K,ARMV8下为1.3M,并可以裁剪到更低。 +包含完整的 80 个 Op+85 个 Kernel 的动态库,对于 ARMV7 只有 800K,ARMV8 下为 1.3M,并可以裁剪到更低。 在应用部署时,载入模型即可直接预测,无需额外分析优化。 ### 高性能 -极致的 ARM CPU 性能优化,针对不同微架构特点实现kernel的定制,最大发挥计算性能,在主流模型上展现出领先的速度优势。 +极致的 ARM CPU 性能优化,针对不同微架构特点实现 kernel 的定制,最大发挥计算性能,在主流模型上展现出领先的速度优势。 支持量化模型,结合[PaddleSlim 模型压缩工具](https://github.com/PaddlePaddle/models/tree/v1.5/PaddleSlim) 中量化功能,可以提供高精度高性能的预测能力。 -在Huawei NPU, FPGA上也具有有很好的性能表现。 +在 Huawei NPU, FPGA 上也具有有很好的性能表现。 最新性能数据位于 [Benchmark 文档](https://paddle-lite.readthedocs.io/zh/latest/benchmark/benchmark.html)。 ### 通用性 -硬件方面,Paddle-Lite 的架构设计为多硬件兼容支持做了良好设计。除了支持ARM CPU、Mali GPU、Adreno GPU,还特别支持了华为 NPU,以及 FPGA 等边缘设备广泛使用的硬件。即将支持支持包括寒武纪、比特大陆等AI芯片,未来会增加对更多硬件的支持。 +硬件方面,Paddle-Lite 的架构设计为多硬件兼容支持做了良好设计。除了支持 ARM CPU、Mali GPU、Adreno GPU,还特别支持了华为 NPU,以及 FPGA 等边缘设备广泛使用的硬件。即将支持支持包括寒武纪、比特大陆等 AI 芯片,未来会增加对更多硬件的支持。 -模型支持方面,Paddle-Lite和PaddlePaddle训练框架的Op对齐,提供更广泛的模型支持能力。目前已严格验证18个模型85个OP的精度和性能,对视觉类模型做到了较为充分的支持,覆盖分类、检测和定位,包含了特色的OCR模型的支持。未来会持续增加更多模型的支持验证。 +模型支持方面,Paddle-Lite 和 PaddlePaddle 训练框架的 Op 对齐,提供更广泛的模型支持能力。目前已严格验证 18 个模型 85 个 OP 的精度和性能,对视觉类模型做到了较为充分的支持,覆盖分类、检测和定位,包含了特色的 OCR 模型的支持。未来会持续增加更多模型的支持验证。 -框架兼容方面:除了PaddlePaddle外,对其他训练框架也提供兼容支持。当前,支持Caffe 和 TensorFlow 训练出来的模型,通过[X2Paddle] 
(https://github.com/PaddlePaddle/X2Paddle) 转换工具实现。接下来将会对ONNX等格式模型提供兼容支持。 +框架兼容方面:除了 PaddlePaddle 外,对其他训练框架也提供兼容支持。当前,支持 Caffe 和 TensorFlow 训练出来的模型,通过[X2Paddle] (https://github.com/PaddlePaddle/X2Paddle) 转换工具实现。接下来将会对 ONNX 等格式模型提供兼容支持。 ## 架构 @@ -31,28 +31,28 @@ Paddle-Lite 的架构设计着重考虑了对多硬件和平台的支持,并 ![](https://github.com/Superjomn/_tmp_images/raw/master/images/paddle-lite-architecture.png) -其中,Analysis Phase 包括了 MIR(Machine IR) 相关模块,能够对原有的模型的计算图针对具体的硬件列表进行算子融合、计算裁剪 在内的多种优化。Execution Phase 只涉及到Kernel 的执行,且可以单独部署,以支持极致的轻量级部署。 +其中,Analysis Phase 包括了 MIR(Machine IR) 相关模块,能够对原有的模型的计算图针对具体的硬件列表进行算子融合、计算裁剪 在内的多种优化。Execution Phase 只涉及到 Kernel 的执行,且可以单独部署,以支持极致的轻量级部署。 -## Paddle-Mobile升级为Paddle-Lite的说明 -原Paddle-Mobile作为一个致力于嵌入式平台的PaddlePaddle预测引擎,已支持多种硬件平台,包括ARM CPU、 Mali GPU、Adreno GPU,以及支持苹果设备的GPU Metal实现、ZU5、ZU9等FPGA开发板、树莓派等arm-linux开发板。在百度内已经过广泛业务场景应用验证。对应设计文档可参考: [mobile/README](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/README.md) +## Paddle-Mobile 升级为 Paddle-Lite 的说明 +原 Paddle-Mobile 作为一个致力于嵌入式平台的 PaddlePaddle 预测引擎,已支持多种硬件平台,包括 ARM CPU、 Mali GPU、Adreno GPU,以及支持苹果设备的 GPU Metal 实现、ZU5、ZU9 等 FPGA 开发板、树莓派等 arm-linux 开发板。在百度内已经过广泛业务场景应用验证。对应设计文档可参考: [mobile/README](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/README.md) -Paddle-Mobile 整体升级重构并更名为Paddle-Lite后,原paddle-mobile 的底层能力大部分已集成到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下。作为过渡,暂时保留原Paddle-mobile代码。 主体代码位于 `mobile/` 目录中,后续一段时间会继续维护,并完成全部迁移。新功能会统一到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下开发。 +Paddle-Mobile 整体升级重构并更名为 Paddle-Lite 后,原 paddle-mobile 的底层能力大部分已集成到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下。作为过渡,暂时保留原 Paddle-mobile 代码。 主体代码位于 `mobile/` 目录中,后续一段时间会继续维护,并完成全部迁移。新功能会统一到[新架构 ](https://github.com/PaddlePaddle/Paddle-Lite/tree/develop/lite)下开发。 -metal, web的模块相对独立,会继续在 `./metal` 和 `./web` 目录下开发和维护。对苹果设备的GPU Metal实现的需求及web前端预测需求,可以直接进入这两个目录。 +metal, web 的模块相对独立,会继续在 `./metal` 和 `./web` 目录下开发和维护。对苹果设备的 GPU Metal 
实现的需求及 web 前端预测需求,可以直接进入这两个目录。 ## 致谢 Paddle-Lite 借鉴了以下开源项目: - [ARM compute library](https://github.com/ARM-software/ComputeLibrary) -- [Anakin](https://github.com/PaddlePaddle/Anakin) ,Anakin对应底层的一些优化实现已被集成到Paddle-Lite。Anakin作为PaddlePaddle组织下的一个高性能预测项目,极具前瞻性,对Paddle-Lite有重要贡献。Anakin已和本项目实现整合。之后,Anakin不再升级。 +- [Anakin](https://github.com/PaddlePaddle/Anakin) ,Anakin 对应底层的一些优化实现已被集成到 Paddle-Lite。Anakin 作为 PaddlePaddle 组织下的一个高性能预测项目,极具前瞻性,对 Paddle-Lite 有重要贡献。Anakin 已和本项目实现整合。之后,Anakin 不再升级。 ## 交流与反馈 -* 欢迎您通过 GitHub Issues来提交问题、报告与建议 -* 微信公众号:飞桨PaddlePaddle -* QQ群: 696965088 +* 欢迎您通过 GitHub Issues 来提交问题、报告与建议 +* 微信公众号:飞桨 PaddlePaddle +* QQ 群: 696965088

     

-

   微信公众号                官方技术交流QQ群

+

   微信公众号                官方技术交流 QQ 群

-* 论坛: 欢迎大家在[PaddlePaddle论坛](https://ai.baidu.com/forum/topic/list/168)分享在使用PaddlePaddle中遇到的问题和经验, 营造良好的论坛氛围 +* 论坛: 欢迎大家在[PaddlePaddle 论坛](https://ai.baidu.com/forum/topic/list/168)分享在使用 PaddlePaddle 中遇到的问题和经验, 营造良好的论坛氛围 diff --git a/docs/guides/infer/paddleslim/paddle_slim_cn.md b/docs/guides/infer/paddleslim/paddle_slim_cn.md index cf011aa1ec9..10e49ed183a 100755 --- a/docs/guides/infer/paddleslim/paddle_slim_cn.md +++ b/docs/guides/infer/paddleslim/paddle_slim_cn.md @@ -1,13 +1,13 @@ # 模型压缩 — PaddleSlim -PaddleSlim是一个模型压缩工具库,包含模型剪裁、定点量化、知识蒸馏、超参搜索和模型结构搜索等一系列模型压缩策略。 +PaddleSlim 是一个模型压缩工具库,包含模型剪裁、定点量化、知识蒸馏、超参搜索和模型结构搜索等一系列模型压缩策略。 -对于业务用户,PaddleSlim提供完整的模型压缩解决方案,可用于图像分类、检测、分割等各种类型的视觉场景。 -同时也在持续探索NLP领域模型的压缩方案。另外,PaddleSlim提供且在不断完善各种压缩策略在经典开源任务的benchmark, +对于业务用户,PaddleSlim 提供完整的模型压缩解决方案,可用于图像分类、检测、分割等各种类型的视觉场景。 +同时也在持续探索 NLP 领域模型的压缩方案。另外,PaddleSlim 提供且在不断完善各种压缩策略在经典开源任务的 benchmark, 以便业务用户参考。 -对于模型压缩算法研究者或开发者,PaddleSlim提供各种压缩策略的底层辅助接口,方便用户复现、调研和使用最新论文方法。 -PaddleSlim会从底层能力、技术咨询合作和业务场景等角度支持开发者进行模型压缩策略相关的创新工作。 +对于模型压缩算法研究者或开发者,PaddleSlim 提供各种压缩策略的底层辅助接口,方便用户复现、调研和使用最新论文方法。 +PaddleSlim 会从底层能力、技术咨询合作和业务场景等角度支持开发者进行模型压缩策略相关的创新工作。 ## 功能 @@ -26,7 +26,7 @@ PaddleSlim会从底层能力、技术咨询合作和业务场景等角度支持 - 神经网络结构自动搜索(NAS) - 支持基于进化算法的轻量神经网络结构自动搜索 - - 支持One-Shot网络结构自动搜索 + - 支持 One-Shot 网络结构自动搜索 - 支持 FLOPS / 硬件延时约束 - 支持多平台模型延时评估 - 支持用户自定义搜索算法和搜索空间 @@ -43,13 +43,13 @@ pip install paddleslim -i https://pypi.org/simple ## 使用 -- [快速开始](https://paddleslim.readthedocs.io/zh_CN/develop/quick_start/index.html):通过简单示例介绍如何快速使用PaddleSlim。 -- [进阶教程](https://paddleslim.readthedocs.io/zh_CN/develop/tutorials/index.html):PaddleSlim高阶教程。 +- [快速开始](https://paddleslim.readthedocs.io/zh_CN/develop/quick_start/index.html):通过简单示例介绍如何快速使用 PaddleSlim。 +- [进阶教程](https://paddleslim.readthedocs.io/zh_CN/develop/tutorials/index.html):PaddleSlim 高阶教程。 - [模型库](https://paddleslim.readthedocs.io/zh_CN/develop/model_zoo.html):各个压缩策略在图像分类、目标检测和图像语义分割模型上的实验结论,包括模型精度、预测速度和可供下载的预训练模型。 -- 
[API文档](https://paddleslim.readthedocs.io/zh_CN/develop/api_cn/index.html) -- [Paddle检测库](https://github.com/PaddlePaddle/PaddleDetection/tree/master/slim):介绍如何在检测库中使用PaddleSlim。 -- [Paddle分割库](https://github.com/PaddlePaddle/PaddleSlim/tree/develop):介绍如何在分割库中使用PaddleSlim。 -- [PaddleLite](https://paddlepaddle.github.io/Paddle-Lite/):介绍如何使用预测库PaddleLite部署PaddleSlim产出的模型。 +- [API 文档](https://paddleslim.readthedocs.io/zh_CN/develop/api_cn/index.html) +- [Paddle 检测库](https://github.com/PaddlePaddle/PaddleDetection/tree/master/slim):介绍如何在检测库中使用 PaddleSlim。 +- [Paddle 分割库](https://github.com/PaddlePaddle/PaddleSlim/tree/develop):介绍如何在分割库中使用 PaddleSlim。 +- [PaddleLite](https://paddlepaddle.github.io/Paddle-Lite/):介绍如何使用预测库 PaddleLite 部署 PaddleSlim 产出的模型。 ## 部分压缩策略效果 @@ -60,8 +60,8 @@ pip install paddleslim -i https://pypi.org/simple |压缩策略 |精度收益(baseline: 70.91%) |模型大小(baseline: 17.0M)| |:---:|:---:|:---:| | 知识蒸馏(ResNet50)| **+1.06%** |-| -| 知识蒸馏(ResNet50) + int8量化训练 |**+1.10%**| **-71.76%**| -| 剪裁(FLOPs-50%) + int8量化训练|**-1.71%**|**-86.47%**| +| 知识蒸馏(ResNet50) + int8 量化训练 |**+1.10%**| **-71.76%**| +| 剪裁(FLOPs-50%) + int8 量化训练|**-1.71%**|**-86.47%**| ### 图像检测模型 @@ -86,7 +86,7 @@ pip install paddleslim -i https://pypi.org/simple 数据:ImageNet2012; 模型:MobileNetV2 -|硬件环境 | 推理耗时 | Top1准确率(baseline:71.90%) | +|硬件环境 | 推理耗时 | Top1 准确率(baseline:71.90%) | |:---------------:|:---------:|:--------------------:| | RK3288 | **-23%** | +0.07% | | Android cellphone | **-20%** | +0.16% | diff --git a/docs/guides/jit/basic_usage_cn.md b/docs/guides/jit/basic_usage_cn.md index a024e8a3b00..01c6bc16560 100644 --- a/docs/guides/jit/basic_usage_cn.md +++ b/docs/guides/jit/basic_usage_cn.md @@ -11,7 +11,7 @@ + 静态图编程: 采用先编译后执行的方式。需先在代码中预定义完整的神经网络结构,飞桨框架会将神经网络描述为 Program 的数据结构,并对 Program 进行编译优化,再调用执行器获得计算结果。 -动态图编程体验更佳、更易调试,但是因为采用 Python 实时执行的方式,开销较大,在性能方面与 C++ 有一定差距;静态图调试难度大,但是将前端 Python 编写的神经网络预定义为 Program描述,转到 C++ 端重新解析执行,脱离了 Python 依赖,往往执行性能更佳,并且预先拥有完整网络结构也更利于全局优化。 +动态图编程体验更佳、更易调试,但是因为采用 Python 
实时执行的方式,开销较大,在性能方面与 C++ 有一定差距;静态图调试难度大,但是将前端 Python 编写的神经网络预定义为 Program 描述,转到 C++ 端重新解析执行,脱离了 Python 依赖,往往执行性能更佳,并且预先拥有完整网络结构也更利于全局优化。 想了解动态图和静态图的详细对比介绍,可参见 [动态图和静态图的差异](https://www.paddlepaddle.org.cn/tutorials/projectdetail/4047189)。 @@ -51,7 +51,7 @@ + **如果发现模型训练 CPU 向 GPU 调度不充分的情况下。** - 如下是模型训练时执行单个 step 的 timeline 示意图,框架通过 CPU 调度底层 Kernel 计算,在某些情况下,如果 CPU 调度时间过长,会导致 GPU 利用率不高(可终端执行watch -n 1 nvidia-smi观察)。 + 如下是模型训练时执行单个 step 的 timeline 示意图,框架通过 CPU 调度底层 Kernel 计算,在某些情况下,如果 CPU 调度时间过长,会导致 GPU 利用率不高(可终端执行 watch -n 1 nvidia-smi 观察)。
@@ -244,7 +244,7 @@ class LinearNet(nn.Layer): @paddle.jit.to_static def forward(self, x, label=None): out = self._linear(x) - # 不规范写法,forward中包括对loss进行计算 + # 不规范写法,forward 中包括对 loss 进行计算 if label: loss = nn.functional.cross_entropy(out, label) avg_loss = nn.functional.mean(loss) @@ -266,7 +266,7 @@ class LinearNet(nn.Layer): def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) - # 规范写法,forward中仅实现预测功能 + # 规范写法,forward 中仅实现预测功能 @paddle.jit.to_static def forward(self, x): return self._linear(x) @@ -320,9 +320,9 @@ class LinearNet(nn.Layer): 接前文动转静训练的示例代码,训练完成后,使用 ``paddle.jit.save`` 对模型和参数进行存储: ```python -# 如果保存模型用于推理部署,则需切换eval()模式 +# 如果保存模型用于推理部署,则需切换 eval()模式 # layer.eval() -# 使用paddle.jit.save保存训练好的静态图模型 +# 使用 paddle.jit.save 保存训练好的静态图模型 path = "example.model/linear" paddle.jit.save(layer, path) ``` @@ -341,7 +341,7 @@ linear.pdmodel // 存放模型的网络结构 linear.pdiparams.info // 存放和参数状态有关的额外信息 ``` -导出的模型可用于在云、边、端不同的硬件环境中部署,可以支持不同语言环境部署,如 C++、Java、Python等。飞桨提供了服务器端部署的 Paddle Inference、移动端/IoT端部署的 Paddle Lite、服务化部署的 Paddle Serving 等,以实现模型的快速部署上线。具体介绍可参见 [推理部署](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/infer/index_cn.html) 章节。 +导出的模型可用于在云、边、端不同的硬件环境中部署,可以支持不同语言环境部署,如 C++、Java、Python 等。飞桨提供了服务器端部署的 Paddle Inference、移动端/IoT 端部署的 Paddle Lite、服务化部署的 Paddle Serving 等,以实现模型的快速部署上线。具体介绍可参见 [推理部署](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/infer/index_cn.html) 章节。 #### 3.2.2 模型加载样例 @@ -372,7 +372,7 @@ EPOCH_NUM = 4 IMAGE_SIZE = 784 CLASS_NUM = 10 -# 载入paddle.jit.save保存的模型 +# 载入 paddle.jit.save 保存的模型 path = "example.model/linear" loaded_layer = paddle.jit.load(path) ``` @@ -452,11 +452,11 @@ class LinearNet(nn.Layer): # 创建一个网络 layer = LinearNet() -# 载入paddle.jit.save保存好的参数 +# 载入 paddle.jit.save 保存好的参数 path = "example.model/linear" state_dict = paddle.load(path) -# 将加载后的参数赋给layer并进行预测 +# 将加载后的参数赋给 layer 并进行预测 layer.set_state_dict(state_dict, use_structured_name=False) layer.eval() x 
= paddle.randn([1, IMAGE_SIZE], 'float32') @@ -519,7 +519,7 @@ loss_fn = nn.CrossEntropyLoss() # 设置优化器 adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) -# 构建DataLoader数据读取器 +# 构建 DataLoader 数据读取器 dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) loader = paddle.io.DataLoader(dataset, batch_size=BATCH_SIZE, @@ -535,7 +535,7 @@ train(layer, loader, loss_fn, adam) 动态图模型训练完成后,保存为静态图模型用于推理部署,主要包括三个步骤: -1. **切换 ``eval()`` 模式:** 类似 Dropout 、LayerNorm 等接口在 train() 和 eval() 的行为存在较大的差异,在模型导出前,请务必确认模型已切换到正确的模式,否则导出的模型在预测阶段可能出现输出结果不符合预期的情况。用于推理部署切换到eval()模式,用于后续训练调优则切换到train()模式。 +1. **切换 ``eval()`` 模式:** 类似 Dropout 、LayerNorm 等接口在 train() 和 eval() 的行为存在较大的差异,在模型导出前,请务必确认模型已切换到正确的模式,否则导出的模型在预测阶段可能出现输出结果不符合预期的情况。用于推理部署切换到 eval()模式,用于后续训练调优则切换到 train()模式。 2. **构造 ``InputSpec`` 信息:** ``InputSpec`` 用于表示模型输入数据的 shape、dtype、name 信息,是辅助动静转换的必要描述信息。这是由于静态图模型在调用执行器前并不执行实际操作,因此也并不读入实际数据,需要设置 “占位符” 表示输入数据。详细请参见 [InputSpec 的用法介绍](#35) 。 @@ -544,11 +544,11 @@ train(layer, loader, loss_fn, adam) ```python from paddle.static import InputSpec - # 1.切换eval()模式 + # 1.切换 eval()模式 layer.eval() - # 2. 构造InputSpec信息 + # 2. 
构造 InputSpec 信息 input_spec = InputSpec([None, 784], 'float32', 'x') - # 3.调用paddle.jit.save接口转为静态图模型 + # 3.调用 paddle.jit.save 接口转为静态图模型 path = "example.dy_model/linear" paddle.jit.save( layer=layer, @@ -584,7 +584,7 @@ EPOCH_NUM = 4 IMAGE_SIZE = 784 CLASS_NUM = 10 -# 载入paddle.jit.save保存的模型 +# 载入 paddle.jit.save 保存的模型 path = "example.model/linear" loaded_layer = paddle.jit.load(path) ``` @@ -619,7 +619,7 @@ pred = loaded_layer(x) def __init__(self): super(LinearNet, self).__init__() self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) - # 输入数据是动态输入(shape中有一个维度是可变的),因此需要添加InputSpec + # 输入数据是动态输入(shape 中有一个维度是可变的),因此需要添加 InputSpec @paddle.jit.to_static(input_spec=[InputSpec(shape=[None, 784], dtype='float32')]) def forward(self, x): return self._linear(x) @@ -636,7 +636,7 @@ pred = loaded_layer(x) inps = paddle.rand([3, 6]) origin = fun(inps) - # 将函数对应的Program结构进行保存 + # 将函数对应的 Program 结构进行保存 paddle.jit.save( fun, path, @@ -645,7 +645,7 @@ pred = loaded_layer(x) shape=[None, 6], dtype='float32', name='x'), ]) - # 载入保存后的fun并执行 + # 载入保存后的 fun 并执行 load_func = paddle.jit.load(path) load_result = load_func(inps) ``` @@ -666,12 +666,12 @@ pred = loaded_layer(x) self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM) self._linear_2 = nn.Linear(IMAGE_SIZE, CLASS_NUM) - # 装饰forward方法,InputSpec指定为None + # 装饰 forward 方法,InputSpec 指定为 None @paddle.jit.to_static(input_spec=[InputSpec(shape=[None, IMAGE_SIZE], dtype='float32')]) def forward(self, x): return self._linear(x) - # 装饰需要保存的非forward方法,InputSpec指定为None + # 装饰需要保存的非 forward 方法,InputSpec 指定为 None @paddle.jit.to_static(input_spec=[InputSpec(shape=[None, IMAGE_SIZE], dtype='float32')]) def another_forward(self, x): return self._linear_2(x) @@ -689,7 +689,7 @@ pred = loaded_layer(x) + 该场景下保存的模型命名规则如下: - + forward 的模型名字为:**模型名+后缀** ,其他函数的模型名字为:**模型名+函数名+后缀** 。每个函数有各自的 pdmodel 和 pdiparams 的文件,所有函数共用 `pdiparams.info` 。上述示例代码将在 `example.model` 文件夹下产生5个文件: ``linear.another_forward.pdiparams`` 、 ``linear.pdiparams`` 、 ``linear.pdmodel`` 、 
``linear.another_forward.pdmodel`` 、``linear.pdiparams.info`` 。 + + forward 的模型名字为:**模型名+后缀** ,其他函数的模型名字为:**模型名+函数名+后缀** 。每个函数有各自的 pdmodel 和 pdiparams 的文件,所有函数共用 `pdiparams.info` 。上述示例代码将在 `example.model` 文件夹下产生 5 个文件: ``linear.another_forward.pdiparams`` 、 ``linear.pdiparams`` 、 ``linear.pdmodel`` 、 ``linear.another_forward.pdmodel`` 、``linear.pdiparams.info`` 。 ### 3.5 ``InputSpec`` 的用法介绍 @@ -743,7 +743,7 @@ print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x) import paddle from paddle.static import InputSpec # 省略动态图训练代码 -# 保存时将输入数据传入input_spec参数 +# 保存时将输入数据传入 input_spec 参数 paddle.jit.save( layer=layer, path=path, @@ -784,7 +784,7 @@ class SimpleNet(Layer): def __init__(self): super(SimpleNet, self).__init__() self.linear = paddle.nn.Linear(10, 3) - # 在装饰器中调用InputSpec + # 在装饰器中调用 InputSpec @to_static(input_spec=[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')]) def forward(self, x, y): out = self.linear(x) @@ -917,9 +917,9 @@ paddle.jit.save(net, path='./simple_net') 如下是一个 ResNet50 模型动转静训练时,通过在 ``to_static`` 函数中配置 ``build_strategy`` 参数来开启算子融合 ``fuse_elewise_add_act_ops`` 和 ``enable_addto`` 图优化策略的使用样例。不同的模型可应用的优化策略不同,比如算子融合策略一般与模型中用到的 API 有关系: -+ 若存在 elementwise_ad d后跟 relu等激活函数,则可以尝试开启 ``fuse_elewise_add_act_ops`` ++ 若存在 elementwise_ad d 后跟 relu 等激活函数,则可以尝试开启 ``fuse_elewise_add_act_ops`` -+ 若存在 relu后跟 depthwise_conv2 函数,则可以尝试开启 ``fuse_relu_depthwise_conv`` ++ 若存在 relu 后跟 depthwise_conv2 函数,则可以尝试开启 ``fuse_relu_depthwise_conv`` + 若存在较多的 conv2dAPI 的调用,则可以尝试开启 ``enable_addto`` ,更多策略开关可以参考 [BuildStrategy](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/static/BuildStrategy_cn.html#buildstrategy) 接口文档。 @@ -979,7 +979,7 @@ paddle.jit.save(net, path='./simple_net') # 设置优化器 adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters()) - # 构建DataLoader数据读取器 + # 构建 DataLoader 数据读取器 dataset = RandomDataset(BATCH_NUM * BATCH_SIZE) loader = paddle.io.DataLoader(dataset, batch_size=BATCH_SIZE, @@ -995,7 +995,7 
@@ paddle.jit.save(net, path='./simple_net') **自动混合精度(Automatic Mixed Precision,AMP)** 训练的方法,可在模型训练时,自动为算子选择合适的数据计算精度(float32 或 float16 / bfloat16),在保持训练精度(accuracy)不损失的条件下,能够加速训练。 -如下是动态图开启 AMP 训练后,执行单个 step 的 timeline 示意图。相对于FP32训练,开启AMP后,每个 GPU 的 Kernel 计算效率进一步提升,耗时更短(图中蓝框更窄了),但对 CPU 端的调度性能要求也更高了。 +如下是动态图开启 AMP 训练后,执行单个 step 的 timeline 示意图。相对于 FP32 训练,开启 AMP 后,每个 GPU 的 Kernel 计算效率进一步提升,耗时更短(图中蓝框更窄了),但对 CPU 端的调度性能要求也更高了。
diff --git a/docs/guides/jit/basic_usage_en.md b/docs/guides/jit/basic_usage_en.md index 161a9e08850..aea2627656f 100644 --- a/docs/guides/jit/basic_usage_en.md +++ b/docs/guides/jit/basic_usage_en.md @@ -1,9 +1,9 @@ # 基本用法 -## 一、 @to_static概览 +## 一、 @to_static 概览 -动静转换(@to_static)通过解析 Python代码(抽象语法树,下简称:AST) 实现一行代码即可转为静态图功能,即只需在待转化的函数前添加一个装饰器 ``@paddle.jit.to_static`` 。 +动静转换(@to_static)通过解析 Python 代码(抽象语法树,下简称:AST) 实现一行代码即可转为静态图功能,即只需在待转化的函数前添加一个装饰器 ``@paddle.jit.to_static`` 。 如下是一个使用 @to_static 装饰器的 ``Model`` 示例: @@ -83,7 +83,7 @@ class Linear(...): with param_guard(self._parameters), param_guard(self._buffers): # ... forward_pre_hook 逻辑 - outputs = self.forward(*inputs, **kwargs) # 此处为forward函数 + outputs = self.forward(*inputs, **kwargs) # 此处为 forward 函数 # ... forward_post_hook 逻辑 @@ -328,7 +328,7 @@ def depend_tensor_if(x): out = convert_ifelse(paddle.mean(x) > 5.0, true_fn_0, false_fn_0, (x,), (x,), (out,)) ^ ^ ^ ^ ^ ^ ^ ^ | | | | | | | | - 输出 convert_ifelse 判断条件 true分支 false分支 分支输入 分支输入 输出 + 输出 convert_ifelse 判断条件 true 分支 false 分支 分支输入 分支输入 输出 ``` diff --git a/docs/guides/jit/case_analysis_cn.md b/docs/guides/jit/case_analysis_cn.md index a745f9255b5..81595f75b7b 100644 --- a/docs/guides/jit/case_analysis_cn.md +++ b/docs/guides/jit/case_analysis_cn.md @@ -77,7 +77,7 @@ -> 注:InputSpec 接口的高阶用法,请参看 [【使用InputSpec指定模型输入Tensor信息】](./basic_usage_cn.html#inputspec) +> 注:InputSpec 接口的高阶用法,请参看 [【使用 InputSpec 指定模型输入 Tensor 信息】](./basic_usage_cn.html#inputspec) ## 三、内嵌 Numpy 操作? 
@@ -177,7 +177,7 @@ class SimpleNet(paddle.nn.Layer): out = self.linear(x) out = out + y - out = out * self.mask # <--- 省去重复的assign_op,性能更佳 + out = out * self.mask # <--- 省去重复的 assign_op,性能更佳 return out ``` @@ -273,9 +273,9 @@ jit.save(mode, model_path) 此 flag 继承自 ``nn.Layer`` ,因此可通过 ``model.train()`` 和 ``model.eval()`` 来全局切换所有 sublayers 的分支状态。 -## 七、非forward函数导出 +## 七、非 forward 函数导出 -`@to_static` 与 `jit.save` 接口搭配也支持导出非forward 的其他函数,具体使用方式如下: +`@to_static` 与 `jit.save` 接口搭配也支持导出非 forward 的其他函数,具体使用方式如下: ```python class SimpleNet(paddle.nn.Layer): @@ -340,7 +340,7 @@ def false_fn_0(out): out = convert_ifelse(paddle.mean(x) > 5.0, true_fn_0, false_fn_0, (x,), (x,), (out,)) ^ ^ ^ ^ ^ ^ ^ ^ | | | | | | | | -输出 convert_ifelse 判断条件 true分支 false分支 分支输入 分支输入 输出 +输出 convert_ifelse 判断条件 true 分支 false 分支 分支输入 分支输入 输出 ``` @@ -422,7 +422,7 @@ def forward(x): ```python def forward(self, x): - bs = paddle.shape(x)[0] # <---- x.shape[0] 表示 batch_size,动态shape + bs = paddle.shape(x)[0] # <---- x.shape[0] 表示 batch_size,动态 shape outs = [] for i in range(bs): outs.append(x) diff --git a/docs/guides/jit/debugging_cn.md b/docs/guides/jit/debugging_cn.md index 2fc3cf77d86..9b25a208ca2 100644 --- a/docs/guides/jit/debugging_cn.md +++ b/docs/guides/jit/debugging_cn.md @@ -24,16 +24,16 @@ if __name__ == '__main__': -报错日志从上到下一共可以分为4个部分: +报错日志从上到下一共可以分为 4 个部分: -- **原生的Python报错栈**:如1中的前两行所示,表示`/workspace/Paddle/run_dy2stat_error.py`文件第145行调用的函数`train()`导致的后续一系列报错。 +- **原生的 Python 报错栈**:如 1 中的前两行所示,表示`/workspace/Paddle/run_dy2stat_error.py`文件第 145 行调用的函数`train()`导致的后续一系列报错。 - **动转静报错栈起始标志位**:`In transformed code`,表示动转静报错信息栈,指运行转换后的代码时的报错信息。实际场景中,可以直接搜索`In transformed code`关键字,从这一行以下开始看报错日志即可。 -- **用户代码报错栈**:隐藏了框架层面的无用的报错信息,突用户代码报错栈。我们在出错代码下添加了波浪线和HERE指示词来提示具体的出错位置,并扩展了出错行代码上下文,帮助你快速定位出错位置。如上图3中所示,可以看出最后出错的用户代码为`x = paddle.reshape(x, shape=[1, two])`。 +- **用户代码报错栈**:隐藏了框架层面的无用的报错信息,突用户代码报错栈。我们在出错代码下添加了波浪线和 HERE 指示词来提示具体的出错位置,并扩展了出错行代码上下文,帮助你快速定位出错位置。如上图 3 中所示,可以看出最后出错的用户代码为`x = paddle.reshape(x, 
shape=[1, two])`。 - **框架层面报错信息**:提供了静态图组网报错信息。一般可以直接根据最后三行的信息,定位具体是在生成哪个 OpDesc 时报的错误,一般是与执行此 Op 的 infershape 逻辑报的错误。 -如上报错信息表明是reshape Op出错,出错原因是tensor x的shape为[3],将其reshape为[1, 2]是不被允许的。 +如上报错信息表明是 reshape Op 出错,出错原因是 tensor x 的 shape 为[3],将其 reshape 为[1, 2]是不被允许的。 **NOTE**:在某些场景下,会识别报错类型并给出修改建议,如下图所示。`Revise suggestion`下面是出错的排查建议,你可以根据建议对代码进行排查修改。 @@ -41,8 +41,8 @@ if __name__ == '__main__': ### 1.2 报错信息定制化展示 #### 1.2.1 未经动转静报错模块处理的原生报错信息 -若你想查看 Paddle 原生报错信息栈,即未被动转静模块处理过的报错信息栈,可以设置环境变量 `TRANSLATOR_DISABLE_NEW_ERROR=1` 关闭动转静报错模块。该环境变量默认值为0,表示默认开启动转静报错模块。 -在1.1小节的代码中添加下面的代码即可以查看原生的报错信息: +若你想查看 Paddle 原生报错信息栈,即未被动转静模块处理过的报错信息栈,可以设置环境变量 `TRANSLATOR_DISABLE_NEW_ERROR=1` 关闭动转静报错模块。该环境变量默认值为 0,表示默认开启动转静报错模块。 +在 1.1 小节的代码中添加下面的代码即可以查看原生的报错信息: ```python import os os.environ["TRANSLATOR_DISABLE_NEW_ERROR"] = '1' @@ -52,17 +52,17 @@ os.environ["TRANSLATOR_DISABLE_NEW_ERROR"] = '1' #### 1.2.2 C++报错栈 -默认会隐藏C++报错栈,你可设置C++端的环境变量 `FLAGS_call_stack_level=2` 来显示 C++ 报错栈信息。如可以在终端输入`export FLAGS_call_stack_level=2`来进行设置,之后可以看到C++端的报错栈: +默认会隐藏 C++报错栈,你可设置 C++端的环境变量 `FLAGS_call_stack_level=2` 来显示 C++ 报错栈信息。如可以在终端输入`export FLAGS_call_stack_level=2`来进行设置,之后可以看到 C++端的报错栈: ## 二、调试方法 在调试前**请确保转换前的动态图代码能够成功运行**,下面介绍动转静中推荐的几种调试方法。 -### 2.1 pdb调试 -pdb是Python中的一个模块,该模块定义了一个交互式Pyhton源代码调试器。它支持在源码行间设置断点和单步执行,列出源代码和变量,运行Python代码等。 +### 2.1 pdb 调试 +pdb 是 Python 中的一个模块,该模块定义了一个交互式 Python 源代码调试器。它支持在源码行间设置断点和单步执行,列出源代码和变量,运行 Python 代码等。 #### 2.1.1 调试步骤 -- step1:在想要进行调试的代码前插入`import pdb; pdb.set_trace()`开启pdb调试。 +- step1:在想要进行调试的代码前插入`import pdb; pdb.set_trace()`开启 pdb 调试。 ```python import paddle import numpy as np @@ -70,7 +70,7 @@ pdb是Python中的一个模块,该模块定义了一个交互式Pyhton源代 @paddle.jit.to_static def func(x): x = paddle.to_tensor(x) - import pdb; pdb.set_trace() # <------ 开启pdb调试 + import pdb; pdb.set_trace() # <------ 开启 pdb 调试 two = paddle.full(shape=[1], fill_value=2, dtype="int32") x = paddle.reshape(x, shape=[1, two]) return x @@ -78,14 +78,14 @@
func(np.ones([3]).astype("int32")) ``` -- step2:正常运行.py文件,在终端会出现下面类似结果,在`(Pdb)`位置后输入相应的pdb命令进行调试。 +- step2:正常运行.py 文件,在终端会出现下面类似结果,在`(Pdb)`位置后输入相应的 pdb 命令进行调试。 ``` > /tmp/tmpm0iw5b5d.py(9)func() -> two = paddle.full(shape=[1], fill_value=2, dtype='int32') (Pdb) ``` -- step3:在pdb交互模式下输入l、p等命令可以查看动转静后静态图相应的代码、变量,进而排查相关的问题。 +- step3:在 pdb 交互模式下输入 l、p 等命令可以查看动转静后静态图相应的代码、变量,进而排查相关的问题。 ``` > /tmp/tmpm0iw5b5d.py(9)func() -> two = paddle.full(shape=[1], fill_value=2, dtype='int32') @@ -108,10 +108,10 @@ pdb是Python中的一个模块,该模块定义了一个交互式Pyhton源代 -更多pdb使用使用方法可以查看pdb的[官方文档](https://docs.python.org/zh-cn/3/library/pdb.html) +更多 pdb 使用方法可以查看 pdb 的[官方文档](https://docs.python.org/zh-cn/3/library/pdb.html) ### 2.2 打印转换后的静态图代码 -你可以打印转换后的静态图代码,有2种方法: +你可以打印转换后的静态图代码,有 2 种方法: #### 2.2.1 set_code_level() 或 TRANSLATOR_CODE_LEVEL @@ -133,7 +133,7 @@ func(np.ones([1])) 此外,如果你想将转化后的代码也输出到 `sys.stdout` , 可以设置参数 `also_to_stdout` 为 True,否则将仅输出到 `sys.stderr`。 `set_code_level` 函数可以设置查看不同的 AST Transformer 转化后的代码,详情请见 [set_code_level](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/jit/set_code_level_cn.html)。 #### 2.2.2 被装饰后的函数的 code 属性 -如下代码中,装饰器@to_static会将函数 func 转化为一个类对象 StaticFunction,可以使用 StaticFunction 的 code 属性来获得转化后的代码。 +如下代码中,装饰器@to_static 会将函数 func 转化为一个类对象 StaticFunction,可以使用 StaticFunction 的 code 属性来获得转化后的代码。 ```python import paddle import numpy as np @@ -164,7 +164,7 @@ def func(x): return x ``` -### 2.3 使用print查看变量 +### 2.3 使用 print 查看变量 print 函数可以用来查看变量,该函数在动转静中会被转化。当仅打印 Paddle Tensor 时,实际运行时会被转换为 Paddle 算子 Print,否则仍然运行 print。 ```python @@ -175,9 +175,9 @@ import numpy as np def func(x): x = paddle.to_tensor(x) - # 打印x,x是Paddle Tensor,实际运行时会运行Paddle Print(x) + # 打印 x,x 是 Paddle Tensor,实际运行时会运行 Paddle Print(x) print(x) - # 打印注释,非Paddle Tensor,实际运行时仍运行print + # 打印注释,非 Paddle Tensor,实际运行时仍运行 print print("Here call print function.") if len(x) > 3: @@ -188,7 +188,7 @@ def func(x): func(np.ones([1])) ``` -运行后可以看到x的值: +运行后可以看到 x 的值: ``` Variable: assign_0.tmp_0 - lod: {}
@@ -199,7 +199,7 @@ Variable: assign_0.tmp_0 - data: [1] ``` ### 2.4 日志打印 -动转静在日志中记录了额外的调试信息,以帮助你了解动转静过程中函数是否被成功转换。 你可以调用 `paddle.jit.set_verbosity(level=0, also_to_stdout=False)` 或设置环境变量 `TRANSLATOR_VERBOSITY=level` 来设置日志详细等级,并查看不同等级的日志信息。目前,`level` 可以取值0-3: +动转静在日志中记录了额外的调试信息,以帮助你了解动转静过程中函数是否被成功转换。 你可以调用 `paddle.jit.set_verbosity(level=0, also_to_stdout=False)` 或设置环境变量 `TRANSLATOR_VERBOSITY=level` 来设置日志详细等级,并查看不同等级的日志信息。目前,`level` 可以取值 0-3: - 0: 无日志 - 1: 包括了动转静转化流程的信息,如转换前的源码、转换的可调用对象 @@ -224,7 +224,7 @@ def func(x): return x paddle.jit.set_verbosity(3) -# 或者设置os.environ["TRANSLATOR_VERBOSITY"] = '3' +# 或者设置 os.environ["TRANSLATOR_VERBOSITY"] = '3' func(np.ones([1])) ``` @@ -274,7 +274,7 @@ RuntimeError: (NotFound) Input("Filter") of ConvOp should not be null. **排查建议:** -- 代码层面,判断是否是上游使用了reshape导致 -1 的污染性传播 +- 代码层面,判断是否是上游使用了 reshape 导致 -1 的污染性传播 > 动态图由于执行时 shape 都是已知的,所以 reshape(x, [-1, 0, 128]) 是没有问题的。但静态图组网时都是编译期的 shape(可能为-1),因此使用 reshape 接口时,尽量减少 -1 的使用。 - 可以结合调试技巧,判断是否是某个 API 的输出 shape 在动静态图下有 diff 行为 @@ -304,7 +304,7 @@ RuntimeError: (NotFound) Input("Filter") of ConvOp should not be null. **排查建议:** -- 每个sublayer 是否继承了 nn.Layer +- 每个 sublayer 是否继承了 nn.Layer ### 3.5 Container 的使用建议 动态图下,提供了如下几种 container 的容器类: diff --git a/docs/guides/jit/grammar_list_cn.md b/docs/guides/jit/grammar_list_cn.md index 7e7d1b7376d..6963c2d2928 100644 --- a/docs/guides/jit/grammar_list_cn.md +++ b/docs/guides/jit/grammar_list_cn.md @@ -17,27 +17,27 @@ ## 二、语法支持速查列表 -|分类 |python语法 | 是否
支持 | 概要 | +|分类 |python 语法 | 是否
支持 | 概要 | |:---:|:---:|:---:|:---:| -|控制流| [if-else](#1) | 支持 | 自适应识别和转为静态图cond接口,或保持python if 执行 | -|| [while](#2) | 支持 |自适应识别和转为静态图while\_loop接口,或保持python while 执行 | -|| [for](#3) | 支持 | `for _ in x`语法支持对Tensor的迭代访问 | -|| [break
continue](#4)| 支持 | 支持循环中任意位置的break和continue | -|| [return](#4)| 支持 | 支持循环体中提前return | -|运算符| +,-,,/,\*, >, <, >= , <=, == | 支持 | 自适应识别和应用paddle的运算符重载 | -|| [and, or, not](#5) | 支持 | 1.如果运算符两个都是Tensor,会组网静态图。
2. 如果运算符都不是Tensor,那么使用原始python语义
3. 如果一个是Tensor一个是非Tensor,那么也会使用python语义,但是结果不会出错。 | -|| [类型转换运算符](#6) | 支持 | 自适应转换为paddle.cast 操作| -|Paddle shape| [Tensor.shape()](#9) | 部分支持 | 支持获取编译期shape信息,可能包含-1 | -|python函数类| [print(x)](#7) | 支持 | 自适应识别和转为静态图的PrintOp | -|| [len(x)](#) | 支持 | 支持返回Tensor编译期shape[0]的值 | +|控制流| [if-else](#1) | 支持 | 自适应识别和转为静态图 cond 接口,或保持 python if 执行 | +|| [while](#2) | 支持 |自适应识别和转为静态图 while\_loop 接口,或保持 python while 执行 | +|| [for](#3) | 支持 | `for _ in x`语法支持对 Tensor 的迭代访问 | +|| [break
continue](#4)| 支持 | 支持循环中任意位置的 break 和 continue | +|| [return](#4)| 支持 | 支持循环体中提前 return | +|运算符| +,-,,/,\*, >, <, >= , <=, == | 支持 | 自适应识别和应用 paddle 的运算符重载 | +|| [and, or, not](#5) | 支持 | 1.如果运算符两个都是 Tensor,会组网静态图。
2. 如果运算符都不是 Tensor,那么使用原始 python 语义
3. 如果一个是 Tensor 一个是非 Tensor,那么也会使用 python 语义,但是结果不会出错。 | +|| [类型转换运算符](#6) | 支持 | 自适应转换为 paddle.cast 操作| +|Paddle shape| [Tensor.shape()](#9) | 部分支持 | 支持获取编译期 shape 信息,可能包含-1 | +|python 函数类| [print(x)](#7) | 支持 | 自适应识别和转为静态图的 PrintOp | +|| [len(x)](#) | 支持 | 支持返回 Tensor 编译期 shape[0]的值 | || [lambda 表达式](#7) | 支持 | 等价转换 | || [函数调用其他函数](#7) | 支持 | 会对内部的函数递归地进行动转静 | || [函数递归调用](#7) | 不支持 | 递归调用不会终止 | -|| [list sort](#8) | 不支持 | list可能会被转化为TensorArray,故不支持此复杂操作 | -|报错异常相关| assert | 支持 | 自适应识别和转换为静态图Assert接口 | 无 | -|Python基本容器| [list](#8) | 部分支持 | 在控制流中转化为TensorArray,支持append,pop | +|| [list sort](#8) | 不支持 | list 可能会被转化为 TensorArray,故不支持此复杂操作 | +|报错异常相关| assert | 支持 | 自适应识别和转换为静态图 Assert 接口 | 无 | +|Python 基本容器| [list](#8) | 部分支持 | 在控制流中转化为 TensorArray,支持 append,pop | || [Dict](#8) | 支持 | 原生支持 | -|第三方库相关| numpy | 部分支持 | 仅支持numpy操作不需要导出到Program| 无 | +|第三方库相关| numpy | 部分支持 | 仅支持 numpy 操作不需要导出到 Program| 无 | @@ -51,34 +51,34 @@ **主要逻辑:** -在动态图中,模型代码是一行一行解释执行的,因此控制流的条件变量是在运行期确定的,意味着False的逻辑分支不会被执行。 +在动态图中,模型代码是一行一行解释执行的,因此控制流的条件变量是在运行期确定的,意味着 False 的逻辑分支不会被执行。 在静态图中,控制流通过`cond`接口实现。每个分支分别通过`true_fn` 和`false_fn` 来表示。 -当 `if`中的`条件`是`Tensor`时,动转静会自动把该`if-elif-else`语句转化为静态图的`cond` API语句。 +当 `if`中的`条件`是`Tensor`时,动转静会自动把该`if-elif-else`语句转化为静态图的`cond` API 语句。 -当`if`中的`条件`不是`Tensor`时,会按普通Python if-else的逻辑运行。 +当`if`中的`条件`不是`Tensor`时,会按普通 Python if-else 的逻辑运行。 ->注:当`条件`为`Tensor`时,只接受`numel()==1`的bool Tensor,否则会报错。 +>注:当`条件`为`Tensor`时,只接受`numel()==1`的 bool Tensor,否则会报错。 **错误修改指南:** 当模型代码中的`if-else`转换或执行报错时,可以参考如下方式排查: -- 使用`if`语句时,请确定`条件变量`是否是`Paddle.Tensor`类型。若不是Tensor类型,则**会当按照常规的python逻辑执行,而不会转化为静态图。** +- 使用`if`语句时,请确定`条件变量`是否是`Paddle.Tensor`类型。若不是 Tensor 类型,则**会当按照常规的 python 逻辑执行,而不会转化为静态图。** -- 若`if`中`条件变量`为Tensor类型,需确保其为boolean类型,且 `tensor.numel()`为1。 +- 若`if`中`条件变量`为 Tensor 类型,需确保其为 boolean 类型,且 `tensor.numel()`为 1。 -### 3.2 while循环 +### 3.2 while 循环 **主要逻辑:** -当 `while` 循环中的条件是Tensor时,动转静会把该while语句转化为静态图中的`while_loop` API语句,否则会按普通Python while运行。 +当 `while` 循环中的条件是 Tensor 
时,动转静会把该 while 语句转化为静态图中的`while_loop` API 语句,否则会按普通 Python while 运行。 -> 注:while循环条件中的Tensor须是numel为1的bool Tensor,否则会报错。 +> 注:while 循环条件中的 Tensor 须是 numel 为 1 的 bool Tensor,否则会报错。 **错误修改指南:** @@ -91,18 +91,18 @@ **主要逻辑:** -for循环按照使用方法的不同,语义有所不同。正常而言,for循环的使用分为如下种类: +for 循环按照使用方法的不同,语义有所不同。正常而言,for 循环的使用分为如下种类: -- `for _ in range(len) `循环:动转静会先将其转化为等价的Python while循环,然后按while循环的逻辑进行动静转换。 +- `for _ in range(len) `循环:动转静会先将其转化为等价的 Python while 循环,然后按 while 循环的逻辑进行动静转换。 -- `for _ in x `循环: 当x是Python容器或迭代器,则会用普通Python逻辑运行。当x是Tensor时,会转化为依次获取x[0], x[1], ... 。 -- `for idx, val in enumerate(x)`循环:当x是Python容器或迭代器,则会用普通Python逻辑运行。当x是Tensor时,idx会转化为依次0,1,...的1-D Tensor。val会转化为循环中每次对应拿出x[0], x[1], ... 。 +- `for _ in x `循环: 当 x 是 Python 容器或迭代器,则会用普通 Python 逻辑运行。当 x 是 Tensor 时,会转化为依次获取 x[0], x[1], ... 。 +- `for idx, val in enumerate(x)`循环:当 x 是 Python 容器或迭代器,则会用普通 Python 逻辑运行。当 x 是 Tensor 时,idx 会转化为依次 0,1,...的 1-D Tensor。val 会转化为循环中每次对应拿出 x[0], x[1], ... 。 -从实现而言,for循环最终会转化为对应的while语句,然后使用`WhileOp`来进行组网。 +从实现而言,for 循环最终会转化为对应的 while 语句,然后使用`WhileOp`来进行组网。 **使用样例**: -此处使用上述For的第二个用法举例。如果x是一个多维Tensor,则也是返回 x[0] ,x[1]. ... +此处使用上述 For 的第二个用法举例。如果 x 是一个多维 Tensor,则也是返回 x[0] ,x[1]. ... 
```python @@ -122,7 +122,7 @@ def ForTensor(x): **主要逻辑:** -目前的动转静支持for、while等循环中添加break,continue语句改变控制流,也支持在循环内部任意位置添加return语句,支持return不同长度tuple和不同类型的Tensor。 +目前的动转静支持 for、while 等循环中添加 break,continue 语句改变控制流,也支持在循环内部任意位置添加 return 语句,支持 return 不同长度 tuple 和不同类型的 Tensor。 **使用样例**: ```python @@ -135,9 +135,9 @@ def break_usage(x): break # <------- jump out of while loop when break ; return tensor_idx ``` -当时输入 x = Tensor([1.0, 2.0 ,3.0]) 时,输出的tensor_idx是 Tensor([1])。 +当时输入 x = Tensor([1.0, 2.0 ,3.0]) 时,输出的 tensor_idx 是 Tensor([1])。 -> 注:这里虽然idx是-1,但是返回值还是Tensor。因为`tensor_idx` 在 while loop中转化为了`Tensor`。 +> 注:这里虽然 idx 是-1,但是返回值还是 Tensor。因为`tensor_idx` 在 while loop 中转化为了`Tensor`。 ### 3.5 与、或、非 @@ -145,15 +145,15 @@ def break_usage(x): **主要逻辑:** -动转静模块支持将与、或、非三种运算符进行转换并动态判断,按照两个运算符x和y的不同,会有不同的语义: +动转静模块支持将与、或、非三种运算符进行转换并动态判断,按照两个运算符 x 和 y 的不同,会有不同的语义: -- 如果运算符两个都是Tensor,会组网静态图。 +- 如果运算符两个都是 Tensor,会组网静态图。 -- 如果运算符都不是Tensor,那么使用原始python语义。 +- 如果运算符都不是 Tensor,那么使用原始 python 语义。 -- 如果一个是Tensor,那么会走默认的python语义(最后还是tensor的运算符重载结果) +- 如果一个是 Tensor,那么会走默认的 python 语义(最后还是 tensor 的运算符重载结果) -> 注:若按照paddle的语义执行,与、或、非不再支持lazy模式,意味着两个表达式都会被eval,而不是按照x的值来判断是否对y进行eval。 +> 注:若按照 paddle 的语义执行,与、或、非不再支持 lazy 模式,意味着两个表达式都会被 eval,而不是按照 x 的值来判断是否对 y 进行 eval。 **使用样例**: @@ -167,9 +167,9 @@ def and(x, y): **主要逻辑:** -动态图中可以直接用Python的类型转化语法来转化Tensor类型。如若x是Tensor时,float(x)可以将x的类型转化为float。 +动态图中可以直接用 Python 的类型转化语法来转化 Tensor 类型。如若 x 是 Tensor 时,float(x)可以将 x 的类型转化为 float。 -动转静在运行时判断x是否是Tensor,若是,则在动转静时使用静态图`cast`接口转化相应的Tensor类型。 +动转静在运行时判断 x 是否是 Tensor,若是,则在动转静时使用静态图`cast`接口转化相应的 Tensor 类型。 **使用样例**: @@ -181,27 +181,27 @@ def float_convert(x): ``` -### 3.7 对一些python函数调用的转换 +### 3.7 对一些 python 函数调用的转换 **主要逻辑:** -动转静支持大部分的python函数调用。函数调用都会被统一包装成为`convert_xxx()`的形式,在函数运行期判别类型。若是Paddle类型,则转化为静态图的组网;反之则按照原来的python语义执行。常见函数如下: +动转静支持大部分的 python 函数调用。函数调用都会被统一包装成为`convert_xxx()`的形式,在函数运行期判别类型。若是 Paddle 类型,则转化为静态图的组网;反之则按照原来的 python 语义执行。常见函数如下: -- print函数 
-若参数是Tensor,在动态图模式中print(x)可以打印x的值。动转静时会转化为静态图的Print接口实现;若参数不是Tensor,则按照Python的print语句执行。 +- print 函数 +若参数是 Tensor,在动态图模式中 print(x)可以打印 x 的值。动转静时会转化为静态图的 Print 接口实现;若参数不是 Tensor,则按照 Python 的 print 语句执行。 - len 函数 -若x是Tensor,在动态图模式中len(x)可以获得x第0维度的长度。动转静时会转化为静态图shape接口,并返回shape的第0维。若x是个TensorArray,那么len(x)将会使用静态图接口`control_flow.array_length`返回TensorArray的长度;对于其他情况,会按照普通Python len函数运行。 +若 x 是 Tensor,在动态图模式中 len(x)可以获得 x 第 0 维度的长度。动转静时会转化为静态图 shape 接口,并返回 shape 的第 0 维。若 x 是个 TensorArray,那么 len(x)将会使用静态图接口`control_flow.array_length`返回 TensorArray 的长度;对于其他情况,会按照普通 Python len 函数运行。 - lambda 表达式 -动转静允许写带有Python lambda表达式的语句,并且我们会适当改写使得返回对应结果。 +动转静允许写带有 Python lambda 表达式的语句,并且我们会适当改写使得返回对应结果。 - 函数内再调用函数(非递归调用) 对于函数内调用其他函数的情况,动转静会对内部的函数递归地进行识别和转写,以实现在最外层函数只需加一次装饰器即可的效果。 **使用样例**: -这里以lambda函数为例,展示使用方法 +这里以 lambda 函数为例,展示使用方法 ```python def lambda_call(x): @@ -224,13 +224,13 @@ def recur_call(x): return recur_call(x * x) # < ------ 如果输入是 x = Tensor([2.0]) ,动态图输出为 Tensor([16]),静态图会出现调用栈溢出 ``` -### 3.8 List和Dict容器 +### 3.8 List 和 Dict 容器 **主要逻辑:** -- List : 若一个list的元素都是Tensor,动转静将其转化为TensorArray。静态图TensorArray仅支持append,pop,修改操作,其他list操作(如sort)暂不支持。若并非所有元素是Tensor,动转静会将其作为普通Python list运行。 +- List : 若一个 list 的元素都是 Tensor,动转静将其转化为 TensorArray。静态图 TensorArray 仅支持 append,pop,修改操作,其他 list 操作(如 sort)暂不支持。若并非所有元素是 Tensor,动转静会将其作为普通 Python list 运行。 -- Dict : 动转静支持原生的Python dict 语法。 +- Dict : 动转静支持原生的 Python dict 语法。 > 注:List 不支持多重嵌套和其他的操作。具体错误案例见下面**不支持用法**。 @@ -238,41 +238,41 @@ def recur_call(x): ```python def list_example(x, y): a = [ x ] # < ------ 支持直接创建 - a.append(x) # < ------ 支持调用append、pop操作 - a[1] = y # < ------ 支持下标修改append + a.append(x) # < ------ 支持调用 append、pop 操作 + a[1] = y # < ------ 支持下标修改 append return a[0] # < ------ 支持下标获取 ``` **不支持用法**: -- List的多重嵌套 +- List 的多重嵌套 - 如 `l = [[tensor1, tensor2], [tensor3, tensor4]] 
`,因为现在动转静将元素全是Tensor的list转化为TensorArray,但TensorArray还不支持多维数组,因此这种情况下,动转静无法正确运行。遇到这类情况我们建议尽量用一维list,或者自己使用PaddlePaddle的create_array,array_read,array_write接口编写为TensorArray。 + 如 `l = [[tensor1, tensor2], [tensor3, tensor4]] `,因为现在动转静将元素全是 Tensor 的 list 转化为 TensorArray,但 TensorArray 还不支持多维数组,因此这种情况下,动转静无法正确运行。遇到这类情况我们建议尽量用一维 list,或者自己使用 PaddlePaddle 的 create_array,array_read,array_write 接口编写为 TensorArray。 -- List的其他的操作,例如sort之类 +- List 的其他的操作,例如 sort 之类 ```python # 不支持的 list sort 操作 def sort_list(x, y): a = [x, y] - sort(a) # < ----- 不支持,因为转化为TensorArray之后不支持sort操作。但是支持简单的append,pop和按下标修改 + sort(a) # < ----- 不支持,因为转化为 TensorArray 之后不支持 sort 操作。但是支持简单的 append,pop 和按下标修改 return a ``` -### 3.9 paddle shape函数 +### 3.9 paddle shape 函数 **主要逻辑:** -动转静部分支持shape函数: +动转静部分支持 shape 函数: -- 【支持】当直接简单的使用shape时,可以正确获取tensor的shape。 +- 【支持】当直接简单的使用 shape 时,可以正确获取 tensor 的 shape。 -- 【不支持】当直接使用支持改变变量的shape后(例如reshape操作)调用其shape作为PaddlePaddle API参数。 +- 【不支持】当直接使用支持改变变量的 shape 后(例如 reshape 操作)调用其 shape 作为 PaddlePaddle API 参数。 - 如 `x = reshape(x, shape=shape_tensor) `,再使用 x.shape[0] 的值进行其他操作。这种情况会由于动态图和静态图的本质不同而使得动态图能够运行,但静态图运行失败。其原因是动态图情况下,API是直接返回运行结果,因此 x.shape 在经过reshape运算后是确定的。但是在转化为静态图后,因为静态图API只是组网,shape_tensor 的值在组网时是不知道的,所以 reshape 接口组网完,静态图并不知道 x.shape 的值。PaddlePaddle静态图用-1表示未知的shape值,此时 x 的shape每个维度会被设为-1,而不是期望的值。同理,类似expand等更改shape的API,其输出Tensor再调用shape也难以进行动转静。 + 如 `x = reshape(x, shape=shape_tensor) `,再使用 x.shape[0] 的值进行其他操作。这种情况会由于动态图和静态图的本质不同而使得动态图能够运行,但静态图运行失败。其原因是动态图情况下,API 是直接返回运行结果,因此 x.shape 在经过 reshape 运算后是确定的。但是在转化为静态图后,因为静态图 API 只是组网,shape_tensor 的值在组网时是不知道的,所以 reshape 接口组网完,静态图并不知道 x.shape 的值。PaddlePaddle 静态图用-1 表示未知的 shape 值,此时 x 的 shape 每个维度会被设为-1,而不是期望的值。同理,类似 expand 等更改 shape 的 API,其输出 Tensor 再调用 shape 也难以进行动转静。 **使用样例**: @@ -286,5 +286,5 @@ def get_shape(x): def error_shape(x, y): y = y.cast('int32') t = x.reshape(y) - return t.shape[0] # <------- 输入在x = Tensor([2.0, 1.0]),y = Tensor([2])时,动态图输出为2,而静态图输出为 -1 。不支持 + return t.shape[0] # <------- 输入在 x = 
Tensor([2.0, 1.0]),y = Tensor([2])时,动态图输出为 2,而静态图输出为 -1 。不支持 ``` diff --git a/docs/guides/jit/principle_cn.md b/docs/guides/jit/principle_cn.md index bb2f635d664..d0c7c94df9c 100644 --- a/docs/guides/jit/principle_cn.md +++ b/docs/guides/jit/principle_cn.md @@ -1,6 +1,6 @@ # 转换原理 -在飞桨框架内部,动转静模块在转换上主要包括对输入数据的 InputSpec 的处理,对函数调用的递归转写,对IfElse、For、While 控制语句的转写,以及 Layer 的 Parameters 和 Buffers 变量的转换。下面将介绍动转静模块的转换过程。 +在飞桨框架内部,动转静模块在转换上主要包括对输入数据的 InputSpec 的处理,对函数调用的递归转写,对 IfElse、For、While 控制语句的转写,以及 Layer 的 Parameters 和 Buffers 变量的转换。下面将介绍动转静模块的转换过程。 ## 一、 概述 @@ -176,7 +176,7 @@ def add_two(x, y): + **并非**所有动态图中的 ``if/for/while`` 都会转写为 ``cond_op/while_op`` + **只有**控制流的判断条件 **依赖了``Tensor``**(如 ``shape`` 或 ``value`` ),才会转写为对应 Op -这是因为模型代码中不依赖 Tensor 的 ``if/for/while`` 会正常按照 Python 原生的语法逻辑去执行;而依赖 Tensor 的 ``if/for/while`` 才会调用 [paddle.static.cond](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/static/nn/cond_cn.html#cond) 和 [paddle.static.while_loop](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/static/nn/while_loop_cn.html#while-loop) 两个飞桨的控制流API。 +这是因为模型代码中不依赖 Tensor 的 ``if/for/while`` 会正常按照 Python 原生的语法逻辑去执行;而依赖 Tensor 的 ``if/for/while`` 才会调用 [paddle.static.cond](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/static/nn/cond_cn.html#cond) 和 [paddle.static.while_loop](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/static/nn/while_loop_cn.html#while-loop) 两个飞桨的控制流 API。 #### 3.2.1 IfElse @@ -184,7 +184,7 @@ def add_two(x, y): **示例一:不依赖 Tensor 的控制流** -如下代码样例中的 `if label is not None`, 此判断只依赖于 `label` 是否为 `None`(存在性),并不依赖 `label` 的Tensor值(数值性),因此属于**不依赖 Tensor 的控制流**。 +如下代码样例中的 `if label is not None`, 此判断只依赖于 `label` 是否为 `None`(存在性),并不依赖 `label` 的 Tensor 值(数值性),因此属于**不依赖 Tensor 的控制流**。 ```python def not_depend_tensor_if(x, label=None): @@ -216,7 +216,7 @@ def not_depend_tensor_if(x, label=None): **示例二:依赖 Tensor 的控制流** -如下代码样例中的 `if paddle.mean(x) > 5`, 此判断直接依赖 
`paddle.mean(x)` 返回的Tensor值(数值性),因此属于**依赖 Tensor 的控制流**。 +如下代码样例中的 `if paddle.mean(x) > 5`, 此判断直接依赖 `paddle.mean(x)` 返回的 Tensor 值(数值性),因此属于**依赖 Tensor 的控制流**。 ```python def depend_tensor_if(x): @@ -255,7 +255,7 @@ def depend_tensor_if(x): out = convert_ifelse(paddle.mean(x) > 5.0, true_fn_0, false_fn_0, (x,), (x,), (out,)) ^ ^ ^ ^ ^ ^ ^ ^ | | | | | | | | - 输出 convert_ifelse 判断条件 true分支 false分支 分支输入 分支输入 输出 + 输出 convert_ifelse 判断条件 true 分支 false 分支 分支输入 分支输入 输出 ``` @@ -348,7 +348,7 @@ def depend_tensor_while(x): ## 四、 生成静态图的 Program 和 Parameters -静态图模式下,神经网络会被描述为 Program 的数据结构,并对 Program 进行编译优化,再调用执行器获得计算结果。另外静态图的变量是 Variable 类型(动态图是 Tensor类型),因此要运行静态图模型,需要生成静态图的 Program 和 Parameters。 +静态图模式下,神经网络会被描述为 Program 的数据结构,并对 Program 进行编译优化,再调用执行器获得计算结果。另外静态图的变量是 Variable 类型(动态图是 Tensor 类型),因此要运行静态图模型,需要生成静态图的 Program 和 Parameters。 ### 4.1 动态图 layer 生成 Program @@ -397,7 +397,7 @@ class Linear(...): with param_guard(self._parameters), param_guard(self._buffers): # ... forward_pre_hook 逻辑 - outputs = self.forward(*inputs, **kwargs) # 此处为forward函数 + outputs = self.forward(*inputs, **kwargs) # 此处为 forward 函数 # ... 
forward_post_hook 逻辑 diff --git a/docs/guides/model_convert/index_cn.rst b/docs/guides/model_convert/index_cn.rst index 675d360202b..3500bf32929 100644 --- a/docs/guides/model_convert/index_cn.rst +++ b/docs/guides/model_convert/index_cn.rst @@ -2,14 +2,14 @@ 模型迁移 ############### -您可以通过下面的内容,了解如何迁移模型到飞桨2.X: +您可以通过下面的内容,了解如何迁移模型到飞桨 2.X: -- `升级指南 <./update_cn.html>`_: 介绍飞桨框架2.0 的主要变化和如何升级到最新版飞桨。 +- `升级指南 <./update_cn.html>`_: 介绍飞桨框架 2.0 的主要变化和如何升级到最新版飞桨。 - `版本迁移工具 <./migration_cn.html>`_: 介绍飞桨框架版本转换工具的使用。 -- `兼容载入旧格式模型 <./load_old_format_model_cn.html>`_: 介绍飞桨框架如何在2.x版本加载1.x版本保存的模型。 -- `Paddle API映射表 <./paddle_api_mapping_cn.html>`_ : 说明 Paddle 1.8 版本与 Paddle 2.0 API对应关系。 -- `PyTorch API映射表 <./pytorch_api_mapping_cn.html>`_ : 说明 PyTorch 1.8 版本与 Paddle 2.0 API对应关系。 +- `兼容载入旧格式模型 <./load_old_format_model_cn.html>`_: 介绍飞桨框架如何在 2.x 版本加载 1.x 版本保存的模型。 +- `Paddle API 映射表 <./paddle_api_mapping_cn.html>`_ : 说明 Paddle 1.8 版本与 Paddle 2.0 API 对应关系。 +- `PyTorch API 映射表 <./pytorch_api_mapping_cn.html>`_ : 说明 PyTorch 1.8 版本与 Paddle 2.0 API 对应关系。 .. toctree:: :hidden: diff --git a/docs/guides/model_convert/load_old_format_model_cn.rst b/docs/guides/model_convert/load_old_format_model_cn.rst index b6f110a13b1..1743343ce4f 100644 --- a/docs/guides/model_convert/load_old_format_model_cn.rst +++ b/docs/guides/model_convert/load_old_format_model_cn.rst @@ -4,9 +4,9 @@ 兼容载入旧格式模型 ==================== -如果你是从飞桨框架1.x切换到2.1,曾经使用飞桨框架1.x的fluid相关接口保存模型或者参数,飞桨框架2.1也对这种情况进行了兼容性支持,包括以下几种情况。 +如果你是从飞桨框架 1.x 切换到 2.1,曾经使用飞桨框架 1.x 的 fluid 相关接口保存模型或者参数,飞桨框架 2.1 也对这种情况进行了兼容性支持,包括以下几种情况。 -飞桨1.x模型准备及训练示例,该示例为后续所有示例的前序逻辑: +飞桨 1.x 模型准备及训练示例,该示例为后续所有示例的前序逻辑: .. 
code-block:: python @@ -140,7 +140,7 @@ 1.2 仅载入参数 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -如果你仅需要从 ``paddle.fluid.io.save_inference_model`` 的存储结果中载入参数,以state_dict的形式配置到已有代码的模型中,可以使用 ``paddle.load`` 配合 ``**configs`` 载入。 +如果你仅需要从 ``paddle.fluid.io.save_inference_model`` 的存储结果中载入参数,以 state_dict 的形式配置到已有代码的模型中,可以使用 ``paddle.load`` 配合 ``**configs`` 载入。 如果你是按照 ``paddle.fluid.io.save_inference_model`` 的默认格式存储的,可以按照如下方式载入(接前述示例): @@ -167,16 +167,16 @@ load_param_dict = paddle.load(model_path, params_filename="__params__") .. note:: - 一般预测模型不会存储优化器Optimizer的参数,因此此处载入的仅包括模型本身的参数。 + 一般预测模型不会存储优化器 Optimizer 的参数,因此此处载入的仅包括模型本身的参数。 .. note:: - 由于 ``structured_name`` 是动态图下独有的变量命名方式,因此从静态图存储结果载入的state_dict在配置到动态图的Layer中时,需要配置 ``Layer.set_state_dict(use_structured_name=False)`` 。 + 由于 ``structured_name`` 是动态图下独有的变量命名方式,因此从静态图存储结果载入的 state_dict 在配置到动态图的 Layer 中时,需要配置 ``Layer.set_state_dict(use_structured_name=False)`` 。 2 从 ``paddle.fluid.save`` 存储结果中载入参数 ---------------------------------------------------------------------------- - ``paddle.fluid.save`` 的存储格式与2.x动态图接口 ``paddle.save`` 存储格式是类似的,同样存储了dict格式的参数,因此可以直接使用 ``paddle.load`` 载入state_dict,但需要注意不能仅传入保存的路径,而要传入保存参数的文件名,示例如下(接前述示例): + ``paddle.fluid.save`` 的存储格式与 2.x 动态图接口 ``paddle.save`` 存储格式是类似的,同样存储了 dict 格式的参数,因此可以直接使用 ``paddle.load`` 载入 state_dict,但需要注意不能仅传入保存的路径,而要传入保存参数的文件名,示例如下(接前述示例): .. code-block:: python @@ -192,13 +192,13 @@ .. 
note:: - 由于 ``paddle.fluid.save`` 接口原先在静态图模式下的定位是存储训练时参数,或者说存储Checkpoint,故尽管其同时存储了模型结构,目前也暂不支持从 ``paddle.fluid.save`` 的存储结果中同时载入模型和参数,后续如有需求再考虑支持。 + 由于 ``paddle.fluid.save`` 接口原先在静态图模式下的定位是存储训练时参数,或者说存储 Checkpoint,故尽管其同时存储了模型结构,目前也暂不支持从 ``paddle.fluid.save`` 的存储结果中同时载入模型和参数,后续如有需求再考虑支持。 3 从 ``paddle.fluid.io.save_params/save_persistables`` 保存结果中载入参数 ---------------------------------------------------------------------------- -这两个接口在飞桨1.x版本时,已经不再推荐作为存储模型参数的接口使用,故并未继承至飞桨2.x,之后也不会再推荐使用这两个接口存储参数。 +这两个接口在飞桨 1.x 版本时,已经不再推荐作为存储模型参数的接口使用,故并未继承至飞桨 2.x,之后也不会再推荐使用这两个接口存储参数。 对于使用这两个接口存储参数兼容载入的支持,分为两种情况,下面以 ``paddle.fluid.io.save_params`` 接口为例介绍相关使用方法: @@ -220,9 +220,9 @@ 3.2 指定了参数存储的文件,将所有参数存储至单个文件中 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -将所有参数存储至单个文件中会导致存储结果中丢失Tensor名和Tensor数据之间的映射关系,因此这部分丢失的信息需要用户传入进行补足。为了确保正确性,这里不仅要传入Tensor的name列表,同时要传入Tensor的shape和dtype等描述信息,通过检查和存储数据的匹配性确保严格的正确性,这导致载入数据的恢复过程变得比较复杂,仍然需要一些飞桨1.x的概念支持。后续如果此项需求较为普遍,飞桨将会考虑将该项功能兼容支持到 ``paddle.load`` 中,但由于信息丢失而导致的使用复杂性仍然是存在的,因此建议你避免仅使用这两个接口存储参数。 +将所有参数存储至单个文件中会导致存储结果中丢失 Tensor 名和 Tensor 数据之间的映射关系,因此这部分丢失的信息需要用户传入进行补足。为了确保正确性,这里不仅要传入 Tensor 的 name 列表,同时要传入 Tensor 的 shape 和 dtype 等描述信息,通过检查和存储数据的匹配性确保严格的正确性,这导致载入数据的恢复过程变得比较复杂,仍然需要一些飞桨 1.x 的概念支持。后续如果此项需求较为普遍,飞桨将会考虑将该项功能兼容支持到 ``paddle.load`` 中,但由于信息丢失而导致的使用复杂性仍然是存在的,因此建议你避免仅使用这两个接口存储参数。 -目前暂时推荐你使用 ``paddle.static.load_program_state`` 接口解决此处的载入问题,需要获取原Program中的参数列表传入该方法,使用示例如下(接前述示例): +目前暂时推荐你使用 ``paddle.static.load_program_state`` 接口解决此处的载入问题,需要获取原 Program 中的参数列表传入该方法,使用示例如下(接前述示例): .. 
code-block:: python @@ -239,7 +239,7 @@ 4 从 ``paddle.static.save`` 保存结果中载入参数 ---------------------------------------------------------------------------- -``paddle.static.save`` 接口生成三个文件: ``*.pdparams`` 、 ``*.pdopt`` 、 ``*.pdmodel`` ,分别保存了组网的参数、优化器的参数、静态图的Program。推荐您使用 ``paddle.load`` 分别加载这三个文件,然后使用 ``set_state_dict`` 接口将参数设置到 ``Program`` 中 。如果您已经在代码中定义了 ``Program`` ,您可以不加载 ``*.pdmodel`` 文件;如果您不需要恢复优化器中的参数,您可以不加载 ``*.pdopt`` 文件。使用示例如下: +``paddle.static.save`` 接口生成三个文件: ``*.pdparams`` 、 ``*.pdopt`` 、 ``*.pdmodel`` ,分别保存了组网的参数、优化器的参数、静态图的 Program。推荐您使用 ``paddle.load`` 分别加载这三个文件,然后使用 ``set_state_dict`` 接口将参数设置到 ``Program`` 中 。如果您已经在代码中定义了 ``Program`` ,您可以不加载 ``*.pdmodel`` 文件;如果您不需要恢复优化器中的参数,您可以不加载 ``*.pdopt`` 文件。使用示例如下: .. code-block:: python diff --git a/docs/guides/model_convert/migration_cn.rst b/docs/guides/model_convert/migration_cn.rst index 7702753c08e..18310749967 100644 --- a/docs/guides/model_convert/migration_cn.rst +++ b/docs/guides/model_convert/migration_cn.rst @@ -3,27 +3,27 @@ 版本迁移工具 ==================== -在飞桨框架2.0中,Paddle API的位置、命名、参数、行为,进行了系统性的调整和规范, 将API体系从1.X版本的 ``paddle.fluid.*`` 迁移到了 ``paddle.*`` 下。``paddle.fluid`` 目录下暂时保留了1.8版本API,主要是兼容性考虑,未来会被删除。 +在飞桨框架 2.0 中,Paddle API 的位置、命名、参数、行为,进行了系统性的调整和规范, 将 API 体系从 1.X 版本的 ``paddle.fluid.*`` 迁移到了 ``paddle.*`` 下。``paddle.fluid`` 目录下暂时保留了 1.8 版本 API,主要是兼容性考虑,未来会被删除。 -使用版本迁移工具自动迁移Paddle 1.X的代码到Paddle 2.0 +使用版本迁移工具自动迁移 Paddle 1.X 的代码到 Paddle 2.0 ------------------------------------ -飞桨提供了版本迁移工具,该工具按 Paddle 2.0 对于 Paddle 1.X的变化,能够自动实现以下功能: +飞桨提供了版本迁移工具,该工具按 Paddle 2.0 对于 Paddle 1.X 的变化,能够自动实现以下功能: -- 按照 :ref:`API映射表 ` ,将转换工具能否转换这列为True的API由Paddle 1.X 转为 Paddle 2.0,为False的API打印WARNING,提示手动升级。 -- 因为Paddle 2.0.0 默认开启动态图,所以删除用于开启动态图上下文的 ``with paddle.fluid.dygraph.guard(place)`` ,并修改该上下文的代码缩进; -- 删除组网API中的 ``act`` 参数,并自动添加相关的激活函数; +- 按照 :ref:`API 映射表 ` ,将转换工具能否转换这列为 True 的 API 由 Paddle 1.X 转为 Paddle 2.0,为 False 的 API 打印 WARNING,提示手动升级。 +- 因为 Paddle 2.0.0 默认开启动态图,所以删除用于开启动态图上下文的 ``with 
paddle.fluid.dygraph.guard(place)`` ,并修改该上下文的代码缩进; +- 删除组网 API 中的 ``act`` 参数,并自动添加相关的激活函数; -目前,版本迁移工具能够处理的API数量为X个,如果你有代码迁移的需求,使用转换工具能够节省你部分时间,帮助你快速完成代码迁移。 +目前,版本迁移工具能够处理的 API 数量为 X 个,如果你有代码迁移的需求,使用转换工具能够节省你部分时间,帮助你快速完成代码迁移。 .. warning:: - 版本迁移工具并不能处理所有的情况,对于API的处理只能按照 :ref:`API映射表 ` 中的关系完成API的变化。如代码中包含有转换工具能否转换这列为False的API或不在此表中的API,在使用本工具后,仍然需要手工来进行检查并做相应的调整。 + 版本迁移工具并不能处理所有的情况,对于 API 的处理只能按照 :ref:`API 映射表 ` 中的关系完成 API 的变化。如代码中包含有转换工具能否转换这列为 False 的 API 或不在此表中的 API,在使用本工具后,仍然需要手工来进行检查并做相应的调整。 安装 ~~~~ -版本迁移工具可以通过pip的方式安装,方式如下: +版本迁移工具可以通过 pip 的方式安装,方式如下: .. code:: ipython3 @@ -38,27 +38,27 @@ paddle_upgrade_tool 可以使用下面的方式,快速使用: $ paddle_upgrade_tool --inpath /path/to/model.py -这将在命令行中,以\ ``diff``\ 的形式,展示model.py从Paddle 1.x转换为Paddle 2.0的变化。如果你确认上述变化没有问题,只需要再执行: +这将在命令行中,以\ ``diff``\ 的形式,展示 model.py 从 Paddle 1.x 转换为 Paddle 2.0 的变化。如果你确认上述变化没有问题,只需要再执行: .. code:: ipython3 $ paddle_upgrade_tool --inpath /path/to/model.py --write -就会原地改写model.py,将上述变化改写到你的源文件中。 +就会原地改写 model.py,将上述变化改写到你的源文件中。 注意:版本转换工具会默认备份源文件,到~/.paddle_upgrade_tool/下。 参数说明如下: - –inpath 输入文件路径,可以为单个文件或文件夹。 -- –write 是否原地修改输入的文件,默认值False,表示不修改。如果为True,表示对文件进行原地修改。添加此参数也表示对文件进行原地修改。 +- –write 是否原地修改输入的文件,默认值 False,表示不修改。如果为 True,表示对文件进行原地修改。添加此参数也表示对文件进行原地修改。 - –backup 可选,是否备份源文件,默认值为\ ``~/.paddle_upgrade_tool/``\ ,在此路径下备份源文件。 -- –no-log-file 可选,是否需要输出日志文件,默认值为False,即输出日志文件。 +- –no-log-file 可选,是否需要输出日志文件,默认值为 False,即输出日志文件。 - –log-filepath 可选,输出日志的路径,默认值为\ ``report.log``\ ,输出日志文件的路径。 -- –no-confirm 可选,输入文件夹时,是否逐文件确认原地写入,只在\ ``--write``\ 为True时有效,默认值为False,表示需要逐文件确认。 -- –parallel 可选,控制转换文件的并发数,当 \ ``no-confirm`` 为True时不生效,默认值:\ ``None``\ 。 -- –log-level 可选,log级别,可为[‘DEBUG’,‘INFO’,‘WARNING’,‘ERROR’] 默认值:\ ``INFO``\ 。 -- –refactor 可选,debug时使用。 -- –print-match 可选,debug时使用。 +- –no-confirm 可选,输入文件夹时,是否逐文件确认原地写入,只在\ ``--write``\ 为 True 时有效,默认值为 False,表示需要逐文件确认。 +- –parallel 可选,控制转换文件的并发数,当 \ ``no-confirm`` 为 True 时不生效,默认值:\ ``None``\ 。 +- –log-level 可选,log 级别,可为[‘DEBUG’,‘INFO’,‘WARNING’,‘ERROR’] 默认值:\ 
``INFO``\ 。 +- –refactor 可选,debug 时使用。 +- –print-match 可选,debug 时使用。 使用教程 ~~~~~~~~ @@ -66,7 +66,7 @@ paddle_upgrade_tool 可以使用下面的方式,快速使用: 开始 ^^^^ -在使用paddle_upgrade_tool前,需要确保已经安装了Paddle 2.0.0+版本。 +在使用 paddle_upgrade_tool 前,需要确保已经安装了 Paddle 2.0.0+版本。 .. code:: ipython3 @@ -138,10 +138,10 @@ paddle_upgrade_tool 可以使用下面的方式,快速使用: --print-match this is a debug option. Print matched code and node for each file. -Paddle 1.x的例子 +Paddle 1.x 的例子 ^^^^^^^^^^^^^^ -这里是一个基于Paddle 1.x实现的一个mnist分类,部分内容如下: +这里是一个基于 Paddle 1.x 实现的一个 mnist 分类,部分内容如下: .. code:: ipython3 @@ -172,16 +172,16 @@ Paddle 1.x的例子 train_reader) -使用paddle_upgrade_tool进行转化 +使用 paddle_upgrade_tool 进行转化 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -paddle_upgrade_tool支持单文件的转化,你可以通过下方的命令直接转化单独的文件。 +paddle_upgrade_tool 支持单文件的转化,你可以通过下方的命令直接转化单独的文件。 .. code:: ipython3 $ paddle_upgrade_tool --inpath models/dygraph/mnist/train.py -注意,对于参数的删除及一些特殊情况,迁移工具都会打印WARNING信息,需要你仔细核对相关内容。 +注意,对于参数的删除及一些特殊情况,迁移工具都会打印 WARNING 信息,需要你仔细核对相关内容。 如果你觉得上述信息没有问题,可以直接对文件进行原地修改,方式如下: .. code:: ipython3 @@ -198,7 +198,7 @@ paddle_upgrade_tool支持单文件的转化,你可以通过下方的命令直 后即开始执行代码迁移。为了高效完成迁移,工具这里采用了原地写入的方式。此外,为了防止特殊情况,工具会备份转换前的代码到 ``~/.paddle_upgrade_tool`` 目录下,如果需要,你可以在备份目录下找到转换前的代码。 -代码迁移完成后,会生成一个report.log文件,记录了迁移的详情。内容如下: +代码迁移完成后,会生成一个 report.log 文件,记录了迁移的详情。内容如下: .. code:: ipython3 @@ -207,9 +207,9 @@ paddle_upgrade_tool支持单文件的转化,你可以通过下方的命令直 注意事项 ~~~~~~~~ -- 本迁移工具不能完成所有API的迁移,有少量的API需要你手动完成迁移,具体信息可见WARNING。 +- 本迁移工具不能完成所有 API 的迁移,有少量的 API 需要你手动完成迁移,具体信息可见 WARNING。 -使用Paddle 2.0 +使用 Paddle 2.0 ~~~~~~~~~~~~~~~~ -完成迁移后,代码就从Paddle 1.x迁移到了Paddle 2.0,你就可以在Paddle 2.0下进行相关的开发。 +完成迁移后,代码就从 Paddle 1.x 迁移到了 Paddle 2.0,你就可以在 Paddle 2.0 下进行相关的开发。 diff --git a/docs/guides/model_convert/paddle_api_mapping_cn.rst b/docs/guides/model_convert/paddle_api_mapping_cn.rst index 5b59f0046f6..0153b42b25a 100644 --- a/docs/guides/model_convert/paddle_api_mapping_cn.rst +++ b/docs/guides/model_convert/paddle_api_mapping_cn.rst @@ -1,18 +1,18 @@ .. 
_cn_guides_api_mapping: -Paddle 1.8 与 Paddle 2.0 API映射表 +Paddle 1.8 与 Paddle 2.0 API 映射表 ===================== -本文档基于Paddle 1.8 梳理了常用API与Paddle 2.0对应关系。你可以根据对应关系,快速熟悉Paddle 2.0的接口使用。 +本文档基于 Paddle 1.8 梳理了常用 API 与 Paddle 2.0 对应关系。你可以根据对应关系,快速熟悉 Paddle 2.0 的接口使用。 .. note:: - - 2.0版本将会是一个长期维护的版本,我们将会发布新增二位版本号版本进行功能增强、以及性能优化,通过发布新增三位版本号版本进行bugfix。 - - 我们还会继续维护1.8版本,但仅限于严重的bugfix。 + - 2.0 版本将会是一个长期维护的版本,我们将会发布新增二位版本号版本进行功能增强、以及性能优化,通过发布新增三位版本号版本进行 bugfix。 + - 我们还会继续维护 1.8 版本,但仅限于严重的 bugfix。 .. note:: - 其中,迁移工具能否转换,是指使用迁移工具能否直接对PaddlePaddle 1.8的API进行迁移,了解更多关于迁移工具的内容,请参考 :ref:`版本迁移工具 ` + 其中,迁移工具能否转换,是指使用迁移工具能否直接对 PaddlePaddle 1.8 的 API 进行迁移,了解更多关于迁移工具的内容,请参考 :ref:`版本迁移工具 ` .. csv-table:: :header: "序号", "PaddlePaddle 1.8 API", "PaddlePaddle 2.0 API", "迁移工具能否转换" diff --git a/docs/guides/model_convert/pytorch_api_mapping_cn.md b/docs/guides/model_convert/pytorch_api_mapping_cn.md index 5c6d71cba71..1fe89337316 100644 --- a/docs/guides/model_convert/pytorch_api_mapping_cn.md +++ b/docs/guides/model_convert/pytorch_api_mapping_cn.md @@ -1,29 +1,29 @@ -# PyTorch 1.8 与 Paddle 2.0 API映射表 -本文档基于[X2Paddle](https://github.com/PaddlePaddle/X2Paddle)研发过程梳理了PyTorch(v1.8.1)常用API与PaddlePaddle 2.0.0 API对应关系与差异分析。通过本文档,帮助开发者快速迁移PyTorch使用经验,完成模型的开发与调优。 +# PyTorch 1.8 与 Paddle 2.0 API 映射表 +本文档基于[X2Paddle](https://github.com/PaddlePaddle/X2Paddle)研发过程梳理了 PyTorch(v1.8.1)常用 API 与 PaddlePaddle 2.0.0 API 对应关系与差异分析。通过本文档,帮助开发者快速迁移 PyTorch 使用经验,完成模型的开发与调优。 -## X2Paddle介绍 -X2Paddle致力于帮助其它主流深度学习框架开发者快速迁移至飞桨框架,目前提供三大功能 +## X2Paddle 介绍 +X2Paddle 致力于帮助其它主流深度学习框架开发者快速迁移至飞桨框架,目前提供三大功能 - 预测模型转换 - - 支持Caffe/TensorFlow/ONNX/PyTorch的模型一键转为飞桨的预测模型,并使用PaddleInference/PaddleLite进行CPU/GPU/Arm等设备的部署 -- PyTorch训练项目转换 - - 支持PyTorch项目Python代码(包括训练、预测)一键转为基于飞桨框架的项目代码,帮助开发者快速迁移项目,并可享受 AI Studio 平台对于飞桨框架提供的海量免费计算资源 -- API映射文档 - - 详细的API文档对比分析,帮助开发者快速从PyTorch框架的使用迁移至飞桨框架的使用,大大降低学习成本 + - 支持 Caffe/TensorFlow/ONNX/PyTorch 的模型一键转为飞桨的预测模型,并使用 PaddleInference/PaddleLite 进行 CPU/GPU/Arm 等设备的部署 +- PyTorch 训练项目转换 + - 支持 PyTorch 项目 
Python 代码(包括训练、预测)一键转为基于飞桨框架的项目代码,帮助开发者快速迁移项目,并可享受 AI Studio 平台对于飞桨框架提供的海量免费计算资源 +- API 映射文档 + - 详细的 API 文档对比分析,帮助开发者快速从 PyTorch 框架的使用迁移至飞桨框架的使用,大大降低学习成本 详细的项目信息与使用方法参考 X2Paddle 在 GitHub 上的开源项目: https://github.com/PaddlePaddle/X2Paddle -## API映射表目录 +## API 映射表目录 | 类别 | 简介 | | ---------- | ------------------------- | -| [基础操作类](#id1) | 主要为`torch.XX`类API | -| [组网类](#id2) | 主要为`torch.nn.XX`类下组网相关的API | -| [Loss类](#lossapi) |主要为`torch.nn.XX`类下loss相关的API | -| [工具类](#id3) | 主要为`torch.nn.XX`类下分布式相关的API和`torch.utils.XX`类API| -| [视觉类](#id4) | 主要为`torchvision.XX`类API | +| [基础操作类](#id1) | 主要为`torch.XX`类 API | +| [组网类](#id2) | 主要为`torch.nn.XX`类下组网相关的 API | +| [Loss 类](#lossapi) |主要为`torch.nn.XX`类下 loss 相关的 API | +| [工具类](#id3) | 主要为`torch.nn.XX`类下分布式相关的 API 和`torch.utils.XX`类 API| +| [视觉类](#id4) | 主要为`torchvision.XX`类 API | -## 基础操作类API映射列表 -梳理了基础操作的PyTorch-PaddlePaddle API映射列表,主要包括了构造Tensor、数学计算、逻辑计算相关的API。 +## 基础操作类 API 映射列表 +梳理了基础操作的 PyTorch-PaddlePaddle API 映射列表,主要包括了构造 Tensor、数学计算、逻辑计算相关的 API。 | 序号 | PyTorch API | PaddlePaddle API | 备注 | | ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | @@ -69,57 +69,57 @@ X2Paddle致力于帮助其它主流深度学习框架开发者快速迁移至飞 | 40 | [torch.randperm](https://pytorch.org/docs/stable/generated/torch.randperm.html?highlight=randperm#torch.randperm) | [paddle.randperm](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/randperm_cn.html#randperm) | 功能一致,[参数不一致](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/ops/torch.randperm.md) | | 41 | [torch.save](https://pytorch.org/docs/stable/generated/torch.save.html?highlight=save#torch.save) | [paddle.save](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/save_cn.html#save) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/ops/torch.save.md) | | 42 
| [torch.load](https://pytorch.org/docs/stable/generated/torch.load.html?highlight=load#torch.load) | [paddle.load](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/load_cn.html#load) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/ops/torch.load.md) | -| 43 | [torch.abs](https://pytorch.org/docs/stable/generated/torch.abs.html?highlight=abs#torch.abs) | [paddle.abs](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/abs_cn.html#abs) | 功能一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 44 | [torch.absolute](https://pytorch.org/docs/stable/generated/torch.absolute.html?highlight=absolute#torch.absolute) | [paddle.abs](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/abs_cn.html#abs) | 功能一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 45 | [torch.acos](https://pytorch.org/docs/stable/generated/torch.acos.html?highlight=torch%20acos#torch.acos) | [paddle.acos](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/acos_cn.html#acos) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 46 | [torch.arccos](https://pytorch.org/docs/stable/generated/torch.arccos.html?highlight=arccos#torch.arccos) | [paddle.acos](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/acos_cn.html#acos) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 47 | [torch.add](https://pytorch.org/docs/stable/generated/torch.add.html?highlight=add#torch.add) | [padle.add](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/add_cn.html#add) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 48 | [torch.asin](https://pytorch.org/docs/stable/generated/torch.asin.html?highlight=asin#torch.asin) | [paddle.asin](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/asin_cn.html#asin) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 49 | [torch.arcsin](https://pytorch.org/docs/stable/generated/torch.arcsin.html?highlight=arcsin#torch.arcsin) | 
[paddle.asin](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/asin_cn.html#asin) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 50 | [torch.atan](https://pytorch.org/docs/stable/generated/torch.atan.html?highlight=atan#torch.atan) | [paddle.atan](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/atan_cn.html#atan) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 51 | [torch.arctan](https://pytorch.org/docs/stable/generated/torch.arctan.html?highlight=arctan#torch.arctan) | [paddle.atan](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/atan_cn.html#atan) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 52 | [torch.ceil](https://pytorch.org/docs/stable/generated/torch.ceil.html?highlight=ceil#torch.ceil) | [paddle.ceil](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/ceil_cn.html#ceil) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 53 | [torch.clamp](https://pytorch.org/docs/stable/generated/torch.clamp.html#torch.clamp) | [paddle.clip](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/clip_cn.html#clip) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 54 | [torch.conj](https://pytorch.org/docs/stable/generated/torch.conj.html?highlight=conj#torch.conj) | [paddle.conj](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/conj_cn.html#conj) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 55 | [torch.cos](https://pytorch.org/docs/stable/generated/torch.cos.html?highlight=cos#torch.cos) | [paddle.cos](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/cos_cn.html#cos) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 56 | [torch.cosh](https://pytorch.org/docs/stable/generated/torch.cosh.html?highlight=cosh#torch.cosh) | [paddle.cosh](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/cosh_cn.html#cosh) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | +| 43 | 
[torch.abs](https://pytorch.org/docs/stable/generated/torch.abs.html?highlight=abs#torch.abs) | [paddle.abs](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/abs_cn.html#abs) | 功能一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 44 | [torch.absolute](https://pytorch.org/docs/stable/generated/torch.absolute.html?highlight=absolute#torch.absolute) | [paddle.abs](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/abs_cn.html#abs) | 功能一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 45 | [torch.acos](https://pytorch.org/docs/stable/generated/torch.acos.html?highlight=torch%20acos#torch.acos) | [paddle.acos](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/acos_cn.html#acos) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 46 | [torch.arccos](https://pytorch.org/docs/stable/generated/torch.arccos.html?highlight=arccos#torch.arccos) | [paddle.acos](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/acos_cn.html#acos) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 47 | [torch.add](https://pytorch.org/docs/stable/generated/torch.add.html?highlight=add#torch.add) | [padle.add](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/add_cn.html#add) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 48 | [torch.asin](https://pytorch.org/docs/stable/generated/torch.asin.html?highlight=asin#torch.asin) | [paddle.asin](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/asin_cn.html#asin) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 49 | [torch.arcsin](https://pytorch.org/docs/stable/generated/torch.arcsin.html?highlight=arcsin#torch.arcsin) | [paddle.asin](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/asin_cn.html#asin) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 50 | [torch.atan](https://pytorch.org/docs/stable/generated/torch.atan.html?highlight=atan#torch.atan) | 
[paddle.atan](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/atan_cn.html#atan) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 51 | [torch.arctan](https://pytorch.org/docs/stable/generated/torch.arctan.html?highlight=arctan#torch.arctan) | [paddle.atan](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/atan_cn.html#atan) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 52 | [torch.ceil](https://pytorch.org/docs/stable/generated/torch.ceil.html?highlight=ceil#torch.ceil) | [paddle.ceil](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/ceil_cn.html#ceil) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 53 | [torch.clamp](https://pytorch.org/docs/stable/generated/torch.clamp.html#torch.clamp) | [paddle.clip](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/clip_cn.html#clip) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 54 | [torch.conj](https://pytorch.org/docs/stable/generated/torch.conj.html?highlight=conj#torch.conj) | [paddle.conj](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/conj_cn.html#conj) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 55 | [torch.cos](https://pytorch.org/docs/stable/generated/torch.cos.html?highlight=cos#torch.cos) | [paddle.cos](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/cos_cn.html#cos) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 56 | [torch.cosh](https://pytorch.org/docs/stable/generated/torch.cosh.html?highlight=cosh#torch.cosh) | [paddle.cosh](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/cosh_cn.html#cosh) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | | 57 | [torch.div](https://pytorch.org/docs/stable/generated/torch.div.html?highlight=div#torch.div) | [paddle.divide](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/divide_cn.html#divide) | 
[差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/ops/torch.div.md) | | 58 | [torch.divide](https://pytorch.org/docs/stable/generated/torch.divide.html?highlight=divide#torch.divide) | [paddle.divide](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/divide_cn.html#divide) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/ops/torch.divide.md) | -| 59 | [torch.erf](https://pytorch.org/docs/stable/generated/torch.erf.html?highlight=erf#torch.erf) | [paddle.erf](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/erf_cn.html#erf) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 60 | [torch.exp](https://pytorch.org/docs/stable/generated/torch.exp.html?highlight=exp#torch.exp) | [paddle.exp](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/exp_cn.html#exp) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 61 | [torch.floor](https://pytorch.org/docs/stable/generated/torch.floor.html?highlight=floor#torch.floor) | [paddle.floor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/floor_cn.html#floor) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 62 | [torch.floor_divide](https://pytorch.org/docs/stable/generated/torch.floor_divide.html?highlight=floor_divide#torch.floor_divide) | [paddle.floor_divide](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/floor_divide_cn.html#floor-divide) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 63 | [torch.fmod](https://pytorch.org/docs/stable/generated/torch.fmod.html?highlight=fmod#torch.fmod) | [paddle.mod](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/mod_cn.html#mod) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 64 | [torch.log](https://pytorch.org/docs/stable/generated/torch.log.html?highlight=log#torch.log) | [paddle.log](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/log_cn.html#log) | 
功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 65 | [torch.log10](https://pytorch.org/docs/stable/generated/torch.log10.html?highlight=log10#torch.log10) | [paddle.log10](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/log10_cn.html#log10) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 66 | [torch.log1p](https://pytorch.org/docs/stable/generated/torch.log1p.html?highlight=log1p#torch.log1p) | [paddle.log1p](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/log1p_cn.html#log1p) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 67 | [torch.log2](https://pytorch.org/docs/stable/generated/torch.log2.html?highlight=log2#torch.log2) | [paddle.log2](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/log2_cn.html#log2) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | +| 59 | [torch.erf](https://pytorch.org/docs/stable/generated/torch.erf.html?highlight=erf#torch.erf) | [paddle.erf](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/erf_cn.html#erf) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 60 | [torch.exp](https://pytorch.org/docs/stable/generated/torch.exp.html?highlight=exp#torch.exp) | [paddle.exp](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/exp_cn.html#exp) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 61 | [torch.floor](https://pytorch.org/docs/stable/generated/torch.floor.html?highlight=floor#torch.floor) | [paddle.floor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/floor_cn.html#floor) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 62 | [torch.floor_divide](https://pytorch.org/docs/stable/generated/torch.floor_divide.html?highlight=floor_divide#torch.floor_divide) | [paddle.floor_divide](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/floor_divide_cn.html#floor-divide) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 63 | 
[torch.fmod](https://pytorch.org/docs/stable/generated/torch.fmod.html?highlight=fmod#torch.fmod) | [paddle.mod](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/mod_cn.html#mod) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 64 | [torch.log](https://pytorch.org/docs/stable/generated/torch.log.html?highlight=log#torch.log) | [paddle.log](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/log_cn.html#log) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 65 | [torch.log10](https://pytorch.org/docs/stable/generated/torch.log10.html?highlight=log10#torch.log10) | [paddle.log10](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/log10_cn.html#log10) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 66 | [torch.log1p](https://pytorch.org/docs/stable/generated/torch.log1p.html?highlight=log1p#torch.log1p) | [paddle.log1p](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/log1p_cn.html#log1p) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 67 | [torch.log2](https://pytorch.org/docs/stable/generated/torch.log2.html?highlight=log2#torch.log2) | [paddle.log2](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/log2_cn.html#log2) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | | 68 | [torch.logical_and](https://pytorch.org/docs/stable/generated/torch.logical_and.html?highlight=logical_and#torch.logical_and) | [paddle.logical_and](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/logical_and_cn.html#logical-and) | 功能一致,参数名不一致 | | 69 | [torch.logical_not](https://pytorch.org/docs/stable/generated/torch.logical_not.html?highlight=logical_not#torch.logical_not) | [paddle.logical_not](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/logical_not_cn.html#logical-not) | 功能一致,参数名不一致 | | 70 | [torch.logical_or](https://pytorch.org/docs/stable/generated/torch.logical_or.html?highlight=logical_or#torch.logical_or) | 
[paddle.logical_or](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/logical_or_cn.html#logical-or) | 功能一致,参数名不一致 | | 71 | [torch.logical_xor](https://pytorch.org/docs/stable/generated/torch.logical_xor.html?highlight=logical_xor#torch.logical_xor) | [paddle.logical_xor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/logical_xor_cn.html#logical-xor) | 功能一致,参数名不一致 | -| 72 | [torch.mul](https://pytorch.org/docs/stable/generated/torch.mul.html?highlight=torch%20mul#torch.mul) | [paddle.multiply](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/multiply_cn.html#multiply) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 73 | [torch.multiply](https://pytorch.org/docs/stable/generated/torch.multiply.html?highlight=multiply#torch.multiply) | [paddle.multiply](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/multiply_cn.html#multiply) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 74 | [torch.pow](https://pytorch.org/docs/stable/generated/torch.pow.html?highlight=pow#torch.pow) | [paddle.pow](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/pow_cn.html#pow) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | +| 72 | [torch.mul](https://pytorch.org/docs/stable/generated/torch.mul.html?highlight=torch%20mul#torch.mul) | [paddle.multiply](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/multiply_cn.html#multiply) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 73 | [torch.multiply](https://pytorch.org/docs/stable/generated/torch.multiply.html?highlight=multiply#torch.multiply) | [paddle.multiply](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/multiply_cn.html#multiply) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 74 | [torch.pow](https://pytorch.org/docs/stable/generated/torch.pow.html?highlight=pow#torch.pow) | [paddle.pow](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/pow_cn.html#pow) | 功能一致,参数名不一致,PaddlePaddle 
未定义`out`参数代表输出 Tensor | | 75 | [torch.real](https://pytorch.org/docs/stable/generated/torch.real.html?highlight=real#torch.real) | [paddle.real](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/real_cn.html#real) | 功能一致,参数名不一致 | -| 76 | [torch.reciprocal](https://pytorch.org/docs/stable/generated/torch.reciprocal.html?highlight=reciprocal#torch.reciprocal) | [paddle.reciprocal](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/reciprocal_cn.html#reciprocal) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 77 | [torch.remainder](https://pytorch.org/docs/stable/generated/torch.remainder.html?highlight=remainder#torch.remainder) | [paddle.mod](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/mod_cn.html#mod) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 78 | [torch.round](https://pytorch.org/docs/stable/generated/torch.round.html?highlight=round#torch.round) | [paddle.round](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/round_cn.html#round) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 79 | [torch.rsqrt](https://pytorch.org/docs/stable/generated/torch.rsqrt.html?highlight=rsqrt#torch.rsqrt) | [paddle.rsqrt](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/rsqrt_cn.html#rsqrt) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 80 | [torch.sign](https://pytorch.org/docs/stable/generated/torch.sign.html?highlight=sign#torch.sign) | [paddle.sign](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/sign_cn.html#sign) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 81 | [torch.sin](https://pytorch.org/docs/stable/generated/torch.sin.html?highlight=sin#torch.sin) | [paddle.sin](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/sin_cn.html#sin) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 82 | [torch.sinh](https://pytorch.org/docs/stable/generated/torch.sinh.html?highlight=sinh#torch.sinh) | 
[paddle.sinh](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/sinh_cn.html#sinh) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 83 | [torch.sqrt](https://pytorch.org/docs/stable/generated/torch.sqrt.html?highlight=sqrt#torch.sqrt) | [paddle.sqrt](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/sqrt_cn.html#sqrt) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | +| 76 | [torch.reciprocal](https://pytorch.org/docs/stable/generated/torch.reciprocal.html?highlight=reciprocal#torch.reciprocal) | [paddle.reciprocal](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/reciprocal_cn.html#reciprocal) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 77 | [torch.remainder](https://pytorch.org/docs/stable/generated/torch.remainder.html?highlight=remainder#torch.remainder) | [paddle.mod](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/mod_cn.html#mod) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 78 | [torch.round](https://pytorch.org/docs/stable/generated/torch.round.html?highlight=round#torch.round) | [paddle.round](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/round_cn.html#round) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 79 | [torch.rsqrt](https://pytorch.org/docs/stable/generated/torch.rsqrt.html?highlight=rsqrt#torch.rsqrt) | [paddle.rsqrt](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/rsqrt_cn.html#rsqrt) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 80 | [torch.sign](https://pytorch.org/docs/stable/generated/torch.sign.html?highlight=sign#torch.sign) | [paddle.sign](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/sign_cn.html#sign) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 81 | [torch.sin](https://pytorch.org/docs/stable/generated/torch.sin.html?highlight=sin#torch.sin) | [paddle.sin](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/sin_cn.html#sin) | 功能一致,参数名不一致,PaddlePaddle 
未定义`out`参数代表输出 Tensor | +| 82 | [torch.sinh](https://pytorch.org/docs/stable/generated/torch.sinh.html?highlight=sinh#torch.sinh) | [paddle.sinh](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/sinh_cn.html#sinh) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 83 | [torch.sqrt](https://pytorch.org/docs/stable/generated/torch.sqrt.html?highlight=sqrt#torch.sqrt) | [paddle.sqrt](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/sqrt_cn.html#sqrt) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | | 84 | [torch.argmax](https://pytorch.org/docs/stable/generated/torch.argmax.html?highlight=argmax#torch.argmax) | [paddle.argmax](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/argmax_cn.html#argmax) | 功能一致,参数名不一致 | | 85 | [torch.argmin](https://pytorch.org/docs/stable/generated/torch.argmin.html?highlight=argmin#torch.argmin) | [paddle.argmin](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/argmin_cn.html#argmin) | 功能一致,参数名不一致 | -| 86 | [torch.max](https://pytorch.org/docs/stable/generated/torch.max.html?highlight=max#torch.max) | [paddle.max](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/max_cn.html#max) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | -| 87 | [torch.min](https://pytorch.org/docs/stable/generated/torch.min.html?highlight=min#torch.min) | [paddle.min](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/min_cn.html#min) | 功能一致,参数名不一致,PaddlePaddle未定义`out`参数代表输出Tensor | +| 86 | [torch.max](https://pytorch.org/docs/stable/generated/torch.max.html?highlight=max#torch.max) | [paddle.max](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/max_cn.html#max) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 Tensor | +| 87 | [torch.min](https://pytorch.org/docs/stable/generated/torch.min.html?highlight=min#torch.min) | [paddle.min](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/min_cn.html#min) | 功能一致,参数名不一致,PaddlePaddle 未定义`out`参数代表输出 
Tensor | ***持续更新...*** -## 组网类API映射列表 +## 组网类 API 映射列表 -梳理了与构造网络相关的PyTorch-PaddlePaddle API映射列表。 +梳理了与构造网络相关的 PyTorch-PaddlePaddle API 映射列表。 | 序号 | PyTorch API | PaddlePaddle API | 备注 | | ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | @@ -163,34 +163,34 @@ X2Paddle致力于帮助其它主流深度学习框架开发者快速迁移至飞 | 38 | [torch.nn.LSTM](https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html?highlight=lstm#torch.nn.LSTM) | [paddle.nn.LSTM](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/LSTM_cn.html#lstm) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/nn/torch.nn.LSTM.md) | | 39 | [torch.nn.GRU](https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=torch%20nn%20gru#torch.nn.GRU) | [paddle.nn.GRU](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/GRU_cn.html#gru) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/nn/torch.nn.GRU.md) | | 40 | [torch.nn.Embedding](https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html?highlight=embedding#torch.nn.Embedding) | [paddle.nn.Embedding](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/Embedding_cn.html#embedding) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/nn/torch.nn.Embedding.md) | -| 41 | [torch.nn.ELU](https://pytorch.org/docs/stable/generated/torch.nn.ELU.html?highlight=elu#torch.nn.ELU) | [paddle.nn.ELU](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/ELU_cn.html#elu) | 功能一致,PaddlePaddle未定义`inplace`参数表示在不更改变量的内存地址的情况下,直接修改变量的值 | -| 42 | [torch.nn.Hardsigmoid](https://pytorch.org/docs/stable/generated/torch.nn.Hardsigmoid.html?highlight=hardsigmoid#torch.nn.Hardsigmoid) | 
[paddle.nn.Hardsigmoid](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/Hardsigmoid_cn.html#hardsigmoid) | 功能一致,PaddlePaddle未定义`inplace`参数表示在不更改变量的内存地址的情况下,直接修改变量的值 | -| 43 | [torch.nn.LeakyReLU](https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html?highlight=leakyrelu#torch.nn.LeakyReLU) | [paddle.nn.LeakyReLU](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/LeakyReLU_cn.html#leakyrelu) | 功能一致,PaddlePaddle未定义`inplace`参数表示在不更改变量的内存地址的情况下,直接修改变量的值 | +| 41 | [torch.nn.ELU](https://pytorch.org/docs/stable/generated/torch.nn.ELU.html?highlight=elu#torch.nn.ELU) | [paddle.nn.ELU](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/ELU_cn.html#elu) | 功能一致,PaddlePaddle 未定义`inplace`参数表示在不更改变量的内存地址的情况下,直接修改变量的值 | +| 42 | [torch.nn.Hardsigmoid](https://pytorch.org/docs/stable/generated/torch.nn.Hardsigmoid.html?highlight=hardsigmoid#torch.nn.Hardsigmoid) | [paddle.nn.Hardsigmoid](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/Hardsigmoid_cn.html#hardsigmoid) | 功能一致,PaddlePaddle 未定义`inplace`参数表示在不更改变量的内存地址的情况下,直接修改变量的值 | +| 43 | [torch.nn.LeakyReLU](https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html?highlight=leakyrelu#torch.nn.LeakyReLU) | [paddle.nn.LeakyReLU](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/LeakyReLU_cn.html#leakyrelu) | 功能一致,PaddlePaddle 未定义`inplace`参数表示在不更改变量的内存地址的情况下,直接修改变量的值 | | 44 | [torch.nn.PReLU](https://pytorch.org/docs/stable/generated/torch.nn.PReLU.html?highlight=prelu#torch.nn.PReLU) | [paddle.nn.PReLU](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/PReLU_cn.html#prelu) | 功能一致 | -| 45 | [torch.nn.ReLU](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html?highlight=relu#torch.nn.ReLU) | [paddle.nn.ReLU](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/ReLU_cn.html#relu) | 功能一致,PaddlePaddle未定义`inplace`参数表示在不更改变量的内存地址的情况下,直接修改变量的值 | +| 45 | 
[torch.nn.ReLU](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html?highlight=relu#torch.nn.ReLU) | [paddle.nn.ReLU](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/ReLU_cn.html#relu) | 功能一致,PaddlePaddle 未定义`inplace`参数表示在不更改变量的内存地址的情况下,直接修改变量的值 | | 46 | [torch.nn.Softmax](https://pytorch.org/docs/stable/generated/torch.nn.Softmax.html?highlight=softmax#torch.nn.Softmax) | [paddle.nn.Softmax](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/Softmax_cn.html#softmax) | 功能一致,参数名不一致 | ***持续更新...*** -## Loss类API映射列表 -梳理了计算loss相关的PyTorch-PaddlePaddle API映射列表。 +## Loss 类 API 映射列表 +梳理了计算 loss 相关的 PyTorch-PaddlePaddle API 映射列表。 | 序号 | PyTorch API | PaddlePaddle API | 备注 | | ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| 1 | [torch.nn.L1Loss](https://pytorch.org/docs/stable/generated/torch.nn.L1Loss.html?highlight=l1loss#torch.nn.L1Loss) | [paddle.nn.L1Loss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/loss/L1Loss_cn.html#l1loss) | 功能一致,PyTorch存在废弃参数`size_average`和`reduce`。 | -| 2 | [torch.nn.MSELoss](https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html?highlight=mseloss#torch.nn.MSELoss) | [paddle.nn.MSELoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/MSELoss_cn.html#mseloss) | 功能一致,PyTorch存在废弃参数`size_average`和`reduce`。 | +| 1 | [torch.nn.L1Loss](https://pytorch.org/docs/stable/generated/torch.nn.L1Loss.html?highlight=l1loss#torch.nn.L1Loss) | [paddle.nn.L1Loss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/loss/L1Loss_cn.html#l1loss) | 功能一致,PyTorch 存在废弃参数`size_average`和`reduce`。 | +| 2 | [torch.nn.MSELoss](https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html?highlight=mseloss#torch.nn.MSELoss) | 
[paddle.nn.MSELoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/MSELoss_cn.html#mseloss) | 功能一致,PyTorch 存在废弃参数`size_average`和`reduce`。 | | 3 | [torch.nn.CrossEntropyLoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/layer/loss/CrossEntropyLoss_cn.html#crossentropyloss) | [paddle.nn.CrossEntropyLoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/CrossEntropyLoss_cn.html#crossentropyloss) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/loss/torch.nn.CrossEntropyLoss.md) | | 4 | [torch.nn.KLDivLoss](https://pytorch.org/docs/stable/generated/torch.nn.KLDivLoss.html?highlight=kldivloss#torch.nn.KLDivLoss) | [paddle.nn.KLDivLoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/KLDivLoss_cn.html#kldivloss) | [差异对比](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/loss/torch.nn.KLDivLoss.md) | -| 5 | [torch.nn.BCELoss](https://pytorch.org/docs/stable/generated/torch.nn.BCELoss.html?highlight=bceloss#torch.nn.BCELoss) | [paddle.nn.BCELoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/BCELoss_cn.html#bceloss) | 功能一致,PyTorch存在废弃参数`size_average`和`reduce`。 | -| 6 | [torch.nn.BCEWithLogitsLoss](https://pytorch.org/docs/stable/generated/torch.nn.BCEWithLogitsLoss.html?highlight=bcewithlogitsloss#torch.nn.BCEWithLogitsLoss) | [paddle.nn.BCEWithLogitsLoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/BCEWithLogitsLoss_cn.html#bcewithlogitsloss) | 功能一致,PyTorch存在废弃参数`size_average`和`reduce`。 | -| 7 | [torch.nn.SmoothL1Loss](https://pytorch.org/docs/stable/generated/torch.nn.SmoothL1Loss.html?highlight=torch%20nn%20smoothl1loss#torch.nn.SmoothL1Loss) | [paddle.nn.SmoothL1Loss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/SmoothL1Loss_cn.html#smoothl1loss) | 功能一致,参数名不一致,PyTorch存在废弃参数`size_average`和`reduce`。 | +| 5 | 
[torch.nn.BCELoss](https://pytorch.org/docs/stable/generated/torch.nn.BCELoss.html?highlight=bceloss#torch.nn.BCELoss) | [paddle.nn.BCELoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/BCELoss_cn.html#bceloss) | 功能一致,PyTorch 存在废弃参数`size_average`和`reduce`。 | +| 6 | [torch.nn.BCEWithLogitsLoss](https://pytorch.org/docs/stable/generated/torch.nn.BCEWithLogitsLoss.html?highlight=bcewithlogitsloss#torch.nn.BCEWithLogitsLoss) | [paddle.nn.BCEWithLogitsLoss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/BCEWithLogitsLoss_cn.html#bcewithlogitsloss) | 功能一致,PyTorch 存在废弃参数`size_average`和`reduce`。 | +| 7 | [torch.nn.SmoothL1Loss](https://pytorch.org/docs/stable/generated/torch.nn.SmoothL1Loss.html?highlight=torch%20nn%20smoothl1loss#torch.nn.SmoothL1Loss) | [paddle.nn.SmoothL1Loss](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/SmoothL1Loss_cn.html#smoothl1loss) | 功能一致,参数名不一致,PyTorch 存在废弃参数`size_average`和`reduce`。 | ***持续更新...*** -## 工具类API映射列表 -梳理了与数据处理、分布式处理等相关的PyTorch-PaddlePaddle API映射列表。 +## 工具类 API 映射列表 +梳理了与数据处理、分布式处理等相关的 PyTorch-PaddlePaddle API 映射列表。 | 序号 | PyTorch API | PaddlePaddle API | 备注 | | ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | @@ -206,9 +206,9 @@ X2Paddle致力于帮助其它主流深度学习框架开发者快速迁移至飞 ***持续更新...*** -## 视觉类API映射列表 +## 视觉类 API 映射列表 -梳理了与视觉处理相关的PyTorch-PaddlePaddle API映射列表。 +梳理了与视觉处理相关的 PyTorch-PaddlePaddle API 映射列表。 | 序号 | PyTorch API | PaddlePaddle API | 备注 | | ---- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------- | @@ -228,6 +228,6 @@ X2Paddle致力于帮助其它主流深度学习框架开发者快速迁移至飞 | 14 | 
[torchvision.transforms.RandomVerticalFlip](https://pytorch.org/vision/stable/transforms.html?highlight=randomverticalflip#torchvision.transforms.RandomVerticalFlip) | [paddle.vision.transforms.RandomVerticalFlip](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/vision/transforms/RandomVerticalFlip_cn.html#randomverticalflip) | 功能一致 | | 15 | [torchvision.transforms.Lambda](https://pytorch.org/vision/stable/transforms.html?highlight=lambda#torchvision.transforms.Lambda) | 无对应实现 | [组合实现](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/vision/torchvision.transforms.Lambda.md) | | 17 | [torchvision.utils.save_image](https://pytorch.org/vision/stable/utils.html?highlight=save_image#torchvision.utils.save_image) | 无对应实现 | [组合实现](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/vision/torchvision.utils.save_image.md) | -| 18 | [torchvision.models 系列模型](https://pytorch.org/vision/stable/models.html?highlight=torchvision%20models) | X2Paddle提供 | [使用方式](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/vision/torchvision.models.md) | +| 18 | [torchvision.models 系列模型](https://pytorch.org/vision/stable/models.html?highlight=torchvision%20models) | X2Paddle 提供 | [使用方式](https://github.com/PaddlePaddle/X2Paddle/tree/develop/docs/pytorch_project_convertor/API_docs/vision/torchvision.models.md) | ***持续更新...*** diff --git a/docs/guides/model_convert/update_cn.md b/docs/guides/model_convert/update_cn.md index 7f367547d13..e8846b67606 100644 --- a/docs/guides/model_convert/update_cn.md +++ b/docs/guides/model_convert/update_cn.md @@ -1,29 +1,29 @@ # 升级指南 ## 升级概要 -飞桨2.0版本,相对1.8版本有重大升级,涉及开发方面的重要变化如下: +飞桨 2.0 版本,相对 1.8 版本有重大升级,涉及开发方面的重要变化如下: - 动态图功能完善,动态图模式下数据表示概念为`Tensor`,推荐使用动态图模式; - - API目录体系调整,API的命名和别名进行了统一规范化,虽然兼容老版API,但请使用新API体系开发; + - API 目录体系调整,API 的命名和别名进行了统一规范化,虽然兼容老版 API,但请使用新 API 体系开发; - 
数据处理、组网方式、模型训练、多卡启动、模型保存和推理等开发流程都有了对应优化,请对应查看说明; -以上变化请仔细阅读本指南。对于已有模型的升级,飞桨还提供了2.0转换工具(见附录)提供更自动化的辅助。 +以上变化请仔细阅读本指南。对于已有模型的升级,飞桨还提供了 2.0 转换工具(见附录)提供更自动化的辅助。 其他一些功能增加方面诸如动态图对量化训练、混合精度的支持、动静转换等方面不在本指南列出,具体可查看[Release Note](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/release_note_cn.html)或对应文档。 ## 一、动态图 ### 推荐优先使用动态图模式 -飞桨2.0版本将会把动态图作为默认模式(如果还想使用静态图,可通过调用`paddle.enable_static`切换)。 +飞桨 2.0 版本将会把动态图作为默认模式(如果还想使用静态图,可通过调用`paddle.enable_static`切换)。 ```python import paddle ``` -### 使用Tensor概念表示数据 -静态图模式下,由于组网时使用的数据不能实时访问,Paddle用`Variable`来表示数据。 +### 使用 Tensor 概念表示数据 +静态图模式下,由于组网时使用的数据不能实时访问,Paddle 用`Variable`来表示数据。 动态图下,从直观性等角度考虑,将数据表示概念统一为`Tensor`。动态图下`Tensor`的创建主要有两种方法: -1. 通过调用`paddle.to_tensor`函数,将`python scalar/list`,或者`numpy.ndarray`数据转换为Paddle的`Tensor`。具体使用方法,请查看官网的API文档。 +1. 通过调用`paddle.to_tensor`函数,将`python scalar/list`,或者`numpy.ndarray`数据转换为 Paddle 的`Tensor`。具体使用方法,请查看官网的 API 文档。 ```python import paddle @@ -34,57 +34,57 @@ paddle.to_tensor((1.1, 2.2)) paddle.to_tensor(np.random.randn(3, 4)) ``` -2. 通过调用 `paddle.zeros, paddle.ones, paddle.full, paddle.arange, paddle.rand, paddle.randn, paddle.randint, paddle.normal, paddle.uniform` 等函数,创建并返回Tensor。 +2. 
通过调用 `paddle.zeros, paddle.ones, paddle.full, paddle.arange, paddle.rand, paddle.randn, paddle.randint, paddle.normal, paddle.uniform` 等函数,创建并返回 Tensor。 ## 二、API -### API目录结构 +### API 目录结构 -为了API组织更加简洁和清晰,将原来padddle.fluid.xxx的目录体系全新升级为paddle.xxx,并对子目录的组织进行了系统的条理化优化。同时还增加了高层API,可以高低搭配使用。paddle.fluid目录下暂时保留了1.8版本API,主要是兼容性考虑,未来会被删除。 -**基于2.0的开发任务,请使用paddle目录下的API,不要再使用paddle.fluid目录下的API。** 如果发现Paddle目录下有API缺失的情况,推荐使用基础API进行组合实现;你也可以通过在 [github](https://github.com/paddlepaddle/paddle) 上提issue的方式反馈。 +为了 API 组织更加简洁和清晰,将原来 padddle.fluid.xxx 的目录体系全新升级为 paddle.xxx,并对子目录的组织进行了系统的条理化优化。同时还增加了高层 API,可以高低搭配使用。paddle.fluid 目录下暂时保留了 1.8 版本 API,主要是兼容性考虑,未来会被删除。 +**基于 2.0 的开发任务,请使用 paddle 目录下的 API,不要再使用 paddle.fluid 目录下的 API。** 如果发现 Paddle 目录下有 API 缺失的情况,推荐使用基础 API 进行组合实现;你也可以通过在 [github](https://github.com/paddlepaddle/paddle) 上提 issue 的方式反馈。 -**2.0版本的API 整体目录结构如下**: +**2.0 版本的 API 整体目录结构如下**: -| 目录 | 功能和包含的API | +| 目录 | 功能和包含的 API | | :--- | --------------- | -| paddle.* | paddle根目录下保留了常用API的别名,当前包括:paddle.tensor、paddle.framework和paddle.device目录下的所有API | -| paddle.tensor | tensor操作相关的API,例如创建 zeros 、矩阵运算 matmul 、变换 concat 、计算 add 、查找 argmax 等。| -| paddle.framework | 框架通用API和动态图模式的API,例如 no_grad 、 save 、 load 等。| -| paddle.device | 设备管理相关API,比如:set_device, get_device等 | -| paddle.amp | paddle自动混合精度策略,包括 auto_cast 、 GradScaler 等。| -| paddle.callbacks | paddle日志回调类,包括 ModelCheckpoint 、 ProgBarLogger 等。| -| paddle.nn | 组网相关的API,例如 Linear 、卷积 Conv2D 、循环神经网络 LSTM 、损失函数 CrossEntropyLoss 、激活函数 ReLU 等。 | -| paddle.static | 静态图下基础框架相关API,比如:Variable, Program, Executor等 | -| paddle.static.nn | 静态图下组网专用API,例如全连接层 fc 、控制流 while_loop/cond 。| -| paddle.optimizer | 优化算法相关API,比如:SGD、Adagrad、Adam等。| -| paddle.optimizer.lr | 学习率衰减相关API,例如 NoamDecay 、 StepDecay 、 PiecewiseDecay 等。| -| paddle.metric | 评估指标计算相关的API,比如:Accuracy, Auc等。 | -| paddle.io | 数据输入输出相关API,比如:Dataset, DataLoader等 | -| paddle.distributed | 分布式相关基础API | -| paddle.distributed.fleet | 分布式相关高层API | -| paddle.vision | 视觉领域API,例如数据集 
Cifar10 、数据处理 ColorJitter 、常用基础网络结构 ResNet 等。| -| paddle.text | 目前包括NLP领域相关的数据集,如 Imdb 、 Movielens 。| - -### API别名规则 - -- 为了方便使用,API会在不同的路径下建立别名: - - 所有device, framework, tensor目录下的API,均在paddle根目录建立别名;除少数特殊API外,其他API在paddle根目录下均没有别名。 - - paddle.nn目录下除functional目录以外的所有API,在paddle.nn目录下均有别名;functional目录中的API,在paddle.nn目录下均没有别名。 +| paddle.* | paddle 根目录下保留了常用 API 的别名,当前包括:paddle.tensor、paddle.framework 和 paddle.device 目录下的所有 API | +| paddle.tensor | tensor 操作相关的 API,例如创建 zeros 、矩阵运算 matmul 、变换 concat 、计算 add 、查找 argmax 等。| +| paddle.framework | 框架通用 API 和动态图模式的 API,例如 no_grad 、 save 、 load 等。| +| paddle.device | 设备管理相关 API,比如:set_device, get_device 等 | +| paddle.amp | paddle 自动混合精度策略,包括 auto_cast 、 GradScaler 等。| +| paddle.callbacks | paddle 日志回调类,包括 ModelCheckpoint 、 ProgBarLogger 等。| +| paddle.nn | 组网相关的 API,例如 Linear 、卷积 Conv2D 、循环神经网络 LSTM 、损失函数 CrossEntropyLoss 、激活函数 ReLU 等。 | +| paddle.static | 静态图下基础框架相关 API,比如:Variable, Program, Executor 等 | +| paddle.static.nn | 静态图下组网专用 API,例如全连接层 fc 、控制流 while_loop/cond 。| +| paddle.optimizer | 优化算法相关 API,比如:SGD、Adagrad、Adam 等。| +| paddle.optimizer.lr | 学习率衰减相关 API,例如 NoamDecay 、 StepDecay 、 PiecewiseDecay 等。| +| paddle.metric | 评估指标计算相关的 API,比如:Accuracy, Auc 等。 | +| paddle.io | 数据输入输出相关 API,比如:Dataset, DataLoader 等 | +| paddle.distributed | 分布式相关基础 API | +| paddle.distributed.fleet | 分布式相关高层 API | +| paddle.vision | 视觉领域 API,例如数据集 Cifar10 、数据处理 ColorJitter 、常用基础网络结构 ResNet 等。| +| paddle.text | 目前包括 NLP 领域相关的数据集,如 Imdb 、 Movielens 。| + +### API 别名规则 + +- 为了方便使用,API 会在不同的路径下建立别名: + - 所有 device, framework, tensor 目录下的 API,均在 paddle 根目录建立别名;除少数特殊 API 外,其他 API 在 paddle 根目录下均没有别名。 + - paddle.nn 目录下除 functional 目录以外的所有 API,在 paddle.nn 目录下均有别名;functional 目录中的 API,在 paddle.nn 目录下均没有别名。 - **推荐优先使用较短的路径的别名**,比如`paddle.add -> paddle.tensor.add`,推荐优先使用`paddle.add` -- 以下为一些特殊的别名关系,推荐使用左边的API名称: +- 以下为一些特殊的别名关系,推荐使用左边的 API 名称: - paddle.tanh -> paddle.tensor.tanh -> paddle.nn.functional.tanh - paddle.remainder -> paddle.mod -> 
paddle.floor_mod - paddle.rand -> paddle.uniform - paddle.randn -> paddle.standard_normal - Layer.set_state_dict -> Layer.set_dict -### 常用API名称变化 +### 常用 API 名称变化 - 加、减、乘、除使用全称,不使用简称 -- 对于当前逐元素操作,不加elementwise前缀 -- 对于按照某一轴操作,不加reduce前缀 -- Conv, Pool, Dropout, BatchNorm, Pad组网类API根据输入数据类型增加1D, 2D, 3D后缀 +- 对于当前逐元素操作,不加 elementwise 前缀 +- 对于按照某一轴操作,不加 reduce 前缀 +- Conv, Pool, Dropout, BatchNorm, Pad 组网类 API 根据输入数据类型增加 1D, 2D, 3D 后缀 - | Paddle 1.8 API名称 | Paddle 2.0对应的名称| + | Paddle 1.8 API 名称 | Paddle 2.0 对应的名称| | --------------- | ------------------------ | | paddle.fluid.layers.elementwise_add | paddle.add | | paddle.fluid.layers.elementwise_sub | paddle.subtract | @@ -104,14 +104,14 @@ paddle.to_tensor(np.random.randn(3, 4)) ## 三、开发流程 ### 数据处理 -数据处理推荐使用**paddle.io目录下的Dataset,Sampler, BatchSampler, DataLoader接口**,不推荐reader类接口。一些常用的数据集已经在paddle.vision.datasets和paddle.text.datasets目录实现,具体参考API文档。 +数据处理推荐使用**paddle.io 目录下的 Dataset,Sampler, BatchSampler, DataLoader 接口**,不推荐 reader 类接口。一些常用的数据集已经在 paddle.vision.datasets 和 paddle.text.datasets 目录实现,具体参考 API 文档。 ```python from paddle.io import Dataset class MyDataset(Dataset): """ - 步骤一:继承paddle.io.Dataset类 + 步骤一:继承 paddle.io.Dataset 类 """ def __init__(self, mode='train'): """ @@ -136,7 +136,7 @@ class MyDataset(Dataset): def __getitem__(self, index): """ - 步骤三:实现__getitem__方法,定义指定index时如何获取数据,并返回单条数据(训练数据,对应的标签) + 步骤三:实现__getitem__方法,定义指定 index 时如何获取数据,并返回单条数据(训练数据,对应的标签) """ data = self.data[index][0] label = self.data[index][1] @@ -165,12 +165,12 @@ for data, label in val_dataset: ### 组网方式 #### Sequential 组网 -针对顺序的线性网络结构可以直接使用Sequential来快速完成组网,可以减少类的定义等代码编写。 +针对顺序的线性网络结构可以直接使用 Sequential 来快速完成组网,可以减少类的定义等代码编写。 ```python import paddle -# Sequential形式组网 +# Sequential 形式组网 mnist = paddle.nn.Sequential( paddle.nn.Flatten(), paddle.nn.Linear(784, 512), @@ -180,14 +180,14 @@ mnist = paddle.nn.Sequential( ) ``` -#### SubClass组网 +#### SubClass 组网 - 
针对一些比较复杂的网络结构,就可以使用Layer子类定义的方式来进行模型代码编写,在`__init__`构造函数中进行组网Layer的声明,在`forward`中使用声明的Layer变量进行前向计算。子类组网方式也可以实现sublayer的复用,针对相同的layer可以在构造函数中一次性定义,在`forward中多次调用。 + 针对一些比较复杂的网络结构,就可以使用 Layer 子类定义的方式来进行模型代码编写,在`__init__`构造函数中进行组网 Layer 的声明,在`forward`中使用声明的 Layer 变量进行前向计算。子类组网方式也可以实现 sublayer 的复用,针对相同的 layer 可以在构造函数中一次性定义,在 `forward` 中多次调用。 ```python import paddle -# Layer类继承方式组网 +# Layer 类继承方式组网 class Mnist(paddle.nn.Layer): def __init__(self): super(Mnist, self).__init__() @@ -212,9 +212,9 @@ mnist = Mnist() ### 模型训练 -#### 使用高层API +#### 使用高层 API -增加了`paddle.Model`高层API,大部分任务可以使用此API用于简化训练、评估、预测类代码开发。注意区别Model和Net概念,Net是指继承paddle.nn.Layer的网络结构;而Model是指持有一个Net对象,同时指定损失函数、优化算法、评估指标的可训练、评估、预测的实例。具体参考高层API的代码示例。 +增加了`paddle.Model`高层 API,大部分任务可以使用此 API 用于简化训练、评估、预测类代码开发。注意区别 Model 和 Net 概念,Net 是指继承 paddle.nn.Layer 的网络结构;而 Model 是指持有一个 Net 对象,同时指定损失函数、优化算法、评估指标的可训练、评估、预测的实例。具体参考高层 API 的代码示例。 ```python import paddle from paddle.vision.transforms import ToTensor train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=ToTensor()) test_dataset = paddle.vision.datasets.MNIST(mode='test', transform=ToTensor()) lenet = paddle.vision.models.LeNet() -# Mnist继承paddle.nn.Layer属于Net,model包含了训练功能 +# Mnist 继承 paddle.nn.Layer 属于 Net,model 包含了训练功能 model = paddle.Model(lenet) -# 设置训练模型所需的optimizer, loss, metric +# 设置训练模型所需的 optimizer, loss, metric model.prepare( paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()), paddle.nn.CrossEntropyLoss(), @@ -241,7 +241,7 @@ model.fit(train_dataset, epochs=2, batch_size=64, log_freq=200) model.evaluate(test_dataset, log_freq=20, batch_size=64) ``` -#### 使用基础API +#### 使用基础 API ```python import paddle from paddle.vision.transforms import ToTensor train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=ToTensor()) lenet = paddle.vision.models.LeNet() loss_fn = paddle.nn.CrossEntropyLoss() train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True) def train(): epochs = 2 adam = paddle.optimizer.Adam(learning_rate=0.001, parameters=lenet.parameters()) - # 用Adam作为优化函数 + # 用 Adam 作为优化函数 for epoch in range(epochs): for batch_id, data in enumerate(train_loader()): x_data = data[0] @@ -277,38 
+277,38 @@ train() ``` ### 单机多卡启动 -2.0增加`paddle.distributed.spawn`函数来启动单机多卡训练,同时原有的`paddle.distributed.launch`的方式依然保留。 +2.0 增加`paddle.distributed.spawn`函数来启动单机多卡训练,同时原有的`paddle.distributed.launch`的方式依然保留。 -#### 方式1、launch启动 +#### 方式 1、launch 启动 -##### 高层API场景 +##### 高层 API 场景 当调用`paddle.Model`高层来实现训练时,想要启动单机多卡训练非常简单,代码不需要做任何修改,只需要在启动时增加一下参数`-m paddle.distributed.launch`。 ```bash -# 单机单卡启动,默认使用第0号卡 +# 单机单卡启动,默认使用第 0 号卡 $ python train.py # 单机多卡启动,默认使用当前可见的所有卡 $ python -m paddle.distributed.launch train.py -# 单机多卡启动,设置当前使用的第0号和第1号卡 +# 单机多卡启动,设置当前使用的第 0 号和第 1 号卡 $ python -m paddle.distributed.launch --selected_gpus='0,1' train.py -# 单机多卡启动,设置当前使用第0号和第1号卡 +# 单机多卡启动,设置当前使用第 0 号和第 1 号卡 $ export CUDA_VISIBLE_DEVICES=0,1 $ python -m paddle.distributed.launch train.py ``` -##### 基础API场景 +##### 基础 API 场景 -如果使用基础API实现训练,想要启动单机多卡训练,需要对单机单卡的代码进行3处修改,具体如下: +如果使用基础 API 实现训练,想要启动单机多卡训练,需要对单机单卡的代码进行 3 处修改,具体如下: ```python import paddle from paddle.vision.transforms import ToTensor -# 第1处改动,导入分布式训练所需要的包 +# 第 1 处改动,导入分布式训练所需要的包 import paddle.distributed as dist train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=ToTensor()) @@ -320,14 +320,14 @@ loss_fn = paddle.nn.CrossEntropyLoss() train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True) def train(model): - # 第2处改动,初始化并行环境 + # 第 2 处改动,初始化并行环境 dist.init_parallel_env() - # 第3处改动,增加paddle.DataParallel封装 + # 第 3 处改动,增加 paddle.DataParallel 封装 lenet = paddle.DataParallel(model) epochs = 2 adam = paddle.optimizer.Adam(learning_rate=0.001, parameters=lenet.parameters()) - # 用Adam作为优化函数 + # 用 Adam 作为优化函数 for epoch in range(epochs): for batch_id, data in enumerate(train_loader()): x_data = data[0] @@ -345,7 +345,7 @@ def train(model): train(lenet) ``` -修改完后保存文件,然后使用跟高层API相同的启动方式即可 +修改完后保存文件,然后使用跟高层 API 相同的启动方式即可 **注意:** 单卡训练不支持调用 ``init_parallel_env``,请使用以下几种方式进行分布式训练。 @@ -354,17 +354,17 @@ train(lenet) # 单机多卡启动,默认使用当前可见的所有卡 $ python -m paddle.distributed.launch train.py -# 单机多卡启动,设置当前使用的第0号和第1号卡 +# 
单机多卡启动,设置当前使用的第 0 号和第 1 号卡 $ python -m paddle.distributed.launch --selected_gpus '0,1' train.py -# 单机多卡启动,设置当前使用第0号和第1号卡 +# 单机多卡启动,设置当前使用第 0 号和第 1 号卡 $ export CUDA_VISIBLE_DEVICES=0,1 $ python -m paddle.distributed.launch train.py ``` -#### 方式2、spawn启动 +#### 方式 2、spawn 启动 -launch方式启动训练,以文件为单位启动多进程,需要在启动时调用 ``paddle.distributed.launch`` ,对于进程的管理要求较高。飞桨框架2.0版本增加了 ``spawn`` 启动方式,可以更好地控制进程,在日志打印、训练退出时更友好。使用示例如下: +launch 方式启动训练,以文件为单位启动多进程,需要在启动时调用 ``paddle.distributed.launch`` ,对于进程的管理要求较高。飞桨框架 2.0 版本增加了 ``spawn`` 启动方式,可以更好地控制进程,在日志打印、训练退出时更友好。使用示例如下: ```python from __future__ import print_function @@ -410,36 +410,36 @@ def train(print_result=False): adam.step() adam.clear_grad() -# 使用方式1:仅传入训练函数 -# 适用场景:训练函数不需要任何参数,并且需要使用所有当前可见的GPU设备并行训练 +# 使用方式 1:仅传入训练函数 +# 适用场景:训练函数不需要任何参数,并且需要使用所有当前可见的 GPU 设备并行训练 if __name__ == '__main__': dist.spawn(train) -# 使用方式2:传入训练函数和参数 -# 适用场景:训练函数需要一些参数,并且需要使用所有当前可见的GPU设备并行训练 +# 使用方式 2:传入训练函数和参数 +# 适用场景:训练函数需要一些参数,并且需要使用所有当前可见的 GPU 设备并行训练 if __name__ == '__main__': dist.spawn(train, args=(True,)) -# 使用方式3:传入训练函数、参数并指定并行进程数 -# 适用场景:训练函数需要一些参数,并且仅需要使用部分可见的GPU设备并行训练,例如: -# 当前机器有8张GPU卡 {0,1,2,3,4,5,6,7},此时会使用前两张卡 {0,1}; -# 或者当前机器通过配置环境变量 CUDA_VISIBLE_DEVICES=4,5,6,7,仅使4张 -# GPU卡可见,此时会使用可见的前两张卡 {4,5} +# 使用方式 3:传入训练函数、参数并指定并行进程数 +# 适用场景:训练函数需要一些参数,并且仅需要使用部分可见的 GPU 设备并行训练,例如: +# 当前机器有 8 张 GPU 卡 {0,1,2,3,4,5,6,7},此时会使用前两张卡 {0,1}; +# 或者当前机器通过配置环境变量 CUDA_VISIBLE_DEVICES=4,5,6,7,仅使 4 张 +# GPU 卡可见,此时会使用可见的前两张卡 {4,5} if __name__ == '__main__': dist.spawn(train, args=(True,), nprocs=2) -# 使用方式4:传入训练函数、参数、指定进程数并指定当前使用的卡号 -# 使用场景:训练函数需要一些参数,并且仅需要使用部分可见的GPU设备并行训练,但是 -# 可能由于权限问题,无权配置当前机器的环境变量,例如:当前机器有8张GPU卡 -# {0,1,2,3,4,5,6,7},但你无权配置CUDA_VISIBLE_DEVICES,此时可以通过 +# 使用方式 4:传入训练函数、参数、指定进程数并指定当前使用的卡号 +# 使用场景:训练函数需要一些参数,并且仅需要使用部分可见的 GPU 设备并行训练,但是 +# 可能由于权限问题,无权配置当前机器的环境变量,例如:当前机器有 8 张 GPU 卡 +# {0,1,2,3,4,5,6,7},但你无权配置 CUDA_VISIBLE_DEVICES,此时可以通过 # 指定参数 selected_gpus 选择希望使用的卡,例如 selected_gpus='4,5', -# 可以指定使用第4号卡和第5号卡 +# 可以指定使用第 4 号卡和第 5 号卡 if __name__ == 
'__main__': dist.spawn(train, nprocs=2, selected_gpus='4,5') -# 使用方式5:指定多卡通信的起始端口 +# 使用方式 5:指定多卡通信的起始端口 # 使用场景:端口建立通信时提示需要重试或者通信建立失败 -# Paddle默认会通过在当前机器上寻找空闲的端口用于多卡通信,但当机器使用环境 +# Paddle 默认会通过在当前机器上寻找空闲的端口用于多卡通信,但当机器使用环境 # 较为复杂时,程序找到的端口可能不够稳定,此时可以自行指定稳定的空闲起始 # 端口以获得更稳定的训练体验 if __name__ == '__main__': @@ -447,10 +447,10 @@ if __name__ == '__main__': ``` ### 模型保存 -Paddle保存的模型有两种格式,一种是训练格式,保存模型参数和优化器相关的状态,可用于恢复训练;一种是预测格式,保存预测的静态图网络结构以及参数,用于预测部署。 -#### 高层API场景 +Paddle 保存的模型有两种格式,一种是训练格式,保存模型参数和优化器相关的状态,可用于恢复训练;一种是预测格式,保存预测的静态图网络结构以及参数,用于预测部署。 +#### 高层 API 场景 -高层API下用于预测部署的模型保存方法为: +高层 API 下用于预测部署的模型保存方法为: ```python model = paddle.Model(Mnist()) @@ -459,7 +459,7 @@ model.save('mnist', training=False) # 保存后可以得到预测部署所需要的模型 ``` -#### 基础API场景 +#### 基础 API 场景 动态图训练的模型,可以通过动静转换功能,转换为可部署的静态图模型,具体做法如下: @@ -473,9 +473,9 @@ class SimpleNet(paddle.nn.Layer): super(SimpleNet, self).__init__() self.linear = paddle.nn.Linear(10, 3) - # 第1处改动 - # 通过InputSpec指定输入数据的形状,None表示可变长 - # 通过to_static装饰器将动态图转换为静态图Program + # 第 1 处改动 + # 通过 InputSpec 指定输入数据的形状,None 表示可变长 + # 通过 to_static 装饰器将动态图转换为静态图 Program @to_static(input_spec=[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')]) def forward(self, x, y): out = self.linear(x) @@ -485,12 +485,12 @@ class SimpleNet(paddle.nn.Layer): net = SimpleNet() -# 第2处改动 +# 第 2 处改动 # 保存静态图模型,可用于预测部署 paddle.jit.save(net, './simple_net') ``` ### 推理 -推理库Paddle Inference的API做了升级,简化了写法,以及去掉了历史上冗余的概念。API的变化为纯增,原有API保持不变,但推荐新的API体系,旧API在后续版本会逐步删除。 +推理库 Paddle Inference 的 API 做了升级,简化了写法,以及去掉了历史上冗余的概念。API 的变化为纯增,原有 API 保持不变,但推荐新的 API 体系,旧 API 在后续版本会逐步删除。 #### C++ API @@ -506,7 +506,7 @@ API 变更 | 原有命名 | 现有命名 | 行为变化 | | ---------------------------- | ---------------------------- | ----------------------------- | | 头文件 `paddle_infer.h` | 无变化 | 包含旧接口,保持向后兼容 | -| 无 | `paddle_inference_api.h` | 新API,可以与旧接口并存 | +| 无 | `paddle_inference_api.h` | 新 API,可以与旧接口并存 | | `CreatePaddlePredictor` | `CreatePredictor` | 返回值变为 shared_ptr | | `ZeroCopyTensor` | 
`Tensor` | 无 | | `AnalysisConfig` | `Config` | 无 | @@ -544,19 +544,19 @@ #### Python API -Python API 的变更与 C++ 基本对应,会在2.0版发布。 +Python API 的变更与 C++ 基本对应,会在 2.0 版发布。 ## 附录 -### 2.0转换工具 -为了降级代码升级的成本,飞桨提供了转换工具,可以帮助将Paddle 1.8版本开发的代码,升级为2.0的API。由于相比于Paddle 1.8版本,2.0版本的API进行了大量的升级,包括API名称,参数名称,行为等。转换工具当前还不能覆盖所有的API升级;对于无法转换的API,转换工具会报错,提示手动升级。 +### 2.0 转换工具 +为了降低代码升级的成本,飞桨提供了转换工具,可以帮助将 Paddle 1.8 版本开发的代码,升级为 2.0 的 API。由于相比于 Paddle 1.8 版本,2.0 版本的 API 进行了大量的升级,包括 API 名称,参数名称,行为等。转换工具当前还不能覆盖所有的 API 升级;对于无法转换的 API,转换工具会报错,提示手动升级。 https://github.com/PaddlePaddle/paddle_upgrade_tool -对于转换工具没有覆盖的API,请查看官网的API文档,手动升级代码的API。 +对于转换工具没有覆盖的 API,请查看官网的 API 文档,手动升级代码的 API。 -### 2.0文档教程 -以下提供了2.0版本的一些示例教程: +### 2.0 文档教程 +以下提供了 2.0 版本的一些示例教程: 你可以在官网[应用实践](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/practices/index_cn.html)栏目内进行在线浏览,也可以下载在这里提供的源代码: https://github.com/PaddlePaddle/docs/tree/develop/docs/practices diff --git a/docs/guides/model_convert/update_en.md b/docs/guides/model_convert/update_en.md index b702d9fae8b..34d82d18913 100644 --- a/docs/guides/model_convert/update_en.md +++ b/docs/guides/model_convert/update_en.md @@ -85,7 +85,7 @@ In order to make the API organization more concise and clear, the original direc - For operation by an axis, do not add reduce prefix - Conv, Pool, Dropout, BatchNorm, Pad networking APIs add 1D, 2D, 3D suffixes according to input data type - | Paddle 1.8 API Names | Paddle 2.0对应的名称| + | Paddle 1.8 API Names | Paddle 2.0 对应的名称| | --------------- | ------------------------ | | paddle.fluid.layers.elementwise_add | paddle.add | | paddle.fluid.layers.elementwise_sub | paddle.subtract | diff --git a/docs/guides/performance_improving/amp_cn.md b/docs/guides/performance_improving/amp_cn.md index 09910ae10e3..fd5112d09d6 100644 --- a/docs/guides/performance_improving/amp_cn.md +++ b/docs/guides/performance_improving/amp_cn.md @@ -2,15 +2,15 @@ 一般情况下,训练深度学习模型时默认使用的数据类型(dtype)是 float32,每个数据占用 32 位的存储空间。为了节约显存消耗,业界提出了 16
位的数据类型(如 GPU 支持的 float16、bfloat16),每个数据仅需要 16 位的存储空间,比 float32 节省一半的存储空间,并且一些芯片可以在 16 位的数据上获得更快的计算速度,比如按照 NVIDIA 的数据显示,V100 GPU 上 矩阵乘和卷积计算在 float16 的计算速度最大可达 float32 的 8 倍。 -考虑到一些算子(OP)对数据精度的要求较高(如 softmax、cross_entropy),仍然需要采用 float32 进行计算;还有一些算子(如conv2d、matmul)对数据精度不敏感,可以采用 float16 / bfloat16 提升计算速度并降低存储空间,飞桨框架提供了**自动混合精度(Automatic Mixed Precision,以下简称为AMP)训练**的方法,可在模型训练时,自动为算子选择合适的数据计算精度(float32 或 float16 / bfloat16),在保持训练精度(accuracy)不损失的条件下,能够加速训练,可参考2018年百度与NVIDIA联合发表的论文:[MIXED PRECISION TRAINING](https://arxiv.org/pdf/1710.03740.pdf)。本文将介绍如何使用飞桨框架实现自动混合精度训练。 +考虑到一些算子(OP)对数据精度的要求较高(如 softmax、cross_entropy),仍然需要采用 float32 进行计算;还有一些算子(如 conv2d、matmul)对数据精度不敏感,可以采用 float16 / bfloat16 提升计算速度并降低存储空间,飞桨框架提供了**自动混合精度(Automatic Mixed Precision,以下简称为 AMP)训练**的方法,可在模型训练时,自动为算子选择合适的数据计算精度(float32 或 float16 / bfloat16),在保持训练精度(accuracy)不损失的条件下,能够加速训练,可参考 2018 年百度与 NVIDIA 联合发表的论文:[MIXED PRECISION TRAINING](https://arxiv.org/pdf/1710.03740.pdf)。本文将介绍如何使用飞桨框架实现自动混合精度训练。 ## 一、概述 ### 1.1 浮点数据类型 -[Float16](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) 和 [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format)(brain floating point)都是一种半精度浮点数据类型,在计算机中使用 2 字节(16位)存储。与计算中常用的单精度浮点数(float32)和双精度浮点数(float64)类型相比,float16 及 bfloat16 更适于在精度要求不高的场景中使用。 +[Float16](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) 和 [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format)(brain floating point)都是一种半精度浮点数据类型,在计算机中使用 2 字节(16 位)存储。与计算中常用的单精度浮点数(float32)和双精度浮点数(float64)类型相比,float16 及 bfloat16 更适于在精度要求不高的场景中使用。 -对比 float32 与 float16 / bfloat16 的浮点格式,如图1所示: +对比 float32 与 float16 / bfloat16 的浮点格式,如图 1 所示:
missing @@ -19,15 +19,15 @@ 上述数据类型存在如下数值特点: -- float32的指数位占8位,尾数位占23位,可表示的数据动态范围是[2^-126, 2^127],是深度学习模型时默认使用的数据类型。 -- float16的指数位占5位,尾数位占10位,相比float32,可表示的数据动态范围更低,最小可表示的正数数值为2^-14,最大可表示的数据为65504,容易出现数值上溢出问题。 -- bfloat16的指数位8位,尾数为7位,其特点是牺牲精度从而获取更大的数据范围,可表示的数据范围与float32一致,但是与float16相比bfloat16可表示的数据精度更低,相比float16更易出现数值下溢出的问题。 +- float32 的指数位占 8 位,尾数位占 23 位,可表示的数据动态范围是[2^-126, 2^127],是深度学习模型时默认使用的数据类型。 +- float16 的指数位占 5 位,尾数位占 10 位,相比 float32,可表示的数据动态范围更低,最小可表示的正数数值为 2^-14,最大可表示的数据为 65504,容易出现数值上溢出问题。 +- bfloat16 的指数位 8 位,尾数为 7 位,其特点是牺牲精度从而获取更大的数据范围,可表示的数据范围与 float32 一致,但是与 float16 相比 bfloat16 可表示的数据精度更低,相比 float16 更易出现数值下溢出的问题。 ### 1.2 AMP 计算过程 #### 1.2.1 auto_cast 策略 -飞桨框架采用了 **auto_cast 策略**实现模型训练过程中计算精度的自动转换及使用。通常情况下,模型参数使用单精度浮点格式存储(float32),在训练过程中,将模型参数从单精度浮点数(float32)转换为半精度浮点数(float16 或 bfloat16)参与前向计算,并得到半精度浮点数表示中间状态,然后使用半精度浮点数计算参数梯度,最后将参数梯度转换为单精度浮点数格式后,更新模型参数。计算过程如下图2所示: +飞桨框架采用了 **auto_cast 策略**实现模型训练过程中计算精度的自动转换及使用。通常情况下,模型参数使用单精度浮点格式存储(float32),在训练过程中,将模型参数从单精度浮点数(float32)转换为半精度浮点数(float16 或 bfloat16)参与前向计算,并得到半精度浮点数表示中间状态,然后使用半精度浮点数计算参数梯度,最后将参数梯度转换为单精度浮点数格式后,更新模型参数。计算过程如下图 2 所示:
missing @@ -36,24 +36,24 @@ 上图中蓝色虚线框内的逻辑即是 AMP 策略下参数精度转换(cast)逻辑,通常 cast 操作所带来的开销是有限的,当使用 float16 / bfloat16 在前向计算(forward compute)及反向传播(backward propagation)过程中取得的计算性能收益大于 cast 所带来的开销时,开启 AMP 训练将得到更优的训练性能。 -当模型参数在训练前即使用半精度浮点格式存数时(float16 / bfloat16),训练过程中将省去图 2 中的 cast 操作,可进一步提升模型训练性能,但是需要注意模型参数采用低精度数据类型进行存储,可能对模型最终的训练精度带来影响。计算过程如下图3所示: +当模型参数在训练前即使用半精度浮点格式存数时(float16 / bfloat16),训练过程中将省去图 2 中的 cast 操作,可进一步提升模型训练性能,但是需要注意模型参数采用低精度数据类型进行存储,可能对模型最终的训练精度带来影响。计算过程如下图 3 所示:
missing -
图 3. float16计算过程示意图
+
图 3. float16 计算过程示意图
-#### 1.2.2 grad_scaler策略 +#### 1.2.2 grad_scaler 策略 如 1.1 所述,半精度浮点数的表示范围远小于单精度浮点数的表示范围,在深度学习领域,参数、中间状态和梯度的值通常很小,因此以半精度浮点数参与计算时容易出现数值下溢(underflow)的情况,即接近零的值下溢为零值。为了避免这个问题,飞桨采用 **grad_scaler 策略**。主要内容是:对训练 loss 乘以一个称为 loss_scaling 的缩放值,根据链式法则,在反向传播过程中,参数梯度也等价于相应地乘以了 loss_scaling 的值,在参数更新时再将梯度值相应地除以 loss_scaling 的值。 -然而,在模型训练过程中,选择合适的 loss_scaling 值是个较大的挑战,因此,飞桨提供了 **动态loss_scaling** 的机制: +然而,在模型训练过程中,选择合适的 loss_scaling 值是个较大的挑战,因此,飞桨提供了 **动态 loss_scaling** 的机制: -1. 训练开始前,为loss_scaling设置一个较大的初始值init_loss_scaling,默认为2.^15,并设置4个用于动态调整loss_scaling大小的参数:incr_ratio=2.0、decr_ratio=0.5、incr_every_n_steps=1000、decr_every_n_nan_or_inf=2; -2. 启动训练后,在每次计算完成梯度后,对所有的梯度之进行检查,判断是否存在nan/inf并记录连续出现nan/inf的次数或连续未出现nan/inf的次数; -3. 当连续incr_every_n_step次迭代未出现nan/inf时,将loss_scaling乘incr_ratio; -4. 当连续decr_every_n_nan_or_inf次迭代出现nan/inf时,将loss_scaling乘decr_ratio; +1. 训练开始前,为 loss_scaling 设置一个较大的初始值 init_loss_scaling,默认为 2.^15,并设置 4 个用于动态调整 loss_scaling 大小的参数:incr_ratio=2.0、decr_ratio=0.5、incr_every_n_steps=1000、decr_every_n_nan_or_inf=2; +2. 启动训练后,在每次计算完成梯度后,对所有的梯度之进行检查,判断是否存在 nan/inf 并记录连续出现 nan/inf 的次数或连续未出现 nan/inf 的次数; +3. 当连续 incr_every_n_step 次迭代未出现 nan/inf 时,将 loss_scaling 乘 incr_ratio; +4. 
当连续 decr_every_n_nan_or_inf 次迭代出现 nan/inf 时,将 loss_scaling 乘 decr_ratio; ### 1.3 支持硬件说明 @@ -89,7 +89,7 @@ 以 Nvidia GPU 为例,介绍硬件加速机制: -在使用相同的超参数下,混合精度训练使用半精度浮点(float16 / bfloat16)和单精度(float32)浮点可达到与使用纯单精度(float32)训练相同的准确率,并可加速模型的训练速度,这主要得益于Nvidia 从 Volta 架构开始推出的 Tensor Core 技术。 +在使用相同的超参数下,混合精度训练使用半精度浮点(float16 / bfloat16)和单精度(float32)浮点可达到与使用纯单精度(float32)训练相同的准确率,并可加速模型的训练速度,这主要得益于 Nvidia 从 Volta 架构开始推出的 Tensor Core 技术。 在使用 float16 计算时具有如下特点: @@ -98,7 +98,7 @@ 从 NVIDIA Ampere 架构开始,GPU 支持 bfloat16,其计算性能与 float16 持平。 -> 说明:通过`nvidia-smi`指令可帮助查看NVIDIA显卡架构信息,混合精度训练适用的 NVIDIA GPU 计算能力至少为 7.0 的版本。此外如果已开启自动混合精度训练,飞桨框架会自动检测硬件环境是否符合要求,如不符合则将提供类似如下的警告信息:`UserWarning: AMP only support NVIDIA GPU with Compute Capability 7.0 or higher, current GPU is: Tesla K40m, with Compute Capability: 3.5.`。 +> 说明:通过`nvidia-smi`指令可帮助查看 NVIDIA 显卡架构信息,混合精度训练适用的 NVIDIA GPU 计算能力至少为 7.0 的版本。此外如果已开启自动混合精度训练,飞桨框架会自动检测硬件环境是否符合要求,如不符合则将提供类似如下的警告信息:`UserWarning: AMP only support NVIDIA GPU with Compute Capability 7.0 or higher, current GPU is: Tesla K40m, with Compute Capability: 3.5.`。 ### 1.4 适用场景说明 @@ -110,8 +110,8 @@ 依据 float16 数据类型在模型中的使用程度划分,飞桨框架的混合精度策略分为两个等级: -- **Level = ‘O1’**:采用黑白名单策略进行混合精度训练,黑名单中的 OP 将采用 float32 计算,白名单中的 OP 将采用 float16 计算,auto_cast策略会自动将白名单 OP 的输入参数数据类型从 float32 转为 float16。飞桨框架默认设置了 [黑白名单 OP 列表](../../api/paddle/amp/Overview_cn.html),对于不在黑白名单中的 OP,会依据该 OP 的全部输入数据类型进行推断,当全部输入均为 float16 时,OP 将直接采用 float16 计算,否则采用 float32 计算。计算逻辑可参考图2。 -- **Level = ‘O2’**:采用了比 O1 更为激进的策略,除了框架不支持 float16 计算的 OP 以及 O2 模式下自定义黑名单中的 OP,其他全部采用 float16 计算,此外,飞桨框架提供了将网络参数从 float32 转换为 float16 的接口,相比 O1 将进一步减少 auto_cast 逻辑中的 cast 操作,训练速度会有更明显的提升,但可能影响训练精度。计算逻辑可参考图3。 +- **Level = ‘O1’**:采用黑白名单策略进行混合精度训练,黑名单中的 OP 将采用 float32 计算,白名单中的 OP 将采用 float16 计算,auto_cast 策略会自动将白名单 OP 的输入参数数据类型从 float32 转为 float16。飞桨框架默认设置了 [黑白名单 OP 列表](../../api/paddle/amp/Overview_cn.html),对于不在黑白名单中的 OP,会依据该 OP 的全部输入数据类型进行推断,当全部输入均为 float16 时,OP 将直接采用 float16 计算,否则采用 float32 计算。计算逻辑可参考图 2。 +- **Level = ‘O2’**:采用了比 O1 
更为激进的策略,除了框架不支持 float16 计算的 OP 以及 O2 模式下自定义黑名单中的 OP,其他全部采用 float16 计算,此外,飞桨框架提供了将网络参数从 float32 转换为 float16 的接口,相比 O1 将进一步减少 auto_cast 逻辑中的 cast 操作,训练速度会有更明显的提升,但可能影响训练精度。计算逻辑可参考图 3。 飞桨框架推荐使用动态图模式训练模型,下面以动态图模式下单卡(GPU)训练场景为例,分别介绍使用基础 API 和高层 API 开启 AMP 训练的不同使用方式。 @@ -141,11 +141,11 @@ import numpy paddle.seed(100) numpy.random.seed(100) place = paddle.CUDAPlace(0) -# 定义神经网络SimpleNet,该网络由九层Linear组成 +# 定义神经网络 SimpleNet,该网络由九层 Linear 组成 class SimpleNet(paddle.nn.Layer): def __init__(self, input_size, output_size): super(SimpleNet, self).__init__() - # 九层Linear,每层Linear网络由matmul算子及add算子组成 + # 九层 Linear,每层 Linear 网络由 matmul 算子及 add 算子组成 self.linears = paddle.nn.LayerList( [paddle.nn.Linear(input_size, output_size) for i in range(9)]) @@ -161,12 +161,12 @@ class SimpleNet(paddle.nn.Layer): ```python epochs = 2 -input_size = 8192 # 设为较大的值,可更明显地对比AMP训练加速效果 -output_size = 8192 # 设为较大的值,可更明显地对比AMP训练加速效果 -batch_size = 2048 # batch_size为8的倍数加速效果更优 +input_size = 8192 # 设为较大的值,可更明显地对比 AMP 训练加速效果 +output_size = 8192 # 设为较大的值,可更明显地对比 AMP 训练加速效果 +batch_size = 2048 # batch_size 为 8 的倍数加速效果更优 nums_batch = 10 -# 定义Dataloader +# 定义 Dataloader from paddle.io import Dataset class RandomDataset(Dataset): def __init__(self, num_samples): @@ -188,31 +188,31 @@ loader = paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=False, dro ```python mse = paddle.nn.MSELoss() # 定义损失计算函数 -model = SimpleNet(input_size, output_size) # 定义SimpleNet模型 -optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义SGD优化器 +model = SimpleNet(input_size, output_size) # 定义 SimpleNet 模型 +optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义 SGD 优化器 train_time = 0 # 记录总训练时长 for epoch in range(epochs): for i, (data, label) in enumerate(loader): start_time = time.time() # 记录开始训练时刻 - label._to(place) # 将label数据拷贝到gpu - # 前向计算(9层Linear网络,每层由matmul、add算子组成) + label._to(place) # 将 label 数据拷贝到 gpu + # 前向计算(9 层 Linear 网络,每层由 matmul、add 算子组成) output = 
model(data) - # loss计算 + # loss 计算 loss = mse(output, label) # 反向传播 loss.backward() # 更新参数 optimizer.step() optimizer.clear_grad(set_to_zero=False) - # 记录训练loss及训练时长 + # 记录训练 loss 及训练时长 train_loss = loss.numpy() train_time += time.time() - start_time print("loss:", train_loss) -print("使用float32模式训练耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) +print("使用 float32 模式训练耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) # loss: [0.6486028] -# 使用float32模式训练耗时:0.529 sec +# 使用 float32 模式训练耗时:0.529 sec ``` > 注:如果该示例代码在你的机器上显示显存不足相关的错误,请尝试将`input_size`、`output_size`、`batch_size`调小。 @@ -221,92 +221,92 @@ print("使用float32模式训练耗时:{:.3f} sec".format(train_time/(epochs*num 使用 AMP-O1 训练,需要在 float32 训练代码的基础上添加两处逻辑: -- 逻辑1:使用 `paddle.amp.auto_cast` 创建 AMP 上下文环境,开启自动混合精度策略`Level = ‘O1’`。在该上下文环境影响范围内,框架会根据预设的黑白名单,自动确定每个 OP 的输入数据类型(float32 或 float16 / bfloat16)。也可以在该 API 中添加自定义黑白名单 OP 列表。 -- 逻辑2:使用 `paddle.amp.GradScaler` 控制 loss 缩放比例,规避浮点数下溢问题。在模型训练过程中,框架默认开启**动态 loss scaling 机制**(`use_dynamic_loss_scaling=True`),具体介绍见 [1.2.2 grad_scaler 策略](#gradscaler)。 +- 逻辑 1:使用 `paddle.amp.auto_cast` 创建 AMP 上下文环境,开启自动混合精度策略`Level = ‘O1’`。在该上下文环境影响范围内,框架会根据预设的黑白名单,自动确定每个 OP 的输入数据类型(float32 或 float16 / bfloat16)。也可以在该 API 中添加自定义黑白名单 OP 列表。 +- 逻辑 2:使用 `paddle.amp.GradScaler` 控制 loss 缩放比例,规避浮点数下溢问题。在模型训练过程中,框架默认开启**动态 loss scaling 机制**(`use_dynamic_loss_scaling=True`),具体介绍见 [1.2.2 grad_scaler 策略](#gradscaler)。 ```python mse = paddle.nn.MSELoss() # 定义损失计算函数 -model = SimpleNet(input_size, output_size) # 定义SimpleNet模型 -optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义SGD优化器 +model = SimpleNet(input_size, output_size) # 定义 SimpleNet 模型 +optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义 SGD 优化器 -# 逻辑2:可选,定义 GradScaler,用于缩放loss比例,避免浮点数溢出,默认开启动态更新loss_scaling机制 +# 逻辑 2:可选,定义 GradScaler,用于缩放 loss 比例,避免浮点数溢出,默认开启动态更新 loss_scaling 机制 scaler = paddle.amp.GradScaler(init_loss_scaling=1024) train_time = 0 # 记录总训练时长 for 
epoch in range(epochs): for i, (data, label) in enumerate(loader): start_time = time.time() # 记录开始训练时刻 - label._to(place) # 将label数据拷贝到gpu - # 逻辑1:创建 AMP-O1 auto_cast 环境,开启自动混合精度训练,将 add 算子添加到自定义白名单中(custom_white_list), + label._to(place) # 将 label 数据拷贝到 gpu + # 逻辑 1:创建 AMP-O1 auto_cast 环境,开启自动混合精度训练,将 add 算子添加到自定义白名单中(custom_white_list), # 因此前向计算过程中该算子将采用 float16 数据类型计算 with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O1'): - output = model(data) # 前向计算(9层Linear网络,每层由matmul、add算子组成) - loss = mse(output, label) # loss计算 - # 逻辑2:使用 GradScaler 完成 loss 的缩放,用缩放后的 loss 进行反向传播 - scaled = scaler.scale(loss) # loss缩放,乘以系数loss_scaling + output = model(data) # 前向计算(9 层 Linear 网络,每层由 matmul、add 算子组成) + loss = mse(output, label) # loss 计算 + # 逻辑 2:使用 GradScaler 完成 loss 的缩放,用缩放后的 loss 进行反向传播 + scaled = scaler.scale(loss) # loss 缩放,乘以系数 loss_scaling scaled.backward() # 反向传播 - scaler.step(optimizer) # 更新参数(参数梯度先除系数loss_scaling再更新参数) - scaler.update() # 基于动态loss_scaling策略更新loss_scaling系数 + scaler.step(optimizer) # 更新参数(参数梯度先除系数 loss_scaling 再更新参数) + scaler.update() # 基于动态 loss_scaling 策略更新 loss_scaling 系数 optimizer.clear_grad(set_to_zero=False) - # 记录训练loss及训练时长 + # 记录训练 loss 及训练时长 train_loss = loss.numpy() train_time += time.time() - start_time print("loss:", train_loss) -print("使用AMP-O1模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) +print("使用 AMP-O1 模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) # loss: [0.6486219] -# 使用AMP-O1模式耗时:0.118 sec +# 使用 AMP-O1 模式耗时:0.118 sec ``` #### 2.1.3 动态图 AMP-O2 训练 -使用 AMP-O2训练,需要在 float32 训练代码的基础上添加三处逻辑: +使用 AMP-O2 训练,需要在 float32 训练代码的基础上添加三处逻辑: -O2模式采用了比O1更为激进的策略,除了框架不支持FP16计算的OP,其他全部采用FP16计算,需要在训练前将网络参数从FP32转为FP16,在FP32代码的基础上添加三处逻辑: +O2 模式采用了比 O1 更为激进的策略,除了框架不支持 FP16 计算的 OP,其他全部采用 FP16 计算,需要在训练前将网络参数从 FP32 转为 FP16,在 FP32 代码的基础上添加三处逻辑: -- 逻辑1:在训练前使用 `paddle.amp.decorate` 将网络参数从 float32 转换为 float16。 -- 逻辑2:使用 `paddle.amp.auto_cast` 创建 AMP 上下文环境,开启自动混合精度策略`Level = ‘O2’`。在该上下文环境影响范围内,框架会将所有支持 float16 的 OP 
均采用 float16 进行计算(自定义的黑名单除外),其他 OP 采用 float32 进行计算。 -- 逻辑3:使用 `paddle.amp.GradScaler` 控制 loss 缩放比例,规避浮点数下溢问题。用法与 AMP-O1 中相同。 +- 逻辑 1:在训练前使用 `paddle.amp.decorate` 将网络参数从 float32 转换为 float16。 +- 逻辑 2:使用 `paddle.amp.auto_cast` 创建 AMP 上下文环境,开启自动混合精度策略`Level = ‘O2’`。在该上下文环境影响范围内,框架会将所有支持 float16 的 OP 均采用 float16 进行计算(自定义的黑名单除外),其他 OP 采用 float32 进行计算。 +- 逻辑 3:使用 `paddle.amp.GradScaler` 控制 loss 缩放比例,规避浮点数下溢问题。用法与 AMP-O1 中相同。 ```python mse = paddle.nn.MSELoss() # 定义损失计算函数 -model = SimpleNet(input_size, output_size) # 定义SimpleNet模型 -optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义SGD优化器 +model = SimpleNet(input_size, output_size) # 定义 SimpleNet 模型 +optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义 SGD 优化器 -# 逻辑1:在level=’O2‘模式下,将网络参数从FP32转换为FP16 +# 逻辑 1:在 level=’O2‘模式下,将网络参数从 FP32 转换为 FP16 model = paddle.amp.decorate(models=model, level='O2') -# 逻辑3:可选,定义 GradScaler,用于缩放loss比例,避免浮点数溢出,默认开启动态更新loss_scaling机制 +# 逻辑 3:可选,定义 GradScaler,用于缩放 loss 比例,避免浮点数溢出,默认开启动态更新 loss_scaling 机制 scaler = paddle.amp.GradScaler(init_loss_scaling=1024) train_time = 0 # 记录总训练时长 for epoch in range(epochs): for i, (data, label) in enumerate(loader): start_time = time.time() # 记录开始训练时刻 - label._to(place) # 将label数据拷贝到gpu - # 逻辑2:创建 AMP-O2 auto_cast 环境,开启自动混合精度训练,前向计算过程中该算子将采用 float16 数据类型计算 + label._to(place) # 将 label 数据拷贝到 gpu + # 逻辑 2:创建 AMP-O2 auto_cast 环境,开启自动混合精度训练,前向计算过程中该算子将采用 float16 数据类型计算 with paddle.amp.auto_cast(level='O2'): - output = model(data) # 前向计算(9层Linear网络,每层由matmul、add算子组成) - loss = mse(output, label) # loss计算 - # 逻辑3:使用 GradScaler 完成 loss 的缩放,用缩放后的 loss 进行反向传播 - scaled = scaler.scale(loss) # loss缩放,乘以系数loss_scaling + output = model(data) # 前向计算(9 层 Linear 网络,每层由 matmul、add 算子组成) + loss = mse(output, label) # loss 计算 + # 逻辑 3:使用 GradScaler 完成 loss 的缩放,用缩放后的 loss 进行反向传播 + scaled = scaler.scale(loss) # loss 缩放,乘以系数 loss_scaling scaled.backward() # 反向传播 - scaler.step(optimizer) # 
更新参数(参数梯度先除系数loss_scaling再更新参数) - scaler.update() # 基于动态loss_scaling策略更新loss_scaling系数 + scaler.step(optimizer) # 更新参数(参数梯度先除系数 loss_scaling 再更新参数) + scaler.update() # 基于动态 loss_scaling 策略更新 loss_scaling 系数 optimizer.clear_grad(set_to_zero=False) - # 记录训练loss及训练时长 + # 记录训练 loss 及训练时长 train_loss = loss.numpy() train_time += time.time() - start_time print("loss=", train_loss) -print("使用AMP-O2模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) +print("使用 AMP-O2 模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) # loss= [0.6743] -# 使用AMP-O2模式耗时:0.102 sec +# 使用 AMP-O2 模式耗时:0.102 sec ``` #### 2.1.4 对比不同模式下训练速度 -动态图FP32及AMP训练的精度速度对比如下表所示: +动态图 FP32 及 AMP 训练的精度速度对比如下表所示: | - | **float32** | **AMP-O1** | **AMP-O2** | | ------------ | ----------- | ---------- | ---------- | @@ -315,7 +315,7 @@ print("使用AMP-O2模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch 从上表统计结果可以看出,相比普通的 float32 训练模式, **AMP-O1** 模式训练速度提升约为 **4.5** 倍,**AMP-O2** 模式训练速度提升约为 **5.2** 倍。 -> 注:上述实验构建了一个理想化的实验模型,其matmul算子占比较高,所以加速比较明显,实际模型的加速效果与模型特点有关,理论上数值计算如matmul、conv占比较高的模型加速效果更明显。此外,受机器环境影响,上述示例代码的训练耗时统计可能存在差异,该影响主要包括:GPU 利用率、CPU 利用率等,本示例的测试机器配置如下: +> 注:上述实验构建了一个理想化的实验模型,其 matmul 算子占比较高,所以加速比较明显,实际模型的加速效果与模型特点有关,理论上数值计算如 matmul、conv 占比较高的模型加速效果更明显。此外,受机器环境影响,上述示例代码的训练耗时统计可能存在差异,该影响主要包括:GPU 利用率、CPU 利用率等,本示例的测试机器配置如下: | **Device** | **MEM Clocks** | **SM Clocks** | **Running with CPU Clocks** | | -------------------- | -------------- | ------------- | --------------------------- | @@ -332,16 +332,16 @@ import paddle.vision.transforms as T def run_example_code(): device = paddle.set_device('gpu') - # 利用高层API定义神经网络 + # 利用高层 API 定义神经网络 net = nn.Sequential(nn.Flatten(1), nn.Linear(784, 200), nn.Tanh(), nn.Linear(200, 10)) model = paddle.Model(net) # 定义优化器 optim = paddle.optimizer.SGD(learning_rate=1e-3, parameters=model.parameters()) # 初始化神经网络 amp_configs = { - "level": "O1", # level对应AMP模式:O1、O2 - "custom_white_list": {'conv2d'}, # 自定义白名单,同时还支持custom_black_list - "use_dynamic_loss_scaling": True # 
动态loss_scaling策略 + "level": "O1", # level 对应 AMP 模式:O1、O2 + "custom_white_list": {'conv2d'}, # 自定义白名单,同时还支持 custom_black_list + "use_dynamic_loss_scaling": True # 动态 loss_scaling 策略 } model.prepare(optim, paddle.nn.CrossEntropyLoss(), @@ -350,7 +350,7 @@ def run_example_code(): # 数据准备 transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])]) data = paddle.vision.datasets.MNIST(mode='train', transform=transform) - # 使用amp进行模型训练 + # 使用 amp 进行模型训练 model.fit(data, epochs=2, batch_size=32, verbose=1) if paddle.is_compiled_with_cuda(): @@ -369,42 +369,42 @@ if paddle.is_compiled_with_cuda(): ```python mse = paddle.nn.MSELoss() # 定义损失计算函数 -model = SimpleNet(input_size, output_size) # 定义SimpleNet模型 -optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义SGD优化器 +model = SimpleNet(input_size, output_size) # 定义 SimpleNet 模型 +optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) # 定义 SGD 优化器 accumulate_batchs_num = 10 # 梯度累加中 batch 的数量 -# 定义 GradScaler,用于缩放loss比例,避免浮点数溢出,默认开启动态更新loss_scaling机制 +# 定义 GradScaler,用于缩放 loss 比例,避免浮点数溢出,默认开启动态更新 loss_scaling 机制 scaler = paddle.amp.GradScaler(init_loss_scaling=1024) train_time = 0 # 记录总训练时长 for epoch in range(epochs): for i, (data, label) in enumerate(loader): start_time = time.time() # 记录开始训练时刻 - label._to(place) # 将label数据拷贝到gpu + label._to(place) # 将 label 数据拷贝到 gpu # 创建 AMP-O2 auto_cast 环境,开启自动混合精度训练,前向计算过程中该算子将采用 float16 数据类型计算 with paddle.amp.auto_cast(level='O1'): output = model(data) loss = mse(output, label) # 使用 GradScaler 完成 loss 的缩放,用缩放后的 loss 进行反向传播 - scaled = scaler.scale(loss) # loss缩放,乘以系数loss_scaling + scaled = scaler.scale(loss) # loss 缩放,乘以系数 loss_scaling scaled.backward() # 反向传播 # 当累计的 batch 为 accumulate_batchs_num 时,更新模型参数 if (i + 1) % accumulate_batchs_num == 0: - scaler.step(optimizer) # 更新参数(参数梯度先除系数loss_scaling再更新参数) - scaler.update() # 基于动态loss_scaling策略更新loss_scaling系数 + scaler.step(optimizer) # 更新参数(参数梯度先除系数 loss_scaling 再更新参数) 
+ scaler.update() # 基于动态 loss_scaling 策略更新 loss_scaling 系数 optimizer.clear_grad(set_to_zero=False) - # 记录训练loss及训练时长 + # 记录训练 loss 及训练时长 train_loss = loss.numpy() train_time += time.time() - start_time print("loss:", train_loss) -print("使用AMP-O1模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) +print("使用 AMP-O1 模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) # loss: [0.6602017] -# 使用AMP-O1模式耗时:0.113 sec +# 使用 AMP-O1 模式耗时:0.113 sec ``` -上面的例子中,每经过 `accumulate_batchs_num`个 batch 的训练步骤,进行1次参数更新。 +上面的例子中,每经过 `accumulate_batchs_num`个 batch 的训练步骤,进行 1 次参数更新。 ### 3.2 静态图训练开启 AMP @@ -415,15 +415,15 @@ print("使用AMP-O1模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch #### 3.2.1 静态图 float32 训练 -采用与 2.1.1节 动态图训练相同的网络结构,静态图网络初始化如下: +采用与 2.1.1 节 动态图训练相同的网络结构,静态图网络初始化如下: ```python paddle.enable_static() # 开启静态图模式 place = paddle.CUDAPlace(0) -# 定义静态图的program +# 定义静态图的 program main_program = paddle.static.default_main_program() startup_program = paddle.static.default_startup_program() -# 定义由9层Linear组成的神经网络 +# 定义由 9 层 Linear 组成的神经网络 model = SimpleNet(input_size, output_size) # 定义损失函数 mse_loss = paddle.nn.MSELoss() @@ -456,9 +456,9 @@ for epoch in range(epochs): train_time += time.time() - start_time print("loss:", train_loss) -print("使用FP32模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) +print("使用 FP32 模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) # loss: [array([0.6486028], dtype=float32)] -# 使用FP32模式耗时:0.531 sec +# 使用 FP32 模式耗时:0.531 sec ``` #### 3.2.2 静态图 AMP-O1 训练 @@ -502,9 +502,9 @@ for epoch in range(epochs): train_time += time.time() - start_time print("loss:", train_loss) -print("使用AMP-O1模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) +print("使用 AMP-O1 模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) # loss: [array([0.6486222], dtype=float32)] -# 使用AMP-O1模式耗时:0.117 sec +# 使用 AMP-O1 模式耗时:0.117 sec ``` `paddle.static.amp.CustomOpLists`用于自定义黑白名单,将 add 算子加入了白名单中,Linear 网络将全部执行在 float16 下。 @@ -527,7 +527,7 @@ loss = 
mse_loss(predict, label) optimizer = paddle.optimizer.SGD(learning_rate=0.0001, parameters=model.parameters()) -# 1)通过 `decorate` 对优化器进行封装,use_fp16_guard设置为False,网络的全部op执行FP16计算 +# 1)通过 `decorate` 对优化器进行封装,use_fp16_guard 设置为 False,网络的全部 op 执行 FP16 计算 optimizer = paddle.static.amp.decorate( optimizer=optimizer, init_loss_scaling=128.0, @@ -553,14 +553,14 @@ for epoch in range(epochs): train_time += time.time() - start_time print("loss:", train_loss) -print("使用AMP-O2模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) +print("使用 AMP-O2 模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) # loss: [array([0.6743], dtype=float16)] -# 使用AMP-O2模式耗时:0.098 sec +# 使用 AMP-O2 模式耗时:0.098 sec ``` > 注:在 AMP-O2 模式下,网络参数将从 float32 转为 float16,输入数据需要相应输入 float16 类型数据,因此需要将`class RandomDataset`中初始化的数据类型设置为`float16`。 -2)设置`paddle.static.amp.decorate`的参数`use_pure_fp16`为 True,同时设置参数`use_fp16_guard`为True,通过`paddle.static.amp.fp16_guard`控制使用 float16 的计算范围。 +2)设置`paddle.static.amp.decorate`的参数`use_pure_fp16`为 True,同时设置参数`use_fp16_guard`为 True,通过`paddle.static.amp.fp16_guard`控制使用 float16 的计算范围。 在模型定义的代码中加入`fp16_guard`控制部分网络执行在 float16 下: @@ -574,7 +574,7 @@ class SimpleNet(paddle.nn.Layer): def forward(self, x): for i, l in enumerate(self.linears): if i > 0: - # 在模型定义中通过fp16_guard控制使用float16的计算范围 + # 在模型定义中通过 fp16_guard 控制使用 float16 的计算范围 with paddle.static.amp.fp16_guard(): x = self.linears[i](x) else: @@ -619,14 +619,14 @@ for epoch in range(epochs): train_time += time.time() - start_time print("loss:", train_loss) -print("使用AMP-O2模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) +print("使用 AMP-O2 模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch))) # loss: [array([0.6691731], dtype=float32)] -# 使用AMP-O2模式耗时:0.140 sec +# 使用 AMP-O2 模式耗时:0.140 sec ``` #### 3.2.4 对比不同模式下训练速度 -静态图FP32及AMP训练的精度速度对比如下表所示: +静态图 FP32 及 AMP 训练的精度速度对比如下表所示: | - | **FP32** | **AMP-O1** | **AMP-O2** | | -------- | --------- | ---------- | ---------- | @@ -646,7 +646,7 @@ print("使用AMP-O2模式耗时:{:.3f} 
sec".format(train_time/(epochs*nums_batch - B 维度为:K x N - C 维度为:M x N - 矩阵乘使用建议如下:根据Tensor Core使用建议,当矩阵维数 M、N、K 是8(A100架构GPU为16)的倍数时(FP16数据下),性能最优。 + 矩阵乘使用建议如下:根据 Tensor Core 使用建议,当矩阵维数 M、N、K 是 8(A100 架构 GPU 为 16)的倍数时(FP16 数据下),性能最优。 2. 卷积计算定义为:`NKPQ = NCHW * KCRS`,其中: @@ -662,31 +662,31 @@ print("使用AMP-O2模式耗时:{:.3f} sec".format(train_time/(epochs*nums_batch 卷积计算使用建议如下: - - 输入/输出数据的通道数(C/K)可以被8整除(FP16),(cudnn7.6.3及以上的版本,如果不是8的倍数将会被自动填充) - - 对于网络第一层,通道数设置为4可以获得最佳的运算性能(NVIDIA为网络的第一层卷积提供了特殊实现,使用4通道性能更优) - - 设置内存中的张量布局为NHWC格式(如果输入NCHW格式,Tesor Core会自动转换为NHWC,当输入输出数值较大的时候,这种转置的开销往往更大) + - 输入/输出数据的通道数(C/K)可以被 8 整除(FP16),(cudnn7.6.3 及以上的版本,如果不是 8 的倍数将会被自动填充) + - 对于网络第一层,通道数设置为 4 可以获得最佳的运算性能(NVIDIA 为网络的第一层卷积提供了特殊实现,使用 4 通道性能更优) + - 设置内存中的张量布局为 NHWC 格式(如果输入 NCHW 格式,Tesor Core 会自动转换为 NHWC,当输入输出数值较大的时候,这种转置的开销往往更大) ## 五、AMP 常见问题及处理方法 飞桨 AMP 常见问题及处理方法如下: -1. 开启AMP训练后无加速效果或速度下降 +1. 开启 AMP 训练后无加速效果或速度下降 - 可能原因1:所用显卡并不支持AMP加速,可在训练日志中查看如下warning信息:`UserWarning: AMP only support NVIDIA GPU with Compute Capability 7.0 or higher, current GPU is: Tesla K40m, with Compute Capability: 3.5.`; + 可能原因 1:所用显卡并不支持 AMP 加速,可在训练日志中查看如下 warning 信息:`UserWarning: AMP only support NVIDIA GPU with Compute Capability 7.0 or higher, current GPU is: Tesla K40m, with Compute Capability: 3.5.`; - 可能原因2:模型是轻计算、重调度的类型,计算负载较大的matmul、conv等操作占比较低,可通过nvidia-smi实时产看显卡显存利用率(Memory Usage 及 GPU_Util 参数)。 + 可能原因 2:模型是轻计算、重调度的类型,计算负载较大的 matmul、conv 等操作占比较低,可通过 nvidia-smi 实时产看显卡显存利用率(Memory Usage 及 GPU_Util 参数)。 针对上述原因,建议关闭混合精度训练。 -2. AMP-O2与分布式训练同时使用时抛出RuntimeError: `For distributed AMP training, you should first use paddle.amp.decorate() to decotate origin model, and then call paddle.DataParallel get distributed model.` +2. 
AMP-O2 与分布式训练同时使用时抛出 RuntimeError: `For distributed AMP training, you should first use paddle.amp.decorate() to decotate origin model, and then call paddle.DataParallel get distributed model.` - 原因:AMP-O2的分布式训练,要求`paddle.amp.decorate`需要声明在`paddle.DataParallel`初始化分布式训练的网络前。 + 原因:AMP-O2 的分布式训练,要求`paddle.amp.decorate`需要声明在`paddle.DataParallel`初始化分布式训练的网络前。 正确用法如下: ``` import paddle -model = SimpleNet(input_size, output_size) # 定义SimpleNet模型 -model = paddle.amp.decorate(models=model, level='O2') # paddle.amp.decorate需要声明在paddle.DataParallel前 +model = SimpleNet(input_size, output_size) # 定义 SimpleNet 模型 +model = paddle.amp.decorate(models=model, level='O2') # paddle.amp.decorate 需要声明在 paddle.DataParallel 前 dp_model = paddle.DataParallel(model) ``` diff --git a/docs/guides/performance_improving/analysis_tools/benchmark_cn.md b/docs/guides/performance_improving/analysis_tools/benchmark_cn.md index 5f6b23f3ae0..af0a7385cd5 100644 --- a/docs/guides/performance_improving/analysis_tools/benchmark_cn.md +++ b/docs/guides/performance_improving/analysis_tools/benchmark_cn.md @@ -2,9 +2,9 @@ =============== 本文介绍如何给深度学习框架做基准测试。基准测试主要包含验证模型的精度和性能两方面,下文包含搭建测试环境,选择基准测试模型,验证测试结果等几方面内容。 -验证深度学习框架,可分为训练和测试两个阶段, 验证指标略有不同,本文只介绍训练阶段的指标验证。训练阶段关注的是模型训练集上的精度,训练集是完备的,因此关注大batch\_size下的训练速度,关注吞吐量,例如图像模型常用的batch\_size=128, 多卡情况下会加大;预测阶段关注的是在测试集上的精度,线上服务测试数据不能提前收集,因此关注小batch\_size下的预测速度,关注延迟,例如预测服务常用的batch\_size=1, 4等。 +验证深度学习框架,可分为训练和测试两个阶段, 验证指标略有不同,本文只介绍训练阶段的指标验证。训练阶段关注的是模型训练集上的精度,训练集是完备的,因此关注大 batch\_size 下的训练速度,关注吞吐量,例如图像模型常用的 batch\_size=128, 多卡情况下会加大;预测阶段关注的是在测试集上的精度,线上服务测试数据不能提前收集,因此关注小 batch\_size 下的预测速度,关注延迟,例如预测服务常用的 batch\_size=1, 4 等。 -[Fluid](https://github.com/PaddlePaddle/Paddle>)是PaddlePaddle从0.11.0版本开始引入的设计,本文的基准测试在该版本上完成。 +[Fluid](https://github.com/PaddlePaddle/Paddle>)是 PaddlePaddle 从 0.11.0 版本开始引入的设计,本文的基准测试在该版本上完成。 环境搭建 @@ -13,17 +13,17 @@ 基准测试中模型精度和硬件、框架无关,由模型结构和数据共同决定;性能方面由测试硬件和框架性能决定。框架基准测试为了对比框架之间的差异,控制硬件环境,系统库等版本一致。下文中的对比实验都在相同的硬件条件和系统环境条件下进行. 
-不同架构的GPU卡性能差异巨大,在验证模型在GPU上训练性能时,可使用NVIDIA提供的命令:```nvidia-smi``` 检验当前使用的GPU型号,如果测试多卡训练性能,需确认硬件连接是 [nvlink](https://zh.wikipedia.org/zh/NVLink)或 [PCIe](https://zh.wikipedia.org/zh-hans/PCI_Express)。 同样地,CPU型号会极大影响模型在CPU上的训练性能。可读取`/proc/cpuinfo`中的参数,确认当前正在使用的CPU型号。 +不同架构的 GPU 卡性能差异巨大,在验证模型在 GPU 上训练性能时,可使用 NVIDIA 提供的命令:```nvidia-smi``` 检验当前使用的 GPU 型号,如果测试多卡训练性能,需确认硬件连接是 [nvlink](https://zh.wikipedia.org/zh/NVLink)或 [PCIe](https://zh.wikipedia.org/zh-hans/PCI_Express)。 同样地,CPU 型号会极大影响模型在 CPU 上的训练性能。可读取`/proc/cpuinfo`中的参数,确认当前正在使用的 CPU 型号。 -下载GPU对应的Cuda Tool Kit和 Cudnn,或者使用NVIDIA官方发布的nvidia-docker镜像 [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), 镜像内包含了Cuda和Cudnn,本文采用这种方式。 Cuda Tool Kit包含了GPU代码使用到的基础库,影响在此基础上编译出的Fluid二进制运行性能。 +下载 GPU 对应的 Cuda Tool Kit 和 Cudnn,或者使用 NVIDIA 官方发布的 nvidia-docker 镜像 [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), 镜像内包含了 Cuda 和 Cudnn,本文采用这种方式。 Cuda Tool Kit 包含了 GPU 代码使用到的基础库,影响在此基础上编译出的 Fluid 二进制运行性能。 -准备好Cuda环境后,从github上下载Paddle代码并编译,会生成对应的最适合当前GPU的sm\_arch二进制[sm\_arch](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html)。另外,cudnn对卷积类任务影响巨大,在基准测试中需要小版本一致,例如Cudnn7.0.2与Cudnn7.1.4在Resnet上有5%以上差异。 +准备好 Cuda 环境后,从 github 上下载 Paddle 代码并编译,会生成对应的最适合当前 GPU 的 sm\_arch 二进制[sm\_arch](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html)。另外,cudnn 对卷积类任务影响巨大,在基准测试中需要小版本一致,例如 Cudnn7.0.2 与 Cudnn7.1.4 在 Resnet 上有 5%以上差异。 选择基准模型 ============ -对框架做基准测试,需要覆盖不同训练任务和不同大小的模型,本文中选取了图像和NLP的最为常用的5个模型。 +对框架做基准测试,需要覆盖不同训练任务和不同大小的模型,本文中选取了图像和 NLP 的最为常用的 5 个模型。 任务种类| 模型名称| 网络结构| 数据集 :---:|:--:|:---:|:---: @@ -33,17 +33,17 @@ 自然语言| Bert| Transformer| Wikipedia 机器翻译| Transformer| Attention| Wikipedia -CycleGAN, SE-ResNeXt50, DeepLab_V3+属于CNN模型, Bert, Transformer是一种比传统RNN模型更好的NLP模型。 +CycleGAN, SE-ResNeXt50, DeepLab_V3+属于 CNN 模型, Bert, Transformer 是一种比传统 RNN 模型更好的 NLP 模型。 [benchmark](https://github.com/PaddlePaddle/Paddle/tree/develop/benchmark/fluid) 
-基准模型测试脚本中,均跳过了前几个batch的训练过程,原因是加载数据和分配显存受系统当前运行情况影响,会导致统计性能不准确。运行完若干个轮次后,统计对应指标。 +基准模型测试脚本中,均跳过了前几个 batch 的训练过程,原因是加载数据和分配显存受系统当前运行情况影响,会导致统计性能不准确。运行完若干个轮次后,统计对应指标。 -基准模型的数据的选择方面,数据量大且验证效果多的公开数据集为首选。图像模型CycleGAN选择了horse2zebra数据集,SE-ResNeXt50选择了[image-net](http://www.image-net.org/challenges/LSVRC/2012/nnoupb)数据集,图像大小预处理为和Imagenet相同大小,因此性能可直接对比。 -NLP模型的公开且影响力大数据集较少,Bert和Transformer模型都选择了[Wikipedia](https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2)数据集。 +基准模型的数据的选择方面,数据量大且验证效果多的公开数据集为首选。图像模型 CycleGAN 选择了 horse2zebra 数据集,SE-ResNeXt50 选择了[image-net](http://www.image-net.org/challenges/LSVRC/2012/nnoupb)数据集,图像大小预处理为和 Imagenet 相同大小,因此性能可直接对比。 +NLP 模型的公开且影响力大数据集较少,Bert 和 Transformer 模型都选择了[Wikipedia](https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2)数据集。 -注意,图像模型每条样本大小相同,图像经过变换后大小一致,因此经过的计算路径基本相同,计算速度和显存占用波动较小,可以从若干个batch的数据中采样得到当前的训练性能数据。而NLP模型由于样本长度不定,计算路径和显存占用也不相同,因此只能完整运行若干个轮次后,统计速度和显存消耗。 -显存分配是特别耗时的操作,因此Fluid默认会占用所有可用显存空间形成显存池,用以加速计算过程中的显存分配。如果需要统计模型真实显存消耗,可设置环境变量`FLAGS_fraction_of_gpu_memory_to_use=0.0`,观察最大显存开销。 +注意,图像模型每条样本大小相同,图像经过变换后大小一致,因此经过的计算路径基本相同,计算速度和显存占用波动较小,可以从若干个 batch 的数据中采样得到当前的训练性能数据。而 NLP 模型由于样本长度不定,计算路径和显存占用也不相同,因此只能完整运行若干个轮次后,统计速度和显存消耗。 +显存分配是特别耗时的操作,因此 Fluid 默认会占用所有可用显存空间形成显存池,用以加速计算过程中的显存分配。如果需要统计模型真实显存消耗,可设置环境变量`FLAGS_fraction_of_gpu_memory_to_use=0.0`,观察最大显存开销。 测试过程 @@ -51,17 +51,17 @@ NLP模型的公开且影响力大数据集较少,Bert和Transformer模型都 - GPU 单机单卡测试 -本教程使用了Cuda9, Cudnn7.0.1。来源为:```nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04``` +本教程使用了 Cuda9, Cudnn7.0.1。来源为:```nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04``` ``` nvidia-docker run -it --name CASE_NAME --security-opt seccomp=unconfined -v $PWD/benchmark:/benchmark -v /usr/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu paddlepaddle/paddle:latest-dev /bin/bash ``` -在单卡上测试,设置CUDA的环境变量使用一块GPU,``CUDA_VISIBLE_DEVICES=0`` -然后代码中设置为使用CUDAPlace,如果使用Paddle代码库中的脚本,只需要命令行参数传入 use_gpu=True即可。 +在单卡上测试,设置 CUDA 的环境变量使用一块 GPU,``CUDA_VISIBLE_DEVICES=0`` +然后代码中设置为使用 
CUDAPlace,如果使用 Paddle 代码库中的脚本,只需要命令行参数传入 use_gpu=True 即可。 ``` >>> import paddle.fluid as fluid - >>> place = fluid.CUDAPlace(0) // 0 指第0块GPU + >>> place = fluid.CUDAPlace(0) // 0 指第 0 块 GPU ``` 测试结果 diff --git a/docs/guides/performance_improving/analysis_tools/cpu_profiling_cn.md b/docs/guides/performance_improving/analysis_tools/cpu_profiling_cn.md index 198a05a79e1..8b9c492f162 100644 --- a/docs/guides/performance_improving/analysis_tools/cpu_profiling_cn.md +++ b/docs/guides/performance_improving/analysis_tools/cpu_profiling_cn.md @@ -1,6 +1,6 @@ -# CPU性能调优 +# CPU 性能调优 -此教程会介绍如何使用Python的cProfile包、Python库yep、Google perftools来进行性能分析 (profiling) 与调优(performance tuning)。 +此教程会介绍如何使用 Python 的 cProfile 包、Python 库 yep、Google perftools 来进行性能分析 (profiling) 与调优(performance tuning)。 Profling 指发现性能瓶颈。系统中的瓶颈可能和程序员开发过程中想象的瓶颈相去甚远。Tuning 指消除瓶颈。性能优化的过程通常是不断重复地 profiling 和 tuning。 @@ -10,11 +10,11 @@ PaddlePaddle 用户一般通过调用 Python API 编写深度学习程序。大 * Python 与 C++ 混合代码的性能分析 -## Python代码的性能分析 +## Python 代码的性能分析 ### 生成性能分析文件 -Python标准库中提供了性能分析的工具包,[cProfile](https://docs.python.org/2/library/profile.html)。生成Python性能分析的命令如下: +Python 标准库中提供了性能分析的工具包,[cProfile](https://docs.python.org/2/library/profile.html)。生成 Python 性能分析的命令如下: ```bash python -m cProfile -o profile.out main.py @@ -24,15 +24,15 @@ python -m cProfile -o profile.out main.py ### 查看性能分析文件 -`cProfile` 在main.py 运行完毕后输出`profile.out`。我们可以使用[`cprofilev`](https://github.com/ymichael/cprofilev)来查看性能分析结果。`cprofilev`是一个Python的第三方库。使用它会开启一个HTTP服务,将性能分析结果以网页的形式展示出来: +`cProfile` 在 main.py 运行完毕后输出`profile.out`。我们可以使用[`cprofilev`](https://github.com/ymichael/cprofilev)来查看性能分析结果。`cprofilev`是一个 Python 的第三方库。使用它会开启一个 HTTP 服务,将性能分析结果以网页的形式展示出来: ```bash cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py ``` -其中`-a`标识HTTP服务绑定的IP。使用`0.0.0.0`允许外网访问这个HTTP服务。`-p`标识HTTP服务的端口。`-f`标识性能分析的结果文件。`main.py`标识被性能分析的源文件。 +其中`-a`标识 HTTP 服务绑定的 IP。使用`0.0.0.0`允许外网访问这个 HTTP 服务。`-p`标识 HTTP 服务的端口。`-f`标识性能分析的结果文件。`main.py`标识被性能分析的源文件。 -用Web浏览器访问对应网址,即可显示性能分析的结果: +用 
Web 浏览器访问对应网址,即可显示性能分析的结果: ``` ncalls tottime percall cumtime percall filename:lineno(function) @@ -62,7 +62,7 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py percall - tottime的每次调用平均时间 + tottime 的每次调用平均时间 cumtime @@ -70,7 +70,7 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py percall - cumtime的每次调用平均时间 + cumtime 的每次调用平均时间 filename:lineno(function) @@ -84,7 +84,7 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py 通常`tottime`和`cumtime`是寻找瓶颈的关键指标。这两个指标代表了某一个函数真实的运行时间。 -将性能分析结果按照tottime排序,效果如下: +将性能分析结果按照 tottime 排序,效果如下: ```text 4696 12.040 0.003 12.040 0.003 {built-in method run} @@ -94,7 +94,7 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/__init__.py:1() ``` -可以看到最耗时的函数是C++端的`run`函数。这需要联合我们第二节`Python`与`C++`混合代码的性能分析来进行调优。而`sync_with_cpp`函数的总共耗时很长,每次调用的耗时也很长。于是我们可以点击`sync_with_cpp`的详细信息,了解其调用关系。 +可以看到最耗时的函数是 C++端的`run`函数。这需要联合我们第二节`Python`与`C++`混合代码的性能分析来进行调优。而`sync_with_cpp`函数的总共耗时很长,每次调用的耗时也很长。于是我们可以点击`sync_with_cpp`的详细信息,了解其调用关系。 ```text Called By: @@ -119,13 +119,13 @@ Called: -## Python与C++混合代码的性能分析 +## Python 与 C++混合代码的性能分析 ### 生成性能分析文件 -C++的性能分析工具非常多。常见的包括`gprof`, `valgrind`, `google-perftools`。但是调试Python中使用的动态链接库与直接调试原始二进制相比增加了很多复杂度。幸而Python的一个第三方库`yep`提供了方便的和`google-perftools`交互的方法。于是这里使用`yep`进行Python与C++混合代码的性能分析 +C++的性能分析工具非常多。常见的包括`gprof`, `valgrind`, `google-perftools`。但是调试 Python 中使用的动态链接库与直接调试原始二进制相比增加了很多复杂度。幸而 Python 的一个第三方库`yep`提供了方便的和`google-perftools`交互的方法。于是这里使用`yep`进行 Python 与 C++混合代码的性能分析 -使用`yep`前需要安装`google-perftools`与`yep`包。ubuntu下安装命令为 +使用`yep`前需要安装`google-perftools`与`yep`包。ubuntu 下安装命令为 ```bash apt update @@ -141,15 +141,15 @@ python -m yep -v main.py 生成性能分析文件。生成的性能分析文件为`main.py.prof`。 -命令行中的`-v`指定在生成性能分析文件之后,在命令行显示分析结果。我们可以在命令行中简单的看一下生成效果。因为C++与Python不同,编译时可能会去掉调试信息,运行时也可能因为多线程产生混乱不可读的性能分析结果。为了生成更可读的性能分析结果,可以采取下面几点措施: +命令行中的`-v`指定在生成性能分析文件之后,在命令行显示分析结果。我们可以在命令行中简单的看一下生成效果。因为 C++与 Python 
不同,编译时可能会去掉调试信息,运行时也可能因为多线程产生混乱不可读的性能分析结果。为了生成更可读的性能分析结果,可以采取下面几点措施: -1. 编译时指定`-g`生成调试信息。使用cmake的话,可以将CMAKE_BUILD_TYPE指定为`RelWithDebInfo`。 +1. 编译时指定`-g`生成调试信息。使用 cmake 的话,可以将 CMAKE_BUILD_TYPE 指定为`RelWithDebInfo`。 2. 编译时一定要开启优化。单纯的`Debug`编译性能会和`-O2`或者`-O3`有非常大的差别。`Debug`模式下的性能测试是没有意义的。 -3. 运行性能分析的时候,先从单线程开始,再开启多线程,进而多机。毕竟单线程调试更容易。可以设置`OMP_NUM_THREADS=1`这个环境变量关闭openmp优化。 +3. 运行性能分析的时候,先从单线程开始,再开启多线程,进而多机。毕竟单线程调试更容易。可以设置`OMP_NUM_THREADS=1`这个环境变量关闭 openmp 优化。 ### 查看性能分析文件 -在运行完性能分析后,会生成性能分析结果文件。我们可以使用[`pprof`](https://github.com/google/pprof)来显示性能分析结果。注意,这里使用了用`Go`语言重构后的`pprof`,因为这个工具具有web服务界面,且展示效果更好。 +在运行完性能分析后,会生成性能分析结果文件。我们可以使用[`pprof`](https://github.com/google/pprof)来显示性能分析结果。注意,这里使用了用`Go`语言重构后的`pprof`,因为这个工具具有 web 服务界面,且展示效果更好。 安装`pprof`的命令和一般的`Go`程序是一样的,其命令如下: @@ -157,13 +157,13 @@ python -m yep -v main.py go get github.com/google/pprof ``` -进而我们可以使用如下命令开启一个HTTP服务: +进而我们可以使用如下命令开启一个 HTTP 服务: ```bash pprof -http=0.0.0.0:3213 `which python` ./main.py.prof ``` -这行命令中,`-http`指开启HTTP服务。`which python`会产生当前Python二进制的完整路径,进而指定了Python可执行文件的路径。`./main.py.prof`输入了性能分析结果。 +这行命令中,`-http`指开启 HTTP 服务。`which python`会产生当前 Python 二进制的完整路径,进而指定了 Python 可执行文件的路径。`./main.py.prof`输入了性能分析结果。 访问对应的网址,我们可以查看性能分析的结果。结果如下图所示: @@ -172,12 +172,12 @@ pprof -http=0.0.0.0:3213 `which python` ./main.py.prof ### 寻找性能瓶颈 -与寻找Python代码的性能瓶颈类似,寻找Python与C++混合代码的性能瓶颈也是要看`tottime`和`cumtime`。而`pprof`展示的调用图也可以帮助我们发现性能中的问题。 +与寻找 Python 代码的性能瓶颈类似,寻找 Python 与 C++混合代码的性能瓶颈也是要看`tottime`和`cumtime`。而`pprof`展示的调用图也可以帮助我们发现性能中的问题。 例如下图中, ![kernel_perf](./pprof_2.png) -在一次训练中,乘法和乘法梯度的计算占用2%-4%左右的计算时间。而`MomentumOp`占用了17%左右的计算时间。显然,`MomentumOp`的性能有问题。 +在一次训练中,乘法和乘法梯度的计算占用 2%-4%左右的计算时间。而`MomentumOp`占用了 17%左右的计算时间。显然,`MomentumOp`的性能有问题。 在`pprof`中,对于性能的关键路径都做出了红色标记。先检查关键路径的性能问题,再检查其他部分的性能问题,可以更有次序的完成性能的优化。 diff --git a/docs/guides/performance_improving/analysis_tools/index_cn.rst b/docs/guides/performance_improving/analysis_tools/index_cn.rst index c0a50dfb9d0..1e1a5d1cbe2 100644 --- 
a/docs/guides/performance_improving/analysis_tools/index_cn.rst +++ b/docs/guides/performance_improving/analysis_tools/index_cn.rst @@ -13,6 +13,6 @@ 本模块介绍 Fluid 使用过程中的调优方法,包括: -- `CPU性能调优 `_:介绍如何使用 cProfile 包、yep库、Google perftools 进行性能分析与调优 +- `CPU 性能调优 `_:介绍如何使用 cProfile 包、yep 库、Google perftools 进行性能分析与调优 - `堆内存分析和优化 `_:介绍如何使用 gperftool 进行堆内存分析和优化,以解决内存泄漏的问题 -- `Timeline工具简介 `_ :介绍如何使用 Timeline 工具进行性能分析和调优 +- `Timeline 工具简介 `_ :介绍如何使用 Timeline 工具进行性能分析和调优 diff --git a/docs/guides/performance_improving/analysis_tools/timeline_cn.md b/docs/guides/performance_improving/analysis_tools/timeline_cn.md index e40afcf3f4c..ef5b98d65e1 100644 --- a/docs/guides/performance_improving/analysis_tools/timeline_cn.md +++ b/docs/guides/performance_improving/analysis_tools/timeline_cn.md @@ -1,11 +1,11 @@ -# timeline工具简介 +# timeline 工具简介 ## 本地使用 -1. 在训练的主循环外加上`profiler.start_profiler(...)`和`profiler.stop_profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。 +1. 在训练的主循环外加上`profiler.start_profiler(...)`和`profiler.stop_profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个 profile 的记录文件。 **提示:** - 请不要在timeline记录信息时运行太多次迭代,因为timeline中的记录数量和迭代次数是成正比的。 + 请不要在 timeline 记录信息时运行太多次迭代,因为 timeline 中的记录数量和迭代次数是成正比的。 ```python import numpy as np @@ -50,28 +50,28 @@ python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline ``` -1. 打开chrome浏览器,访问,用`load`按钮来加载生成的`timeline`文件。 +1. 打开 chrome 浏览器,访问,用`load`按钮来加载生成的`timeline`文件。 -1. 结果如下图所示,可以放大来查看timeline的细节信息。 +1. 结果如下图所示,可以放大来查看 timeline 的细节信息。 ![chrome timeline](./timeline.jpeg) ## 分布式使用 -一般来说,分布式的训练程序都会有两种程序:pserver和trainer。我们提供了把pserver和trainer的profile日志用timeline来显示的方式。 +一般来说,分布式的训练程序都会有两种程序:pserver 和 trainer。我们提供了把 pserver 和 trainer 的 profile 日志用 timeline 来显示的方式。 -1. trainer打开方式与[本地使用](#local)部分的第1步相同 +1. trainer 打开方式与[本地使用](#local)部分的第 1 步相同 -1. pserver可以通过加两个环境变量打开profile,例如: +1. pserver 可以通过加两个环境变量打开 profile,例如: ``` FLAGS_rpc_server_profile_period=10 FLAGS_rpc_server_profile_path=./tmp/pserver python train.py ``` -3. 
把pserver和trainer的profile文件生成一个timeline文件,例如: +3. 把 pserver 和 trainer 的 profile 文件生成一个 timeline 文件,例如: ``` python /paddle/tools/timeline.py --profile_path trainer0=local_profile_10_pass0_0,trainer1=local_profile_10_pass0_1,pserver0=./pserver_0,pserver1=./pserver_1 --timeline_path ./dist.timeline ``` -4. 在chrome中加载dist.timeline文件,方法和[本地使用](#local)第4步相同。 +4. 在 chrome 中加载 dist.timeline 文件,方法和[本地使用](#local)第 4 步相同。 diff --git a/docs/guides/performance_improving/device_switching.md b/docs/guides/performance_improving/device_switching.md index 2e46207b438..3c15919c503 100644 --- a/docs/guides/performance_improving/device_switching.md +++ b/docs/guides/performance_improving/device_switching.md @@ -1,14 +1,14 @@ # 运行时设备切换 -Paddle提供了[fluid.CUDAPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CUDAPlace_cn.html)以及[fluid.CPUPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CPUPlace_cn.html)用于指定运行时的设备。这两个接口用于指定全局的设备,从1.8版本开始,Paddle提供了[device_guard](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/fluid_cn/device_guard_cn.html)接口,用于指定部分OP的运行设备,此教程会介绍device_guard的使用场景,以及如何使用该接口对模型进行优化。 +Paddle 提供了[fluid.CUDAPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CUDAPlace_cn.html)以及[fluid.CPUPlace](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/CPUPlace_cn.html)用于指定运行时的设备。这两个接口用于指定全局的设备,从 1.8 版本开始,Paddle 提供了[device_guard](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/fluid_cn/device_guard_cn.html)接口,用于指定部分 OP 的运行设备,此教程会介绍 device_guard 的使用场景,以及如何使用该接口对模型进行优化。 -如果使用了`fluid.CUDAPlace`设置了全局的执行设备,框架将尽可能地将OP设置在GPU上执行,因此有可能会遇到显存不够的情况。`device_guard`可以用于设置OP的执行设备,如果将部分层设置在CPU上运行,就能够充分利用CPU大内存的优势,避免显存超出。 +如果使用了`fluid.CUDAPlace`设置了全局的执行设备,框架将尽可能地将 OP 设置在 GPU 上执行,因此有可能会遇到显存不够的情况。`device_guard`可以用于设置 OP 的执行设备,如果将部分层设置在 CPU 上运行,就能够充分利用 CPU 大内存的优势,避免显存超出。 
-有时尽管指定了全局的执行设备为GPU,但框架在自动分配OP执行设备时,可能会将部分OP设置在CPU上执行。另外,个别OP会将输出存储在CPU上。在以上的场景中,常常会发生不同设备间的数据传输,可能会影响模型的性能。使用`device_guard`可以避免模型运行中不必要的数据传输。在下面的内容中,将会详细介绍如何通过[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/profiler_cn.html)工具分析数据传输开销,以及如何使用`device_guard`避免不必要的数据传输,从而提升模型性能。 +有时尽管指定了全局的执行设备为 GPU,但框架在自动分配 OP 执行设备时,可能会将部分 OP 设置在 CPU 上执行。另外,个别 OP 会将输出存储在 CPU 上。在以上的场景中,常常会发生不同设备间的数据传输,可能会影响模型的性能。使用`device_guard`可以避免模型运行中不必要的数据传输。在下面的内容中,将会详细介绍如何通过[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/profiler_cn.html)工具分析数据传输开销,以及如何使用`device_guard`避免不必要的数据传输,从而提升模型性能。 ## 如何避免显存超出 -下面示例代码中的`embedding`层,其参数`size`包含两个元素,第一个元素为`vocab_size` (词表大小), 第二个为`emb_size`(`embedding`层维度)。实际场景中,词表可能会非常大。示例代码中,词表大小被设置为10000000。如果在GPU模式下运行,该层创建的权重矩阵的大小为(10000000, 150),仅这一层就需要5.59G的显存,如果词表大小继续增加,极有可能会导致显存超出。 +下面示例代码中的`embedding`层,其参数`size`包含两个元素,第一个元素为`vocab_size` (词表大小), 第二个为`emb_size`(`embedding`层维度)。实际场景中,词表可能会非常大。示例代码中,词表大小被设置为 10000000。如果在 GPU 模式下运行,该层创建的权重矩阵的大小为(10000000, 150),仅这一层就需要 5.59G 的显存,如果词表大小继续增加,极有可能会导致显存超出。 ```python import paddle.fluid as fluid @@ -29,7 +29,7 @@ exe.run(fluid.default_startup_program()) result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost]) ``` -`embedding`是根据`input`中的`id`信息从`embedding`矩阵中查询对应`embedding`信息,在CPU上进行计算,其速度也是可接受的。因此,可以参考如下代码,使用`device_guard`将`embedding`层设置在CPU上,以利用CPU内存资源。那么,除了`embedding`层,其他各层都会在GPU上运行。 +`embedding`是根据`input`中的`id`信息从`embedding`矩阵中查询对应`embedding`信息,在 CPU 上进行计算,其速度也是可接受的。因此,可以参考如下代码,使用`device_guard`将`embedding`层设置在 CPU 上,以利用 CPU 内存资源。那么,除了`embedding`层,其他各层都会在 GPU 上运行。 ```python import paddle.fluid as fluid @@ -54,7 +54,7 @@ result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost]) 在显存足够的情况下,可不必进行这样的设置。 ## 如何减少数据传输 -### 使用profile工具确认是否发生了数据传输 +### 使用 profile 工具确认是否发生了数据传输 首先对模型的性能数据进行分析,找到发生数据传输的原因。如下列代码所示,可以利用[profile](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/profiler_cn.html)工具进行分析。 ```python @@ -76,7 +76,7 @@ with 
profiler.profiler('All', 'total') as prof: result = exe.run(program=compiled_prog, fetch_list=[out]) ``` -在程序运行结束后,将会自动地打印出profile report。在下面的profile report中,可以看到 `GpuMemCpy Summary`中给出了2项数据传输的调用耗时。在OP执行过程中,如果输入Tensor所在的设备与OP执行的设备不同,就会发生`GpuMemcpySync`,通常我们可以直接优化的就是这一项。进一步分析,可以看到`slice`和`crop_tensor`执行中都发生了`GpuMemcpySync`。尽管我们在程序中设置了GPU模式运行,但是框架中有些OP,例如shape,会将输出结果放在CPU上。 +在程序运行结束后,将会自动地打印出 profile report。在下面的 profile report 中,可以看到 `GpuMemCpy Summary`中给出了 2 项数据传输的调用耗时。在 OP 执行过程中,如果输入 Tensor 所在的设备与 OP 执行的设备不同,就会发生`GpuMemcpySync`,通常我们可以直接优化的就是这一项。进一步分析,可以看到`slice`和`crop_tensor`执行中都发生了`GpuMemcpySync`。尽管我们在程序中设置了 GPU 模式运行,但是框架中有些 OP,例如 shape,会将输出结果放在 CPU 上。 ```text -------------------------> Profiling Report <------------------------- @@ -113,12 +113,12 @@ eager_deletion 30 0.287236 ScopeBufferedMonitor::pre_local_exec_scopes_process 10 0.047864 0.047864 (1.000000) 0.000000 (0.000000) 0.003668 0.011592 0.0047864 0.00179718 InitLocalVars 1 0.022981 0.022981 (1.000000) 0.000000 (0.000000) 0.022981 0.022981 0.022981 0.000862883 ``` -### 通过log查看发生数据传输的具体位置 +### 通过 log 查看发生数据传输的具体位置 -以上的示例程序比较简单,我们只用看profile report就能知道具体是哪些算子发生了数据传输。但是当模型比较复杂时,可能需要去查看更加详细的调试信息,可以打印出运行时的log去确定发生数据传输的具体位置。依然以上述程序为例,执行`GLOG_vmodule=operator=3 python test_case.py`,会得到如下log信息,会发现发生了2次数据传输: +以上的示例程序比较简单,我们只用看 profile report 就能知道具体是哪些算子发生了数据传输。但是当模型比较复杂时,可能需要去查看更加详细的调试信息,可以打印出运行时的 log 去确定发生数据传输的具体位置。依然以上述程序为例,执行`GLOG_vmodule=operator=3 python test_case.py`,会得到如下 log 信息,会发现发生了 2 次数据传输: -- `shape`输出的结果在CPU上,在`slice`运行时,`shape`的输出被拷贝到GPU上 -- `slice`执行完的结果在GPU上,当`crop_tensor`执行时,它会被拷贝到CPU上。 +- `shape`输出的结果在 CPU 上,在`slice`运行时,`shape`的输出被拷贝到 GPU 上 +- `slice`执行完的结果在 GPU 上,当`crop_tensor`执行时,它会被拷贝到 CPU 上。 ```text I0406 14:56:23.286592 17516 operator.cc:180] CUDAPlace(0) Op(shape), inputs:{Input[fill_constant_1.tmp_0:float[1, 3, 5, 5]({})]}, outputs:{Out[shape_0.tmp_0:int[4]({})]}. 
@@ -133,9 +133,9 @@ I0406 14:56:23.287220 17516 tensor_util.cu:129] TensorCopySync 4 from CUDAPlace( I0406 14:56:23.287473 17516 operator.cc:180] CUDAPlace(0) Op(crop_tensor), inputs:{Offsets[], OffsetsTensor[], Shape[slice_0.tmp_0:int[4]({})], ShapeTensor[], X[fill_constant_0.tmp_0:float[1, 3, 8, 8]({})]}, outputs:{Out[crop_tensor_0.tmp_0:float[1, 3, 5, 5]({})]}. ``` -### 使用device_guard避免不必要的数据传输 +### 使用 device_guard 避免不必要的数据传输 -在上面的例子中,`shape`输出的是一个1-D的Tensor,因此对于`slice`而言计算量很小。这种情况下如果将`slice`设置在CPU上运行,就可以避免2次数据传输。修改后的程序如下: +在上面的例子中,`shape`输出的是一个 1-D 的 Tensor,因此对于`slice`而言计算量很小。这种情况下如果将`slice`设置在 CPU 上运行,就可以避免 2 次数据传输。修改后的程序如下: ```python import paddle.fluid as fluid @@ -156,7 +156,7 @@ with profiler.profiler('All', 'total') as prof: for i in range(10): result = exe.run(program=compiled_prog, fetch_list=[out]) ``` -再次观察profile report中`GpuMemCpy Summary`的内容,可以看到`GpuMemCpySync`已经被消除。在实际的模型中,若`GpuMemCpySync` 调用耗时占比较大,并且可以通过设置`device_guard`避免,那么就能够带来一定的性能提升。 +再次观察 profile report 中`GpuMemCpy Summary`的内容,可以看到`GpuMemCpySync`已经被消除。在实际的模型中,若`GpuMemCpySync` 调用耗时占比较大,并且可以通过设置`device_guard`避免,那么就能够带来一定的性能提升。 ```text -------------------------> Profiling Report <------------------------- @@ -193,7 +193,7 @@ ScopeBufferedMonitor::pre_local_exec_scopes_process 10 0.032231 ### 总结 -- 使用profile工具对模型进行分析,看是否存在GpuMemcpySync的调用耗时。若存在,则进一步分析发生数据传输的原因。 -- 可以通过profile report找到发生GpuMemcpySync的OP。如果需要,可以通过打印log,找到GpuMemcpySync发生的具体位置。 -- 尝试使用`device_guard`设置部分OP的运行设备,来减少GpuMemcpySync的调用。 -- 最后可以通过比较修改前后模型的profile report,或者其他用来衡量性能的指标,确认修改后是否带来了性能提升。 +- 使用 profile 工具对模型进行分析,看是否存在 GpuMemcpySync 的调用耗时。若存在,则进一步分析发生数据传输的原因。 +- 可以通过 profile report 找到发生 GpuMemcpySync 的 OP。如果需要,可以通过打印 log,找到 GpuMemcpySync 发生的具体位置。 +- 尝试使用`device_guard`设置部分 OP 的运行设备,来减少 GpuMemcpySync 的调用。 +- 最后可以通过比较修改前后模型的 profile report,或者其他用来衡量性能的指标,确认修改后是否带来了性能提升。 diff --git a/docs/guides/performance_improving/memory_optimize.rst b/docs/guides/performance_improving/memory_optimize.rst index 23d19d912fc..681f09510f2 100644 --- 
a/docs/guides/performance_improving/memory_optimize.rst +++ b/docs/guides/performance_improving/memory_optimize.rst @@ -4,64 +4,64 @@ 存储分配与优化 ########### -1. PaddlePaddle的显存分配策略 +1. PaddlePaddle 的显存分配策略 =========================== -1.1. 显存自增长AutoGrowth策略 +1.1. 显存自增长 AutoGrowth 策略 -------------------------- -自1.6+的版本起,PaddlePaddle支持显存自增长AutoGrowth策略,按需分配显存,且已于1.7+版本中默认开启,方便用户在同一张GPU卡上同时运行多个任务。 +自 1.6+的版本起,PaddlePaddle 支持显存自增长 AutoGrowth 策略,按需分配显存,且已于 1.7+版本中默认开启,方便用户在同一张 GPU 卡上同时运行多个任务。 -由于原生的CUDA系统调用 :code:`cudaMalloc` 和 :code:`cudaFree` 均是同步操作,非常耗时。 -因此显存自增长AutoGrowth策略会缓存已分配到的显存,供后续分配使用,具体方式为: +由于原生的 CUDA 系统调用 :code:`cudaMalloc` 和 :code:`cudaFree` 均是同步操作,非常耗时。 +因此显存自增长 AutoGrowth 策略会缓存已分配到的显存,供后续分配使用,具体方式为: -- 在前几次显存分配时,框架会调用 :code:`cudaMalloc` 按需分配,但释放时不会调用 :code:`cudaFree` 返回给GPU,而是在框架内部缓存起来。 +- 在前几次显存分配时,框架会调用 :code:`cudaMalloc` 按需分配,但释放时不会调用 :code:`cudaFree` 返回给 GPU,而是在框架内部缓存起来。 -- 在随后的显存分配时,框架会首先检查缓存的显存中是否有合适的块,若有则从中分割出所需的显存空间返回,否则才调用 :code:`cudaMalloc` 直接从GPU中分配。随后的显存释放亦会缓存起来供后续分配使用。 +- 在随后的显存分配时,框架会首先检查缓存的显存中是否有合适的块,若有则从中分割出所需的显存空间返回,否则才调用 :code:`cudaMalloc` 直接从 GPU 中分配。随后的显存释放亦会缓存起来供后续分配使用。 -因此,显存自增长AutoGrowth策略会在前几个batch训练时分配较慢(因为频繁调用 :code:`cudaMalloc` ),在随后训练过程中基本不会影响模型训练速度。 +因此,显存自增长 AutoGrowth 策略会在前几个 batch 训练时分配较慢(因为频繁调用 :code:`cudaMalloc` ),在随后训练过程中基本不会影响模型训练速度。 1.2. 显存预分配策略 ---------------- -除了显存自增长AutoGrowth策略以外,PaddlePaddle还提供了显存预分配策略。显存预分配策略是PaddlePaddle 1.7版本前的默认显存分配策略。 +除了显存自增长 AutoGrowth 策略以外,PaddlePaddle 还提供了显存预分配策略。显存预分配策略是 PaddlePaddle 1.7 版本前的默认显存分配策略。 -显存预分配策略会在第一次分配时分配很大chunk_size的显存块,随后的显存分配大多从预分配的显存块中切分获得。 -其中,chunk_size由环境变量 :code:`FLAGS_fraction_of_gpu_memory_to_use` 确定,chunk_size的计算公式为: +显存预分配策略会在第一次分配时分配很大 chunk_size 的显存块,随后的显存分配大多从预分配的显存块中切分获得。 +其中,chunk_size 由环境变量 :code:`FLAGS_fraction_of_gpu_memory_to_use` 确定,chunk_size 的计算公式为: .. 
code-block:: python - chunk_size = FLAGS_fraction_of_gpu_memory_to_use * 单张GPU卡的当前可用显存值 + chunk_size = FLAGS_fraction_of_gpu_memory_to_use * 单张 GPU 卡的当前可用显存值 -:code:`FLAGS_fraction_of_gpu_memory_to_use` 的默认值为0.92,即框架预先分配显卡92%的当前可用显存值。 +:code:`FLAGS_fraction_of_gpu_memory_to_use` 的默认值为 0.92,即框架预先分配显卡 92%的当前可用显存值。 显存预分配策略分配显存的具体方式为: -- 在分配requested_size大小的显存时, - - 若requested_size <= chunk_size,则框架会预先分配chunk_size大小的显存池chunk,并从chunk中分出requested_size大小的块返回。之后每次申请显存都会从chunk中分配。 - - 若requested_size > chunk_size,则框架会直接调用 :code:`cudaMalloc` 分配requested_size大小的显存返回。 +- 在分配 requested_size 大小的显存时, + - 若 requested_size <= chunk_size,则框架会预先分配 chunk_size 大小的显存池 chunk,并从 chunk 中分出 requested_size 大小的块返回。之后每次申请显存都会从 chunk 中分配。 + - 若 requested_size > chunk_size,则框架会直接调用 :code:`cudaMalloc` 分配 requested_size 大小的显存返回。 -- 在释放free_size大小的显存时, - - 若free_size <= chunk_size,则框架会将该显存放回预分配的chunk中,而不是直接返回给CUDA。 - - 若free_size > chunk_size,则框架会直接调用 :code:`cudaFree` 将显存返回给CUDA。 +- 在释放 free_size 大小的显存时, + - 若 free_size <= chunk_size,则框架会将该显存放回预分配的 chunk 中,而不是直接返回给 CUDA。 + - 若 free_size > chunk_size,则框架会直接调用 :code:`cudaFree` 将显存返回给 CUDA。 -若你的GPU卡上有其他任务占用显存,你可以适当将 :code:`FLAGS_fraction_of_gpu_memory_to_use` 减少,保证框架能预分配到合适的显存块,例如: +若你的 GPU 卡上有其他任务占用显存,你可以适当将 :code:`FLAGS_fraction_of_gpu_memory_to_use` 减少,保证框架能预分配到合适的显存块,例如: .. code-block:: shell - export FLAGS_fraction_of_gpu_memory_to_use=0.4 # 预先40%的GPU显存 + export FLAGS_fraction_of_gpu_memory_to_use=0.4 # 预先 40%的 GPU 显存 -若 :code:`FLAGS_fraction_of_gpu_memory_to_use` 设为0,则每次显存分配和释放均会调用 :code:`cudaMalloc` 和 :code:`cudaFree` ,会严重影响性能,不建议你使用。 -只有当你想测量网络的实际显存占用量时,你可以设置 :code:`FLAGS_fraction_of_gpu_memory_to_use` 为0,观察nvidia-smi显示的显存占用情况。 +若 :code:`FLAGS_fraction_of_gpu_memory_to_use` 设为 0,则每次显存分配和释放均会调用 :code:`cudaMalloc` 和 :code:`cudaFree` ,会严重影响性能,不建议你使用。 +只有当你想测量网络的实际显存占用量时,你可以设置 :code:`FLAGS_fraction_of_gpu_memory_to_use` 为 0,观察 nvidia-smi 显示的显存占用情况。 1.3. 
显存分配策略的选择方式 ----------------------- -自1.6+版本起,PaddlePaddle同时支持显存自增长AutoGrowth策略和显存预分配策略,并通过环境变量 :code:`FLAGS_allocator_strategy` 控制。 +自 1.6+版本起,PaddlePaddle 同时支持显存自增长 AutoGrowth 策略和显存预分配策略,并通过环境变量 :code:`FLAGS_allocator_strategy` 控制。 -选择显存自增长AutoGrowth的方式为: +选择显存自增长 AutoGrowth 的方式为: .. code-block:: shell - export FLAGS_allocator_strategy=auto_growth # 选择显存自增长AutoGrowth策略 + export FLAGS_allocator_strategy=auto_growth # 选择显存自增长 AutoGrowth 策略 选择显存预分配策略的方式为: @@ -69,72 +69,72 @@ export FLAGS_allocator_strategy=naive_best_fit # 选择显存预分配策略 -此外,自1.7.2+版本起,PaddlePaddle提供了环境变量 :code:`FLAGS_gpu_memory_limit_mb` ,用于控制单个任务进程可分配的最大显存,单位是MB。默认值是0,表示没有限制,可分配全部显存。如果设置为大于0的值,则会在分配的显存超过限制时报错,即使此时系统还存在空闲的显存空间。 +此外,自 1.7.2+版本起,PaddlePaddle 提供了环境变量 :code:`FLAGS_gpu_memory_limit_mb` ,用于控制单个任务进程可分配的最大显存,单位是 MB。默认值是 0,表示没有限制,可分配全部显存。如果设置为大于 0 的值,则会在分配的显存超过限制时报错,即使此时系统还存在空闲的显存空间。 -2. PaddlePaddle的存储优化策略 +2. PaddlePaddle 的存储优化策略 =========================== -PaddlePaddle提供了多种通用存储优化方法,优化你的网络的存储占用(包括显存和内存)。 +PaddlePaddle 提供了多种通用存储优化方法,优化你的网络的存储占用(包括显存和内存)。 -2.1. GC策略: 存储垃圾及时回收 +2.1. 
GC 策略: 存储垃圾及时回收 ------------------------- -GC(Garbage Collection)的原理是在网络运行阶段及时释放无用变量的存储空间,达到节省存储空间的目的。GC适用于使用Executor,ParallelExecutor做模型训练/预测的场合,但不适用于C++预测库接口。 +GC(Garbage Collection)的原理是在网络运行阶段及时释放无用变量的存储空间,达到节省存储空间的目的。GC 适用于使用 Executor,ParallelExecutor 做模型训练/预测的场合,但不适用于 C++预测库接口。 -**GC策略已于1.6+版本中默认开启。** +**GC 策略已于 1.6+版本中默认开启。** -GC策略由三个环境变量控制: +GC 策略由三个环境变量控制: - :code:`FLAGS_eager_delete_tensor_gb` -GC策略的使能开关,double类型,在<1.6的版本中默认值为-1,在1.6+版本中默认值为0。GC策略会积攒一定大小的存储垃圾后再统一释放,:code:`FLAGS_eager_delete_tensor_gb` 控制的是存储垃圾的阈值,单位是GB。**建议用户设置** :code:`FLAGS_eager_delete_tensor_gb=0` 。 +GC 策略的使能开关,double 类型,在<1.6 的版本中默认值为-1,在 1.6+版本中默认值为 0。GC 策略会积攒一定大小的存储垃圾后再统一释放,:code:`FLAGS_eager_delete_tensor_gb` 控制的是存储垃圾的阈值,单位是 GB。**建议用户设置** :code:`FLAGS_eager_delete_tensor_gb=0` 。 若 :code:`FLAGS_eager_delete_tensor_gb=0` ,则一旦有存储垃圾则马上回收,最为节省存储空间。 -若 :code:`FLAGS_eager_delete_tensor_gb=1` ,则存储垃圾积攒到1G后才触发回收。 +若 :code:`FLAGS_eager_delete_tensor_gb=1` ,则存储垃圾积攒到 1G 后才触发回收。 -若 :code:`FLAGS_eager_delete_tensor_gb<0` ,则GC策略关闭。 +若 :code:`FLAGS_eager_delete_tensor_gb<0` ,则 GC 策略关闭。 - :code:`FLAGS_memory_fraction_of_eager_deletion` -GC策略的调节flag,double类型,默认值为1,范围为[0,1],仅适用于使用ParallelExecutor或CompiledProgram+with_data_parallel的场合。 -GC内部会根据变量占用的存储空间大小,对变量进行降序排列,且仅回收前 :code:`FLAGS_memory_fraction_of_eager_deletion` 大的变量的存储空间。**建议用户维持默认值**,即 :code:`FLAGS_memory_fraction_of_eager_deletion=1` 。 +GC 策略的调节 flag,double 类型,默认值为 1,范围为[0,1],仅适用于使用 ParallelExecutor 或 CompiledProgram+with_data_parallel 的场合。 +GC 内部会根据变量占用的存储空间大小,对变量进行降序排列,且仅回收前 :code:`FLAGS_memory_fraction_of_eager_deletion` 大的变量的存储空间。**建议用户维持默认值**,即 :code:`FLAGS_memory_fraction_of_eager_deletion=1` 。 -若 :code:`FLAGS_memory_fraction_of_eager_deletion=0.6` ,则表示仅回收存储占用60%大的变量的存储空间。 +若 :code:`FLAGS_memory_fraction_of_eager_deletion=0.6` ,则表示仅回收存储占用 60%大的变量的存储空间。 -若 :code:`FLAGS_memory_fraction_of_eager_deletion=0` ,则表示不回收任何变量的存储空间,GC策略关闭。 +若 :code:`FLAGS_memory_fraction_of_eager_deletion=0` ,则表示不回收任何变量的存储空间,GC 策略关闭。 若 
:code:`FLAGS_memory_fraction_of_eager_deletion=1` ,则表示回收所有变量的存储空间。 - :code:`FLAGS_fast_eager_deletion_mode` -快速GC策略的开关,bool类型,默认值为True,表示使用快速GC策略。快速GC策略会不等待CUDA Kernel结束直接释放显存。**建议用户维持默认值**,即 :code:`FLAGS_fast_eager_deletion_mode=True` 。 +快速 GC 策略的开关,bool 类型,默认值为 True,表示使用快速 GC 策略。快速 GC 策略会不等待 CUDA Kernel 结束直接释放显存。**建议用户维持默认值**,即 :code:`FLAGS_fast_eager_deletion_mode=True` 。 -2.2. Inplace策略: Op内部的输出复用输入 +2.2. Inplace 策略: Op 内部的输出复用输入 ---------------------------------- -Inplace策略的原理是Op的输出复用Op输入的存储空间。例如,reshape操作的输出和输入可复用同一片存储空间。 +Inplace 策略的原理是 Op 的输出复用 Op 输入的存储空间。例如,reshape 操作的输出和输入可复用同一片存储空间。 -Inplace策略适用于使用ParallelExecutor或CompiledProgram+with_data_parallel的场合,通过 :code:`BuildStrategy` 设置。此策略不支持使用Executor+Program做单卡训练、使用C++预测库接口等场合。 +Inplace 策略适用于使用 ParallelExecutor 或 CompiledProgram+with_data_parallel 的场合,通过 :code:`BuildStrategy` 设置。此策略不支持使用 Executor+Program 做单卡训练、使用 C++预测库接口等场合。 -**Inplace策略已于1.6+版本中默认开启。** +**Inplace 策略已于 1.6+版本中默认开启。** 具体方式为: .. code-block:: python build_strategy = fluid.BuildStrategy() - build_strategy.enable_inplace = True # 开启Inplace策略 + build_strategy.enable_inplace = True # 开启 Inplace 策略 compiled_program = fluid.CompiledProgram(train_program) .with_data_parallel(loss_name=loss.name, build_strategy=build_strategy) -在<1.6的版本中,由于设计上的一些问题,在开启Inplace策略后,必须保证后续exe.run中fetch_list的变量是persistable的,即假如你后续需要fetch的变量为loss和acc,则必须设置: +在<1.6 的版本中,由于设计上的一些问题,在开启 Inplace 策略后,必须保证后续 exe.run 中 fetch_list 的变量是 persistable 的,即假如你后续需要 fetch 的变量为 loss 和 acc,则必须设置: .. code-block:: python @@ -142,16 +142,16 @@ Inplace策略适用于使用ParallelExecutor或CompiledProgram+with_data_paralle acc.persistable = True -**在1.6+的版本中,无需设置fetch变量为persistable。** +**在 1.6+的版本中,无需设置 fetch 变量为 persistable。** -3. 存储优化Best Practice +3. 
存储优化 Best Practice ======================= 我们推荐你的最佳存储优化策略为: -- 开启GC策略:设置 :code:`FLAGS_eager_delete_tensor_gb=0` 。 +- 开启 GC 策略:设置 :code:`FLAGS_eager_delete_tensor_gb=0` 。 -- 开启Inplace策略:设置 :code:`build_strategy.enable_inplace = True` ,并在<1.6版本中设置fetch_list中的 :code:`var.persistable = True` 。 +- 开启 Inplace 策略:设置 :code:`build_strategy.enable_inplace = True` ,并在<1.6 版本中设置 fetch_list 中的 :code:`var.persistable = True` 。 -**在1.6+的版本中,上述最佳策略均已默认打开,无需手动配置,亦无需设置fetch_list变量为persistable。** +**在 1.6+的版本中,上述最佳策略均已默认打开,无需手动配置,亦无需设置 fetch_list 变量为 persistable。** diff --git a/docs/guides/performance_improving/paddle_tensorrt_infer.md b/docs/guides/performance_improving/paddle_tensorrt_infer.md index 2877b064d38..2890eceb4ab 100644 --- a/docs/guides/performance_improving/paddle_tensorrt_infer.md +++ b/docs/guides/performance_improving/paddle_tensorrt_infer.md @@ -1,6 +1,6 @@ -# 使用Paddle-TensorRT库预测 +# 使用 Paddle-TensorRT 库预测 -NVIDIA TensorRT 是一个高性能的深度学习预测库,可为深度学习推理应用程序提供低延迟和高吞吐量。PaddlePaddle 采用子图的形式对TensorRT进行了集成,即我们可以使用该模块来提升Paddle模型的预测性能。该模块依旧在持续开发中,目前支持的模型如下表所示: +NVIDIA TensorRT 是一个高性能的深度学习预测库,可为深度学习推理应用程序提供低延迟和高吞吐量。PaddlePaddle 采用子图的形式对 TensorRT 进行了集成,即我们可以使用该模块来提升 Paddle 模型的预测性能。该模块依旧在持续开发中,目前支持的模型如下表所示: |分类模型|检测模型|分割模型| |---|---|---| @@ -13,29 +13,29 @@ NVIDIA TensorRT 是一个高性能的深度学习预测库,可为深度学习 |GoogLeNet|mobilenet-SSD|| |DPN||| -在这篇文档中,我们将会对Paddle-TensorRT库的获取、使用和原理进行介绍。 +在这篇文档中,我们将会对 Paddle-TensorRT 库的获取、使用和原理进行介绍。 **Note:** -1. 从源码编译时,TensorRT预测库目前仅支持使用GPU编译,且需要设置编译选项TENSORRT_ROOT为TensorRT所在的路径。 -2. Windows支持需要TensorRT 版本5.0以上。 -3. Paddle-TRT目前仅支持固定输入shape。 -4. 下载安装TensorRT后,需要手动在`NvInfer.h`文件中为`class IPluginFactory`和`class IGpuAllocator`分别添加虚析构函数: +1. 从源码编译时,TensorRT 预测库目前仅支持使用 GPU 编译,且需要设置编译选项 TENSORRT_ROOT 为 TensorRT 所在的路径。 +2. Windows 支持需要 TensorRT 版本 5.0 以上。 +3. Paddle-TRT 目前仅支持固定输入 shape。 +4. 
下载安装 TensorRT 后,需要手动在`NvInfer.h`文件中为`class IPluginFactory`和`class IGpuAllocator`分别添加虚析构函数: ``` c++ virtual ~IPluginFactory() {}; virtual ~IGpuAllocator() {}; ``` ## 内容 -- [Paddle-TRT使用介绍](#Paddle-TRT使用介绍) -- [Paddle-TRT样例编译测试](#Paddle-TRT样例编译测试) -- [Paddle-TRT INT8使用](#Paddle-TRT_INT8使用) -- [Paddle-TRT子图运行原理](#Paddle-TRT子图运行原理) -- [Paddle-TRT性能测试](#Paddle-TRT性能测试) +- [Paddle-TRT 使用介绍](#Paddle-TRT 使用介绍) +- [Paddle-TRT 样例编译测试](#Paddle-TRT 样例编译测试) +- [Paddle-TRT INT8 使用](#Paddle-TRT_INT8 使用) +- [Paddle-TRT 子图运行原理](#Paddle-TRT 子图运行原理) +- [Paddle-TRT 性能测试](#Paddle-TRT 性能测试) -## Paddle-TRT使用介绍 +## Paddle-TRT 使用介绍 -在使用AnalysisPredictor时,我们通过配置AnalysisConfig中的接口 +在使用 AnalysisPredictor 时,我们通过配置 AnalysisConfig 中的接口 ``` c++ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, @@ -45,22 +45,22 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, false /* use_static*/, false /* use_calib_mode*/); ``` -的方式来指定使用Paddle-TRT子图方式来运行。 +的方式来指定使用 Paddle-TRT 子图方式来运行。 该接口中的参数的详细介绍如下: -- **`workspace_size`**,类型:int,默认值为1 << 20。指定TensorRT使用的工作空间大小,TensorRT会在该大小限制下筛选合适的kernel执行预测运算。 -- **`max_batch_size`**,类型:int,默认值为1。需要提前设置最大的batch大小,运行时batch大小不得超过此限定值。 -- **`min_subgraph_size`**,类型:int,默认值为3。Paddle-TRT是以子图的形式运行,为了避免性能损失,当子图内部节点个数大于`min_subgraph_size`的时候,才会使用Paddle-TRT运行。 -- **`precision`**,类型:`enum class Precision {kFloat32 = 0, kHalf, kInt8,};`, 默认值为`AnalysisConfig::Precision::kFloat32`。指定使用TRT的精度,支持FP32(kFloat32),FP16(kHalf),Int8(kInt8)。若需要使用Paddle-TRT int8离线量化校准,需设定`precision`为 `AnalysisConfig::Precision::kInt8`, 且设置`use_calib_mode` 为true。 -- **`use_static`**,类型:bool, 默认值为false。如果指定为true,在初次运行程序的时候会将TRT的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成。 -- **`use_calib_mode`**,类型:bool, 默认值为false。若要运行Paddle-TRT int8离线量化校准,需要将此选项设置为true。 +- **`workspace_size`**,类型:int,默认值为 1 << 20。指定 TensorRT 使用的工作空间大小,TensorRT 会在该大小限制下筛选合适的 kernel 执行预测运算。 +- **`max_batch_size`**,类型:int,默认值为 1。需要提前设置最大的 batch 大小,运行时 batch 大小不得超过此限定值。 +- **`min_subgraph_size`**,类型:int,默认值为 3。Paddle-TRT 
是以子图的形式运行,为了避免性能损失,当子图内部节点个数大于`min_subgraph_size`的时候,才会使用 Paddle-TRT 运行。 +- **`precision`**,类型:`enum class Precision {kFloat32 = 0, kHalf, kInt8,};`, 默认值为`AnalysisConfig::Precision::kFloat32`。指定使用 TRT 的精度,支持 FP32(kFloat32),FP16(kHalf),Int8(kInt8)。若需要使用 Paddle-TRT int8 离线量化校准,需设定`precision`为 `AnalysisConfig::Precision::kInt8`, 且设置`use_calib_mode` 为 true。 +- **`use_static`**,类型:bool, 默认值为 false。如果指定为 true,在初次运行程序的时候会将 TRT 的优化信息进行序列化到磁盘上,下次运行时直接加载优化的序列化信息而不需要重新生成。 +- **`use_calib_mode`**,类型:bool, 默认值为 false。若要运行 Paddle-TRT int8 离线量化校准,需要将此选项设置为 true。 -**Note:** Paddle-TRT目前只支持固定shape的输入,不支持变化shape的输入。 +**Note:** Paddle-TRT 目前只支持固定 shape 的输入,不支持变化 shape 的输入。 -## Paddle-TRT样例编译测试 +## Paddle-TRT 样例编译测试 -1. 下载或编译带有 TensorRT 的paddle预测库,参考[安装与编译C++预测库](../../inference_deployment/inference/build_and_install_lib_cn.html)。 -2. 从[NVIDIA官网](https://developer.nvidia.com/nvidia-tensorrt-download)下载对应本地环境中cuda和cudnn版本的TensorRT,需要登陆NVIDIA开发者账号。 +1. 下载或编译带有 TensorRT 的 paddle 预测库,参考[安装与编译 C++预测库](../../inference_deployment/inference/build_and_install_lib_cn.html)。 +2. 从[NVIDIA 官网](https://developer.nvidia.com/nvidia-tensorrt-download)下载对应本地环境中 cuda 和 cudnn 版本的 TensorRT,需要登陆 NVIDIA 开发者账号。 3. 
下载[预测样例](https://paddle-inference-dist.bj.bcebos.com/tensorrt_test/paddle_inference_sample_v1.7.tar.gz)并解压,进入`sample/paddle-TRT`目录下。 `paddle-TRT` 文件夹目录结构如下: @@ -78,9 +78,9 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, └── run_impl.sh ``` - - `mobilenet_test.cc` 为使用paddle-TRT预测的C++源文件 - - `fluid_generate_calib_test.cc` 为使用TRT int8离线量化校准的C++源文件 - - `fluid_int8_test.cc` 为使用TRT执行int8预测的C++源文件 + - `mobilenet_test.cc` 为使用 paddle-TRT 预测的 C++源文件 + - `fluid_generate_calib_test.cc` 为使用 TRT int8 离线量化校准的 C++源文件 + - `fluid_int8_test.cc` 为使用 TRT 执行 int8 预测的 C++源文件 - `mobilenetv1` 为模型文件夹 - `run.sh` 为预测运行脚本文件 @@ -91,12 +91,12 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, 编译运行预测样例之前,需要根据运行环境配置编译与运行脚本`run.sh`。`run.sh`的选项与路径配置的部分如下: ```shell - # 设置是否开启MKL、GPU、TensorRT,如果要使用TensorRT,必须打开GPU + # 设置是否开启 MKL、GPU、TensorRT,如果要使用 TensorRT,必须打开 GPU WITH_MKL=ON WITH_GPU=ON USE_TENSORRT=ON - # 按照运行环境设置预测库路径、CUDA库路径、CUDNN库路径、TensorRT路径、模型路径 + # 按照运行环境设置预测库路径、CUDA 库路径、CUDNN 库路径、TensorRT 路径、模型路径 LIB_DIR=YOUR_LIB_DIR CUDA_LIB_DIR=YOUR_CUDA_LIB_DIR CUDNN_LIB_DIR=YOUR_CUDNN_LIB_DIR @@ -104,28 +104,28 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, MODEL_DIR=YOUR_MODEL_DIR ``` - 按照实际运行环境配置`run.sh`中的选项开关和所需lib路径。 + 按照实际运行环境配置`run.sh`中的选项开关和所需 lib 路径。 5. 编译与运行样例 -## Paddle-TRT INT8使用 +## Paddle-TRT INT8 使用 1. Paddle-TRT INT8 简介 - 神经网络的参数在一定程度上是冗余的,在很多任务上,我们可以在保证模型精度的前提下,将Float32的模型转换成Int8的模型。目前,Paddle-TRT支持离线将预训练好的Float32模型转换成Int8的模型,具体的流程如下: + 神经网络的参数在一定程度上是冗余的,在很多任务上,我们可以在保证模型精度的前提下,将 Float32 的模型转换成 Int8 的模型。目前,Paddle-TRT 支持离线将预训练好的 Float32 模型转换成 Int8 的模型,具体的流程如下: - 1) **生成校准表**(Calibration table):我们准备500张左右的真实输入数据,并将数据输入到模型中去,Paddle-TRT会统计模型中每个op输入和输出值的范围信息,并将其记录到校准表中,这些信息有效减少了模型转换时的信息损失。 + 1) **生成校准表**(Calibration table):我们准备 500 张左右的真实输入数据,并将数据输入到模型中去,Paddle-TRT 会统计模型中每个 op 输入和输出值的范围信息,并将其记录到校准表中,这些信息有效减少了模型转换时的信息损失。 - 2) 生成校准表后,再次运行模型,**Paddle-TRT会自动加载校准表**,并进行INT8模式下的预测。 + 2) 生成校准表后,再次运行模型,**Paddle-TRT 会自动加载校准表**,并进行 INT8 模式下的预测。 -2. 编译测试INT8样例 +2. 
编译测试 INT8 样例 将`run.sh`文件中的`mobilenet_test`改为`fluid_generate_calib_test`,运行 ``` shell sh run.sh ``` - 即可执行生成校准表样例,在该样例中,我们随机生成了500个输入来模拟这一过程,在实际业务中,建议大家使用真实样例。运行结束后,在 `SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/_opt_cache` 模型目录下会多出一个名字为trt_calib_*的文件,即校准表。 + 即可执行生成校准表样例,在该样例中,我们随机生成了 500 个输入来模拟这一过程,在实际业务中,建议大家使用真实样例。运行结束后,在 `SAMPLE_BASE_DIR/sample/paddle-TRT/build/mobilenetv1/_opt_cache` 模型目录下会多出一个名字为 trt_calib_*的文件,即校准表。 生成校准表后,将带校准表的模型文件拷贝到特定地址 @@ -139,11 +139,11 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, sh run.sh ``` - 即可执行int8预测样例。 + 即可执行 int8 预测样例。 -## Paddle-TRT子图运行原理 +## Paddle-TRT 子图运行原理 - PaddlePaddle采用子图的形式对TensorRT进行集成,当模型加载后,神经网络可以表示为由变量和运算节点组成的计算图。Paddle TensorRT实现的功能是对整个图进行扫描,发现图中可以使用TensorRT优化的子图,并使用TensorRT节点替换它们。在模型的推断期间,如果遇到TensorRT节点,Paddle会调用TensorRT库对该节点进行优化,其他的节点调用Paddle的原生实现。TensorRT在推断期间能够进行Op的横向和纵向融合,过滤掉冗余的Op,并对特定平台下的特定的Op选择合适的kernel等进行优化,能够加快模型的预测速度。 + PaddlePaddle 采用子图的形式对 TensorRT 进行集成,当模型加载后,神经网络可以表示为由变量和运算节点组成的计算图。Paddle TensorRT 实现的功能是对整个图进行扫描,发现图中可以使用 TensorRT 优化的子图,并使用 TensorRT 节点替换它们。在模型的推断期间,如果遇到 TensorRT 节点,Paddle 会调用 TensorRT 库对该节点进行优化,其他的节点调用 Paddle 的原生实现。TensorRT 在推断期间能够进行 Op 的横向和纵向融合,过滤掉冗余的 Op,并对特定平台下的特定的 Op 选择合适的 kernel 等进行优化,能够加快模型的预测速度。 下图使用一个简单的模型展示了这个过程: @@ -158,9 +158,9 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/,

- 我们可以在原始模型网络中看到,绿色节点表示可以被TensorRT支持的节点,红色节点表示网络中的变量,黄色表示Paddle只能被Paddle原生实现执行的节点。那些在原始网络中的绿色节点被提取出来汇集成子图,并由一个TensorRT节点代替,成为转换后网络中的`block-25` 节点。在网络运行过程中,如果遇到该节点,Paddle将调用TensorRT库来对其执行。 + 我们可以在原始模型网络中看到,绿色节点表示可以被 TensorRT 支持的节点,红色节点表示网络中的变量,黄色表示 Paddle 只能被 Paddle 原生实现执行的节点。那些在原始网络中的绿色节点被提取出来汇集成子图,并由一个 TensorRT 节点代替,成为转换后网络中的`block-25` 节点。在网络运行过程中,如果遇到该节点,Paddle 将调用 TensorRT 库来对其执行。 -## Paddle-TRT性能测试 +## Paddle-TRT 性能测试 ### 测试环境 - CPU:Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz GPU:Tesla P4 @@ -170,9 +170,9 @@ config->EnableTensorRtEngine(1 << 20 /* workspace_size*/, ### 测试对象 **PaddlePaddle, PyTorch, TensorFlow** -- 在测试中,PaddlePaddle 使用子图优化的方式集成了TensorRT, 模型[地址](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)。 -- PyTorch 使用了原生的实现, 模型[地址1](https://github.com/pytorch/vision/tree/master/torchvision/models)、[地址2](https://github.com/marvis/pytorch-mobilenet)。 -- 对 TensorFlow 测试包括了对TF的原生的测试,和对TF—TRT的测试,**对TF—TRT的测试并没有达到预期的效果,后期会对其进行补充**, 模型[地址](https://github.com/tensorflow/models)。 +- 在测试中,PaddlePaddle 使用子图优化的方式集成了 TensorRT, 模型[地址](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)。 +- PyTorch 使用了原生的实现, 模型[地址 1](https://github.com/pytorch/vision/tree/master/torchvision/models)、[地址 2](https://github.com/marvis/pytorch-mobilenet)。 +- 对 TensorFlow 测试包括了对 TF 的原生的测试,和对 TF—TRT 的测试,**对 TF—TRT 的测试并没有达到预期的效果,后期会对其进行补充**, 模型[地址](https://github.com/tensorflow/models)。 #### ResNet50 diff --git a/docs/guides/performance_improving/paddle_tensorrt_infer_en.md b/docs/guides/performance_improving/paddle_tensorrt_infer_en.md index f013e12a6f8..0acc384ab2a 100644 --- a/docs/guides/performance_improving/paddle_tensorrt_infer_en.md +++ b/docs/guides/performance_improving/paddle_tensorrt_infer_en.md @@ -163,7 +163,7 @@ A simple model expresses the process : - PaddlePaddle integrates TensorRT with subgraph, model[link](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/image_classification/models)。 - 
PyTorch uses original kernels, model[link1](https://github.com/pytorch/vision/tree/master/torchvision/models), [link2](https://github.com/marvis/pytorch-mobilenet)。 -- We tested TF original and TF-TRT**对TF—TRT的测试并没有达到预期的效果,后期会对其进行补充**, model[link](https://github.com/tensorflow/models)。 +- We tested TF original and TF-TRT**对 TF—TRT 的测试并没有达到预期的效果,后期会对其进行补充**, model[link](https://github.com/tensorflow/models)。 #### ResNet50 diff --git a/docs/guides/performance_improving/profiling_model.md b/docs/guides/performance_improving/profiling_model.md index cb8f746676c..406ffbed4cb 100644 --- a/docs/guides/performance_improving/profiling_model.md +++ b/docs/guides/performance_improving/profiling_model.md @@ -1,21 +1,21 @@ # 模型性能分析 -Paddle Profiler是飞桨框架自带的低开销性能分析器,可以对模型运行过程的性能数据进行收集、统计和展示。性能分析器提供的数据可以帮助定位模型的瓶颈,识别造成程序运行时间过长或者GPU利用率低的原因,从而寻求优化方案来获得性能的提升。 +Paddle Profiler 是飞桨框架自带的低开销性能分析器,可以对模型运行过程的性能数据进行收集、统计和展示。性能分析器提供的数据可以帮助定位模型的瓶颈,识别造成程序运行时间过长或者 GPU 利用率低的原因,从而寻求优化方案来获得性能的提升。 -在这篇文档中,主要介绍如何使用Profiler工具来调试程序性能,以及阐述当前提供的所有功能特性。主要内容如下: +在这篇文档中,主要介绍如何使用 Profiler 工具来调试程序性能,以及阐述当前提供的所有功能特性。主要内容如下: -- [使用Profiler工具调试程序性能](#profiler) +- [使用 Profiler 工具调试程序性能](#profiler) - [功能特性](#ergongnengtexing) - [更多细节](#sangengduoxijie) -## 一、使用Profiler工具调试程序性能 +## 一、使用 Profiler 工具调试程序性能 在模型性能分析中,通常采用如下四个步骤: -- 获取模型正常运行时的ips(iterations per second, 每秒的迭代次数),给出baseline数据。 +- 获取模型正常运行时的 ips(iterations per second, 每秒的迭代次数),给出 baseline 数据。 - 开启性能分析器,定位性能瓶颈点。 - 优化程序,检查优化效果。 -- 获取优化后模型正常运行时的ips,和baseline比较,计算真实的提升幅度。 +- 获取优化后模型正常运行时的 ips,和 baseline 比较,计算真实的提升幅度。 -下面是Paddle的应用实践教学中关于[使用神经网络对cifar10进行分类](../../practices/cv/convnet_image_classification.html)的示例代码,里面加上了启动性能分析的代码。通过这个比较简单的示例,来看性能分析工具是如何通过上述四个步骤在调试程序性能中发挥作用。 +下面是 Paddle 的应用实践教学中关于[使用神经网络对 cifar10 进行分类](../../practices/cv/convnet_image_classification.html)的示例代码,里面加上了启动性能分析的代码。通过这个比较简单的示例,来看性能分析工具是如何通过上述四个步骤在调试程序性能中发挥作用。 ```python def train(model): @@ -34,13 +34,13 @@ def train(model): # 创建性能分析器相关的代码 def my_on_trace_ready(prof): # 
定义回调函数,性能分析器结束采集数据时会被调用 - callback = profiler.export_chrome_tracing('./profiler_demo') # 创建导出性能数据到profiler_demo文件夹的回调函数 + callback = profiler.export_chrome_tracing('./profiler_demo') # 创建导出性能数据到 profiler_demo 文件夹的回调函数 callback(prof) # 执行该导出函数 - prof.summary(sorted_by=profiler.SortedKeys.GPUTotal) # 打印表单,按GPUTotal排序表单项 + prof.summary(sorted_by=profiler.SortedKeys.GPUTotal) # 打印表单,按 GPUTotal 排序表单项 - p = profiler.Profiler(scheduler = [3,14], on_trace_ready=my_on_trace_ready, timer_only=True) # 初始化Profiler对象 + p = profiler.Profiler(scheduler = [3,14], on_trace_ready=my_on_trace_ready, timer_only=True) # 初始化 Profiler 对象 - p.start() # 性能分析器进入第0个step + p.start() # 性能分析器进入第 0 个 step for epoch in range(epoch_num): for batch_id, data in enumerate(train_loader()): @@ -57,7 +57,7 @@ def train(model): opt.step() opt.clear_grad() - p.step() # 指示性能分析器进入下一个step + p.step() # 指示性能分析器进入下一个 step if batch_id == 19: p.stop() # 关闭性能分析器 exit() # 做性能分析时,可以将程序提前退出 @@ -85,9 +85,9 @@ def train(model): ``` -### 1. 获取性能调试前模型正常运行的ips +### 1. 获取性能调试前模型正常运行的 ips -上述程序在创建Profiler时候,timer_only设置的值为True,此时将只开启benchmark功能,不开启性能分析器,程序输出模型正常运行时的benchmark信息如下 +上述程序在创建 Profiler 时候,timer_only 设置的值为 True,此时将只开启 benchmark 功能,不开启性能分析器,程序输出模型正常运行时的 benchmark 信息如下 ```text ============================================Perf Summary============================================ Reader Ratio: 53.514% @@ -97,23 +97,23 @@ Time Unit: s, IPS Unit: steps/s | batch_cost | 0.02555 | 0.02381 | 0.02220 | | ips | 39.13907 | 45.03588 | 41.99930 | ``` -其中Reader Ratio表示数据读取部分占训练batch迭代过程的时间占比,reader_cost代表数据读取时间,batch_cost代表batch迭代的时间,ips表示每秒能迭代多少次,即跑多少个batch。可以看到,此时的ips为39.1,可将这个值作为优化对比的baseline。 +其中 Reader Ratio 表示数据读取部分占训练 batch 迭代过程的时间占比,reader_cost 代表数据读取时间,batch_cost 代表 batch 迭代的时间,ips 表示每秒能迭代多少次,即跑多少个 batch。可以看到,此时的 ips 为 39.1,可将这个值作为优化对比的 baseline。 ### 2. 
开启性能分析器,定位性能瓶颈点 -修改程序,将Profiler的timer_only参数设置为False, 此时代表不只开启benchmark功能,还将开启性能分析器,进行详细的性能分析。 +修改程序,将 Profiler 的 timer_only 参数设置为 False, 此时代表不只开启 benchmark 功能,还将开启性能分析器,进行详细的性能分析。 ```python p = profiler.Profiler(scheduler = [3,14], on_trace_ready=my_on_trace_ready, timer_only=False) ``` -性能分析器会收集程序在第3到14次(不包括14)训练迭代过程中的性能数据,并在profiler_demo文件夹中输出一个json格式的文件,用于展示程序执行过程的timeline,可通过chrome浏览器的[chrome://tracing](chrome://tracing)插件打开这个文件进行查看。 +性能分析器会收集程序在第 3 到 14 次(不包括 14)训练迭代过程中的性能数据,并在 profiler_demo 文件夹中输出一个 json 格式的文件,用于展示程序执行过程的 timeline,可通过 chrome 浏览器的[chrome://tracing](chrome://tracing)插件打开这个文件进行查看。


-性能分析器还会直接在终端打印统计表单(建议重定向到文件中查看),查看程序输出的Model Summary表单 +性能分析器还会直接在终端打印统计表单(建议重定向到文件中查看),查看程序输出的 Model Summary 表单 ```text -----------------------------------------------Model Summary----------------------------------------------- @@ -129,13 +129,13 @@ ProfileStep 11 294.53 / 26.78 / 35.28 / 24.56 / 100.00 13.22 / 1.20 Others - 45.66 / - / - / - / 15.50 0.53 / - / - / - / 3.96 --------------- ------ ---------------------------------------- ---------------------------------------- ``` -其中ProfileStep表示训练batch的迭代step过程,对应代码中每两次调用`p.step()`的间隔时间;Dataloader表示数据读取的时间,即`for batch_id, data in enumerate(train_loader())`的执行时间;Forward表示模型前向的时间,即`logits = model(x_data)`的执行时间,Backward表示反向传播的时间,即`loss.backward()`的执行时间;Optimization表示优化器的时间,即`opt.step()`的执行时间。 -通过timeline可以看到,Dataloader占了执行过程的很大比重,Model Summary显示其甚至接近了50%。分析程序发现,这是由于模型本身比较简单,需要的计算量小,再加上Dataloader -准备数据时只用了单进程来读取,使得程序读取数据时和执行计算时没有并行操作,导致Dataloader占比过大。 +其中 ProfileStep 表示训练 batch 的迭代 step 过程,对应代码中每两次调用`p.step()`的间隔时间;Dataloader 表示数据读取的时间,即`for batch_id, data in enumerate(train_loader())`的执行时间;Forward 表示模型前向的时间,即`logits = model(x_data)`的执行时间,Backward 表示反向传播的时间,即`loss.backward()`的执行时间;Optimization 表示优化器的时间,即`opt.step()`的执行时间。 +通过 timeline 可以看到,Dataloader 占了执行过程的很大比重,Model Summary 显示其甚至接近了 50%。分析程序发现,这是由于模型本身比较简单,需要的计算量小,再加上 Dataloader +准备数据时只用了单进程来读取,使得程序读取数据时和执行计算时没有并行操作,导致 Dataloader 占比过大。 ### 3. 优化程序,检查优化效果 -识别到了问题产生的原因,对程序继续做如下修改,将Dataloader的num_workers设置为4,使得能有多个进程并行读取数据。 +识别到了问题产生的原因,对程序继续做如下修改,将 Dataloader 的 num_workers 设置为 4,使得能有多个进程并行读取数据。 ```python train_loader = paddle.io.DataLoader(cifar10_train, shuffle=True, @@ -143,7 +143,7 @@ train_loader = paddle.io.DataLoader(cifar10_train, num_workers=4) ``` -重新对程序进行性能分析,新的timeline和Model Summary如下所示 +重新对程序进行性能分析,新的 timeline 和 Model Summary 如下所示


@@ -163,10 +163,10 @@ ProfileStep 11 90.94 / 8.27 / 11.82 / 7.85 / 100.00 13.27 / 1.21 Others - 26.79 / - / - / - / 29.46 0.52 / - / - / - / 3.80 --------------- ------ ---------------------------------------- ---------------------------------------- ``` -可以看到,从Dataloader中取数据的时间大大减少,变成了平均只占一个step的2%,并且平均一个step所需要的时间也相应减少了。 +可以看到,从 Dataloader 中取数据的时间大大减少,变成了平均只占一个 step 的 2%,并且平均一个 step 所需要的时间也相应减少了。 -### 4. 获取优化后模型正常运行的ips,确定真实提升幅度 -重新将timer_only设置的值为True,获取优化后模型正常运行时的benchmark信息 +### 4. 获取优化后模型正常运行的 ips,确定真实提升幅度 +重新将 timer_only 设置的值为 True,获取优化后模型正常运行时的 benchmark 信息 ```text ============================================Perf Summary============================================ @@ -177,35 +177,35 @@ Time Unit: s, IPS Unit: steps/s | batch_cost | 0.00660 | 0.00629 | 0.00587 | | ips | 151.45498 | 170.28927 | 159.06308 | ``` -此时ips的值变成了151.5,相比优化前的baseline 39.1,模型真实性能提升了287%。 +此时 ips 的值变成了 151.5,相比优化前的 baseline 39.1,模型真实性能提升了 287%。 -**Note** 由于Profiler开启的时候,收集性能数据本身也会造成程序性能的开销,因此正常跑程序时请不要开启性能分析器,性能分析器只作为调试程序性能时使用。如果想获得程序正常运行时候的 -benchmark信息(如ips),可以像示例一样将Profiler的timer_only参数设置为True,此时不会进行详尽的性能数据收集,几乎不影响程序正常运行的性能,所获得的benchmark信息也趋于真实。 -此外,benchmark信息计算的数据范围是从调用Profiler的start方法开始,到调用stop方法结束这个过程的数据。而Timeline和性能数据的统计表单的数据范围是所指定的采集区间,如这个例子中的第3到14次迭代,这会导致开启性能分析器时统计表单和benchmark信息输出的值不同(如统计到的Dataloader的时间占比)。此外,当benchmark统计的范围和性能分析器统计的范围不同时, -由于benchmark统计的是平均时间,如果benchmark统计的范围覆盖了性能分析器开启的范围,也覆盖了关闭性能调试时的正常执行的范围,此时benchmark的值没有意义,因此**开启性能分析器时请以性能分析器输出的统计表单为参考**,这也是为何上面示例里在开启性能分析器时没贴benchmark信息的原因。 +**Note** 由于 Profiler 开启的时候,收集性能数据本身也会造成程序性能的开销,因此正常跑程序时请不要开启性能分析器,性能分析器只作为调试程序性能时使用。如果想获得程序正常运行时候的 +benchmark 信息(如 ips),可以像示例一样将 Profiler 的 timer_only 参数设置为 True,此时不会进行详尽的性能数据收集,几乎不影响程序正常运行的性能,所获得的 benchmark 信息也趋于真实。 +此外,benchmark 信息计算的数据范围是从调用 Profiler 的 start 方法开始,到调用 stop 方法结束这个过程的数据。而 Timeline 和性能数据的统计表单的数据范围是所指定的采集区间,如这个例子中的第 3 到 14 次迭代,这会导致开启性能分析器时统计表单和 benchmark 信息输出的值不同(如统计到的 Dataloader 的时间占比)。此外,当 benchmark 统计的范围和性能分析器统计的范围不同时, +由于 benchmark 统计的是平均时间,如果 benchmark 
统计的范围覆盖了性能分析器开启的范围,也覆盖了关闭性能调试时的正常执行的范围,此时 benchmark 的值没有意义,因此**开启性能分析器时请以性能分析器输出的统计表单为参考**,这也是为何上面示例里在开启性能分析器时没贴 benchmark 信息的原因。 ## 二、功能特性 -当前Profiler提供Timeline、统计表单、benchmark信息共三个方面的展示功能。 +当前 Profiler 提供 Timeline、统计表单、benchmark 信息共三个方面的展示功能。 -### 1. Timeline展示 -对于采集的性能数据,导出为chrome tracing timeline格式的文件后,可以进行可视化分析。当前,所采用的可视化工具为chrome浏览器里的[tracing插件](chrome://tracing),可以按照如下方式进行查看 +### 1. Timeline 展示 +对于采集的性能数据,导出为 chrome tracing timeline 格式的文件后,可以进行可视化分析。当前,所采用的可视化工具为 chrome 浏览器里的[tracing 插件](chrome://tracing),可以按照如下方式进行查看


- Timeline使用Demo + Timeline 使用 Demo

-目前Timeline提供以下特性: +目前 Timeline 提供以下特性: -1. 查看CPU和GPU在不同线程或stream下的事件发生的时间线。将同一线程下所记录的数据分为Python层和C++层,以便根据需要进行折叠和展开。对于有名字的线程,标注线程名字。 +1. 查看 CPU 和 GPU 在不同线程或 stream 下的事件发生的时间线。将同一线程下所记录的数据分为 Python 层和 C++层,以便根据需要进行折叠和展开。对于有名字的线程,标注线程名字。 2. 所展示的事件名字上标注事件所持续的时间,点击具体的事件,可在下方的说明栏中看到更详细的事件信息。通过按键'w'、's'可进行放大和缩小,通过'a'、'd'可进行左移和右移。 -3. 对于GPU上的事件,可以通过点击下方的'launch'链接查看所发起它的CPU上的事件。 +3. 对于 GPU 上的事件,可以通过点击下方的'launch'链接查看所发起它的 CPU 上的事件。 ### 2. 统计表单展示 -统计表单负责对采集到的数据(Event)从多个不同的角度进行解读,也可以理解为对timeline进行一些量化的指标计算。 -目前提供Device Summary、Overview Summary、Model Summary、Distributed Summary、Operator Summary、Kernel Summary、Memory Manipulation Summary和UserDefined Summary的统计表单,每个表单从不同的角度进行统计计算。每个表单的统计内容简要叙述如下: +统计表单负责对采集到的数据(Event)从多个不同的角度进行解读,也可以理解为对 timeline 进行一些量化的指标计算。 +目前提供 Device Summary、Overview Summary、Model Summary、Distributed Summary、Operator Summary、Kernel Summary、Memory Manipulation Summary 和 UserDefined Summary 的统计表单,每个表单从不同的角度进行统计计算。每个表单的统计内容简要叙述如下: - Device Summary ```text @@ -224,10 +224,10 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 ---------------------------------------------------- ``` - DeviceSummary提供CPU和GPU的平均利用率信息。其中 - - CPU(Process): 指的是进程的cpu平均利用率,算的是从Profiler开始记录数据到结束这一段过程,进程所利用到的 **cpu core的总时间**与**该段时间**的占比。因此如果是多核的情况,对于进程来说cpu平均利用率是有可能超过100%的,因为同时用到的多个core的时间进行了累加。 - - CPU(System): 指的是整个系统的cpu平均利用率,算的是从Profiler开始记录数据到结束这一段过程,整个系统所有进程利用到的**cpu core总时间**与**该段时间乘以cpu core的数量**的占比。可以当成是从cpu的视角来算的利用率。 - - GPU: 指的是进程的gpu平均利用率,算的是从Profiler开始记录数据到结束这一段过程,进程在gpu上所调用的**kernel的执行时间** 与 **该段时间** 的占比。 + DeviceSummary 提供 CPU 和 GPU 的平均利用率信息。其中 + - CPU(Process): 指的是进程的 cpu 平均利用率,算的是从 Profiler 开始记录数据到结束这一段过程,进程所利用到的 **cpu core 的总时间**与**该段时间**的占比。因此如果是多核的情况,对于进程来说 cpu 平均利用率是有可能超过 100%的,因为同时用到的多个 core 的时间进行了累加。 + - CPU(System): 指的是整个系统的 cpu 平均利用率,算的是从 Profiler 开始记录数据到结束这一段过程,整个系统所有进程利用到的**cpu core 总时间**与**该段时间乘以 cpu core 的数量**的占比。可以当成是从 cpu 的视角来算的利用率。 + - GPU: 指的是进程的 gpu 平均利用率,算的是从 Profiler 开始记录数据到结束这一段过程,进程在 gpu 上所调用的**kernel 的执行时间** 与 **该段时间** 的占比。 - Overview Summary @@ 
-270,7 +270,7 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 ---------------------------------------------------------------------------------------------------------- ``` - Overview Summary用于展示每种类型的Event一共分别消耗了多少时间,对于多线程或多stream下,如果同一类型的Event有重叠的时间段,采取取并集操作,不对重叠的时间进行重复计算。 + Overview Summary 用于展示每种类型的 Event 一共分别消耗了多少时间,对于多线程或多 stream 下,如果同一类型的 Event 有重叠的时间段,采取取并集操作,不对重叠的时间进行重复计算。 - Model Summary @@ -289,7 +289,7 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 --------------- ------ ------------------------------------------- ------------------------------------------- ``` - Model Summary用于展示模型训练或者推理过程中,dataloader、forward、backward、optimization所消耗的时间。其中GPU Time对应着在该段过程内所发起的GPU侧活动的时间。 + Model Summary 用于展示模型训练或者推理过程中,dataloader、forward、backward、optimization 所消耗的时间。其中 GPU Time 对应着在该段过程内所发起的 GPU 侧活动的时间。 @@ -307,13 +307,13 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 ------------------------- ------------------------- ------------------------- ``` - Distribution Summary用于展示分布式训练中通信(Communication)、计算(Computation)以及这两者Overlap的时间。 + Distribution Summary 用于展示分布式训练中通信(Communication)、计算(Computation)以及这两者 Overlap 的时间。 - Communication: 所有和通信有关活动的时间,包括和分布式相关的算子(op)以及gpu上的kernel的时间等。 + Communication: 所有和通信有关活动的时间,包括和分布式相关的算子(op)以及 gpu 上的 kernel 的时间等。 - Computation: 即是所有kernel在GPU上的执行时间,但是去除了和通信相关的kernel的时间。 + Computation: 即是所有 kernel 在 GPU 上的执行时间,但是去除了和通信相关的 kernel 的时间。 - Overlap: Communication和Computation的重叠时间 + Overlap: Communication 和 Computation 的重叠时间 - Operator Summary ```text @@ -369,7 +369,7 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 void phi::funcs::SplitKernel_(float cons... 
16 - / - / - / - / - 6.93 / 0.43 / 0.76 / 0.34 / 1.03 ``` - Operator Summary用于展示框架中算子(op)的执行信息。对于每一个Op,可以通过打印表单时候的op_detail选项控制是否打印出Op执行过程里面的子过程。同时展示每个子过程中的GPU上的活动,且子过程的活动算时间占比时以上层的时间为总时间。 + Operator Summary 用于展示框架中算子(op)的执行信息。对于每一个 Op,可以通过打印表单时候的 op_detail 选项控制是否打印出 Op 执行过程里面的子过程。同时展示每个子过程中的 GPU 上的活动,且子过程的活动算时间占比时以上层的时间为总时间。 - Kernel Summary ```text @@ -393,7 +393,7 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 maxwell_scudnn_winograd_128x128_ldg1_ldg4_tile228n_nt 48 90.87 / 1.89 / 2.09 / 1.69 / 3.26 maxwell_scudnn_128x128_stridedB_small_nn 24 87.58 / 3.65 / 4.00 / 3.53 / 3.14 ``` - Kernel Summary用于展示在GPU执行的kernel的信息。 + Kernel Summary 用于展示在 GPU 执行的 kernel 的信息。 - Memory Manipulation Summary ```text @@ -410,7 +410,7 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 --------------------------------- ------ ---------------------------------------- ---------------------------------------- ``` - Memory Manipulation Summary用于展示框架中调用内存操作所花费的时间。 + Memory Manipulation Summary 用于展示框架中调用内存操作所花费的时间。 - UserDefined Summary @@ -426,10 +426,10 @@ benchmark信息(如ips),可以像示例一样将Profiler的timer_only参 ``` - UserDefined Summary用于展示用户自定义记录的Event所花费的时间。 + UserDefined Summary 用于展示用户自定义记录的 Event 所花费的时间。 -### 3. Benchmark信息 -benckmark信息用于展示模型的吞吐量以及时间开销。 +### 3. 
Benchmark 信息 +benchmark 信息用于展示模型的吞吐量以及时间开销。 ```text ============================================Perf Summary============================================ Reader Ratio: 0.989% @@ -439,9 +439,9 @@ Time Unit: s, IPS Unit: steps/s | batch_cost | 0.00986 | 0.00798 | 0.00786 | | ips | 101.41524 | 127.25977 | 125.29320 | ``` -其中ReaderRatio表示数据读取部分占batch迭代过程的时间占比,reader_cost代表数据读取时间,batch_cost代表batch迭代的时间,ips表示每秒能迭代多少次,即跑多少个batch。 +其中 ReaderRatio 表示数据读取部分占 batch 迭代过程的时间占比,reader_cost 代表数据读取时间,batch_cost 代表 batch 迭代的时间,ips 表示每秒能迭代多少次,即跑多少个 batch。 ## 三、更多细节 -关于paddle.profiler模块更详细的使用说明,可以参考[API文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/profiler/Overview_cn.html)。目前Paddle的性能分析工具主要还只提供时间方面的分析,之后会提供更多信息的收集来辅助做更全面的分析,如提供显存分析来监控显存泄漏问题。此外,Paddle的可视化工具VisualDL正在对Profiler的数据展示进行开发,敬请期待。 +关于 paddle.profiler 模块更详细的使用说明,可以参考[API 文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api/paddle/profiler/Overview_cn.html)。目前 Paddle 的性能分析工具主要还只提供时间方面的分析,之后会提供更多信息的收集来辅助做更全面的分析,如提供显存分析来监控显存泄漏问题。此外,Paddle 的可视化工具 VisualDL 正在对 Profiler 的数据展示进行开发,敬请期待。 diff --git a/docs/guides/performance_improving/quantization.md b/docs/guides/performance_improving/quantization.md index 7e43c49ffaa..fb9ac40b88b 100644 --- a/docs/guides/performance_improving/quantization.md +++ b/docs/guides/performance_improving/quantization.md @@ -4,7 +4,7 @@ 模型量化作为一种常见的模型压缩方法,使用整数替代浮点数进行存储和计算,可以减少模型存储空间、加快推理速度、降低计算内存,助力深度学习应用的落地。 -飞桨提供了模型量化全流程解决方案,首先使用PaddleSlim产出量化模型,然后使用Paddle Inference和Paddle Lite部署量化模型。 +飞桨提供了模型量化全流程解决方案,首先使用 PaddleSlim 产出量化模型,然后使用 Paddle Inference 和 Paddle Lite 部署量化模型。
missing @@ -13,9 +13,9 @@ ## 产出量化模型 -飞桨模型量化全流程解决方案中,PaddleSlim负责产出量化模型。 +飞桨模型量化全流程解决方案中,PaddleSlim 负责产出量化模型。 -PaddleSlim支持三种模型量化方法:动态离线量化方法、静态离线量化方法和量化训练方法。这三种量化方法的特点如下图。 +PaddleSlim 支持三种模型量化方法:动态离线量化方法、静态离线量化方法和量化训练方法。这三种量化方法的特点如下图。
missing @@ -24,7 +24,7 @@ PaddleSlim支持三种模型量化方法:动态离线量化方法、静态离 动态离线量化方法不需要使用样本数据,也不会对模型进行训练。在模型产出阶段,动态离线量化方法将模型权重从浮点数量化成整数。在模型部署阶段,将权重从整数反量化成浮点数,使用浮点数运算进行预测推理。这种方式主要减少模型存储空间,对权重读取费时的模型有一定加速作用,对模型精度影响较小。 -静态离线量化方法要求有少量无标签样本数据,需要执行模型的前向计算,不会对模型进行训练。在模型产出阶段,静态离线量化方法使用样本数据执行模型的前向计算,同时对量化OP的输入输出进行采样,然后计算量化信息。在模型部署阶段,使用计算好的量化信息对输入进行量化,基于整数运算进行预测推理。静态离线量化方法可以减少模型存储空间、加快模型推理速度、降低计算内存,同时量化模型只存在较小的精度损失。 +静态离线量化方法要求有少量无标签样本数据,需要执行模型的前向计算,不会对模型进行训练。在模型产出阶段,静态离线量化方法使用样本数据执行模型的前向计算,同时对量化 OP 的输入输出进行采样,然后计算量化信息。在模型部署阶段,使用计算好的量化信息对输入进行量化,基于整数运算进行预测推理。静态离线量化方法可以减少模型存储空间、加快模型推理速度、降低计算内存,同时量化模型只存在较小的精度损失。 量化训练方法要求有大量有标签样本数据,需要对模型进行较长时间的训练。在模型产出阶段,量化训练方法使用模拟量化的思想,在模型训练过程中会更新权重,实现拟合、减少量化误差的目的。在模型部署阶段,量化训练方法和静态离线量化方法一致,采用相同的预测推理方式,在存储空间、推理速度、计算内存三方面实现相同的收益。更重要的是,量化训练方法对模型精度只有极小的影响。 @@ -36,22 +36,22 @@ PaddleSlim支持三种模型量化方法:动态离线量化方法、静态离
图 3. 选择量化方法
-产出量化模型的使用方法、Demo和API,请参考[PaddleSlim文档](https://paddleslim.readthedocs.io/zh_CN/latest/index.html)。 +产出量化模型的使用方法、Demo 和 API,请参考[PaddleSlim 文档](https://paddleslim.readthedocs.io/zh_CN/latest/index.html)。 ## 部署量化模型 -飞桨模型量化全流程解决方案中,Paddle Inference负责在服务器端(X86 CPU和Nvidia GPU)部署量化模型,Paddle Lite负责在移动端(ARM CPU)上部署量化模型。 +飞桨模型量化全流程解决方案中,Paddle Inference 负责在服务器端(X86 CPU 和 Nvidia GPU)部署量化模型,Paddle Lite 负责在移动端(ARM CPU)上部署量化模型。 -X86 CPU和Nvidia GPU上支持部署PaddleSlim静态离线量化方法和量化训练方法产出的量化模型。 -ARM CPU上支持部署PaddleSlim动态离线量化方法、静态离线量化方法和量化训练方法产出的量化模型。 +X86 CPU 和 Nvidia GPU 上支持部署 PaddleSlim 静态离线量化方法和量化训练方法产出的量化模型。 +ARM CPU 上支持部署 PaddleSlim 动态离线量化方法、静态离线量化方法和量化训练方法产出的量化模型。 -因为动态离线量化方法产出的量化模型主要是为了压缩模型体积,主要应用于移动端部署,所以在X86 CPU和Nvidia GPU上暂不支持这类量化模型。 +因为动态离线量化方法产出的量化模型主要是为了压缩模型体积,主要应用于移动端部署,所以在 X86 CPU 和 Nvidia GPU 上暂不支持这类量化模型。 -### NV GPU上部署量化模型 +### NV GPU 上部署量化模型 -使用PaddleSlim静态离线量化方法和量化训练方法产出量化模型后,可以使用Paddle Inference在Nvidia GPU上部署该量化模型。 +使用 PaddleSlim 静态离线量化方法和量化训练方法产出量化模型后,可以使用 Paddle Inference 在 Nvidia GPU 上部署该量化模型。 -Nvidia GPU上部署常规模型的流程是:准备TensorRT环境、配置Config、创建Predictor、执行。Nvidia GPU上部署量化模型和常规模型大体相似,需要改动的是:指定TensorRT配置时将precision_mode设置为paddle_infer.PrecisionType.Int8,将use_calib_mode设为False。 +Nvidia GPU 上部署常规模型的流程是:准备 TensorRT 环境、配置 Config、创建 Predictor、执行。Nvidia GPU 上部署量化模型和常规模型大体相似,需要改动的是:指定 TensorRT 配置时将 precision_mode 设置为 paddle_infer.PrecisionType.Int8,将 use_calib_mode 设为 False。 ``` config.enable_tensorrt_engine( @@ -63,27 +63,27 @@ config.enable_tensorrt_engine( use_calib_mode=False) ``` -Paddle Inference的详细说明,请参考[文档](https://paddleinference.paddlepaddle.org.cn/product_introduction/summary.html)。 +Paddle Inference 的详细说明,请参考[文档](https://paddleinference.paddlepaddle.org.cn/product_introduction/summary.html)。 -Nvidia GPU上部署量化模型的详细说明,请参考[文档](https://paddle-inference.readthedocs.io/en/latest/optimize/paddle_trt.html)。 +Nvidia GPU 上部署量化模型的详细说明,请参考[文档](https://paddle-inference.readthedocs.io/en/latest/optimize/paddle_trt.html)。 -### X86 CPU上部署量化模型 +### X86 CPU 上部署量化模型 
-使用PaddleSlim静态离线量化方法和量化训练方法产出量化模型后,可以使用Paddle Inference在X86 CPU上部署该量化模型。 +使用 PaddleSlim 静态离线量化方法和量化训练方法产出量化模型后,可以使用 Paddle Inference 在 X86 CPU 上部署该量化模型。 -X86 CPU上部署量化模型,首先检查X86 CPU支持指令集,然后转换量化模型,最后在X86 CPU上执行预测。 +X86 CPU 上部署量化模型,首先检查 X86 CPU 支持指令集,然后转换量化模型,最后在 X86 CPU 上执行预测。 -Paddle Inference的详细说明,请参考[文档](https://paddle-inference.readthedocs.io/en/latest/#)。 +Paddle Inference 的详细说明,请参考[文档](https://paddle-inference.readthedocs.io/en/latest/#)。 -X86 CPU上部署量化模型的详细说明,请参考[文档](https://paddle-inference.readthedocs.io/en/latest/optimize/paddle_x86_cpu_int8.html)。 +X86 CPU 上部署量化模型的详细说明,请参考[文档](https://paddle-inference.readthedocs.io/en/latest/optimize/paddle_x86_cpu_int8.html)。 -1)检查X86 CPU支持指令集 +1)检查 X86 CPU 支持指令集 -大家可以在命令行中输入lscpu查看本机支持指令。 +大家可以在命令行中输入 lscpu 查看本机支持指令。 -在支持avx512、不支持avx512_vnni的X86 CPU上(如:SkyLake, Model name:Intel(R) Xeon(R) Gold X1XX),量化模型性能为原始模型性能的1.5倍左右。 +在支持 avx512、不支持 avx512_vnni 的 X86 CPU 上(如:SkyLake, Model name:Intel(R) Xeon(R) Gold X1XX),量化模型性能为原始模型性能的 1.5 倍左右。 -在支持avx512和avx512_vnni的X86 CPU上(如:Casecade Lake, Model name: Intel(R) Xeon(R) Gold X2XX),量化模型的精度和性能最高,量化模型性能为原始模型性能的3~3.7倍。 +在支持 avx512 和 avx512_vnni 的 X86 CPU 上(如:Cascade Lake, Model name: Intel(R) Xeon(R) Gold X2XX),量化模型的精度和性能最高,量化模型性能为原始模型性能的 3~3.7 倍。 2)转换量化模型 @@ -101,13 +101,13 @@ python save_quant_model.py \ 3)执行预测 -准备预测库,加载转换后的量化模型,创建Predictor,进行预测。 +准备预测库,加载转换后的量化模型,创建 Predictor,进行预测。 -注意,在X86 CPU预测端部署量化模型,必须开启MKLDNN,不要开启IrOptim(模型已经转换好)。 +注意,在 X86 CPU 预测端部署量化模型,必须开启 MKLDNN,不要开启 IrOptim(模型已经转换好)。 4)数据展示 -**图像分类INT8模型在 Intel(R) Xeon(R) Gold 6271 上精度** +**图像分类 INT8 模型在 Intel(R) Xeon(R) Gold 6271 上精度** | Model | FP32 Top1 Accuracy | INT8 Top1 Accuracy | Top1 Diff | FP32 Top5 Accuracy | INT8 Top5 Accuracy | Top5 Diff | |:------------:|:------------------:|:------------------:|:---------:|:------------------:|:------------------:|:---------:| | VGG16 | 72.08% | 71.74% | -0.34% | 90.63% | 89.71% | -0.92% | | VGG19 | 72.57% | 72.12% | -0.45%
| 90.84% | 90.15% | -0.69% | -**图像分类INT8模型在 Intel(R) Xeon(R) Gold 6271 单核上性能** +**图像分类 INT8 模型在 Intel(R) Xeon(R) Gold 6271 单核上性能** | Model | FP32 (images/s) | INT8 (images/s) | Ratio (INT8/FP32) | |:------------:|:---------------:|:---------------:|:-----------------:| @@ -144,23 +144,23 @@ python save_quant_model.py \ | 20 threads | 22.08 | 12.57 | 1.76X | -### ARM CPU上部署量化模型 +### ARM CPU 上部署量化模型 -Paddle Lite可以在ARM CPU上部署PaddleSlim动态离线量化方法、静态离线量化方法和量化训练方法产出的量化模型。 +Paddle Lite 可以在 ARM CPU 上部署 PaddleSlim 动态离线量化方法、静态离线量化方法和量化训练方法产出的量化模型。 -Paddle Lite部署量化模型的方法和常规非量化模型完全相同,主要包括使用opt工具进行模型优化、执行预测。 +Paddle Lite 部署量化模型的方法和常规非量化模型完全相同,主要包括使用 opt 工具进行模型优化、执行预测。 -Paddle Lite的详细说明,请参考[文档](https://paddle-lite.readthedocs.io/zh/latest/index.html)。 +Paddle Lite 的详细说明,请参考[文档](https://paddle-lite.readthedocs.io/zh/latest/index.html)。 -Paddle Lite部署动态离线量化方法产出的量化模型,请参考[文档](https://paddle-lite.readthedocs.io/zh/latest/user_guides/quant_post_dynamic.html)。 +Paddle Lite 部署动态离线量化方法产出的量化模型,请参考[文档](https://paddle-lite.readthedocs.io/zh/latest/user_guides/quant_post_dynamic.html)。 -Paddle Lite部署静态离线量化方法产出的量化模型,请参考[文档](https://paddle-lite.readthedocs.io/zh/latest/user_guides/quant_post_static.html)。 +Paddle Lite 部署静态离线量化方法产出的量化模型,请参考[文档](https://paddle-lite.readthedocs.io/zh/latest/user_guides/quant_post_static.html)。 -Paddle Lite部署量化训练方法产出的量化模型,请参考[文档](https://paddle-lite.readthedocs.io/zh/latest/user_guides/quant_aware.html)。 +Paddle Lite 部署量化训练方法产出的量化模型,请参考[文档](https://paddle-lite.readthedocs.io/zh/latest/user_guides/quant_aware.html)。 **模型量化前后性能对比** -| 骁龙855 | armv7(ms) | armv7(ms) | armv7(ms) | armv8(ms) | armv8(ms) | armv8(ms) | +| 骁龙 855 | armv7(ms) | armv7(ms) | armv7(ms) | armv8(ms) | armv8(ms) | armv8(ms) | |:------:|:---------:|:---------: | :-------: | :--------:| :--------:| :--------:| | threads num| 1 | 2 | 4 | 1 | 2 | 4 | | mobilenet_v1_fp32 | 32.19 | 18.75 | 11.02 | 29.50 | 17.50 | 9.58 | diff --git a/docs/guides/performance_improving/training_best_practice.rst 
b/docs/guides/performance_improving/training_best_practice.rst index 95e71abd70e..af48bbdd543 100644 --- a/docs/guides/performance_improving/training_best_practice.rst +++ b/docs/guides/performance_improving/training_best_practice.rst @@ -8,9 +8,9 @@ 开始优化您的单机训练任务 ------------------------- -PaddlePaddle Fluid可以支持在现代CPU、GPU平台上进行训练。如果您发现Fluid进行单机训练的速度较慢,您可以根据这篇文档的建议对您的Fluid程序进行优化。 +PaddlePaddle Fluid 可以支持在现代 CPU、GPU 平台上进行训练。如果您发现 Fluid 进行单机训练的速度较慢,您可以根据这篇文档的建议对您的 Fluid 程序进行优化。 -神经网络训练代码通常由三个步骤组成:网络构建、数据准备、模型训练。这篇文档将分别从这三个方向介绍Fluid训练中常用的优化方法。 +神经网络训练代码通常由三个步骤组成:网络构建、数据准备、模型训练。这篇文档将分别从这三个方向介绍 Fluid 训练中常用的优化方法。 1. 网络构建过程中的配置优化 @@ -18,10 +18,10 @@ PaddlePaddle Fluid可以支持在现代CPU、GPU平台上进行训练。如果 这部分优化与具体的模型有关,在这里,我们列举出一些优化过程中遇到过的一些示例。 -1.1 cuDNN操作的选择 +1.1 cuDNN 操作的选择 ^^^^^^^^^^^^^^^^ -cuDNN是NVIDIA提供的深度神经网络计算库,其中包含了很多神经网络中常用算子,Paddle中的部分Op底层调用的是cuDNN库,例如 :code:`conv2d` : +cuDNN 是 NVIDIA 提供的深度神经网络计算库,其中包含了很多神经网络中常用算子,Paddle 中的部分 Op 底层调用的是 cuDNN 库,例如 :code:`conv2d` : .. code-block:: python @@ -39,14 +39,14 @@ cuDNN是NVIDIA提供的深度神经网络计算库,其中包含了很多神经 name=None, data_format="NCHW") -在 :code:`use_cudnn=True` 时,框架底层调用的是cuDNN中的卷积操作。 +在 :code:`use_cudnn=True` 时,框架底层调用的是 cuDNN 中的卷积操作。 -通常cuDNN库提供的操作具有很好的性能表现,其性能明显优于Paddle原生的CUDA实现,比如 :code:`conv2d` 。但是cuDNN中有些操作的性能较差,比如: :code:`conv2d_transpose` 在 :code:`batch_size=1` 时、:code:`pool2d` 在 :code:`global_pooling=True` 时等,这些情况下,cuDNN实现的性能差于Paddle的CUDA实现,建议手动设置 :code:`use_cudnn=False` 。 +通常 cuDNN 库提供的操作具有很好的性能表现,其性能明显优于 Paddle 原生的 CUDA 实现,比如 :code:`conv2d` 。但是 cuDNN 中有些操作的性能较差,比如: :code:`conv2d_transpose` 在 :code:`batch_size=1` 时、:code:`pool2d` 在 :code:`global_pooling=True` 时等,这些情况下,cuDNN 实现的性能差于 Paddle 的 CUDA 实现,建议手动设置 :code:`use_cudnn=False` 。 -1.2 减少模型中Layer的个数 +1.2 减少模型中 Layer 的个数 ^^^^^^^^^^^^^^^^^^ -为方便用户使用,飞桨提供一些不同粒度的Layer,其中有些Layer的组合可以通过单个Layer完成。比如: +为方便用户使用,飞桨提供一些不同粒度的 Layer,其中有些 Layer 的组合可以通过单个 Layer 完成。比如: (1) :code:`fluid.layers.softmax_with_cross_entropy` ,该操作其实是 :code:`fluid.layers.softmax` 和 :code:`fluid.layers.cross_entropy` 
的组合,因此如果模型中有出现 @@ -62,16 +62,16 @@ cuDNN是NVIDIA提供的深度神经网络计算库,其中包含了很多神经 loss = fluid.layers.softmax_with_cross_entropy(logits, label, ignore_index=255, numeric_stable_mode=True) -(2) 如果模型中需要对数据进行标准化,可以直接使用 :code:`fluid.layers.data_norm` ,而不用通过一系列layer组合出数据的标准化操作。 +(2) 如果模型中需要对数据进行标准化,可以直接使用 :code:`fluid.layers.data_norm` ,而不用通过一系列 layer 组合出数据的标准化操作。 -因此,建议在构建模型时优先使用飞桨提供的单个Layer完成所需操作,这样减少模型中Layer的个数,并因此加速模型训练。 +因此,建议在构建模型时优先使用飞桨提供的单个 Layer 完成所需操作,这样减少模型中 Layer 的个数,并因此加速模型训练。 2. 数据准备优化 ============= -数据准备通常分为两部分:第一部分是数据加载,即程序从磁盘中加载训练/预测数据;第二部分是数据预处理,程序对加载的数据进行预处理,比如图像任务通常需要进行数据增强、Shuffle等。 -这两部分需要用户根据自己的模型需要进行设置,只需要最后得到Data Reader接口即可。Data Reader返回iterable对象,可以每次返回一条样本或者一组样本。代码示例如下: +数据准备通常分为两部分:第一部分是数据加载,即程序从磁盘中加载训练/预测数据;第二部分是数据预处理,程序对加载的数据进行预处理,比如图像任务通常需要进行数据增强、Shuffle 等。 +这两部分需要用户根据自己的模型需要进行设置,只需要最后得到 Data Reader 接口即可。Data Reader 返回 iterable 对象,可以每次返回一条样本或者一组样本。代码示例如下: .. code-block:: python @@ -83,7 +83,7 @@ cuDNN是NVIDIA提供的深度神经网络计算库,其中包含了很多神经 train_data_reader = data_reader(32, 32) -Paddle提供了两种方式从Data Reader中读取数据: :ref:`user_guide_use_numpy_array_as_train_data` 和 :ref:`user_guides_use_py_reader` ,详情请参考文档 :ref:`user_guide_prepare_data` 。 +Paddle 提供了两种方式从 Data Reader 中读取数据: :ref:`user_guide_use_numpy_array_as_train_data` 和 :ref:`user_guides_use_py_reader` ,详情请参考文档 :ref:`user_guide_prepare_data` 。 2.1 同步数据读取 ^^^^^^^^^^^^^^^^ @@ -101,7 +101,7 @@ Paddle提供了两种方式从Data Reader中读取数据: :ref:`user_guide_use avg_loss = fluid.layers.mean(loss) # …… # 读取数据 - # paddle.dataset.mnist.train()返回数据读取的Reader,每次可以从Reader中读取一条样本,batch_size为128 + # paddle.dataset.mnist.train()返回数据读取的 Reader,每次可以从 Reader 中读取一条样本,batch_size 为 128 train_reader = paddle.batch(paddle.dataset.mnist.train(), 128) # 读取数据 @@ -114,16 +114,16 @@ Paddle提供了两种方式从Data Reader中读取数据: :ref:`user_guide_use end = time.time() -用户首先需要通过 :code:`fluid.data` 定义模型的输入,然后根据输入构建模型,最后从事先自定义的Reader函数中获取一个batch的数据,并将数据传递给执行器。 +用户首先需要通过 :code:`fluid.data` 定义模型的输入,然后根据输入构建模型,最后从事先自定义的 Reader 函数中获取一个 batch 的数据,并将数据传递给执行器。 
-采用同步数据读取方式时,用户可通过加入Python计时函数 :code:`time.time()` 来统计数据准备部分和执行部分所占用的时间。 +采用同步数据读取方式时,用户可通过加入 Python 计时函数 :code:`time.time()` 来统计数据准备部分和执行部分所占用的时间。 由于数据准备和执行是顺序进行的,所以程序的执行速度可能较慢。如果用户想进行模型调试的话,同步数据读取是一个不错的选择。 2.2 异步数据读取 ^^^^^^^^^^^^^^^^ -Paddle里面使用 paddle.fluid.io. :ref:`cn_api_fluid_io_DataLoader` 接口来实现异步数据读取,代码示例如下: +Paddle 里面使用 paddle.fluid.io. :ref:`cn_api_fluid_io_DataLoader` 接口来实现异步数据读取,代码示例如下: .. code-block:: python @@ -144,7 +144,7 @@ Paddle里面使用 paddle.fluid.io. :ref:`cn_api_fluid_io_DataLoader` 接口来 train_reader = paddle.batch(paddle.dataset.mnist.train(), 128) data_loader.set_batch_generator(train_reader, places=places) - # 启动data_loader + # 启动 data_loader data_loader.start() batch_id = 0 try: @@ -159,22 +159,22 @@ Paddle里面使用 paddle.fluid.io. :ref:`cn_api_fluid_io_DataLoader` 接口来 except fluid.core.EOFException: data_loader.reset() -用户首先需要通过 :code:`fluid.io.DataLoader.from_generator` 定义DataLoader对象,并使用 :code:`set_batch_generator` 方法将自定义的Reader与DataLoader绑定。 -若DataLoader被定义成不可迭代的( :code:`iterable=False` ),在训练开始之前,通过调用 :code:`start()` 方法来启动数据读取。 -在数据读取结束之后, :code:`executor.run` 会抛出 :code:`fluid.core.EOFException` ,表示训练已经遍历完Reader中的所有数据。 +用户首先需要通过 :code:`fluid.io.DataLoader.from_generator` 定义 DataLoader 对象,并使用 :code:`set_batch_generator` 方法将自定义的 Reader 与 DataLoader 绑定。 +若 DataLoader 被定义成不可迭代的( :code:`iterable=False` ),在训练开始之前,通过调用 :code:`start()` 方法来启动数据读取。 +在数据读取结束之后, :code:`executor.run` 会抛出 :code:`fluid.core.EOFException` ,表示训练已经遍历完 Reader 中的所有数据。 -采用异步数据读取时,Python端和C++端共同维护一个数据队列,Python端启动一个线程,负责向队列中插入数据,C++端在训练/预测过程中,从数据队列中获取数据,并将该数据从对队列中移除。 +采用异步数据读取时,Python 端和 C++端共同维护一个数据队列,Python 端启动一个线程,负责向队列中插入数据,C++端在训练/预测过程中,从数据队列中获取数据,并将该数据从对队列中移除。 用户可以在程序运行过程中,监测数据队列是否为空,如果队列始终不为空,表明数据准备的速度比模型执行的速度快,这种情况下数据读取可能不是瓶颈。 -另外,Paddle提供的一些FLAGS也能很好的帮助分析性能。如果用户希望评估一下在完全没有数据读取开销情况下模型的性能,可以设置一下环境变量::code:`FLAGS_reader_queue_speed_test_mode` ,在该变量为True情况下,C++端从数据队列中获取数据之后,不会从数据队列中移除,这样能够保证数据队列始终不为空,从而避免了C++端读取数据时的等待开销。 +另外,Paddle 提供的一些 FLAGS 
也能很好的帮助分析性能。如果用户希望评估一下在完全没有数据读取开销情况下模型的性能,可以设置一下环境变量::code:`FLAGS_reader_queue_speed_test_mode` ,在该变量为 True 情况下,C++端从数据队列中获取数据之后,不会从数据队列中移除,这样能够保证数据队列始终不为空,从而避免了 C++端读取数据时的等待开销。 **需要特别注意的是,** :code:`FLAGS_reader_queue_speed_test_mode` **只能在性能分析的时候打开,正常训练模型时需要关闭。** 为降低训练的整体时间,建议用户使用异步数据读取的方式,并开启 :code:`use_double_buffer=True` 。用户可根据模型的实际情况设置数据队列的大小。 -如果数据准备的时间大于模型执行的时间,或者出现了数据队列为空的情况,就需要考虑对数据读取Reader进行加速。 -常用的方法是 **使用Python多进程准备数据** ,一个简单的使用多进程准备数据的示例,可以参考 `YOLOv3 `_ 。 +如果数据准备的时间大于模型执行的时间,或者出现了数据队列为空的情况,就需要考虑对数据读取 Reader 进行加速。 +常用的方法是 **使用 Python 多进程准备数据** ,一个简单的使用多进程准备数据的示例,可以参考 `YOLOv3 `_ 。 -Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相应功能的API,可将这部分预处理功能写到模型配置中,如此Paddle就可以使用GPU来完成该预处理功能,这样也可以减轻CPU预处理数据的负担,提升总体训练速度。 +Python 端的数据预处理,都是使用 CPU 完成。如果 Paddle 提供了相应功能的 API,可将这部分预处理功能写到模型配置中,如此 Paddle 就可以使用 GPU 来完成该预处理功能,这样也可以减轻 CPU 预处理数据的负担,提升总体训练速度。 3. 模型训练相关优化 ============= @@ -182,27 +182,27 @@ Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相 3.1 执行器介绍 ^^^^^^^^^^^^^^^^ -目前Paddle的Python API中提供了 :code:`fluid.compiler.CompiledProgram` 的概念,用户可以通过 :code:`CompiledProgram` 将传入的program进行编译。 -如果希望采用数据并行模式训练,只需要将 :code:`CompiledProgram` 返回的对象调用一下 :code:`with_data_parallel` 即可,最后统一通过 :code:`executor.run(…)` 执行compiled_program。 +目前 Paddle 的 Python API 中提供了 :code:`fluid.compiler.CompiledProgram` 的概念,用户可以通过 :code:`CompiledProgram` 将传入的 program 进行编译。 +如果希望采用数据并行模式训练,只需要将 :code:`CompiledProgram` 返回的对象调用一下 :code:`with_data_parallel` 即可,最后统一通过 :code:`executor.run(…)` 执行 compiled_program。 -虽然统一通过 :code:`executor.run(…)` 接口来执行,实际底层的执行策略有两种,对应C++部分的两个执行器,即 :code:`Executor` 和 :code:`ParallelExecutor` ,如果用户采用数据并行模式,C++部分使用的是 :code:`ParallelExecutor` ,除此之外都是使用 :code:`Executor` 。 +虽然统一通过 :code:`executor.run(…)` 接口来执行,实际底层的执行策略有两种,对应 C++部分的两个执行器,即 :code:`Executor` 和 :code:`ParallelExecutor` ,如果用户采用数据并行模式,C++部分使用的是 :code:`ParallelExecutor` ,除此之外都是使用 :code:`Executor` 。 这两个执行器的差别: .. 
csv-table:: :header: "执行器 ", "执行对象", "执行策略" :widths: 3, 3, 5 - ":code:`Executor`", ":code:`Program`", "根据 :code:`Program` 中Operator定义的先后顺序依次运行。" - ":code:`ParallelExecutor`", "SSA Graph", "根据Graph中各个节点之间的依赖关系,通过多线程运行。" + ":code:`Executor`", ":code:`Program`", "根据 :code:`Program` 中 Operator 定义的先后顺序依次运行。" + ":code:`ParallelExecutor`", "SSA Graph", "根据 Graph 中各个节点之间的依赖关系,通过多线程运行。" -可以看出, :code:`Executor` 的内部逻辑非常简单,但性能可能会弱一些,因为 :code:`Executor` 对于program中的操作是串行执行的。 -而 :code:`ParallelExecutor` 首先会将program转变为计算图,并分析计算图中节点间的连接关系,对图中没有相互依赖的节点(OP),通过多线程并行执行。 +可以看出, :code:`Executor` 的内部逻辑非常简单,但性能可能会弱一些,因为 :code:`Executor` 对于 program 中的操作是串行执行的。 +而 :code:`ParallelExecutor` 首先会将 program 转变为计算图,并分析计算图中节点间的连接关系,对图中没有相互依赖的节点(OP),通过多线程并行执行。 因此, :code:`Executor` 是一个轻量级的执行器,目前主要用于参数初始化、模型保存、模型加载。 :code:`ParallelExecutor` 是 :code:`Executor` 的升级版本,目前 :code:`ParallelExecutor` 主要用于模型训练,包括单机单卡、单机多卡以及多机多卡训练。 -:code:`ParallelExecutor` 执行计算图之前,可以对计算图进行一些优化,比如使计算图中的一些操作是In-place的、将计算图中的参数更新操作进行融合等。 +:code:`ParallelExecutor` 执行计算图之前,可以对计算图进行一些优化,比如使计算图中的一些操作是 In-place 的、将计算图中的参数更新操作进行融合等。 用户还可以调整 :code:`ParallelExecutor` 执行过程中的一些配置,比如执行计算图的线程数等。这些配置分别是构建策略(BuildStrategy)和执行策略(ExecutionStrategy)参数来设置的。 @@ -224,7 +224,7 @@ Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相 place = fluid.CUDAPlace(0) exe = Executor(place) - # 使用DataLoader读取数据,因此执行时不需要设置feed + # 使用 DataLoader 读取数据,因此执行时不需要设置 feed fetch_outs = exe.run(train_program, fetch_list=[loss.name]) @@ -232,7 +232,7 @@ Python端的数据预处理,都是使用CPU完成。如果Paddle提供了相 3.2 构建策略(BuildStrategy)配置参数介绍 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -BuildStrategy中提供了一些关于计算图优化的策略,这些策略可以在不同程度上提升模型的训练速度,但是其中一些策略与模型的结构有关,比如 :code:`fuse_all_optimizer_ops` 不支持sparse梯度,我们正在积极的完善这些策略,并在下一个版本将这些策略默认打开。 +BuildStrategy 中提供了一些关于计算图优化的策略,这些策略可以在不同程度上提升模型的训练速度,但是其中一些策略与模型的结构有关,比如 :code:`fuse_all_optimizer_ops` 不支持 sparse 梯度,我们正在积极的完善这些策略,并在下一个版本将这些策略默认打开。 构建策略的详细介绍如下: @@ -243,66 +243,66 @@ BuildStrategy中提供了一些关于计算图优化的策略,这些策略可 ":code:`reduce_strategy`", ":code:`fluid.BuildStrategy.ReduceStrategy`", 
":code:`fluid.BuildStrategy.ReduceStrategy.AllReduce`", "使用数据并行训练模型时选用 :code:`AllReduce` 模式训练还是 :code:`Reduce` 模式训练。" ":code:`enable_backward_optimizer_op_deps`", "bool", "True", "在反向操作和参数更新操作之间添加依赖,保证在所有的反向操作都运行结束之后才开始运行参数更新操作。" ":code:`fuse_all_optimizer_ops`", "bool", "False", "对模型中的参数更新算法进行融合。" - ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将all_reduce操作进行融合。" - ":code:`fuse_relu_depthwise_conv`", "bool", "False", "如果模型中存在relu和depthwise_conv,并且是连接的,即relu->depthwise_conv,该选项可以将这两个操作合并为一个。" - ":code:`fuse_broadcast_ops`", "bool", "False", "在 :code:`Reduce` 模式下,将最后的多个Broadcast操作融合为一个。" - ":code:`mkldnn_enabled_op_types`", "list", "{}", "如果是CPU训练,可以用 :code:`mkldnn_enabled_op_types` 指明模型中的那些操作可以使用MKLDNN库。默认情况下,模型中用到的操作如果在Paddle目前支持的可以使用mkldnn库计算的列表中,这些操作都会调用mkldnn库的接口进行计算。" - ":code:`debug_graphviz_path`", "str", "{}", "将Graph以graphviz格式输出到debug_graphviz_path所指定的文件中。" + ":code:`fuse_all_reduce_ops`", "bool", "False", "多卡训练时,将 all_reduce 操作进行融合。" + ":code:`fuse_relu_depthwise_conv`", "bool", "False", "如果模型中存在 relu 和 depthwise_conv,并且是连接的,即 relu->depthwise_conv,该选项可以将这两个操作合并为一个。" + ":code:`fuse_broadcast_ops`", "bool", "False", "在 :code:`Reduce` 模式下,将最后的多个 Broadcast 操作融合为一个。" + ":code:`mkldnn_enabled_op_types`", "list", "{}", "如果是 CPU 训练,可以用 :code:`mkldnn_enabled_op_types` 指明模型中的那些操作可以使用 MKLDNN 库。默认情况下,模型中用到的操作如果在 Paddle 目前支持的可以使用 mkldnn 库计算的列表中,这些操作都会调用 mkldnn 库的接口进行计算。" + ":code:`debug_graphviz_path`", "str", "{}", "将 Graph 以 graphviz 格式输出到 debug_graphviz_path 所指定的文件中。" 参数说明: -(1) 关于 :code:`reduce_strategy` ,在 :code:`ParallelExecutor` 对于数据并行支持两种参数更新模式: :code:`AllReduce` 和 :code:`Reduce` 。在 :code:`AllReduce` 模式下,各个节点上计算得到梯度之后,调用 :code:`AllReduce` 操作,梯度在各个节点上聚合,然后各个节点分别进行参数更新。在 :code:`Reduce` 模式下,参数的更新操作被均匀的分配到各个节点上,即各个节点计算得到梯度之后,将梯度在指定的节点上进行 :code:`Reduce` ,然后在该节点上,最后将更新之后的参数Broadcast到其他节点。即:如果模型中有100个参数需要更新,训练时使用的是4个节点,在 :code:`AllReduce` 模式下,各个节点需要分别对这100个参数进行更新;在 :code:`Reduce` 
模式下,各个节点需要分别对这25个参数进行更新,最后将更新的参数Broadcast到其他节点上。注意:如果是使用CPU进行数据并行训练,在Reduce模式下,不同CPUPlace上的参数是共享的,所以在各个CPUPlace上完成参数更新之后不用将更新后的参数Broadcast到其他CPUPlace。 +(1) 关于 :code:`reduce_strategy` ,在 :code:`ParallelExecutor` 对于数据并行支持两种参数更新模式: :code:`AllReduce` 和 :code:`Reduce` 。在 :code:`AllReduce` 模式下,各个节点上计算得到梯度之后,调用 :code:`AllReduce` 操作,梯度在各个节点上聚合,然后各个节点分别进行参数更新。在 :code:`Reduce` 模式下,参数的更新操作被均匀的分配到各个节点上,即各个节点计算得到梯度之后,将梯度在指定的节点上进行 :code:`Reduce` ,然后在该节点上,最后将更新之后的参数 Broadcast 到其他节点。即:如果模型中有 100 个参数需要更新,训练时使用的是 4 个节点,在 :code:`AllReduce` 模式下,各个节点需要分别对这 100 个参数进行更新;在 :code:`Reduce` 模式下,各个节点需要分别对这 25 个参数进行更新,最后将更新的参数 Broadcast 到其他节点上。注意:如果是使用 CPU 进行数据并行训练,在 Reduce 模式下,不同 CPUPlace 上的参数是共享的,所以在各个 CPUPlace 上完成参数更新之后不用将更新后的参数 Broadcast 到其他 CPUPlace。 (2) 关于 :code:`enable_backward_optimizer_op_deps` ,在多卡训练时,打开该选项可能会提升训练速度。 -(3) 关于 :code:`fuse_all_optimizer_ops` ,目前只支持SGD、Adam和Momentum算法。 **注意:目前不支持sparse参数梯度** 。 +(3) 关于 :code:`fuse_all_optimizer_ops` ,目前只支持 SGD、Adam 和 Momentum 算法。 **注意:目前不支持 sparse 参数梯度** 。 -(4) 关于 :code:`fuse_all_reduce_ops` ,多GPU训练时,可以对 :code:`AllReduce` 操作进行融合,以减少 :code:`AllReduce` 的调用次数。默认情况下会将同一layer中参数的梯度的 :code:`AllReduce` 操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有Weight和Bias两个参数,打开该选项之后,原本需要两次 :code:`AllReduce` 操作,现在只用一次 :code:`AllReduce` 操作。此外,为支持更大粒度的参数梯度融合,Paddle提供了 :code:`FLAGS_fuse_parameter_memory_size` 选项,用户可以指定融合AllReduce操作之后,每个 :code:`AllReduce` 操作的梯度字节数,比如希望每次 :code:`AllReduce` 调用传输64MB的梯度,:code:`export FLAGS_fuse_parameter_memory_size=64` 。 **注意:目前不支持sparse参数梯度** 。 +(4) 关于 :code:`fuse_all_reduce_ops` ,多 GPU 训练时,可以对 :code:`AllReduce` 操作进行融合,以减少 :code:`AllReduce` 的调用次数。默认情况下会将同一 layer 中参数的梯度的 :code:`AllReduce` 操作合并成一个,比如对于 :code:`fluid.layers.fc` 中有 Weight 和 Bias 两个参数,打开该选项之后,原本需要两次 :code:`AllReduce` 操作,现在只用一次 :code:`AllReduce` 操作。此外,为支持更大粒度的参数梯度融合,Paddle 提供了 :code:`FLAGS_fuse_parameter_memory_size` 选项,用户可以指定融合 AllReduce 操作之后,每个 :code:`AllReduce` 操作的梯度字节数,比如希望每次 :code:`AllReduce` 调用传输 64MB 的梯度,:code:`export FLAGS_fuse_parameter_memory_size=64` 。 **注意:目前不支持 
sparse 参数梯度** 。 -(5) 关于 :code:`mkldnn_enabled_op_types` ,目前Paddle的Op中可以使用mkldnn库计算的操作包括:transpose、sum、softmax、requantize、quantize、pool2d、lrn、gaussian_random、fc、dequantize、conv2d_transpose、conv2d、conv3d、concat、batch_norm、relu、tanh、sqrt、abs。 +(5) 关于 :code:`mkldnn_enabled_op_types` ,目前 Paddle 的 Op 中可以使用 mkldnn 库计算的操作包括:transpose、sum、softmax、requantize、quantize、pool2d、lrn、gaussian_random、fc、dequantize、conv2d_transpose、conv2d、conv3d、concat、batch_norm、relu、tanh、sqrt、abs。 3.3 执行策略(ExecutionStrategy)配置参数介绍 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -ExecutionStrategy中提供了关于计算图执行时的一些配置,这些配置可能会影响模型的训练速度。同时,这些配置与模型的结构有关,如果用户希望模型训练速度更快,可以调整一下这些配置。在后续的优化中,我们会对这部分进行优化,根据输入模型结构动态调整这些设置。 +ExecutionStrategy 中提供了关于计算图执行时的一些配置,这些配置可能会影响模型的训练速度。同时,这些配置与模型的结构有关,如果用户希望模型训练速度更快,可以调整一下这些配置。在后续的优化中,我们会对这部分进行优化,根据输入模型结构动态调整这些设置。 -ExecutionStrategy配置选项说明: +ExecutionStrategy 配置选项说明: .. csv-table:: :header: "选项", "类型", "默认值", "说明" :widths: 3, 3, 5, 5 - ":code:`num_iteration_per_drop_scope`", "INT", "100", "经过多少次迭代之后清理一次local execution scope" - ":code:`num_threads`", "INT", "对于CPU:2*dev_count;对于GPU:4*dev_count. (这是一个经验值)", ":code:`ParallelExecutor` 中执行所有Op使用的线程池大小" + ":code:`num_iteration_per_drop_scope`", "INT", "100", "经过多少次迭代之后清理一次 local execution scope" + ":code:`num_threads`", "INT", "对于 CPU:2*dev_count;对于 GPU:4*dev_count. 
(这是一个经验值)", ":code:`ParallelExecutor` 中执行所有 Op 使用的线程池大小" 说明: -(1) 关于 :code:`num_iteration_per_drop_scope` ,框架在运行过程中会产生一些临时变量,默认每经过一个batch就要清理一下临时变量。由于GPU是异步设备,在清理之前需要对所有的GPU调用一次同步操作,因此耗费的时间较长。为此我们在execution_strategy中添加了 :code:`num_iteration_per_drop_scope` 选项。用户可以指定经过多少次迭代之后清理一次。 +(1) 关于 :code:`num_iteration_per_drop_scope` ,框架在运行过程中会产生一些临时变量,默认每经过一个 batch 就要清理一下临时变量。由于 GPU 是异步设备,在清理之前需要对所有的 GPU 调用一次同步操作,因此耗费的时间较长。为此我们在 execution_strategy 中添加了 :code:`num_iteration_per_drop_scope` 选项。用户可以指定经过多少次迭代之后清理一次。 -(2) 关于 :code:`num_threads` ,:code:`ParallelExecutor` 根据Op之间的依赖关系确定Op的执行顺序,即:当Op的输入都已经变为ready状态之后,该Op会被放到一个队列中,等待被执行。 :code:`ParallelExecutor` 内部有一个任务调度线程和一个线程池,任务调度线程从队列中取出所有Ready的Op,并将其放到线程队列中。 :code:`num_threads` 表示线程池的大小。根据以往的经验,对于CPU任务,:code:`num_threads=2*dev_count` 时性能较好,对于GPU任务,:code:`num_threads=4*dev_count` 时性能较好。 **注意:线程池不是越大越好** 。 +(2) 关于 :code:`num_threads` ,:code:`ParallelExecutor` 根据 Op 之间的依赖关系确定 Op 的执行顺序,即:当 Op 的输入都已经变为 ready 状态之后,该 Op 会被放到一个队列中,等待被执行。 :code:`ParallelExecutor` 内部有一个任务调度线程和一个线程池,任务调度线程从队列中取出所有 Ready 的 Op,并将其放到线程队列中。 :code:`num_threads` 表示线程池的大小。根据以往的经验,对于 CPU 任务,:code:`num_threads=2*dev_count` 时性能较好,对于 GPU 任务,:code:`num_threads=4*dev_count` 时性能较好。 **注意:线程池不是越大越好** 。 -4. 运行时FLAGS设置优化 +4. 
运行时 FLAGS 设置优化 ================= -Paddle中有一些FLAGS可以有助于性能优化: +Paddle 中有一些 FLAGS 可以有助于性能优化: -(1) :code:`FLAGS_cudnn_exhaustive_search` 表示在调用cuDNN中的卷积操作时,根据输入数据的shape等信息,采取穷举搜索的策略从算法库中选取到更快的卷积算法,进而实现对模型中卷积操作的加速。需要注意的是: - - 在搜索算法过程中需要使用较多的显存,如果用户的模型中卷积操作较多,或者GPU卡显存较小,可能会出现显存不足问题。 - - 通过穷举搜索选择好算法之后,该算法会进入Cache,以便下次运行时,如果输入数据的shape等信息不变,直接使用Cache中算法。 +(1) :code:`FLAGS_cudnn_exhaustive_search` 表示在调用 cuDNN 中的卷积操作时,根据输入数据的 shape 等信息,采取穷举搜索的策略从算法库中选取到更快的卷积算法,进而实现对模型中卷积操作的加速。需要注意的是: + - 在搜索算法过程中需要使用较多的显存,如果用户的模型中卷积操作较多,或者 GPU 卡显存较小,可能会出现显存不足问题。 + - 通过穷举搜索选择好算法之后,该算法会进入 Cache,以便下次运行时,如果输入数据的 shape 等信息不变,直接使用 Cache 中算法。 -(2) :code:`FLAGS_enable_cublas_tensor_op_math` 表示是否使用TensorCore加速cuBLAS等NV提供的库中的操作。需要注意的是,这个环境变量只在Tesla V100以及更新的GPU上适用,且可能会带来一定的精度损失,通常该损失不会影响模型的收敛性。 +(2) :code:`FLAGS_enable_cublas_tensor_op_math` 表示是否使用 TensorCore 加速 cuBLAS 等 NV 提供的库中的操作。需要注意的是,这个环境变量只在 Tesla V100 以及更新的 GPU 上适用,且可能会带来一定的精度损失,通常该损失不会影响模型的收敛性。 5. 优秀实践 ================= -(1) 尽可能的使用飞桨提供的单个layer实现所需操作。 +(1) 尽可能的使用飞桨提供的单个 layer 实现所需操作。 (2) 采用异步数据读取。 (3) 模型训练相关优化: - - 使用ParallelExecutor作为底层执行器。单卡训练,也可以调用with_data_parallel方法。代码示例: + - 使用 ParallelExecutor 作为底层执行器。单卡训练,也可以调用 with_data_parallel 方法。代码示例: .. 
code-block:: python @@ -310,16 +310,16 @@ Paddle中有一些FLAGS可以有助于性能优化: fluid.default_main_program()).with_data_parallel( loss_name=loss.name) - - 如果模型中参数的梯度都是非sparse的,可以打开fuse_all_optimizer_ops选项,将多个参数更新操作融合为一个。 - - 如果是多卡训练,可以打开enable_backward_optimizer_op_deps、fuse_all_reduce_ops选项。如果想指定每次每次AllReduce操作的数据大小,可以设置 :code:`FLAGS_fuse_parameter_memory_size`,比如 :code:`export FLAGS_fuse_parameter_memory_size=1` ,表示每次AllReduce调用传输1MB的梯度。 - - 使用CPU做数据并行训练时,推荐使用Reduce模型,因为在使用CPU进行数据并行训练时,在Reduce模式下,不同CPUPlace 上的参数是共享的,所以在各个CPUPlace 上完成参数更新之后不用将更新后的参数Broadcast到其他CPUPlace上,这对提升速度也有很大帮助。 - - 如果是Reduce模式,可打开fuse_broadcast_ops选项。 - - 如果用户的模型较小,比如mnist、language_model等,可以将num_threads设为1。 - - 在显存足够的前提下,建议将 :code:`exec_strategy.num_iteration_per_drop_scope` 设置成一个较大的值,比如设置为100,这样可以避免反复地申请和释放内存。 + - 如果模型中参数的梯度都是非 sparse 的,可以打开 fuse_all_optimizer_ops 选项,将多个参数更新操作融合为一个。 + - 如果是多卡训练,可以打开 enable_backward_optimizer_op_deps、fuse_all_reduce_ops 选项。如果想指定每次每次 AllReduce 操作的数据大小,可以设置 :code:`FLAGS_fuse_parameter_memory_size`,比如 :code:`export FLAGS_fuse_parameter_memory_size=1` ,表示每次 AllReduce 调用传输 1MB 的梯度。 + - 使用 CPU 做数据并行训练时,推荐使用 Reduce 模型,因为在使用 CPU 进行数据并行训练时,在 Reduce 模式下,不同 CPUPlace 上的参数是共享的,所以在各个 CPUPlace 上完成参数更新之后不用将更新后的参数 Broadcast 到其他 CPUPlace 上,这对提升速度也有很大帮助。 + - 如果是 Reduce 模式,可打开 fuse_broadcast_ops 选项。 + - 如果用户的模型较小,比如 mnist、language_model 等,可以将 num_threads 设为 1。 + - 在显存足够的前提下,建议将 :code:`exec_strategy.num_iteration_per_drop_scope` 设置成一个较大的值,比如设置为 100,这样可以避免反复地申请和释放内存。 目前我们正在推进这些配置自动化的工作:即根据输入的模型结构自动配置这些选项,争取在下一个版本中实现,敬请期待。 -(4) FLAGS设置 +(4) FLAGS 设置 .. code-block:: bash @@ -327,7 +327,7 @@ Paddle中有一些FLAGS可以有助于性能优化: FLAGS_enable_cublas_tensor_op_math = True -6. 使用Profile工具进行性能分析 +6. 
使用 Profile 工具进行性能分析 ====================== -为方便用户更好的发现程序中的性能瓶颈,Paddle提供了多种Profile工具,这些工具的详细介绍和使用说明请参考 :ref:`api_guide_analysis_tools` 。 +为方便用户更好的发现程序中的性能瓶颈,Paddle 提供了多种 Profile 工具,这些工具的详细介绍和使用说明请参考 :ref:`api_guide_analysis_tools` 。 diff --git a/docs/install/FAQ.md b/docs/install/FAQ.md index b6e374f79a0..0cc2ecf46ac 100644 --- a/docs/install/FAQ.md +++ b/docs/install/FAQ.md @@ -1,15 +1,15 @@ # **FAQ** -- 报错“nccl.h找不到” +- 报错“nccl.h 找不到” - > 请[安装nccl2](https://developer.nvidia.com/nccl/nccl-download) + > 请[安装 nccl2](https://developer.nvidia.com/nccl/nccl-download) - 报错`Cannot uninstall 'six'.` - > 此问题可能与系统中已有Python有关,请使用`pip install paddlepaddle --ignore-installed six`(CPU)或`pip install paddlepaddle --ignore-installed six`(GPU)解决 + > 此问题可能与系统中已有 Python 有关,请使用`pip install paddlepaddle --ignore-installed six`(CPU)或`pip install paddlepaddle --ignore-installed six`(GPU)解决 -- CentOS6下如何编译python2.7为共享库? +- CentOS6 下如何编译 python2.7 为共享库? > 使用以下指令: @@ -18,15 +18,15 @@ -- Ubuntu18.04下libidn11找不到? +- Ubuntu18.04 下 libidn11 找不到? > 使用以下指令: apt install libidn11 -- Ubuntu编译时出现大量的代码段不能识别? +- Ubuntu 编译时出现大量的代码段不能识别? - > 这可能是由于cmake版本不匹配造成的,请在gcc的安装目录下使用以下指令: + > 这可能是由于 cmake 版本不匹配造成的,请在 gcc 的安装目录下使用以下指令: apt install gcc-4.8 g++-4.8 cp gcc gcc.bak @@ -36,20 +36,20 @@ ln -s gcc-4.8 gcc ln -s g++-4.8 g++ -- 遇到paddlepaddle.whl is not a supported wheel on this platform? +- 遇到 paddlepaddle.whl is not a supported wheel on this platform? 
- > 出现这个问题的主要原因是,没有找到和当前系统匹配的paddlepaddle安装包。 请检查Python版本是否为2.7系列。另外最新的pip官方源中的安装包默认是manylinux1标准, 需要使用最新的pip (>9.0.0) 才可以安装。您可以执行以下指令更新您的pip: + > 出现这个问题的主要原因是,没有找到和当前系统匹配的 paddlepaddle 安装包。 请检查 Python 版本是否为 2.7 系列。另外最新的 pip 官方源中的安装包默认是 manylinux1 标准, 需要使用最新的 pip (>9.0.0) 才可以安装。您可以执行以下指令更新您的 pip: pip install --upgrade pip 或者 python -c "import pip; print(pip.pep425tags.get_supported())" - > 如果系统支持的是 linux_x86_64 而安装包是 manylinux1_x86_64 ,需要升级pip版本到最新; 如果系统支持 manylinux1_x86_64 而安装包 (本地)是 linux_x86_64, 可以重命名这个whl包为 manylinux1_x86_64 再安装。 + > 如果系统支持的是 linux_x86_64 而安装包是 manylinux1_x86_64 ,需要升级 pip 版本到最新; 如果系统支持 manylinux1_x86_64 而安装包 (本地)是 linux_x86_64, 可以重命名这个 whl 包为 manylinux1_x86_64 再安装。 -- 使用Docker编译出现问题? +- 使用 Docker 编译出现问题? - > 请参照GitHub上[Issue12079](https://github.com/PaddlePaddle/Paddle/issues/12079) + > 请参照 GitHub 上[Issue12079](https://github.com/PaddlePaddle/Paddle/issues/12079) - 可以用 IDE 吗? @@ -71,11 +71,11 @@ > 本文中的例子里,`docker run` 命令里都用了 `--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 `docker ps -a` 命令看到停止后但是没有删除的 containers。`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考 [这篇文章](https://zaiste.net/posts/removing_docker_containers) 来清理这些内容。 -- 在DockerToolbox下使用book时`http://localhost:8888/`无法打开? +- 在 DockerToolbox 下使用 book 时`http://localhost:8888/`无法打开? 
- > 需要将localhost替换成虚拟机ip,一般需要在浏览器中输入:`http://192.168.99.100:8888/` + > 需要将 localhost 替换成虚拟机 ip,一般需要在浏览器中输入:`http://192.168.99.100:8888/` -- pip install gpu版本的PaddlePaddle后运行出现SegmentFault如下: +- pip install gpu 版本的 PaddlePaddle 后运行出现 SegmentFault 如下: @ 0x7f6c8d214436 paddle::platform::EnforceNotMet::EnforceNotMet() @@ -84,12 +84,12 @@ @ 0x7f6c8d2b93b6 paddle::framework::InitDevices() - > 出现这个问题原因主要是由于您的显卡驱动低于对应CUDA版本的要求,请保证您的显卡驱动支持所使用的CUDA版本 + > 出现这个问题原因主要是由于您的显卡驱动低于对应 CUDA 版本的要求,请保证您的显卡驱动支持所使用的 CUDA 版本 -- macOS 下安装 PaddlePaddle 后import paddle.fluid出现`Fatal Python error: PyThreadState_Get: no current thread running`错误 +- macOS 下安装 PaddlePaddle 后 import paddle.fluid 出现`Fatal Python error: PyThreadState_Get: no current thread running`错误 - For Python2.7.x (install by brew): 请使用`export LD_LIBRARY_PATH=/usr/local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7 && export DYLD_LIBRARY_PATH=/usr/local/Cellar/python@2/2.7.15_1/Frameworks/Python.framework/Versions/2.7` - For Python2.7.x (install by Python.org): 请使用`export LD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/2.7 && export DYLD_LIBRARY_PATH=/Library/Frameworks/Python.framework/Versions/2.7` @@ -97,14 +97,14 @@ -- MACOS下使用自定义的openblas 详见issue: +- MACOS 下使用自定义的 openblas 详见 issue: > [ISSUE 13217](https://github.com/PaddlePaddle/Paddle/issues/13721) -- 已经安装swig但是仍旧出现swig找不到的问题 详见issue: +- 已经安装 swig 但是仍旧出现 swig 找不到的问题 详见 issue: > [ISSUE 13759](https://github.com/PaddlePaddle/Paddle/issues/13759) -- 出现 “target pattern contain no '%'.”的问题 详见issue: +- 出现 “target pattern contain no '%'.”的问题 详见 issue: > [ISSUE 13806](https://github.com/PaddlePaddle/Paddle/issues/13806) diff --git a/docs/install/Tables.md b/docs/install/Tables.md index 33b364e4b6c..fa709dcb140 100644 --- a/docs/install/Tables.md +++ b/docs/install/Tables.md @@ -23,13 +23,13 @@ GCC (Linux Only) 5.4 / 8.2 - 推荐使用CentOS的devtools2 + 推荐使用 CentOS 的 devtools2 Clang (macOS Only) - 9.0及以上 - 通常使用 macOS 10.11及以上的系统对应的Clang版本即可 + 9.0 及以上 + 
通常使用 macOS 10.11 及以上的系统对应的 Clang 版本即可 @@ -84,7 +84,7 @@ patchELF any - apt install patchelf 或参见github patchELF 官方文档 + apt install patchelf 或参见 github patchELF 官方文档 go @@ -125,22 +125,22 @@ WITH_GPU - 是否支持CUDA + 是否支持 CUDA ON WITH_ROCM - 是否支持ROCM + 是否支持 ROCM OFF WITH_AVX - 是否编译含有AVX指令集的PaddlePaddle二进制文件 + 是否编译含有 AVX 指令集的 PaddlePaddle 二进制文件 ON WITH_PYTHON - 是否内嵌PYTHON解释器 + 是否内嵌 PYTHON 解释器 ON @@ -150,12 +150,12 @@ WITH_MKL - 是否使用MKL数学库,如果为否则是用OpenBLAS + 是否使用 MKL 数学库,如果为否则是用 OpenBLAS ON WITH_SYSTEM_BLAS - 是否使用系统自带的BLAS + 是否使用系统自带的 BLAS OFF @@ -165,7 +165,7 @@ WITH_BRPC_RDMA - 是否使用BRPC RDMA作为RPC协议 + 是否使用 BRPC RDMA 作为 RPC 协议 OFF @@ -176,14 +176,14 @@ CUDA_ARCH_NAME - 是否只针对当前CUDA架构编译 - All:编译所有可支持的CUDA架构 可选:Auto 自动识别当前环境的架构编译 + 是否只针对当前 CUDA 架构编译 + All:编译所有可支持的 CUDA 架构 可选:Auto 自动识别当前环境的架构编译 TENSORRT_ROOT - 指定TensorRT路径 - Windows下默认值为'/',Linux下默认值为 '/usr/' + 指定 TensorRT 路径 + Windows 下默认值为'/',Linux 下默认值为 '/usr/' @@ -195,23 +195,23 @@ **BLAS** -PaddlePaddle支持 [MKL](https://software.intel.com/en-us/mkl) 和 [OpenBlAS](http://www.openblas.net) 两种BLAS库。默认使用MKL。如果使用MKL并且机器含有AVX2指令集,还会下载MKL-DNN数学库,详细参考[这里](https://github.com/PaddlePaddle/Paddle/tree/release/0.11.0/doc/design/mkldnn#cmake) 。 +PaddlePaddle 支持 [MKL](https://software.intel.com/en-us/mkl) 和 [OpenBlAS](http://www.openblas.net) 两种 BLAS 库。默认使用 MKL。如果使用 MKL 并且机器含有 AVX2 指令集,还会下载 MKL-DNN 数学库,详细参考[这里](https://github.com/PaddlePaddle/Paddle/tree/release/0.11.0/doc/design/mkldnn#cmake) 。 -如果关闭MKL,则会使用OpenBLAS作为BLAS库。 +如果关闭 MKL,则会使用 OpenBLAS 作为 BLAS 库。 **CUDA/cuDNN** -PaddlePaddle在编译时/运行时会自动找到系统中安装的CUDA和cuDNN库进行编译和执行。 使用参数 `-DCUDA_ARCH_NAME=Auto` 可以指定开启自动检测SM架构,加速编译。 +PaddlePaddle 在编译时/运行时会自动找到系统中安装的 CUDA 和 cuDNN 库进行编译和执行。 使用参数 `-DCUDA_ARCH_NAME=Auto` 可以指定开启自动检测 SM 架构,加速编译。 -PaddlePaddle可以使用cuDNN v5.1之后的任何一个版本来编译运行,但尽量请保持编译和运行使用的cuDNN是同一个版本。 我们推荐使用最新版本的cuDNN。 +PaddlePaddle 可以使用 cuDNN v5.1 之后的任何一个版本来编译运行,但尽量请保持编译和运行使用的 cuDNN 是同一个版本。 我们推荐使用最新版本的 cuDNN。 **编译选项的设置** -PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。cmake编译时,首先在系统路径( 
`/usr/lib` 和 `/usr/local/lib` )中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用`-D`命令可以设置,例如: +PaddePaddle 通过编译时指定路径来实现引用各种 BLAS/CUDA/cuDNN 库。cmake 编译时,首先在系统路径( `/usr/lib` 和 `/usr/local/lib` )中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用`-D`命令可以设置,例如: > `cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCUDNN_ROOT=/opt/cudnnv5` -**注意**:这几个编译选项的设置,只在第一次cmake的时候有效。如果之后想要重新设置,推荐清理整个编译目录( rm -rf )后,再指定。 +**注意**:这几个编译选项的设置,只在第一次 cmake 的时候有效。如果之后想要重新设置,推荐清理整个编译目录( rm -rf )后,再指定。 @@ -229,25 +229,25 @@ PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。 paddlepaddle==[版本号] 例如 paddlepaddle==2.2.1 - 只支持CPU对应版本的PaddlePaddle,具体版本请参见Pypi + 只支持 CPU 对应版本的 PaddlePaddle,具体版本请参见Pypi paddlepaddle-gpu==[版本号] 例如 paddlepaddle-gpu==2.2.1 - 默认安装支持CUDA 10.2和cuDNN 7的对应[版本号]的PaddlePaddle安装包 + 默认安装支持 CUDA 10.2 和 cuDNN 7 的对应[版本号]的 PaddlePaddle 安装包

-您可以在 [Release History](https://pypi.org/project/paddlepaddle-gpu/#history) 中找到PaddlePaddle-gpu的各个发行版本。 -> 其中`postXX` 对应的是CUDA和cuDNN的版本,`postXX`之前的数字代表Paddle的版本 +您可以在 [Release History](https://pypi.org/project/paddlepaddle-gpu/#history) 中找到 PaddlePaddle-gpu 的各个发行版本。 +> 其中`postXX` 对应的是 CUDA 和 cuDNN 的版本,`postXX`之前的数字代表 Paddle 的版本 -需要注意的是,命令中 paddlepaddle-gpu==2.2.1 在windows环境下,会默认安装支持CUDA 10.2和cuDNN 7的对应[版本号]的PaddlePaddle安装包 +需要注意的是,命令中 paddlepaddle-gpu==2.2.1 在 windows 环境下,会默认安装支持 CUDA 10.2 和 cuDNN 7 的对应[版本号]的 PaddlePaddle 安装包

-## **多版本whl包列表-Release** +## **多版本 whl 包列表-Release**

@@ -456,30 +456,30 @@ PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。 - 纵轴 -cpu-mkl: 支持CPU训练和预测,使用Intel mkl数学库 +cpu-mkl: 支持 CPU 训练和预测,使用 Intel mkl 数学库 -cuda10_cudnn7-mkl: 支持GPU训练和预测,使用Intel mkl数学库 +cuda10_cudnn7-mkl: 支持 GPU 训练和预测,使用 Intel mkl 数学库 - 横轴 一般是类似于“cp37-cp37m”的形式,其中: -37:python tag,指python3.7,类似的还有“36”、“38”、“39”等 +37:python tag,指 python3.7,类似的还有“36”、“38”、“39”等 -mu:指unicode版本python,若为m则指非unicode版本python +mu:指 unicode 版本 python,若为 m 则指非 unicode 版本 python - 安装包命名规则 -每个安装包都有一个专属的名字,它们是按照Python的官方规则 来命名的,形式如下: +每个安装包都有一个专属的名字,它们是按照 Python 的官方规则 来命名的,形式如下: {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl -其中build tag可以缺少,其他部分不能缺少 +其中 build tag 可以缺少,其他部分不能缺少 -distribution: wheel名称version: 版本,例如0.14.0 (要求必须是数字格式) +distribution: wheel 名称 version: 版本,例如 0.14.0 (要求必须是数字格式) -python tag: 类似'py36', 'py37', 'py38', 'py39',用于标明对应的python版本 +python tag: 类似'py36', 'py37', 'py38', 'py39',用于标明对应的 python 版本 abi tag: 类似'cp33m', 'abi3', 'none' @@ -487,7 +487,7 @@ platform tag: 类似 'linux_x86_64', 'any'

-## **多版本whl包列表-develop** +## **多版本 whl 包列表-develop**

@@ -644,10 +644,10 @@ platform tag: 类似 'linux_x86_64', 'any'

-## 在Docker中执行PaddlePaddle训练程序 +## 在 Docker 中执行 PaddlePaddle 训练程序 -假设您已经在当前目录(比如在/home/work)编写了一个PaddlePaddle的程序: `train.py` (可以参考 +假设您已经在当前目录(比如在/home/work)编写了一个 PaddlePaddle 的程序: `train.py` (可以参考 [PaddlePaddleBook](https://github.com/PaddlePaddle/book/blob/develop/01.fit_a_line/README.cn.md) 编写),就可以使用下面的命令开始执行训练: @@ -659,10 +659,10 @@ docker run -it -v $PWD:/work registry.baidubce.com/paddlepaddle/paddle /work/tra ``` 上述命令中,`-it` 参数说明容器已交互式运行;`-v $PWD:/work` -指定将当前路径(Linux中PWD变量会展开为当前路径的绝对路径)挂载到容器内部的:`/work` +指定将当前路径(Linux 中 PWD 变量会展开为当前路径的绝对路径)挂载到容器内部的:`/work` 目录: `registry.baidubce.com/paddlepaddle/paddle` 指定需要使用的容器; 最后`/work/train.py`为容器内执行的命令,即运行训练程序。 -当然,您也可以进入到Docker容器中,以交互式的方式执行或调试您的代码: +当然,您也可以进入到 Docker 容器中,以交互式的方式执行或调试您的代码: ``` docker run -it -v $PWD:/work registry.baidubce.com/paddlepaddle/paddle /bin/bash @@ -674,19 +674,19 @@ cd /work python train.py ``` -**注:PaddlePaddle Docker镜像为了减小体积,默认没有安装vim,您可以在容器中执行** `apt-get install -y vim` **安装后,在容器中编辑代码。** +**注:PaddlePaddle Docker 镜像为了减小体积,默认没有安装 vim,您可以在容器中执行** `apt-get install -y vim` **安装后,在容器中编辑代码。**

-## 使用Docker启动PaddlePaddle Book教程 +## 使用 Docker 启动 PaddlePaddle Book 教程 -使用Docker可以快速在本地启动一个包含了PaddlePaddle官方Book教程的Jupyter Notebook,可以通过网页浏览。 -PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Notebook。 -如果您想要更深入了解deep learning,可以参考PaddlePaddle Book。 +使用 Docker 可以快速在本地启动一个包含了 PaddlePaddle 官方 Book 教程的 Jupyter Notebook,可以通过网页浏览。 +PaddlePaddle Book 是为用户和开发者制作的一个交互式的 Jupyter Notebook。 +如果您想要更深入了解 deep learning,可以参考 PaddlePaddle Book。 大家可以通过它阅读教程,或者制作和分享带有代码、公式、图表、文字的交互式文档。 -我们提供可以直接运行PaddlePaddle Book的Docker镜像,直接运行: +我们提供可以直接运行 PaddlePaddle Book 的 Docker 镜像,直接运行: ``` docker run -p 8888:8888 registry.baidubce.com/paddlepaddle/book @@ -706,18 +706,18 @@ http://localhost:8888/

-## 使用Docker执行GPU训练 +## 使用 Docker 执行 GPU 训练 -为了保证GPU驱动能够在镜像里面正常运行,我们推荐使用 +为了保证 GPU 驱动能够在镜像里面正常运行,我们推荐使用 [nvidia-docker](https://github.com/NVIDIA/nvidia-docker)来运行镜像。 -请不要忘记提前在物理机上安装GPU最新驱动。 +请不要忘记提前在物理机上安装 GPU 最新驱动。 ``` nvidia-docker run -it -v $PWD:/work registry.baidubce.com/paddlepaddle/paddle:latest-gpu /bin/bash ``` -**注: 如果没有安装nvidia-docker,可以尝试以下的方法,将CUDA库和Linux设备挂载到Docker容器内:** +**注: 如果没有安装 nvidia-docker,可以尝试以下的方法,将 CUDA 库和 Linux 设备挂载到 Docker 容器内:** ``` export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') \ diff --git a/docs/install/compile/arm-compile.md b/docs/install/compile/arm-compile.md index 1b57b2f11f8..d80b30a6c8b 100644 --- a/docs/install/compile/arm-compile.md +++ b/docs/install/compile/arm-compile.md @@ -3,20 +3,20 @@ ## 环境准备 * **处理器:FT2000+/Kunpeng 920 2426SK** -* **操作系统:麒麟v10/UOS** +* **操作系统:麒麟 v10/UOS** * **Python 版本 2.7.15+/3.5.1+/3.6/3.7/3.8 (64 bit)** * **pip 或 pip3 版本 9.0.1+ (64 bit)** -飞腾FT2000+和鲲鹏920处理器均为ARMV8架构,在该架构上编译Paddle的方式一致,本文以FT2000+为例,介绍Paddle的源码编译。 +飞腾 FT2000+和鲲鹏 920 处理器均为 ARMV8 架构,在该架构上编译 Paddle 的方式一致,本文以 FT2000+为例,介绍 Paddle 的源码编译。 ## 安装步骤 -目前在FT2000+处理器加国产化操作系统(麒麟UOS)上安装Paddle,只支持源码编译的方式,接下来详细介绍各个步骤。 +目前在 FT2000+处理器加国产化操作系统(麒麟 UOS)上安装 Paddle,只支持源码编译的方式,接下来详细介绍各个步骤。 ### **源码编译** -1. Paddle依赖cmake进行编译构建,需要cmake版本>=3.15,如果操作系统提供的源包括了合适版本的cmake,直接安装即可,否则需要[源码安装](https://github.com/Kitware/CMake) +1. Paddle 依赖 cmake 进行编译构建,需要 cmake 版本>=3.15,如果操作系统提供的源包括了合适版本的 cmake,直接安装即可,否则需要[源码安装](https://github.com/Kitware/CMake) ``` wget https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8.tar.gz @@ -30,7 +30,7 @@ ./bootstrap && make && sudo make install ``` -2. Paddle内部使用patchelf来修改动态库的rpath,如果操作系统提供的源包括了patchelf,直接安装即可,否则需要源码安装,请参考[patchelf官方文档](https://github.com/NixOS/patchelf),后续会考虑在ARM上移出该依赖。 +2. 
Paddle 内部使用 patchelf 来修改动态库的 rpath,如果操作系统提供的源包括了 patchelf,直接安装即可,否则需要源码安装,请参考[patchelf 官方文档](https://github.com/NixOS/patchelf),后续会考虑在 ARM 上移出该依赖。 ``` ./bootstrap.sh @@ -52,9 +52,9 @@ sudo make install ``` -3. 根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装Python依赖库,在飞腾加国产化操作系统环境中,pip安装可能失败或不能正常工作,主要依赖通过源或源码安装的方式安装依赖库,建议使用系统提供源的方式安装依赖库。 +3. 根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装 Python 依赖库,在飞腾加国产化操作系统环境中,pip 安装可能失败或不能正常工作,主要依赖通过源或源码安装的方式安装依赖库,建议使用系统提供源的方式安装依赖库。 -4. 将Paddle的源代码克隆到当下目录下的Paddle文件夹中,并进入Paddle目录 +4. 将 Paddle 的源代码克隆到当下目录下的 Paddle 文件夹中,并进入 Paddle 目录 ``` git clone https://github.com/PaddlePaddle/Paddle.git @@ -70,7 +70,7 @@ git checkout develop ``` -6. 并且请创建并进入一个叫build的目录下: +6. 并且请创建并进入一个叫 build 的目录下: ``` mkdir build && cd build @@ -82,7 +82,7 @@ ulimit -n 4096 ``` -8. 执行cmake: +8. 执行 cmake: >具体编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) @@ -96,7 +96,7 @@ cmake .. -DPY_VERSION=3 -DPYTHON_EXECUTABLE=`which python3` -DWITH_ARM=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release -DON_INFER=ON -DWITH_XBYAK=OFF ``` -9. 使用以下命令来编译,注意,因为处理器为ARM架构,如果不加`TARGET=ARMV8`则会在编译的时候报错。 +9. 使用以下命令来编译,注意,因为处理器为 ARM 架构,如果不加`TARGET=ARMV8`则会在编译的时候报错。 ``` make TARGET=ARMV8 -j$(nproc) @@ -107,19 +107,19 @@ 11. 
在当前机器或目标机器安装编译好的`.whl`包: ``` - pip install -U(whl包的名字)`或`pip3 install -U(whl包的名字) + pip install -U(whl 包的名字)`或`pip3 install -U(whl 包的名字) ``` -恭喜,至此您已完成PaddlePaddle在FT环境下的编译安装。 +恭喜,至此您已完成 PaddlePaddle 在 FT 环境下的编译安装。 ## **验证安装** -安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle` ,再输入 +安装完成后您可以使用 `python` 或 `python3` 进入 python 解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 -在mobilenetv1和resnet50模型上测试 +在 mobilenetv1 和 resnet50 模型上测试 ``` wget -O profile.tar https://paddle-cetc15.bj.bcebos.com/profile.tar?authorization=bce-auth-v1/4409a3f3dd76482ab77af112631f01e4/2020-10-09T10:11:53Z/-1/host/786789f3445f498c6a1fd4d9cd3897ac7233700df0c6ae2fd78079eba89bf3fb @@ -143,7 +143,7 @@ python ernie.py --model_dir ernieL3H128_model/ ``` ## **如何卸载** -请使用以下命令卸载PaddlePaddle: +请使用以下命令卸载 PaddlePaddle: ``` pip uninstall paddlepaddle @@ -156,6 +156,6 @@ pip3 uninstall paddlepaddle ## **备注** -已在ARM架构下测试过resnet50, mobilenetv1, ernie, ELMo等模型,基本保证了预测使用算子的正确性,如果您在使用过程中遇到计算结果错误,编译失败等问题,请到[issue](https://github.com/PaddlePaddle/Paddle/issues)中留言,我们会及时解决。 +已在 ARM 架构下测试过 resnet50, mobilenetv1, ernie, ELMo 等模型,基本保证了预测使用算子的正确性,如果您在使用过程中遇到计算结果错误,编译失败等问题,请到[issue](https://github.com/PaddlePaddle/Paddle/issues)中留言,我们会及时解决。 预测文档见[doc](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/native_infer.html),使用示例见[Paddle-Inference-Demo](https://github.com/PaddlePaddle/Paddle-Inference-Demo) diff --git a/docs/install/compile/linux-compile.md b/docs/install/compile/linux-compile.md index eb514ab975d..f123830b2db 100644 --- a/docs/install/compile/linux-compile.md +++ b/docs/install/compile/linux-compile.md @@ -1,143 +1,143 @@ -# **Linux下从源码编译** +# **Linux 下从源码编译** ## 环境准备 * **Linux 版本 (64 bit)** * **CentOS 6 (不推荐,不提供编译出现问题时的官方支持)** - * **CentOS 7 (GPU 版本支持CUDA 10.1/10.2/11.0/11.1/11.2)** + * **CentOS 7 (GPU 版本支持 CUDA 10.1/10.2/11.0/11.1/11.2)** * **Ubuntu 14.04 (不推荐,不提供编译出现问题时的官方支持)** * 
**Ubuntu 16.04 (GPU 版本支持 CUDA 10.1/10.2/11.0/11.1/11.2)** * **Ubuntu 18.04 (GPU 版本支持 CUDA 10.1/10.2/11.0/11.1/11.2)** * **Python 版本 3.6/3.7/3.8/3.9 (64 bit)** -## 选择CPU/GPU +## 选择 CPU/GPU -* 如果您的计算机没有 NVIDIA® GPU,请安装CPU版本的PaddlePaddle +* 如果您的计算机没有 NVIDIA® GPU,请安装 CPU 版本的 PaddlePaddle -* 如果您的计算机有NVIDIA® GPU,请确保满足以下条件以编译GPU版PaddlePaddle +* 如果您的计算机有 NVIDIA® GPU,请确保满足以下条件以编译 GPU 版 PaddlePaddle - * **CUDA 工具包10.1/10.2配合cuDNN 7 (cuDNN版本>=7.6.5, 如需多卡支持,需配合NCCL2.7及更高)** - * **CUDA 工具包11.0配合cuDNN v8.0.4(如需多卡支持,需配合NCCL2.7及更高)** - * **CUDA 工具包11.1配合cuDNN v8.1.1(如需多卡支持,需配合NCCL2.7及更高)** - * **CUDA 工具包11.2配合cuDNN v8.1.1(如需多卡支持,需配合NCCL2.7及更高)** - * **GPU运算能力超过3.5的硬件设备** + * **CUDA 工具包 10.1/10.2 配合 cuDNN 7 (cuDNN 版本>=7.6.5, 如需多卡支持,需配合 NCCL2.7 及更高)** + * **CUDA 工具包 11.0 配合 cuDNN v8.0.4(如需多卡支持,需配合 NCCL2.7 及更高)** + * **CUDA 工具包 11.1 配合 cuDNN v8.1.1(如需多卡支持,需配合 NCCL2.7 及更高)** + * **CUDA 工具包 11.2 配合 cuDNN v8.1.1(如需多卡支持,需配合 NCCL2.7 及更高)** + * **GPU 运算能力超过 3.5 的硬件设备** - 您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + 您可参考 NVIDIA 官方文档了解 CUDA 和 CUDNN 的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) ## 安装步骤 -在Linux的系统下有2种编译方式,推荐使用Docker编译。 -Docker环境中已预装好编译Paddle需要的各种依赖,相较本机编译环境更简单。 +在 Linux 的系统下有 2 种编译方式,推荐使用 Docker 编译。 +Docker 环境中已预装好编译 Paddle 需要的各种依赖,相较本机编译环境更简单。 -* [使用Docker编译](#compile_from_docker)(不提供在CentOS 6下编译中遇到问题的支持) -* [本机编译](#compile_from_host)(不提供在CentOS 6下编译中遇到问题的支持) +* [使用 Docker 编译](#compile_from_docker)(不提供在 CentOS 6 下编译中遇到问题的支持) +* [本机编译](#compile_from_host)(不提供在 CentOS 6 下编译中遇到问题的支持) -### 使用Docker编译 +### 使用 Docker 编译 -[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用Docker,既可以将PaddlePaddle的安装&使用与系统环境隔离,也可以与主机共享GPU、网络等资源 +[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用 Docker,既可以将 PaddlePaddle 的安装&使用与系统环境隔离,也可以与主机共享 GPU、网络等资源 
-使用Docker编译PaddlePaddle,您需要: +使用 Docker 编译 PaddlePaddle,您需要: -- 在本地主机上[安装Docker](https://docs.docker.com/engine/install/) +- 在本地主机上[安装 Docker](https://docs.docker.com/engine/install/) -- 如需在Linux开启GPU支持,请[安装nvidia-docker](https://github.com/NVIDIA/nvidia-docker) +- 如需在 Linux 开启 GPU 支持,请[安装 nvidia-docker](https://github.com/NVIDIA/nvidia-docker) 请您按照以下步骤安装: -#### 1. 请首先选择您希望储存PaddlePaddle的路径,然后在该路径下使用以下命令将PaddlePaddle的源码从github克隆到本地当前目录下名为Paddle的文件夹中: +#### 1. 请首先选择您希望储存 PaddlePaddle 的路径,然后在该路径下使用以下命令将 PaddlePaddle 的源码从 github 克隆到本地当前目录下名为 Paddle 的文件夹中: ``` git clone https://github.com/PaddlePaddle/Paddle.git ``` -#### 2. 进入Paddle目录下: +#### 2. 进入 Paddle 目录下: ``` cd Paddle ``` -#### 3. 拉取PaddlePaddle镜像 +#### 3. 拉取 PaddlePaddle 镜像 -对于国内用户,因为网络问题下载docker比较慢时,可使用百度提供的镜像: +对于国内用户,因为网络问题下载 docker 比较慢时,可使用百度提供的镜像: -* CPU版的PaddlePaddle: +* CPU 版的 PaddlePaddle: ``` docker pull registry.baidubce.com/paddlepaddle/paddle:latest-dev ``` -* GPU版的PaddlePaddle: +* GPU 版的 PaddlePaddle: ``` nvidia-docker pull registry.baidubce.com/paddlepaddle/paddle:latest-gpu-cuda10.2-cudnn7-dev ``` -如果您的机器不在中国大陆地区,可以直接从DockerHub拉取镜像: +如果您的机器不在中国大陆地区,可以直接从 DockerHub 拉取镜像: -* CPU版的PaddlePaddle: +* CPU 版的 PaddlePaddle: ``` docker pull paddlepaddle/paddle:latest-dev ``` -* GPU版的PaddlePaddle: +* GPU 版的 PaddlePaddle: ``` nvidia-docker pull paddlepaddle/paddle:latest-gpu-cuda10.2-cudnn7-dev ``` -上例中,`latest-gpu-cuda10.2-cudnn7-dev` 仅作示意用,表示安装GPU版的镜像。如果您还想安装其他cuda/cudnn版本的镜像,可以将其替换成`latest-dev-cuda11.2-cudnn8-gcc82`、`latest-gpu-cuda10.1-cudnn7-gcc82-dev`、`latest-gpu-cuda10.1-cudnn7-gcc54-dev`等。 +上例中,`latest-gpu-cuda10.2-cudnn7-dev` 仅作示意用,表示安装 GPU 版的镜像。如果您还想安装其他 cuda/cudnn 版本的镜像,可以将其替换成`latest-dev-cuda11.2-cudnn8-gcc82`、`latest-gpu-cuda10.1-cudnn7-gcc82-dev`、`latest-gpu-cuda10.1-cudnn7-gcc54-dev`等。 您可以访问[DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/)获取与您机器适配的镜像。 -#### 4. 创建并进入已配置好编译环境的Docker容器: +#### 4. 
创建并进入已配置好编译环境的 Docker 容器: -* 编译CPU版本的PaddlePaddle: +* 编译 CPU 版本的 PaddlePaddle: ``` docker run --name paddle-test -v $PWD:/paddle --network=host -it registry.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash ``` - - `--name paddle-test`:为您创建的Docker容器命名为paddle-test; + - `--name paddle-test`:为您创建的 Docker 容器命名为 paddle-test; - - `-v $PWD:/paddle`: 将当前目录挂载到Docker容器中的/paddle目录下(Linux中PWD变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)); + - `-v $PWD:/paddle`: 将当前目录挂载到 Docker 容器中的/paddle 目录下(Linux 中 PWD 变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)); - `-it`: 与宿主机保持交互状态; - - `registry.baidubce.com/paddlepaddle/paddle:latest-dev`:使用名为`registry.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 + - `registry.baidubce.com/paddlepaddle/paddle:latest-dev`:使用名为`registry.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建 Docker 容器,/bin/bash 进入容器后启动/bin/bash 命令。 -* 编译GPU版本的PaddlePaddle: +* 编译 GPU 版本的 PaddlePaddle: ``` nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it registry.baidubce.com/paddlepaddle/paddle:latest-gpu-cuda10.2-cudnn7-dev /bin/bash ``` - - `--name paddle-test`:为您创建的Docker容器命名为paddle-test; + - `--name paddle-test`:为您创建的 Docker 容器命名为 paddle-test; - - `-v $PWD:/paddle`: 将当前目录挂载到Docker容器中的/paddle目录下(Linux中PWD变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)); + - `-v $PWD:/paddle`: 将当前目录挂载到 Docker 容器中的/paddle 目录下(Linux 中 PWD 变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)); - `-it`: 与宿主机保持交互状态; - - `registry.baidubce.com/paddlepaddle/paddle:latest-gpu-cuda10.2-cudnn7-dev`:使用名为`registry.baidubce.com/paddlepaddle/paddle:latest-gpu-cuda10.2-cudnn7-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令。 + - `registry.baidubce.com/paddlepaddle/paddle:latest-gpu-cuda10.2-cudnn7-dev`:使用名为`registry.baidubce.com/paddlepaddle/paddle:latest-gpu-cuda10.2-cudnn7-dev`的镜像创建 Docker 容器,/bin/bash 进入容器后启动/bin/bash 命令。 注意: -请确保至少为docker分配4g以上的内存,否则编译过程可能因内存不足导致失败。 +请确保至少为 docker 分配 4g 
以上的内存,否则编译过程可能因内存不足导致失败。 -#### 5. 进入Docker后进入paddle目录下: +#### 5. 进入 Docker 后进入 paddle 目录下: ``` cd /paddle ``` -#### 6. 切换到develop版本进行编译: +#### 6. 切换到 develop 版本进行编译: ``` git checkout develop ``` -注意:python3.6、python3.7版本从release/1.2分支开始支持, python3.8版本从release/1.8分支开始支持, python3.9版本从release/2.1分支开始支持 +注意:python3.6、python3.7 版本从 release/1.2 分支开始支持, python3.8 版本从 release/1.8 分支开始支持, python3.9 版本从 release/2.1 分支开始支持 -#### 7. 创建并进入/paddle/build路径下: +#### 7. 创建并进入/paddle/build 路径下: ``` mkdir -p /paddle/build && cd /paddle/build @@ -145,37 +145,37 @@ mkdir -p /paddle/build && cd /paddle/build #### 8. 使用以下命令安装相关依赖: -- 安装protobuf。 +- 安装 protobuf。 ``` pip3.7 install protobuf ``` -注意:以上用Python3.7命令来举例,如您的Python版本为3.6/3.8/3.9,请将上述命令中的pip3.7改成pip3.6/pip3.8/pip3.9 +注意:以上用 Python3.7 命令来举例,如您的 Python 版本为 3.6/3.8/3.9,请将上述命令中的 pip3.7 改成 pip3.6/pip3.8/pip3.9 -- 安装patchelf,PatchELF 是一个小而实用的程序,用于修改ELF可执行文件的动态链接器和RPATH。 +- 安装 patchelf,PatchELF 是一个小而实用的程序,用于修改 ELF 可执行文件的动态链接器和 RPATH。 ``` apt install patchelf ``` -#### 9. 执行cmake: +#### 9. 执行 cmake: -* 对于需要编译**CPU版本PaddlePaddle**的用户: +* 对于需要编译**CPU 版本 PaddlePaddle**的用户: ``` cmake .. -DPY_VERSION=3.7 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release ``` -* 对于需要编译**GPU版本PaddlePaddle**的用户: +* 对于需要编译**GPU 版本 PaddlePaddle**的用户: ``` cmake .. -DPY_VERSION=3.7 -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release ``` - 具体编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) -- 请注意修改参数`-DPY_VERSION`为您希望编译使用的python版本, 例如`-DPY_VERSION=3.7`表示python版本为3.7 +- 请注意修改参数`-DPY_VERSION`为您希望编译使用的 python 版本, 例如`-DPY_VERSION=3.7`表示 python 版本为 3.7 -- 我们目前不支持CentOS 6下使用Docker编译GPU版本的PaddlePaddle +- 我们目前不支持 CentOS 6 下使用 Docker 编译 GPU 版本的 PaddlePaddle #### 10. 执行编译: @@ -186,7 +186,7 @@ make -j$(nproc) ``` 注意: -编译过程中需要从github上下载依赖,请确保您的编译环境能正常从github下载代码。 +编译过程中需要从 github 上下载依赖,请确保您的编译环境能正常从 github 下载代码。 #### 11. 
编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: @@ -199,13 +199,13 @@ cd /paddle/build/python/dist For Python3: ``` -pip3.7 install -U [whl包的名字] +pip3.7 install -U [whl 包的名字] ``` 注意: -以上用Python3.7命令来举例,如您的Python版本为3.6/3.8/3.9,请将上述命令中的pip3.7改成pip3.6/pip3.8/pip3.9。 +以上用 Python3.7 命令来举例,如您的 Python 版本为 3.6/3.8/3.9,请将上述命令中的 pip3.7 改成 pip3.6/pip3.8/pip3.9。 -#### 恭喜,至此您已完成PaddlePaddle的编译安装。您只需要进入Docker容器后运行PaddlePaddle,即可开始使用。更多Docker使用请参见[Docker官方文档](https://docs.docker.com) +#### 恭喜,至此您已完成 PaddlePaddle 的编译安装。您只需要进入 Docker 容器后运行 PaddlePaddle,即可开始使用。更多 Docker 使用请参见[Docker 官方文档](https://docs.docker.com) @@ -227,7 +227,7 @@ uname -m && cat /etc/*release yum update ``` - 并添加必要的yum源: + 并添加必要的 yum 源: ``` yum install -y epel-release @@ -242,9 +242,9 @@ uname -m && cat /etc/*release ``` -#### 3. 安装NCCL(可选) +#### 3. 安装 NCCL(可选) -* 如果您需要使用GPU多卡,请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是CUDA10.2,cuDNN7下nccl2的安装指令,更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl)): +* 如果您需要使用 GPU 多卡,请确保您已经正确安装 nccl2,或者按照以下指令安装 nccl2(这里提供的是 CUDA10.2,cuDNN7 下 nccl2 的安装指令,更多版本的安装信息请参考 NVIDIA[官方网站](https://developer.nvidia.com/nccl)): * **CentOS 系统可以参考以下命令** @@ -292,7 +292,7 @@ uname -m && cat /etc/*release yum install -y make ``` - cmake 需要3.15以上,建议使用3.16.0: + cmake 需要 3.15 以上,建议使用 3.16.0: ``` wget -q https://cmake.org/files/v3.16/cmake-3.16.0-Linux-x86_64.tar.gz @@ -310,7 +310,7 @@ uname -m && cat /etc/*release PATH=/home/cmake-3.16.0-Linux-x86_64/bin:$PATH ``` - gcc 需要5.4以上,建议使用8.2.0: + gcc 需要 5.4 以上,建议使用 8.2.0: ``` wget -q https://paddle-docker-tar.bj.bcebos.com/home/users/tianshuo/bce-python-sdk-0.8.27/gcc-8.2.0.tar.xz && \ @@ -335,7 +335,7 @@ uname -m && cat /etc/*release apt install -y make ``` - cmake 需要3.15以上,建议使用3.16.0: + cmake 需要 3.15 以上,建议使用 3.16.0: ``` wget -q https://cmake.org/files/v3.16/cmake-3.16.0-Linux-x86_64.tar.gz @@ -353,7 +353,7 @@ uname -m && cat /etc/*release PATH=/home/cmake-3.16.0-Linux-x86_64/bin:$PATH ``` - gcc 需要5.4以上,建议使用8.2.0: + gcc 需要 5.4 以上,建议使用 8.2.0: ``` wget 
-q https://paddle-docker-tar.bj.bcebos.com/home/users/tianshuo/bce-python-sdk-0.8.27/gcc-8.2.0.tar.xz && \ @@ -367,26 +367,26 @@ uname -m && cat /etc/*release make -j8 && make install ``` -#### 5. 我们支持使用virtualenv进行编译安装,首先请使用以下命令创建一个名为`paddle-venv`的虚环境: +#### 5. 我们支持使用 virtualenv 进行编译安装,首先请使用以下命令创建一个名为`paddle-venv`的虚环境: -* a. 安装Python-dev: +* a. 安装 Python-dev: - (请参照Python官方流程安装) + (请参照 Python 官方流程安装) -* b. 安装pip: +* b. 安装 pip: - (请参照Python官方流程安装, 并保证拥有20.2.2及以上的pip3版本,请注意,python3.6及以上版本环境下,pip3并不一定对应python版本,如python3.7下默认只有pip3.7) + (请参照 Python 官方流程安装, 并保证拥有 20.2.2 及以上的 pip3 版本,请注意,python3.6 及以上版本环境下,pip3 并不一定对应 python 版本,如 python3.7 下默认只有 pip3.7) -* c.(Only For Python3)设置Python3相关的环境变量,这里以python3.7版本示例,请替换成您使用的版本(3.6、3.8、3.9): +* c.(Only For Python3)设置 Python3 相关的环境变量,这里以 python3.7 版本示例,请替换成您使用的版本(3.6、3.8、3.9): 1. 首先使用 ``` find `dirname $(dirname $(which python3))` -name "libpython3.so" ``` - 找到Python lib的路径,如果是3.6、3.7、3.8、3.9,请将`python3`改成`python3.6`、`python3.7`、`python3.8`、`python3.9`,然后将下面[python-lib-path]替换为找到文件路径 + 找到 Python lib 的路径,如果是 3.6、3.7、3.8、3.9,请将`python3`改成`python3.6`、`python3.7`、`python3.8`、`python3.9`,然后将下面[python-lib-path]替换为找到文件路径 - 2. 设置PYTHON_LIBRARIES: + 2. 设置 PYTHON_LIBRARIES: ``` export PYTHON_LIBRARY=[python-lib-path] ``` @@ -395,9 +395,9 @@ uname -m && cat /etc/*release ``` find `dirname $(dirname $(which python3))`/include -name "python3.7m" ``` - 找到Python Include的路径,请注意python版本,然后将下面[python-include-path]替换为找到文件路径 + 找到 Python Include 的路径,请注意 python 版本,然后将下面[python-include-path]替换为找到文件路径 - 4. 设置PYTHON_INCLUDE_DIR: + 4. 设置 PYTHON_INCLUDE_DIR: ``` export PYTHON_INCLUDE_DIRS=[python-include-path] ``` @@ -408,7 +408,7 @@ uname -m && cat /etc/*release ``` (这里将[python-lib-path]的最后两级目录替换为/bin/) -* d. 安装虚环境`virtualenv`以及`virtualenvwrapper`并创建名为`paddle-venv`的虚环境:(请注意对应python版本的pip3的命令,如pip3.6、pip3.7、pip3.8、pip3.9) +* d. 安装虚环境`virtualenv`以及`virtualenvwrapper`并创建名为`paddle-venv`的虚环境:(请注意对应 python 版本的 pip3 的命令,如 pip3.6、pip3.7、pip3.8、pip3.9) 1. 
安装`virtualenv` ``` @@ -430,14 +430,14 @@ uname -m && cat /etc/*release ``` find / -name virtualenvwrapper.sh ``` - (请找到对应Python版本的`virtualenvwrapper.sh`) + (请找到对应 Python 版本的`virtualenvwrapper.sh`) 4. 查看`virtualenvwrapper.sh`中的安装方法: ``` cat vitualenvwrapper.sh ``` - 该shell文件中描述了步骤及命令 + 该 shell 文件中描述了步骤及命令 5. 按照`virtualenvwrapper.sh`中的描述,安装`virtualwrapper` - 6. 设置VIRTUALENVWRAPPER_PYTHON: + 6. 设置 VIRTUALENVWRAPPER_PYTHON: ``` export VIRTUALENVWRAPPER_PYTHON=[python-lib-path]:$PATH ``` @@ -460,9 +460,9 @@ workon paddle-venv ``` yum install patchelf ``` - > 不能使用yum安装的用户请参见patchElF github[官方文档](https://gist.github.com/ruario/80fefd174b3395d34c14) + > 不能使用 yum 安装的用户请参见 patchElF github[官方文档](https://gist.github.com/ruario/80fefd174b3395d34c14) -#### 8. 将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中,并进入Padde目录下: +#### 8. 将 PaddlePaddle 的源码 clone 在当下目录下的 Paddle 的文件夹中,并进入 Padde 目录下: ``` git clone https://github.com/PaddlePaddle/Paddle.git @@ -472,23 +472,23 @@ git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle ``` -#### 9. 切换到develop分支进行编译: +#### 9. 切换到 develop 分支进行编译: ``` git checkout develop ``` -#### 10. 并且请创建并进入一个叫build的目录下: +#### 10. 并且请创建并进入一个叫 build 的目录下: ``` mkdir build && cd build ``` -#### 11. 执行cmake: +#### 11. 
执行 cmake: >具体编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) -* 对于需要编译**CPU版本PaddlePaddle**的用户: +* 对于需要编译**CPU 版本 PaddlePaddle**的用户: ``` @@ -496,13 +496,13 @@ mkdir build && cd build -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release ``` - > 如果遇到`Could NOT find PROTOBUF (missing: PROTOBUF_LIBRARY PROTOBUF_INCLUDE_DIR)`可以重新执行一次cmake指令。 - > 请注意PY_VERSION参数更换为您需要的python版本 + > 如果遇到`Could NOT find PROTOBUF (missing: PROTOBUF_LIBRARY PROTOBUF_INCLUDE_DIR)`可以重新执行一次 cmake 指令。 + > 请注意 PY_VERSION 参数更换为您需要的 python 版本 -* 对于需要编译**GPU版本PaddlePaddle**的用户:(**仅支持CentOS7(CUDA11.2/CUDA11.0/CUDA10.2/CUDA10.1)**) +* 对于需要编译**GPU 版本 PaddlePaddle**的用户:(**仅支持 CentOS7(CUDA11.2/CUDA11.0/CUDA10.2/CUDA10.1)**) - 1. 请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是CUDA10.2,cuDNN7下nccl2的安装指令,更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl)): + 1. 请确保您已经正确安装 nccl2,或者按照以下指令安装 nccl2(这里提供的是 CUDA10.2,cuDNN7 下 nccl2 的安装指令,更多版本的安装信息请参考 NVIDIA[官方网站](https://developer.nvidia.com/nccl)): * **CentOS 系统可以参考以下命令** @@ -532,13 +532,13 @@ mkdir build && cd build sudo apt install -y libnccl2=2.7.8-1+cuda10.2 libnccl-dev=2.7.8-1+cuda10.2 ``` - 2. 如果您已经正确安装了`nccl2`,就可以开始cmake了:(*For Python3: 请给PY_VERSION参数配置正确的python版本*) + 2. 如果您已经正确安装了`nccl2`,就可以开始 cmake 了:(*For Python3: 请给 PY_VERSION 参数配置正确的 python 版本*) ``` - cmake .. -DPYTHON_EXECUTABLE:FILEPATH=[您可执行的Python3的路径] -DPYTHON_INCLUDE_DIR:PATH=[之前的PYTHON_INCLUDE_DIRS] -DPYTHON_LIBRARY:FILEPATH=[之前的PYTHON_LIBRARY] -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + cmake .. 
-DPYTHON_EXECUTABLE:FILEPATH=[您可执行的 Python3 的路径] -DPYTHON_INCLUDE_DIR:PATH=[之前的 PYTHON_INCLUDE_DIRS] -DPYTHON_LIBRARY:FILEPATH=[之前的 PYTHON_LIBRARY] -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release ``` -注意:以上涉及Python3的命令,用Python3.7来举例,如您的Python版本为3.6/3.8/3.9,请将上述命令中的Python3.7改成Python3.6/Python3.8/Python3.9 +注意:以上涉及 Python3 的命令,用 Python3.7 来举例,如您的 Python 版本为 3.6/3.8/3.9,请将上述命令中的 Python3.7 改成 Python3.6/Python3.8/Python3.9 @@ -551,7 +551,7 @@ make -j$(nproc) > 使用多核编译 -> 如果编译过程中显示“Too many open files”错误时,请使用指令 ulimit -n 8192 来增大当前进程允许打开的文件数,一般来说8192可以保证编译完成。 +> 如果编译过程中显示“Too many open files”错误时,请使用指令 ulimit -n 8192 来增大当前进程允许打开的文件数,一般来说 8192 可以保证编译完成。 #### 13. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: ``` @@ -561,17 +561,17 @@ cd /paddle/build/python/dist #### 14. 在当前机器或目标机器安装编译好的`.whl`包: ``` -pip install -U(whl包的名字) +pip install -U(whl 包的名字) ``` 或 ``` -pip3 install -U(whl包的名字) +pip3 install -U(whl 包的名字) ``` -#### 恭喜,至此您已完成PaddlePaddle的编译安装 +#### 恭喜,至此您已完成 PaddlePaddle 的编译安装 ## **验证安装** -安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入 +安装完成后您可以使用 `python` 或 `python3` 进入 python 解释器,输入 ``` import paddle @@ -586,9 +586,9 @@ paddle.utils.run_check() 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 ## **如何卸载** -请使用以下命令卸载PaddlePaddle: +请使用以下命令卸载 PaddlePaddle: -* **CPU版本的PaddlePaddle**: +* **CPU 版本的 PaddlePaddle**: ``` pip uninstall paddlepaddle ``` @@ -597,7 +597,7 @@ paddle.utils.run_check() pip3 uninstall paddlepaddle ``` -* **GPU版本的PaddlePaddle**: +* **GPU 版本的 PaddlePaddle**: ``` pip uninstall paddlepaddle-gpu ``` @@ -606,4 +606,4 @@ paddle.utils.run_check() pip3 uninstall paddlepaddle-gpu ``` -使用Docker安装PaddlePaddle的用户,请进入包含PaddlePaddle的容器中使用上述命令,注意使用对应版本的pip +使用 Docker 安装 PaddlePaddle 的用户,请进入包含 PaddlePaddle 的容器中使用上述命令,注意使用对应版本的 pip diff --git a/docs/install/compile/linux-compile_en.md b/docs/install/compile/linux-compile_en.md index 193d8546ecc..76d087c0732 100644 --- a/docs/install/compile/linux-compile_en.md +++ 
b/docs/install/compile/linux-compile_en.md @@ -507,7 +507,7 @@ mkdir build && cd build ``` - cmake .. -DPYTHON_EXECUTABLE:FILEPATH=[您可执行的Python3的路径] -DPYTHON_INCLUDE_DIR:PATH=[之前的PYTHON_INCLUDE_DIRS] -DPYTHON_LIBRARY:FILEPATH=[之前的PYTHON_LIBRARY] -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release + cmake .. -DPYTHON_EXECUTABLE:FILEPATH=[您可执行的 Python3 的路径] -DPYTHON_INCLUDE_DIR:PATH=[之前的 PYTHON_INCLUDE_DIRS] -DPYTHON_LIBRARY:FILEPATH=[之前的 PYTHON_LIBRARY] -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release ``` diff --git a/docs/install/compile/macos-compile.md b/docs/install/compile/macos-compile.md index b0c514ffc9a..8493c43afe0 100644 --- a/docs/install/compile/macos-compile.md +++ b/docs/install/compile/macos-compile.md @@ -1,59 +1,59 @@ -# **macOS下从源码编译** +# **macOS 下从源码编译** ## 环境准备 -* **macOS 版本 10.x/11.x (64 bit) (不支持GPU版本)** +* **macOS 版本 10.x/11.x (64 bit) (不支持 GPU 版本)** * **Python 版本 3.6/3.7/3.8/3.9 (64 bit)** -## 选择CPU/GPU +## 选择 CPU/GPU * 目前仅支持在 macOS 环境下编译安装 CPU 版本的 PaddlePaddle ## 安装步骤 -在 macOS 系统下有2种编译方式,推荐使用Docker编译。 -Docker环境中已预装好编译Paddle需要的各种依赖,相较本机编译环境更简单。 +在 macOS 系统下有 2 种编译方式,推荐使用 Docker 编译。 +Docker 环境中已预装好编译 Paddle 需要的各种依赖,相较本机编译环境更简单。 -* [Docker源码编译](#compile_from_docker) +* [Docker 源码编译](#compile_from_docker) * [本机源码编译](#compile_from_host) -### **使用Docker编译** +### **使用 Docker 编译** -[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用Docker,既可以将PaddlePaddle的安装&使用与系统环境隔离,也可以与主机共享GPU、网络等资源 +[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用 Docker,既可以将 PaddlePaddle 的安装&使用与系统环境隔离,也可以与主机共享 GPU、网络等资源 -使用Docker编译PaddlePaddle,您需要: +使用 Docker 编译 PaddlePaddle,您需要: -- 在本地主机上[安装Docker](https://docs.docker.com/engine/install/) +- 在本地主机上[安装 Docker](https://docs.docker.com/engine/install/) -- 使用Docker ID登陆Docker,以避免出现`Authenticate Failed`错误 +- 使用 Docker ID 登陆 Docker,以避免出现`Authenticate Failed`错误 请您按照以下步骤安装: -#### 1. 进入Mac的终端 +#### 1. 进入 Mac 的终端 -#### 2. 
请选择您希望储存PaddlePaddle的路径,然后在该路径下使用以下命令将PaddlePaddle的源码从github克隆到本地当前目录下名为Paddle的文件夹中: +#### 2. 请选择您希望储存 PaddlePaddle 的路径,然后在该路径下使用以下命令将 PaddlePaddle 的源码从 github 克隆到本地当前目录下名为 Paddle 的文件夹中: ``` git clone https://github.com/PaddlePaddle/Paddle.git ``` -#### 3. 进入Paddle目录下: +#### 3. 进入 Paddle 目录下: ``` cd Paddle ``` -#### 4. 拉取PaddlePaddle镜像 +#### 4. 拉取 PaddlePaddle 镜像 -对于国内用户,因为网络问题下载docker比较慢时,可使用百度提供的镜像: +对于国内用户,因为网络问题下载 docker 比较慢时,可使用百度提供的镜像: -* CPU版的PaddlePaddle: +* CPU 版的 PaddlePaddle: ``` docker pull registry.baidubce.com/paddlepaddle/paddle:latest-dev ``` -如果您的机器不在中国大陆地区,可以直接从DockerHub拉取镜像: +如果您的机器不在中国大陆地区,可以直接从 DockerHub 拉取镜像: -* CPU版的PaddlePaddle: +* CPU 版的 PaddlePaddle: ``` docker pull paddlepaddle/paddle:latest-dev ``` @@ -61,38 +61,38 @@ cd Paddle 您可以访问[DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/)获取与您机器适配的镜像。 -#### 5. 创建并进入满足编译环境的Docker容器: +#### 5. 创建并进入满足编译环境的 Docker 容器: ``` docker run --name paddle-test -v $PWD:/paddle --network=host -it registry.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash ``` -- `--name paddle-test`:为您创建的Docker容器命名为paddle-test +- `--name paddle-test`:为您创建的 Docker 容器命名为 paddle-test -- `-v:$PWD:/paddle`:将当前目录挂载到Docker容器中的/paddle目录下(Linux中PWD变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)) +- `-v:$PWD:/paddle`:将当前目录挂载到 Docker 容器中的/paddle 目录下(Linux 中 PWD 变量会展开为当前路径的[绝对路径](https://baike.baidu.com/item/绝对路径/481185)) - `-it`:与宿主机保持交互状态 -- `registry.baidubce.com/paddlepaddle/paddle:latest-dev`:使用名为`registry.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建Docker容器,/bin/bash 进入容器后启动/bin/bash命令 +- `registry.baidubce.com/paddlepaddle/paddle:latest-dev`:使用名为`registry.baidubce.com/paddlepaddle/paddle:latest-dev`的镜像创建 Docker 容器,/bin/bash 进入容器后启动/bin/bash 命令 注意: -请确保至少为docker分配4g以上的内存,否则编译过程可能因内存不足导致失败。您可以在docker用户界面的“Preferences-Resources”中设置容器的内存分配上限。 +请确保至少为 docker 分配 4g 以上的内存,否则编译过程可能因内存不足导致失败。您可以在 docker 用户界面的“Preferences-Resources”中设置容器的内存分配上限。 -#### 6. 进入Docker后进入paddle目录下: +#### 6. 
进入 Docker 后进入 paddle 目录下: ``` cd /paddle ``` -#### 7. 切换到develop版本进行编译: +#### 7. 切换到 develop 版本进行编译: ``` git checkout develop ``` -注意:python3.6、python3.7版本从release/1.2分支开始支持, python3.8版本从release/1.8分支开始支持, python3.9版本从release/2.1分支开始支持 +注意:python3.6、python3.7 版本从 release/1.2 分支开始支持, python3.8 版本从 release/1.8 分支开始支持, python3.9 版本从 release/2.1 分支开始支持 -#### 8. 创建并进入/paddle/build路径下: +#### 8. 创建并进入/paddle/build 路径下: ``` mkdir -p /paddle/build && cd /paddle/build @@ -100,30 +100,30 @@ mkdir -p /paddle/build && cd /paddle/build #### 9. 使用以下命令安装相关依赖: -- 安装protobuf 3.1.0。 +- 安装 protobuf 3.1.0。 ``` pip3.7 install protobuf==3.1.0 ``` -注意:以上用Python3.7命令来举例,如您的Python版本为3.6/3.8/3.9,请将上述命令中的pip3.7改成pip3.6/pip3.8/pip3.9 +注意:以上用 Python3.7 命令来举例,如您的 Python 版本为 3.6/3.8/3.9,请将上述命令中的 pip3.7 改成 pip3.6/pip3.8/pip3.9 -- 安装patchelf,PatchELF 是一个小而实用的程序,用于修改ELF可执行文件的动态链接器和RPATH。 +- 安装 patchelf,PatchELF 是一个小而实用的程序,用于修改 ELF 可执行文件的动态链接器和 RPATH。 ``` apt install patchelf ``` -#### 10. 执行cmake: +#### 10. 执行 cmake: -* 对于需要编译**CPU版本PaddlePaddle**的用户(我们目前不支持 macOS 下 GPU 版本 PaddlePaddle 的编译): +* 对于需要编译**CPU 版本 PaddlePaddle**的用户(我们目前不支持 macOS 下 GPU 版本 PaddlePaddle 的编译): ``` cmake .. -DPY_VERSION=3.7 -DWITH_GPU=OFF -DWITH_TESTING=OFF -DWITH_AVX=OFF -DCMAKE_BUILD_TYPE=Release ``` - 具体编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) -- 请注意修改参数`-DPY_VERSION`为您希望编译使用的python版本, 例如`-DPY_VERSION=3.7`表示python版本为3.7 +- 请注意修改参数`-DPY_VERSION`为您希望编译使用的 python 版本, 例如`-DPY_VERSION=3.7`表示 python 版本为 3.7 #### 11. 执行编译: @@ -134,7 +134,7 @@ make -j$(nproc) ``` 注意: -编译过程中需要从github上下载依赖,请确保您的编译环境能正常从github下载代码。 +编译过程中需要从 github 上下载依赖,请确保您的编译环境能正常从 github 下载代码。 #### 12. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的`.whl`包: ``` @@ -144,13 +144,13 @@ cd /paddle/build/python/dist #### 13. 
在当前机器或目标机器安装编译好的`.whl`包: ``` -pip3.7 install -U [whl包的名字] +pip3.7 install -U [whl 包的名字] ``` 注意: -以上用Python3.7命令来举例,如您的Python版本为3.6/3.8/3.9,请将上述命令中的pip3.7改成pip3.6/pip3.8/pip3.9。 +以上用 Python3.7 命令来举例,如您的 Python 版本为 3.6/3.8/3.9,请将上述命令中的 pip3.7 改成 pip3.6/pip3.8/pip3.9。 -#### 恭喜,至此您已完成PaddlePaddle的编译安装。您只需要进入Docker容器后运行PaddlePaddle,即可开始使用。更多Docker使用请参见[Docker官方文档](https://docs.docker.com) +#### 恭喜,至此您已完成 PaddlePaddle 的编译安装。您只需要进入 Docker 容器后运行 PaddlePaddle,即可开始使用。更多 Docker 使用请参见[Docker 官方文档](https://docs.docker.com) @@ -165,30 +165,30 @@ uname -m ``` 并且在`关于本机`中查看系统版本。并提前安装[OpenCV](https://opencv.org/releases.html) -#### 2. 安装Python以及pip: +#### 2. 安装 Python 以及 pip: -> **请不要使用 macOS 中自带Python**,我们强烈建议您使用[Homebrew](https://brew.sh)安装python(对于**Python3**请使用python[官方下载](https://www.python.org/downloads/mac-osx/)python3.6.x、python3.7.x、python3.8、python3.9), pip以及其他的依赖,这将会使您高效编译。 +> **请不要使用 macOS 中自带 Python**,我们强烈建议您使用[Homebrew](https://brew.sh)安装 python(对于**Python3**请使用 python[官方下载](https://www.python.org/downloads/mac-osx/)python3.6.x、python3.7.x、python3.8、python3.9), pip 以及其他的依赖,这将会使您高效编译。 -使用Python官网安装 +使用 Python 官网安装 -> 请注意,当您的mac上安装有多个python时请保证您正在使用的python是您希望使用的python。 +> 请注意,当您的 mac 上安装有多个 python 时请保证您正在使用的 python 是您希望使用的 python。 -#### 3. (Only For Python3)设置Python相关的环境变量: +#### 3. (Only For Python3)设置 Python 相关的环境变量: - a. 首先使用 ``` find `dirname $(dirname $(which python3))` -name "libpython3.*.dylib" ``` - 找到Pythonlib的路径(弹出的第一个对应您需要使用的python的dylib路径),然后(下面[python-lib-path]替换为找到文件路径) + 找到 Pythonlib 的路径(弹出的第一个对应您需要使用的 python 的 dylib 路径),然后(下面[python-lib-path]替换为找到文件路径) -- b. 设置PYTHON_LIBRARIES: +- b. 设置 PYTHON_LIBRARIES: ``` export PYTHON_LIBRARY=[python-lib-path] ``` -- c. 其次使用找到PythonInclude的路径(通常是找到[python-lib-path]的上一级目录为同级目录的include,然后找到该目录下python3.x的路径),然后(下面[python-include-path]替换为找到路径) -- d. 设置PYTHON_INCLUDE_DIR: +- c. 其次使用找到 PythonInclude 的路径(通常是找到[python-lib-path]的上一级目录为同级目录的 include,然后找到该目录下 python3.x 的路径),然后(下面[python-include-path]替换为找到路径) +- d. 
设置 PYTHON_INCLUDE_DIR: ``` export PYTHON_INCLUDE_DIRS=[python-include-path] ``` @@ -209,7 +209,7 @@ uname -m ``` (这里[python-ld-path]为[python-bin-path]的上一级目录) -- g. (可选)如果您是在 macOS 10.14上编译 PaddlePaddle,请保证您已经安装了[对应版本](http://developer.apple.com/download)的Xcode。 +- g. (可选)如果您是在 macOS 10.14 上编译 PaddlePaddle,请保证您已经安装了[对应版本](http://developer.apple.com/download)的 Xcode。 #### 4. **执行编译前**请您确认您的环境中安装有[编译依赖表](/documentation/docs/zh/install/Tables.html#third_party)中提到的相关依赖,否则我们强烈推荐使用`Homebrew`安装相关依赖。 @@ -217,17 +217,17 @@ uname -m - a. 这里特别说明一下**CMake**的安装: - CMake我们支持3.15以上版本,推荐使用CMake3.16,请根据以下步骤安装: + CMake 我们支持 3.15 以上版本,推荐使用 CMake3.16,请根据以下步骤安装: - 1. 从CMake[官方网站](https://cmake.org/files/v3.16/cmake-3.16.0-Darwin-x86_64.dmg)下载CMake镜像并安装 + 1. 从 CMake[官方网站](https://cmake.org/files/v3.16/cmake-3.16.0-Darwin-x86_64.dmg)下载 CMake 镜像并安装 2. 在控制台输入 ``` sudo "/Applications/CMake.app/Contents/bin/cmake-gui" –install ``` -- b. 如果您不想使用系统默认的blas而希望使用自己安装的OPENBLAS请参见[FAQ](../FAQ.html/#OPENBLAS) +- b. 如果您不想使用系统默认的 blas 而希望使用自己安装的 OPENBLAS 请参见[FAQ](../FAQ.html/#OPENBLAS) -#### 5. 将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中,并进入Padde目录下: +#### 5. 将 PaddlePaddle 的源码 clone 在当下目录下的 Paddle 的文件夹中,并进入 Padde 目录下: ``` git clone https://github.com/PaddlePaddle/Paddle.git @@ -237,32 +237,32 @@ git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle ``` -#### 6. 切换到develop分支进行编译: +#### 6. 切换到 develop 分支进行编译: ``` git checkout develop ``` -注意:python3.6、python3.7版本从release/1.2分支开始支持, python3.8版本从release/1.8分支开始支持, python3.9版本从release/2.1分支开始支持 +注意:python3.6、python3.7 版本从 release/1.2 分支开始支持, python3.8 版本从 release/1.8 分支开始支持, python3.9 版本从 release/2.1 分支开始支持 -#### 7. 并且请创建并进入一个叫build的目录下: +#### 7. 并且请创建并进入一个叫 build 的目录下: ``` mkdir build && cd build ``` -#### 8. 执行cmake: +#### 8. 执行 cmake: >具体编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) -* 对于需要编译**CPU版本PaddlePaddle**的用户: +* 对于需要编译**CPU 版本 PaddlePaddle**的用户: ``` cmake .. 
-DPY_VERSION=3.7 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} \ -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release ``` ->`-DPY_VERSION=3.7`请修改为安装环境的Python版本 +>`-DPY_VERSION=3.7`请修改为安装环境的 Python 版本 #### 9. 使用以下命令来编译: @@ -278,18 +278,18 @@ cd /paddle/build/python/dist #### 11. 在当前机器或目标机器安装编译好的`.whl`包: ``` -pip install -U(whl包的名字) +pip install -U(whl 包的名字) ``` 或 ``` -pip3 install -U(whl包的名字) +pip3 install -U(whl 包的名字) ``` -#### 恭喜,至此您已完成PaddlePaddle的编译安装 +#### 恭喜,至此您已完成 PaddlePaddle 的编译安装 ## **验证安装** -安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入 +安装完成后您可以使用 `python` 或 `python3` 进入 python 解释器,输入 ``` import paddle ``` @@ -301,9 +301,9 @@ paddle.utils.run_check() 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 ## **如何卸载** -请使用以下命令卸载PaddlePaddle +请使用以下命令卸载 PaddlePaddle -* **CPU版本的PaddlePaddle**: +* **CPU 版本的 PaddlePaddle**: ``` pip uninstall paddlepaddle ``` @@ -312,4 +312,4 @@ paddle.utils.run_check() pip3 uninstall paddlepaddle ``` -使用Docker安装PaddlePaddle的用户,请进入包含PaddlePaddle的容器中使用上述命令,注意使用对应版本的pip +使用 Docker 安装 PaddlePaddle 的用户,请进入包含 PaddlePaddle 的容器中使用上述命令,注意使用对应版本的 pip diff --git a/docs/install/compile/mips-compile.md b/docs/install/compile/mips-compile.md index 1a0cca8fe79..d77220916cc 100644 --- a/docs/install/compile/mips-compile.md +++ b/docs/install/compile/mips-compile.md @@ -7,28 +7,28 @@ * **Python 版本 2.7.15+/3.5.1+/3.6/3.7/3.8 (64 bit)** * **pip 或 pip3 版本 20.2.2+ (64 bit)** -本文以Loongson-3A4000为例,介绍Paddle在MIPS架构下的源码编译。 +本文以 Loongson-3A4000 为例,介绍 Paddle 在 MIPS 架构下的源码编译。 ## 安装步骤 -目前在MIPS龙芯处理器加龙芯国产化操作系统上安装Paddle,只支持源码编译的方式,接下来详细介绍各个步骤。 +目前在 MIPS 龙芯处理器加龙芯国产化操作系统上安装 Paddle,只支持源码编译的方式,接下来详细介绍各个步骤。 ### **源码编译** -1. 龙芯操作系统`Loongnix release 1.0`默认安装的gcc版本是4.9,但yum源提供了gcc-7的工具链,在此处安装gcc-7。可以参考龙芯龙芯开源社区[文章](http://www.loongnix.org/index.php/Gcc7.3.0) +1. 
龙芯操作系统`Loongnix release 1.0`默认安装的 gcc 版本是 4.9,但 yum 源提供了 gcc-7 的工具链,在此处安装 gcc-7。可以参考龙芯开源社区[文章](http://www.loongnix.org/index.php/Gcc7.3.0)
根据[requirements.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装 Python 依赖库。
预测文档见[doc](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/native_infer.html),使用示例见[Paddle-Inference-Demo](https://github.com/PaddlePaddle/Paddle-Inference-Demo) diff --git a/docs/install/compile/sw-compile.md b/docs/install/compile/sw-compile.md index 0b17affd1d0..a85da8d4130 100644 --- a/docs/install/compile/sw-compile.md +++ b/docs/install/compile/sw-compile.md @@ -7,16 +7,16 @@ * **Python 版本 2.7.15+/3.5.1+/3.6/3.7/3.8 (64 bit)** * **pip 或 pip3 版本 9.0.1+ (64 bit)** -申威机器为SW架构,目前生态支持的软件比较有限,本文以比较trick的方式在申威机器上源码编译Paddle,未来会随着申威软件的完善不断更新。 +申威机器为 SW 架构,目前生态支持的软件比较有限,本文以比较 trick 的方式在申威机器上源码编译 Paddle,未来会随着申威软件的完善不断更新。 ## 安装步骤 -本文在申威处理器下安装Paddle,接下来详细介绍各个步骤。 +本文在申威处理器下安装 Paddle,接下来详细介绍各个步骤。 ### **源码编译** -1. 将Paddle的源代码克隆到当下目录下的Paddle文件夹中,并进入Paddle目录 +1. 将 Paddle 的源代码克隆到当下目录下的 Paddle 文件夹中,并进入 Paddle 目录 ``` git clone https://github.com/PaddlePaddle/Paddle.git @@ -32,25 +32,25 @@ git checkout develop ``` -3. Paddle依赖cmake进行编译构建,需要cmake版本>=3.15,检查操作系统源提供cmake的版本,使用源的方式直接安装cmake, `apt install cmake`, 检查cmake版本, `cmake --version`, 如果cmake >= 3.15则不需要额外的操作,否则请修改Paddle主目录的`CMakeLists.txt`, `cmake_minimum_required(VERSION 3.15)` 修改为 `cmake_minimum_required(VERSION 3.0)`. +3. Paddle 依赖 cmake 进行编译构建,需要 cmake 版本>=3.15,检查操作系统源提供 cmake 的版本,使用源的方式直接安装 cmake, `apt install cmake`, 检查 cmake 版本, `cmake --version`, 如果 cmake >= 3.15 则不需要额外的操作,否则请修改 Paddle 主目录的`CMakeLists.txt`, `cmake_minimum_required(VERSION 3.15)` 修改为 `cmake_minimum_required(VERSION 3.0)`. -4. 申威支持openblas,使用 `yum` 安装openblas及其相关的依赖(如果安装失败,需要联系厂商解决安装问题)。 - 安装openblas,得到openblas库文件及头文件cblas.h; - 安装lapack: +4. 
申威支持 openblas,使用 `yum` 安装 openblas 及其相关的依赖(如果安装失败,需要联系厂商解决安装问题)。 + 安装 openblas,得到 openblas 库文件及头文件 cblas.h; + 安装 lapack: ``` yum install lapack-devel.sw_64 ``` - lapack的搜索地址与openblas相同。 + lapack 的搜索地址与 openblas 相同。 - 编译时出现以下log信息,表明openblas库链接成功: + 编译时出现以下 log 信息,表明 openblas 库链接成功: ``` -- Found OpenBLAS (include: /usr/include/openblas, library: /usr/lib/libopenblas.so) -- Found lapack in OpenBLAS (include: /usr/include) ``` -5. 根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装Python依赖库,注意在申威系统中一般无法直接使用pip或源码编译安装python依赖包,建议使用源的方式安装,如果遇到部分依赖包无法安装的情况,请联系操作系统服务商提供支持。此外也可以通过pip安装的时候加--no-deps的方式来避免依赖包的安装,但该种方式可能导致包由于缺少依赖不可用。 +5. 根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装 Python 依赖库,注意在申威系统中一般无法直接使用 pip 或源码编译安装 python 依赖包,建议使用源的方式安装,如果遇到部分依赖包无法安装的情况,请联系操作系统服务商提供支持。此外也可以通过 pip 安装的时候加--no-deps 的方式来避免依赖包的安装,但该种方式可能导致包由于缺少依赖不可用。 -6. 请创建并进入一个叫build的目录下: +6. 请创建并进入一个叫 build 的目录下: ``` mkdir build && cd build @@ -62,7 +62,7 @@ ulimit -n 4096 ``` -8. 执行cmake: +8. 执行 cmake: >具体编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) @@ -86,18 +86,18 @@ 11. 
在当前机器或目标机器安装编译好的`.whl`包: ``` - python2 -m pip install -U(whl包的名字)`或`python3 -m pip install -U(whl包的名字) + python2 -m pip install -U(whl 包的名字)`或`python3 -m pip install -U(whl 包的名字) ``` -恭喜,至此您已完成PaddlePaddle在FT环境下的编译安装。 +恭喜,至此您已完成 PaddlePaddle 在 FT 环境下的编译安装。 ## **验证安装** -安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle` ,再输入 +安装完成后您可以使用 `python` 或 `python3` 进入 python 解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 -在mobilenetv1和resnet50模型上测试 +在 mobilenetv1 和 resnet50 模型上测试 ``` wget -O profile.tar https://paddle-cetc15.bj.bcebos.com/profile.tar?authorization=bce-auth-v1/4409a3f3dd76482ab77af112631f01e4/2020-10-09T10:11:53Z/-1/host/786789f3445f498c6a1fd4d9cd3897ac7233700df0c6ae2fd78079eba89bf3fb @@ -121,7 +121,7 @@ python ernie.py --model_dir ernieL3H128_model/ ``` ## **如何卸载** -请使用以下命令卸载PaddlePaddle: +请使用以下命令卸载 PaddlePaddle: ``` python3 -m pip uninstall paddlepaddle @@ -133,6 +133,6 @@ python3 -m pip uninstall paddlepaddle ## **备注** -已在申威下测试过resnet50, mobilenetv1, ernie, ELMo等模型,基本保证了预测使用算子的正确性,但可能会遇到浮点异常的问题,该问题我们后续会和申威一起解决,如果您在使用过程中遇到计算结果错误,编译失败等问题,请到[issue](https://github.com/PaddlePaddle/Paddle/issues)中留言,我们会及时解决。 +已在申威下测试过 resnet50, mobilenetv1, ernie, ELMo 等模型,基本保证了预测使用算子的正确性,但可能会遇到浮点异常的问题,该问题我们后续会和申威一起解决,如果您在使用过程中遇到计算结果错误,编译失败等问题,请到[issue](https://github.com/PaddlePaddle/Paddle/issues)中留言,我们会及时解决。 预测文档见[doc](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/native_infer.html),使用示例见[Paddle-Inference-Demo](https://github.com/PaddlePaddle/Paddle-Inference-Demo) diff --git a/docs/install/compile/windows-compile.md b/docs/install/compile/windows-compile.md index cd674cb60bd..04db20b5e22 100644 --- a/docs/install/compile/windows-compile.md +++ b/docs/install/compile/windows-compile.md @@ -1,6 +1,6 @@ -# **Windows下从源码编译** +# **Windows 下从源码编译** -在Windows系统下提供1种编译方式: +在 Windows 系统下提供 1 种编译方式: * [本机编译](#compile_from_host) @@ -10,34 +10,34 @@ * 
**Python 版本 3.6/3.7/3.8/3.9 (64 bit)** * **Visual Studio 2017 社区版/专业版/企业版** -## 选择CPU/GPU +## 选择 CPU/GPU -* 如果你的计算机硬件没有 NVIDIA® GPU,请编译CPU版本的PaddlePaddle +* 如果你的计算机硬件没有 NVIDIA® GPU,请编译 CPU 版本的 PaddlePaddle -* 如果你的计算机硬件有 NVIDIA® GPU,推荐编译GPU版本的PaddlePaddle,建议安装 **CUDA 10.1/10.2/11.0/11.1/11.2** +* 如果你的计算机硬件有 NVIDIA® GPU,推荐编译 GPU 版本的 PaddlePaddle,建议安装 **CUDA 10.1/10.2/11.0/11.1/11.2** ## 本机编译过程 1. 安装必要的工具 cmake, git, python, Visual studio 2017: - > **cmake**:建议安装CMake3.17版本, 官网下载[链接](https://cmake.org/files/v3.17/cmake-3.17.0-win64-x64.msi)。安装时注意勾选 `Add CMake to the system PATH for all users`,将CMake添加到环境变量中。 + > **cmake**:建议安装 CMake3.17 版本, 官网下载[链接](https://cmake.org/files/v3.17/cmake-3.17.0-win64-x64.msi)。安装时注意勾选 `Add CMake to the system PATH for all users`,将 CMake 添加到环境变量中。 > **git**:官网下载[链接](https://github.com/git-for-windows/git/releases/download/v2.35.1.windows.2/Git-2.35.1.2-64-bit.exe),使用默认选项安装。 - > **python**:官网[链接](https://www.python.org/downloads/windows/),可选择3.6/3.7/3.8/3.9中任一版本的 Windows installer(64-bit)安装。安装时注意勾选 `Add Python 3.x to PATH`,将Python添加到环境变量中。 + > **python**:官网[链接](https://www.python.org/downloads/windows/),可选择 3.6/3.7/3.8/3.9 中任一版本的 Windows installer(64-bit)安装。安装时注意勾选 `Add Python 3.x to PATH`,将 Python 添加到环境变量中。 - > **Visual studio 2017**:官网[链接](https://visualstudio.microsoft.com/zh-hans/vs/older-downloads/#visual-studio-2017-and-other-products),需要登录后下载,建议下载Community社区版。在安装时需要在工作负荷一栏中勾选 `使用C++的桌面开发` 和 `通用Windows平台开发`,并在语言包一栏中选择 `英语`。 + > **Visual studio 2017**:官网[链接](https://visualstudio.microsoft.com/zh-hans/vs/older-downloads/#visual-studio-2017-and-other-products),需要登录后下载,建议下载 Community 社区版。在安装时需要在工作负荷一栏中勾选 `使用 C++的桌面开发` 和 `通用 Windows 平台开发`,并在语言包一栏中选择 `英语`。 -2. 在Windows桌面下方的搜索栏中搜索 `x64 Native Tools Command Prompt for VS 2017` 或 `适用于VS 2017 的x64本机工具命令提示符`,右键以管理员身份打开终端。之后的命令均在该终端中执行。 +2. 在 Windows 桌面下方的搜索栏中搜索 `x64 Native Tools Command Prompt for VS 2017` 或 `适用于 VS 2017 的 x64 本机工具命令提示符`,右键以管理员身份打开终端。之后的命令均在该终端中执行。 -3. 
使用`pip`命令安装Python依赖: - * 通过 `python --version` 检查默认python版本是否是预期版本,因为你的计算机可能安装有多个python,你可通过修改系统环境变量的顺序来修改默认Python版本。 +3. 使用`pip`命令安装 Python 依赖: + * 通过 `python --version` 检查默认 python 版本是否是预期版本,因为你的计算机可能安装有多个 python,你可通过修改系统环境变量的顺序来修改默认 Python 版本。 * 安装 numpy, protobuf, wheel, ninja ``` pip install numpy protobuf wheel ninja ``` -4. 创建编译Paddle的文件夹(例如D:\workspace),进入该目录并下载源码: +4. 创建编译 Paddle 的文件夹(例如 D:\workspace),进入该目录并下载源码: ``` mkdir D:\workspace && cd /d D:\workspace @@ -47,7 +47,7 @@ cd Paddle ``` -5. 创建名为build的目录并进入: +5. 创建名为 build 的目录并进入: ``` mkdir build @@ -55,15 +55,15 @@ cd build ``` -6. 执行cmake: +6. 执行 cmake: - 编译CPU版本的Paddle: + 编译 CPU 版本的 Paddle: ``` cmake .. -GNinja -DWITH_GPU=OFF ``` - 编译GPU版本的Paddle: + 编译 GPU 版本的 Paddle: ``` cmake .. -GNinja -DWITH_GPU=ON @@ -72,8 +72,8 @@ 其他编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile)。 > 注意: - > 1. 如果本机安装了多个CUDA,将使用最新安装的CUDA版本,且无法指定。 - > 2. 如果本机安装了多个Python,将使用最新安装的Python版本。若需要指定Python版本,则需要指定Python路径,例如: + > 1. 如果本机安装了多个 CUDA,将使用最新安装的 CUDA 版本,且无法指定。 + > 2. 如果本机安装了多个 Python,将使用最新安装的 Python 版本。若需要指定 Python 版本,则需要指定 Python 路径,例如: ``` cmake .. -GNinja -DWITH_GPU=ON -DPYTHON_EXECUTABLE=C:\Python38\python.exe -DPYTHON_INCLUDE_DIR=C:\Python38\include -DPYTHON_LIBRARY=C:\Python38\libs\python38.lib ``` @@ -93,15 +93,15 @@ 9. 
安装编译好的 `.whl` 包: ``` - pip install(whl包的名字)--force-reinstall + pip install(whl 包的名字)--force-reinstall ``` -恭喜,至此你已完成PaddlePaddle的编译安装 +恭喜,至此你已完成 PaddlePaddle 的编译安装 ## **验证安装** -安装完成后你可以使用 `python` 进入python解释器,输入: +安装完成后你可以使用 `python` 进入 python 解释器,输入: ``` import paddle @@ -114,14 +114,14 @@ paddle.utils.run_check() 如果出现`PaddlePaddle is installed successfully!`,说明你已成功安装。 ## **如何卸载** -请使用以下命令卸载PaddlePaddle: +请使用以下命令卸载 PaddlePaddle: -* **CPU版本的PaddlePaddle**: +* **CPU 版本的 PaddlePaddle**: ``` pip uninstall paddlepaddle ``` -* **GPU版本的PaddlePaddle**: +* **GPU 版本的 PaddlePaddle**: ``` pip uninstall paddlepaddle-gpu ``` diff --git a/docs/install/compile/zhaoxin-compile.md b/docs/install/compile/zhaoxin-compile.md index 4329d6c8fcf..3f2f7b437f1 100644 --- a/docs/install/compile/zhaoxin-compile.md +++ b/docs/install/compile/zhaoxin-compile.md @@ -7,16 +7,16 @@ * **Python 版本 2.7.15+/3.5.1+/3.6/3.7/3.8 (64 bit)** * **pip 或 pip3 版本 9.0.1+ (64 bit)** -兆芯为x86架构,编译方法与[Linux下从源码编译cpu版](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/compile/compile_Linux.html)一致。 +兆芯为 x86 架构,编译方法与[Linux 下从源码编译 cpu 版](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/compile/compile_Linux.html)一致。 ## 安装步骤 -本文在ZHAOXIN处理器下安装Paddle,接下来详细介绍各个步骤。 +本文在 ZHAOXIN 处理器下安装 Paddle,接下来详细介绍各个步骤。 ### **源码编译** -1. Paddle依赖cmake进行编译构建,需要cmake版本>=3.15,如果操作系统提供的源包括了合适版本的cmake,直接安装即可,否则需要[源码安装](https://github.com/Kitware/CMake) +1. Paddle 依赖 cmake 进行编译构建,需要 cmake 版本>=3.15,如果操作系统提供的源包括了合适版本的 cmake,直接安装即可,否则需要[源码安装](https://github.com/Kitware/CMake) ``` wget https://github.com/Kitware/CMake/releases/download/v3.16.8/cmake-3.16.8.tar.gz @@ -30,7 +30,7 @@ ./bootstrap && make && sudo make install ``` -2. Paddle内部使用patchelf来修改动态库的rpath,如果操作系统提供的源包括了patchelf,直接安装即可,否则需要源码安装,请参考[patchelf官方文档](https://github.com/NixOS/patchelf)。 +2. 
Paddle 内部使用 patchelf 来修改动态库的 rpath,如果操作系统提供的源包括了 patchelf,直接安装即可,否则需要源码安装,请参考[patchelf 官方文档](https://github.com/NixOS/patchelf)。 ``` ./bootstrap.sh @@ -52,7 +52,7 @@ sudo make install ``` -3. 将Paddle的源代码克隆到当下目录下的Paddle文件夹中,并进入Paddle目录 +3. 将 Paddle 的源代码克隆到当下目录下的 Paddle 文件夹中,并进入 Paddle 目录 ``` git clone https://github.com/PaddlePaddle/Paddle.git @@ -68,13 +68,13 @@ git checkout develop ``` -5. 根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装Python依赖库。 +5. 根据[requirments.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/python/requirements.txt)安装 Python 依赖库。 ``` pip install -r python/requirments.txt ``` -6. 请创建并进入一个叫build的目录下: +6. 请创建并进入一个叫 build 的目录下: ``` mkdir build && cd build @@ -86,7 +86,7 @@ ulimit -n 4096 ``` -8. 执行cmake: +8. 执行 cmake: >具体编译选项含义请参见[编译选项表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/install/Tables.html#Compile) @@ -110,14 +110,14 @@ 11. 在当前机器或目标机器安装编译好的`.whl`包: ``` - python2 -m pip install -U(whl包的名字)`或`python3 -m pip install -U(whl包的名字) + python2 -m pip install -U(whl 包的名字)`或`python3 -m pip install -U(whl 包的名字) ``` -恭喜,至此您已完成PaddlePaddle在FT环境下的编译安装。 +恭喜,至此您已完成 PaddlePaddle 在 FT 环境下的编译安装。 ## **验证安装** -安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入 +安装完成后您可以使用 `python` 或 `python3` 进入 python 解释器,输入 ``` import paddle ``` @@ -128,7 +128,7 @@ paddle.utils.run_check() 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 -在mobilenetv1和resnet50模型上测试 +在 mobilenetv1 和 resnet50 模型上测试 ``` wget -O profile.tar https://paddle-cetc15.bj.bcebos.com/profile.tar?authorization=bce-auth-v1/4409a3f3dd76482ab77af112631f01e4/2020-10-09T10:11:53Z/-1/host/786789f3445f498c6a1fd4d9cd3897ac7233700df0c6ae2fd78079eba89bf3fb @@ -152,7 +152,7 @@ python ernie.py --model_dir ernieL3H128_model/ ``` ## **如何卸载** -请使用以下命令卸载PaddlePaddle: +请使用以下命令卸载 PaddlePaddle: ``` python3 -m pip uninstall paddlepaddle @@ -164,6 +164,6 @@ python3 -m pip uninstall paddlepaddle ## **备注** -已在ZHAOXIN下测试过resnet50, mobilenetv1, 
ernie, ELMo等模型,基本保证了预测使用算子的正确性,如果您在使用过程中遇到计算结果错误,编译失败等问题,请到[issue](https://github.com/PaddlePaddle/Paddle/issues)中留言,我们会及时解决。 +已在 ZHAOXIN 下测试过 resnet50, mobilenetv1, ernie, ELMo 等模型,基本保证了预测使用算子的正确性,如果您在使用过程中遇到计算结果错误,编译失败等问题,请到[issue](https://github.com/PaddlePaddle/Paddle/issues)中留言,我们会及时解决。 预测文档见[doc](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/native_infer.html),使用示例见[Paddle-Inference-Demo](https://github.com/PaddlePaddle/Paddle-Inference-Demo) diff --git a/docs/install/conda/fromconda.rst b/docs/install/conda/fromconda.rst index 5ee1b787c78..1a14f0b524f 100644 --- a/docs/install/conda/fromconda.rst +++ b/docs/install/conda/fromconda.rst @@ -1,5 +1,5 @@ =========================== -**Conda安装** +**Conda 安装** =========================== .. toctree:: diff --git a/docs/install/conda/linux-conda.md b/docs/install/conda/linux-conda.md index 68decb4d2e0..485d28cbeda 100644 --- a/docs/install/conda/linux-conda.md +++ b/docs/install/conda/linux-conda.md @@ -1,11 +1,11 @@ -# Linux下的Conda安装 +# Linux 下的 Conda 安装 -[Anaconda](https://www.anaconda.com/)是一个免费开源的Python和R语言的发行版本,用于计算科学,Anaconda致力于简化包管理和部署。Anaconda的包使用软件包管理系统Conda进行管理。Conda是一个开源包管理系统和环境管理系统,可在Windows、macOS和Linux上运行。 +[Anaconda](https://www.anaconda.com/)是一个免费开源的 Python 和 R 语言的发行版本,用于计算科学,Anaconda 致力于简化包管理和部署。Anaconda 的包使用软件包管理系统 Conda 进行管理。Conda 是一个开源包管理系统和环境管理系统,可在 Windows、macOS 和 Linux 上运行。 ## 一、环境准备 -在进行PaddlePaddle安装之前请确保您的Anaconda软件环境已经正确安装。软件下载和安装参见Anaconda官网(https://www.anaconda.com/)。在您已经正确安装Anaconda的情况下请按照下列步骤安装PaddlePaddle。 +在进行 PaddlePaddle 安装之前请确保您的 Anaconda 软件环境已经正确安装。软件下载和安装参见 Anaconda 官网(https://www.anaconda.com/)。在您已经正确安装 Anaconda 的情况下请按照下列步骤安装 PaddlePaddle。 * conda 版本 4.8.3+ (64 bit) @@ -14,44 +14,44 @@ #### 1.1.1 安装环境 -首先根据具体的Python版本创建Anaconda虚拟环境,PaddlePaddle的Anaconda安装支持以下五种Python安装环境。 +首先根据具体的 Python 版本创建 Anaconda 虚拟环境,PaddlePaddle 的 Anaconda 安装支持以下五种 Python 安装环境。 -如果您想使用的python版本为3.6: +如果您想使用的 python 版本为 3.6: ``` conda create -n 
paddle_env python=3.6 ``` -如果您想使用的python版本为3.7: +如果您想使用的 python 版本为 3.7: ``` conda create -n paddle_env python=3.7 ``` -如果您想使用的python版本为3.8: +如果您想使用的 python 版本为 3.8: ``` conda create -n paddle_env python=3.8 ``` -如果您想使用的python版本为3.9: +如果您想使用的 python 版本为 3.9: ``` conda create -n paddle_env python=3.9 ``` -#### 1.1.2进入Anaconda虚拟环境 +#### 1.1.2 进入 Anaconda 虚拟环境 ``` conda activate paddle_env ``` -## 1.2其他环境检查 +## 1.2 其他环境检查 -确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64(或x64、AMD64)"即可: +确认 Python 和 pip 是 64bit,并且处理器架构是 x86_64(或称作 x64、Intel 64、AMD64)架构,目前 PaddlePaddle 不支持 arm64 架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64(或 x64、AMD64)"即可: ``` python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" @@ -60,12 +60,12 @@ python -c "import platform;print(platform.architecture()[0]);print(platform.mach ## 二、开始安装 -本文档为您介绍conda安装方式 +本文档为您介绍 conda 安装方式 ### 添加清华源(可选) -对于国内用户无法连接到Anaconda官方源的可以按照以下命令添加清华源。 +对于国内用户无法连接到 Anaconda 官方源的可以按照以下命令添加清华源。 ``` conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ @@ -76,35 +76,35 @@ conda config --set show_channel_urls yes ### 根据版本进行安装 -确定您的环境满足条件后可以开始安装了,选择下面您要安装的PaddlePaddle +确定您的环境满足条件后可以开始安装了,选择下面您要安装的 PaddlePaddle -#### CPU版的PaddlePaddle +#### CPU 版的 PaddlePaddle -如果您的计算机没有 NVIDIA® GPU设备,请安装 CPU 版的 PaddlePaddle +如果您的计算机没有 NVIDIA® GPU 设备,请安装 CPU 版的 PaddlePaddle ``` conda install paddlepaddle --channel https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ ``` -#### GPU版的PaddlePaddle +#### GPU 版的 PaddlePaddle -如果您的计算机有 NVIDIA® GPU设备 +如果您的计算机有 NVIDIA® GPU 设备 -* 如果您是使用 CUDA 10.1,cuDNN 7.6+,安装GPU版本的命令为: +* 如果您是使用 CUDA 10.1,cuDNN 7.6+,安装 GPU 版本的命令为: ``` conda install paddlepaddle-gpu==2.1.0 cudatoolkit=10.1 --channel https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ ``` -* 如果您是使用 CUDA 10.2,cuDNN 7.6+,安装GPU版本的命令为: +* 如果您是使用 CUDA 10.2,cuDNN 7.6+,安装 GPU 版本的命令为: ``` conda install paddlepaddle-gpu==2.1.0 
cudatoolkit=10.2 --channel https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ ``` -* 如果您是使用 CUDA 11.2,cuDNN 8.1.1+,安装GPU版本的命令为: +* 如果您是使用 CUDA 11.2,cuDNN 8.1.1+,安装 GPU 版本的命令为: ``` conda install paddlepaddle-gpu==2.1.0 cudatoolkit=11.2 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ -c conda-forge @@ -113,7 +113,7 @@ conda install paddlepaddle --channel https://mirrors.tuna.tsinghua.edu.cn/anacon ## **三、验证安装** -安装完成后您可以使用 `python` 进入python解释器,输入`import paddle` ,再输入 +安装完成后您可以使用 `python` 进入 python 解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 diff --git a/docs/install/conda/macos-conda.md b/docs/install/conda/macos-conda.md index 75649aa09d3..55e8981fc4f 100644 --- a/docs/install/conda/macos-conda.md +++ b/docs/install/conda/macos-conda.md @@ -1,56 +1,56 @@ -# macOS下的Conda安装 +# macOS 下的 Conda 安装 -[Anaconda](https://www.anaconda.com/)是一个免费开源的Python和R语言的发行版本,用于计算科学,Anaconda致力于简化包管理和部署。Anaconda的包使用软件包管理系统Conda进行管理。Conda是一个开源包管理系统和环境管理系统,可在Windows、macOS和Linux上运行。 +[Anaconda](https://www.anaconda.com/)是一个免费开源的 Python 和 R 语言的发行版本,用于计算科学,Anaconda 致力于简化包管理和部署。Anaconda 的包使用软件包管理系统 Conda 进行管理。Conda 是一个开源包管理系统和环境管理系统,可在 Windows、macOS 和 Linux 上运行。 ## 一、环境准备 -在进行PaddlePaddle安装之前请确保您的Anaconda软件环境已经正确安装。软件下载和安装参见Anaconda官网(https://www.anaconda.com/)。在您已经正确安装Anaconda的情况下请按照下列步骤安装PaddlePaddle。 +在进行 PaddlePaddle 安装之前请确保您的 Anaconda 软件环境已经正确安装。软件下载和安装参见 Anaconda 官网(https://www.anaconda.com/)。在您已经正确安装 Anaconda 的情况下请按照下列步骤安装 PaddlePaddle。 -* macOS 版本 10.11/10.12/10.13/10.14 (64 bit) (不支持GPU版本) +* macOS 版本 10.11/10.12/10.13/10.14 (64 bit) (不支持 GPU 版本) * conda 版本 4.8.3+ (64 bit) ### 1.1 创建虚拟环境 #### 1.1.1 安装环境 -首先根据具体的Python版本创建Anaconda虚拟环境,PaddlePaddle的Anaconda安装支持以下五种Python安装环境。 +首先根据具体的 Python 版本创建 Anaconda 虚拟环境,PaddlePaddle 的 Anaconda 安装支持以下五种 Python 安装环境。 -如果您想使用的python版本为3.6: +如果您想使用的 python 版本为 3.6: ``` conda create -n paddle_env python=3.6 ``` -如果您想使用的python版本为3.7: +如果您想使用的 python 版本为 3.7: ``` 
conda create -n paddle_env python=3.7 ``` -如果您想使用的python版本为3.8: +如果您想使用的 python 版本为 3.8: ``` conda create -n paddle_env python=3.8 ``` -如果您想使用的python版本为3.9: +如果您想使用的 python 版本为 3.9: ``` conda create -n paddle_env python=3.9 ``` -#### 1.1.2进入Anaconda虚拟环境 +#### 1.1.2 进入 Anaconda 虚拟环境 ``` conda activate paddle_env ``` -## 1.2其他环境检查 +## 1.2 其他环境检查 -确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64(或x64、AMD64)"即可: +确认 Python 和 pip 是 64bit,并且处理器架构是 x86_64(或称作 x64、Intel 64、AMD64)架构,目前 PaddlePaddle 不支持 arm64 架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64(或 x64、AMD64)"即可: ``` python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" @@ -58,11 +58,11 @@ python -c "import platform;print(platform.architecture()[0]);print(platform.mach ## 二、开始安装 -本文档为您介绍conda安装方式 +本文档为您介绍 conda 安装方式 ### 添加清华源(可选) -对于国内用户无法连接到Anaconda官方源的可以按照以下命令添加清华源。 +对于国内用户无法连接到 Anaconda 官方源的可以按照以下命令添加清华源。 ``` conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ @@ -76,7 +76,7 @@ conda config --set show_channel_urls yes ### 根据版本进行安装 -确定您的环境满足条件后可以开始安装了,选择下面您要安装的PaddlePaddle +确定您的环境满足条件后可以开始安装了,选择下面您要安装的 PaddlePaddle * 请参考如下命令安装: @@ -86,7 +86,7 @@ conda config --set show_channel_urls yes ## **三、验证安装** -安装完成后您可以使用 `python` 或 `python3` 进入python解释器,输入`import paddle` ,再输入 +安装完成后您可以使用 `python` 或 `python3` 进入 python 解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 diff --git a/docs/install/conda/windows-conda.md b/docs/install/conda/windows-conda.md index 7d304c65dc9..231d7d76b9c 100644 --- a/docs/install/conda/windows-conda.md +++ b/docs/install/conda/windows-conda.md @@ -1,10 +1,10 @@ -# Windows下的Conda安装 +# Windows 下的 Conda 安装 -[Anaconda](https://www.anaconda.com/)是一个免费开源的Python和R语言的发行版本,用于计算科学,Anaconda致力于简化包管理和部署。Anaconda的包使用软件包管理系统Conda进行管理。Conda是一个开源包管理系统和环境管理系统,可在Windows、macOS和Linux上运行。 +[Anaconda](https://www.anaconda.com/)是一个免费开源的 
Python 和 R 语言的发行版本,用于计算科学,Anaconda 致力于简化包管理和部署。Anaconda 的包使用软件包管理系统 Conda 进行管理。Conda 是一个开源包管理系统和环境管理系统,可在 Windows、macOS 和 Linux 上运行。 ## 一、环境准备 -在进行PaddlePaddle安装之前请确保您的Anaconda软件环境已经正确安装。软件下载和安装参见Anaconda官网(https://www.anaconda.com/)。在您已经正确安装Anaconda的情况下请按照下列步骤安装PaddlePaddle。 +在进行 PaddlePaddle 安装之前请确保您的 Anaconda 软件环境已经正确安装。软件下载和安装参见 Anaconda 官网(https://www.anaconda.com/)。在您已经正确安装 Anaconda 的情况下请按照下列步骤安装 PaddlePaddle。 * Windows 7/8/10 专业版/企业版 (64bit) * conda 版本 4.8.3+ (64 bit) @@ -13,44 +13,44 @@ #### 1.1.1 安装环境 -首先根据具体的Python版本创建Anaconda虚拟环境,PaddlePaddle的Anaconda安装支持以下五种Python安装环境。 +首先根据具体的 Python 版本创建 Anaconda 虚拟环境,PaddlePaddle 的 Anaconda 安装支持以下五种 Python 安装环境。 -如果您想使用的python版本为3.6: +如果您想使用的 python 版本为 3.6: ``` conda create -n paddle_env python=3.6 ``` -如果您想使用的python版本为3.7: +如果您想使用的 python 版本为 3.7: ``` conda create -n paddle_env python=3.7 ``` -如果您想使用的python版本为3.8: +如果您想使用的 python 版本为 3.8: ``` conda create -n paddle_env python=3.8 ``` -如果您想使用的python版本为3.9: +如果您想使用的 python 版本为 3.9: ``` conda create -n paddle_env python=3.9 ``` -#### 1.1.2进入Anaconda虚拟环境 +#### 1.1.2 进入 Anaconda 虚拟环境 ``` conda activate paddle_env ``` -## 1.2其他环境检查 +## 1.2 其他环境检查 -确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64(或x64、AMD64)"即可: +确认 Python 和 pip 是 64bit,并且处理器架构是 x86_64(或称作 x64、Intel 64、AMD64)架构,目前 PaddlePaddle 不支持 arm64 架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64(或 x64、AMD64)"即可: ``` python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" @@ -59,12 +59,12 @@ python -c "import platform;print(platform.architecture()[0]);print(platform.mach ## 二、开始安装 -本文档为您介绍conda安装方式 +本文档为您介绍 conda 安装方式 ### 添加清华源(可选) -对于国内用户无法连接到Anaconda官方源的可以按照以下命令添加清华源。 +对于国内用户无法连接到 Anaconda 官方源的可以按照以下命令添加清华源。 ``` conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ @@ -75,35 +75,35 @@ conda config --set show_channel_urls yes ### 根据版本进行安装 -确定您的环境满足条件后可以开始安装了,选择下面您要安装的PaddlePaddle 
+确定您的环境满足条件后可以开始安装了,选择下面您要安装的 PaddlePaddle -#### CPU版的PaddlePaddle +#### CPU 版的 PaddlePaddle -如果您的计算机没有 NVIDIA® GPU设备,请安装 CPU 版的 PaddlePaddle +如果您的计算机没有 NVIDIA® GPU 设备,请安装 CPU 版的 PaddlePaddle ``` conda install paddlepaddle --channel https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ ``` -#### GPU版的PaddlePaddle +#### GPU 版的 PaddlePaddle -如果您的计算机有 NVIDIA® GPU设备 +如果您的计算机有 NVIDIA® GPU 设备 -* 如果您是使用 CUDA 10.1,cuDNN 7.6+,安装GPU版本的命令为: +* 如果您是使用 CUDA 10.1,cuDNN 7.6+,安装 GPU 版本的命令为: ``` conda install paddlepaddle-gpu==2.1.0 cudatoolkit=10.1 --channel https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ ``` -* 如果您是使用 CUDA 10.2,cuDNN 7.6+,安装GPU版本的命令为: +* 如果您是使用 CUDA 10.2,cuDNN 7.6+,安装 GPU 版本的命令为: ``` conda install paddlepaddle-gpu==2.1.0 cudatoolkit=10.2 --channel https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ ``` -* 如果您是使用 CUDA 11.2,cuDNN 8.1.1+,安装GPU版本的命令为: +* 如果您是使用 CUDA 11.2,cuDNN 8.1.1+,安装 GPU 版本的命令为: ``` conda install paddlepaddle-gpu==2.1.0 cudatoolkit=11.2 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle/ -c conda-forge @@ -112,7 +112,7 @@ conda install paddlepaddle --channel https://mirrors.tuna.tsinghua.edu.cn/anacon ## **三、验证安装** -安装完成后您可以使用 `python` 进入python解释器,输入`import paddle` ,再输入 +安装完成后您可以使用 `python` 进入 python 解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 diff --git a/docs/install/docker/fromdocker.rst b/docs/install/docker/fromdocker.rst index aeeb05626f1..5f80c8cd003 100644 --- a/docs/install/docker/fromdocker.rst +++ b/docs/install/docker/fromdocker.rst @@ -1,5 +1,5 @@ =========================== -**Docker安装** +**Docker 安装** =========================== .. 
toctree:: diff --git a/docs/install/docker/linux-docker.md b/docs/install/docker/linux-docker.md index d8eca36f7cf..c65434780bd 100644 --- a/docs/install/docker/linux-docker.md +++ b/docs/install/docker/linux-docker.md @@ -1,60 +1,60 @@ -# **Linux下的Docker安装** +# **Linux 下的 Docker 安装** -[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用Docker,既可以将PaddlePaddle的安装&使用与系统环境隔离,也可以与主机共享GPU、网络等资源 +[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用 Docker,既可以将 PaddlePaddle 的安装&使用与系统环境隔离,也可以与主机共享 GPU、网络等资源 ## 环境准备 -- 目前支持的系统类型,请见[安装说明](../index_cn.html),请注意目前暂不支持在CentOS 6使用Docker +- 目前支持的系统类型,请见[安装说明](../index_cn.html),请注意目前暂不支持在 CentOS 6 使用 Docker -- 在本地主机上[安装Docker](https://hub.docker.com/search/?type=edition&offering=community) +- 在本地主机上[安装 Docker](https://hub.docker.com/search/?type=edition&offering=community) -- 如需在Linux开启GPU支持,请[安装nvidia-docker](https://github.com/NVIDIA/nvidia-docker) +- 如需在 Linux 开启 GPU 支持,请[安装 nvidia-docker](https://github.com/NVIDIA/nvidia-docker) ## 安装步骤 -1. 拉取PaddlePaddle镜像 +1. 
拉取 PaddlePaddle 镜像 - * CPU版的PaddlePaddle: + * CPU 版的 PaddlePaddle: ``` docker pull registry.baidubce.com/paddlepaddle/paddle:[版本号] ``` - * CPU版的PaddlePaddle,且镜像中预装好了 jupyter: + * CPU 版的 PaddlePaddle,且镜像中预装好了 jupyter: ``` docker pull registry.baidubce.com/paddlepaddle/paddle:[版本号]-jupyter ``` - * GPU版的PaddlePaddle: + * GPU 版的 PaddlePaddle: ``` docker pull registry.baidubce.com/paddlepaddle/paddle:[版本号]-gpu-cuda10.2-cudnn7 ``` - 如果您的机器不在中国大陆地区,可以直接从DockerHub拉取镜像: + 如果您的机器不在中国大陆地区,可以直接从 DockerHub 拉取镜像: - * CPU版的PaddlePaddle: + * CPU 版的 PaddlePaddle: ``` docker pull paddlepaddle/paddle:[版本号] ``` - * CPU版的PaddlePaddle,且镜像中预装好了 jupyter: + * CPU 版的 PaddlePaddle,且镜像中预装好了 jupyter: ``` docker pull paddlepaddle/paddle:[版本号]-jupyter ``` - * GPU版的PaddlePaddle: + * GPU 版的 PaddlePaddle: ``` docker pull paddlepaddle/paddle:[版本号]-gpu-cuda10.2-cudnn7 ``` - 在`:`后请您填写PaddlePaddle版本号,例如当前版本`2.1.0`,更多请见[镜像简介](#dockers)。 + 在`:`后请您填写 PaddlePaddle 版本号,例如当前版本`2.1.0`,更多请见[镜像简介](#dockers)。 - 上例中,`cuda10.2-cudnn7` 也仅作示意用,表示安装GPU版的镜像。如果您还想安装其他cuda/cudnn版本的镜像,可以将其替换成`cuda11.2-cudnn8`等。 + 上例中,`cuda10.2-cudnn7` 也仅作示意用,表示安装 GPU 版的镜像。如果您还想安装其他 cuda/cudnn 版本的镜像,可以将其替换成`cuda11.2-cudnn8`等。 您可以访问[DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/)获取与您机器适配的镜像。 -2. 构建、进入Docker容器 +2. 
构建、进入 Docker 容器 - * 使用CPU版本的PaddlePaddle: + * 使用 CPU 版本的 PaddlePaddle: @@ -62,18 +62,18 @@ docker run --name [Name of container] -it -v $PWD:/paddle /bin/bash ``` - > --name [Name of container] 设定Docker的名称; + > --name [Name of container] 设定 Docker 的名称; > -it 参数说明容器已和本机交互式运行; - > -v $PWD:/paddle 指定将当前路径(PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; + > -v $PWD:/paddle 指定将当前路径(PWD 变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; - > `` 指定需要使用的image名称,您可以通过`docker images`命令查看;/bin/bash是在Docker中要执行的命令 + > `` 指定需要使用的 image 名称,您可以通过`docker images`命令查看;/bin/bash 是在 Docker 中要执行的命令 - * 使用CPU版本的PaddlePaddle,且镜像中预装好了 jupyter: + * 使用 CPU 版本的 PaddlePaddle,且镜像中预装好了 jupyter: ``` mkdir ./jupyter_docker @@ -94,11 +94,11 @@ > --env USER_PASSWD=[password you set] 为 jupyter 设置登录密码,[password you set] 是自己设置的密码; - > -v $PWD:/home/paddle 指定将当前路径(PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /home/paddle 目录; + > -v $PWD:/home/paddle 指定将当前路径(PWD 变量会展开为当前路径的绝对路径)挂载到容器内部的 /home/paddle 目录; - > `` 指定需要使用的image名称,您可以通过`docker images`命令查看 + > `` 指定需要使用的 image 名称,您可以通过`docker images`命令查看 - * 使用GPU版本的PaddlePaddle: + * 使用 GPU 版本的 PaddlePaddle: @@ -106,19 +106,19 @@ nvidia-docker run --name [Name of container] -it -v $PWD:/paddle /bin/bash ``` - > --name [Name of container] 设定Docker的名称; + > --name [Name of container] 设定 Docker 的名称; > -it 参数说明容器已和本机交互式运行; - > -v $PWD:/paddle 指定将当前路径(PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; + > -v $PWD:/paddle 指定将当前路径(PWD 变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; - > `` 指定需要使用的image名称,您可以通过`docker images`命令查看;/bin/bash是在Docker中要执行的命令 + > `` 指定需要使用的 image 名称,您可以通过`docker images`命令查看;/bin/bash 是在 Docker 中要执行的命令 -至此,您已经成功使用Docker安装PaddlePaddle,更多Docker使用请参见[Docker官方文档](https://docs.docker.com) +至此,您已经成功使用 Docker 安装 PaddlePaddle,更多 Docker 使用请参见[Docker 官方文档](https://docs.docker.com)

@@ -134,33 +134,33 @@
- + - + - + - +
registry.baidubce.com/paddlepaddle/paddle:2.1.0 安装了2.1.0版本paddle的CPU镜像 安装了 2.1.0 版本 paddle 的 CPU 镜像
registry.baidubce.com/paddlepaddle/paddle:2.1.0-jupyter 安装了2.1.0版本paddle的CPU镜像,且镜像中预装好了jupyter,启动docker即运行jupyter服务 安装了 2.1.0 版本 paddle 的 CPU 镜像,且镜像中预装好了 jupyter,启动 docker 即运行 jupyter 服务
registry.baidubce.com/paddlepaddle/paddle:2.1.0-gpu-cuda11.2-cudnn8 安装了2.1.0版本paddle的GPU镜像,cuda版本为11.2,cudnn版本为8.1 安装了 2.1.0 版本 paddle 的 GPU 镜像,cuda 版本为 11.2,cudnn 版本为 8.1
registry.baidubce.com/paddlepaddle/paddle:2.1.0-gpu-cuda10.2-cudnn7 安装了2.1.0版本paddle的GPU镜像,cuda版本为10.2,cudnn版本为7 安装了 2.1.0 版本 paddle 的 GPU 镜像,cuda 版本为 10.2,cudnn 版本为 7

-您可以在 [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/) 中找到PaddlePaddle的各个发行的版本的docker镜像。 +您可以在 [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/) 中找到 PaddlePaddle 的各个发行的版本的 docker 镜像。 ### 注意事项 -* 镜像中Python版本为3.7 +* 镜像中 Python 版本为 3.7 ### 补充说明 -* 当您需要第二次进入Docker容器中,使用如下命令: +* 当您需要第二次进入 Docker 容器中,使用如下命令: 启动之前创建的容器 ``` @@ -172,20 +172,20 @@ docker attach [Name of container] ``` -* 如您是Docker新手,您可以参考互联网上的资料学习,例如[Docker教程](http://www.runoob.com/docker/docker-hello-world.html) +* 如您是 Docker 新手,您可以参考互联网上的资料学习,例如[Docker 教程](http://www.runoob.com/docker/docker-hello-world.html) ## 如何卸载 -请您进入Docker容器后,执行如下命令 +请您进入 Docker 容器后,执行如下命令 -* **CPU版本的PaddlePaddle**: +* **CPU 版本的 PaddlePaddle**: ``` pip uninstall paddlepaddle ``` -* **GPU版本的PaddlePaddle**: +* **GPU 版本的 PaddlePaddle**: ``` pip uninstall paddlepaddle-gpu ``` -或通过`docker rm [Name of container]`来直接删除Docker容器 +或通过`docker rm [Name of container]`来直接删除 Docker 容器 diff --git a/docs/install/docker/macos-docker.md b/docs/install/docker/macos-docker.md index f7e3e5533e8..66762d9e906 100644 --- a/docs/install/docker/macos-docker.md +++ b/docs/install/docker/macos-docker.md @@ -1,44 +1,44 @@ -# **macOS下的Docker安装** +# **macOS 下的 Docker 安装** -[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用Docker,既可以将PaddlePaddle的安装&使用与系统环境隔离,也可以与主机共享GPU、网络等资源 +[Docker](https://docs.docker.com/install/)是一个开源的应用容器引擎。使用 Docker,既可以将 PaddlePaddle 的安装&使用与系统环境隔离,也可以与主机共享 GPU、网络等资源 ## 环境准备 -- macOS 版本 10.11/10.12/10.13/10.14 (64 bit) (不支持GPU版本) +- macOS 版本 10.11/10.12/10.13/10.14 (64 bit) (不支持 GPU 版本) -- 在本地主机上[安装Docker](https://hub.docker.com/search/?type=edition&offering=community) +- 在本地主机上[安装 Docker](https://hub.docker.com/search/?type=edition&offering=community) ## 安装步骤 -1. 拉取PaddlePaddle镜像 +1. 
拉取 PaddlePaddle 镜像 - * CPU版的PaddlePaddle: + * CPU 版的 PaddlePaddle: ``` docker pull registry.baidubce.com/paddlepaddle/paddle:[版本号] ``` - * CPU版的PaddlePaddle,且镜像中预装好了 jupyter: + * CPU 版的 PaddlePaddle,且镜像中预装好了 jupyter: ``` docker pull registry.baidubce.com/paddlepaddle/paddle:[版本号]-jupyter ``` - 如果您的机器不在中国大陆地区,可以直接从DockerHub拉取镜像: + 如果您的机器不在中国大陆地区,可以直接从 DockerHub 拉取镜像: - * CPU版的PaddlePaddle: + * CPU 版的 PaddlePaddle: ``` docker pull paddlepaddle/paddle:[版本号] ``` - * CPU版的PaddlePaddle,且镜像中预装好了 jupyter: + * CPU 版的 PaddlePaddle,且镜像中预装好了 jupyter: ``` docker pull paddlepaddle/paddle:[版本号]-jupyter ``` - 在`:`后请您填写PaddlePaddle版本号,您可以访问[DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/)获取与您机器适配的镜像。 + 在`:`后请您填写 PaddlePaddle 版本号,您可以访问[DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/)获取与您机器适配的镜像。 -2. 构建、进入Docker容器 +2. 构建、进入 Docker 容器 - * 使用CPU版本的PaddlePaddle: + * 使用 CPU 版本的 PaddlePaddle: @@ -46,17 +46,17 @@ docker run --name [Name of container] -it -v $PWD:/paddle /bin/bash ``` - > --name [Name of container] 设定Docker的名称; + > --name [Name of container] 设定 Docker 的名称; > -it 参数说明容器已和本机交互式运行; - > -v $PWD:/paddle 指定将当前路径(PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; + > -v $PWD:/paddle 指定将当前路径(PWD 变量会展开为当前路径的绝对路径)挂载到容器内部的 /paddle 目录; - > `` 指定需要使用的image名称,您可以通过`docker images`命令查看;/bin/bash是在Docker中要执行的命令 + > `` 指定需要使用的 image 名称,您可以通过`docker images`命令查看;/bin/bash 是在 Docker 中要执行的命令 - * 使用CPU版本的PaddlePaddle,且镜像中预装好了 jupyter: + * 使用 CPU 版本的 PaddlePaddle,且镜像中预装好了 jupyter: ``` mkdir ./jupyter_docker @@ -77,14 +77,14 @@ > --env USER_PASSWD=[password you set] 为 jupyter 设置登录密码,[password you set] 是自己设置的密码; - > -v $PWD:/home/paddle 指定将当前路径(PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 /home/paddle 目录; + > -v $PWD:/home/paddle 指定将当前路径(PWD 变量会展开为当前路径的绝对路径)挂载到容器内部的 /home/paddle 目录; - > `` 指定需要使用的image名称,您可以通过`docker images`命令查看 + > `` 指定需要使用的 image 名称,您可以通过`docker images`命令查看 -至此,您已经成功使用Docker安装PaddlePaddle,更多Docker使用请参见[Docker官方文档](https://docs.docker.com) +至此,您已经成功使用 Docker 安装 PaddlePaddle,更多 
Docker 使用请参见[Docker 官方文档](https://docs.docker.com)

@@ -100,34 +100,34 @@ registry.baidubce.com/paddlepaddle/paddle:2.1.0 - 安装了2.1.0版本paddle的CPU镜像 + 安装了 2.1.0 版本 paddle 的 CPU 镜像 registry.baidubce.com/paddlepaddle/paddle:2.1.0-jupyter - 安装了2.1.0版本paddle的CPU镜像,且镜像中预装好了jupyter,启动docker即运行jupyter服务 + 安装了 2.1.0 版本 paddle 的 CPU 镜像,且镜像中预装好了 jupyter,启动 docker 即运行 jupyter 服务 registry.baidubce.com/paddlepaddle/paddle:2.1.0-gpu-cuda11.2-cudnn8 - 安装了2.1.0版本paddle的GPU镜像,cuda版本为11.2,cudnn版本为8.1 + 安装了 2.1.0 版本 paddle 的 GPU 镜像,cuda 版本为 11.2,cudnn 版本为 8.1 registry.baidubce.com/paddlepaddle/paddle:2.1.0-gpu-cuda10.2-cudnn7 - 安装了2.1.0版本paddle的GPU镜像,cuda版本为10.2,cudnn版本为7 + 安装了 2.1.0 版本 paddle 的 GPU 镜像,cuda 版本为 10.2,cudnn 版本为 7

-您可以在 [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/) 中找到PaddlePaddle的各个发行的版本的docker镜像。 +您可以在 [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags/) 中找到 PaddlePaddle 的各个发行的版本的 docker 镜像。 ### 注意事项 -* 镜像中Python版本为3.7 +* 镜像中 Python 版本为 3.7 ### 补充说明 -* 当您需要第二次进入Docker容器中,使用如下命令: +* 当您需要第二次进入 Docker 容器中,使用如下命令: 启动之前创建的容器 ``` @@ -139,15 +139,15 @@ docker attach [Name of container] ``` -* 如您是Docker新手,您可以参考互联网上的资料学习,例如[Docker教程](http://www.runoob.com/docker/docker-hello-world.html) +* 如您是 Docker 新手,您可以参考互联网上的资料学习,例如[Docker 教程](http://www.runoob.com/docker/docker-hello-world.html) ## 如何卸载 -请您进入Docker容器后,执行如下命令 +请您进入 Docker 容器后,执行如下命令 -* **CPU版本的PaddlePaddle**: +* **CPU 版本的 PaddlePaddle**: ``` pip uninstall paddlepaddle ``` -或通过`docker rm [Name of container]`来直接删除Docker容器 +或通过`docker rm [Name of container]`来直接删除 Docker 容器 diff --git a/docs/install/index_cn.rst b/docs/install/index_cn.rst index d5ca48eb684..b8f02eadd68 100644 --- a/docs/install/index_cn.rst +++ b/docs/install/index_cn.rst @@ -12,7 +12,7 @@ * 新增对 python3.9 的支持,并不再支持 python2.7 和 python3.5 * 新增对 CUDA 11.2 的支持,并不再支持 CUDA 9.0、CUDA 10.0 和 CUDA 11.0 * 新增对 ROCm 平台的支持(2.1 中飞桨对 ROCm 平台的支持是 experimental 的) -* Linux系统相关的包已被拆分为 avx 和 noavx 两种类型的包(大部分机器都使用 avx 指令集,可使用 `Linux下的PIP安装 `_ 页面中的命令查看您的机器是否支持) +* Linux 系统相关的包已被拆分为 avx 和 noavx 两种类型的包(大部分机器都使用 avx 指令集,可使用 `Linux 下的 PIP 安装 `_ 页面中的命令查看您的机器是否支持) * 新增预装好 jupyter 的 CPU 镜像,启动镜像后即启动 jupyter 服务 * 新增支持 Windows Visual Studio 2017 编译,由 VS2015 全面升级至 VS2017 @@ -21,7 +21,7 @@ 安装说明 ----------- -本说明将指导您在64位操作系统编译和安装PaddlePaddle +本说明将指导您在 64 位操作系统编译和安装 PaddlePaddle **1. 操作系统要求:** @@ -34,7 +34,7 @@ **2. 处理器要求** * 处理器支持 MKL -* 处理器架构是x86_64(或称作 x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构 +* 处理器架构是 x86_64(或称作 x64、Intel 64、AMD64)架构,目前 PaddlePaddle 不支持 arm64 架构 **3. 
Python 和 pip 版本要求:** @@ -69,37 +69,37 @@ * CentOS 7 支持 CUDA 10.1/10.2/11.2 * CentOS 6 不推荐,不提供编译出现问题时的官方支持 - * 如果您是使用 **nvidia-docker** 安装,在CentOS 7 下支持 CUDA 10.2/11.2 + * 如果您是使用 **nvidia-docker** 安装,在 CentOS 7 下支持 CUDA 10.2/11.2 * MacOS 不支持:MacOS 平台不支持 GPU 安装 -请确保您的环境满足以上条件。如您有其他需求,请参考 `多版本whl包安装列表 `_ . +请确保您的环境满足以上条件。如您有其他需求,请参考 `多版本 whl 包安装列表 `_ . **5. PaddlePaddle 对 NCCL 支持情况:** * Windows 支持情况 - * 不支持NCCL + * 不支持 NCCL * Ubuntu 支持情况 * Ubuntu 16.04: - * CUDA10.1 下支持NCCL v2.4.2-v2.4.8 + * CUDA10.1 下支持 NCCL v2.4.2-v2.4.8 * Ubuntu 18.04: - * CUDA10.1 下支持NCCL v2.4.2-v2.4.8 + * CUDA10.1 下支持 NCCL v2.4.2-v2.4.8 * CentOS 支持情况 - * CentOS 6:不支持NCCL + * CentOS 6:不支持 NCCL * CentOS 7: - * CUDA10.1 下支持NCCL v2.4.2-v2.4.8 + * CUDA10.1 下支持 NCCL v2.4.2-v2.4.8 * MacOS 支持情况 - * 不支持NCCL + * 不支持 NCCL **第一种安装方式:使用 pip 安装** -您可以选择“使用pip安装”、“使用conda安装”、“使用docker安装”、“从源码编译安装” 四种方式中的任意一种方式进行安装。 +您可以选择“使用 pip 安装”、“使用 conda 安装”、“使用 docker 安装”、“从源码编译安装” 四种方式中的任意一种方式进行安装。 本节将介绍使用 pip 的安装方式。 @@ -139,18 +139,18 @@ python -m pip --version -6. 确认 Python 和 pip 是 64 bit,并且处理器架构是x86_64(或称作 x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是 "64bit" ,第二行输出的是 "x86_64" 、 "x64" 或 "AMD64" 即可: +6. 确认 Python 和 pip 是 64 bit,并且处理器架构是 x86_64(或称作 x64、Intel 64、AMD64)架构,目前 PaddlePaddle 不支持 arm64 架构。下面的第一行输出的是 "64bit" ,第二行输出的是 "x86_64" 、 "x64" 或 "AMD64" 即可: :: python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" -7. 如果您希望使用 `pip `_ 进行安装PaddlePaddle可以直接使用以下命令: +7. 如果您希望使用 `pip `_ 进行安装 PaddlePaddle 可以直接使用以下命令: - (1). **CPU版本** :如果您只是想安装CPU版本请参考如下命令安装 + (1). **CPU 版本** :如果您只是想安装 CPU 版本请参考如下命令安装 - 安装CPU版本的命令为: + 安装 CPU 版本的命令为: :: python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple @@ -160,13 +160,13 @@ python -m pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple - (2). **GPU版本** :如果您想使用GPU版本请参考如下命令安装 + (2). 
**GPU 版本** :如果您想使用 GPU 版本请参考如下命令安装 注意: * 需要您确认您的 GPU 满足上方列出的要求 - 请注意用以下指令安装的PaddlePaddle在Windows、Ubuntu、CentOS下只支持CUDA10.2: + 请注意用以下指令安装的 PaddlePaddle 在 Windows、Ubuntu、CentOS 下只支持 CUDA10.2: :: python -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple @@ -180,23 +180,23 @@ 8. 验证安装 - 使用 python 进入python解释器,输入import paddle ,再输入 paddle.utils.run_check()。 + 使用 python 进入 python 解释器,输入 import paddle ,再输入 paddle.utils.run_check()。 如果出现 PaddlePaddle is installed successfully!,说明您已成功安装。 9. 更多帮助信息请参考: - `Linux下的PIP安装 `_ + `Linux 下的 PIP 安装 `_ - `MacOS下的PIP安装 `_ + `MacOS 下的 PIP 安装 `_ - `Windows下的PIP安装 `_ + `Windows 下的 PIP 安装 `_ **第二种安装方式:使用源代码编译安装** - 如果您只是使用 PaddlePaddle ,建议使用 **pip** 安装即可。 -- 如果您有开发PaddlePaddle的需求,请参考:`从源码编译 `_ +- 如果您有开发 PaddlePaddle 的需求,请参考:`从源码编译 `_ .. toctree:: diff --git a/docs/install/install_Kunlun_en.md b/docs/install/install_Kunlun_en.md index e908595c477..3bfbc0121b5 100644 --- a/docs/install/install_Kunlun_en.md +++ b/docs/install/install_Kunlun_en.md @@ -105,7 +105,7 @@ In addition, if there are environmental problems with the pre-built wheel packag - **CPU: Phytium,FT-2000+/64** - **OS version: Kylin release V10 (SP1)/(Tercel)-aarch64-Build04/20200711** - **Python version: 3.6/3.7 (64 bit)** -- **pip或pip3 version: 9.0.1+ (64 bit)** +- **pip 或 pip3 version: 9.0.1+ (64 bit)** - **cmake version: 3.15+** - **gcc/g++ version: 8.2+** diff --git a/docs/install/install_Kunlun_zh.md b/docs/install/install_Kunlun_zh.md index f6eb2b0a71d..313957fec02 100644 --- a/docs/install/install_Kunlun_zh.md +++ b/docs/install/install_Kunlun_zh.md @@ -1,10 +1,10 @@ -# 昆仑XPU芯片安装及运行飞桨 +# 昆仑 XPU 芯片安装及运行飞桨 -百度昆仑AI计算处理器(Baidu KUNLUN AI Computing Processor)是百度集十年AI产业技术实践于2019年推出的全功能AI芯片。基于自主研发的先进XPU架构,为云端和边缘端的人工智能业务而设计。 百度昆仑与飞桨及其他国产软硬件强强组合,打造一个全面领先的国产化AI技术生态,部署和应用于诸多 “人工智能+“的行业领域,包括智能云和高性能计算,智慧制造、智慧城市和安防等。更多昆仑XPU芯片详情及技术指标请 [点击这里](https://cloud.baidu.com/product/kunlun.html) 。 -参考以下内容可快速了解和体验昆仑XPU芯片上运行飞桨: +百度昆仑 AI 计算处理器(Baidu KUNLUN AI Computing 
Processor)是百度集十年 AI 产业技术实践于 2019 年推出的全功能 AI 芯片。基于自主研发的先进 XPU 架构,为云端和边缘端的人工智能业务而设计。 百度昆仑与飞桨及其他国产软硬件强强组合,打造一个全面领先的国产化 AI 技术生态,部署和应用于诸多 “人工智能+“的行业领域,包括智能云和高性能计算,智慧制造、智慧城市和安防等。更多昆仑 XPU 芯片详情及技术指标请 [点击这里](https://cloud.baidu.com/product/kunlun.html) 。 +参考以下内容可快速了解和体验昆仑 XPU 芯片上运行飞桨: -- [飞桨对昆仑XPU芯片的支持](../guides/hardware_support/xpu_docs/paddle_2.0_xpu_cn.html) -- [飞桨框架昆仑XPU版安装说明](../guides/hardware_support/xpu_docs/paddle_install_cn.html) -- [飞桨框架昆仑XPU版训练示例](../guides/hardware_support/xpu_docs/train_example_cn.html) -- [飞桨预测库昆仑XPU版安装及使用示例](../guides/hardware_support/xpu_docs/inference_install_example_cn.html) +- [飞桨对昆仑 XPU 芯片的支持](../guides/hardware_support/xpu_docs/paddle_2.0_xpu_cn.html) +- [飞桨框架昆仑 XPU 版安装说明](../guides/hardware_support/xpu_docs/paddle_install_cn.html) +- [飞桨框架昆仑 XPU 版训练示例](../guides/hardware_support/xpu_docs/train_example_cn.html) +- [飞桨预测库昆仑 XPU 版安装及使用示例](../guides/hardware_support/xpu_docs/inference_install_example_cn.html) diff --git a/docs/install/install_ROCM_zh.md b/docs/install/install_ROCM_zh.md index 6a10d431f69..ac576d448de 100644 --- a/docs/install/install_ROCM_zh.md +++ b/docs/install/install_ROCM_zh.md @@ -1,11 +1,11 @@ -# 海光DCU芯片运行飞桨 +# 海光 DCU 芯片运行飞桨 -DCU(Deep Computing Unit 深度计算器)是 海光(HYGON)推出的一款专门用户AI人工智能和深度学习的加速卡。Paddle ROCm版当前可以支持在海光CPU与DCU上进行模型训练与推理。 +DCU(Deep Computing Unit 深度计算器)是 海光(HYGON)推出的一款专门用户 AI 人工智能和深度学习的加速卡。Paddle ROCm 版当前可以支持在海光 CPU 与 DCU 上进行模型训练与推理。 参考以下内容可快速了解和体验在海光芯片上运行飞桨: -- [飞桨框架ROCm版支持模型](../guides/hardware_support/rocm_docs/paddle_rocm_cn.html) -- [飞桨框架ROCm版安装说明](../guides/hardware_support/rocm_docs/paddle_install_cn.html) -- [飞桨框架ROCm版训练示例](../guides/hardware_support/rocm_docs/train_example_cn.html) -- [飞桨框架ROCm版预测示例](../guides/hardware_support/rocm_docs/infer_example_cn.html) +- [飞桨框架 ROCm 版支持模型](../guides/hardware_support/rocm_docs/paddle_rocm_cn.html) +- [飞桨框架 ROCm 版安装说明](../guides/hardware_support/rocm_docs/paddle_install_cn.html) +- [飞桨框架 ROCm 版训练示例](../guides/hardware_support/rocm_docs/train_example_cn.html) 
+- [飞桨框架 ROCm 版预测示例](../guides/hardware_support/rocm_docs/infer_example_cn.html) diff --git a/docs/install/install_script.md b/docs/install/install_script.md index 34809aead08..72300a97a7e 100644 --- a/docs/install/install_script.md +++ b/docs/install/install_script.md @@ -4,49 +4,49 @@ 下载脚本至本地后,使用命令`/bin/bash fast_install.sh`启动脚本 -### Ubuntu和CentOS +### Ubuntu 和 CentOS 脚本会执行以下几步: -1. GPU检测 +1. GPU 检测 - 检测您的机器是否含有我们支持的GPU,如果有,会安装GPU版本的PaddlePaddle,否则会安装CPU版本。 - (PaddlePaddle目前支持NVIDIA[官网](https://developer.nvidia.com/cuda-gpus#collapseOne)列出的,算力7.0以下的GPU和v100系列的GPU) + 检测您的机器是否含有我们支持的 GPU,如果有,会安装 GPU 版本的 PaddlePaddle,否则会安装 CPU 版本。 + (PaddlePaddle 目前支持 NVIDIA[官网](https://developer.nvidia.com/cuda-gpus#collapseOne)列出的,算力 7.0 以下的 GPU 和 v100 系列的 GPU) -2. CUDA,cuDNN检测 +2. CUDA,cuDNN 检测 - 检测您的机器是否安装我们支持的CUDA,cuDNN,具体地: + 检测您的机器是否安装我们支持的 CUDA,cuDNN,具体地: - 1. 在`/usr/local/` 及其子目录下寻找 `cuda10.1/cuda10.2/cuda11.0/cuda11.2` 目录下的`version.txt`文件(通常如果您以默认方式安装了CUDA)。 如果提示未找到CUDA请使用命令`find / -name version.txt`找到您所需要的CUDA目录下的“version.txt”路径,然后按照提示输入。 - 2. 在`/usr` 及其子目录下寻找文件 `cudnn.h` , 如果您的cuDNN未安装在默认路径请使用命令`find / -name cudnn.h`寻找您希望使用的cuDNN版本的`cudnn.h`路径并按提示输入 + 1. 在`/usr/local/` 及其子目录下寻找 `cuda10.1/cuda10.2/cuda11.0/cuda11.2` 目录下的`version.txt`文件(通常如果您以默认方式安装了 CUDA)。 如果提示未找到 CUDA 请使用命令`find / -name version.txt`找到您所需要的 CUDA 目录下的“version.txt”路径,然后按照提示输入。 + 2. 在`/usr` 及其子目录下寻找文件 `cudnn.h` , 如果您的 cuDNN 未安装在默认路径请使用命令`find / -name cudnn.h`寻找您希望使用的 cuDNN 版本的`cudnn.h`路径并按提示输入 - 如果未找到相应文件,则会安装CPU版本的PaddlePaddle + 如果未找到相应文件,则会安装 CPU 版本的 PaddlePaddle 3. 选择数学库 -脚本默认会为您安装支持[MKL](https://software.intel.com/en-us/mkl)数学库的PaddlePaddle,如果您的机器不支持`MKL`,请选择安装支持[OPENBLAS](https://www.openblas.net)的PaddlePaddle +脚本默认会为您安装支持[MKL](https://software.intel.com/en-us/mkl)数学库的 PaddlePaddle,如果您的机器不支持`MKL`,请选择安装支持[OPENBLAS](https://www.openblas.net)的 PaddlePaddle -4. 选择PaddlePaddle版本 -我们为您提供2种版本:开发版和稳定版,推荐您选择测试验证过的稳定版 +4. 选择 PaddlePaddle 版本 +我们为您提供 2 种版本:开发版和稳定版,推荐您选择测试验证过的稳定版 -5. 
选择Python版本 -脚本默认会使用您机器中的Python,您也可以输入您希望使用的Python的路径 +5. 选择 Python 版本 +脚本默认会使用您机器中的 Python,您也可以输入您希望使用的 Python 的路径 -6. 检查[AVX](https://zh.wikipedia.org/zh-hans/AVX指令集)指令集 +6. 检查[AVX](https://zh.wikipedia.org/zh-hans/AVX指令集)指令集 7. 使用[Python virtualenv](https://virtualenv.pypa.io/en/latest/) -脚本也支持按您的需求创建Python的虚拟环境 +脚本也支持按您的需求创建 Python 的虚拟环境 -以上检查完成后就会为您安装对应您系统的PaddlePaddle了,安装一般需要1~2分钟会根据您的网络来决定,请您耐心等待。 +以上检查完成后就会为您安装对应您系统的 PaddlePaddle 了,安装一般需要 1~2 分钟会根据您的网络来决定,请您耐心等待。 ### macOS 脚本会执行以下几步: -1. 选择PaddlePaddle版本 -我们为您提供2种版本:开发版和稳定版,推荐您选择测试验证过的稳定版 +1. 选择 PaddlePaddle 版本 +我们为您提供 2 种版本:开发版和稳定版,推荐您选择测试验证过的稳定版 -2. 检查Python版本 +2. 检查 Python 版本 由于 macOS 自带的 Python 通常依赖于系统环境,因此我们不支持 macOS 自带的 Python 环境,请重新从 Python.org 安装 Python,然后根据提示输入您希望使用的 Python 的路径 -3. 检查是否支持[AVX](https://zh.wikipedia.org/zh-hans/AVX指令集)指令集 +3. 检查是否支持[AVX](https://zh.wikipedia.org/zh-hans/AVX指令集)指令集 diff --git a/docs/install/pip/frompip.rst b/docs/install/pip/frompip.rst index 85e089a9c88..931460df602 100644 --- a/docs/install/pip/frompip.rst +++ b/docs/install/pip/frompip.rst @@ -1,5 +1,5 @@ =========================== -**Pip安装** +**Pip 安装** =========================== .. 
toctree:: diff --git a/docs/install/pip/linux-pip.md b/docs/install/pip/linux-pip.md index e1559ac074f..5917db8375c 100644 --- a/docs/install/pip/linux-pip.md +++ b/docs/install/pip/linux-pip.md @@ -1,20 +1,20 @@ -# Linux下的PIP安装 +# Linux 下的 PIP 安装 ## 一、环境准备 -### 1.1目前飞桨支持的环境 +### 1.1 目前飞桨支持的环境 * **Linux 版本 (64 bit)** - * **CentOS 7 (GPU版本支持CUDA 10.1/10.2/11.2)** + * **CentOS 7 (GPU 版本支持 CUDA 10.1/10.2/11.2)** * **Ubuntu 16.04 (GPU 版本支持 CUDA 10.1/10.2/11.2)** * **Ubuntu 18.04 (GPU 版本支持 CUDA 10.1/10.2/11.2)** * **Python 版本 3.6/3.7/3.8/3.9 (64 bit)** -* **pip 或 pip3 版本 20.2.2或更高版本 (64 bit)** +* **pip 或 pip3 版本 20.2.2 或更高版本 (64 bit)** -### 1.2如何查看您的环境 +### 1.2 如何查看您的环境 * 可以使用以下命令查看本机的操作系统和位数信息: @@ -33,13 +33,13 @@ ``` -* 需要确认python的版本是否满足要求 +* 需要确认 python 的版本是否满足要求 * 使用以下命令确认是 3.6/3.7/3.8/3.9 python --version -* 需要确认pip的版本是否满足要求,要求pip版本为20.2.2或更高版本 +* 需要确认 pip 的版本是否满足要求,要求 pip 版本为 20.2.2 或更高版本 ``` python -m ensurepip @@ -51,7 +51,7 @@ -* 需要确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: +* 需要确认 Python 和 pip 是 64bit,并且处理器架构是 x86_64(或称作 x64、Intel 64、AMD64)架构,目前 PaddlePaddle 不支持 arm64 架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: ``` @@ -60,7 +60,7 @@ -* 默认提供的安装包需要计算机支持MKL +* 默认提供的安装包需要计算机支持 MKL * 如果您对机器环境不了解,请下载使用[快速安装脚本](https://fast-install.bj.bcebos.com/fast_install.sh),配套说明请参考[这里](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md)。 @@ -68,23 +68,23 @@ ## 二、开始安装 -本文档为您介绍pip安装方式 +本文档为您介绍 pip 安装方式 ### 首先请您选择您的版本 -* 如果您的计算机没有 NVIDIA® GPU,请安装[CPU版的PaddlePaddle](#cpu) +* 如果您的计算机没有 NVIDIA® GPU,请安装[CPU 版的 PaddlePaddle](#cpu) -* 如果您的计算机有NVIDIA® GPU,请确保满足以下条件并且安装[GPU版PaddlePaddle](#gpu) +* 如果您的计算机有 NVIDIA® GPU,请确保满足以下条件并且安装[GPU 版 PaddlePaddle](#gpu) - * **CUDA 工具包10.1/10.2配合cuDNN v7.6+(如需多卡支持,需配合NCCL2.7及更高)** + * **CUDA 工具包 10.1/10.2 配合 cuDNN v7.6+(如需多卡支持,需配合 NCCL2.7 及更高)** - * **CUDA 工具包11.2配合cuDNN v8.1.1(如需多卡支持,需配合NCCL2.7及更高)** + * **CUDA 工具包 
11.2 配合 cuDNN v8.1.1(如需多卡支持,需配合 NCCL2.7 及更高)** - * **GPU运算能力超过3.5的硬件设备** + * **GPU 运算能力超过 3.5 的硬件设备** - 您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + 您可参考 NVIDIA 官方文档了解 CUDA 和 CUDNN 的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) -* 如果您需要使用多卡环境请确保您已经正确安装nccl2,或者按照以下指令安装nccl2(这里提供的是CUDA9,cuDNN7下nccl2的安装指令,更多版本的安装信息请参考NVIDIA[官方网站](https://developer.nvidia.com/nccl)): +* 如果您需要使用多卡环境请确保您已经正确安装 nccl2,或者按照以下指令安装 nccl2(这里提供的是 CUDA9,cuDNN7 下 nccl2 的安装指令,更多版本的安装信息请参考 NVIDIA[官方网站](https://developer.nvidia.com/nccl)): * **CentOS 系统可以参考以下命令** @@ -117,7 +117,7 @@ ``` -#### 2.1 CPU版的PaddlePaddle +#### 2.1 CPU 版的 PaddlePaddle ``` @@ -126,11 +126,11 @@ -#### 2.2 GPU版的PaddlePaddle +#### 2.2 GPU 版的 PaddlePaddle -2.2.1 CUDA10.1的PaddlePaddle +2.2.1 CUDA10.1 的 PaddlePaddle ``` python -m pip install paddlepaddle-gpu==0.0.0.post101 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html @@ -138,21 +138,21 @@ -2.2.2 CUDA10.2的PaddlePaddle +2.2.2 CUDA10.2 的 PaddlePaddle ``` python -m pip install paddlepaddle-gpu==0.0.0.post102 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html ``` -2.2.3 CUDA11.0的PaddlePaddle +2.2.3 CUDA11.0 的 PaddlePaddle ``` python -m pip install paddlepaddle-gpu==0.0.0.post110 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html ``` -2.2.4 CUDA11.1的PaddlePaddle +2.2.4 CUDA11.1 的 PaddlePaddle ``` @@ -160,7 +160,7 @@ ``` -2.2.5 CUDA11.2的PaddlePaddle +2.2.5 CUDA11.2 的 PaddlePaddle ``` @@ -171,7 +171,7 @@ 注: -* 如果你使用的是安培架构的GPU,推荐使用CUDA11.2。如果你使用的是非安培架构的GPU,推荐使用CUDA10.2,性能更优。 +* 如果你使用的是安培架构的 GPU,推荐使用 CUDA11.2。如果你使用的是非安培架构的 GPU,推荐使用 CUDA10.2,性能更优。 * 请确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python。根据您的环境您可能需要将说明中所有命令行中的 python 替换为 python3 或者替换为具体的 Python 路径。 @@ -180,7 +180,7 @@ ## **三、验证安装** -安装完成后您可以使用 `python` 或 
`python3` 进入python解释器,输入`import paddle` ,再输入 +安装完成后您可以使用 `python` 或 `python3` 进入 python 解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 @@ -188,8 +188,8 @@ ## **四、如何卸载** -请使用以下命令卸载PaddlePaddle: +请使用以下命令卸载 PaddlePaddle: -* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` +* **CPU 版本的 PaddlePaddle**: `python -m pip uninstall paddlepaddle` -* **GPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` +* **GPU 版本的 PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` diff --git a/docs/install/pip/macos-pip.md b/docs/install/pip/macos-pip.md index e29688c9ae8..529a87d8661 100644 --- a/docs/install/pip/macos-pip.md +++ b/docs/install/pip/macos-pip.md @@ -1,17 +1,17 @@ -# macOS 下的PIP安装 +# macOS 下的 PIP 安装 ## 一、环境准备 -### 1.1目前飞桨支持的环境 +### 1.1 目前飞桨支持的环境 -* **macOS 版本 10.11/10.12/10.13/10.14 (64 bit) (不支持GPU版本)** +* **macOS 版本 10.11/10.12/10.13/10.14 (64 bit) (不支持 GPU 版本)** * **Python 版本 3.6/3.7/3.8/3.9 (64 bit)** -* **pip 或 pip3 版本 20.2.2或更高版本 (64 bit)** +* **pip 或 pip3 版本 20.2.2 或更高版本 (64 bit)** -### 1.2如何查看您的环境 +### 1.2 如何查看您的环境 * 可以使用以下命令查看本机的操作系统和位数信息: @@ -31,7 +31,7 @@ -* 需要确认python的版本是否满足要求 +* 需要确认 python 的版本是否满足要求 * 使用以下命令确认是 3.6/3.7/3.8/3.9 @@ -39,7 +39,7 @@ python --version ``` -* 需要确认pip的版本是否满足要求,要求pip版本为20.2.2或更高版本 +* 需要确认 pip 的版本是否满足要求,要求 pip 版本为 20.2.2 或更高版本 ``` @@ -52,7 +52,7 @@ -* 需要确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: +* 需要确认 Python 和 pip 是 64bit,并且处理器架构是 x86_64(或称作 x64、Intel 64、AMD64)架构,目前 PaddlePaddle 不支持 arm64 架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: ``` python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" @@ -60,7 +60,7 @@ -* 默认提供的安装包需要计算机支持MKL +* 默认提供的安装包需要计算机支持 MKL * 
如果您对机器环境不了解,请下载使用[快速安装脚本](https://fast-install.bj.bcebos.com/fast_install.sh),配套说明请参考[这里](https://github.com/PaddlePaddle/FluidDoc/tree/develop/doc/fluid/install/install_script.md)。 @@ -68,7 +68,7 @@ ## 二、开始安装 -本文档为您介绍pip安装方式 +本文档为您介绍 pip 安装方式 ### 首先请您选择您的版本 @@ -77,7 +77,7 @@ ### 根据版本进行安装 -确定您的环境满足条件后可以开始安装了,选择下面您要安装的PaddlePaddle +确定您的环境满足条件后可以开始安装了,选择下面您要安装的 PaddlePaddle ``` @@ -86,20 +86,20 @@ * 注: -* macOS上您需要安装 unrar 以支持 PaddlePaddle,可以使用命令 `brew install rar` +* macOS 上您需要安装 unrar 以支持 PaddlePaddle,可以使用命令 `brew install rar` * 请确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python。根据您的环境您可能需要将说明中所有命令行中的 python 替换为具体的 Python 路径。 * 默认下载最新稳定版的安装包,如需获取开发版安装包,请参考[这里](https://www.paddlepaddle.org.cn/install/quick/zh/1.8.5-windows-pip) -* 使用 macOS 中自带Python可能会导致安装失败。请使用[Python.org](https://www.python.org/downloads/mac-osx/)提供的python3.6.x、python3.7.x、python3.8.x 或python3.9.x。 +* 使用 macOS 中自带 Python 可能会导致安装失败。请使用[Python.org](https://www.python.org/downloads/mac-osx/)提供的 python3.6.x、python3.7.x、python3.8.x 或 python3.9.x。 ## **三、验证安装** -安装完成后您可以使用 `python` 进入python解释器,输入`import paddle` ,再输入 +安装完成后您可以使用 `python` 进入 python 解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 ## **四、如何卸载** -请使用以下命令卸载PaddlePaddle: +请使用以下命令卸载 PaddlePaddle: * `python -m pip uninstall paddlepaddle` diff --git a/docs/install/pip/windows-pip.md b/docs/install/pip/windows-pip.md index 6b8c9cc5171..361ff8a0653 100644 --- a/docs/install/pip/windows-pip.md +++ b/docs/install/pip/windows-pip.md @@ -1,17 +1,17 @@ -# Windows下的PIP安装 +# Windows 下的 PIP 安装 ## 一、环境准备 -### 1.1目前飞桨支持的环境 +### 1.1 目前飞桨支持的环境 * **Windows 7/8/10 专业版/企业版 (64bit)** -* **GPU版本支持CUDA 10.1/10.2/11.0/11.1/11.2,且仅支持单卡** +* **GPU 版本支持 CUDA 10.1/10.2/11.0/11.1/11.2,且仅支持单卡** * **Python 版本 3.6+/3.7+/3.8+/3.9+ (64 bit)** -* **pip 版本 20.2.2或更高版本 (64 bit)** +* **pip 版本 20.2.2 或更高版本 (64 bit)** -### 1.2如何查看您的环境 +### 1.2 如何查看您的环境 -* 需要确认python的版本是否满足要求 +* 需要确认 python 的版本是否满足要求 * 使用以下命令确认是 
3.6/3.7/3.8/3.9 @@ -19,7 +19,7 @@ python --version ``` -* 需要确认pip的版本是否满足要求,要求pip版本为20.2.2或更高版本 +* 需要确认 pip 的版本是否满足要求,要求 pip 版本为 20.2.2 或更高版本 ``` python -m ensurepip @@ -30,28 +30,28 @@ ``` -* 需要确认Python和pip是64bit,并且处理器架构是x86_64(或称作x64、Intel 64、AMD64)架构,目前PaddlePaddle不支持arm64架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: +* 需要确认 Python 和 pip 是 64bit,并且处理器架构是 x86_64(或称作 x64、Intel 64、AMD64)架构,目前 PaddlePaddle 不支持 arm64 架构。下面的第一行输出的是"64bit",第二行输出的是"x86_64"、"x64"或"AMD64"即可: ``` python -c "import platform;print(platform.architecture()[0]);print(platform.machine())" ``` -* 默认提供的安装包需要计算机支持MKL -* Windows暂不支持NCCL,分布式等相关功能 +* 默认提供的安装包需要计算机支持 MKL +* Windows 暂不支持 NCCL,分布式等相关功能 ## 二、开始安装 -本文档为您介绍pip安装方式 +本文档为您介绍 pip 安装方式 ### 首先请您选择您的版本 -* 如果您的计算机没有 NVIDIA® GPU,请安装[CPU版的PaddlePaddle](#cpu) +* 如果您的计算机没有 NVIDIA® GPU,请安装[CPU 版的 PaddlePaddle](#cpu) -* 如果您的计算机有NVIDIA® GPU,请确保满足以下条件并且安装GPU版PaddlePaddle +* 如果您的计算机有 NVIDIA® GPU,请确保满足以下条件并且安装 GPU 版 PaddlePaddle - * **CUDA 工具包10.1/10.2 配合 cuDNN v7.6.5+** + * **CUDA 工具包 10.1/10.2 配合 cuDNN v7.6.5+** * **CUDA 工具包 11.0 配合 cuDNN v8.0.2** @@ -59,29 +59,29 @@ * **CUDA 工具包 11.2 配合 cuDNN v8.2.1** - * **GPU运算能力超过3.5的硬件设备** + * **GPU 运算能力超过 3.5 的硬件设备** - * 注:目前官方发布的windows安装包仅包含 CUDA 10.1/10.2/11.0/11.1/11.2,如需使用其他cuda版本,请通过源码自行编译。您可参考NVIDIA官方文档了解CUDA和CUDNN的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) + * 注:目前官方发布的 windows 安装包仅包含 CUDA 10.1/10.2/11.0/11.1/11.2,如需使用其他 cuda 版本,请通过源码自行编译。您可参考 NVIDIA 官方文档了解 CUDA 和 CUDNN 的安装流程和配置方法,请见[CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html),[cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) ### 根据版本进行安装 -确定您的环境满足条件后可以开始安装了,选择下面您要安装的PaddlePaddle +确定您的环境满足条件后可以开始安装了,选择下面您要安装的 PaddlePaddle -#### 2.1 CPU版的PaddlePaddle +#### 2.1 CPU 版的 PaddlePaddle ``` python -m pip install paddlepaddle==0.0.0 -f 
https://www.paddlepaddle.org.cn/whl/windows/cpu-mkl-avx/develop.html ``` -#### 2.2 GPU版的PaddlePaddle +#### 2.2 GPU 版的 PaddlePaddle -2.2.1 CUDA10.1的PaddlePaddle +2.2.1 CUDA10.1 的 PaddlePaddle ``` @@ -89,7 +89,7 @@ ``` -2.2.2 CUDA10.2的PaddlePaddle +2.2.2 CUDA10.2 的 PaddlePaddle ``` @@ -97,7 +97,7 @@ ``` -2.2.3 CUDA11.0的PaddlePaddle +2.2.3 CUDA11.0 的 PaddlePaddle ``` @@ -105,7 +105,7 @@ ``` -2.2.4 CUDA11.1的PaddlePaddle +2.2.4 CUDA11.1 的 PaddlePaddle ``` @@ -113,7 +113,7 @@ ``` -2.2.5 CUDA11.2的PaddlePaddle +2.2.5 CUDA11.2 的 PaddlePaddle ``` python -m pip install paddlepaddle-gpu==0.0.0.post112 -f https://www.paddlepaddle.org.cn/whl/windows/gpu/develop.html @@ -122,25 +122,25 @@ 注: -* 如果你使用的是安培架构的GPU,推荐使用CUDA11.2。如果你使用的是非安培架构的GPU,推荐使用CUDA10.2,性能更优。 +* 如果你使用的是安培架构的 GPU,推荐使用 CUDA11.2。如果你使用的是非安培架构的 GPU,推荐使用 CUDA10.2,性能更优。 -* 请确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python。根据您的环境,可能需要将上述命令行中所有 `python` 替换为具体的 `Python解释器` 路径(例如C:\Python37\python.exe)。 +* 请确认需要安装 PaddlePaddle 的 Python 是您预期的位置,因为您计算机可能有多个 Python。根据您的环境,可能需要将上述命令行中所有 `python` 替换为具体的 `Python 解释器` 路径(例如 C:\Python37\python.exe)。 ## **三、验证安装** -安装完成后您可以使用 `python` 进入python解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` +安装完成后您可以使用 `python` 进入 python 解释器,输入`import paddle` ,再输入 `paddle.utils.run_check()` 如果出现`PaddlePaddle is installed successfully!`,说明您已成功安装。 注: -* 由于飞桨使用Visual Studio进行编译,使用时需要操作系统自带Visual C++运行时库,大部分情况下Windows系统已默认自带,但对于某些纯净版系统可能未安装,若 `import paddle` 后出现 `DLL load failed` 报错,请下载 https://aka.ms/vs/17/release/vc_redist.x64.exe 安装后再次尝试。 +* 由于飞桨使用 Visual Studio 进行编译,使用时需要操作系统自带 Visual C++运行时库,大部分情况下 Windows 系统已默认自带,但对于某些纯净版系统可能未安装,若 `import paddle` 后出现 `DLL load failed` 报错,请下载 https://aka.ms/vs/17/release/vc_redist.x64.exe 安装后再次尝试。 ## **四、如何卸载** -请使用以下命令卸载PaddlePaddle: +请使用以下命令卸载 PaddlePaddle: -* **CPU版本的PaddlePaddle**: `python -m pip uninstall paddlepaddle` +* **CPU 版本的 PaddlePaddle**: `python -m pip uninstall paddlepaddle` -* **GPU版本的PaddlePaddle**: `python -m pip uninstall 
paddlepaddle-gpu` +* **GPU 版本的 PaddlePaddle**: `python -m pip uninstall paddlepaddle-gpu` diff --git a/docs/practices/cv/index_cn.rst b/docs/practices/cv/index_cn.rst index e28fd70d3a4..ef5e45e0189 100644 --- a/docs/practices/cv/index_cn.rst +++ b/docs/practices/cv/index_cn.rst @@ -5,10 +5,10 @@ 这里提供了一些计算机视觉的案例: - - `图像分类 <./image_classification.html>`_ :介绍使用 PaddlePaddle 在MNIST数据集上完成图像分类。 - - `图像分类 <./convnet_image_classification.html>`_ :介绍使用 PaddlePaddle 在Cifar10数据集上完成图像分类。 + - `图像分类 <./image_classification.html>`_ :介绍使用 PaddlePaddle 在 MNIST 数据集上完成图像分类。 + - `图像分类 <./convnet_image_classification.html>`_ :介绍使用 PaddlePaddle 在 Cifar10 数据集上完成图像分类。 - `以图搜图 <./image_search.html>`_ : 介绍使用 PaddlePaddle 实现以图搜图。 - - `图像分割 <./image_segmentation.html>`_ : 介绍使用 PaddlePaddle 实现U-Net模型完成图像分割。 + - `图像分割 <./image_segmentation.html>`_ : 介绍使用 PaddlePaddle 实现 U-Net 模型完成图像分割。 - `OCR <./image_ocr.html>`_ : 介绍使用 PaddlePaddle 实现 OCR。 - `图像超分 <./super_resolution_sub_pixel.html>`_ : 介绍使用 PaddlePaddle 完成图像超分。 - `人脸关键点检测 <./landmark_detection.html>`_ : 介绍使用 PaddlePaddle 完成人脸关键点检测。 diff --git a/docs/practices/index_cn.rst b/docs/practices/index_cn.rst index 650f84f6217..5e0938b74e5 100644 --- a/docs/practices/index_cn.rst +++ b/docs/practices/index_cn.rst @@ -9,16 +9,16 @@ - `hello paddle <./quick_start/hello_paddle.html>`_ :简单介绍 PaddlePaddle,完成你的第一个 PaddlePaddle 项目。 - `动态图 <./quick_start/dynamic_graph.html>`_ :介绍使用 PaddlePaddle 动态图。 - - `高层API详细介绍 <./quick_start/high_level_api.html>`_ :详细介绍 PaddlePaddle 高层API。 + - `高层 API 详细介绍 <./quick_start/high_level_api.html>`_ :详细介绍 PaddlePaddle 高层 API。 - `模型加载与保存 <./quick_start/save_model.html>`_ :介绍 PaddlePaddle 模型的加载与保存。 - `线性回归 <./quick_start/linear_regression.html>`_ :介绍使用 PaddlePaddle 实现线性回归任务。 计算机视觉: - - `图像分类 <./cv/image_classification.html>`_ :介绍使用 PaddlePaddle 在MNIST数据集上完成图像分类。 - - `图像分类 <./cv/convnet_image_classification.html>`_ :介绍使用 PaddlePaddle 在Cifar10数据集上完成图像分类。 + - `图像分类 <./cv/image_classification.html>`_ :介绍使用 PaddlePaddle 在 MNIST 
数据集上完成图像分类。 + - `图像分类 <./cv/convnet_image_classification.html>`_ :介绍使用 PaddlePaddle 在 Cifar10 数据集上完成图像分类。 - `以图搜图 <./cv/image_search.html>`_ : 介绍使用 PaddlePaddle 实现以图搜图。 - - `图像分割 <./cv/image_segmentation.html>`_ : 介绍使用 PaddlePaddle 实现U-Net模型完成图像分割。 + - `图像分割 <./cv/image_segmentation.html>`_ : 介绍使用 PaddlePaddle 实现 U-Net 模型完成图像分割。 - `OCR <./cv/image_ocr.html>`_ : 介绍使用 PaddlePaddle 实现 OCR。 - `图像超分 <./cv/super_resolution_sub_pixel.html>`_ : 介绍使用 PaddlePaddle 完成图像超分。 - `人脸关键点检测 <./cv/landmark_detection.html>`_ : 介绍使用 PaddlePaddle 完成人脸关键点检测。 @@ -26,8 +26,8 @@ 自然语言处理: - - `N-Gram <./nlp/n_gram_model.html>`_ :介绍使用 PaddlePaddle 实现N-Gram 模型。 - - `文本分类 <./nlp/imdb_bow_classification.html>`_ :介绍使用 PaddlePaddle 在IMDB数据集上完成文本分类。 + - `N-Gram <./nlp/n_gram_model.html>`_ :介绍使用 PaddlePaddle 实现 N-Gram 模型。 + - `文本分类 <./nlp/imdb_bow_classification.html>`_ :介绍使用 PaddlePaddle 在 IMDB 数据集上完成文本分类。 - `情感分类 <./nlp/pretrained_word_embeddings.html>`_ :介绍使用预训练词向量完成情感分类。 - `文本翻译 <./nlp/seq2seq_with_attention.html>`_ :介绍使用 PaddlePaddle 实现文本翻译。 - `数字加法 <./nlp/addition_rnn.html>`_ : 介绍使用 PaddlePaddle 实现数字加法。 diff --git a/docs/practices/nlp/index_cn.rst b/docs/practices/nlp/index_cn.rst index 821630b415d..2ac7319bab0 100644 --- a/docs/practices/nlp/index_cn.rst +++ b/docs/practices/nlp/index_cn.rst @@ -5,8 +5,8 @@ 这里提供了一些自然语言处理的示例: - - `N-Gram <./n_gram_model.html>`_ :介绍使用 PaddlePaddle 实现N-Gram 模型。 - - `文本分类 <./imdb_bow_classification.html>`_ :介绍使用 PaddlePaddle 在IMDB数据集上完成文本分类。 + - `N-Gram <./n_gram_model.html>`_ :介绍使用 PaddlePaddle 实现 N-Gram 模型。 + - `文本分类 <./imdb_bow_classification.html>`_ :介绍使用 PaddlePaddle 在 IMDB 数据集上完成文本分类。 - `情感分类 <./pretrained_word_embeddings.html>`_ :介绍使用预训练词向量完成情感分类。 - `文本翻译 <./seq2seq_with_attention.html>`_ :介绍使用 PaddlePaddle 实现文本翻译。 - `数字加法 <./addition_rnn.html>`_ : 介绍使用 PaddlePaddle 实现数字加法。 diff --git a/docs/practices/quick_start/index_cn.rst b/docs/practices/quick_start/index_cn.rst index 26123949dc0..f2c1621c2ba 100644 --- a/docs/practices/quick_start/index_cn.rst +++ 
b/docs/practices/quick_start/index_cn.rst @@ -8,7 +8,7 @@ - `hello paddle <./hello_paddle.html>`_ :简单介绍 PaddlePaddle,完成你的第一个 PaddlePaddle 项目。 - `动态图 <./dynamic_graph.html>`_ :介绍使用 PaddlePaddle 动态图。 - - `高层API详细介绍 <./high_level_api.html>`_ :详细介绍 PaddlePaddle 高层API。 + - `高层 API 详细介绍 <./high_level_api.html>`_ :详细介绍 PaddlePaddle 高层 API。 - `模型加载与保存 <./save_model.html>`_ :介绍 PaddlePaddle 模型的加载与保存。 - `线性回归 <./linear_regression.html>`_ :介绍使用 PaddlePaddle 实现线性回归任务。 diff --git a/docs/release_note_cn.md b/docs/release_note_cn.md index 3b2dcd3cb0c..296aceb50e4 100644 --- a/docs/release_note_cn.md +++ b/docs/release_note_cn.md @@ -41,7 +41,7 @@ ### (2)性能优化 - 优化`paddle.incubate.nn.functional.fused_attention`和`paddle.incubate.nn.functional.fused_feedforward`算子,增加`add_residual`属性,用以控制最后一步是否进行加`residual`操作,CAE 模型性能提升 7.7%。([#43719](https://github.com/PaddlePaddle/Paddle/pull/43719)) -- 优化 `linspace` 算子,将 `start`、`stop`、`num`三个输入 Tensor 初始化在 CPU 上,避免在算子中进行 GPU -> CPU 拷贝,SOLOv2 模型性能提升6%。([#43746](https://github.com/PaddlePaddle/Paddle/pull/43746)) +- 优化 `linspace` 算子,将 `start`、`stop`、`num`三个输入 Tensor 初始化在 CPU 上,避免在算子中进行 GPU -> CPU 拷贝,SOLOv2 模型性能提升 6%。([#43746](https://github.com/PaddlePaddle/Paddle/pull/43746)) ### (3)问题修复 @@ -77,7 +77,7 @@ ### (2)底层优化 -#### CPU性能优化 +#### CPU 性能优化 - EnableMkldnn 配置中移除 `gpu_cpu_reshape2_matmul_fuse_pass`,修复 ResNet50 性能下降的问题。 ([#43750](https://github.com/PaddlePaddle/Paddle/pull/43750)) @@ -89,10 +89,10 @@ ### (3)问题修复 -#### 框架及API修复 +#### 框架及 API 修复 - 修复联编 Paddle-Lite XPU 时的编译报错问题。([#43178](https://github.com/PaddlePaddle/Paddle/pull/43178)) -- 修复 ERNIE 3.0 pass误触发的问题。([#43948](https://github.com/PaddlePaddle/Paddle/pull/43948)) +- 修复 ERNIE 3.0 pass 误触发的问题。([#43948](https://github.com/PaddlePaddle/Paddle/pull/43948)) - 修复 multihead op 中 int8 量化属性读不到的问题。([#43020](https://github.com/PaddlePaddle/Paddle/pull/43020)) #### 后端能力修复 @@ -137,15 +137,15 @@ ### 飞桨高可复用算子库 PHI -- 发布飞桨高可复用算子库 PHI (Paddle HIgh reusability operator 
library),支持组合式算子功能复用、Primitive算子内核复用、插件式硬件加速库复用。针对飞桨框架原算子库存在的算子接口不清晰、算子复用成本较高、调用性能不够快的问题,我们重构了飞桨框架的算子库,设计了灵活、高效的函数式算子库 Phi,可以通过对函数式算子接口组合调用的方式实现新算子。新算子库提供了 200 余个跟 python 开发接口保持一致的 C++ 运算类 API,以及近500个可供组合调用的前、反向函数式算子内核 Kernel,可大幅降低框架原生算子和自定义算子的开发成本。新算子库支持Primitive API方式开发算子内核,可支持不同硬件(比如GPU和XPU)的算子内核复用。新算子库支持以插件方式接入硬件(比如NPU)的加速库,实现低成本复用硬件加速库。 +- 发布飞桨高可复用算子库 PHI (Paddle HIgh reusability operator library),支持组合式算子功能复用、Primitive 算子内核复用、插件式硬件加速库复用。针对飞桨框架原算子库存在的算子接口不清晰、算子复用成本较高、调用性能不够快的问题,我们重构了飞桨框架的算子库,设计了灵活、高效的函数式算子库 Phi,可以通过对函数式算子接口组合调用的方式实现新算子。新算子库提供了 200 余个跟 python 开发接口保持一致的 C++ 运算类 API,以及近 500 个可供组合调用的前、反向函数式算子内核 Kernel,可大幅降低框架原生算子和自定义算子的开发成本。新算子库支持 Primitive API 方式开发算子内核,可支持不同硬件(比如 GPU 和 XPU)的算子内核复用。新算子库支持以插件方式接入硬件(比如 NPU)的加速库,实现低成本复用硬件加速库。 ### 分布式训练 - 全面升级自适应分布式训练架构,含弹性扩缩容、异步流水执行器、异构通信、自动并行等多个模块,支持了多种异构硬件下自动感知的分布式训练及分布式推理。 -- 动态图混合并行下新增MoE并行策略、GroupSharded 并行策略、Pure FP16 等,进一步支持了动态图下大模型的高效并行训练。 +- 动态图混合并行下新增 MoE 并行策略、GroupSharded 并行策略、Pure FP16 等,进一步支持了动态图下大模型的高效并行训练。 -- 全面升级优化了通用异构参数服务器架构,进行各模块的抽象简化,如通信、存储等,提升了参数服务器的二次开发体验;GPU 参数服务器在千亿参数百亿数据分钟级流式训练下性能提升2.38倍。 +- 全面升级优化了通用异构参数服务器架构,进行各模块的抽象简化,如通信、存储等,提升了参数服务器的二次开发体验;GPU 参数服务器在千亿参数百亿数据分钟级流式训练下性能提升 2.38 倍。 ### 编译安装 @@ -161,7 +161,7 @@ - 新增自定义新硬件接入:提供一种插件式扩展 PaddlePaddle 硬件后端的方式。 -- 新增对华为昇腾910 / GraphCore IPU / 寒武纪MLU / 昆仑芯2代多种异构芯片的训练/推理支持。 +- 新增对华为昇腾 910 / GraphCore IPU / 寒武纪 MLU / 昆仑芯 2 代多种异构芯片的训练/推理支持。 ### 框架架构 @@ -169,7 +169,7 @@ ## 2. 不兼容升级 -- 预编译安装包中移除CUDA sm35 ARCH: 受到包体积大小的影响,在预编译的安装包中移除了 CUDA sm35 架构。 ([#41754](https://github.com/PaddlePaddle/Paddle/pull/41754)) +- 预编译安装包中移除 CUDA sm35 ARCH: 受到包体积大小的影响,在预编译的安装包中移除了 CUDA sm35 架构。 ([#41754](https://github.com/PaddlePaddle/Paddle/pull/41754)) - `paddle.to_tensor` 将一个 python int scalar 转换为 Tensor 时,在 Windows 上的默认数据类型由 int32 变为 int64,从而与 Linux/Mac 保持对齐。([#39662](https://github.com/PaddlePaddle/Paddle/pull/39662)) @@ -284,7 +284,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### API -- 新增4个自动微分类 API,支持科学计算需求,具体列表如下:([#40692](https://github.com/PaddlePaddle/Paddle/pull/40692)) +- 新增 4 个自动微分类 API,支持科学计算需求,具体列表如下:([#40692](https://github.com/PaddlePaddle/Paddle/pull/40692)) - `paddle.incubate.autograd.vjp`,计算向量-雅可比矩阵乘积。 @@ -316,7 +316,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 新增 `paddle.linalg.lu`、 `paddle.linalg.lu_unpack`,计算矩阵 lu 分解、解压缩 lu 矩阵。([#38617](https://github.com/PaddlePaddle/Paddle/pull/38617), [#38559](https://github.com/PaddlePaddle/Paddle/pull/38559), [#38616](https://github.com/PaddlePaddle/Paddle/pull/38616)) -- 新增21个概率分布类 API,包括6个随机变量分布,13个随机变量变换,2个 KL 散度计算,用于强化学习、变分推断、科学计算等场景,具体列表如下:([#40536](https://github.com/PaddlePaddle/Paddle/pull/40536), [#38820](https://github.com/PaddlePaddle/Paddle/pull/38820), [#38558](https://github.com/PaddlePaddle/Paddle/pull/38558/files), [#38445](https://github.com/PaddlePaddle/Paddle/pull/38445), [#38244](https://github.com/PaddlePaddle/Paddle/pull/38244), [#38047](https://github.com/PaddlePaddle/Paddle/pull/38047)) +- 新增 21 个概率分布类 API,包括 6 个随机变量分布,13 个随机变量变换,2 个 KL 散度计算,用于强化学习、变分推断、科学计算等场景,具体列表如下:([#40536](https://github.com/PaddlePaddle/Paddle/pull/40536), [#38820](https://github.com/PaddlePaddle/Paddle/pull/38820), [#38558](https://github.com/PaddlePaddle/Paddle/pull/38558/files), [#38445](https://github.com/PaddlePaddle/Paddle/pull/38445), [#38244](https://github.com/PaddlePaddle/Paddle/pull/38244), [#38047](https://github.com/PaddlePaddle/Paddle/pull/38047)) - `paddle.distribution.ExponentialFamily`,指数分布族基类。 @@ -388,7 +388,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 新增 `paddle.text.ViterbiDecoder`、`paddle.text.viterbi_decode` Viterbi 解码 API,主要用于序列标注模型的预测。 ([#35778](https://github.com/PaddlePaddle/Paddle/pull/35778)) -- 新增 11 个 Sparse 类 API,支持创建 COO、CSR 格式的Sparse Tensor,与 Tensor 互相转换等基础功能: +- 新增 11 个 Sparse 类 API,支持创建 COO、CSR 格式的 Sparse Tensor,与 Tensor 互相转换等基础功能: - `paddle.sparse.sparse_coo_tensor`,创建 COO 格式的 Sparse Tensor。 ([#40780](https://github.com/PaddlePaddle/Paddle/pull/40780)) @@ -432,7 +432,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 新增 `paddle.Tensor.to_uva_tensor`,支持将 numpy 对象转换为实际存储在 CPU,但可作为 CUDA 对象进行虚拟地址访问的功能。([#39146](https://github.com/PaddlePaddle/Paddle/pull/39146), [#38950](https://github.com/PaddlePaddle/Paddle/pull/38950)) - - 新增`paddle.rot90`,沿 `axes` 指定的平面将 n 维 Tensor 旋转90度。([#37634](https://github.com/PaddlePaddle/Paddle/pull/37634)) + - 新增`paddle.rot90`,沿 `axes` 指定的平面将 n 维 Tensor 旋转 90 度。([#37634](https://github.com/PaddlePaddle/Paddle/pull/37634)) - 新增`paddle.logit` 和 `paddle.Tensor.logit`,计算输入 Tensor 的 logit 函数值。([#37844](https://github.com/PaddlePaddle/Paddle/pull/37844)) @@ -450,7 +450,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 新增 `paddle.erfinv` 和 `paddle.Tensor.erfinv`,计算输入 Tensor 的逆误差函数。([#38295](https://github.com/PaddlePaddle/Paddle/pull/38295)) - - 新增 `paddle.lerp` 和 `paddle.Tensor.lerp`,根据给定权重计算输入Tensor间的线性插值。([#37253](https://github.com/PaddlePaddle/Paddle/pull/37253)) + - 新增 `paddle.lerp` 和 `paddle.Tensor.lerp`,根据给定权重计算输入 Tensor 间的线性插值。([#37253](https://github.com/PaddlePaddle/Paddle/pull/37253)) - 新增 `paddle.angle`,用于计算复数 Tensor 的相位角。 ([#37689](https://github.com/PaddlePaddle/Paddle/pull/37689)) @@ -472,7 +472,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 新增 `paddle.bincount` 和 `paddle.Tensor.bincount`,用于统计 Tensor 中每个元素出现的次数。([#36317](https://github.com/PaddlePaddle/Paddle/pull/36317)) - - 新增 `paddle.fmax`、 `paddle.fmin`,扩展了max/min的功能,支持比较的两个 Tensor 中有 NaN 值的情况,即如果对应位置上有1个 NaN 值,则返回那个非 NaN 值;如果对应位置上有2个 NaN 值,则返回 NaN 值。([#37826](https://github.com/PaddlePaddle/Paddle/pull/37826)) + - 新增 `paddle.fmax`、 `paddle.fmin`,扩展了 max/min 的功能,支持比较的两个 Tensor 中有 NaN 值的情况,即如果对应位置上有 1 个 NaN 值,则返回那个非 NaN 值;如果对应位置上有 2 个 NaN 值,则返回 NaN 值。([#37826](https://github.com/PaddlePaddle/Paddle/pull/37826)) - 新增 `paddle.diff`,用于计算沿给定维度的第 n 个前向差值,目前支持 n=1。([#37441](https://github.com/PaddlePaddle/Paddle/pull/37441)) @@ -484,9 +484,9 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 新增 `paddle.det` 与 `paddle.slogdet`,用于计算矩阵的行列式和行列式的自然对数。 ([#34992](https://github.com/PaddlePaddle/Paddle/pull/34992)) - - 新增`paddle.nn.utils.parameters_to_vector`,可以将输入的多个 parameter 展平并连接为1个1-D Tensor。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) + - 新增`paddle.nn.utils.parameters_to_vector`,可以将输入的多个 parameter 展平并连接为 1 个 1-D Tensor。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - - 新增`paddle.nn.utils.vector_to_parameters`,将1个1-D Tensor按顺序切分给输入的多个 parameter。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) + - 新增`paddle.nn.utils.vector_to_parameters`,将 1 个 1-D Tensor 按顺序切分给输入的多个 parameter。([#38020](https://github.com/PaddlePaddle/Paddle/pull/38020)) - 新增组网类 API @@ -556,7 +556,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 新增支持 python3 的 type hint 语法。([#36544](https://github.com/PaddlePaddle/Paddle/pull/36544)) -- Pass开发 +- Pass 开发 - 新增基于 NVIDIA cuBlasLt Epilogue 的 FC + [relu|gelu] 的前向与反向融合。([#39437](https://github.com/PaddlePaddle/Paddle/pull/39437)) @@ -576,7 +576,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 从混合精度训练 `paddle.amp.GradScaler` 的 `minimize` 中拆分出 `paddle.amp.Gradscaler.unscale_` 方法,提供恢复 loss 的独立接口。([#35825](https://github.com/PaddlePaddle/Paddle/pull/35825)) -- 为 `paddle.nn.ClipByGlobalNorm` 动态图模式添加 FP16 支持,为clip op 添加 FP16 Kernel,使`clip`相关操作支持 FP16。([#36198](https://github.com/PaddlePaddle/Paddle/pull/36198), [#36577](https://github.com/PaddlePaddle/Paddle/pull/36577)) +- 为 `paddle.nn.ClipByGlobalNorm` 动态图模式添加 FP16 支持,为 clip op 添加 FP16 Kernel,使`clip`相关操作支持 FP16。([#36198](https://github.com/PaddlePaddle/Paddle/pull/36198), [#36577](https://github.com/PaddlePaddle/Paddle/pull/36577)) - 支持 `paddle.amp.decorate` 传入的`optimizer`参数为 None。([#37541](https://github.com/PaddlePaddle/Paddle/pull/37541)) @@ -614,19 +614,19 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. #### 飞桨高可复用算子库 PHI -针对飞桨框架原算子库存在的算子接口不清晰、算子复用成本较高、调用性能不够快的问题,我们重构了飞桨框架的算子库,设计了灵活、高效的函数式算子库 PHI,可以通过对函数式算子接口组合调用的方式实现新算子。新算子库提供了 200 余个跟 python 开发接口保持一致的 C++ 运算类 API,以及近500个可供组合调用的前、反向函数式算子内核 Kernel,可大幅降低框架原生算子和自定义算子的开发成本。新算子库支持Primitive API方式开发算子内核,可支持不同硬件(比如GPU和XPU)的算子内核复用。新算子库支持以插件方式接入硬件(比如NPU)的加速库,实现低成本复用硬件加速库。主要可分为以下几部分工作: +针对飞桨框架原算子库存在的算子接口不清晰、算子复用成本较高、调用性能不够快的问题,我们重构了飞桨框架的算子库,设计了灵活、高效的函数式算子库 PHI,可以通过对函数式算子接口组合调用的方式实现新算子。新算子库提供了 200 余个跟 python 开发接口保持一致的 C++ 运算类 API,以及近 500 个可供组合调用的前、反向函数式算子内核 Kernel,可大幅降低框架原生算子和自定义算子的开发成本。新算子库支持 Primitive API 方式开发算子内核,可支持不同硬件(比如 GPU 和 XPU)的算子内核复用。新算子库支持以插件方式接入硬件(比如 NPU)的加速库,实现低成本复用硬件加速库。主要可分为以下几部分工作: - **算子库基础架构、核心组件与机制实现**:合理规划新算子库的目录结构,设计实现了新算子库的公共基础数据结构、新的函数式 InferMeta 和 Kernel 开发范式以及相应的注册和管理组件,并且支持 Kernel 文件的自动化编译对象生成及编译依赖关系生成,使开发者仅需关注函数式 Kernel 的实现,开发范式简洁清晰。([#34425](https://github.com/PaddlePaddle/Paddle/pull/34425), [#37107](https://github.com/PaddlePaddle/Paddle/pull/37107), [#36946](https://github.com/PaddlePaddle/Paddle/pull/36946), [#36948](https://github.com/PaddlePaddle/Paddle/pull/36948), [#37876](https://github.com/PaddlePaddle/Paddle/pull/37876), [#37916](https://github.com/PaddlePaddle/Paddle/pull/37916), 
[#37977](https://github.com/PaddlePaddle/Paddle/pull/37977), [38078](https://github.com/PaddlePaddle/Paddle/pull/38078), [#38861](https://github.com/PaddlePaddle/Paddle/pull/38861), [#39123](https://github.com/PaddlePaddle/Paddle/pull/39123), [#39131](https://github.com/PaddlePaddle/Paddle/pull/39131), [#39748](https://github.com/PaddlePaddle/Paddle/pull/39748), [#39790](https://github.com/PaddlePaddle/Paddle/pull/39790), [#39941](https://github.com/PaddlePaddle/Paddle/pull/39941), [#40239](https://github.com/PaddlePaddle/Paddle/pull/40239), [#40635](https://github.com/PaddlePaddle/Paddle/pull/40635), [#41091](https://github.com/PaddlePaddle/Paddle/pull/41091), [#37409](https://github.com/PaddlePaddle/Paddle/pull/37409), [#37942](https://github.com/PaddlePaddle/Paddle/pull/37942), [#39002](https://github.com/PaddlePaddle/Paddle/pull/39002), [#38109](https://github.com/PaddlePaddle/Paddle/pull/38109), [#37881](https://github.com/PaddlePaddle/Paddle/pull/37881), [#37517](https://github.com/PaddlePaddle/Paddle/pull/37517), [#39870](https://github.com/PaddlePaddle/Paddle/pull/39870), [#40975](https://github.com/PaddlePaddle/Paddle/pull/40975), [#39475](https://github.com/PaddlePaddle/Paddle/pull/39475), [#37304](https://github.com/PaddlePaddle/Paddle/pull/37304), #36910, #37120, #37146, #37215, #37255, #37369, #38258, #38257, #38355, #38853, #38937, #38977, #38946, #39085, #39153, #39228, #38301, #38275, #38506, #38607, #38473, #38632, #38811, #38880, #38996, #38914, #39101) -- **算子库C++ API体系建设**:设计实现了基于 yaml 配置文件的算子定义范式、自动生成了200余个C++运算类 API,供内外部开发者复用,降低了基础运算的重复开发成本。([#37668](https://github.com/PaddlePaddle/Paddle/pull/37668), [#36938](https://github.com/PaddlePaddle/Paddle/pull/36938), [#38172](https://github.com/PaddlePaddle/Paddle/pull/38172), [#38182](https://github.com/PaddlePaddle/Paddle/pull/38182), [#38311](https://github.com/PaddlePaddle/Paddle/pull/38311), [#38438](https://github.com/PaddlePaddle/Paddle/pull/38438), 
[#39057](https://github.com/PaddlePaddle/Paddle/pull/39057), [#39229](https://github.com/PaddlePaddle/Paddle/pull/39229), [#39281](https://github.com/PaddlePaddle/Paddle/pull/39281), [#39263](https://github.com/PaddlePaddle/Paddle/pull/39263), [#39408](https://github.com/PaddlePaddle/Paddle/pull/39408), [#39436](https://github.com/PaddlePaddle/Paddle/pull/39436), [#39482](https://github.com/PaddlePaddle/Paddle/pull/39482), [#39497](https://github.com/PaddlePaddle/Paddle/pull/39497), [#39651](https://github.com/PaddlePaddle/Paddle/pull/39651), [#39521](https://github.com/PaddlePaddle/Paddle/pull/39521), [#39760](https://github.com/PaddlePaddle/Paddle/pull/39760), [#40060](https://github.com/PaddlePaddle/Paddle/pull/40060), [#40196](https://github.com/PaddlePaddle/Paddle/pull/40196), [#40218](https://github.com/PaddlePaddle/Paddle/pull/40218), [#40640](https://github.com/PaddlePaddle/Paddle/pull/40640), [#40732](https://github.com/PaddlePaddle/Paddle/pull/40732), [#40729](https://github.com/PaddlePaddle/Paddle/pull/40729), [#40840](https://github.com/PaddlePaddle/Paddle/pull/40840), [#40867](https://github.com/PaddlePaddle/Paddle/pull/40867), [#41025](https://github.com/PaddlePaddle/Paddle/pull/41025), [#41368](https://github.com/PaddlePaddle/Paddle/pull/41368)) +- **算子库 C++ API 体系建设**:设计实现了基于 yaml 配置文件的算子定义范式、自动生成了 200 余个 C++运算类 API,供内外部开发者复用,降低了基础运算的重复开发成本。([#37668](https://github.com/PaddlePaddle/Paddle/pull/37668), [#36938](https://github.com/PaddlePaddle/Paddle/pull/36938), [#38172](https://github.com/PaddlePaddle/Paddle/pull/38172), [#38182](https://github.com/PaddlePaddle/Paddle/pull/38182), [#38311](https://github.com/PaddlePaddle/Paddle/pull/38311), [#38438](https://github.com/PaddlePaddle/Paddle/pull/38438), [#39057](https://github.com/PaddlePaddle/Paddle/pull/39057), [#39229](https://github.com/PaddlePaddle/Paddle/pull/39229), [#39281](https://github.com/PaddlePaddle/Paddle/pull/39281), [#39263](https://github.com/PaddlePaddle/Paddle/pull/39263), 
[#39408](https://github.com/PaddlePaddle/Paddle/pull/39408), [#39436](https://github.com/PaddlePaddle/Paddle/pull/39436), [#39482](https://github.com/PaddlePaddle/Paddle/pull/39482), [#39497](https://github.com/PaddlePaddle/Paddle/pull/39497), [#39651](https://github.com/PaddlePaddle/Paddle/pull/39651), [#39521](https://github.com/PaddlePaddle/Paddle/pull/39521), [#39760](https://github.com/PaddlePaddle/Paddle/pull/39760), [#40060](https://github.com/PaddlePaddle/Paddle/pull/40060), [#40196](https://github.com/PaddlePaddle/Paddle/pull/40196), [#40218](https://github.com/PaddlePaddle/Paddle/pull/40218), [#40640](https://github.com/PaddlePaddle/Paddle/pull/40640), [#40732](https://github.com/PaddlePaddle/Paddle/pull/40732), [#40729](https://github.com/PaddlePaddle/Paddle/pull/40729), [#40840](https://github.com/PaddlePaddle/Paddle/pull/40840), [#40867](https://github.com/PaddlePaddle/Paddle/pull/40867), [#41025](https://github.com/PaddlePaddle/Paddle/pull/41025), [#41368](https://github.com/PaddlePaddle/Paddle/pull/41368)) - **算子库兼容各执行体系**:实现新的 InferMeta 及 Kernel 接入原动静态图执行体系、支持原 OpKernel 注册安全移除并迁移为新的 Kernel 形式。([#34425](https://github.com/PaddlePaddle/Paddle/pull/34425), [#38825](https://github.com/PaddlePaddle/Paddle/pull/38825), [#38837](https://github.com/PaddlePaddle/Paddle/pull/38837), [#38842](https://github.com/PaddlePaddle/Paddle/pull/38842), [#38976](https://github.com/PaddlePaddle/Paddle/pull/38976), [#39134](https://github.com/PaddlePaddle/Paddle/pull/39134), [#39140](https://github.com/PaddlePaddle/Paddle/pull/39140), [#39135](https://github.com/PaddlePaddle/Paddle/pull/39135), [#39252](https://github.com/PaddlePaddle/Paddle/pull/39252), [#39222](https://github.com/PaddlePaddle/Paddle/pull/39222), [#39351](https://github.com/PaddlePaddle/Paddle/pull/39351)) - **算子库底层数据结构及工具函数与框架解耦**:解除 Phi 在核心数据结构上对 框架的依赖,为后续 Phi 独立编译奠定基础,支持 infrt、自定义 Kernel 等一系列基于 Phi 的建设工作 ([#38583](https://github.com/PaddlePaddle/Paddle/pull/38583), 
[#39188](https://github.com/PaddlePaddle/Paddle/pull/39188), [#39560](https://github.com/PaddlePaddle/Paddle/pull/39560), [#39931](https://github.com/PaddlePaddle/Paddle/pull/39931), [#39169](https://github.com/PaddlePaddle/Paddle/pull/39169), [#38951](https://github.com/PaddlePaddle/Paddle/pull/38951), [#38898](https://github.com/PaddlePaddle/Paddle/pull/38898), [#38873](https://github.com/PaddlePaddle/Paddle/pull/38873), [#38696](https://github.com/PaddlePaddle/Paddle/pull/38696), [#38651](https://github.com/PaddlePaddle/Paddle/pull/38651), [#39359](https://github.com/PaddlePaddle/Paddle/pull/39359), [#39305](https://github.com/PaddlePaddle/Paddle/pull/39305), [#39234](https://github.com/PaddlePaddle/Paddle/pull/39234), [#39098](https://github.com/PaddlePaddle/Paddle/pull/39098), [#39120](https://github.com/PaddlePaddle/Paddle/pull/39120), [#38979](https://github.com/PaddlePaddle/Paddle/pull/38979), [#38899](https://github.com/PaddlePaddle/Paddle/pull/38899), [#38844](https://github.com/PaddlePaddle/Paddle/pull/38844), [#39714](https://github.com/PaddlePaddle/Paddle/pull/39714), [#39729](https://github.com/PaddlePaddle/Paddle/pull/39729), [#39889](https://github.com/PaddlePaddle/Paddle/pull/39889), [#39587](https://github.com/PaddlePaddle/Paddle/pull/39587), [#39558](https://github.com/PaddlePaddle/Paddle/pull/39558), [#39514](https://github.com/PaddlePaddle/Paddle/pull/39514), [#39502](https://github.com/PaddlePaddle/Paddle/pull/39502), [#39300](https://github.com/PaddlePaddle/Paddle/pull/39300), [#39246](https://github.com/PaddlePaddle/Paddle/pull/39246), [#39124](https://github.com/PaddlePaddle/Paddle/pull/39124)) -- **自定义算子机制与 Phi 整合并完善**:支持在自定义算子编写时调用 Phi 自动生成的200余个C++运算类 API,降低自定义算子开发成本,并进行一系列问题修复。([#37122](https://github.com/PaddlePaddle/Paddle/pull/37122), [#37276](https://github.com/PaddlePaddle/Paddle/pull/37276), [#37281](https://github.com/PaddlePaddle/Paddle/pull/37281), [#37262](https://github.com/PaddlePaddle/Paddle/pull/37281), 
[#37415](https://github.com/PaddlePaddle/Paddle/pull/37415), [#37423](https://github.com/PaddlePaddle/Paddle/pull/37423), [#37583](https://github.com/PaddlePaddle/Paddle/pull/37683), [#38776](https://github.com/PaddlePaddle/Paddle/pull/38776), [#39353](https://github.com/PaddlePaddle/Paddle/pull/39353), [#41072](https://github.com/PaddlePaddle/Paddle/pull/41072)) +- **自定义算子机制与 Phi 整合并完善**:支持在自定义算子编写时调用 Phi 自动生成的 200 余个 C++运算类 API,降低自定义算子开发成本,并进行一系列问题修复。([#37122](https://github.com/PaddlePaddle/Paddle/pull/37122), [#37276](https://github.com/PaddlePaddle/Paddle/pull/37276), [#37281](https://github.com/PaddlePaddle/Paddle/pull/37281), [#37262](https://github.com/PaddlePaddle/Paddle/pull/37262), [#37415](https://github.com/PaddlePaddle/Paddle/pull/37415), [#37423](https://github.com/PaddlePaddle/Paddle/pull/37423), [#37583](https://github.com/PaddlePaddle/Paddle/pull/37683), [#38776](https://github.com/PaddlePaddle/Paddle/pull/38776), [#39353](https://github.com/PaddlePaddle/Paddle/pull/39353), [#41072](https://github.com/PaddlePaddle/Paddle/pull/41072)) -- **算子规模化迁移改写**:迁移了约250个高频算子的前、反向算子内核 Kernel 至新算子库,改写为函数式,支持在 C++端通过调用多个基础 Kernel 函数封装,快速组合实现高性能算子;同时,添加相应的 yaml 算子定义,并接入新动态图执行体系,提升 python API 调度性能。迁移改写的算子包括: +- **算子规模化迁移改写**:迁移了约 250 个高频算子的前、反向算子内核 Kernel 至新算子库,改写为函数式,支持在 C++端通过调用多个基础 Kernel 函数封装,快速组合实现高性能算子;同时,添加相应的 yaml 算子定义,并接入新动态图执行体系,提升 python API 调度性能。迁移改写的算子包括: - sqrt ([#40727](https://github.com/PaddlePaddle/Paddle/pull/40727)) @@ -1168,7 +1168,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 动态图重构后,为 Tensor 的 setitem 功能添加 inplace 策略。([#40915](https://github.com/PaddlePaddle/Paddle/pull/40915)) - - 动态图重构后添加`_reset_grad_inplace_version`接口,将 Tensor 的梯度的 inplace version 置为0。([#41101](https://github.com/PaddlePaddle/Paddle/pull/41101)) + - 动态图重构后添加`_reset_grad_inplace_version`接口,将 Tensor 的梯度的 inplace version 置为 0。([#41101](https://github.com/PaddlePaddle/Paddle/pull/41101)) - 反向计算过程中如果不需要前向 Tensor 的值(no need buffer 属性),则不需要对该 Tensor 进行 inplace version 的检测操作。 为 no_need_buffer 的 Tensor 跳过 inplace version 的检查。([#41350](https://github.com/PaddlePaddle/Paddle/pull/41350)) @@ -1202,7 +1202,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 增强多线程场景下调试和报错功能,将子线程的报错捕获到主线程中统一抛出,以提升用户体验。([#36692](https://github.com/PaddlePaddle/Paddle/pull/36692),[#36802](https://github.com/PaddlePaddle/Paddle/pull/36802)) -- 修复新执行器通信流重置 Allocator 中 stream 缓存信息的问题,减少跨 stream 场景下的 RecordStream 开销,优化后 DeepFM 模型性能提升约8%。([#42046](https://github.com/PaddlePaddle/Paddle/pull/42046)) +- 修复新执行器通信流重置 Allocator 中 stream 缓存信息的问题,减少跨 stream 场景下的 RecordStream 开销,优化后 DeepFM 模型性能提升约 8%。([#42046](https://github.com/PaddlePaddle/Paddle/pull/42046)) - 优化新执行器算子间的依赖分析方法,提升运行性能;为 send/recv 通信算子建立正确依赖以支持流水线并行。([#42009](https://github.com/PaddlePaddle/Paddle/pull/42009)) @@ -1213,7 +1213,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 新增弹性功能(含节点故障、扩容、缩容),提升分布式的容错能力。 ([#36684](https://github.com/PaddlePaddle/Paddle/pull/36684), [#37177](https://github.com/PaddlePaddle/Paddle/pull/37177), [#37781](https://github.com/PaddlePaddle/Paddle/pull/37781)) - - Launch启动模块,重构并新增 `master` 协同和节点个数 `nnodes` 定义 ,提升分布式启动易用性。 ([#40086](https://github.com/PaddlePaddle/Paddle/pull/40086), [#40568](https://github.com/PaddlePaddle/Paddle/pull/40568), [#40782](https://github.com/PaddlePaddle/Paddle/pull/40782), [#40844](https://github.com/PaddlePaddle/Paddle/pull/40844), [#40936](https://github.com/PaddlePaddle/Paddle/pull/40936), [#41190](https://github.com/PaddlePaddle/Paddle/pull/41190), [#41314](https://github.com/PaddlePaddle/Paddle/pull/41314)) + - Launch 启动模块,重构并新增 `master` 协同和节点个数 `nnodes` 定义 ,提升分布式启动易用性。 ([#40086](https://github.com/PaddlePaddle/Paddle/pull/40086), [#40568](https://github.com/PaddlePaddle/Paddle/pull/40568), [#40782](https://github.com/PaddlePaddle/Paddle/pull/40782), [#40844](https://github.com/PaddlePaddle/Paddle/pull/40844), [#40936](https://github.com/PaddlePaddle/Paddle/pull/40936), [#41190](https://github.com/PaddlePaddle/Paddle/pull/41190), [#41314](https://github.com/PaddlePaddle/Paddle/pull/41314)) - 新增对 GPU/NPU/XPU 多种硬件的异构训练的支持。([#37613](https://github.com/PaddlePaddle/Paddle/pull/37613), [#37998](https://github.com/PaddlePaddle/Paddle/pull/37998)) @@ -1229,7 +1229,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 新增 MoE(Mixture of Experts)并行策略, 支持超大 MoE 模型训练。([#41092](https://github.com/PaddlePaddle/Paddle/pull/41092), [#40895](https://github.com/PaddlePaddle/Paddle/pull/40895), [#40850](https://github.com/PaddlePaddle/Paddle/pull/40580), [#39224](https://github.com/PaddlePaddle/Paddle/pull/39224)) - - 新增 GroupSharded 并行策略,支持 stage1、stage2、stage3三个阶段模型状态分组切片训练策略,支持同、异步通信,并可与 Recompute、AMP O1\O2、Offload、GroupShardedClipGrad、GroupShardedScaler 等基础功能组合使用。([#37489](https://github.com/PaddlePaddle/Paddle/pull/37489), [#37568](https://github.com/PaddlePaddle/Paddle/pull/37568), [#37707](https://github.com/PaddlePaddle/Paddle/pull/37707), [#37836](https://github.com/PaddlePaddle/Paddle/pull/37836), [#37947](https://github.com/PaddlePaddle/Paddle/pull/37947), [#38151](https://github.com/PaddlePaddle/Paddle/pull/38151), [#38407](https://github.com/PaddlePaddle/Paddle/pull/38407), [#38052](https://github.com/PaddlePaddle/Paddle/pull/38052), [#39112](https://github.com/PaddlePaddle/Paddle/pull/39112), [#38989](https://github.com/PaddlePaddle/Paddle/pull/38989), [#39171](https://github.com/PaddlePaddle/Paddle/pull/39171), [#39285](https://github.com/PaddlePaddle/Paddle/pull/39285), [#39334](https://github.com/PaddlePaddle/Paddle/pull/39334), [#39397](https://github.com/PaddlePaddle/Paddle/pull/39397), [#39581](https://github.com/PaddlePaddle/Paddle/pull/39581), [#39668](https://github.com/PaddlePaddle/Paddle/pull/39668), [#40129](https://github.com/PaddlePaddle/Paddle/pull/40129), [#40396](https://github.com/PaddlePaddle/Paddle/pull/40396), [#40488](https://github.com/PaddlePaddle/Paddle/pull/40488), [#40601](https://github.com/PaddlePaddle/Paddle/pull/40601),[#37725](https://github.com/PaddlePaddle/Paddle/pull/37725),[#37904](https://github.com/PaddlePaddle/Paddle/pull/37904), [#38064](https://github.com/PaddlePaddle/Paddle/pull/38064)) + - 新增 GroupSharded 并行策略,支持 stage1、stage2、stage3 三个阶段模型状态分组切片训练策略,支持同、异步通信,并可与 Recompute、AMP O1\O2、Offload、GroupShardedClipGrad、GroupShardedScaler 
等基础功能组合使用。([#37489](https://github.com/PaddlePaddle/Paddle/pull/37489), [#37568](https://github.com/PaddlePaddle/Paddle/pull/37568), [#37707](https://github.com/PaddlePaddle/Paddle/pull/37707), [#37836](https://github.com/PaddlePaddle/Paddle/pull/37836), [#37947](https://github.com/PaddlePaddle/Paddle/pull/37947), [#38151](https://github.com/PaddlePaddle/Paddle/pull/38151), [#38407](https://github.com/PaddlePaddle/Paddle/pull/38407), [#38052](https://github.com/PaddlePaddle/Paddle/pull/38052), [#39112](https://github.com/PaddlePaddle/Paddle/pull/39112), [#38989](https://github.com/PaddlePaddle/Paddle/pull/38989), [#39171](https://github.com/PaddlePaddle/Paddle/pull/39171), [#39285](https://github.com/PaddlePaddle/Paddle/pull/39285), [#39334](https://github.com/PaddlePaddle/Paddle/pull/39334), [#39397](https://github.com/PaddlePaddle/Paddle/pull/39397), [#39581](https://github.com/PaddlePaddle/Paddle/pull/39581), [#39668](https://github.com/PaddlePaddle/Paddle/pull/39668), [#40129](https://github.com/PaddlePaddle/Paddle/pull/40129), [#40396](https://github.com/PaddlePaddle/Paddle/pull/40396), [#40488](https://github.com/PaddlePaddle/Paddle/pull/40488), [#40601](https://github.com/PaddlePaddle/Paddle/pull/40601),[#37725](https://github.com/PaddlePaddle/Paddle/pull/37725),[#37904](https://github.com/PaddlePaddle/Paddle/pull/37904), [#38064](https://github.com/PaddlePaddle/Paddle/pull/38064)) - 静态图混合并行 @@ -1237,11 +1237,11 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 张量模型并行下,dropout 支持设置确定性随机种子生成器,以确保非分布式变量的随机一致性和分布式变量的随机性。([#36228](https://github.com/PaddlePaddle/Paddle/pull/36228)) - - NPU 混合并行支持 Offload,可节约40%显存。([#37224](https://github.com/PaddlePaddle/Paddle/pull/37224)) + - NPU 混合并行支持 Offload,可节约 40%显存。([#37224](https://github.com/PaddlePaddle/Paddle/pull/37224)) - 为 seed op 增加 `force_cpu` 可选参数,使 dropout 可以直接从 CPU 读取 seed 的值。([#35820](https://github.com/PaddlePaddle/Paddle/pull/35820)) - - 完善Automatic Sparsity (ASP)sharding策略,支持根据program选择sharding策略。(#[#40028](https://github.com/PaddlePaddle/Paddle/pull/40028)) + - 完善 Automatic Sparsity (ASP)sharding 策略,支持根据 program 选择 sharding 策略。(#[#40028](https://github.com/PaddlePaddle/Paddle/pull/40028)) - 自动并行 @@ -1263,7 +1263,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 新增重计算功能(Recompute),优化显存。([#38920](https://github.com/PaddlePaddle/Paddle/pull/38920)) - - 新增 Sharding 优化 pass, 支持 p-g-os 3 个stage 的切分优化。([#38502](https://github.com/PaddlePaddle/Paddle/pull/38502)) + - 新增 Sharding 优化 pass, 支持 p-g-os 3 个 stage 的切分优化。([#38502](https://github.com/PaddlePaddle/Paddle/pull/38502)) - 新增 AMP + FP16 优化 pass。([#38764](https://github.com/PaddlePaddle/Paddle/pull/38764), [#40615](https://github.com/PaddlePaddle/Paddle/pull/40615)) @@ -1289,7 +1289,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 统一参数服务器下,新增评估指标模块,支持 AUC/WuAUC/MaskAuc 等评估指标计算及可自定义扩展。 ([#38789](https://github.com/PaddlePaddle/Paddle/pull/38789)) - - 支持在昆仑2芯片上的 XPU 参数服务器训练。 ([#41917](https://github.com/PaddlePaddle/Paddle/pull/41917), [#42266](https://github.com/PaddlePaddle/Paddle/pull/42266), [#41916](https://github.com/PaddlePaddle/Paddle/pull/41916)) + - 支持在昆仑 2 芯片上的 XPU 参数服务器训练。 ([#41917](https://github.com/PaddlePaddle/Paddle/pull/41917), [#42266](https://github.com/PaddlePaddle/Paddle/pull/42266), [#41916](https://github.com/PaddlePaddle/Paddle/pull/41916)) #### Profiler @@ -1333,7 +1333,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### CINN 编译器接入 -飞桨的编译器功能在逐步丰富中,针对 CINN([GitHub - PaddlePaddle/CINN: Compiler Infrastructure for Neural Networks](https://github.com/PaddlePaddle/CINN)) 的变更,Paddle 侧接入也进行了相对应的更改,以适配编译器 CINN 的功能。其中主要包括增加Paddle-CINN 运行流程的子图管理相关功能,显存和速度性能的优化、开发过程发现的 bug 修复。 +飞桨的编译器功能在逐步丰富中,针对 CINN([GitHub - PaddlePaddle/CINN: Compiler Infrastructure for Neural Networks](https://github.com/PaddlePaddle/CINN)) 的变更,Paddle 侧接入也进行了相对应的更改,以适配编译器 CINN 的功能。其中主要包括增加 Paddle-CINN 运行流程的子图管理相关功能,显存和速度性能的优化、开发过程发现的 bug 修复。 - 功能开发: @@ -1345,7 +1345,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 为 cinn_launch op 的 Kernel 实现添加辅助类 CinnLaunchContext 管理子图编译、运行的中间数据,提升可扩展性和代码可读性。([#37938](https://github.com/PaddlePaddle/Paddle/pull/37938)) - - 为 CINN 子图添加额外的 fetch 结点,从而保证 CINN 外部结点能取到待fetch变量的值。([#37172](https://github.com/PaddlePaddle/Paddle/pull/37172), [#37190](https://github.com/PaddlePaddle/Paddle/pull/37190)) + - 为 CINN 子图添加额外的 fetch 结点,从而保证 CINN 外部结点能取到待 fetch 变量的值。([#37172](https://github.com/PaddlePaddle/Paddle/pull/37172), [#37190](https://github.com/PaddlePaddle/Paddle/pull/37190)) - 添加对 CINN 子图符号化的功能,符号化用于拓扑排序子图并返回 CINN 执行序列。([#36417](https://github.com/PaddlePaddle/Paddle/pull/36417) @@ -1419,9 +1419,9 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 支持更多 op 适配模 op 量化。([#40083](https://github.com/PaddlePaddle/Paddle/pull/40083)) - - 支持控制流中的OP量化。([#37498](https://github.com/PaddlePaddle/Paddle/pull/37498)) + - 支持控制流中的 OP 量化。([#37498](https://github.com/PaddlePaddle/Paddle/pull/37498)) - - 新增支持matmul_v2 OP的量化。([#36469](https://github.com/PaddlePaddle/Paddle/pull/36469)) + - 新增支持 matmul_v2 OP 的量化。([#36469](https://github.com/PaddlePaddle/Paddle/pull/36469)) - 新增支持量化后的 matmul_v2 在 TensorRT 上的推理。([#36594](https://github.com/PaddlePaddle/Paddle/pull/36594)) @@ -1433,21 +1433,21 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 实现 CPU-GPU 统一内存寻址(CUDA Managed Memory),支持在显存受限场景下训练超大模型。([#39075](https://github.com/PaddlePaddle/Paddle/pull/39075)) - - C++底层新增GetBasePtr接口,用来获取设备接口CUDAMalloc创建的设备地址。([#37978](https://github.com/PaddlePaddle/Paddle/pull/37978)) + - C++底层新增 GetBasePtr 接口,用来获取设备接口 CUDAMalloc 创建的设备地址。([#37978](https://github.com/PaddlePaddle/Paddle/pull/37978)) - - 减少AutoGrowth Allocator 中 free blocks 的数量,提升显存分配性能。([#35732](https://github.com/PaddlePaddle/Paddle/pull/35732)) + - 减少 AutoGrowth Allocator 中 free blocks 的数量,提升显存分配性能。([#35732](https://github.com/PaddlePaddle/Paddle/pull/35732)) - - 对于 `initializer.Normal` 和 `initializer.Constant` 数据类型是 FP16 的 Tensor 去除多余的 float32 临时 Tensor 以及 cast,节省2倍显存。 ([#38818](https://github.com/PaddlePaddle/Paddle/pull/38818)) + - 对于 `initializer.Normal` 和 `initializer.Constant` 数据类型是 FP16 的 Tensor 去除多余的 float32 临时 Tensor 以及 cast,节省 2 倍显存。 ([#38818](https://github.com/PaddlePaddle/Paddle/pull/38818)) - 动态图高阶导数组网测试 - - 为动态图增加三阶导数组网测试,以及Broadcast情况的测试。 ([#36814](https://github.com/PaddlePaddle/Paddle/pull/36814) , [#37377](https://github.com/PaddlePaddle/Paddle/pull/37377)) + - 为动态图增加三阶导数组网测试,以及 Broadcast 情况的测试。 ([#36814](https://github.com/PaddlePaddle/Paddle/pull/36814) , [#37377](https://github.com/PaddlePaddle/Paddle/pull/37377)) - 自定义 op:支持 ROCm(HIP) 平台进行自定义 op 注册。 ([#36771](https://github.com/PaddlePaddle/Paddle/pull/36771)) - Cost Model:增加基于运行 Profile 的 Cost Model。 ([#35774](https://github.com/PaddlePaddle/Paddle/pull/35774)) -- 提供定制化层 (nn.Layer)的自动稀疏训练支持,让用戶可根据自定义的Prune函数来对其设计的层进行稀疏剪枝。([#40253](https://github.com/PaddlePaddle/Paddle/pull/40253)) +- 提供定制化层 (nn.Layer)的自动稀疏训练支持,让用戶可根据自定义的 Prune 函数来对其设计的层进行稀疏剪枝。([#40253](https://github.com/PaddlePaddle/Paddle/pull/40253)) - 新增字符串张量底层数据结构表示,使框架具备字符串张量表示和计算的能力。([#39830](https://github.com/PaddlePaddle/Paddle/pull/39830), [#40992](https://github.com/PaddlePaddle/Paddle/pull/40992)) @@ -1501,7 +1501,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- LayerNorm ([#40418](https://github.com/PaddlePaddle/Paddle/pull/40418)) -- 增加基于 SSD-内存-GPU显存 的3级存储图检索引擎,支持大规模图神经网络训练。([#42472](https://github.com/PaddlePaddle/Paddle/pull/42472), [#42321](https://github.com/PaddlePaddle/Paddle/pull/42321), [#42027](https://github.com/PaddlePaddle/Paddle/pull/42027)) +- 增加基于 SSD-内存-GPU 显存 的 3 级存储图检索引擎,支持大规模图神经网络训练。([#42472](https://github.com/PaddlePaddle/Paddle/pull/42472), [#42321](https://github.com/PaddlePaddle/Paddle/pull/42321), [#42027](https://github.com/PaddlePaddle/Paddle/pull/42027)) - 增加异构多云训练通信模块 switch,实现 Send/Recv 接口,支持多云异构通信。([#40965](https://github.com/PaddlePaddle/Paddle/pull/40965) [40911](https://github.com/PaddlePaddle/Paddle/pull/40911)) @@ -1523,7 +1523,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 为 transpose op 新增 bool 类型支持。([#35886](https://github.com/PaddlePaddle/Paddle/pull/35886)) -- 将 `paddle.mm` 底层算子从 matmul 切换到matmul_v2。 ([#35770](https://github.com/PaddlePaddle/Paddle/pull/35770)) +- 将 `paddle.mm` 底层算子从 matmul 切换到 matmul_v2。 ([#35770](https://github.com/PaddlePaddle/Paddle/pull/35770)) - 为 `paddle.einsum` 支持静态图模式调用,支持未知 shape。 ([#40360](https://github.com/PaddlePaddle/Paddle/pull/40360)) @@ -1537,7 +1537,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 为 `paddle.fft` 下所有 API 新增 ROCM 后端支持,并优化 CUFFT 后端报错信息。([#36415](https://github.com/PaddlePaddle/Paddle/pull/36415), [#36114](https://github.com/PaddlePaddle/Paddle/pull/36114/files)) -- 为 `Tensor.getitem` 增加对切片部分维度为0的功能支持,即允许切片索引结果为空。([#37313](https://github.com/PaddlePaddle/Paddle/pull/37313)) +- 为 `Tensor.getitem` 增加对切片部分维度为 0 的功能支持,即允许切片索引结果为空。([#37313](https://github.com/PaddlePaddle/Paddle/pull/37313)) - 为 `Tensor.setitem` 支持 int 和 bool 类型 Tensor 使用 bool 索引。([#37761](https://github.com/PaddlePaddle/Paddle/pull/37761)) @@ -1559,7 +1559,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 为 `paddle.take_along_axis`、`paddle.put_along_axis` 支持更多 size 的输入,允许 index 矩阵的 shape size 大于 arr 矩阵的 shape size。 ([#39072](https://github.com/PaddlePaddle/Paddle/pull/39072)) -- 优化 API `paddle.nn.Pad2D`在 replicate 为0时的报错信息。([#36510](https://github.com/PaddlePaddle/Paddle/pull/36510/files)) +- 优化 API `paddle.nn.Pad2D`在 replicate 为 0 时的报错信息。([#36510](https://github.com/PaddlePaddle/Paddle/pull/36510/files)) - 支持 API `paddle.nn.Pad2D`在 tuple 格式的 pad 输入。([#35985](https://github.com/PaddlePaddle/Paddle/pull/35985/files)) @@ -1577,11 +1577,11 @@ - 完善`paddle.amp.GradScaler`调用 check_finite_and_unscale op 的逻辑,消除该处创建 bool 变量所引入的 cudaMemcpy。([#37770](https://github.com/PaddlePaddle/Paddle/pull/37770)) -- 新增对 unstack 和 unique op 元素个数为0的 Tensor 增加检查。([#36021](https://github.com/PaddlePaddle/Paddle/pull/36021)) +- 新增对 unstack 和 unique op 元素个数为 0 的 Tensor 增加检查。([#36021](https://github.com/PaddlePaddle/Paddle/pull/36021)) -- 新增支持昆仑2的多层、双向 LSTM 功能,完善 RNN 前反向 op,支持时序类模型训练使用。([#](https://github.com/PaddlePaddle/Paddle/pull/41781)[42076](https://github.com/PaddlePaddle/Paddle/pull/42076)) +- 新增支持昆仑 2 的多层、双向 LSTM 功能,完善 RNN 前反向 op,支持时序类模型训练使用。([#41781](https://github.com/PaddlePaddle/Paddle/pull/41781), [#42076](https://github.com/PaddlePaddle/Paddle/pull/42076)) -- 新增支持昆仑2的 bce_loss 前反向 op。([#41610](https://github.com/PaddlePaddle/Paddle/pull/41610)) +- 新增支持昆仑 2 的 bce_loss 前反向 op。([#41610](https://github.com/PaddlePaddle/Paddle/pull/41610)) - 添加 `paddle.linalg.det` 的反向实现。([#36013](https://github.com/PaddlePaddle/Paddle/pull/36013)) @@ -1589,19 +1589,19 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 动态图转静态图 - - 优化动转静下 `ProgramCache.last` 接口行为,使其返回最近使用的 Program,而非最后生成的Program。([#39541](https://github.com/PaddlePaddle/Paddle/pull/39541)) + - 优化动转静下 `ProgramCache.last` 接口行为,使其返回最近使用的 Program,而非最后生成的 Program。([#39541](https://github.com/PaddlePaddle/Paddle/pull/39541)) - 优化动转静下 `paddle.reshape` API 的报错信息,新增推荐用法提示。([#40599](https://github.com/PaddlePaddle/Paddle/pull/40599)) - 优化动转静代码转写时 `is_api_in_module` 函数中异常捕获类型。([#40243](https://github.com/PaddlePaddle/Paddle/pull/40243)) - - 优化动转静模块报错提示,默认隐藏warning信息。([#39730](https://github.com/PaddlePaddle/Paddle/pull/https://github.com/PaddlePaddle/Paddle/pull/39730)) + - 优化动转静模块报错提示,默认隐藏 warning 信息。([#39730](https://github.com/PaddlePaddle/Paddle/pull/39730)) - - 增加动转静对于type hint语法的支持,提高变量类型分析的准确性。([#39572](https://github.com/PaddlePaddle/Paddle/pull/39572)) + - 增加动转静对于 type hint 语法的支持,提高变量类型分析的准确性。([#39572](https://github.com/PaddlePaddle/Paddle/pull/39572)) - - 优化 `paddle.cond` 功能,允许bool、int等基本类型支持值相等。([#37888](https://github.com/PaddlePaddle/Paddle/pull/37888)) + - 优化 `paddle.cond` 功能,允许 bool、int 等基本类型支持值相等。([#37888](https://github.com/PaddlePaddle/Paddle/pull/37888)) - - 优化动转静`@to_static` 装饰普通函数时,允许切换train/eval模式。([#37383](https://github.com/PaddlePaddle/Paddle/pull/37383)) + - 优化动转静`@to_static` 装饰普通函数时,允许切换 train/eval 模式。([#37383](https://github.com/PaddlePaddle/Paddle/pull/37383)) - 优化动转静报错栈,突出用户相关代码,减少框架冗余报错栈。([#36741](https://github.com/PaddlePaddle/Paddle/pull/36741)) @@ -1627,7 +1627,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 优化 Fleet API 和 DistributedStrategy 配置以使用动态图并行功能,提升动态图易用性。([#40408](https://github.com/PaddlePaddle/Paddle/pull/40408)) - - 优化动态图混合并行 HybridParallelClipGrad 策略,支持4D混合并行 + Pure FP16 训练。([#36237](https://github.com/PaddlePaddle/Paddle/pull/36237), [#36555](https://github.com/PaddlePaddle/Paddle/pull/36555)) + - 优化动态图混合并行 HybridParallelClipGrad 策略,支持 4D 混合并行 + Pure FP16 训练。([#36237](https://github.com/PaddlePaddle/Paddle/pull/36237), [#36555](https://github.com/PaddlePaddle/Paddle/pull/36555)) - 重构动态图数据并行策略,以支持新动态图和新通信库功能。([#40389](https://github.com/PaddlePaddle/Paddle/pull/40389), [#40593](https://github.com/PaddlePaddle/Paddle/pull/40593), [#40836](https://github.com/PaddlePaddle/Paddle/pull/40836), [#41119](https://github.com/PaddlePaddle/Paddle/pull/41119), [#41413](https://github.com/PaddlePaddle/Paddle/pull/41413), [#39987](https://github.com/PaddlePaddle/Paddle/pull/39987)) @@ -1637,7 +1637,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 图检索引擎 - - 优化图引擎的图采样接口返回的数据格式,采样速度提升3倍。([#37315](https://github.com/PaddlePaddle/Paddle/pull/37315)) + - 优化图引擎的图采样接口返回的数据格式,采样速度提升 3 倍。([#37315](https://github.com/PaddlePaddle/Paddle/pull/37315)) - 减少图引擎线程量以提升性能。([#37098](https://github.com/PaddlePaddle/Paddle/pull/37098)) @@ -1683,77 +1683,77 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### 分布式训练 -- 混合并行优化器 sharding 支持 optimize_cast 优化,将前反向参数 cast 移到优化器阶段,性能提升7%。([#35878](https://github.com/PaddlePaddle/Paddle/pull/35878)) +- 混合并行优化器 sharding 支持 optimize_cast 优化,将前反向参数 cast 移到优化器阶段,性能提升 7%。([#35878](https://github.com/PaddlePaddle/Paddle/pull/35878)) -- GPUPS 优化:支持梯度 fuse allreduce 训练,训练提升20%。 ([#35131](https://github.com/PaddlePaddle/Paddle/pull/35131)) +- GPUPS 优化:支持梯度 fuse allreduce 训练,训练提升 20%。 ([#35131](https://github.com/PaddlePaddle/Paddle/pull/35131)) -- GPUPS 优化:dump CPU 优化提速3.21倍。 ([#40068](https://github.com/PaddlePaddle/Paddle/pull/40068)) +- GPUPS 优化:dump CPU 优化提速 3.21 倍。 ([#40068](https://github.com/PaddlePaddle/Paddle/pull/40068)) -- CPU 参数服务器流式训练优化:支持稀疏参数统计量自动统计、稀疏参数增量保存等功能,训练性能提升20%。([#36465](https://github.com/PaddlePaddle/Paddle/pull/36465), [#36601](https://github.com/PaddlePaddle/Paddle/pull/36601), [#36734](https://github.com/PaddlePaddle/Paddle/pull/36734), [#36909](https://github.com/PaddlePaddle/Paddle/pull/36909), [#36943](https://github.com/PaddlePaddle/Paddle/pull/36943), [#37181](https://github.com/PaddlePaddle/Paddle/pull/37181), [#37194](https://github.com/PaddlePaddle/Paddle/pull/37194), [#37515](https://github.com/PaddlePaddle/Paddle/pull/37515), [#37626](https://github.com/PaddlePaddle/Paddle/pull/37626), [#37995](https://github.com/PaddlePaddle/Paddle/pull/37995), [#38582](https://github.com/PaddlePaddle/Paddle/pull/38582), [#39250](https://github.com/PaddlePaddle/Paddle/pull/39250), [#40762](https://github.com/PaddlePaddle/Paddle/pull/40762), [#41234](https://github.com/PaddlePaddle/Paddle/pull/41234), [#41320](https://github.com/PaddlePaddle/Paddle/pull/41320), [#41400](https://github.com/PaddlePaddle/Paddle/pull/41400)) +- CPU 参数服务器流式训练优化:支持稀疏参数统计量自动统计、稀疏参数增量保存等功能,训练性能提升 20%。([#36465](https://github.com/PaddlePaddle/Paddle/pull/36465), [#36601](https://github.com/PaddlePaddle/Paddle/pull/36601), [#36734](https://github.com/PaddlePaddle/Paddle/pull/36734), 
[#36909](https://github.com/PaddlePaddle/Paddle/pull/36909), [#36943](https://github.com/PaddlePaddle/Paddle/pull/36943), [#37181](https://github.com/PaddlePaddle/Paddle/pull/37181), [#37194](https://github.com/PaddlePaddle/Paddle/pull/37194), [#37515](https://github.com/PaddlePaddle/Paddle/pull/37515), [#37626](https://github.com/PaddlePaddle/Paddle/pull/37626), [#37995](https://github.com/PaddlePaddle/Paddle/pull/37995), [#38582](https://github.com/PaddlePaddle/Paddle/pull/38582), [#39250](https://github.com/PaddlePaddle/Paddle/pull/39250), [#40762](https://github.com/PaddlePaddle/Paddle/pull/40762), [#41234](https://github.com/PaddlePaddle/Paddle/pull/41234), [#41320](https://github.com/PaddlePaddle/Paddle/pull/41320), [#41400](https://github.com/PaddlePaddle/Paddle/pull/41400)) #### 算子优化 -- 优化 `FasterTokenizer` 性能,性能与优化前相比提升10%。 ([#36701](https://github.com/PaddlePaddle/Paddle/pull/36701)) +- 优化 `FasterTokenizer` 性能,性能与优化前相比提升 10%。 ([#36701](https://github.com/PaddlePaddle/Paddle/pull/36701)) -- 优化 `index_select` 反向计算,性能较优化前有3.7~25.2倍提升。([#37055](https://github.com/PaddlePaddle/Paddle/pull/37055)) +- 优化 `index_select` 反向计算,性能较优化前有 3.7~25.2 倍提升。([#37055](https://github.com/PaddlePaddle/Paddle/pull/37055)) -- 优化 `paddle.nn.ClipByGlobalNorm` 的性能,以10*10的 `paddle.nn.Linear` 为例,性能与优化前相比提升30%左右。 ([#38209](https://github.com/PaddlePaddle/Paddle/pull/38209)) +- 优化 `paddle.nn.ClipByGlobalNorm` 的性能,以 10*10 的 `paddle.nn.Linear` 为例,性能与优化前相比提升 30%左右。 ([#38209](https://github.com/PaddlePaddle/Paddle/pull/38209)) -- 优化 `pnorm` 在 `axis` 维度极大或极小情况下的性能,前向速度提升31~96倍,反向速度提升1.1~19倍。([#37685](https://github.com/PaddlePaddle/Paddle/pull/37685), [#38215](https://github.com/PaddlePaddle/Paddle/pull/38215), [#39011](https://github.com/PaddlePaddle/Paddle/pull/39011)) +- 优化 `pnorm` 在 `axis` 维度极大或极小情况下的性能,前向速度提升 31~96 倍,反向速度提升 1.1~19 倍。([#37685](https://github.com/PaddlePaddle/Paddle/pull/37685), [#38215](https://github.com/PaddlePaddle/Paddle/pull/38215), 
[#39011](https://github.com/PaddlePaddle/Paddle/pull/39011)) -- 优化 `softmax` 前、反向性能,对于 `axis!=-1` 的配置加速比为2倍左右。([#38602](https://github.com/PaddlePaddle/Paddle/pull/38602), [#38609](https://github.com/PaddlePaddle/Paddle/pull/38609), [#32387](https://github.com/PaddlePaddle/Paddle/pull/32387), [#37927](https://github.com/PaddlePaddle/Paddle/pull/37927/files)) +- 优化 `softmax` 前、反向性能,对于 `axis!=-1` 的配置加速比为 2 倍左右。([#38602](https://github.com/PaddlePaddle/Paddle/pull/38602), [#38609](https://github.com/PaddlePaddle/Paddle/pull/38609), [#32387](https://github.com/PaddlePaddle/Paddle/pull/32387), [#37927](https://github.com/PaddlePaddle/Paddle/pull/37927/files)) -- 优化 `log_softmax` 前、反向性能,对于 `axis!=-1`的配置加速比为6~20倍左右。([#38992](https://github.com/PaddlePaddle/Paddle/pull/38992), [#40612](https://github.com/PaddlePaddle/Paddle/pull/40612)) +- 优化 `log_softmax` 前、反向性能,对于 `axis!=-1`的配置加速比为 6~20 倍左右。([#38992](https://github.com/PaddlePaddle/Paddle/pull/38992), [#40612](https://github.com/PaddlePaddle/Paddle/pull/40612)) -- 优化 `softmax_with_cross_entropy` 前、反向性能,对于 `hard_label` 的配置加速比为1.3倍左右。([#39553](https://github.com/PaddlePaddle/Paddle/pull/39553), [#40424](https://github.com/PaddlePaddle/Paddle/pull/40424), [#40643](https://github.com/PaddlePaddle/Paddle/pull/40643)) +- 优化 `softmax_with_cross_entropy` 前、反向性能,对于 `hard_label` 的配置加速比为 1.3 倍左右。([#39553](https://github.com/PaddlePaddle/Paddle/pull/39553), [#40424](https://github.com/PaddlePaddle/Paddle/pull/40424), [#40643](https://github.com/PaddlePaddle/Paddle/pull/40643)) -- 优化 `top_k` 性能,对于一维且 `k` 较大时(k=5000)的配置加速比为22倍以上。([#40941](https://github.com/PaddlePaddle/Paddle/pull/40941)) +- 优化 `top_k` 性能,对于一维且 `k` 较大时(k=5000)的配置加速比为 22 倍以上。([#40941](https://github.com/PaddlePaddle/Paddle/pull/40941)) -- 优化 `elementwise_mul` 反向计算,较优化前有1.85~12.16倍性能提升。([#37728](https://github.com/PaddlePaddle/Paddle/pull/37728)) +- 优化 `elementwise_mul` 反向计算,较优化前有 1.85~12.16 倍性能提升。([#37728](https://github.com/PaddlePaddle/Paddle/pull/37728)) -- 优化 
`elementwise_min` 反向和 `elementwise_max` 反向,较优化前打平或有1.05~18.75倍性能提升。([#38236](https://github.com/PaddlePaddle/Paddle/pull/38236), [#37906](https://github.com/PaddlePaddle/Paddle/pull/37906)) +- 优化 `elementwise_min` 反向和 `elementwise_max` 反向,较优化前打平或有 1.05~18.75 倍性能提升。([#38236](https://github.com/PaddlePaddle/Paddle/pull/38236), [#37906](https://github.com/PaddlePaddle/Paddle/pull/37906)) -- 优化 `nearest_interp` 前向和反向计算,前向较优化前性能有1.5~2.3倍提升;反向性能较优化前有60%~1.8倍提升。([#38528](https://github.com/PaddlePaddle/Paddle/pull/38528), [#39067](https://github.com/PaddlePaddle/Paddle/pull/39067)) +- 优化 `nearest_interp` 前向和反向计算,前向较优化前性能有 1.5~2.3 倍提升;反向性能较优化前有 60%~1.8 倍提升。([#38528](https://github.com/PaddlePaddle/Paddle/pull/38528), [#39067](https://github.com/PaddlePaddle/Paddle/pull/39067)) -- 优化 `bilinear_interp` 前向和反向计算,前向较优化前性能有0.4~2.3倍提升;反向性能较优化前有10%~30%提升。([#39243](https://github.com/PaddlePaddle/Paddle/pull/39243), [#39423](https://github.com/PaddlePaddle/Paddle/pull/39423)) +- 优化 `bilinear_interp` 前向和反向计算,前向较优化前性能有 0.4~2.3 倍提升;反向性能较优化前有 10%~30%提升。([#39243](https://github.com/PaddlePaddle/Paddle/pull/39243), [#39423](https://github.com/PaddlePaddle/Paddle/pull/39423)) -- 优化 `dropout` 前向和反向计算,性能提升约20%。([#39795](https://github.com/PaddlePaddle/Paddle/pull/39795), [#38859](https://github.com/PaddlePaddle/Paddle/pull/38859), [#38279](https://github.com/PaddlePaddle/Paddle/pull/38279), [#40053](https://github.com/PaddlePaddle/Paddle/pull/40053)) +- 优化 `dropout` 前向和反向计算,性能提升约 20%。([#39795](https://github.com/PaddlePaddle/Paddle/pull/39795), [#38859](https://github.com/PaddlePaddle/Paddle/pull/38859), [#38279](https://github.com/PaddlePaddle/Paddle/pull/38279), [#40053](https://github.com/PaddlePaddle/Paddle/pull/40053)) -- 优化 `grid_sampler`前向和反向计算,前向较优化前性能有10%~30%提升;反向性能较优化前有10%~60%提升。([#39751](https://github.com/PaddlePaddle/Paddle/pull/39751)) +- 优化 `grid_sampler`前向和反向计算,前向较优化前性能有 10%~30%提升;反向性能较优化前有 10%~60%提升。([#39751](https://github.com/PaddlePaddle/Paddle/pull/39751)) -- 优化 
`group_norm` 前向和反向计算,前向性能提升1.04~2.35倍,反向性能提升1.12~1.18倍。([#39944](https://github.com/PaddlePaddle/Paddle/pull/39944), [#40657](https://github.com/PaddlePaddle/Paddle/pull/40657), [#39596](https://github.com/PaddlePaddle/Paddle/pull/39596)) +- 优化 `group_norm` 前向和反向计算,前向性能提升 1.04~2.35 倍,反向性能提升 1.12~1.18 倍。([#39944](https://github.com/PaddlePaddle/Paddle/pull/39944), [#40657](https://github.com/PaddlePaddle/Paddle/pull/40657), [#39596](https://github.com/PaddlePaddle/Paddle/pull/39596)) -- 优化 `conv1d` 前向和反向计算,前向性能提升1.00~2.01倍,反向性能提升1.01~474.56倍。([#38425](https://github.com/PaddlePaddle/Paddle/pull/38425)) +- 优化 `conv1d` 前向和反向计算,前向性能提升 1.00~2.01 倍,反向性能提升 1.01~474.56 倍。([#38425](https://github.com/PaddlePaddle/Paddle/pull/38425)) -- 优化 `elementwise_div` 反向计算,反向性能提升1.02~29.25倍。([#38044](https://github.com/PaddlePaddle/Paddle/pull/38044)) +- 优化 `elementwise_div` 反向计算,反向性能提升 1.02~29.25 倍。([#38044](https://github.com/PaddlePaddle/Paddle/pull/38044)) -- 优化 `gelu` 前向和反向计算,前向性能提升1.13~1.43倍,反向性能提升1.10~1.55倍。([#38188](https://github.com/PaddlePaddle/Paddle/pull/38188), [#38263](https://github.com/PaddlePaddle/Paddle/pull/38263)) +- 优化 `gelu` 前向和反向计算,前向性能提升 1.13~1.43 倍,反向性能提升 1.10~1.55 倍。([#38188](https://github.com/PaddlePaddle/Paddle/pull/38188), [#38263](https://github.com/PaddlePaddle/Paddle/pull/38263)) -- 优化 `elementwise_sub` 反向计算,反向性能提升1.04~15.64倍。([#37754](https://github.com/PaddlePaddle/Paddle/pull/37754)) +- 优化 `elementwise_sub` 反向计算,反向性能提升 1.04~15.64 倍。([#37754](https://github.com/PaddlePaddle/Paddle/pull/37754)) -- 优化 `flip` 在输入一维数据时前向性能,性能提升100%。([#37825](https://github.com/PaddlePaddle/Paddle/pull/37825)) +- 优化 `flip` 在输入一维数据时前向性能,性能提升 100%。([#37825](https://github.com/PaddlePaddle/Paddle/pull/37825)) -- 优化 `layer_norm` 前向和反向计算,前向较优化前提升2-5倍,反向较优化前提升20%~50%。([#39167](https://github.com/PaddlePaddle/Paddle/pull/39167), [#39247](https://github.com/PaddlePaddle/Paddle/pull/39247)) +- 优化 `layer_norm` 前向和反向计算,前向较优化前提升 2-5 倍,反向较优化前提升 
20%~50%。([#39167](https://github.com/PaddlePaddle/Paddle/pull/39167), [#39247](https://github.com/PaddlePaddle/Paddle/pull/39247)) -- 优化 `embedding` 前向和反向计算,前向较优化前最大提升1.51倍,反向较优化前提升1.03~7.79倍。([#39856](https://github.com/PaddlePaddle/Paddle/pull/39856), [#39886](https://github.com/PaddlePaddle/Paddle/pull/398866)) +- 优化 `embedding` 前向和反向计算,前向较优化前最大提升 1.51 倍,反向较优化前提升 1.03~7.79 倍。([#39856](https://github.com/PaddlePaddle/Paddle/pull/39856), [#39886](https://github.com/PaddlePaddle/Paddle/pull/398866)) -- 优化 `gelu` FP16 前向和反向计算,前向较优化前提升9%~12%,反向较优化前提升2%~9%。([#38980](https://github.com/PaddlePaddle/Paddle/pull/38980)) +- 优化 `gelu` FP16 前向和反向计算,前向较优化前提升 9%~12%,反向较优化前提升 2%~9%。([#38980](https://github.com/PaddlePaddle/Paddle/pull/38980)) - 移除 `gather_nd`前反向算子中的 CPU -> GPU 显式数据传输操作,移除 `index_select` 前反向算子中的显式同步操作,将 `scatter_nd` 中的 GPU -> GPU 数据传输由同步操作改成异步操作。([#40933](https://github.com/PaddlePaddle/Paddle/pull/40933)) -- 优化 `Lars optimzier` 计算,优化后 Resnet50 PF16 模型训练性能较优化前提升5.1%。 ([#35652](https://github.com/PaddlePaddle/Paddle/pull/35652), [#35476](https://github.com/PaddlePaddle/Paddle/pull/35476)) +- 优化 `Lars optimzier` 计算,优化后 Resnet50 PF16 模型训练性能较优化前提升 5.1%。 ([#35652](https://github.com/PaddlePaddle/Paddle/pull/35652), [#35476](https://github.com/PaddlePaddle/Paddle/pull/35476)) -- 优化 `AvgPool2dGrad` 计算,优化后性能较优化前提升2.6倍。 ([#35389](https://github.com/PaddlePaddle/Paddle/pull/35389)) +- 优化 `AvgPool2dGrad` 计算,优化后性能较优化前提升 2.6 倍。 ([#35389](https://github.com/PaddlePaddle/Paddle/pull/35389)) -- 优化 `Elementwise` 类计算对于多元输出的功能支持,优化后计算性能较优化前提升最多可达15% 。([#38329](https://github.com/PaddlePaddle/Paddle/pull/38329), [#38410](https://github.com/PaddlePaddle/Paddle/pull/38410)) +- 优化 `Elementwise` 类计算对于多元输出的功能支持,优化后计算性能较优化前提升最多可达 15% 。([#38329](https://github.com/PaddlePaddle/Paddle/pull/38329), [#38410](https://github.com/PaddlePaddle/Paddle/pull/38410)) - 优化 `Categorical`的 `probs`计算,简化计算逻辑,性能提升 4 ~ 5 倍。([#42178](https://github.com/PaddlePaddle/Paddle/pull/42178)) -- `paddle.sum` 
性能优化,性能相比优化前提升约20%。([#42309](https://github.com/PaddlePaddle/Paddle/pull/42309)) +- `paddle.sum` 性能优化,性能相比优化前提升约 20%。([#42309](https://github.com/PaddlePaddle/Paddle/pull/42309)) #### 自动调优 -新增训练全流程硬件感知性能自动调优功能,在图像分类、分割、检测和图像生成任务上与模型默认参数配置下的性能相比提升约3%~50%以上。通过 `paddle.incubate.autotune.set_config` API设置自动调优状态,当前默认关闭。自动调优具体包括三个层次: +新增训练全流程硬件感知性能自动调优功能,在图像分类、分割、检测和图像生成任务上与模型默认参数配置下的性能相比提升约 3%~50%以上。通过 `paddle.incubate.autotune.set_config` API 设置自动调优状态,当前默认关闭。自动调优具体包括三个层次: - `paddle.io.DataLoader` 新增自动调优功能,根据训练数据和设备资源选择最佳的模型 num_workers。 ([#42004](https://github.com/PaddlePaddle/Paddle/pull/42004)) @@ -1763,7 +1763,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. #### 调度优化 -- 移除 `paddle.nn.ClipGradByGlobalNorm` 中的 CudaStreamSync 隐藏操作,减少执行时的调度开销,在 ptb 模型上有5%的性能提升。([#42170](https://github.com/PaddlePaddle/Paddle/pull/42170)) +- 移除 `paddle.nn.ClipGradByGlobalNorm` 中的 CudaStreamSync 隐藏操作,减少执行时的调度开销,在 ptb 模型上有 5%的性能提升。([#42170](https://github.com/PaddlePaddle/Paddle/pull/42170)) - 优化一系列底层数据结构及原动态图执行体系中的细节实现,提升原动态图的调度性能。([#42010](https://github.com/PaddlePaddle/Paddle/pull/42010), [#42171](https://github.com/PaddlePaddle/Paddle/pull/42171), [#42224](https://github.com/PaddlePaddle/Paddle/pull/42224), [#42256](https://github.com/PaddlePaddle/Paddle/pull/42256), [#42306](https://github.com/PaddlePaddle/Paddle/pull/42306), [#42329](https://github.com/PaddlePaddle/Paddle/pull/42329)[, #42340](https://github.com/PaddlePaddle/Paddle/pull/42340), [#42368](https://github.com/PaddlePaddle/Paddle/pull/42368), [#42425](https://github.com/PaddlePaddle/Paddle/pull/42425)) @@ -1773,7 +1773,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
#### API -- 修复 `paddle.sum` 输入参数类型和输出参数类型不一致且 `axis` 轴对应的 reduce 元素个数为1时,输出类型错误问题。([#36123](https://github.com/PaddlePaddle/Paddle/pull/36123)) +- 修复 `paddle.sum` 输入参数类型和输出参数类型不一致且 `axis` 轴对应的 reduce 元素个数为 1 时,输出类型错误问题。([#36123](https://github.com/PaddlePaddle/Paddle/pull/36123)) - 修复 `paddle.flops` 在 layer 输出类型为 tuple 时的 `AttributeError`。([#38850](https://github.com/PaddlePaddle/Paddle/pull/38850)) @@ -1807,9 +1807,9 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 `paddle.nn.Identity` 没有公开的问题。([#39615](https://github.com/PaddlePaddle/Paddle/pull/39615)) -- 修复动态图重构后,`fill_` 和 `zero_` inplace API的输入在 CUDAPinned Place上时,输出值不正确的 bug。([#41229](https://github.com/PaddlePaddle/Paddle/pull/41229)) +- 修复动态图重构后,`fill_` 和 `zero_` inplace API 的输入在 CUDAPinned Place 上时,输出值不正确的 bug。([#41229](https://github.com/PaddlePaddle/Paddle/pull/41229)) -- 动态图重构后,修复使用 append op 的方式调用 assign op 导致输出 Tensor 的 inplace version 值不正确的bug,修改为使用 `_C_ops` 的方式调用 assign op。([#41118](https://github.com/PaddlePaddle/Paddle/pull/41118)) +- 动态图重构后,修复使用 append op 的方式调用 assign op 导致输出 Tensor 的 inplace version 值不正确的 bug,修改为使用 `_C_ops` 的方式调用 assign op。([#41118](https://github.com/PaddlePaddle/Paddle/pull/41118)) - 移除 `elementwise_add` 三阶 Kernel 中不合理的代码,修复组网过程未初始化问题。 ([#36618](https://github.com/PaddlePaddle/Paddle/pull/36618)) @@ -1821,7 +1821,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复高阶微分 `gradients` 接口在指定 target_grad 时未按预期生效的问题。([#40940](https://github.com/PaddlePaddle/Paddle/pull/40940/)) -- 修复动态图 op`_BatchNormBase` 基类中修改了 default_dtype,导致后续组网参数类型错误的问题,受影响的API有 `paddle.nn.BatchNorm1D`,`paddle.nn.BatchNorm2D`,`paddle.nn.BatchNorm3D`,`paddle.nn.SyncBatchNorm`。具体原因是当 `get_default_dtype() == 'float16'` 时,通过 `set_default_dtype('float32')`修改默认参数数据类型,动态图组网的参数类型是通过 default_dtype 来创建的,因此当默认参数类型被修改后导致后续的组网参数类型错误。 ([#36376](https://github.com/PaddlePaddle/Paddle/pull/36376)) +- 修复动态图 op`_BatchNormBase` 基类中修改了 default_dtype,导致后续组网参数类型错误的问题,受影响的 API 有 `paddle.nn.BatchNorm1D`,`paddle.nn.BatchNorm2D`,`paddle.nn.BatchNorm3D`,`paddle.nn.SyncBatchNorm`。具体原因是当 `get_default_dtype() == 'float16'` 时,通过 `set_default_dtype('float32')`修改默认参数数据类型,动态图组网的参数类型是通过 default_dtype 来创建的,因此当默认参数类型被修改后导致后续的组网参数类型错误。 ([#36376](https://github.com/PaddlePaddle/Paddle/pull/36376)) - 修复 batchnorm op 中,当数据类型为 FP32 ,且数据维度 `dims = 2,data_layout = NHWC` 时,反向 op 内中间变量未定义问题。 ([#37020](https://github.com/PaddlePaddle/Paddle/pull/37020)) @@ -1891,19 +1891,19 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 `paddle.nn.functional.pad` API 在模型动转静时,padding 为 Tensor 条件下的 Infershape 信息错误问题。([#42414](https://github.com/PaddlePaddle/Paddle/pull/42414)) -- 修复 `paddle.distribution.StickBreakingTransform` 输入维度超过2时异常的问题。([#41762](https://github.com/PaddlePaddle/Paddle/pull/41672)) +- 修复 `paddle.distribution.StickBreakingTransform` 输入维度超过 2 时异常的问题。([#41762](https://github.com/PaddlePaddle/Paddle/pull/41672)) - 修复 fused_attention op 中 QK^T 计算出 nan/inf 的问题。([#42032](https://github.com/PaddlePaddle/Paddle/pull/42032)) -- 修复 fused_attention op 中 FusedResidualDropoutBias 在V100上计算出 nan/inf 问题。([#42398](https://github.com/PaddlePaddle/Paddle/pull/42398)) +- 修复 fused_attention op 中 FusedResidualDropoutBias 在 V100 上计算出 nan/inf 问题。([#42398](https://github.com/PaddlePaddle/Paddle/pull/42398)) - 修复 full_like op 在执行时引入的多余的 data transform 问题。([#41973](https://github.com/PaddlePaddle/Paddle/pull/41973)) - 修复 p_norm op 在 GPU 环境上计算 nan 的问题。([#41804](https://github.com/PaddlePaddle/Paddle/pull/41804)) -- 修复 split op 在参数 sections 存在为0的 size 情况下,段错误的问题。([#41755](https://github.com/PaddlePaddle/Paddle/pull/41755)) +- 修复 split op 在参数 sections 存在为 0 的 size 情况下,段错误的问题。([#41755](https://github.com/PaddlePaddle/Paddle/pull/41755)) -- 修复6个 elementwise op(pow、complex、divide_double、multiply_double、fmax、fmin)在需要 broadcast 的情况下,多卡训练时报Place(gpu:0) 不支持的问题。([#42332](https://github.com/PaddlePaddle/Paddle/pull/42332)) +- 修复 6 个 elementwise op(pow、complex、divide_double、multiply_double、fmax、fmin)在需要 broadcast 的情况下,多卡训练时报 Place(gpu:0) 不支持的问题。([#42332](https://github.com/PaddlePaddle/Paddle/pull/42332)) - 修复 import paddle 时由于 PIL 版本升级导致的废弃接口报 warning 的问题。([#42307](https://github.com/PaddlePaddle/Paddle/pull/42307)) @@ -1965,7 +1965,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 sharding 开启 offload 时,sharding 的 save_persistables 接口未保存 FP16 参数和 offload 持久化变量的问题。([#40477](https://github.com/PaddlePaddle/Paddle/pull/40477)) - - 修复开启 sharding 训练时,ema 参数在非0号卡上无法保存的问题。([#39860](https://github.com/PaddlePaddle/Paddle/pull/39860)) + - 修复开启 sharding 训练时,ema 参数在非 0 号卡上无法保存的问题。([#39860](https://github.com/PaddlePaddle/Paddle/pull/39860)) - 修复 FC 按照列切分梯度计算错误的问题。([#38724](https://github.com/PaddlePaddle/Paddle/pull/38724)) @@ -2003,7 +2003,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 ChannelWise 量化训练速度慢的问题。([#40772](https://github.com/PaddlePaddle/Paddle/pull/40772)) -- 修复量化训练初始化为0的 Tensor 出 NAN 的问题。([#36762](https://github.com/PaddlePaddle/Paddle/pull/36762)) +- 修复量化训练初始化为 0 的 Tensor 出 NAN 的问题。([#36762](https://github.com/PaddlePaddle/Paddle/pull/36762)) - 修复多线程场景下混合精度 amp_level 设置错误问题。([#39198](https://github.com/PaddlePaddle/Paddle/pull/39198)) @@ -2035,7 +2035,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复联编开启 -DWITH_DISTRIBUTED 生成 Paddle Inference 缺少符号 `paddle::distributed::TensorTable` 的问题。 ([#41128](https://github.com/PaddlePaddle/Paddle/pull/41128)) -- matmul_v2 op 新增 shape check,在 shape 中存在0值进行信息报错。 ([#35791](https://github.com/PaddlePaddle/Paddle/pull/35791)) +- matmul_v2 op 新增 shape check,在 shape 中存在 0 值进行信息报错。 ([#35791](https://github.com/PaddlePaddle/Paddle/pull/35791)) - 修复动态图 recompute 对于没有梯度输入提示信息反复打印,改成用 warning 只打印一次的方式。([#38293](https://github.com/PaddlePaddle/Paddle/pull/38293)) @@ -2051,7 +2051,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 eigvalsh 单元测试数据初始化问题。([#39841](https://github.com/PaddlePaddle/Paddle/pull/39841)) -- 修复 segment op 在 V100上寄存器使用过多导致不能正常运行的问题。([#38113](https://github.com/PaddlePaddle/Paddle/pull/38113)) +- 修复 segment op 在 V100 上寄存器使用过多导致不能正常运行的问题。([#38113](https://github.com/PaddlePaddle/Paddle/pull/38113)) - 修复 conv 相关算子稀疏化维度错误的问题。([#36054](https://github.com/PaddlePaddle/Paddle/pull/36054)) @@ -2059,7 +2059,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 divide op 整数除法还是整数的 bug。([#40890](https://github.com/PaddlePaddle/Paddle/pull/40890)) -- 修复 `paddle.multiplex` 候选 Tensor 大小为0崩溃问题。([#34972](https://github.com/PaddlePaddle/Paddle/pull/34972)) +- 修复 `paddle.multiplex` 候选 Tensor 大小为 0 崩溃问题。([#34972](https://github.com/PaddlePaddle/Paddle/pull/34972)) - 修复 `paddle.kl_div` 参数 `reduction` 给定情况下速度异常的问题。([#37283](https://github.com/PaddlePaddle/Paddle/pull/37283)) @@ -2075,7 +2075,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 移出 `paddle.io.BatchSampler` 对输入参数 dataset 需要是 `paddle.io.Dataset` 类型的限制,扩大对用户自定义数据集的支持。([#40184](https://github.com/PaddlePaddle/Paddle/pull/40184)) -- 修复 `paddle.summary` 报错op_flops不存在的问题。([#36489](https://github.com/PaddlePaddle/Paddle/pull/36489)) +- 修复 `paddle.summary` 报错 op_flops 不存在的问题。([#36489](https://github.com/PaddlePaddle/Paddle/pull/36489)) - 修复 lars_momentum op 在 lars_weight_decay=0 时公式错误的问题。([#40892](https://github.com/PaddlePaddle/Paddle/pull/40892)) @@ -2087,14 +2087,14 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复模型读取时模型档案大小未初始化的问题。([#40518](https://github.com/PaddlePaddle/Paddle/pull/40518)) -- 修复 Expand op 逻辑 bug,当输入Tensor X 的维度,小于要拓展的 shape 时,可能导致取得 Out.Shape 是错误的。([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) +- 修复 Expand op 逻辑 bug,当输入 Tensor X 的维度,小于要拓展的 shape 时,可能导致取得 Out.Shape 是错误的。([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) - 修复 Expand_As op 只取 y.shape,而没有 Y 变量输入时,导致的动转静报错。([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) - 修复 Expand_As op 计算输出 shape 时逻辑的错误。([#38677](https://github.com/PaddlePaddle/Paddle/pull/38677)) -- 修复 `core.VarDesc.VarType.STRINGS` 类型的变量获取 `lod_level` 属性报错的问题,并且设置其 `lod_level` 为None。([#39077](https://github.com/PaddlePaddle/Paddle/pull/39077)) +- 修复 `core.VarDesc.VarType.STRINGS` 类型的变量获取 `lod_level` 属性报错的问题,并且设置其 `lod_level` 为 None。([#39077](https://github.com/PaddlePaddle/Paddle/pull/39077)) - 修复框架功能 `PyLayer` 不支持不同 dtype 的问题。 ([#37974](https://github.com/PaddlePaddle/Paddle/pull/37974)) @@ -2102,7 +2102,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复调用 DisableGlogInfo() 接口后依旧残留部分日志的问题。 ([#36356](https://github.com/PaddlePaddle/Paddle/pull/36356)) -- 修复 SimpleRNN、GRU和LSTM API CPU训练时多层RNN(dropout 设置为0时)反向计算出错的问题。 ([#37080](https://github.com/PaddlePaddle/Paddle/pull/37080)) +- 修复 SimpleRNN、GRU 和 LSTM API CPU 训练时多层 RNN(dropout 设置为 0 时)反向计算出错的问题。 ([#37080](https://github.com/PaddlePaddle/Paddle/pull/37080)) - 为 cufft 和 hipfft 后端的 fft 添加了 cache。 ([#36646](https://github.com/PaddlePaddle/Paddle/pull/36646)) @@ -2118,7 +2118,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. ### (1)新增特性 -#### 新增API +#### 新增 API - 增加 Java API,Java 开发者可以通过简单灵活的接口实现在服务端和云端的高性能推理。([#37162](https://github.com/PaddlePaddle/Paddle/pull/37162)) @@ -2130,7 +2130,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 新增 ONNX Runtime 后端支持,当前集成版本只支持 CPU。([#39988](https://github.com/PaddlePaddle/Paddle/pull/39988), [#40561](https://github.com/PaddlePaddle/Paddle/pull/40561)) -- 基于 Paddle Lite 子图方式,新增昇腾310推理支持。([#35226](https://github.com/PaddlePaddle/Paddle/pull/35226)) +- 基于 Paddle Lite 子图方式,新增昇腾 310 推理支持。([#35226](https://github.com/PaddlePaddle/Paddle/pull/35226)) - 新增原生 GPU FP16 推理功能。([#40531](https://github.com/PaddlePaddle/Paddle/pull/40531)) @@ -2138,13 +2138,13 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 新增 TensorRT config 的配置接口:`void UpdateConfigInterleaved(paddle_infer::Config* c, bool with_interleaved)`,用于 int8 量化推理中特殊的数据排布。([#38884](https://github.com/PaddlePaddle/Paddle/pull/38884)) -- log 中增加 TensorRT inspector 输出信息,仅在 TensorRT 8.2及以上版本有效。 ([#38362](https://github.com/PaddlePaddle/Paddle/pull/38362),[#38200](https://github.com/PaddlePaddle/Paddle/pull/38200))) +- log 中增加 TensorRT inspector 输出信息,仅在 TensorRT 8.2 及以上版本有效。 ([#38362](https://github.com/PaddlePaddle/Paddle/pull/38362),[#38200](https://github.com/PaddlePaddle/Paddle/pull/38200))) - 增加 TensorRT ASP 稀疏推理支持。([#36413](https://github.com/PaddlePaddle/Paddle/pull/36413)) ### (2)底层优化 -#### CPU性能优化 +#### CPU 性能优化 - 优化 MKLDNN 的缓存机制。([#38336](https://github.com/PaddlePaddle/Paddle/pull/38336), [#36980](https://github.com/PaddlePaddle/Paddle/pull/36980), [#36695](https://github.com/PaddlePaddle/Paddle/pull/36695)) @@ -2168,7 +2168,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 支持 gelu、FC+gelu ops 使用 TensorRT 推理。([#38399](https://github.com/PaddlePaddle/Paddle/pull/38399))合作团队 -- 支持 `deformable_conv` 在静态 shape下使用 TensorRT 推理。([#36612](https://github.com/PaddlePaddle/Paddle/pull/36612) [#36850](https://github.com/PaddlePaddle/Paddle/pull/36850) [#37345](https://github.com/PaddlePaddle/Paddle/pull/37345)) +- 支持 `deformable_conv` 在静态 shape 下使用 TensorRT 推理。([#36612](https://github.com/PaddlePaddle/Paddle/pull/36612) [#36850](https://github.com/PaddlePaddle/Paddle/pull/36850) [#37345](https://github.com/PaddlePaddle/Paddle/pull/37345)) - 支持 nearest_interp_v2 op 使用 TensorRT 推理。([#34126](https://github.com/PaddlePaddle/Paddle/pull/34126)) @@ -2180,9 +2180,9 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 支持 flatten_contiguous_rang op 使用 TensorRT 推理。([#38922](https://github.com/PaddlePaddle/Paddle/pull/38922)) -- 支持 `pool2d` 属性 `padding` 的维度为4、`global_pooling` 和 `ceil_mode` 为 True 情况下使用 TensorRT 推理。([#39545](https://github.com/PaddlePaddle/Paddle/pull/39545)) +- 支持 `pool2d` 属性 `padding` 的维度为 4、`global_pooling` 和 `ceil_mode` 为 True 情况下使用 TensorRT 推理。([#39545](https://github.com/PaddlePaddle/Paddle/pull/39545)) -- 支持 batch_norm 和 elementwise_add 为5维时使用 TensorRT 推理。([#36446](https://github.com/PaddlePaddle/Paddle/pull/36446)) +- 支持 batch_norm 和 elementwise_add 为 5 维时使用 TensorRT 推理。([#36446](https://github.com/PaddlePaddle/Paddle/pull/36446)) - 新增 pool3d 使用 TensorRT 推理。([#36545](https://github.com/PaddlePaddle/Paddle/pull/36545), [#36783](https://github.com/PaddlePaddle/Paddle/pull/36783)) @@ -2208,7 +2208,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 量化支持 - - `PostTrainingQuantization` API新增支持`paddle.io.DataLoader` 对象或者 `Python Generator`的输入。([#38686](https://github.com/PaddlePaddle/Paddle/pull/38686)) + - `PostTrainingQuantization` API 新增支持`paddle.io.DataLoader` 对象或者 `Python Generator`的输入。([#38686](https://github.com/PaddlePaddle/Paddle/pull/38686)) - ERNIE 全量化模型推理支持 interleaved 数据排布。([#39424](https://github.com/PaddlePaddle/Paddle/pull/39424)) @@ -2220,7 +2220,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 支持 multihead attention 非变长分支中 FC 部分的量化推理。([#39660](https://github.com/PaddlePaddle/Paddle/pull/39660)) -#### 昇腾NPU 相关功能 +#### 昇腾 NPU 相关功能 - - 重构 shape 算子前向计算逻辑,支持在 NPU 上执行。([#39613](https://github.com/PaddlePaddle/Paddle/pull/39613)) @@ -2230,7 +2230,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. ### (3)问题修复 -#### 框架及API修复 +#### 框架及 API 修复 - 修复保存静态图时模型剪裁的问题。([#37579](https://github.com/PaddlePaddle/Paddle/pull/37579)) @@ -2280,7 +2280,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 transpose 参数错误。([#39006](https://github.com/PaddlePaddle/Paddle/pull/39006)) -- 修复 nearest_interp_v2 输入 scale 维度小于1时崩溃的问题。([#38725](https://github.com/PaddlePaddle/Paddle/pull/38725)) +- 修复 nearest_interp_v2 输入 scale 维度小于 1 时崩溃的问题。([#38725](https://github.com/PaddlePaddle/Paddle/pull/38725)) - 修复 prelu 在 dynamic shape 时不支持一维输入的问题。([#39389](https://github.com/PaddlePaddle/Paddle/pull/39389)) @@ -2296,7 +2296,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 nearest_interp op 当 align_corners 为 True 时,TensorRT layer 的结果和原生 op 的结果有 diff,底层实现不一样。([#37525](https://github.com/PaddlePaddle/Paddle/pull/37525)) -- 修复qkv_plugin: 核函数计算错误。([#37096](https://github.com/PaddlePaddle/Paddle/pull/37096)) +- 修复 qkv_plugin: 核函数计算错误。([#37096](https://github.com/PaddlePaddle/Paddle/pull/37096)) - 修复动态量化的推理 pass 的问题。([#35879](https://github.com/PaddlePaddle/Paddle/pull/35879)) @@ -2314,17 +2314,17 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 softmax 在 python 和 C++上性能差异较大的问题。([#37130](https://github.com/PaddlePaddle/Paddle/pull/37130)) -- 修复 matmul 在静态 shape 2维输入和动态 shape 3维输入情况下推理失败问题。([#36849](https://github.com/PaddlePaddle/Paddle/pull/36849)) +- 修复 matmul 在静态 shape 2 维输入和动态 shape 3 维输入情况下推理失败问题。([#36849](https://github.com/PaddlePaddle/Paddle/pull/36849)) - 修复 reshape_transpose_matmul_mkldnn_fuse_pass 对 shape 处理不当问题。([#36731](https://github.com/PaddlePaddle/Paddle/pull/36731)) -- 修复输入为2维,但 TensorRT 获取到4维的问题。([#36614](https://github.com/PaddlePaddle/Paddle/pull/36614)) +- 修复输入为 2 维,但 TensorRT 获取到 4 维的问题。([#36614](https://github.com/PaddlePaddle/Paddle/pull/36614)) - 修复 interpolate_v2 MKLDNN 算子在 scale 属性为空时报错问题。([#36623](https://github.com/PaddlePaddle/Paddle/pull/36623)) - 修复 recurrent 算子在多线程场景性能差问题。([#36052](https://github.com/PaddlePaddle/Paddle/pull/36052)) -- 移除 relu、sigmoid、tanh、relu6、batch_norm、clip、concat、gelu、hard_sigmoid、prelu、softmax、split、swish 对 TensorRT 2维输入的限制。([#37097](https://github.com/PaddlePaddle/Paddle/pull/37097)) +- 移除 relu、sigmoid、tanh、relu6、batch_norm、clip、concat、gelu、hard_sigmoid、prelu、softmax、split、swish 对 TensorRT 2 维输入的限制。([#37097](https://github.com/PaddlePaddle/Paddle/pull/37097)) - 修复 reshape op 使用 TensorRT 推理。([#41090](https://github.com/PaddlePaddle/Paddle/pull/41090)) @@ -2342,7 +2342,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 MKLDNN bfloat16 推理中 split 算子执行问题。([#39548](https://github.com/PaddlePaddle/Paddle/pull/39548)) -- 修复 MKLDNN matmul_v2 算子不支持6维问题。([#36342](https://github.com/PaddlePaddle/Paddle/pull/36342), [#38665](https://github.com/PaddlePaddle/Paddle/pull/38665)) +- 修复 MKLDNN matmul_v2 算子不支持 6 维问题。([#36342](https://github.com/PaddlePaddle/Paddle/pull/36342), [#38665](https://github.com/PaddlePaddle/Paddle/pull/38665)) - 修复 MKLDNN matmul_v2_transpose_reshape 中的 MKLDNN DeviceContext 错误问题。([#38554](https://github.com/PaddlePaddle/Paddle/pull/38554)) @@ -2350,7 +2350,7 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. - 修复 MKLDNN bfloat16 placement 算子列表并添加缺失算子。([#36291](https://github.com/PaddlePaddle/Paddle/pull/36291)) -- 修复 MKLDNN 算子的格式问题,包括: FC、conv_transpose、6维 Tensor 报错问题、conv 对 `NHWC` 输入的输出 format 错误问题。([#38890](https://github.com/PaddlePaddle/Paddle/pull/38890), [#37344](https://github.com/PaddlePaddle/Paddle/pull/37344), [#37175](https://github.com/PaddlePaddle/Paddle/pull/37175), [#38553](https://github.com/PaddlePaddle/Paddle/pull/38553), [#40049](https://github.com/PaddlePaddle/Paddle/pull/40049), [#39097](https://github.com/PaddlePaddle/Paddle/pull/39097)) +- 修复 MKLDNN 算子的格式问题,包括: FC、conv_transpose、6 维 Tensor 报错问题、conv 对 `NHWC` 输入的输出 format 错误问题。([#38890](https://github.com/PaddlePaddle/Paddle/pull/38890), [#37344](https://github.com/PaddlePaddle/Paddle/pull/37344), [#37175](https://github.com/PaddlePaddle/Paddle/pull/37175), [#38553](https://github.com/PaddlePaddle/Paddle/pull/38553), [#40049](https://github.com/PaddlePaddle/Paddle/pull/40049), [#39097](https://github.com/PaddlePaddle/Paddle/pull/39097)) - 修复 MKLDNN 多线程推理场景因 cache 机制报错问题。([#36290](https://github.com/PaddlePaddle/Paddle/pull/36290), [#35884](https://github.com/PaddlePaddle/Paddle/pull/35884)) @@ -2363,31 +2363,31 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 修复 MKLDNN 某些 op 修改 layout 后需要改回原 layout 的问题。([#39422](https://github.com/PaddlePaddle/Paddle/pull/39422)) -- 修复针对昇腾910推理场景下,由于未释放 GIL 锁,导致与昇腾软件栈冲突,python API 下报错的问题。 ([#38605](https://github.com/PaddlePaddle/Paddle/pull/38605)) +- 修复针对昇腾 910 推理场景下,由于未释放 GIL 锁,导致与昇腾软件栈冲突,python API 下报错的问题。 ([#38605](https://github.com/PaddlePaddle/Paddle/pull/38605)) ## 5. 环境适配 ### 编译安装 -- 从2.3.0 版本开始,飞桨对框架支持的 GPU 架构种类进行了调整和升级。(更多请参考: [飞桨支持的 GPU 架构](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.3rc/install/Tables.html#gpu)) +- 从 2.3.0 版本开始,飞桨对框架支持的 GPU 架构种类进行了调整和升级。(更多请参考: [飞桨支持的 GPU 架构](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.3rc/install/Tables.html#gpu)) 备注: -- PIP 源安装是指用 `pip install paddlepaddle` 或 `pip install paddlepaddle-gpu`从 PIP 官网下载安装包及依赖库的安装方式,支持架构种类少,安装包更轻量,下载源来自国外(相比bos源支持架构种类精简,安装包更轻量,只提供一种 CUDA 版本的安装包)。 +- PIP 源安装是指用 `pip install paddlepaddle` 或 `pip install paddlepaddle-gpu`从 PIP 官网下载安装包及依赖库的安装方式,支持架构种类少,安装包更轻量,下载源来自国外(相比 bos 源支持架构种类精简,安装包更轻量,只提供一种 CUDA 版本的安装包)。 - - 2.3版本之前,飞桨 PIP 源安装包(CUDA10.2)支持的 GPU 架构为:3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5。 + - 2.3 版本之前,飞桨 PIP 源安装包(CUDA10.2)支持的 GPU 架构为:3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5。 - - 2.3版本之后,飞桨 PIP 源安装包(CUDA11.0)支持的 GPU 架构为:6.0, 6.1, 7.0, 7.5, 8.0 + - 2.3 版本之后,飞桨 PIP 源安装包(CUDA11.0)支持的 GPU 架构为:6.0, 6.1, 7.0, 7.5, 8.0 -- 飞桨官网 bos 源是指从飞桨官网下载安装包及依赖库的安装方式,支持的 GPU 架构更多,下载源来自国内,速度较快。(相比PIP源支持架构种类多,提供多个 CUDA 版本的安装包): +- 飞桨官网 bos 源是指从飞桨官网下载安装包及依赖库的安装方式,支持的 GPU 架构更多,下载源来自国内,速度较快。(相比 PIP 源支持架构种类多,提供多个 CUDA 版本的安装包): - - 2.3版本之前,飞桨官网 bos 源安装包支持的 GPU 架构: + - 2.3 版本之前,飞桨官网 bos 源安装包支持的 GPU 架构: - CUDA10 : 3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5; - CUDA11 : 5.2,6.0,6.1,7.0,7.5,8.0。 - - 2.3版本之后,飞桨官网 bos 源安装包支持的 GPU 架构 + - 2.3 版本之后,飞桨官网 bos 源安装包支持的 GPU 架构 - CUDA10 : 3.5, 5.0, 5.2, 6.0, 6.1, 7.0, 7.5; @@ -2406,13 +2406,13 @@ AssertionError: elu_ only support alpha >= 0, please use elu instead. 
- 自定义新硬件接入:提供一种插件式扩展 PaddlePaddle 硬件后端的方式。通过该功能,开发者无需为特定硬件修改 PaddlePaddle 代码,只需实现标准接口,并编译成动态链接库,则可作为插件供 PaddlePaddle 调用。降低为 PaddlePaddle 添加新硬件后端的开发难度。当前支持自定义 Runtime 接入和自定义 Kernel 接入。 -- 华为 NPU 芯片(Ascend910)训练/推理支持,支持ResNet50、YoloV3、BERT、Transformer等多个模型,支持静态图与混合精度训练,支持单卡、单机、多机分布式训练。 +- 华为 NPU 芯片(Ascend910)训练/推理支持,支持 ResNet50、YoloV3、BERT、Transformer 等多个模型,支持静态图与混合精度训练,支持单卡、单机、多机分布式训练。 -- Graphcore IPU芯片(包括IPU Mk2 GC200 和 Bow IPU)训练/推理支持,支持ResNet50、BERT等模型,支持静态图训练,支持单芯片、单机、多机分布式训练。 +- Graphcore IPU 芯片(包括 IPU Mk2 GC200 和 Bow IPU)训练/推理支持,支持 ResNet50、BERT 等模型,支持静态图训练,支持单芯片、单机、多机分布式训练。 -- 寒武纪MLU芯片(MLU370x4)训练/推理支持,支持ResNet50等模型,支持静态图+动态图训练,支持混合精度训练,支持单卡、单机、多机分布式训练。 +- 寒武纪 MLU 芯片(MLU370x4)训练/推理支持,支持 ResNet50 等模型,支持静态图+动态图训练,支持混合精度训练,支持单卡、单机、多机分布式训练。 -- 昆仑芯2代芯片(昆仑芯 AI加速卡 R200、R300)训练/推理支持,支持ResNet50、YoloV3、OCR-DB、SSD、MobilnetV3、UNet、BERT、Transformer、GPT-2、Wide&Deep、DeepFM,支持静态图+动态图训练,支持混合精度训练,支持单机单卡、单机多卡训练。 +- 昆仑芯 2 代芯片(昆仑芯 AI 加速卡 R200、R300)训练/推理支持,支持 ResNet50、YoloV3、OCR-DB、SSD、MobilnetV3、UNet、BERT、Transformer、GPT-2、Wide&Deep、DeepFM,支持静态图+动态图训练,支持混合精度训练,支持单机单卡、单机多卡训练。 ## Thanks to our Contributors From 945102abe14b42387aa6a6e3dc5ac707c191e8cd Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 13:01:46 +0000 Subject: [PATCH 06/20] fix check_api_cn script to ignore some files not include sample code --- ci_scripts/check_api_cn.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci_scripts/check_api_cn.sh b/ci_scripts/check_api_cn.sh index f2923e99045..34d688b8778 100644 --- a/ci_scripts/check_api_cn.sh +++ b/ci_scripts/check_api_cn.sh @@ -11,7 +11,7 @@ function filter_cn_api_files() { local __resultvar=$2 local need_check_files="" for file in `echo $git_files`;do - grep "code-block" ../docs/$file > /dev/null + grep 'code-block:: python' ../docs/$file > /dev/null if [ $? 
-eq 0 ] ;then api_file=`echo $file | sed 's#api/##g'` grep -w "${api_file}" ${DIR_PATH}/api_white_list.txt > /dev/null From 91955b1c04bc244536944cb2b82a409e5b889a0a Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 14:43:49 +0000 Subject: [PATCH 07/20] use COPY-FROM to avoid some ci error --- docs/api/paddle/amp/GradScaler_cn.rst | 299 +------------- docs/api/paddle/device/cuda/Event_cn.rst | 27 +- docs/api/paddle/device/cuda/Stream_cn.rst | 45 +- .../device/cuda/get_device_capability_cn.rst | 12 +- .../paddle/device/cuda/get_device_name_cn.rst | 12 +- .../paddle/distributed/InMemoryDataset_cn.rst | 313 +------------- .../api/paddle/distributed/ParallelEnv_cn.rst | 79 +--- .../distributed/fleet/utils/HDFSClient_cn.rst | 157 +------ .../distributed/fleet/utils/LocalFS_cn.rst | 90 +--- .../nn/functional/fused_feedforward_cn.rst | 15 +- docs/api/paddle/jit/TracedLayer_cn.rst | 79 +--- docs/api/paddle/nn/Layer_cn.rst | 391 ++---------------- docs/api/paddle/static/BuildStrategy_cn.rst | 158 +------ docs/api/paddle/static/Executor_cn.rst | 194 +-------- .../api/paddle/static/ParallelExecutor_cn.rst | 128 +----- .../paddle/vision/transforms/Compose_cn.rst | 10 +- 16 files changed, 122 insertions(+), 1887 deletions(-) diff --git a/docs/api/paddle/amp/GradScaler_cn.rst b/docs/api/paddle/amp/GradScaler_cn.rst index b2379a931b6..a2bb68b1c4e 100644 --- a/docs/api/paddle/amp/GradScaler_cn.rst +++ b/docs/api/paddle/amp/GradScaler_cn.rst @@ -36,23 +36,7 @@ GradScaler 用于动态图模式下的"自动混合精度"的训练。它控制 代码示例 ::::::::: -.. 
code-block:: python - - import paddle - - model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters()) - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - data = paddle.rand([10, 3, 32, 32]) - - with paddle.amp.auto_cast(): - conv = model(data) - loss = paddle.mean(conv) - - scaled = scaler.scale(loss) # scale the loss - scaled.backward() # do backward - scaler.minimize(optimizer, scaled) # update parameters - optimizer.clear_grad() +COPY-FROM: paddle.amp.GradScaler scale(var) @@ -71,23 +55,7 @@ scale(var) **代码示例** -.. code-block:: python - - import paddle - - model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters()) - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - data = paddle.rand([10, 3, 32, 32]) - - with paddle.amp.auto_cast(): - conv = model(data) - loss = paddle.mean(conv) - - scaled = scaler.scale(loss) # scale the loss - scaled.backward() # do backward - scaler.minimize(optimizer, scaled) # update parameters - optimizer.clear_grad() +COPY-FROM: paddle.amp.GradScaler.scale minimize(optimizer, *args, **kwargs) ''''''''' @@ -104,23 +72,7 @@ minimize(optimizer, *args, **kwargs) **代码示例** -.. code-block:: python - - import paddle - - model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters()) - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - data = paddle.rand([10, 3, 32, 32]) - - with paddle.amp.auto_cast(): - conv = model(data) - loss = paddle.mean(conv) - - scaled = scaler.scale(loss) # scale the loss - scaled.backward() # do backward - scaler.minimize(optimizer, scaled) # update parameters - optimizer.clear_grad() +COPY-FROM: paddle.amp.GradScaler.minimize step(optimizer) ''''''''' @@ -135,22 +87,7 @@ step(optimizer) **代码示例** -.. 
code-block:: python - - import paddle - - model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters()) - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - data = paddle.rand([10, 3, 32, 32]) - with paddle.amp.auto_cast(): - conv = model(data) - loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss - scaled.backward() # do backward - scaler.step(optimizer) # update parameters - scaler.update() # update the loss scaling ratio - optimizer.clear_grad() +COPY-FROM: paddle.amp.GradScaler.step update() ''''''''' @@ -159,22 +96,7 @@ update() **代码示例** -.. code-block:: python - - import paddle - - model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters()) - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - data = paddle.rand([10, 3, 32, 32]) - with paddle.amp.auto_cast(): - conv = model(data) - loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss - scaled.backward() # do backward - scaler.step(optimizer) # update parameters - scaler.update() # update the loss scaling ratio - optimizer.clear_grad() +COPY-FROM: paddle.amp.GradScaler.update unscale_(optimizer) ''''''''' @@ -188,23 +110,7 @@ unscale_(optimizer) **代码示例** -.. 
code-block:: python - - import paddle - - model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True) - optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters()) - scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - data = paddle.rand([10, 3, 32, 32]) - with paddle.amp.auto_cast(): - conv = model(data) - loss = paddle.mean(conv) - scaled = scaler.scale(loss) # scale the loss - scaled.backward() # do backward - scaler.unscale_(optimizer) # unscale the parameter - scaler.step(optimizer) - scaler.update() - optimizer.clear_grad() +COPY-FROM: paddle.amp.GradScaler.unscale_ is_enable() ''''''''' @@ -217,18 +123,7 @@ bool,采用 loss scaling 策略返回 True,否则返回 False。 **代码示例** -.. code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - enable = scaler.is_enable() - print(enable) # True +COPY-FROM: paddle.amp.GradScaler.is_enable is_use_dynamic_loss_scaling() ''''''''' @@ -241,18 +136,7 @@ bool,动态调节 loss scaling 缩放比例返回 True,否则返回 False。 **代码示例** -.. code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - use_dynamic_loss_scaling = scaler.is_use_dynamic_loss_scaling() - print(use_dynamic_loss_scaling) # True +COPY-FROM: paddle.amp.GradScaler.is_use_dynamic_loss_scaling get_init_loss_scaling() ''''''''' @@ -265,18 +149,7 @@ float,初始化的 loss scaling 缩放比例。 **代码示例** -.. 
code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - init_loss_scaling = scaler.get_init_loss_scaling() - print(init_loss_scaling) # 1024 +COPY-FROM: paddle.amp.GradScaler.get_init_loss_scaling set_init_loss_scaling(new_init_loss_scaling) ''''''''' @@ -289,20 +162,7 @@ set_init_loss_scaling(new_init_loss_scaling) **代码示例** -.. code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - print(scaler.get_init_loss_scaling()) # 1024 - new_init_loss_scaling = 1000 - scaler.set_init_loss_scaling(new_init_loss_scaling) - print(scaler.get_init_loss_scaling()) # 1000 +COPY-FROM: paddle.amp.GradScaler.set_init_loss_scaling get_incr_ratio() ''''''''' @@ -315,18 +175,7 @@ float,增大 loss scaling 时使用的乘数。 **代码示例** -.. code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - incr_ratio = scaler.get_incr_ratio() - print(incr_ratio) # 2.0 +COPY-FROM: paddle.amp.GradScaler.get_incr_ratio set_incr_ratio(new_incr_ratio) ''''''''' @@ -339,20 +188,7 @@ set_incr_ratio(new_incr_ratio) **代码示例** -.. 
code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - print(scaler.get_incr_ratio()) # 2.0 - new_incr_ratio = 3.0 - scaler.set_incr_ratio(new_incr_ratio) - print(scaler.get_incr_ratio()) # 3.0 +COPY-FROM: paddle.amp.GradScaler.set_incr_ratio get_decr_ratio() ''''''''' @@ -365,18 +201,7 @@ float,缩小 loss scaling 时使用的乘数。 **代码示例** -.. code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - decr_ratio = scaler.get_decr_ratio() - print(decr_ratio) # 0.5 +COPY-FROM: paddle.amp.GradScaler.get_decr_ratio set_decr_ratio(new_decr_ratio) ''''''''' @@ -389,20 +214,7 @@ set_decr_ratio(new_decr_ratio) **代码示例** -.. code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - print(scaler.get_decr_ratio()) # 0.5 - new_decr_ratio = 0.1 - scaler.set_decr_ratio(new_decr_ratio) - print(scaler.get_decr_ratio()) # 0.1 +COPY-FROM: paddle.amp.GradScaler.set_decr_ratio get_incr_every_n_steps() ''''''''' @@ -415,18 +227,7 @@ int,参数 incr_every_n_steps。 **代码示例** -.. 
code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - incr_every_n_steps = scaler.get_incr_every_n_steps() - print(incr_every_n_steps) # 1000 +COPY-FROM: paddle.amp.GradScaler.get_incr_every_n_steps set_incr_every_n_steps(new_incr_every_n_steps) ''''''''' @@ -439,20 +240,7 @@ set_incr_every_n_steps(new_incr_every_n_steps) **代码示例** -.. code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - print(scaler.get_incr_every_n_steps()) # 1000 - new_incr_every_n_steps = 2000 - scaler.set_incr_every_n_steps(new_incr_every_n_steps) - print(scaler.get_incr_every_n_steps()) # 2000 +COPY-FROM: paddle.amp.GradScaler.set_incr_every_n_steps get_decr_every_n_nan_or_inf() ''''''''' @@ -465,18 +253,7 @@ int,参数 decr_every_n_nan_or_inf。 **代码示例** -.. code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - decr_every_n_nan_or_inf = scaler.get_decr_every_n_nan_or_inf() - print(decr_every_n_nan_or_inf) # 2 +COPY-FROM: paddle.amp.GradScaler.get_decr_every_n_nan_or_inf set_decr_every_n_nan_or_inf(new_decr_every_n_nan_or_inf) ''''''''' @@ -489,20 +266,7 @@ set_decr_every_n_nan_or_inf(new_decr_every_n_nan_or_inf) **代码示例** -.. 
code-block:: python - - import paddle - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - print(scaler.get_decr_every_n_nan_or_inf()) # 2 - new_decr_every_n_nan_or_inf = 3 - scaler.set_decr_every_n_nan_or_inf(new_decr_every_n_nan_or_inf) - print(scaler.get_decr_every_n_nan_or_inf()) # 3 +COPY-FROM: paddle.amp.GradScaler.set_decr_every_n_nan_or_inf state_dict() ''''''''' @@ -515,18 +279,7 @@ dict,字典存储的参数包括:scale(tensor):loss scaling 因子、incr_ra **代码示例** -.. code-block:: python - - import paddle - - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - scaler_state = scaler.state_dict() +COPY-FROM: paddle.amp.GradScaler.state_dict load_state_dict(state_dict) ''''''''' @@ -539,16 +292,4 @@ load_state_dict(state_dict) **代码示例** -.. code-block:: python - - import paddle - - scaler = paddle.amp.GradScaler(enable=True, - init_loss_scaling=1024, - incr_ratio=2.0, - decr_ratio=0.5, - incr_every_n_steps=1000, - decr_every_n_nan_or_inf=2, - use_dynamic_loss_scaling=True) - scaler_state = scaler.state_dict() - scaler.load_state_dict(scaler_state) +COPY-FROM: paddle.amp.GradScaler.load_state_dict diff --git a/docs/api/paddle/device/cuda/Event_cn.rst b/docs/api/paddle/device/cuda/Event_cn.rst index ab72e355fed..86f5203644b 100644 --- a/docs/api/paddle/device/cuda/Event_cn.rst +++ b/docs/api/paddle/device/cuda/Event_cn.rst @@ -21,11 +21,7 @@ None 代码示例 :::::::::::: -.. code-block:: python - - # required: gpu - import paddle - event = paddle.device.cuda.Event() +COPY-FROM: paddle.device.cuda.Event 方法 @@ -41,12 +37,7 @@ record(CUDAStream=None) **代码示例** -.. 
code-block:: python - - # required: gpu - import paddle - event = paddle.device.cuda.Event() - event.record() +COPY-FROM: paddle.device.cuda.Event.record query() ''''''''' @@ -59,12 +50,7 @@ query() **代码示例** -.. code-block:: python - - # required: gpu - import paddle - event = paddle.device.cuda.Event() - is_done = event.query() +COPY-FROM: paddle.device.cuda.Event.query synchronize() @@ -74,9 +60,4 @@ synchronize() **代码示例** -.. code-block:: python - - # required: gpu - import paddle - event = paddle.device.cuda.Event() - event.synchronize() +COPY-FROM: paddle.device.cuda.Event.synchronize diff --git a/docs/api/paddle/device/cuda/Stream_cn.rst b/docs/api/paddle/device/cuda/Stream_cn.rst index b61bae35d25..21f28564178 100644 --- a/docs/api/paddle/device/cuda/Stream_cn.rst +++ b/docs/api/paddle/device/cuda/Stream_cn.rst @@ -17,13 +17,7 @@ CUDA stream 的句柄。 代码示例 :::::::::::: -.. code-block:: python - - # required: gpu - import paddle - s1 = paddle.device.cuda.Stream(paddle.CUDAPlace(0), 1) - s2 = paddle.device.cuda.Stream(0, 1) - s3 = paddle.device.cuda.Stream() +COPY-FROM: paddle.device.cuda.Stream @@ -40,13 +34,7 @@ wait_event(event) **代码示例** -.. code-block:: python - - # required: gpu - import paddle - s = paddle.device.cuda.Stream(paddle.CUDAPlace(0), 1) - event = paddle.device.cuda.Event() - s.wait_event(event) +COPY-FROM: paddle.device.cuda.Stream.wait_event wait_stream(stream) @@ -61,13 +49,7 @@ wait_stream(stream) **代码示例** -.. code-block:: python - - # required: gpu - import paddle - s1 = paddle.device.cuda.Stream(paddle.CUDAPlace(0), 1) - s2 = paddle.device.cuda.Stream(0, 1) - s1.wait_stream(s2) +COPY-FROM: paddle.device.cuda.Stream.wait_stream query() @@ -80,12 +62,7 @@ query() **代码示例** -.. code-block:: python - - # required: gpu - import paddle - s = paddle.device.cuda.Stream(paddle.CUDAPlace(0), 1) - is_done = s.query() +COPY-FROM: paddle.device.cuda.Stream.query synchronize() ''''''''' @@ -94,12 +71,7 @@ synchronize() **代码示例** -.. 
code-block:: python - - # required: gpu - import paddle - s = paddle.device.cuda.Stream(paddle.CUDAPlace(0), 1) - s.synchronize() +COPY-FROM: paddle.device.cuda.Stream.synchronize record_event(event=None) ''''''''' @@ -115,9 +87,4 @@ record_event(event=None) **代码示例** -.. code-block:: python - - # required: gpu - import paddle - s = paddle.device.cuda.Stream(paddle.CUDAPlace(0), 1) - event = s.record_event() +COPY-FROM: paddle.device.cuda.Stream.record_event diff --git a/docs/api/paddle/device/cuda/get_device_capability_cn.rst b/docs/api/paddle/device/cuda/get_device_capability_cn.rst index a8c7e1e4bbd..5619ca50195 100644 --- a/docs/api/paddle/device/cuda/get_device_capability_cn.rst +++ b/docs/api/paddle/device/cuda/get_device_capability_cn.rst @@ -20,14 +20,4 @@ tuple(int,int):设备计算能力的主要和次要修订号。 代码示例 ::::::::: - .. code-block:: python - - # required: gpu - - import paddle - - paddle.device.cuda.get_device_capability() - - paddle.device.cuda.get_device_capability(0) - - paddle.device.cuda.get_device_capability(paddle.CUDAPlace(0)) +COPY-FROM: paddle.device.cuda.get_device_capability diff --git a/docs/api/paddle/device/cuda/get_device_name_cn.rst b/docs/api/paddle/device/cuda/get_device_name_cn.rst index 248977319bc..e5a94f08449 100644 --- a/docs/api/paddle/device/cuda/get_device_name_cn.rst +++ b/docs/api/paddle/device/cuda/get_device_name_cn.rst @@ -20,14 +20,4 @@ str:设备的名称。 代码示例 :::::::::: - .. 
code-block:: python - - # required: gpu - - import paddle - - paddle.device.cuda.get_device_name() - - paddle.device.cuda.get_device_name(0) - - paddle.device.cuda.get_device_name(paddle.CUDAPlace(0)) +COPY-FROM: paddle.device.cuda.get_device_name diff --git a/docs/api/paddle/distributed/InMemoryDataset_cn.rst b/docs/api/paddle/distributed/InMemoryDataset_cn.rst index 85b4f92747d..2fa482bf03e 100644 --- a/docs/api/paddle/distributed/InMemoryDataset_cn.rst +++ b/docs/api/paddle/distributed/InMemoryDataset_cn.rst @@ -4,7 +4,7 @@ InMemoryDataset ------------------------------- -.. py:class:: paddle.distributed.InMemoryDataset +.. py:class:: paddle.distributed.InMemoryDataset() @@ -13,11 +13,7 @@ InMemoryDataset,它将数据加载到内存中,并在训练前随机整理 代码示例 :::::::::::: -.. code-block:: python - - import paddle - paddle.enable_static() - dataset = paddle.distributed.InMemoryDataset() +COPY-FROM: paddle.distributed.InMemoryDataset 方法 :::::::::::: @@ -49,54 +45,7 @@ None。 **代码示例** -.. code-block:: python - - import paddle - import os - - paddle.enable_static() - - with open("test_queue_dataset_run_a.txt", "w") as f: - data = "2 1 2 2 5 4 2 2 7 2 1 3\n" - data += "2 6 2 2 1 4 2 2 4 2 2 3\n" - data += "2 5 2 2 9 9 2 2 7 2 1 3\n" - data += "2 7 2 2 1 9 2 3 7 2 5 3\n" - f.write(data) - with open("test_queue_dataset_run_b.txt", "w") as f: - data = "2 1 2 2 5 4 2 2 7 2 1 3\n" - data += "2 6 2 2 1 4 2 2 4 2 2 3\n" - data += "2 5 2 2 9 9 2 2 7 2 1 3\n" - data += "2 7 2 2 1 9 2 3 7 2 5 3\n" - f.write(data) - - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - - dataset = paddle.distributed.InMemoryDataset() - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - dataset.set_filelist( - ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]) - dataset.load_into_memory() - - place = 
paddle.CPUPlace() - exe = paddle.static.Executor(place) - startup_program = paddle.static.Program() - main_program = paddle.static.Program() - exe.run(startup_program) - - exe.train_from_dataset(main_program, dataset) - - os.remove("./test_queue_dataset_run_a.txt") - os.remove("./test_queue_dataset_run_b.txt") +COPY-FROM: paddle.distributed.InMemoryDataset.init _init_distributed_settings(**kwargs) ''''''''' @@ -125,24 +74,7 @@ None。 **代码示例** -.. code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=[]) - dataset._init_distributed_settings( - parse_ins_id=True, - parse_content=True, - fea_eval=True, - candidate_size=10000) - +COPY-FROM: paddle.distributed.InMemoryDataset._init_distributed_settings update_settings(**kwargs) ''''''''' @@ -178,24 +110,7 @@ None。 **代码示例** -.. code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=[]) - dataset._init_distributed_settings( - parse_ins_id=True, - parse_content=True, - fea_eval=True, - candidate_size=10000) - dataset.update_settings(batch_size=2) +COPY-FROM: paddle.distributed.InMemoryDataset.update_settings load_into_memory() ''''''''' @@ -208,27 +123,7 @@ load_into_memory() **代码示例** -.. 
code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() +COPY-FROM: paddle.distributed.InMemoryDataset.load_into_memory preload_into_memory(thread_num=None) ''''''''' @@ -241,28 +136,7 @@ preload_into_memory(thread_num=None) **代码示例** -.. code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.preload_into_memory() - dataset.wait_preload_done() +COPY-FROM: paddle.distributed.InMemoryDataset.preload_into_memory wait_preload_done() ''''''''' @@ -271,28 +145,7 @@ wait_preload_done() **代码示例** -.. 
code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.preload_into_memory() - dataset.wait_preload_done() +COPY-FROM: paddle.distributed.InMemoryDataset.wait_preload_done local_shuffle() ''''''''' @@ -301,28 +154,7 @@ local_shuffle() **代码示例** -.. code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.local_shuffle() +COPY-FROM: paddle.distributed.InMemoryDataset.local_shuffle global_shuffle(fleet=None, thread_num=12) ''''''''' @@ -331,28 +163,7 @@ global_shuffle(fleet=None, thread_num=12) **代码示例** -.. 
code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.global_shuffle() +COPY-FROM: paddle.distributed.InMemoryDataset.global_shuffle **参数** @@ -364,36 +175,7 @@ release_memory() 当数据不再使用时,释放 InMemoryDataset 内存数据。 -**代码示例** - -.. code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.global_shuffle() - exe = paddle.static.Executor(paddle.CPUPlace()) - startup_program = paddle.static.Program() - main_program = paddle.static.Program() - exe.run(startup_program) - exe.train_from_dataset(main_program, dataset) - dataset.release_memory() +COPY-FROM: paddle.distributed.InMemoryDataset.release_memory get_memory_data_size(fleet=None) ''''''''' @@ -412,29 +194,7 @@ get_memory_data_size(fleet=None) **代码示例** -.. 
code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - print dataset.get_memory_data_size() - +COPY-FROM: paddle.distributed.InMemoryDataset.get_memory_data_size get_shuffle_data_size(fleet=None) ''''''''' @@ -453,30 +213,7 @@ shuffle 数据的大小。 **代码示例** -.. code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - dataset = paddle.distributed.InMemoryDataset() - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.global_shuffle() - print dataset.get_shuffle_data_size() +COPY-FROM: paddle.distributed.InMemoryDataset.get_shuffle_data_size slots_shuffle(slots) ''''''''' @@ -489,26 +226,4 @@ slots_shuffle(slots) **代码示例** -.. 
code-block:: python - - import paddle - paddle.enable_static() - - dataset = paddle.distributed.InMemoryDataset() - dataset._init_distributed_settings(fea_eval=True) - slots = ["slot1", "slot2", "slot3", "slot4"] - slots_vars = [] - for slot in slots: - var = paddle.static.data( - name=slot, shape=[None, 1], dtype="int64", lod_level=1) - slots_vars.append(var) - dataset.init( - batch_size=1, - thread_num=2, - input_type=1, - pipe_command="cat", - use_var=slots_vars) - filelist = ["a.txt", "b.txt"] - dataset.set_filelist(filelist) - dataset.load_into_memory() - dataset.slots_shuffle(['slot1']) +COPY-FROM: paddle.distributed.InMemoryDataset.slots_shuffle diff --git a/docs/api/paddle/distributed/ParallelEnv_cn.rst b/docs/api/paddle/distributed/ParallelEnv_cn.rst index 9a90c221f60..e7f9a386d4e 100644 --- a/docs/api/paddle/distributed/ParallelEnv_cn.rst +++ b/docs/api/paddle/distributed/ParallelEnv_cn.rst @@ -15,35 +15,7 @@ ParallelEnv 代码示例 ::::::::: - .. code-block:: python - - import paddle - import paddle.distributed as dist - - def train(): - # 1. initialize parallel environment - dist.init_parallel_env() - - # 2. get current ParallelEnv - parallel_env = dist.ParallelEnv() - print("rank: ", parallel_env.rank) - print("world_size: ", parallel_env.world_size) - - # print result in process 1: - # rank: 1 - # world_size: 2 - # print result in process 2: - # rank: 2 - # world_size: 2 - - if __name__ == '__main__': - # 1. start by ``paddle.distributed.spawn`` (default) - dist.spawn(train, nprocs=2) - # 2. start by ``paddle.distributed.launch`` - # train() - -属性 -:::::::::::: +COPY-FROM: paddle.distributed.ParallelEnv 属性 :::::::::::: @@ -56,15 +28,7 @@ rank **代码示例** - .. 
code-block:: python - - # execute this command in terminal: export PADDLE_TRAINER_ID=0 - import paddle.distributed as dist - - env = dist.ParallelEnv() - print("The rank is %d" % env.rank) - # The rank is 0 - +COPY-FROM: paddle.distributed.ParallelEnv.rank world_size ''''''''' @@ -75,15 +39,7 @@ world_size **代码示例** - .. code-block:: python - - # execute this command in terminal: export PADDLE_TRAINERS_NUM=4 - import paddle.distributed as dist - - env = dist.ParallelEnv() - print("The world_size is %d" % env.world_size) - # The world_size is 4 - +COPY-FROM: paddle.distributed.ParallelEnv.world_size device_id ''''''''' @@ -94,15 +50,7 @@ device_id **代码示例** - .. code-block:: python - - # execute this command in terminal: export FLAGS_selected_gpus=1 - import paddle.distributed as dist - - env = dist.ParallelEnv() - print("The device id are %d" % env.device_id) - # The device id are 1 - +COPY-FROM: paddle.distributed.ParallelEnv.device_id current_endpoint ''''''''' @@ -113,15 +61,7 @@ current_endpoint **代码示例** - .. code-block:: python - - # execute this command in terminal: export PADDLE_CURRENT_ENDPOINT=127.0.0.1:6170 - import paddle.distributed as dist - - env = dist.ParallelEnv() - print("The current endpoint are %s" % env.current_endpoint) - # The current endpoint are 127.0.0.1:6170 - +COPY-FROM: paddle.distributed.ParallelEnv.current_endpoint trainer_endpoints ''''''''' @@ -132,11 +72,4 @@ trainer_endpoints **代码示例** - .. 
code-block:: python - - # execute this command in terminal: export PADDLE_TRAINER_ENDPOINTS=127.0.0.1:6170,127.0.0.1:6171 - import paddle.distributed as dist - - env = dist.ParallelEnv() - print("The trainer endpoints are %s" % env.trainer_endpoints) - # The trainer endpoints are ['127.0.0.1:6170', '127.0.0.1:6171'] +COPY-FROM: paddle.distributed.ParallelEnv.trainer_endpoints diff --git a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst index 6d856f8b4e1..c59edbb6372 100644 --- a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst @@ -15,18 +15,7 @@ HDFSClient 代码示例 :::::::::::: -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - hadoop_home = "/home/client/hadoop-client/hadoop/" - - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - client.ls_dir("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient 方法 :::::::::::: @@ -44,18 +33,7 @@ ls_dir(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - subdirs, files = client.ls_dir("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.ls_dir mkdirs(fs_path) ''''''''' @@ -67,18 +45,7 @@ mkdirs(fs_path) **代码示例** -.. 
code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - client.mkdirs("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.mkdirs delete(fs_path) ''''''''' @@ -90,18 +57,7 @@ delete(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - client.delete("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.delete is_file(fs_path) ''''''''' @@ -117,18 +73,7 @@ is_file(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - ret = client.is_file("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.is_file is_dir(fs_path) ''''''''' @@ -144,18 +89,7 @@ is_dir(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - ret = client.is_file("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.is_dir is_exist(fs_path) ''''''''' @@ -171,18 +105,7 @@ is_exist(fs_path) **代码示例** -.. 
code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - ret = client.is_exist("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.is_exist upload(local_path, fs_path) ''''''''' @@ -195,18 +118,7 @@ upload(local_path, fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - client.upload("test_hdfs_client", "hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.upload download(fs_path, local_path) ''''''''' @@ -219,19 +131,7 @@ download(fs_path, local_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - client.download("hdfs:/test_hdfs_client", "./") - +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.download touch(fs_path, exist_ok=True) ''''''''' @@ -244,18 +144,7 @@ touch(fs_path, exist_ok=True) **代码示例** -.. 
code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - client.touch("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.touch mv(fs_src_path, fs_dst_path, overwrite=False) ''''''''' @@ -269,18 +158,7 @@ HADOOP 系统文件移动。 **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - client.mv("hdfs:/test_hdfs_client", "hdfs:/test_hdfs_client2") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.mv list_dirs(fs_path) ''''''''' @@ -296,15 +174,4 @@ list_dirs(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import HDFSClient - - hadoop_home = "/home/client/hadoop-client/hadoop/" - configs = { - "fs.default.name": "hdfs://xxx.hadoop.com:54310", - "hadoop.job.ugi": "hello,hello123" - } - - client = HDFSClient(hadoop_home, configs) - subdirs = client.list_dirs("hdfs:/test_hdfs_client") +COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.list_dirs diff --git a/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst b/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst index 3d23cfd024d..5ebb14f3993 100644 --- a/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/LocalFS_cn.rst @@ -8,12 +8,8 @@ LocalFS 代码示例 :::::::::::: -.. code-block:: python - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - subdirs, files = client.ls_dir("./") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS 方法 :::::::::::: @@ -31,12 +27,7 @@ ls_dir(fs_path) **代码示例** -.. 
code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - subdirs, files = client.ls_dir("./") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.ls_dir mkdirs(fs_path) ''''''''' @@ -48,13 +39,7 @@ mkdirs(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - client.mkdirs("test_mkdirs") - client.delete("test_mkdirs") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.mkdirs rename(fs_src_path, fs_dst_path) ''''''''' @@ -67,17 +52,7 @@ rename(fs_src_path, fs_dst_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - client.touch("test_rename_src") - print(client.is_exists("test_rename_src")) # True - client.rename("test_rename_src", "test_rename_dst") - print(client.is_exists("test_rename_src")) # False - print(client.is_exists("test_rename_dst")) # True - client.delete("test_rename_dst") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.rename delete(fs_path) ''''''''' @@ -89,13 +64,7 @@ delete(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - client.mkdirs("test_localFS_mkdirs") - client.delete("test_localFS_mkdirs") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.delete is_file(fs_path) ''''''''' @@ -111,14 +80,7 @@ is_file(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - client.touch("test_is_file") - print(client.is_file("test_is_file")) # True - client.delete("test_is_file") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.is_file is_dir(fs_path) ''''''''' @@ -134,14 +96,7 @@ is_dir(fs_path) **代码示例** -.. 
code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - client.mkdirs("test_is_dir") - print(client.is_dir("test_is_file")) # True - client.delete("test_is_dir") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.is_dir is_exist(fs_path) ''''''''' @@ -157,12 +112,7 @@ is_exist(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - ret = local_fs.is_exist("test_is_exist") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.is_exist touch(fs_path, exist_ok=True) ''''''''' @@ -175,13 +125,7 @@ touch(fs_path, exist_ok=True) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - client.touch("test_touch") - client.delete("test_touch") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.touch mv(src_path, dst_path, overwrite=False) ''''''''' @@ -195,14 +139,7 @@ mv(src_path, dst_path, overwrite=False) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - client.touch("test_mv_src") - client.mv("test_mv_src", "test_mv_dst") - client.delete("test_mv_dst") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.mv list_dirs(fs_path) ''''''''' @@ -218,9 +155,4 @@ list_dirs(fs_path) **代码示例** -.. code-block:: python - - from paddle.distributed.fleet.utils import LocalFS - - client = LocalFS() - subdirs = client.list_dirs("./") +COPY-FROM: paddle.distributed.fleet.utils.LocalFS.list_dirs diff --git a/docs/api/paddle/incubate/nn/functional/fused_feedforward_cn.rst b/docs/api/paddle/incubate/nn/functional/fused_feedforward_cn.rst index a5814dced87..02aa4cd2e3d 100644 --- a/docs/api/paddle/incubate/nn/functional/fused_feedforward_cn.rst +++ b/docs/api/paddle/incubate/nn/functional/fused_feedforward_cn.rst @@ -56,17 +56,4 @@ fused_feedforward 代码示例 :::::::::: -.. 
code-block:: python - - # required: gpu - import paddle - import numpy as np - x_data = np.random.random((1, 8, 8)).astype("float32") - linear1_weight_data = np.random.random((8, 8)).astype("float32") - linear2_weight_data = np.random.random((8, 8)).astype("float32") - x = paddle.to_tensor(x_data) - linear1_weight = paddle.to_tensor(linear1_weight_data) - linear2_weight = paddle.to_tensor(linear2_weight_data) - out = paddle.incubate.nn.functional.fused_feedforward(x, linear1_weight, linear2_weight) - print(out.numpy().shape) - # (1, 8, 8) +COPY-FROM: paddle.incubate.nn.functional.fused_feedforward diff --git a/docs/api/paddle/jit/TracedLayer_cn.rst b/docs/api/paddle/jit/TracedLayer_cn.rst index 1e7bc731060..132e8cb8704 100644 --- a/docs/api/paddle/jit/TracedLayer_cn.rst +++ b/docs/api/paddle/jit/TracedLayer_cn.rst @@ -37,29 +37,7 @@ tuple,包含 2 个元素,其中第一个元素是 ``layer(*inputs)`` 的输 **代码示例** -.. code-block:: python - - import paddle - - class ExampleLayer(paddle.nn.Layer): - def __init__(self): - super(ExampleLayer, self).__init__() - self._fc = paddle.nn.Linear(3, 10) - - def forward(self, input): - return self._fc(input) - - layer = ExampleLayer() - in_var = paddle.uniform(shape=[2, 3], dtype='float32') - out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var]) - - # 内部使用 Executor 运行静态图模型 - out_static_graph = static_layer([in_var]) - print(len(out_static_graph)) # 1 - print(out_static_graph[0].shape) # (2, 10) - - # 将静态图模型保存为预测模型 - static_layer.save_inference_model(path='./saved_infer_model') +COPY-FROM: paddle.jit.TracedLayer.trace set_strategy(build_strategy=None, exec_strategy=None) ''''''''' @@ -77,31 +55,7 @@ set_strategy(build_strategy=None, exec_strategy=None) **代码示例** -.. 
code-block:: python - - import paddle - - class ExampleLayer(paddle.nn.Layer): - def __init__(self): - super(ExampleLayer, self).__init__() - self._fc = paddle.nn.Linear(3, 10) - - def forward(self, input): - return self._fc(input) - - layer = ExampleLayer() - in_var = paddle.uniform(shape=[2, 3], dtype='float32') - - out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var]) - - build_strategy = paddle.static.BuildStrategy() - build_strategy.enable_inplace = True - - exec_strategy = paddle.static.ExecutionStrategy() - exec_strategy.num_threads = 2 - - static_layer.set_strategy(build_strategy=build_strategy, exec_strategy=exec_strategy) - out_static_graph = static_layer([in_var]) +COPY-FROM: paddle.jit.TracedLayer.set_strategy save_inference_model(path, feed=None, fetch=None) ''''''''' @@ -122,31 +76,4 @@ save_inference_model(path, feed=None, fetch=None) **代码示例** -.. code-block:: python - - import numpy as np - import paddle - - class ExampleLayer(paddle.nn.Layer): - def __init__(self): - super(ExampleLayer, self).__init__() - self._fc = paddle.nn.Linear(3, 10) - - def forward(self, input): - return self._fc(input) - - save_dirname = './saved_infer_model' - in_np = np.random.random([2, 3]).astype('float32') - in_var = paddle.to_tensor(in_np) - layer = ExampleLayer() - out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var]) - static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0]) - - paddle.enable_static() - place = paddle.CPUPlace() - exe = paddle.static.Executor(place) - program, feed_vars, fetch_vars = paddle.static.load_inference_model(save_dirname, - exe) - - fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars) - print(fetch.shape) # (2, 10) +COPY-FROM: paddle.jit.TracedLayer.save_inference_model diff --git a/docs/api/paddle/nn/Layer_cn.rst b/docs/api/paddle/nn/Layer_cn.rst index 7bf906fd46a..ff64f2d7448 100644 --- a/docs/api/paddle/nn/Layer_cn.rst +++ 
b/docs/api/paddle/nn/Layer_cn.rst @@ -28,27 +28,7 @@ train() **代码示例** -.. code-block:: python - - import paddle - - class MyLayer(paddle.nn.Layer): - def __init__(self): - super(MyLayer, self).__init__() - self._linear = paddle.nn.Linear(1, 1) - self._dropout = paddle.nn.Dropout(p=0.5) - - def forward(self, input): - temp = self._linear(input) - temp = self._dropout(temp) - return temp - - x = paddle.randn([10, 1], 'float32') - mylayer = MyLayer() - mylayer.eval() # set mylayer._dropout to eval mode - out = mylayer(x) - mylayer.train() # set mylayer._dropout to train mode - out = mylayer(x) +COPY-FROM: paddle.nn.Layer eval() ''''''''' @@ -60,26 +40,7 @@ eval() **代码示例** -.. code-block:: python - - import paddle - - class MyLayer(paddle.nn.Layer): - def __init__(self): - super(MyLayer, self).__init__() - self._linear = paddle.nn.Linear(1, 1) - self._dropout = paddle.nn.Dropout(p=0.5) - - def forward(self, input): - temp = self._linear(input) - temp = self._dropout(temp) - return temp - - x = paddle.randn([10, 1], 'float32') - mylayer = MyLayer() - mylayer.eval() # set mylayer._dropout to eval mode - out = mylayer(x) - print(out) +COPY-FROM: paddle.nn.Layer.eval full_name() ''''''''' @@ -91,20 +52,7 @@ str, Layer 的全名 **代码示例** -.. code-block:: python - - import paddle - - class LinearNet(paddle.nn.Layer): - def __init__(self): - super(LinearNet, self).__init__(name_scope = "demo_linear_net") - self._linear = paddle.nn.Linear(1, 1) - - def forward(self, x): - return self._linear(x) - - linear_net = LinearNet() - print(linear_net.full_name()) # demo_linear_net_0 +COPY-FROM: paddle.nn.Layer.full_name register_forward_pre_hook(hook) ''''''''' @@ -124,33 +72,7 @@ HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注 **代码示例** -.. 
code-block:: python - - import paddle - import numpy as np - - # the forward_post_hook change the input of the layer: input = input * 2 - def forward_pre_hook(layer, input): - # user can use layer and input for information statistis tasks - # change the input - input_return = (input[0] * 2) - return input_return - - linear = paddle.nn.Linear(13, 5) - # register the hook - forward_pre_hook_handle = linear.register_forward_pre_hook(forward_pre_hook) - value0 = np.arange(26).reshape(2, 13).astype("float32") - in0 = paddle.to_tensor(value0) - out0 = linear(in0) - - # remove the hook - forward_pre_hook_handle.remove() - value1 = value0 * 2 - in1 = paddle.to_tensor(value1) - out1 = linear(in1) - - # hook change the linear's input to input * 2, so out0 is equal to out1. - assert (out0.numpy() == out1.numpy()).any() +COPY-FROM: paddle.nn.Layer.register_forward_pre_hook register_forward_post_hook(hook) ''''''''' @@ -170,30 +92,7 @@ HookRemoveHelper,可通过调用 ``hook_remove_helper.remove()`` 来删除注 **代码示例** -.. code-block:: python - - import paddle - import numpy as np - - # the forward_post_hook change the output of the layer: output = output * 2 - def forward_post_hook(layer, input, output): - # user can use layer, input and output for information statistis tasks - # change the output - return output * 2 - - linear = paddle.nn.Linear(13, 5) - # register the hook - forward_post_hook_handle = linear.register_forward_post_hook(forward_post_hook) - value1 = np.arange(26).reshape(2, 13).astype("float32") - in1 = paddle.to_tensor(value1) - out0 = linear(in1) - - # remove the hook - forward_post_hook_handle.remove() - out1 = linear(in1) - - # hook change the linear's output to output * 2, so out0 is equal to out1 * 2. - assert (out0.numpy() == (out1.numpy()) * 2).any() +COPY-FROM: paddle.nn.Layer.register_forward_post_hook create_parameter(shape, attr=None, dtype="float32", is_bias=False, default_initializer=None) ''''''''' @@ -213,23 +112,7 @@ Tensor,创建的参数变量 **代码示例** -.. 
code-block:: python - - import paddle - - class MyLayer(paddle.nn.Layer): - def __init__(self): - super(MyLayer, self).__init__() - self._linear = paddle.nn.Linear(1, 1) - w_tmp = self.create_parameter([1,1]) - self.add_parameter("w_tmp", w_tmp) - - def forward(self, input): - return self._linear(input) - - mylayer = MyLayer() - for name, param in mylayer.named_parameters(): - print(name, param) # will print w_tmp,_linear.weight,_linear.bias +COPY-FROM: paddle.nn.Layer.create_parameter create_variable(name=None, persistable=None, dtype=None) ''''''''' @@ -247,24 +130,7 @@ Tensor,返回创建的 ``Tensor`` **代码示例** -.. code-block:: python - - import paddle - - class MyLinear(paddle.nn.Layer): - def __init__(self, - in_features, - out_features): - super(MyLinear, self).__init__() - self.linear = paddle.nn.Linear( 10, 10) - - self.back_var = self.create_variable(name = "linear_tmp_0", dtype=self._dtype) - - def forward(self, input): - out = self.linear(input) - paddle.assign( out, self.back_var) - - return out +COPY-FROM: paddle.nn.Layer.create_variable create_tensor(name=None, persistable=None, dtype=None) ''''''''' @@ -282,25 +148,7 @@ Tensor,返回创建的 ``Tensor`` **代码示例** -.. code-block:: python - - import paddle - - class MyLinear(paddle.nn.Layer): - def __init__(self, - in_features, - out_features): - super(MyLinear, self).__init__() - self.linear = paddle.nn.Linear( 10, 10) - - self.back_var = self.create_tensor(name = "linear_tmp_0", dtype=self._dtype) - - def forward(self, input): - out = self.linear(input) - paddle.assign( out, self.back_var) - - return out - +COPY-FROM: paddle.nn.Layer.create_tensor parameters(include_sublayers=True) ''''''''' @@ -316,12 +164,7 @@ list,一个由当前层及其子层的所有参数组成的列表,列表中 **代码示例** -.. code-block:: python - - import paddle - - linear = paddle.nn.Linear(1,1) - print(linear.parameters()) # print linear_0.w_0 and linear_0.b_0 +COPY-FROM: paddle.nn.Layer.parameters children() ''''''''' @@ -333,17 +176,7 @@ iterator,子层的迭代器。 **代码示例** -.. 
code-block:: python - - import paddle - - linear1 = paddle.nn.Linear(10, 3) - linear2 = paddle.nn.Linear(3, 10, bias_attr=False) - model = paddle.nn.Sequential(linear1, linear2) - - layer_list = list(model.children()) - - print(layer_list) # [, ] +COPY-FROM: paddle.nn.Layer.children named_children() ''''''''' @@ -355,17 +188,7 @@ iterator,产出子层名称和子层的元组的迭代器。 **代码示例** -.. code-block:: python - - import paddle - - linear1 = paddle.nn.Linear(10, 3) - linear2 = paddle.nn.Linear(3, 10, bias_attr=False) - model = paddle.nn.Sequential(linear1, linear2) - for prefix, layer in model.named_children(): - print(prefix, layer) - # ('0', ) - # ('1', ) +COPY-FROM: paddle.nn.Layer.named_children sublayers(include_self=False) ''''''''' @@ -381,23 +204,7 @@ sublayers(include_self=False) **代码示例** -.. code-block:: python - - import paddle - - class MyLayer(paddle.nn.Layer): - def __init__(self): - super(MyLayer, self).__init__() - self._linear = paddle.nn.Linear(1, 1) - self._dropout = paddle.nn.Dropout(p=0.5) - - def forward(self, input): - temp = self._linear(input) - temp = self._dropout(temp) - return temp - - mylayer = MyLayer() - print(mylayer.sublayers()) # [, ] +COPY-FROM: paddle.nn.Layer.sublayers clear_gradients() ''''''''' @@ -409,20 +216,7 @@ clear_gradients() **代码示例** -.. code-block:: python - - import paddle - import numpy as np - - value = np.arange(26).reshape(2, 13).astype("float32") - a = paddle.to_tensor(value) - linear = paddle.nn.Linear(13, 5) - adam = paddle.optimizer.Adam(learning_rate=0.01, - parameters=linear.parameters()) - out = linear(a) - out.backward() - adam.step() - linear.clear_gradients() +COPY-FROM: paddle.nn.Layer.clear_gradients named_parameters(prefix='', include_sublayers=True) ''''''''' @@ -439,15 +233,7 @@ iterator,产出名称和参数的元组的迭代器。 **代码示例** -.. 
code-block:: python - - import paddle - - fc1 = paddle.nn.Linear(10, 3) - fc2 = paddle.nn.Linear(3, 10, bias_attr=False) - model = paddle.nn.Sequential(fc1, fc2) - for name, param in model.named_parameters(): - print(name, param) +COPY-FROM: paddle.nn.Layer.named_parameters named_sublayers(prefix='', include_self=False, layers_set=None) ''''''''' @@ -465,15 +251,7 @@ iterator,产出名称和子层的元组的迭代器。 **代码示例** -.. code-block:: python - - import paddle - - fc1 = paddle.nn.Linear(10, 3) - fc2 = paddle.nn.Linear(3, 10, bias_attr=False) - model = paddle.nn.Sequential(fc1, fc2) - for prefix, layer in model.named_sublayers(): - print(prefix, layer) +COPY-FROM: paddle.nn.Layer.named_sublayers register_buffer(name, tensor, persistable=True) ''''''''' @@ -495,17 +273,7 @@ None **代码示例** -.. code-block:: python - - import numpy as np - import paddle - - linear = paddle.nn.Linear(10, 3) - value = np.array([0]).astype("float32") - buffer = paddle.to_tensor(value) - linear.register_buffer("buf_name", buffer, persistable=True) - # get the buffer by attribute. - print(linear.buf_name) +COPY-FROM: paddle.nn.Layer.register_buffer buffers(include_sublayers=True) ''''''''' @@ -521,17 +289,7 @@ list,一个由当前层及其子层的所有 buffers 组成的列表,列表 **代码示例** -.. code-block:: python - - import numpy as np - import paddle - - linear = paddle.nn.Linear(10, 3) - value = np.array([0]).astype("float32") - buffer = paddle.to_tensor(value) - linear.register_buffer("buf_name", buffer, persistable=True) - - print(linear.buffers()) # == print([linear.buf_name]) +COPY-FROM: paddle.nn.Layer.buffers named_buffers(prefix='', include_sublayers=True) ''''''''' @@ -548,27 +306,7 @@ iterator,产出名称和 buffer 的元组的迭代器。 **代码示例** -.. 
code-block:: python - - import numpy as np - import paddle - - fc1 = paddle.nn.Linear(10, 3) - buffer1 = paddle.to_tensor(np.array([0]).astype("float32")) - # register a tensor as buffer by specific `persistable` - fc1.register_buffer("buf_name_1", buffer1, persistable=True) - - fc2 = paddle.nn.Linear(3, 10) - buffer2 = paddle.to_tensor(np.array([1]).astype("float32")) - # register a buffer by assigning an attribute with Tensor. - # The `persistable` can only be False by this way. - fc2.buf_name_2 = buffer2 - - model = paddle.nn.Sequential(fc1, fc2) - - # get all named buffers - for name, buffer in model.named_buffers(): - print(name, buffer) +COPY-FROM: paddle.nn.Layer.named_buffers forward(*inputs, **kwargs) ''''''''' @@ -598,31 +336,7 @@ Layer,添加的子层 **代码示例** -.. code-block:: python - - import paddle - - class MySequential(paddle.nn.Layer): - def __init__(self, *layers): - super(MySequential, self).__init__() - if len(layers) > 0 and isinstance(layers[0], tuple): - for name, layer in layers: - self.add_sublayer(name, layer) - else: - for idx, layer in enumerate(layers): - self.add_sublayer(str(idx), layer) - - def forward(self, input): - for layer in self._sub_layers.values(): - input = layer(input) - return input - - fc1 = paddle.nn.Linear(10, 3) - fc2 = paddle.nn.Linear(3, 10, bias_attr=False) - model = MySequential(fc1, fc2) - for prefix, layer in model.named_sublayers(): - print(prefix, layer) - +COPY-FROM: paddle.nn.Layer.add_sublayer add_parameter(name, parameter) ''''''''' @@ -639,24 +353,7 @@ Parameter,传入的参数实例 **代码示例** -.. 
code-block:: python - - import paddle - - class MyLayer(paddle.nn.Layer): - def __init__(self): - super(MyLayer, self).__init__() - self._linear = paddle.nn.Linear(1, 1) - w_tmp = self.create_parameter([1,1]) - self.add_parameter("w_tmp", w_tmp) - - def forward(self, input): - return self._linear(input) - - mylayer = MyLayer() - for name, param in mylayer.named_parameters(): - print(name, param) # will print w_tmp,_linear.weight,_linear.bias - +COPY-FROM: paddle.nn.Layer.add_parameter state_dict(destination=None, include_sublayers=True, use_hook=True) ''''''''' @@ -674,14 +371,7 @@ dict,包含所有参数和可持久行 buffers 的 dict **代码示例** -.. code-block:: python - - import paddle - - emb = paddle.nn.Embedding(10, 10) - - state_dict = emb.state_dict() - paddle.save( state_dict, "paddle_dy.pdparams") +COPY-FROM: paddle.nn.Layer.state_dict set_state_dict(state_dict, use_structured_name=True) ''''''''' @@ -698,16 +388,7 @@ set_state_dict(state_dict, use_structured_name=True) **代码示例** -.. code-block:: python - - import paddle - - emb = paddle.nn.Embedding(10, 10) - - state_dict = emb.state_dict() - paddle.save(state_dict, "paddle_dy.pdparams") - para_state_dict = paddle.load("paddle_dy.pdparams") - emb.set_state_dict(para_state_dict) +COPY-FROM: paddle.nn.Layer.set_state_dict to(device=None, dtype=None, blocking=None) ''''''''' @@ -722,30 +403,4 @@ to(device=None, dtype=None, blocking=None) **代码示例** -.. 
code-block:: python - - import paddle - - linear=paddle.nn.Linear(2, 2) - linear.weight - #Parameter containing: - #Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=False, - # [[-0.32770029, 0.38653070], - # [ 0.46030545, 0.08158520]]) - - linear.to(dtype='float64') - linear.weight - #Tenor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=False, - # [[-0.32770029, 0.38653070], - # [ 0.46030545, 0.08158520]]) - - linear.to(device='cpu') - linear.weight - #Tensor(shape=[2, 2], dtype=float64, place=CPUPlace, stop_gradient=False, - # [[-0.32770029, 0.38653070], - # [ 0.46030545, 0.08158520]]) - linear.to(device=paddle.CUDAPinnedPlace(), blocking=False) - linear.weight - #Tensor(shape=[2, 2], dtype=float64, place=CUDAPinnedPlace, stop_gradient=False, - # [[-0.04989364, -0.56889004], - # [ 0.33960250, 0.96878713]]) +COPY-FROM: paddle.nn.Layer.to diff --git a/docs/api/paddle/static/BuildStrategy_cn.rst b/docs/api/paddle/static/BuildStrategy_cn.rst index b9fa0328927..097f03161d0 100644 --- a/docs/api/paddle/static/BuildStrategy_cn.rst +++ b/docs/api/paddle/static/BuildStrategy_cn.rst @@ -14,31 +14,7 @@ BuildStrategy,一个 BuildStrategy 的实例。 代码示例 ::::::::: -.. 
code-block:: python - - import os - import paddle - import paddle.static as static - - paddle.enable_static() - - os.environ['CPU_NUM'] = str(2) - places = static.cpu_places() - - data = static.data(name="x", shape=[None, 1], dtype="float32") - hidden = static.nn.fc(x=data, size=10) - loss = paddle.mean(hidden) - paddle.optimizer.SGD(learning_rate=0.01).minimize(loss) - - build_strategy = static.BuildStrategy() - build_strategy.enable_inplace = True - build_strategy.memory_optimize = True - build_strategy.reduce_strategy = static.BuildStrategy.ReduceStrategy.Reduce - program = static.CompiledProgram(static.default_main_program()) - program = program.with_data_parallel(loss_name=loss.name, - build_strategy=build_strategy, - places=places) - +COPY-FROM: paddle.static.BuildStrategy 属性 :::::::::::: @@ -49,15 +25,7 @@ str 类型。表示以 graphviz 格式向文件中写入计算图的路径,有 **代码示例** -.. code-block:: python - - import paddle - import paddle.static as static - - paddle.enable_static() - - build_strategy = static.BuildStrategy() - build_strategy.debug_graphviz_path = "./graph" +COPY-FROM: paddle.static.BuildStrategy.debug_graphviz_path enable_sequential_execution @@ -67,16 +35,7 @@ bool 类型。如果设置为 True,则算子的执行顺序将与算子定义 **代码示例** -.. code-block:: python - - import paddle - import paddle.static as static - - paddle.enable_static() - - build_strategy = static.BuildStrategy() - build_strategy.enable_sequential_execution = True - +COPY-FROM: paddle.static.BuildStrategy.enable_sequential_execution fuse_broadcast_ops ''''''''' @@ -85,16 +44,7 @@ bool 类型。表明是否融合(fuse) broadcast ops。该选项指在 Reduce **代码示例** -.. code-block:: python - - import paddle - import paddle.static as static - - paddle.enable_static() - - build_strategy = static.BuildStrategy() - build_strategy.fuse_broadcast_ops = True - +COPY-FROM: paddle.static.BuildStrategy.fuse_broadcast_ops fuse_elewise_add_act_ops ''''''''' @@ -103,16 +53,7 @@ bool 类型。表明是否融合(fuse) elementwise_add_op 和 activation_op。 **代码示例** -.. 
code-block:: python - - import paddle - import paddle.static as static - - paddle.enable_static() - - build_strategy = static.BuildStrategy() - build_strategy.fuse_elewise_add_act_ops = True - +COPY-FROM: paddle.static.BuildStrategy.fuse_elewise_add_act_ops fuse_relu_depthwise_conv ''''''''' @@ -121,15 +62,7 @@ bool 类型。表明是否融合(fuse) relu 和 depthwise_conv2d,节省 GPU **代码示例** -.. code-block:: python - - import paddle - import paddle.static as static - - paddle.enable_static() - - build_strategy = static.BuildStrategy() - build_strategy.fuse_relu_depthwise_conv = True +COPY-FROM: paddle.static.BuildStrategy.fuse_relu_depthwise_conv gradient_scale_strategy ''''''''' @@ -138,53 +71,7 @@ gradient_scale_strategy **代码示例** -.. code-block:: python - - import numpy - import os - import paddle - import paddle.static as static - - paddle.enable_static() - - use_cuda = True - place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - exe = static.Executor(place) - - # NOTE: If you use CPU to run the program, you need - # to specify the CPU_NUM, otherwise, paddle will use - # all the number of the logic core as the CPU_NUM, - # in that case, the batch size of the input should be - # greater than CPU_NUM, if not, the process will be - # failed by an exception. 
- if not use_cuda: - os.environ['CPU_NUM'] = str(2) - places = static.cpu_places() - else: - places = static.cuda_places() - - data = static.data(name='X', shape=[None, 1], dtype='float32') - hidden = static.nn.fc(x=data, size=10) - loss = paddle.mean(hidden) - paddle.optimizer.SGD(learning_rate=0.01).minimize(loss) - - exe.run(static.default_startup_program()) - - build_strategy = static.BuildStrategy() - build_strategy.gradient_scale_strategy = \ - static.BuildStrategy.GradientScaleStrategy.Customized - compiled_prog = static.CompiledProgram( - static.default_main_program()).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy, - places=places) - - dev_count = len(places) - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_grad = numpy.ones((dev_count)).astype("float32") * 0.01 - loss_grad_name = loss.name+"@GRAD" - loss_data = exe.run(compiled_prog, - feed={"X": x, loss_grad_name : loss_grad}, - fetch_list=[loss.name, loss_grad_name]) +COPY-FROM: paddle.static.BuildStrategy.gradient_scale_strategy memory_optimize ''''''''' @@ -199,15 +86,7 @@ reduce_strategy **代码示例** -.. code-block:: python - - import paddle - import paddle.static as static - - paddle.enable_static() - - build_strategy = static.BuildStrategy() - build_strategy.reduce_strategy = static.BuildStrategy.ReduceStrategy.Reduce +COPY-FROM: paddle.static.BuildStrategy.reduce_strategy remove_unnecessary_lock ''''''''' @@ -216,16 +95,7 @@ bool 类型。设置 True 会去除 GPU 操作中的一些锁操作,``Parallel **代码示例** -.. code-block:: python - - import paddle - import paddle.static as static - - paddle.enable_static() - - build_strategy = static.BuildStrategy() - build_strategy.remove_unnecessary_lock = True - +COPY-FROM: paddle.static.BuildStrategy.remove_unnecessary_lock sync_batch_norm ''''''''' @@ -234,12 +104,4 @@ bool 类型。表示是否使用同步的批正则化,即在训练阶段通过 **代码示例** -.. 
code-block:: python - - import paddle - import paddle.static as static - - paddle.enable_static() - - build_strategy = static.BuildStrategy() - build_strategy.sync_batch_norm = True +COPY-FROM: paddle.static.BuildStrategy.sync_batch_norm diff --git a/docs/api/paddle/static/Executor_cn.rst b/docs/api/paddle/static/Executor_cn.rst index 48ed619b6c0..0ff00eb4c3a 100644 --- a/docs/api/paddle/static/Executor_cn.rst +++ b/docs/api/paddle/static/Executor_cn.rst @@ -28,57 +28,7 @@ Executor 支持单 GPU、多 GPU 以及 CPU 运行。 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy - import os - - # Executor 只能在静态图模式使用 - paddle.enable_static() - - # 显式设置运行设备 - # use_cuda = True - # place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - # exe = paddle.static.Executor(place) - - # 如果不显示设置运行设备,PaddlePaddle 会设置默认运行设备 - exe = paddle.static.Executor() - - train_program = paddle.static.Program() - startup_program = paddle.static.Program() - with paddle.static.program_guard(train_program, startup_program): - data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') - hidden = paddle.static.nn.fc(data, 10) - loss = paddle.mean(hidden) - paddle.optimizer.SGD(learning_rate=0.01).minimize(loss) - - # 仅运行一次 startup program - # 不需要优化/编译这个 startup program - startup_program.random_seed=1 - exe.run(startup_program) - - # 无需编译,直接运行 main program - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = exe.run(train_program, feed={"X": x}, fetch_list=[loss.name]) - - # 另一种方法是,编译这个 main program 然后运行。 - # 参考 CompiledProgram 以获取更多信息。 - # 注意:如果你使用 CPU 运行程序,需要具体设置 CPU_NUM, - # 否则 PaddlePaddle 会把逻辑核的所有数目设为 CPU_NUM, - # 在这种情况下,输入的 batch size 应大于 CPU_NUM, - # 否则程序会异常中断。 - - # 显式设置运行设备 - # if not use_cuda: - # os.environ['CPU_NUM'] = str(2) - - # 未显示设置运行设备且安装的 Paddle 为 CPU 版本 - os.environ['CPU_NUM'] = str(2) - - compiled_prog = paddle.static.CompiledProgram( - train_program).with_data_parallel(loss_name=loss.name) - loss_data, = exe.run(compiled_prog, 
feed={"X": x}, fetch_list=[loss.name]) +COPY-FROM: paddle.static.Executor 方法 :::::::::::: @@ -94,15 +44,7 @@ close() **代码示例** -.. code-block:: python - - import paddle - - cpu = paddle.CPUPlace() - exe = paddle.static.Executor(cpu) - # 执行训练或测试过程 - exe.close() - +COPY-FROM: paddle.static.Executor.close run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False, return_merged=True, use_prune=False) ''''''''' @@ -133,100 +75,11 @@ run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_na **代码示例 1** -.. code-block:: python - - import paddle - import numpy - - #首先创建执行引擎 - paddle.enable_static() - place = paddle.CPUPlace() # paddle.CUDAPlace(0) - exe = paddle.static.Executor(place) - - data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') - hidden = paddle.static.nn.fc(data, 10) - loss = paddle.mean(hidden) - adam = paddle.optimizer.Adam() - adam.minimize(loss) - i = paddle.zeros(shape=[1], dtype='int64') - array = paddle.fluid.layers.array_write(x=loss, i=i) - - #仅运行 startup 程序一次 - exe.run(paddle.static.default_startup_program()) - - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_val, array_val = exe.run(feed={'X': x}, - fetch_list=[loss.name, array.name]) - print(array_val) - # [array([0.02153828], dtype=float32)] +COPY-FROM: paddle.static.Executor.run:code-example-1 **代码示例 2** -.. 
code-block:: python - - import paddle - import numpy as np - - # 创建 Executor 对象 - paddle.enable_static() - place = paddle.CUDAPlace(0) - exe = paddle.static.Executor(place) - - data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') - class_dim = 2 - prediction = paddle.static.nn.fc(data, class_dim) - loss = paddle.mean(prediction) - adam = paddle.optimizer.Adam() - adam.minimize(loss) - - # 运行且仅运行一次 startup program - exe.run(paddle.static.default_startup_program()) - build_strategy = paddle.static.BuildStrategy() - binary = paddle.static.CompiledProgram( - paddle.static.default_main_program()).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) - batch_size = 6 - x = np.random.random(size=(batch_size, 1)).astype('float32') - - # 1) 设置 return_merged 参数为 False 以获取不合并的计算结果: - unmerged_prediction, = exe.run(binary, - feed={'X': x}, - fetch_list=[prediction.name], - return_merged=False) - # 如果用户使用两个 GPU 卡来运行此 python 代码示例,输出结果将为(2, 3, class_dim)。 - # 输出结果中第一个维度值代表所使用的 GPU 卡数,而第二个维度值代表 batch_size 和所使用 - # 的 GPU 卡数之商。 - print("The unmerged prediction shape: {}".format( - np.array(unmerged_prediction).shape)) - print(unmerged_prediction) - - # 2) 设置 return_merged 参数为 True 以获取合并的计算结果: - merged_prediction, = exe.run(binary, - feed={'X': x}, - fetch_list=[prediction.name], - return_merged=True) - - # 如果用户使用两个 GPU 卡来运行此 python 代码示例,输出结果将为(6, class_dim)。输出结果 - # 中第一个维度值代表 batch_size 值。 - print("The merged prediction shape: {}".format( - np.array(merged_prediction).shape)) - print(merged_prediction) - - # 输出: - # The unmerged prediction shape: (2, 3, 2) - # [array([[-0.37620035, -0.19752218], - # [-0.3561043 , -0.18697084], - # [-0.24129935, -0.12669306]], dtype=float32), array([[-0.24489994, -0.12858354], - # [-0.49041364, -0.25748932], - # [-0.44331917, -0.23276259]], dtype=float32)] - # The merged prediction shape: (6, 2) - # [[-0.37789783 -0.19921964] - # [-0.3577645 -0.18863106] - # [-0.24274671 -0.12814042] - # [-0.24635398 -0.13003758] 
- # [-0.49232286 -0.25939852] - # [-0.44514108 -0.2345845 ]] - +COPY-FROM: paddle.static.Executor.run:code-example-2 infer_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) ''''''''' @@ -250,25 +103,7 @@ infer_from_dataset 的文档与 train_from_dataset 几乎完全相同,只是 **代码示例** -.. code-block:: python - - import paddle - - paddle.enable_static() - place = paddle.CPUPlace() # 使用 GPU 时可设置 place = paddle.CUDAPlace(0) - exe = paddle.static.Executor(place) - x = paddle.static.data(name="x", shape=[None, 10, 10], dtype="int64") - y = paddle.static.data(name="y", shape=[None, 1], dtype="int64", lod_level=1) - dataset = paddle.fluid.DatasetFactory().create_dataset() - dataset.set_use_var([x, y]) - dataset.set_thread(1) - # 您可以设置您自己的 filelist,如 filelist = ["dataA.txt"] - filelist = [] - dataset.set_filelist(filelist) - exe.run(paddle.static.default_startup_program()) - exe.infer_from_dataset(program=paddle.static.default_main_program(), - dataset=dataset) - +COPY-FROM: paddle.static.Executor.infer_from_dataset train_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) ''''''''' @@ -295,21 +130,4 @@ train_from_dataset 将销毁每次运行在 executor 中创建的所有资源。 **代码示例** -.. 
code-block:: python - - import paddle - - paddle.enable_static() - place = paddle.CPUPlace() # 使用 GPU 时可设置 place = paddle.CUDAPlace(0) - exe = paddle.static.Executor(place) - x = paddle.static.data(name="x", shape=[None, 10, 10], dtype="int64") - y = paddle.static.data(name="y", shape=[None, 1], dtype="int64", lod_level=1) - dataset = paddle.fluid.DatasetFactory().create_dataset() - dataset.set_use_var([x, y]) - dataset.set_thread(1) - # 您可以设置您自己的 filelist,如 filelist = ["dataA.txt"] - filelist = [] - dataset.set_filelist(filelist) - exe.run(paddle.static.default_startup_program()) - exe.train_from_dataset(program=paddle.static.default_main_program(), - dataset=dataset) +COPY-FROM: paddle.static.Executor.train_from_dataset diff --git a/docs/api/paddle/static/ParallelExecutor_cn.rst b/docs/api/paddle/static/ParallelExecutor_cn.rst index f12807286bf..1f2ba1b3650 100644 --- a/docs/api/paddle/static/ParallelExecutor_cn.rst +++ b/docs/api/paddle/static/ParallelExecutor_cn.rst @@ -40,50 +40,7 @@ ParallelExecutor 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy - import os - - use_cuda = True - paddle.enable_static() - place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - - # 注意:如果你使用 CPU 运行程序,需要具体设置 CPU_NUM, - # 否则 PaddlePaddle 会把逻辑核的所有数目设为 CPU_NUM, - # 在这种情况下,输入的 batch size 应大于 CPU_NUM, - # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) - - exe = paddle.static.Executor(place) - - train_program = paddle.static.Program() - startup_program = paddle.static.Program() - with paddle.static.program_guard(train_program, startup_program): - data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') - hidden = paddle.static.nn.fc(data, 10) - loss = paddle.mean(hidden) - test_program = paddle.static.default_main_program().clone(for_test=True) - paddle.optimizer.SGD(learning_rate=0.01).minimize(loss) - - exe.run(startup_program) - - train_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, - main_program=train_program, - loss_name=loss.name) - # 注意:如果此处不设置 share_vars_from=train_exe,测试过程中用的参数与训练使用的参数是不一致 - test_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, - main_program=test_program, - share_vars_from=train_exe) - - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = train_exe.run(feed={"X": x}, - fetch_list=[loss.name]) - - loss_data, = test_exe.run(feed={"X": x}, - fetch_list=[loss.name]) +COPY-FROM: paddle.static.ParallelExecutor 方法 :::::::::::: @@ -109,52 +66,7 @@ run(fetch_list, feed=None, feed_dict=None, return_numpy=True) **代码示例** -.. 
code-block:: python - import paddle - import numpy - import os - - use_cuda = True - paddle.enable_static() - place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - - # 注意:如果你使用 CPU 运行程序,需要具体设置 CPU_NUM, - # 否则 PaddlePaddle 会把逻辑核的所有数目设为 CPU_NUM, - # 在这种情况下,输入的 batch size 应大于 CPU_NUM, - # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) - - exe = paddle.static.Executor(place) - - train_program = paddle.static.Program() - startup_program = paddle.static.Program() - with paddle.static.program_guard(train_program, startup_program): - data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') - hidden = paddle.static.nn.fc(data, 10) - loss = paddle.mean(hidden) - paddle.optimizer.SGD(learning_rate=0.01).minimize(loss) - - exe.run(startup_program) - - train_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, - main_program=train_program, - loss_name=loss.name) - # 如果 feed 参数是 dict 类型: - # 图像会被 split 到设备中。假设有两个设备,那么每个设备将会处理形为 (5, 1)的图像 - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = train_exe.run(feed={"X": x}, - fetch_list=[loss.name]) - - # 如果 feed 参数是 list 类型: - # 各设备挨个处理列表中的每个元素 - # 第一个设备处理形为 (10, 1) 的图像 - # 第二个设备处理形为 (9, 1) 的图像 - # - # 使用 exe.device_count 得到设备数目 - x2 = numpy.random.random(size=(9, 1)).astype('float32') - loss_data, = train_exe.run(feed=[{"X": x}, {"X": x2}], - fetch_list=[loss.name]) +COPY-FROM: paddle.static.ParallelExecutor.run drop_local_exe_scopes() ''''''''' @@ -167,38 +79,4 @@ drop_local_exe_scopes() **代码示例** -.. 
code-block:: python - - import paddle - import numpy - import os - - use_cuda = True - # 注意:如果你使用 CPU 运行程序,需要具体设置 CPU_NUM, - # 否则 PaddlePaddle 会把逻辑核的所有数目设为 CPU_NUM, - # 在这种情况下,输入的 batch size 应大于 CPU_NUM, - # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) - - paddle.enable_static() - train_program = paddle.static.Program() - startup_program = paddle.static.Program() - with paddle.static.program_guard(train_program, startup_program): - data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') - hidden = paddle.static.nn.fc(data, 10) - loss = paddle.mean(hidden) - - place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - exe = paddle.static.Executor(place) - exe.run(startup_program) - - parallel_exe = paddle.static.ParallelExecutor(use_cuda=use_cuda, - main_program=train_program, - loss_name=loss.name) - - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = parallel_exe.run(feed={"X": x}, - fetch_list=[loss.name]) - - parallel_exe.drop_local_exe_scopes() +COPY-FROM: paddle.static.ParallelExecutor.drop_local_exe_scopes diff --git a/docs/api/paddle/vision/transforms/Compose_cn.rst b/docs/api/paddle/vision/transforms/Compose_cn.rst index 05250b47cf1..2e60bea6da7 100644 --- a/docs/api/paddle/vision/transforms/Compose_cn.rst +++ b/docs/api/paddle/vision/transforms/Compose_cn.rst @@ -22,12 +22,4 @@ Compose .. 
code-block:: python - from paddle.vision.datasets import Flowers - from paddle.vision.transforms import Compose, ColorJitter, Resize - - transform = Compose([ColorJitter(), Resize(size=608)]) - flowers = Flowers(mode='test', transform=transform) - - for i in range(10): - sample = flowers[i] - print(sample[0].size, sample[1]) +COPY-FROM: paddle.vision.transforms.Compose From e691bb4946321d8df52f079c88782dcb5310a14a Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 15:11:10 +0000 Subject: [PATCH 08/20] make pre-commit also check rst --- .pre-commit-config.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 45fdc772502..4e7ae5c6a1d 100755 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,20 +12,20 @@ repos: - id: detect-private-key files: (?!.*paddle)^.*$ - id: end-of-file-fixer - files: \.md$ + files: \.md$|\.rst$ - id: trailing-whitespace - files: \.md$ + files: \.md$|\.rst$ - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.0.1 hooks: - id: forbid-crlf - files: \.md$ + files: \.md$|\.rst$ - id: remove-crlf - files: \.md$ + files: \.md$|\.rst$ - id: forbid-tabs - files: \.md$ + files: \.md$|\.rst$ - id: remove-tabs - files: \.md$ + files: \.md$|\.rst$ - repo: https://github.com/reyoung/pre-commit-hooks-jinja-compile.git rev: 4a369cc72a4a2b8d3813ab8cc17abb5f5b21ef6c hooks: From 2c08fd702f1697d80f0ce363285fe051874dbb4c Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 27 Jul 2022 17:04:00 +0000 Subject: [PATCH 09/20] remove missing `.. 
code-block:: python` --- docs/api/paddle/vision/transforms/Compose_cn.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/api/paddle/vision/transforms/Compose_cn.rst b/docs/api/paddle/vision/transforms/Compose_cn.rst index 2e60bea6da7..e75c1af579c 100644 --- a/docs/api/paddle/vision/transforms/Compose_cn.rst +++ b/docs/api/paddle/vision/transforms/Compose_cn.rst @@ -20,6 +20,4 @@ Compose 代码示例 ::::::::: -.. code-block:: python - COPY-FROM: paddle.vision.transforms.Compose From b3f848ebf6a87d2a3600a4b9efacb090b3544e4a Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Thu, 28 Jul 2022 04:06:40 +0000 Subject: [PATCH 10/20] update api_white_list.txt --- ci_scripts/api_white_list.txt | 38 ++++++++++++----------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/ci_scripts/api_white_list.txt b/ci_scripts/api_white_list.txt index 19d4458fcf8..c2cf551a5b4 100644 --- a/ci_scripts/api_white_list.txt +++ b/ci_scripts/api_white_list.txt @@ -1,12 +1,9 @@ -paddle/fluid/DistributeTranspiler_cn.rst -paddle/fluid/DistributeTranspilerConfig_cn.rst +paddle/fluid/transpiler/DistributeTranspiler_cn.rst +paddle/fluid/transpiler/DistributeTranspilerConfig_cn.rst paddle/fluid/transpiler/HashName_cn.rst -paddle/fluid/memory_optimize_cn.rst -paddle/fluid/release_memory_cn.rst -paddle/optimizer/Dpsgd_cn.rst -paddle/reader/ComposeNotAligned_cn.rst +paddle/fluid/transpiler/memory_optimize_cn.rst +paddle/fluid/transpiler/release_memory_cn.rst paddle/fluid/layers/scatter_cn.rst -paddle/tensor/manipulation/scatter_cn.rst paddle/distributed/init_parallel_env_cn.rst paddle/distributed/spawn_cn.rst paddle/distributed/ReduceOp_cn.rst @@ -18,25 +15,16 @@ paddle/distributed/barrier_cn.rst paddle/distributed/broadcast_cn.rst paddle/distributed/split_cn.rst paddle/distributed/fleet/Fleet_cn.rst -paddle/distributed/fleet/utils/fs/ExecuteError_cn.rst -paddle/distributed/fleet/utils/fs/FSFileExistsError_cn.rst -paddle/distributed/fleet/utils/fs/FSFileNotExistsError_cn.rst 
-paddle/distributed/fleet/utils/fs/FSShellCmdAborted_cn.rst -paddle/distributed/fleet/utils/fs/FSTimeOut_cn.rst -paddle/distributed/fleet/utils/fs/FS_cn.rst -paddle/distributed/fleet/utils/fs/HDFSClient_cn.rst -paddle/distributed/fleet/utils/fs/LocalFS_cn.rst -paddle/fluid/dygraph/parallel/DataParallel_cn.rst -paddle/fluid/dygraph/parallel/ParallelEnv_cn.rst -paddle/fluid/framework/is_compiled_with_xpu_cn.rst -paddle/fluid/framework/xpu_places_cn.rst -paddle/fluid/core/XPUPlace_cn.rst +paddle/distributed/fleet/utils/HDFSClient_cn.rst +paddle/distributed/fleet/utils/LocalFS_cn.rst +paddle/DataParallel_cn.rst +paddle/distributed/ParallelEnv_cn.rst +paddle/device/is_compiled_with_xpu_cn.rst +paddle/static/xpu_places_cn.rst +paddle/device/XPUPlace_cn.rst paddle/utils/cpp_extension/load_cn.rst paddle/utils/cpp_extension/setup_cn.rst paddle/utils/cpp_extension/CppExtension_cn.rst paddle/utils/cpp_extension/CUDAExtension_cn.rst -upgrade_guide_cn.md -paddle/hapi/hub/Overview_cn.rst -paddle/hapi/hub/help_cn.rst -paddle/hapi/hub/list_cn.rst -paddle/hapi/hub/load_cn.rst +paddle/hub/Overview_cn.rst +paddle/incubate/autograd/Overview_cn.rst \ No newline at end of file From ac0762495867af302cbab4620ead3c662746971a Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Fri, 29 Jul 2022 08:41:05 +0000 Subject: [PATCH 11/20] fix the issue caused by upstream --- docs/api/paddle/Tensor_cn.rst | 2 +- docs/api/paddle/count_nonzero_cn.rst | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/api/paddle/Tensor_cn.rst b/docs/api/paddle/Tensor_cn.rst index 10d46a8b284..7c422869327 100755 --- a/docs/api/paddle/Tensor_cn.rst +++ b/docs/api/paddle/Tensor_cn.rst @@ -629,7 +629,7 @@ cosh(name=None) count_nonzero(axis=None, keepdim=False, name=None) ::::::::: -返回:沿给定的轴 ``axis`` 统计输入Tensor ``x`` 中非零元素的个数。 +返回:沿给定的轴 ``axis`` 统计输入 Tensor ``x`` 中非零元素的个数。 返回类型:Tensor diff --git a/docs/api/paddle/count_nonzero_cn.rst b/docs/api/paddle/count_nonzero_cn.rst index 
a7c7283ac87..a69d6c775e4 100644 --- a/docs/api/paddle/count_nonzero_cn.rst +++ b/docs/api/paddle/count_nonzero_cn.rst @@ -5,19 +5,19 @@ count_nonzero .. py:function:: paddle.count_nonzero(x, axis=None, keepdim=False, name=None) -沿给定的轴 ``axis`` 统计输入Tensor ``x`` 中非零元素的个数。 +沿给定的轴 ``axis`` 统计输入 Tensor ``x`` 中非零元素的个数。 参数 :::::::::: - - **x** (Tensor) - 输入的Tensor,数据类型为:bool、float16、float32、float64、int32、int64。 - - **axis** (None|int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是int或者int元素的列表。``axis`` 值应该在范围[-D, D)内,D是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于0,则等价于 :math:`axis + D`。如果 ``axis`` 是None,则对 ``x`` 的全部元素计算中位数。默认值为None。 - - **keepdim** (bool,可选) - 是否在输出Tensor中保留减小的维度。如果 ``keepdim`` 为True,则输出Tensor和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为1)。否则,输出Tensor的形状会在 ``axis`` 上进行squeeze操作。默认值为True。 + - **x** (Tensor) - 输入的 Tensor,数据类型为:bool、float16、float32、float64、int32、int64。 + - **axis** (None|int|list|tuple,可选) - 指定对 ``x`` 进行计算的轴。``axis`` 可以是 int 或者 int 元素的列表。``axis`` 值应该在范围[-D, D)内,D 是 ``x`` 的维度。如果 ``axis`` 或者其中的元素值小于 0,则等价于 :math:`axis + D`。如果 ``axis`` 是 None,则对 ``x`` 的全部元素计算中位数。默认值为 None。 + - **keepdim** (bool,可选) - 是否在输出 Tensor 中保留减小的维度。如果 ``keepdim`` 为 True,则输出 Tensor 和 ``x`` 具有相同的维度(减少的维度除外,减少的维度的大小为 1)。否则,输出 Tensor 的形状会在 ``axis`` 上进行 squeeze 操作。默认值为 True。 - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::: - ``Tensor``,沿着 ``axis`` 统计输入Tensor中非零元素的个数,数据类型int64。 + ``Tensor``,沿着 ``axis`` 统计输入 Tensor 中非零元素的个数,数据类型 int64。 代码示例 :::::::::: From 6521aa2b9180470014af3c741169efdc3a991061 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Mon, 1 Aug 2022 12:07:44 +0000 Subject: [PATCH 12/20] add a pre-commit hook to automatically insert space to cn and en char --- .pre-commit-config.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4e7ae5c6a1d..eca948d0e85 100755 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,6 +26,13 @@ repos: files: \.md$|\.rst$ - id: remove-tabs files: 
\.md$|\.rst$ +- repo: https://github.com/ShigureLab/dochooks + rev: v0.3.0 + hooks: + - id: check-whitespace-between-cn-and-en-char + files: \.md$|\.rst$ + - id: insert-whitespace-between-cn-and-en-char + files: \.md$|\.rst$ - repo: https://github.com/reyoung/pre-commit-hooks-jinja-compile.git rev: 4a369cc72a4a2b8d3813ab8cc17abb5f5b21ef6c hooks: From f48202fb0474afa5d0666db06e0e42439a695f3a Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Mon, 1 Aug 2022 12:35:22 +0000 Subject: [PATCH 13/20] fix style issues from upstream --- docs/install/instalL_NGC_PaddlePaddle_ch.rst | 32 +++++++++----------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/docs/install/instalL_NGC_PaddlePaddle_ch.rst b/docs/install/instalL_NGC_PaddlePaddle_ch.rst index 20a7276b22d..967f563b4de 100644 --- a/docs/install/instalL_NGC_PaddlePaddle_ch.rst +++ b/docs/install/instalL_NGC_PaddlePaddle_ch.rst @@ -8,7 +8,7 @@ NGC PaddlePaddle 容器安装指南 整体介绍 ---------------------- -NGC PaddlePaddle 容器针对 NVIDIA GPU 加速进行了优化,并包含一组经过验证的库,可启用和优化NVIDIA GPU 性能。此容器还可能包含对 PaddlePaddle 源代码的修改,以最大限度地提高性能和兼容性。此容器还包含用于加速 ETL (`DALI `_, `RAPIDS `_),、训练(`cuDNN `_, `NCCL `_)和推理(`TensorRT `_)工作负载的软件。 +NGC PaddlePaddle 容器针对 NVIDIA GPU 加速进行了优化,并包含一组经过验证的库,可启用和优化 NVIDIA GPU 性能。此容器还可能包含对 PaddlePaddle 源代码的修改,以最大限度地提高性能和兼容性。此容器还包含用于加速 ETL (`DALI `_, `RAPIDS `_),、训练(`cuDNN `_, `NCCL `_)和推理(`TensorRT `_)工作负载的软件。 ---------------------- 环境准备 @@ -16,13 +16,13 @@ NGC PaddlePaddle 容器针对 NVIDIA GPU 加速进行了优化,并包含一组 使用 NGC PaddlePaddle 容器需要主机系统安装以下内容: -* `Docker引擎 `_ +* `Docker 引擎 `_ -* `NVIDIA GPU 驱动程序 `_ +* `NVIDIA GPU 驱动程序 `_ -* `NVIDIA 容器工具包 `_ +* `NVIDIA 容器工具包 `_ -有关支持的版本,请参阅 `NVIDIA框架容器支持矩阵 `_ 和 `NVIDIA 容器工具包文档 `_。 +有关支持的版本,请参阅 `NVIDIA 框架容器支持矩阵 `_ 和 `NVIDIA 容器工具包文档 `_。 不需要其他安装、编译或依赖管理。 无需安装 NVIDIA CUDA Toolkit。 @@ -47,7 +47,7 @@ NGC PaddlePaddle 容器针对 NVIDIA GPU 加速进行了优化,并包含一组 其中: -* 22.07 是容器版本。 +* 22.07 是容器版本。 PaddlePaddle 通过将其作为 Python 模块导入来运行: :: @@ -79,26 +79,26 @@ PaddlePaddle 通过将其作为 Python 模块导入来运行: 在 docker run 命令中。 
---------------------- - NGC容器介绍 + NGC 容器介绍 ---------------------- 有关内容的完整列表,请参阅 `NVIDIA PaddlePaddle 容器发行说明 `_。 此容器映像包含 NVIDIA 版 PaddlePaddle 的完整源代码,位于 /opt/paddle/paddle。它是作为系统 Python 模块预构建和安装的。 NVIDIA PaddlePaddle 容器针对与 NVIDIA GPU 一起使用进行了优化,并包含以下用于 GPU 加速的软件: -* `CUDA `_ +* `CUDA `_ -* `cuBLAS `_ +* `cuBLAS `_ -* `NVIDIA cuDNN `_ +* `NVIDIA cuDNN `_ -* `NVIDIA NCCL `_ (optimized for `NVLink `_ ) +* `NVIDIA NCCL `_ (optimized for `NVLink `_ ) -* `NVIDIA Data Loading Library (DALI) `_ +* `NVIDIA Data Loading Library (DALI) `_ -* `TensorRT `__ +* `TensorRT `__ -* `PaddlePaddle with TensorRT (Paddle-TRT) `_ +* `PaddlePaddle with TensorRT (Paddle-TRT) `_ 此容器中的软件堆栈已经过兼容性验证,不需要最终用户进行任何额外的安装或编译。此容器可以帮助您从端到端加速深度学习工作流程。 @@ -107,6 +107,4 @@ NVIDIA PaddlePaddle 容器针对与 NVIDIA GPU 一起使用进行了优化,并 NGC PaddlePaddle 容器软件许可协议 -------------------------------------------- -当您下载或使用NGC PaddlePaddle 容器时,即表示您已经同意并接受此 `最终用户许可协议 `_ 的条款及其对应约束。 - - +当您下载或使用 NGC PaddlePaddle 容器时,即表示您已经同意并接受此 `最终用户许可协议 `_ 的条款及其对应约束。 From 00f9ea6499727a20a6b4a006521bc58dbc6cffe6 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Mon, 1 Aug 2022 12:39:24 +0000 Subject: [PATCH 14/20] bump pre-commit hooks version --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index eca948d0e85..2bf34f9ae5d 100755 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,11 +1,11 @@ repos: - repo: https://github.com/pre-commit/mirrors-yapf.git - rev: v0.16.0 + rev: v0.32.0 hooks: - id: yapf files: \.py$ - repo: https://github.com/pre-commit/pre-commit-hooks - rev: a11d9314b22d8f8c7556443875b731ef05965464 + rev: v4.1.0 hooks: - id: check-merge-conflict - id: check-symlinks @@ -16,7 +16,7 @@ repos: - id: trailing-whitespace files: \.md$|\.rst$ - repo: https://github.com/Lucas-C/pre-commit-hooks - rev: v1.0.1 + rev: v1.1.14 hooks: - id: forbid-crlf files: \.md$|\.rst$ From af779947e72f3dbc57ec3dce9619dd808b490ece Mon Sep 17 
00:00:00 2001 From: Nyakku Shigure Date: Tue, 2 Aug 2022 09:01:40 +0000 Subject: [PATCH 15/20] more COPY-FROM (the file only include one code block) --- docs/api/paddle/bincount_cn.rst | 12 +--- docs/api/paddle/diff_cn.rst | 23 +------ docs/api/paddle/distributed/ReduceOp_cn.rst | 21 ++---- docs/api/paddle/distributed/get_rank_cn.rst | 8 +-- docs/api/paddle/flatten_cn.rst | 15 +--- docs/api/paddle/flops_cn.rst | 60 +--------------- docs/api/paddle/gather_cn.rst | 12 +--- docs/api/paddle/incubate/graph_reindex_cn.rst | 17 +---- .../paddle/incubate/graph_send_recv_cn.rst | 26 +------ docs/api/paddle/inner_cn.rst | 12 +--- docs/api/paddle/jit/not_to_static_cn.rst | 20 +----- docs/api/paddle/kthvalue_cn.rst | 4 +- docs/api/paddle/linalg/lu_cn.rst | 39 +---------- docs/api/paddle/linalg/lu_unpack_cn.rst | 39 +---------- docs/api/paddle/metric/Accuracy_cn.rst | 9 +-- docs/api/paddle/metric/accuracy_cn.rst | 9 +-- docs/api/paddle/mode_cn.rst | 4 +- docs/api/paddle/nn/BatchNorm1D_cn.rst | 13 +--- docs/api/paddle/nn/BatchNorm2D_cn.rst | 13 +--- docs/api/paddle/nn/BatchNorm3D_cn.rst | 13 +--- docs/api/paddle/nn/BatchNorm_cn.rst | 13 +--- .../api/paddle/nn/ClipGradByGlobalNorm_cn.rst | 16 +---- docs/api/paddle/nn/ClipGradByNorm_cn.rst | 16 +---- docs/api/paddle/nn/ClipGradByValue_cn.rst | 16 +---- docs/api/paddle/nn/Embedding_cn.rst | 30 +------- docs/api/paddle/nn/Flatten_cn.rst | 10 +-- docs/api/paddle/nn/GroupNorm_cn.rst | 13 +--- docs/api/paddle/nn/InstanceNorm1D_cn.rst | 13 +--- docs/api/paddle/nn/InstanceNorm2D_cn.rst | 13 +--- docs/api/paddle/nn/InstanceNorm3D_cn.rst | 13 +--- docs/api/paddle/nn/LayerNorm_cn.rst | 13 +--- docs/api/paddle/nn/PixelShuffle_cn.rst | 10 +-- docs/api/paddle/nn/Softmax_cn.rst | 21 +----- docs/api/paddle/nn/SpectralNorm_cn.rst | 10 +-- .../paddle/nn/functional/diag_embed_cn.rst | 45 +----------- docs/api/paddle/nn/functional/dropout_cn.rst | 19 +---- .../api/paddle/nn/functional/embedding_cn.rst | 23 +------ 
.../paddle/nn/functional/grid_sample_cn.rst | 42 +---------- docs/api/paddle/nn/functional/one_hot_cn.rst | 12 +--- docs/api/paddle/nn/functional/pad_cn.rst | 30 +------- docs/api/paddle/nn/functional/softmax_cn.rst | 24 +------ .../paddle/nn/initializer/Orthogonal_cn.rst | 11 +-- docs/api/paddle/normal_cn.rst | 16 +---- .../optimizer/lr/CosineAnnealingDecay_cn.rst | 48 +------------ .../optimizer/lr/ExponentialDecay_cn.rst | 48 +------------ .../optimizer/lr/InverseTimeDecay_cn.rst | 47 +------------ .../paddle/optimizer/lr/LambdaDecay_cn.rst | 47 +------------ .../paddle/optimizer/lr/LinearWarmup_cn.rst | 49 +------------ .../paddle/optimizer/lr/MultiStepDecay_cn.rst | 47 +------------ .../optimizer/lr/MultiplicativeDecay_cn.rst | 20 +----- .../optimizer/lr/NaturalExpDecay_cn.rst | 47 +------------ docs/api/paddle/optimizer/lr/NoamDecay_cn.rst | 49 +------------ .../paddle/optimizer/lr/PiecewiseDecay_cn.rst | 47 +------------ .../optimizer/lr/PolynomialDecay_cn.rst | 47 +------------ .../optimizer/lr/ReduceOnPlateau_cn.rst | 48 +------------ docs/api/paddle/optimizer/lr/StepDecay_cn.rst | 47 +------------ docs/api/paddle/outer_cn.rst | 13 +--- docs/api/paddle/rand_cn.rst | 26 +------ docs/api/paddle/randn_cn.rst | 26 +------ docs/api/paddle/round_cn.rst | 8 +-- docs/api/paddle/scale_cn.rst | 8 --- docs/api/paddle/stack_cn.rst | 22 +----- docs/api/paddle/standard_normal_cn.rst | 26 +------ docs/api/paddle/static/nn/fc_cn.rst | 32 +-------- docs/api/paddle/static/nn/while_loop_cn.rst | 23 +------ docs/api/paddle/text/UCIHousing_cn.rst | 24 +------ docs/api/paddle/tolist_cn.rst | 11 +-- docs/api/paddle/topk_cn.rst | 2 +- docs/api/paddle/transpose_cn.rst | 9 +-- docs/api/paddle/version/cuda_cn.rst | 7 +- docs/api/paddle/version/cudnn_cn.rst | 7 +- docs/api/paddle/vision/ops/RoIPool_cn.rst | 14 +--- docs/api/paddle/vision/ops/roi_pool_cn.rst | 13 +--- .../vision/transforms/BaseTransform_cn.rst | 69 +------------------ .../transforms/BrightnessTransform_cn.rst | 12 
+--- .../vision/transforms/CenterCrop_cn.rst | 14 +--- .../vision/transforms/ColorJitter_cn.rst | 12 +--- .../transforms/ContrastTransform_cn.rst | 12 +--- .../paddle/vision/transforms/Grayscale_cn.rst | 13 +--- .../vision/transforms/HueTransform_cn.rst | 12 +--- docs/api/paddle/vision/transforms/Pad_cn.rst | 16 +---- .../vision/transforms/RandomErasing_cn.rst | 10 +-- .../transforms/RandomHorizontalFlip_cn.rst | 13 +--- .../transforms/RandomResizedCrop_cn.rst | 13 +--- .../vision/transforms/RandomRotation_cn.rst | 13 +--- .../transforms/RandomVerticalFlip_cn.rst | 13 +--- .../paddle/vision/transforms/Resize_cn.rst | 18 +---- .../transforms/SaturationTransform_cn.rst | 12 +--- .../paddle/vision/transforms/ToTensor_cn.rst | 20 +----- .../paddle/vision/transforms/Transpose_cn.rst | 13 +--- .../paddle/vision/transforms/normalize_cn.rst | 17 +---- docs/api/paddle/vision/transforms/pad_cn.rst | 16 +---- .../paddle/vision/transforms/resize_cn.rst | 18 +---- .../paddle/vision/transforms/rotate_cn.rst | 13 +--- .../vision/transforms/to_grayscale_cn.rst | 13 +--- 95 files changed, 99 insertions(+), 1923 deletions(-) diff --git a/docs/api/paddle/bincount_cn.rst b/docs/api/paddle/bincount_cn.rst index cf44eb97389..e3d77b39a25 100644 --- a/docs/api/paddle/bincount_cn.rst +++ b/docs/api/paddle/bincount_cn.rst @@ -22,14 +22,4 @@ Tensor,维度为 1。 代码示例: :::::::::::: -.. code-block:: python - - import paddle - - x = paddle.to_tensor([1, 2, 1, 4, 5]) - result1 = paddle.bincount(x) - print(result1) # [0, 2, 1, 0, 1, 1] - - w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) - result2 = paddle.bincount(x, weights=w) - print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] +COPY-FROM: paddle.bincount diff --git a/docs/api/paddle/diff_cn.rst b/docs/api/paddle/diff_cn.rst index d28acb1b638..66e5981a04c 100644 --- a/docs/api/paddle/diff_cn.rst +++ b/docs/api/paddle/diff_cn.rst @@ -30,25 +30,4 @@ diff 代码示例: ::::::::: -.. 
code-block:: python - - import paddle - x = paddle.to_tensor([1, 4, 5, 2]) - out = paddle.diff(x) - print(out) - # out: - # [3, 1, -3] - y = paddle.to_tensor([7, 9]) - out = paddle.diff(x, append=y) - print(out) - # out: - # [3, 1, -3, 5, 2] - z = paddle.to_tensor([[1, 2, 3], [4, 5, 6]]) - out = paddle.diff(z, axis=0) - print(out) - # out: - # [[3, 3, 3]] - out = paddle.diff(z, axis=1) - print(out) - # out: - # [[1, 1], [1, 1]] +COPY-FROM: paddle.diff diff --git a/docs/api/paddle/distributed/ReduceOp_cn.rst b/docs/api/paddle/distributed/ReduceOp_cn.rst index b46d33c4d90..19f5bb54ddc 100644 --- a/docs/api/paddle/distributed/ReduceOp_cn.rst +++ b/docs/api/paddle/distributed/ReduceOp_cn.rst @@ -3,6 +3,8 @@ ReduceOp ------------------------------- +.. py:class:: paddle.distributed.ReduceOp() + 指定规约类操作的逐元素操作类型,需要是下述值之一: ReduceOp.SUM @@ -15,20 +17,5 @@ ReduceOp 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - import paddle - from paddle.distributed import ReduceOp - from paddle.distributed import init_parallel_env - - paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id) - init_parallel_env() - if paddle.distributed.ParallelEnv().local_rank == 0: - np_data = np.array([[4, 5, 6], [4, 5, 6]]) - else: - np_data = np.array([[1, 2, 3], [1, 2, 3]]) - data = paddle.to_tensor(np_data) - paddle.distributed.all_reduce(data, op=ReduceOp.SUM) - out = data.numpy() - # [[5, 7, 9], [5, 7, 9]] + +COPY-FROM: paddle.distributed.ReduceOp diff --git a/docs/api/paddle/distributed/get_rank_cn.rst b/docs/api/paddle/distributed/get_rank_cn.rst index 2b5fa024829..fb4066f8e85 100644 --- a/docs/api/paddle/distributed/get_rank_cn.rst +++ b/docs/api/paddle/distributed/get_rank_cn.rst @@ -15,11 +15,5 @@ get_rank 代码示例 ::::::::: -.. 
code-block:: python - import paddle - import paddle.distributed as dist - - # execute this command in terminal: export PADDLE_TRAINER_ID=0 - print("The rank is %d" % dist.get_rank()) - # The rank is 0 +COPY-FROM: paddle.distributed.get_rank diff --git a/docs/api/paddle/flatten_cn.rst b/docs/api/paddle/flatten_cn.rst index 926458b45a3..3183d274513 100644 --- a/docs/api/paddle/flatten_cn.rst +++ b/docs/api/paddle/flatten_cn.rst @@ -54,17 +54,4 @@ flatten 代码示例 :::::::::::: -.. code-block:: python - - import paddle - - image_shape=(2, 3, 4, 4) - x = paddle.arange(end=image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]) - img = paddle.reshape(x, image_shape) / 100 - - out = paddle.flatten(img, start_axis=1, stop_axis=2) - # out shape is [2, 12, 4] - - # 在动态图模式下,输出 out 与输入 img 共享数据 - img[0, 0, 0, 0] = -1 - print(out[0, 0, 0]) # [-1] +COPY-FROM: paddle.flatten diff --git a/docs/api/paddle/flops_cn.rst b/docs/api/paddle/flops_cn.rst index c6f17f3cd0c..12e93b4c7ff 100644 --- a/docs/api/paddle/flops_cn.rst +++ b/docs/api/paddle/flops_cn.rst @@ -21,62 +21,4 @@ int,网络模型的计算量。 代码示例 ::::::::: - .. 
code-block:: python - - import paddle - import paddle.nn as nn - - class LeNet(nn.Layer): - def __init__(self, num_classes=10): - super(LeNet, self).__init__() - self.num_classes = num_classes - self.features = nn.Sequential( - nn.Conv2D( - 1, 6, 3, stride=1, padding=1), - nn.ReLU(), - nn.MaxPool2D(2, 2), - nn.Conv2D( - 6, 16, 5, stride=1, padding=0), - nn.ReLU(), - nn.MaxPool2D(2, 2)) - - if num_classes > 0: - self.fc = nn.Sequential( - nn.Linear(400, 120), - nn.Linear(120, 84), - nn.Linear( - 84, 10)) - - def forward(self, inputs): - x = self.features(inputs) - - if self.num_classes > 0: - x = paddle.flatten(x, 1) - x = self.fc(x) - return x - - lenet = LeNet() - # m 是 nn.Layer 的一个实类,x 是 m 的输入,y 是网络层的输出。 - def count_leaky_relu(m, x, y): - x = x[0] - nelements = x.numel() - m.total_ops += int(nelements) - - FLOPs = paddle.flops(lenet, [1, 1, 28, 28], custom_ops= {nn.LeakyReLU: count_leaky_relu}, - print_detail=True) - print(FLOPs) - - #+--------------+-----------------+-----------------+--------+--------+ - #| Layer Name | Input Shape | Output Shape | Params | Flops | - #+--------------+-----------------+-----------------+--------+--------+ - #| conv2d_2 | [1, 1, 28, 28] | [1, 6, 28, 28] | 60 | 47040 | - #| re_lu_2 | [1, 6, 28, 28] | [1, 6, 28, 28] | 0 | 0 | - #| max_pool2d_2 | [1, 6, 28, 28] | [1, 6, 14, 14] | 0 | 0 | - #| conv2d_3 | [1, 6, 14, 14] | [1, 16, 10, 10] | 2416 | 241600 | - #| re_lu_3 | [1, 16, 10, 10] | [1, 16, 10, 10] | 0 | 0 | - #| max_pool2d_3 | [1, 16, 10, 10] | [1, 16, 5, 5] | 0 | 0 | - #| linear_0 | [1, 400] | [1, 120] | 48120 | 48000 | - #| linear_1 | [1, 120] | [1, 84] | 10164 | 10080 | - #| linear_2 | [1, 84] | [1, 10] | 850 | 840 | - #+--------------+-----------------+-----------------+--------+--------+ - #Total Flops: 347560 Total Params: 61610 +COPY-FROM: paddle.flops diff --git a/docs/api/paddle/gather_cn.rst b/docs/api/paddle/gather_cn.rst index 817e665ed7a..bc1bb77d088 100644 --- a/docs/api/paddle/gather_cn.rst +++ 
b/docs/api/paddle/gather_cn.rst @@ -39,14 +39,4 @@ gather 代码示例 :::::::::::: -.. code-block:: python - - import numpy as np - import paddle - - input_1 = np.array([[1,2],[3,4],[5,6]]) - index_1 = np.array([0,1]) - input = paddle.to_tensor(input_1) - index = paddle.to_tensor(index_1) - output = paddle.gather(input, index, axis=0) - # expected output: [[1,2],[3,4]] +COPY-FROM: paddle.gather diff --git a/docs/api/paddle/incubate/graph_reindex_cn.rst b/docs/api/paddle/incubate/graph_reindex_cn.rst index 246abf42930..80e03276100 100644 --- a/docs/api/paddle/incubate/graph_reindex_cn.rst +++ b/docs/api/paddle/incubate/graph_reindex_cn.rst @@ -51,19 +51,4 @@ graph_reindex 代码示例 :::::::::: -.. code-block:: python - - import paddle - - x = [0, 1, 2] - neighbors = [8, 9, 0, 4, 7, 6, 7] - count = [2, 3, 2] - x = paddle.to_tensor(x, dtype="int64") - neighbors = paddle.to_tensor(neighbors, dtype="int64") - count = paddle.to_tensor(count, dtype="int32") - - reindex_src, reindex_dst, out_nodes = \ - paddle.incubate.graph_reindex(x, neighbors, count) - # reindex_src: [3, 4, 0, 5, 6, 7, 6] - # reindex_dst: [0, 0, 1, 1, 1, 2, 2] - # out_nodes: [0, 1, 2, 8, 9, 4, 7, 6] +COPY-FROM: paddle.incubate.graph_reindex diff --git a/docs/api/paddle/incubate/graph_send_recv_cn.rst b/docs/api/paddle/incubate/graph_send_recv_cn.rst index 828b189a2f9..f45293df49d 100644 --- a/docs/api/paddle/incubate/graph_send_recv_cn.rst +++ b/docs/api/paddle/incubate/graph_send_recv_cn.rst @@ -44,28 +44,4 @@ graph_send_recv 代码示例 :::::::::: -.. 
code-block:: python - - import paddle - - x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32") - indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32") - src_index = indexes[:, 0] - dst_index = indexes[:, 1] - out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum") - # Outputs: [[0., 2., 3.], [2., 8., 10.], [1., 4., 5.]] - - x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32") - indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32") - src_index = indexes[:, 0] - dst_index = indexes[:, 1] - out_size = paddle.max(dst_index) + 1 - out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum", out_size=out_size) - # Outputs: [[0., 2., 3.], [[2., 8., 10.]]] - - x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32") - indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32") - src_index = indexes[:, 0] - dst_index = indexes[:, 1] - out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum") - # Outputs: [[0., 2., 3.], [2., 8., 10.], [0., 0., 0.]] +COPY-FROM: paddle.incubate.graph_send_recv diff --git a/docs/api/paddle/inner_cn.rst b/docs/api/paddle/inner_cn.rst index 88db9c2735f..d07acea7b5d 100644 --- a/docs/api/paddle/inner_cn.rst +++ b/docs/api/paddle/inner_cn.rst @@ -27,14 +27,4 @@ inner 代码示例: :::::::::: -.. code-block:: python - - import paddle - - x = paddle.arange(1, 7).reshape((2, 3)).astype('float32') - y = paddle.arange(1, 10).reshape((3, 3)).astype('float32') - out = paddle.inner(x, y) - - print(out) - # ([[14, 32, 50], - # [32, 77, 122]]) +COPY-FROM: paddle.inner diff --git a/docs/api/paddle/jit/not_to_static_cn.rst b/docs/api/paddle/jit/not_to_static_cn.rst index 7431bd65a30..8441f7e91d7 100644 --- a/docs/api/paddle/jit/not_to_static_cn.rst +++ b/docs/api/paddle/jit/not_to_static_cn.rst @@ -17,23 +17,5 @@ callable,一个在动转静过程不会进行代码转写的函数。 示例代码 ::::::::: -.. 
code-block:: python - import paddle - - @paddle.jit.not_to_static - def func_not_to_static(x): - res = x - 1 - return res - - @paddle.jit.to_static - def func(x): - if paddle.mean(x) < 0: - out = func_not_to_static(x) - else: - out = x + 1 - return out - - x = paddle.ones([1, 2], dtype='float32') - out = func(x) - print(out) # [[2. 2.]] +COPY-FROM: paddle.jit.not_to_static diff --git a/docs/api/paddle/kthvalue_cn.rst b/docs/api/paddle/kthvalue_cn.rst index a50f2048a2d..8b79215cadc 100644 --- a/docs/api/paddle/kthvalue_cn.rst +++ b/docs/api/paddle/kthvalue_cn.rst @@ -3,7 +3,7 @@ kthvalue ------------------------------- -.. py:function:: paddle.kthvalue(x, k, axis=None, keepdim=False, name=None) +.. py:function:: paddle.kthvalue(x, k, axis=None, keepdim=False, name=None) 在指定的轴上查找第 k 小的元素和其对应所在的索引信息。 @@ -22,4 +22,4 @@ tuple(Tensor),返回第 k 小的元素和对应的索引信息。结果的 代码示例 ::::::::: -COPY-FROM: paddle.kthvalue(x, +COPY-FROM: paddle.kthvalue diff --git a/docs/api/paddle/linalg/lu_cn.rst b/docs/api/paddle/linalg/lu_cn.rst index 4886eed07cc..e13a31f7593 100644 --- a/docs/api/paddle/linalg/lu_cn.rst +++ b/docs/api/paddle/linalg/lu_cn.rst @@ -42,41 +42,4 @@ LU 和 pivot 可以通过调用 paddle.linalg.lu_unpack 展开获得 L、U、P 代码示例 :::::::::: -.. code-block:: python - - import paddle - - x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') - lu,p,info = paddle.linalg.lu(x, get_infos=True) - - # >>> lu: - # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, - # [[5. , 6. 
], - # [0.20000000, 0.80000000], - # [0.60000000, 0.50000000]]) - # >>> p - # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, - # [3, 3]) - # >>> info - # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, - # 0) - - P,L,U = paddle.linalg.lu_unpack(lu,p) - - # >>> P - # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, - # [[0., 1., 0.], - # [0., 0., 1.], - # [1., 0., 0.]]), - # >>> L - # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, - # [[1. , 0. ], - # [0.20000000, 1. ], - # [0.60000000, 0.50000000]]), - # >>> U - # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, - # [[5. , 6. ], - # [0. , 0.80000000]])) - - - # one can verify : X = P @ L @ U ; +COPY-FROM: paddle.linalg.lu diff --git a/docs/api/paddle/linalg/lu_unpack_cn.rst b/docs/api/paddle/linalg/lu_unpack_cn.rst index 0217d1a9362..4701f23f603 100644 --- a/docs/api/paddle/linalg/lu_unpack_cn.rst +++ b/docs/api/paddle/linalg/lu_unpack_cn.rst @@ -36,41 +36,4 @@ lu_unpack 代码示例 :::::::::: -.. code-block:: python - - import paddle - - x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') - lu,p,info = paddle.linalg.lu(x, get_infos=True) - - # >>> lu: - # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, - # [[5. , 6. ], - # [0.20000000, 0.80000000], - # [0.60000000, 0.50000000]]) - # >>> p - # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, - # [3, 3]) - # >>> info - # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, - # 0) - - P,L,U = paddle.linalg.lu_unpack(lu,p) - - # >>> P - # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, - # [[0., 1., 0.], - # [0., 0., 1.], - # [1., 0., 0.]]), - # >>> L - # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, - # [[1. , 0. ], - # [0.20000000, 1. 
], - # [0.60000000, 0.50000000]]), - # >>> U - # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, - # [[5. , 6. ], - # [0. , 0.80000000]])) - - - # one can verify : X = P @ L @ U ; +COPY-FROM: paddle.linalg.lu_unpack diff --git a/docs/api/paddle/metric/Accuracy_cn.rst b/docs/api/paddle/metric/Accuracy_cn.rst index 932e9c8d4be..f3f6b22f479 100644 --- a/docs/api/paddle/metric/Accuracy_cn.rst +++ b/docs/api/paddle/metric/Accuracy_cn.rst @@ -27,11 +27,4 @@ accuracy layer。参考 https://en.wikipedia.org/wiki/Precision_and_recall 代码示例 ::::::::: -.. code-block:: python - - import paddle - - predictions = paddle.to_tensor([[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32') - label = paddle.to_tensor([[2], [0]], dtype="int64") - result = paddle.metric.accuracy(input=predictions, label=label, k=1) - # [0.5] +COPY-FROM: paddle.metric.accuracy diff --git a/docs/api/paddle/metric/accuracy_cn.rst b/docs/api/paddle/metric/accuracy_cn.rst index 932e9c8d4be..f3f6b22f479 100644 --- a/docs/api/paddle/metric/accuracy_cn.rst +++ b/docs/api/paddle/metric/accuracy_cn.rst @@ -27,11 +27,4 @@ accuracy layer。参考 https://en.wikipedia.org/wiki/Precision_and_recall 代码示例 ::::::::: -.. code-block:: python - - import paddle - - predictions = paddle.to_tensor([[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32') - label = paddle.to_tensor([[2], [0]], dtype="int64") - result = paddle.metric.accuracy(input=predictions, label=label, k=1) - # [0.5] +COPY-FROM: paddle.metric.accuracy diff --git a/docs/api/paddle/mode_cn.rst b/docs/api/paddle/mode_cn.rst index b5671014e56..2993087b113 100644 --- a/docs/api/paddle/mode_cn.rst +++ b/docs/api/paddle/mode_cn.rst @@ -3,7 +3,7 @@ mode ------------------------------- -.. py:function:: paddle.mode(x, axis=-1, keepdim=False, name=None): +.. 
py:function:: paddle.mode(x, axis=-1, keepdim=False, name=None): 沿着可选的 ``axis`` 查找对应轴上的众数和结果所在的索引信息。 @@ -22,4 +22,4 @@ tuple(Tensor),返回检索到的众数结果和对应索引信息。结果 ::::::::: -COPY-FROM: paddle.mode(x, +COPY-FROM: paddle.mode diff --git a/docs/api/paddle/nn/BatchNorm1D_cn.rst b/docs/api/paddle/nn/BatchNorm1D_cn.rst index 80094fefa3e..eec7e2ca229 100644 --- a/docs/api/paddle/nn/BatchNorm1D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm1D_cn.rst @@ -64,15 +64,4 @@ BatchNorm1D 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - np.random.seed(123) - x_data = np.random.random(size=(2, 1, 3)).astype('float32') - x = paddle.to_tensor(x_data) - batch_norm = paddle.nn.BatchNorm1D(1) - batch_norm_out = batch_norm(x) - - print(batch_norm_out) +COPY-FROM: paddle.nn.BatchNorm1D diff --git a/docs/api/paddle/nn/BatchNorm2D_cn.rst b/docs/api/paddle/nn/BatchNorm2D_cn.rst index c90ad45ce06..4d35e6ac84b 100644 --- a/docs/api/paddle/nn/BatchNorm2D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm2D_cn.rst @@ -64,15 +64,4 @@ BatchNorm2D 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - np.random.seed(123) - x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) - batch_norm = paddle.nn.BatchNorm2D(1) - batch_norm_out = batch_norm(x) - - print(batch_norm_out) +COPY-FROM: paddle.nn.BatchNorm2D diff --git a/docs/api/paddle/nn/BatchNorm3D_cn.rst b/docs/api/paddle/nn/BatchNorm3D_cn.rst index af3cb2e96de..6a6109dcfbb 100644 --- a/docs/api/paddle/nn/BatchNorm3D_cn.rst +++ b/docs/api/paddle/nn/BatchNorm3D_cn.rst @@ -64,15 +64,4 @@ BatchNorm3D 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - np.random.seed(123) - x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) - batch_norm = paddle.nn.BatchNorm3D(1) - batch_norm_out = batch_norm(x) - - print(batch_norm_out) +COPY-FROM: paddle.nn.BatchNorm3D diff --git a/docs/api/paddle/nn/BatchNorm_cn.rst b/docs/api/paddle/nn/BatchNorm_cn.rst index 8a8b2703afd..8d39473e3ff 100644 --- a/docs/api/paddle/nn/BatchNorm_cn.rst +++ b/docs/api/paddle/nn/BatchNorm_cn.rst @@ -63,15 +63,4 @@ BatchNorm 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - x_data = np.random.random(size=(3, 10, 3, 7)).astype('float32') - x = paddle.to_tensor(x_data) - batch_norm = paddle.nn.BatchNorm(10) - batch_norm_out = batch_norm(x) - - print(batch_norm_out.shape) - # [3, 10, 3, 7] +COPY-FROM: paddle.nn.BatchNorm diff --git a/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst b/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst index d15e942bd1e..70489985dbf 100644 --- a/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst +++ b/docs/api/paddle/nn/ClipGradByGlobalNorm_cn.rst @@ -38,18 +38,4 @@ ClipGradByGlobalNorm 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - - x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32') - linear = paddle.nn.Linear(in_features=10, out_features=10, - weight_attr=paddle.ParamAttr(need_clip=True), - bias_attr=paddle.ParamAttr(need_clip=False)) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - - clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) - sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) - sdg.step() +COPY-FROM: paddle.nn.ClipGradByGlobalNorm diff --git a/docs/api/paddle/nn/ClipGradByNorm_cn.rst b/docs/api/paddle/nn/ClipGradByNorm_cn.rst index 963c994ca5d..3bc61cbfbb9 100644 --- a/docs/api/paddle/nn/ClipGradByNorm_cn.rst +++ b/docs/api/paddle/nn/ClipGradByNorm_cn.rst @@ -44,18 +44,4 @@ ClipGradByNorm 代码示例 :::::::::::: -.. code-block:: python - - import paddle - - x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32') - linear = paddle.nn.Linear(in_features=10, out_features=10, - weight_attr=paddle.ParamAttr(need_clip=True), - bias_attr=paddle.ParamAttr(need_clip=False)) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - - clip = paddle.nn.ClipGradByNorm(clip_norm=1.0) - sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) - sdg.step() +COPY-FROM: paddle.nn.ClipGradByNorm diff --git a/docs/api/paddle/nn/ClipGradByValue_cn.rst b/docs/api/paddle/nn/ClipGradByValue_cn.rst index 0c00f3a8ad4..de45990a9dd 100644 --- a/docs/api/paddle/nn/ClipGradByValue_cn.rst +++ b/docs/api/paddle/nn/ClipGradByValue_cn.rst @@ -29,18 +29,4 @@ ClipGradByValue 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - - x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32') - linear = paddle.nn.Linear(in_features=10, out_features=10, - weight_attr=paddle.ParamAttr(need_clip=True), - bias_attr=paddle.ParamAttr(need_clip=False)) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - - clip = paddle.nn.ClipGradByValue(min=-1, max=1) - sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters(), grad_clip=clip) - sdg.step() +COPY-FROM: paddle.nn.ClipGradByValue diff --git a/docs/api/paddle/nn/Embedding_cn.rst b/docs/api/paddle/nn/Embedding_cn.rst index 91a94245e84..626e0cbbba5 100644 --- a/docs/api/paddle/nn/Embedding_cn.rst +++ b/docs/api/paddle/nn/Embedding_cn.rst @@ -55,32 +55,4 @@ Tensor, input 映射后得到的 Embedding Tensor,数据类型和词嵌入的 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64) - y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32) - - x = paddle.to_tensor(x_data, stop_gradient=False) - y = paddle.to_tensor(y_data, stop_gradient=False) - - embedding = paddle.nn.Embedding(10, 3, sparse=True) - - w0=np.full(shape=(10, 3), fill_value=2).astype(np.float32) - embedding.weight.set_value(w0) - - adam = paddle.optimizer.Adam(parameters=[embedding.weight], learning_rate=0.01) - adam.clear_grad() - - # weight.shape = [10, 3] - - # x.data = [[3],[4],[5]] - # x.shape = [3, 1] - - # out.data = [[2,2,2], [2,2,2], [2,2,2]] - # out.shape = [3, 1, 3] - out=embedding(x) - out.backward() - adam.step() +COPY-FROM: paddle.nn.Embedding diff --git a/docs/api/paddle/nn/Flatten_cn.rst b/docs/api/paddle/nn/Flatten_cn.rst index 79b5339a5d1..a75251a6606 100644 --- a/docs/api/paddle/nn/Flatten_cn.rst +++ b/docs/api/paddle/nn/Flatten_cn.rst @@ -24,12 +24,4 @@ Flatten 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - inp_np = np.ones([5, 2, 3, 4]).astype('float32') - inp_np = paddle.to_tensor(inp_np) - flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2) - flatten_res = flatten(inp_np) +COPY-FROM: paddle.nn.Flatten diff --git a/docs/api/paddle/nn/GroupNorm_cn.rst b/docs/api/paddle/nn/GroupNorm_cn.rst index 66ebdd91834..71315cb4205 100644 --- a/docs/api/paddle/nn/GroupNorm_cn.rst +++ b/docs/api/paddle/nn/GroupNorm_cn.rst @@ -29,15 +29,4 @@ GroupNorm 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - np.random.seed(123) - x_data = np.random.random(size=(2, 6, 2, 2)).astype('float32') - x = paddle.to_tensor(x_data) - group_norm = paddle.nn.GroupNorm(num_channels=6, num_groups=6) - group_norm_out = group_norm(x) - - print(group_norm_out) +COPY-FROM: paddle.nn.GroupNorm diff --git a/docs/api/paddle/nn/InstanceNorm1D_cn.rst b/docs/api/paddle/nn/InstanceNorm1D_cn.rst index 8dfe5ac9739..6a22167cbdc 100644 --- a/docs/api/paddle/nn/InstanceNorm1D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm1D_cn.rst @@ -47,15 +47,4 @@ Note: 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - np.random.seed(123) - x_data = np.random.random(size=(2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) - instance_norm = paddle.nn.InstanceNorm1D(2) - instance_norm_out = instance_norm(x) - - print(instance_norm_out) +COPY-FROM: paddle.nn.InstanceNorm1D diff --git a/docs/api/paddle/nn/InstanceNorm2D_cn.rst b/docs/api/paddle/nn/InstanceNorm2D_cn.rst index 9651b45949c..e02df56fe3e 100644 --- a/docs/api/paddle/nn/InstanceNorm2D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm2D_cn.rst @@ -46,15 +46,4 @@ Note: 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - np.random.seed(123) - x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) - instance_norm = paddle.nn.InstanceNorm2D(2) - instance_norm_out = instance_norm(x) - - print(instance_norm_out) +COPY-FROM: paddle.nn.InstanceNorm2D diff --git a/docs/api/paddle/nn/InstanceNorm3D_cn.rst b/docs/api/paddle/nn/InstanceNorm3D_cn.rst index 9ad9bfc30c5..345ef87b3a0 100644 --- a/docs/api/paddle/nn/InstanceNorm3D_cn.rst +++ b/docs/api/paddle/nn/InstanceNorm3D_cn.rst @@ -45,15 +45,4 @@ Note: 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - np.random.seed(123) - x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) - instance_norm = paddle.nn.InstanceNorm3D(2) - instance_norm_out = instance_norm(x) - - print(instance_norm_out) +COPY-FROM: paddle.nn.InstanceNorm3D diff --git a/docs/api/paddle/nn/LayerNorm_cn.rst b/docs/api/paddle/nn/LayerNorm_cn.rst index 817fe730409..63ca462e9bd 100644 --- a/docs/api/paddle/nn/LayerNorm_cn.rst +++ b/docs/api/paddle/nn/LayerNorm_cn.rst @@ -41,15 +41,4 @@ LayerNorm 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - np.random.seed(123) - x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32') - x = paddle.to_tensor(x_data) - layer_norm = paddle.nn.LayerNorm(x_data.shape[1:]) - layer_norm_out = layer_norm(x) - - print(layer_norm_out) +COPY-FROM: paddle.nn.LayerNorm diff --git a/docs/api/paddle/nn/PixelShuffle_cn.rst b/docs/api/paddle/nn/PixelShuffle_cn.rst index 671c6ab5f23..e9b8052f94c 100644 --- a/docs/api/paddle/nn/PixelShuffle_cn.rst +++ b/docs/api/paddle/nn/PixelShuffle_cn.rst @@ -29,13 +29,5 @@ PixelShuffle 代码示例 ::::::::: -.. 
code-block:: python - import paddle - import paddle.nn as nn - - x = paddle.rand((2, 9, 4, 4)) - pixel_shuffle = nn.PixelShuffle(3) - out = pixel_shuffle(x) - print(out.shape) - # (2, 1, 12, 12) +COPY-FROM: paddle.nn.PixelShuffle diff --git a/docs/api/paddle/nn/Softmax_cn.rst b/docs/api/paddle/nn/Softmax_cn.rst index ffb4b93010a..10de5dd6c2b 100644 --- a/docs/api/paddle/nn/Softmax_cn.rst +++ b/docs/api/paddle/nn/Softmax_cn.rst @@ -93,23 +93,4 @@ Softmax 激活层,OP 的计算过程如下: 代码示例 :::::::::: -.. code-block:: python - - import paddle - import numpy as np - - x = np.array([[[-2.0, 3.0, -4.0, 5.0], - [3.0, -4.0, 5.0, -6.0], - [-7.0, -8.0, 8.0, 9.0]], - [[1.0, -2.0, -3.0, 4.0], - [-5.0, 6.0, 7.0, -8.0], - [6.0, 7.0, 8.0, 9.0]]], 'float32') - x = paddle.to_tensor(x) - m = paddle.nn.Softmax() - out = m(x) - # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], - # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], - # [0.07232949, 0.19661193, 0.19661193, 0.53444665]], - # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], - # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], - # [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] +COPY-FROM: paddle.nn.Softmax diff --git a/docs/api/paddle/nn/SpectralNorm_cn.rst b/docs/api/paddle/nn/SpectralNorm_cn.rst index 9405e38b429..05a09829e45 100644 --- a/docs/api/paddle/nn/SpectralNorm_cn.rst +++ b/docs/api/paddle/nn/SpectralNorm_cn.rst @@ -44,12 +44,4 @@ SpectralNorm 代码示例 ::::::::: -.. 
code-block:: python - - import paddle - x = paddle.rand((2,8,32,32)) - - spectral_norm = paddle.nn.SpectralNorm(x.shape, dim=1, power_iters=2) - spectral_norm_out = spectral_norm(x) - - print(spectral_norm_out.shape) # [2, 8, 32, 32] +COPY-FROM: paddle.nn.SpectralNorm diff --git a/docs/api/paddle/nn/functional/diag_embed_cn.rst b/docs/api/paddle/nn/functional/diag_embed_cn.rst index ed1491f81a3..798e7c33c51 100644 --- a/docs/api/paddle/nn/functional/diag_embed_cn.rst +++ b/docs/api/paddle/nn/functional/diag_embed_cn.rst @@ -32,47 +32,4 @@ diag_embed 代码示例 :::::::::::: -.. code-block:: python - - import paddle.nn.functional as F - import numpy as np - - diag_embed = np.random.randn(2, 3).astype('float32') - # [[ 0.7545889 , -0.25074545, 0.5929117 ], - # [-0.6097662 , -0.01753256, 0.619769 ]] - - data1 = F.diag_embed(diag_embed) - data1.numpy() - # [[[ 0.7545889 , 0. , 0. ], - # [ 0. , -0.25074545, 0. ], - # [ 0. , 0. , 0.5929117 ]], - - # [[-0.6097662 , 0. , 0. ], - # [ 0. , -0.01753256, 0. ], - # [ 0. , 0. , 0.619769 ]]] - - data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2) - data2.numpy() - # [[[ 0. , 0. , 0. , 0. ], - # [ 0.7545889 , 0. , 0. , 0. ], - # [ 0. , -0.25074545, 0. , 0. ], - # [ 0. , 0. , 0.5929117 , 0. ]], - # - # [[ 0. , 0. , 0. , 0. ], - # [-0.6097662 , 0. , 0. , 0. ], - # [ 0. , -0.01753256, 0. , 0. ], - # [ 0. , 0. , 0.619769 , 0. ]]] - - data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2) - data3.numpy() - # [[[ 0. , 0.7545889 , 0. , 0. ], - # [ 0. , -0.6097662 , 0. , 0. ]], - # - # [[ 0. , 0. , -0.25074545, 0. ], - # [ 0. , 0. , -0.01753256, 0. ]], - # - # [[ 0. , 0. , 0. , 0.5929117 ], - # [ 0. , 0. , 0. , 0.619769 ]], - # - # [[ 0. , 0. , 0. , 0. ], - # [ 0. , 0. , 0. , 0. 
]]] +COPY-FROM: paddle.nn.functional.diag_embed diff --git a/docs/api/paddle/nn/functional/dropout_cn.rst b/docs/api/paddle/nn/functional/dropout_cn.rst index 4c04bb1b372..fe2d1ce5ecd 100644 --- a/docs/api/paddle/nn/functional/dropout_cn.rst +++ b/docs/api/paddle/nn/functional/dropout_cn.rst @@ -113,21 +113,4 @@ axis 参数的默认值为 None。当 ``axis=None`` 时,dropout 的功能为 代码示例 ::::::::: -.. code-block:: python - - import paddle - import numpy as np - - x = np.array([[1,2,3], [4,5,6]]).astype('float32') - x = paddle.to_tensor(x) - y_train = paddle.nn.functional.dropout(x, 0.5) - y_test = paddle.nn.functional.dropout(x, 0.5, training=False) #test - y_0 = paddle.nn.functional.dropout(x, axis=0) - y_1 = paddle.nn.functional.dropout(x, axis=1) - y_01 = paddle.nn.functional.dropout(x, axis=[0,1]) - print(x) - print(y_train) - print(y_test) - print(y_0) - print(y_1) - print(y_01) +COPY-FROM: paddle.nn.functional.dropout diff --git a/docs/api/paddle/nn/functional/embedding_cn.rst b/docs/api/paddle/nn/functional/embedding_cn.rst index c82fb53c8e3..69b4477c718 100644 --- a/docs/api/paddle/nn/functional/embedding_cn.rst +++ b/docs/api/paddle/nn/functional/embedding_cn.rst @@ -55,25 +55,4 @@ Tensor, input 映射后得到的 Embedding Tensor,数据类型和权重定义 代码示例 :::::::::::: -.. code-block:: python - - import numpy as np - import paddle - import paddle.nn as nn - - x0 = np.arange(3, 6).reshape((3, 1)).astype(np.int64) - w0 = np.full(shape=(10, 3), fill_value=2).astype(np.float32) - - # x.data = [[3], [4], [5]] - # x.shape = [3, 1] - x = paddle.to_tensor(x0, stop_gradient=False) - - # w.data = [[2. 2. 2.] ... [2. 2. 
2.]] - # w.shape = [10, 3] - w = paddle.to_tensor(w0, stop_gradient=False) - - # emb.data = [[[2., 2., 2.]], [[2., 2., 2.]], [[2., 2., 2.]]] - # emb.shape = [3, 1, 3] - - emb = nn.functional.embedding( - x=x, weight=w, sparse=True, name="embedding") +COPY-FROM: paddle.nn.functional.embedding diff --git a/docs/api/paddle/nn/functional/grid_sample_cn.rst b/docs/api/paddle/nn/functional/grid_sample_cn.rst index e417b02d4cc..1fc61a6cd92 100644 --- a/docs/api/paddle/nn/functional/grid_sample_cn.rst +++ b/docs/api/paddle/nn/functional/grid_sample_cn.rst @@ -70,44 +70,4 @@ Tensor,输入 X 基于输入网格的双线性插值计算结果,维度为 : 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import paddle.nn.functional as F - import numpy as np - - # shape=[1, 1, 3, 3] - x = np.array([[[[-0.6, 0.8, -0.5], - [-0.5, 0.2, 1.2], - [ 1.4, 0.3, -0.2]]]]).astype("float64") - - # grid shape = [1, 3, 4, 2] - grid = np.array( - [[[[ 0.2, 0.3], - [-0.4, -0.3], - [-0.9, 0.3], - [-0.9, -0.6]], - [[ 0.4, 0.1], - [ 0.9, -0.8], - [ 0.4, 0.5], - [ 0.5, -0.2]], - [[ 0.1, -0.8], - [-0.3, -1. ], - [ 0.7, 0.4], - [ 0.2, 0.8]]]]).astype("float64") - - - x = paddle.to_tensor(x) - grid = paddle.to_tensor(grid) - y_t = F.grid_sample( - x, - grid, - mode='bilinear', - padding_mode='border', - align_corners=True) - print(y_t) - - # output shape = [1, 1, 3, 4] - # [[[[ 0.34 0.016 0.086 -0.448] - # [ 0.55 -0.076 0.35 0.59 ] - # [ 0.596 0.38 0.52 0.24 ]]]] +COPY-FROM: paddle.nn.functional.grid_sample diff --git a/docs/api/paddle/nn/functional/one_hot_cn.rst b/docs/api/paddle/nn/functional/one_hot_cn.rst index 675a05596af..aaa7f5dff85 100644 --- a/docs/api/paddle/nn/functional/one_hot_cn.rst +++ b/docs/api/paddle/nn/functional/one_hot_cn.rst @@ -51,14 +51,4 @@ Tensor,转换后的 one_hot Tensor,数据类型为 float32。 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - label = paddle.to_tensor([1, 1, 3, 0], dtype='int64') - # label.shape = [4] - one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4) - # one_hot_label.shape = [4, 4] - # one_hot_label = [[0., 1., 0., 0.], - # [0., 1., 0., 0.], - # [0., 0., 0., 1.], - # [1., 0., 0., 0.]] +COPY-FROM: paddle.nn.functional.one_hot diff --git a/docs/api/paddle/nn/functional/pad_cn.rst b/docs/api/paddle/nn/functional/pad_cn.rst index 2d91dcbecce..d8530d09169 100644 --- a/docs/api/paddle/nn/functional/pad_cn.rst +++ b/docs/api/paddle/nn/functional/pad_cn.rst @@ -85,32 +85,4 @@ Tensor,对 ``x`` 进行 ``'pad'`` 的结果,数据类型和 ``x`` 相同。 代码示例 :::::::::::: -.. code-block:: python - - import numpy as np - import paddle - import paddle.nn.functional as F - - # example 1 - x_shape = (1, 1, 3) - x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1 - y = F.pad(x, [0, 0, 0, 0, 2, 3], value=1, mode='constant', data_format="NCL") - print(y) - # [[[1. 1. 1. 2. 3. 1. 1. 1.]]] - - # example 2 - x_shape = (1, 1, 3) - x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1 - y = F.pad(x, [2, 3], value=1, mode='constant', data_format="NCL") - print(y) - # [[[1. 1. 1. 2. 3. 1. 1. 1.]]] - - # example 3 - x_shape = (1, 1, 2, 3) - x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1 - y = F.pad(x, [1, 2, 1, 1], value=1, mode='circular') - print(y) - # [[[[6. 4. 5. 6. 4. 5.] - # [3. 1. 2. 3. 1. 2.] - # [6. 4. 5. 6. 4. 5.] - # [3. 1. 2. 3. 1. 2.]]]] +COPY-FROM: paddle.nn.functional.pad diff --git a/docs/api/paddle/nn/functional/softmax_cn.rst b/docs/api/paddle/nn/functional/softmax_cn.rst index 45b14069849..0e51888959d 100644 --- a/docs/api/paddle/nn/functional/softmax_cn.rst +++ b/docs/api/paddle/nn/functional/softmax_cn.rst @@ -96,26 +96,4 @@ softmax 代码示例 :::::::::: -.. 
code-block:: python - - import paddle - import paddle.nn.functional as F - import numpy as np - - x = np.array([[[2.0, 3.0, 4.0, 5.0], - [3.0, 4.0, 5.0, 6.0], - [7.0, 8.0, 8.0, 9.0]], - [[1.0, 2.0, 3.0, 4.0], - [5.0, 6.0, 7.0, 8.0], - [6.0, 7.0, 8.0, 9.0]]], 'float32') - x = paddle.to_tensor(x) - out1 = F.softmax(x) - out2 = F.softmax(x, dtype='float64') - # out1's data type is float32; out2's data type is float64 - # out1 and out2's value is as follows: - # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], - # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], - # [0.07232949, 0.19661193, 0.19661193, 0.53444665]], - # [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], - # [0.0320586 , 0.08714432, 0.23688282, 0.64391426], - # [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] +COPY-FROM: paddle.nn.functional.softmax diff --git a/docs/api/paddle/nn/initializer/Orthogonal_cn.rst b/docs/api/paddle/nn/initializer/Orthogonal_cn.rst index 9bee111c7bf..68a1cebe99d 100644 --- a/docs/api/paddle/nn/initializer/Orthogonal_cn.rst +++ b/docs/api/paddle/nn/initializer/Orthogonal_cn.rst @@ -35,13 +35,4 @@ Orthogonal 代码示例 ::::::::: -.. code-block:: python - - import paddle - - weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Orthogonal()) - linear = paddle.nn.Linear(10, 15, weight_attr=weight_attr) - # linear.weight: X * X' = I - - linear = paddle.nn.Linear(15, 10, weight_attr=weight_attr) - # linear.weight: X' * X = I +COPY-FROM: paddle.nn.initializer.Orthogonal diff --git a/docs/api/paddle/normal_cn.rst b/docs/api/paddle/normal_cn.rst index 25eb5e49781..ede638e2fe1 100644 --- a/docs/api/paddle/normal_cn.rst +++ b/docs/api/paddle/normal_cn.rst @@ -28,18 +28,4 @@ normal 示例代码 :::::::::: -.. 
code-block:: python - - import paddle - - out1 = paddle.normal(shape=[2, 3]) - # [[ 0.17501129 0.32364586 1.561118 ] - # [-1.7232178 1.1545963 -0.76156676]] # random - - mean_tensor = paddle.to_tensor([1.0, 2.0, 3.0]) - out2 = paddle.normal(mean=mean_tensor) - # [ 0.18644847 -1.19434458 3.93694787] # random - - std_tensor = paddle.to_tensor([1.0, 2.0, 3.0]) - out3 = paddle.normal(mean=mean_tensor, std=std_tensor) - # [1.00780561 3.78457445 5.81058198] # random +COPY-FROM: paddle.normal diff --git a/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst b/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst index 2acd1601dab..3ac2c2d2b9a 100644 --- a/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/CosineAnnealingDecay_cn.rst @@ -39,53 +39,7 @@ CosineAnnealingDecay 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.5, T_max=10, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(5): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.5, T_max=10, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - 
sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in range(5): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.CosineAnnealingDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst b/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst index 0f655cd9531..cc28203da7a 100644 --- a/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/ExponentialDecay_cn.rst @@ -28,53 +28,7 @@ ExponentialDecay 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.5, gamma=0.9, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.minimize(loss) - linear.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.5, gamma=0.9, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = 
paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - +COPY-FROM: paddle.optimizer.lr.ExponentialDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst b/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst index 2a28e133abf..ebe9f53a9df 100644 --- a/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/InverseTimeDecay_cn.rst @@ -30,52 +30,7 @@ InverseTimeDecay 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.InverseTimeDecay(learning_rate=0.5, gamma=0.1, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.minimize(loss) - linear.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.InverseTimeDecay(learning_rate=0.5, gamma=0.1, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - 
exe.run(start_prog) - for epoch in range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.InverseTimeDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst b/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst index a2aba4c4476..722a7c28fc1 100644 --- a/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/LambdaDecay_cn.rst @@ -35,52 +35,7 @@ LambdaDecay 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.minimize(loss) - linear.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in 
range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.LambdaDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst b/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst index ecf4609b038..7aa3ee46158 100644 --- a/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst +++ b/docs/api/paddle/optimizer/lr/LinearWarmup_cn.rst @@ -39,54 +39,7 @@ LinearWarmup 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.LinearWarmup( - learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.LinearWarmup( - learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): 
- for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.LinearWarmup 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst b/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst index ba969d3adba..b5d40e896cb 100644 --- a/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/MultiStepDecay_cn.rst @@ -37,52 +37,7 @@ MultiStepDecay 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in 
range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.MultiStepDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst b/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst index 78d69338bc7..4f11fe72d4b 100644 --- a/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/MultiplicativeDecay_cn.rst @@ -35,25 +35,7 @@ MultiplicativeDecay 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.MultiplicativeDecay(learning_rate=0.5, lr_lambda=lambda x:0.95, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.minimize(loss) - linear.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.MultiplicativeDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst b/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst index 62049530e27..5213933030d 100644 --- a/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/NaturalExpDecay_cn.rst @@ -28,52 +28,7 @@ NaturalExpDecay 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, gamma=0.1, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, gamma=0.1, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.NaturalExpDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst b/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst index 24d1826186d..d5889874af0 100644 --- a/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/NoamDecay_cn.rst @@ -32,54 +32,7 @@ Noam 衰减的计算方式如下: 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, warmup_steps=100, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, warmup_steps=100, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - +COPY-FROM: paddle.optimizer.lr.NoamDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/PiecewiseDecay_cn.rst b/docs/api/paddle/optimizer/lr/PiecewiseDecay_cn.rst index fe1e0642981..08d8dc9465c 100644 --- a/docs/api/paddle/optimizer/lr/PiecewiseDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/PiecewiseDecay_cn.rst @@ -36,52 +36,7 @@ PiecewiseDecay 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.PiecewiseDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst b/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst index 2be30a55785..1b241e4dd9f 100644 --- a/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/PolynomialDecay_cn.rst @@ -44,52 +44,7 @@ PolynomialDecay 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.5, decay_steps=20, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.5, decay_steps=20, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.PolynomialDecay 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/ReduceOnPlateau_cn.rst b/docs/api/paddle/optimizer/lr/ReduceOnPlateau_cn.rst index 198732cd134..e88698a33b5 100644 --- a/docs/api/paddle/optimizer/lr/ReduceOnPlateau_cn.rst +++ b/docs/api/paddle/optimizer/lr/ReduceOnPlateau_cn.rst @@ -33,53 +33,7 @@ ReduceOnPlateau 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step(loss) # If you update learning rate each step - # scheduler.step(loss) # If you update learning rate each epoch - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step(out[0]) # If you update learning rate each step - # scheduler.step(out[0]) # If you update learning rate each epoch - +COPY-FROM: paddle.optimizer.lr.ReduceOnPlateau 方法 :::::::::::: diff --git a/docs/api/paddle/optimizer/lr/StepDecay_cn.rst b/docs/api/paddle/optimizer/lr/StepDecay_cn.rst index bdb1dc70c59..fa99559b03a 100644 --- a/docs/api/paddle/optimizer/lr/StepDecay_cn.rst +++ b/docs/api/paddle/optimizer/lr/StepDecay_cn.rst @@ -37,52 +37,7 @@ StepDecay 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - import numpy as np - - # train on default dynamic graph mode - linear = paddle.nn.Linear(10, 10) - scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters()) - for epoch in range(20): - for batch_id in range(2): - x = paddle.uniform([10, 10]) - out = linear(x) - loss = paddle.mean(out) - loss.backward() - sgd.step() - sgd.clear_gradients() - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch - - # train on static graph mode - paddle.enable_static() - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.static.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[None, 4, 5]) - y = paddle.static.data(name='y', shape=[None, 4, 5]) - z = paddle.static.nn.fc(x, 100) - loss = paddle.mean(z) - scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True) - sgd = paddle.optimizer.SGD(learning_rate=scheduler) - sgd.minimize(loss) - - exe = paddle.static.Executor() - exe.run(start_prog) - for epoch in range(20): - for batch_id in range(2): - out = exe.run( - main_prog, - feed={ - 'x': np.random.randn(3, 4, 5).astype('float32'), - 'y': np.random.randn(3, 4, 5).astype('float32') - }, - fetch_list=loss.name) - scheduler.step() # If you update learning rate each step - # scheduler.step() # If you update learning rate each epoch +COPY-FROM: paddle.optimizer.lr.StepDecay 方法 :::::::::::: diff --git a/docs/api/paddle/outer_cn.rst b/docs/api/paddle/outer_cn.rst index e6c05a01ee9..933fe6896cc 100644 --- a/docs/api/paddle/outer_cn.rst +++ b/docs/api/paddle/outer_cn.rst @@ -25,15 +25,4 @@ Tensor, x、y 的外积结果,Tensor shape 为 [x.size, y.size]。 代码示例: :::::::::: -.. 
code-block:: python - - import paddle - - x = paddle.arange(1, 4).astype('float32') - y = paddle.arange(1, 6).astype('float32') - out = paddle.outer(x, y) - - print(out) - # ([[1, 2, 3, 4, 5], - # [2, 4, 6, 8, 10], - # [3, 6, 9, 12, 15]]) +COPY-FROM: paddle.outer diff --git a/docs/api/paddle/rand_cn.rst b/docs/api/paddle/rand_cn.rst index 08a069d0cfc..0b5ba47982a 100644 --- a/docs/api/paddle/rand_cn.rst +++ b/docs/api/paddle/rand_cn.rst @@ -20,28 +20,4 @@ rand 示例代码 :::::::::: -.. code-block:: python - - import paddle - - # example 1: attr shape is a list which doesn't contain Tensor. - out1 = paddle.rand(shape=[2, 3]) - # [[0.451152 , 0.55825245, 0.403311 ], # random - # [0.22550228, 0.22106001, 0.7877319 ]] # random - - # example 2: attr shape is a list which contains Tensor. - dim1 = paddle.to_tensor([2], 'int64') - dim2 = paddle.to_tensor([3], 'int32') - out2 = paddle.rand(shape=[dim1, dim2, 2]) - # [[[0.8879919 , 0.25788337], # random - # [0.28826773, 0.9712097 ], # random - # [0.26438272, 0.01796806]], # random - # [[0.33633623, 0.28654453], # random - # [0.79109055, 0.7305809 ], # random - # [0.870881 , 0.2984597 ]]] # random - - # example 3: attr shape is a Tensor, the data type must be int64 or int32. - shape_tensor = paddle.to_tensor([2, 3]) - out3 = paddle.rand(shape_tensor) - # [[0.22920267, 0.841956 , 0.05981819], # random - # [0.4836288 , 0.24573246, 0.7516129 ]] # random +COPY-FROM: paddle.rand diff --git a/docs/api/paddle/randn_cn.rst b/docs/api/paddle/randn_cn.rst index 7353b074e74..312980d93db 100644 --- a/docs/api/paddle/randn_cn.rst +++ b/docs/api/paddle/randn_cn.rst @@ -20,28 +20,4 @@ randn 示例代码 :::::::::: -.. code-block:: python - - import paddle - - # example 1: attr shape is a list which doesn't contain Tensor. - out1 = paddle.randn(shape=[2, 3]) - # [[-2.923464 , 0.11934398, -0.51249987], # random - # [ 0.39632758, 0.08177969, 0.2692008 ]] # random - - # example 2: attr shape is a list which contains Tensor. 
- dim1 = paddle.to_tensor([2], 'int64') - dim2 = paddle.to_tensor([3], 'int32') - out2 = paddle.randn(shape=[dim1, dim2, 2]) - # [[[-2.8852394 , -0.25898588], # random - # [-0.47420555, 0.17683524], # random - # [-0.7989969 , 0.00754541]], # random - # [[ 0.85201347, 0.32320443], # random - # [ 1.1399018 , 0.48336947], # random - # [ 0.8086993 , 0.6868893 ]]] # random - - # example 3: attr shape is a Tensor, the data type must be int64 or int32. - shape_tensor = paddle.to_tensor([2, 3]) - out3 = paddle.randn(shape_tensor) - # [[-2.878077 , 0.17099959, 0.05111201] # random - # [-0.3761474, -1.044801 , 1.1870178 ]] # random +COPY-FROM: paddle.randn diff --git a/docs/api/paddle/round_cn.rst b/docs/api/paddle/round_cn.rst index e899bb0747c..cf6f554a867 100644 --- a/docs/api/paddle/round_cn.rst +++ b/docs/api/paddle/round_cn.rst @@ -34,10 +34,4 @@ round 代码示例 :::::::::::: -.. code-block:: python - - import paddle - - x = paddle.to_tensor([1.2, -0.9, 3.4, 0.9], dtype='float32') - result = paddle.round(x) - print(result) # result=[1., -1., 3., 1.] +COPY-FROM: paddle.round diff --git a/docs/api/paddle/scale_cn.rst b/docs/api/paddle/scale_cn.rst index eeb0606b562..270f7143495 100644 --- a/docs/api/paddle/scale_cn.rst +++ b/docs/api/paddle/scale_cn.rst @@ -34,12 +34,4 @@ scale 代码示例 :::::::::::: -.. code-block:: python - - # scale as a float32 number - import paddle - - data = paddle.randn(shape=[2,3], dtype='float32') - res = paddle.scale(data, scale=2.0, bias=1.0) - COPY-FROM: paddle.scale diff --git a/docs/api/paddle/stack_cn.rst b/docs/api/paddle/stack_cn.rst index 0fa122a8adf..d03084976c0 100644 --- a/docs/api/paddle/stack_cn.rst +++ b/docs/api/paddle/stack_cn.rst @@ -69,24 +69,4 @@ stack 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - - x1 = paddle.to_tensor([[1.0, 2.0]]) - x2 = paddle.to_tensor([[3.0, 4.0]]) - x3 = paddle.to_tensor([[5.0, 6.0]]) - - out = paddle.stack([x1, x2, x3], axis=0) - print(out.shape) # [3, 1, 2] - print(out) - # [[[1., 2.]], - # [[3., 4.]], - # [[5., 6.]]] - - out = paddle.stack([x1, x2, x3], axis=-2) - print(out.shape) # [1, 3, 2] - print(out) - # [[[1., 2.], - # [3., 4.], - # [5., 6.]]] +COPY-FROM: paddle.stack diff --git a/docs/api/paddle/standard_normal_cn.rst b/docs/api/paddle/standard_normal_cn.rst index bb2166b0873..002a7156318 100644 --- a/docs/api/paddle/standard_normal_cn.rst +++ b/docs/api/paddle/standard_normal_cn.rst @@ -20,28 +20,4 @@ standard_normal 示例代码 :::::::::: -.. code-block:: python - - import paddle - - # example 1: attr shape is a list which doesn't contain Tensor. - out1 = paddle.standard_normal(shape=[2, 3]) - # [[-2.923464 , 0.11934398, -0.51249987], # random - # [ 0.39632758, 0.08177969, 0.2692008 ]] # random - - # example 2: attr shape is a list which contains Tensor. - dim1 = paddle.to_tensor([2], 'int64') - dim2 = paddle.to_tensor([3], 'int32') - out2 = paddle.standard_normal(shape=[dim1, dim2, 2]) - # [[[-2.8852394 , -0.25898588], # random - # [-0.47420555, 0.17683524], # random - # [-0.7989969 , 0.00754541]], # random - # [[ 0.85201347, 0.32320443], # random - # [ 1.1399018 , 0.48336947], # random - # [ 0.8086993 , 0.6868893 ]]] # random - - # example 3: attr shape is a Tensor, the data type must be int64 or int32. 
- shape_tensor = paddle.to_tensor([2, 3]) - out3 = paddle.standard_normal(shape_tensor) - # [[-2.878077 , 0.17099959, 0.05111201] # random - # [-0.3761474, -1.044801 , 1.1870178 ]] # random +COPY-FROM: paddle.standard_normal diff --git a/docs/api/paddle/static/nn/fc_cn.rst b/docs/api/paddle/static/nn/fc_cn.rst index 03a428b8aa5..310ab47c534 100755 --- a/docs/api/paddle/static/nn/fc_cn.rst +++ b/docs/api/paddle/static/nn/fc_cn.rst @@ -87,34 +87,4 @@ Tensor,形状为 :math:`[batch\_size, *, size]`,数据类型与输入 Tensor 代码示例 ::::::::: - -.. code-block:: python - - import paddle - paddle.enable_static() - - # When input is a single tensor - x = paddle.static.data(name="x", shape=[1, 2, 2], dtype="float32") - # x: [[[0.1 0.2] - # [0.3 0.4]]] - out = paddle.static.nn.fc( - x=x, - size=1, - num_flatten_dims=2, - weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)), - bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0))) - # out: [[[1.15] - # [1.35]]] - - # When input is multiple tensors - x0 = paddle.static.data(name="x0", shape=[1, 2, 2], dtype="float32") - # x0: [[[0.1 0.2] - # [0.3 0.4]]] - x1 = paddle.static.data(name="x1", shape=[1, 1, 3], dtype="float32") - # x1: [[[0.1 0.2 0.3]]] - out = paddle.static.nn.fc( - x=[x0, x1], - size=2, - weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)), - bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0))) - # out: [[1.8 1.8]] +COPY-FROM: paddle.static.nn.fc diff --git a/docs/api/paddle/static/nn/while_loop_cn.rst b/docs/api/paddle/static/nn/while_loop_cn.rst index 6309027512c..4b9f8e33e89 100644 --- a/docs/api/paddle/static/nn/while_loop_cn.rst +++ b/docs/api/paddle/static/nn/while_loop_cn.rst @@ -29,25 +29,4 @@ list|tuple,循环迭代之后 ``body`` 的返回值,和 ``loop_vars`` 具有 示例代码 ::::::::: -.. 
code-block:: python - - import paddle - paddle.enable_static() - - def cond(i, ten): - return i < ten - - def body(i, ten): - i = i + 1 - return [i, ten] - - main_program = paddle.static.default_main_program() - startup_program = paddle.static.default_startup_program() - with paddle.static.program_guard(main_program, startup_program): - i = paddle.full(shape=[1], fill_value=0, dtype='int64') # loop counter - ten = paddle.full(shape=[1], fill_value=10, dtype='int64') # loop length - i, ten = paddle.static.nn.while_loop(cond, body, [i, ten]) - - exe = paddle.static.Executor(paddle.CPUPlace()) - res = exe.run(main_program, feed={}, fetch_list=[i]) - print(res) # [array([10])] +COPY-FROM: paddle.static.nn.while_loop diff --git a/docs/api/paddle/text/UCIHousing_cn.rst b/docs/api/paddle/text/UCIHousing_cn.rst index 83c6f42af2a..d02056cab9a 100644 --- a/docs/api/paddle/text/UCIHousing_cn.rst +++ b/docs/api/paddle/text/UCIHousing_cn.rst @@ -23,26 +23,4 @@ UCIHousing 代码示例 ::::::::: -.. code-block:: python - - import paddle - from paddle.text.datasets import UCIHousing - - class SimpleNet(paddle.nn.Layer): - def __init__(self): - super(SimpleNet, self).__init__() - - def forward(self, feature, target): - return paddle.sum(feature), target - - - uci_housing = UCIHousing(mode='train') - - for i in range(10): - feature, target = uci_housing[i] - feature = paddle.to_tensor(feature) - target = paddle.to_tensor(target) - - model = SimpleNet() - feature, target = model(feature, target) - print(feature.numpy().shape, target.numpy()) +COPY-FROM: paddle.text.datasets.UCIHousing diff --git a/docs/api/paddle/tolist_cn.rst b/docs/api/paddle/tolist_cn.rst index 874504786de..a5c1d2e5749 100644 --- a/docs/api/paddle/tolist_cn.rst +++ b/docs/api/paddle/tolist_cn.rst @@ -25,13 +25,4 @@ Tensor 对应结构的 list。 代码示例 :::::::::::: -.. 
code-block:: python - - import paddle - - t = paddle.to_tensor([0,1,2,3,4]) - expectlist = t.tolist() - print(expectlist) #[0, 1, 2, 3, 4] - - expectlist = paddle.tolist(t) - print(expectlist) #[0, 1, 2, 3, 4] +COPY-FROM: paddle.tolist diff --git a/docs/api/paddle/topk_cn.rst b/docs/api/paddle/topk_cn.rst index 83053573403..fb5d5926b6b 100644 --- a/docs/api/paddle/topk_cn.rst +++ b/docs/api/paddle/topk_cn.rst @@ -3,7 +3,7 @@ topk ------------------------------- -.. py:function:: paddle.topk(x, k, axis=None, largest=True, sorted=True, name=None) +.. py:function:: paddle.topk(x, k, axis=None, largest=True, sorted=True, name=None) 沿着可选的 ``axis`` 查找 topk 最大或者最小的结果和结果所在的索引信息。 如果是一维 Tensor,则直接返回 topk 查询的结果。如果是多维 Tensor,则在指定的轴上查询 topk 的结果。 diff --git a/docs/api/paddle/transpose_cn.rst b/docs/api/paddle/transpose_cn.rst index 085f9bf3062..8134f2bce56 100644 --- a/docs/api/paddle/transpose_cn.rst +++ b/docs/api/paddle/transpose_cn.rst @@ -50,11 +50,4 @@ transpose 代码示例 :::::::::::: -.. code-block:: python - - import paddle - - x = paddle.randn([2, 3, 4]) - x_transposed = paddle.transpose(x, perm=[1, 0, 2]) - print(x_transposed.shape) - # [3L, 2L, 4L] +COPY-FROM: paddle.transpose diff --git a/docs/api/paddle/version/cuda_cn.rst b/docs/api/paddle/version/cuda_cn.rst index cd2a2ac96cd..0d6acf76406 100644 --- a/docs/api/paddle/version/cuda_cn.rst +++ b/docs/api/paddle/version/cuda_cn.rst @@ -16,9 +16,4 @@ cuda 代码示例: :::::::::: -.. code-block:: python - - import paddle - - paddle.version.cuda() - # '10.2' +COPY-FROM: paddle.version.cuda diff --git a/docs/api/paddle/version/cudnn_cn.rst b/docs/api/paddle/version/cudnn_cn.rst index f95d43d1b4e..0ca863a109b 100644 --- a/docs/api/paddle/version/cudnn_cn.rst +++ b/docs/api/paddle/version/cudnn_cn.rst @@ -16,9 +16,4 @@ cudnn 代码示例: :::::::::: -.. 
code-block:: python - - import paddle - - paddle.version.cudnn() - # '7.6.5' +COPY-FROM: paddle.version.cudnn diff --git a/docs/api/paddle/vision/ops/RoIPool_cn.rst b/docs/api/paddle/vision/ops/RoIPool_cn.rst index b8320b74bec..2a051c69ff4 100644 --- a/docs/api/paddle/vision/ops/RoIPool_cn.rst +++ b/docs/api/paddle/vision/ops/RoIPool_cn.rst @@ -26,16 +26,4 @@ RoIPool 代码示例 ::::::::: -.. code-block:: python - - import paddle - from paddle.vision.ops import RoIPool - - data = paddle.rand([1, 256, 32, 32]) - boxes = paddle.rand([3, 4]) - boxes[:, 2] += boxes[:, 0] + 3 - boxes[:, 3] += boxes[:, 1] + 4 - boxes_num = paddle.to_tensor([3]).astype('int32') - roi_pool = RoIPool(output_size=(4, 3)) - pool_out = roi_pool(data, boxes, boxes_num) - assert pool_out.shape == [3, 256, 4, 3], '' +COPY-FROM: paddle.vision.ops.RoIPool diff --git a/docs/api/paddle/vision/ops/roi_pool_cn.rst b/docs/api/paddle/vision/ops/roi_pool_cn.rst index eb345f8a866..0fc11513667 100644 --- a/docs/api/paddle/vision/ops/roi_pool_cn.rst +++ b/docs/api/paddle/vision/ops/roi_pool_cn.rst @@ -26,15 +26,4 @@ roi_pool 代码示例 ::::::::: -.. code-block:: python - - import paddle - from paddle.vision.ops import roi_pool - - data = paddle.rand([1, 256, 32, 32]) - boxes = paddle.rand([3, 4]) - boxes[:, 2] += boxes[:, 0] + 3 - boxes[:, 3] += boxes[:, 1] + 4 - boxes_num = paddle.to_tensor([3]).astype('int32') - pool_out = roi_pool(data, boxes, boxes_num=boxes_num, output_size=3) - assert pool_out.shape == [3, 256, 3, 3], '' +COPY-FROM: paddle.vision.ops.roi_pool diff --git a/docs/api/paddle/vision/transforms/BaseTransform_cn.rst b/docs/api/paddle/vision/transforms/BaseTransform_cn.rst index d2d90c02530..542821a451d 100644 --- a/docs/api/paddle/vision/transforms/BaseTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/BaseTransform_cn.rst @@ -38,71 +38,4 @@ BaseTransform 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - import paddle.vision.transforms.functional as F - from paddle.vision.transforms import BaseTransform - - def _get_image_size(img): - if F._is_pil_image(img): - return img.size - elif F._is_numpy_image(img): - return img.shape[:2][::-1] - else: - raise TypeError("Unexpected type {}".format(type(img))) - - class CustomRandomFlip(BaseTransform): - def __init__(self, prob=0.5, keys=None): - super(CustomRandomFlip, self).__init__(keys) - self.prob = prob - - def _get_params(self, inputs): - image = inputs[self.keys.index('image')] - params = {} - params['flip'] = np.random.random() < self.prob - params['size'] = _get_image_size(image) - return params - - def _apply_image(self, image): - if self.params['flip']: - return F.hflip(image) - return image - - # if you only want to transform image, do not need to rewrite this function - def _apply_coords(self, coords): - if self.params['flip']: - w = self.params['size'][0] - coords[:, 0] = w - coords[:, 0] - return coords - - # if you only want to transform image, do not need to rewrite this function - def _apply_boxes(self, boxes): - idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten() - coords = np.asarray(boxes).reshape(-1, 4)[:, idxs].reshape(-1, 2) - coords = self._apply_coords(coords).reshape((-1, 4, 2)) - minxy = coords.min(axis=1) - maxxy = coords.max(axis=1) - trans_boxes = np.concatenate((minxy, maxxy), axis=1) - return trans_boxes - - # if you only want to transform image, do not need to rewrite this function - def _apply_mask(self, mask): - if self.params['flip']: - return F.hflip(mask) - return mask - - # create fake inputs - fake_img = Image.fromarray((np.random.rand(400, 500, 3) * 255.).astype('uint8')) - fake_boxes = np.array([[2, 3, 200, 300], [50, 60, 80, 100]]) - fake_mask = fake_img.convert('L') - - # only transform for image: - flip_transform = CustomRandomFlip(1.0) - converted_img = flip_transform(fake_img) - - # transform for 
image, boxes and mask - flip_transform = CustomRandomFlip(1.0, keys=('image', 'boxes', 'mask')) - (converted_img, converted_boxes, converted_mask) = flip_transform((fake_img, fake_boxes, fake_mask)) - print('converted boxes', converted_boxes) +COPY-FROM: paddle.vision.transforms.BaseTransform diff --git a/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst b/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst index 68346ea6bb8..ad7f209089b 100644 --- a/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/BrightnessTransform_cn.rst @@ -27,14 +27,4 @@ BrightnessTransform 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import BrightnessTransform - - transform = BrightnessTransform(0.4) - - fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) +COPY-FROM: paddle.vision.transforms.BrightnessTransform diff --git a/docs/api/paddle/vision/transforms/CenterCrop_cn.rst b/docs/api/paddle/vision/transforms/CenterCrop_cn.rst index 815046e8389..948d082d380 100644 --- a/docs/api/paddle/vision/transforms/CenterCrop_cn.rst +++ b/docs/api/paddle/vision/transforms/CenterCrop_cn.rst @@ -27,16 +27,4 @@ CenterCrop 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import CenterCrop - - transform = CenterCrop(224) - - fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) - print(fake_img.size) - # out: (224, 224) width,height +COPY-FROM: paddle.vision.transforms.CenterCrop diff --git a/docs/api/paddle/vision/transforms/ColorJitter_cn.rst b/docs/api/paddle/vision/transforms/ColorJitter_cn.rst index 5f0ff8a9b80..69ed67ae070 100644 --- a/docs/api/paddle/vision/transforms/ColorJitter_cn.rst +++ b/docs/api/paddle/vision/transforms/ColorJitter_cn.rst @@ -30,14 +30,4 @@ ColorJitter 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import ColorJitter - - transform = ColorJitter(0.4, 0.4, 0.4, 0.4) - - fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) +COPY-FROM: paddle.vision.transforms.ColorJitter diff --git a/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst b/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst index 5ee4d9354af..38eb99b9c86 100644 --- a/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/ContrastTransform_cn.rst @@ -27,14 +27,4 @@ ContrastTransform 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import ContrastTransform - - transform = ContrastTransform(0.4) - - fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) +COPY-FROM: paddle.vision.transforms.ContrastTransform diff --git a/docs/api/paddle/vision/transforms/Grayscale_cn.rst b/docs/api/paddle/vision/transforms/Grayscale_cn.rst index f60c73505dd..b84bd19f22e 100644 --- a/docs/api/paddle/vision/transforms/Grayscale_cn.rst +++ b/docs/api/paddle/vision/transforms/Grayscale_cn.rst @@ -27,15 +27,4 @@ Grayscale 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import Grayscale - - transform = Grayscale() - - fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) - print(np.array(fake_img).shape) +COPY-FROM: paddle.vision.transforms.Grayscale diff --git a/docs/api/paddle/vision/transforms/HueTransform_cn.rst b/docs/api/paddle/vision/transforms/HueTransform_cn.rst index 0bf7c0a3d17..78b4e93436c 100644 --- a/docs/api/paddle/vision/transforms/HueTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/HueTransform_cn.rst @@ -27,14 +27,4 @@ HueTransform 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import HueTransform - - transform = HueTransform(0.4) - - fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) +COPY-FROM: paddle.vision.transforms.HueTransform diff --git a/docs/api/paddle/vision/transforms/Pad_cn.rst b/docs/api/paddle/vision/transforms/Pad_cn.rst index 8ccb9134033..ea2c7e6e337 100644 --- a/docs/api/paddle/vision/transforms/Pad_cn.rst +++ b/docs/api/paddle/vision/transforms/Pad_cn.rst @@ -23,18 +23,4 @@ pad 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import functional as F - - fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - - fake_img = Image.fromarray(fake_img) - - padded_img = F.pad(fake_img, padding=1) - print(padded_img.size) - - padded_img = F.pad(fake_img, padding=(2, 1)) - print(padded_img.size) +COPY-FROM: paddle.vision.transforms.pad diff --git a/docs/api/paddle/vision/transforms/RandomErasing_cn.rst b/docs/api/paddle/vision/transforms/RandomErasing_cn.rst index 8650b26373b..e49d6b7dc24 100644 --- a/docs/api/paddle/vision/transforms/RandomErasing_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomErasing_cn.rst @@ -31,12 +31,4 @@ RandomErasing 代码示例 ::::::::: -.. code-block:: python - - import paddle - - fake_img = paddle.randn((3, 10, 10)).astype(paddle.float32) - transform = paddle.vision.transforms.RandomErasing() - result = transform(fake_img) - - print(result) +COPY-FROM: paddle.vision.transforms.RandomErasing diff --git a/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst b/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst index d23bc6004fc..e202e085a4f 100644 --- a/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomHorizontalFlip_cn.rst @@ -27,15 +27,4 @@ RandomHorizontalFlip 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import RandomHorizontalFlip - - transform = RandomHorizontalFlip(0.5) - - fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) - print(fake_img.size) +COPY-FROM: paddle.vision.transforms.RandomHorizontalFlip diff --git a/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst b/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst index cbbe9b1d7a1..eef190d4423 100644 --- a/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomResizedCrop_cn.rst @@ -32,15 +32,4 @@ RandomResizedCrop 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import RandomResizedCrop - - transform = RandomResizedCrop(224) - - fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) - print(fake_img.size) +COPY-FROM: paddle.vision.transforms.RandomResizedCrop diff --git a/docs/api/paddle/vision/transforms/RandomRotation_cn.rst b/docs/api/paddle/vision/transforms/RandomRotation_cn.rst index 153df3974de..2215f8a6a7e 100644 --- a/docs/api/paddle/vision/transforms/RandomRotation_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomRotation_cn.rst @@ -42,15 +42,4 @@ RandomRotate 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import RandomRotation - - transform = RandomRotation(90) - - fake_img = Image.fromarray((np.random.rand(200, 150, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) - print(fake_img.size) +COPY-FROM: paddle.vision.transforms.RandomRotation diff --git a/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst b/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst index 4ed411a25c8..b5167c83fbd 100644 --- a/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst +++ b/docs/api/paddle/vision/transforms/RandomVerticalFlip_cn.rst @@ -27,15 +27,4 @@ RandomVerticalFlip 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import RandomVerticalFlip - - transform = RandomVerticalFlip() - - fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) - print(fake_img.size) +COPY-FROM: paddle.vision.transforms.RandomVerticalFlip diff --git a/docs/api/paddle/vision/transforms/Resize_cn.rst b/docs/api/paddle/vision/transforms/Resize_cn.rst index 9d9d25fa809..6d511303f05 100644 --- a/docs/api/paddle/vision/transforms/Resize_cn.rst +++ b/docs/api/paddle/vision/transforms/Resize_cn.rst @@ -35,20 +35,4 @@ resize 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import functional as F - - fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - - fake_img = Image.fromarray(fake_img) - - converted_img = F.resize(fake_img, 224) - print(converted_img.size) - # (262, 224) - - converted_img = F.resize(fake_img, (200, 150)) - print(converted_img.size) - # (150, 200) +COPY-FROM: paddle.vision.transforms.resize diff --git a/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst b/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst index b69fffa0f75..5efef5550f0 100644 --- a/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst +++ b/docs/api/paddle/vision/transforms/SaturationTransform_cn.rst @@ -27,14 +27,4 @@ SaturationTransform 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import SaturationTransform - - transform = SaturationTransform(0.4) - - fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) +COPY-FROM: paddle.vision.transforms.SaturationTransform diff --git a/docs/api/paddle/vision/transforms/ToTensor_cn.rst b/docs/api/paddle/vision/transforms/ToTensor_cn.rst index dbe7f3bf56f..e40832af268 100644 --- a/docs/api/paddle/vision/transforms/ToTensor_cn.rst +++ b/docs/api/paddle/vision/transforms/ToTensor_cn.rst @@ -37,22 +37,4 @@ ToTensor 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - - import paddle.vision.transforms as T - import paddle.vision.transforms.functional as F - - fake_img = Image.fromarray((np.random.rand(4, 5, 3) * 255.).astype(np.uint8)) - - transform = T.ToTensor() - - tensor = transform(fake_img) - - print(tensor.shape) - # [3, 4, 5] - - print(tensor.dtype) - # paddle.float32 +COPY-FROM: paddle.vision.transforms.ToTensor diff --git a/docs/api/paddle/vision/transforms/Transpose_cn.rst b/docs/api/paddle/vision/transforms/Transpose_cn.rst index aa147af7938..75e94f63604 100644 --- a/docs/api/paddle/vision/transforms/Transpose_cn.rst +++ b/docs/api/paddle/vision/transforms/Transpose_cn.rst @@ -28,15 +28,4 @@ Transpose 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import Transpose - - transform = Transpose() - - fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8)) - - fake_img = transform(fake_img) - print(fake_img.shape) +COPY-FROM: paddle.vision.transforms.Transpose diff --git a/docs/api/paddle/vision/transforms/normalize_cn.rst b/docs/api/paddle/vision/transforms/normalize_cn.rst index c6d637b72a8..45e149ac2c8 100644 --- a/docs/api/paddle/vision/transforms/normalize_cn.rst +++ b/docs/api/paddle/vision/transforms/normalize_cn.rst @@ -24,19 +24,4 @@ normalize 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import functional as F - - fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - - fake_img = Image.fromarray(fake_img) - - mean = [127.5, 127.5, 127.5] - std = [127.5, 127.5, 127.5] - - normalized_img = F.normalize(fake_img, mean, std, data_format='HWC') - print(normalized_img.max(), normalized_img.min()) - # 0.99215686 -1.0 +COPY-FROM: paddle.vision.transforms.normalize diff --git a/docs/api/paddle/vision/transforms/pad_cn.rst b/docs/api/paddle/vision/transforms/pad_cn.rst index 8ccb9134033..ea2c7e6e337 100644 --- a/docs/api/paddle/vision/transforms/pad_cn.rst +++ b/docs/api/paddle/vision/transforms/pad_cn.rst @@ -23,18 +23,4 @@ pad 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import functional as F - - fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - - fake_img = Image.fromarray(fake_img) - - padded_img = F.pad(fake_img, padding=1) - print(padded_img.size) - - padded_img = F.pad(fake_img, padding=(2, 1)) - print(padded_img.size) +COPY-FROM: paddle.vision.transforms.pad diff --git a/docs/api/paddle/vision/transforms/resize_cn.rst b/docs/api/paddle/vision/transforms/resize_cn.rst index 9d9d25fa809..6d511303f05 100644 --- a/docs/api/paddle/vision/transforms/resize_cn.rst +++ b/docs/api/paddle/vision/transforms/resize_cn.rst @@ -35,20 +35,4 @@ resize 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import functional as F - - fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - - fake_img = Image.fromarray(fake_img) - - converted_img = F.resize(fake_img, 224) - print(converted_img.size) - # (262, 224) - - converted_img = F.resize(fake_img, (200, 150)) - print(converted_img.size) - # (150, 200) +COPY-FROM: paddle.vision.transforms.resize diff --git a/docs/api/paddle/vision/transforms/rotate_cn.rst b/docs/api/paddle/vision/transforms/rotate_cn.rst index efa22a3c93a..0a70e520da5 100644 --- a/docs/api/paddle/vision/transforms/rotate_cn.rst +++ b/docs/api/paddle/vision/transforms/rotate_cn.rst @@ -25,15 +25,4 @@ rotate 代码示例 ::::::::: -.. code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import functional as F - - fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - - fake_img = Image.fromarray(fake_img) - - rotated_img = F.rotate(fake_img, 90) - print(rotated_img.size) +COPY-FROM: paddle.vision.transforms.rotate diff --git a/docs/api/paddle/vision/transforms/to_grayscale_cn.rst b/docs/api/paddle/vision/transforms/to_grayscale_cn.rst index 5e3c339aa91..5a7e6d23d61 100644 --- a/docs/api/paddle/vision/transforms/to_grayscale_cn.rst +++ b/docs/api/paddle/vision/transforms/to_grayscale_cn.rst @@ -24,15 +24,4 @@ to_grayscale 代码示例 ::::::::: -.. 
code-block:: python - - import numpy as np - from PIL import Image - from paddle.vision.transforms import functional as F - - fake_img = (np.random.rand(256, 300, 3) * 255.).astype('uint8') - - fake_img = Image.fromarray(fake_img) - - gray_img = F.to_grayscale(fake_img) - print(gray_img.size) +COPY-FROM: paddle.vision.transforms.to_grayscale From 2cd0bfd898556ba5c3d88258e806e9853e4d5b20 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Tue, 2 Aug 2022 09:49:36 +0000 Subject: [PATCH 16/20] remove redundant labels --- docs/api/paddle/add_n_cn.rst | 3 ++- docs/api/paddle/argmin_cn.rst | 3 ++- docs/api/paddle/assign_cn.rst | 3 ++- docs/api/paddle/bernoulli_cn.rst | 3 ++- docs/api/paddle/count_nonzero_cn.rst | 3 ++- docs/api/paddle/crop_cn.rst | 3 ++- docs/api/paddle/full_cn.rst | 2 +- docs/api/paddle/heaviside_cn.rst | 3 ++- docs/api/paddle/incubate/autotune/set_config_cn.rst | 2 +- docs/api/paddle/linalg/corrcoef_cn.rst | 3 ++- docs/api/paddle/logspace_cn.rst | 3 ++- docs/api/paddle/multiplex_cn.rst | 3 +-- docs/api/paddle/nanmedian_cn.rst | 3 ++- docs/api/paddle/nn/AdaptiveAvgPool1D_cn.rst | 3 ++- docs/api/paddle/nn/ChannelShuffle_cn.rst | 3 ++- docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst | 3 ++- docs/api/paddle/nn/PixelUnshuffle_cn.rst | 3 ++- docs/api/paddle/nn/RReLU_cn.rst | 3 ++- docs/api/paddle/nn/functional/adaptive_avg_pool1d_cn.rst | 3 ++- docs/api/paddle/nn/functional/channel_shuffle_cn.rst | 3 ++- docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst | 3 ++- docs/api/paddle/nn/functional/pixel_unshuffle_cn.rst | 3 ++- docs/api/paddle/nn/functional/rrelu_cn.rst | 3 ++- docs/api/paddle/nn/initializer/Constant_cn.rst | 2 +- docs/api/paddle/nn/initializer/TruncatedNormal_cn.rst | 3 ++- docs/api/paddle/nn/initializer/Uniform_cn.rst | 3 ++- docs/api/paddle/nn/initializer/XavierNormal_cn.rst | 3 ++- docs/api/paddle/nn/initializer/XavierUniform_cn.rst | 3 ++- docs/api/paddle/nn/initializer/calculate_gain_cn.rst | 3 ++- docs/api/paddle/ones_cn.rst | 3 ++- 
docs/api/paddle/profiler/export_chrome_tracing_cn.rst | 2 +- docs/api/paddle/profiler/export_protobuf_cn.rst | 2 +- docs/api/paddle/profiler/load_profiler_result_cn.rst | 2 +- docs/api/paddle/put_along_axis_cn.rst | 3 +-- docs/api/paddle/reshape_cn.rst | 3 +-- docs/api/paddle/sort_cn.rst | 2 +- docs/api/paddle/squeeze_cn.rst | 3 ++- docs/api/paddle/take_along_axis_cn.rst | 2 +- docs/api/paddle/topk_cn.rst | 3 ++- docs/api/paddle/tril_indices_cn.rst | 2 +- docs/api/paddle/unique_cn.rst | 3 ++- docs/api/paddle/vision/ops/PSRoIPool_cn.rst | 3 ++- docs/api/paddle/vision/ops/RoIAlign_cn.rst | 3 ++- docs/api/paddle/vision/ops/psroi_pool_cn.rst | 3 ++- docs/api/paddle/vision/ops/roi_align_cn.rst | 3 ++- 45 files changed, 78 insertions(+), 48 deletions(-) diff --git a/docs/api/paddle/add_n_cn.rst b/docs/api/paddle/add_n_cn.rst index ab39e2bc76d..c15176d5d39 100644 --- a/docs/api/paddle/add_n_cn.rst +++ b/docs/api/paddle/add_n_cn.rst @@ -53,4 +53,5 @@ Tensor,输入 ``inputs`` 求和后的结果,shape 和数据类型与 ``input 代码示例 :::::::::::: -COPY-FROM: paddle.add_n:code-example1 + +COPY-FROM: paddle.add_n diff --git a/docs/api/paddle/argmin_cn.rst b/docs/api/paddle/argmin_cn.rst index 48e34934ff5..0b53114f929 100644 --- a/docs/api/paddle/argmin_cn.rst +++ b/docs/api/paddle/argmin_cn.rst @@ -22,4 +22,5 @@ argmin 示例代码 :::::::: -COPY-FROM: paddle.argmin:code-example1 + +COPY-FROM: paddle.argmin diff --git a/docs/api/paddle/assign_cn.rst b/docs/api/paddle/assign_cn.rst index 32243de34c7..6f34c32e37d 100644 --- a/docs/api/paddle/assign_cn.rst +++ b/docs/api/paddle/assign_cn.rst @@ -23,4 +23,5 @@ Tensor,形状、数据类型和值与 ``x`` 一致。 代码示例 :::::::::::: -COPY-FROM: paddle.assign:assign-example + +COPY-FROM: paddle.assign diff --git a/docs/api/paddle/bernoulli_cn.rst b/docs/api/paddle/bernoulli_cn.rst index c8a68fbd0bb..4b4446f81e4 100644 --- a/docs/api/paddle/bernoulli_cn.rst +++ b/docs/api/paddle/bernoulli_cn.rst @@ -27,4 +27,5 @@ bernoulli 代码示例 :::::::::::: -COPY-FROM: paddle.bernoulli:bernoulli-example + 
+COPY-FROM: paddle.bernoulli diff --git a/docs/api/paddle/count_nonzero_cn.rst b/docs/api/paddle/count_nonzero_cn.rst index a69d6c775e4..ec62b7094b6 100644 --- a/docs/api/paddle/count_nonzero_cn.rst +++ b/docs/api/paddle/count_nonzero_cn.rst @@ -21,4 +21,5 @@ count_nonzero 代码示例 :::::::::: -COPY-FROM: paddle.count_nonzero:count_nonzero-example + +COPY-FROM: paddle.count_nonzero diff --git a/docs/api/paddle/crop_cn.rst b/docs/api/paddle/crop_cn.rst index 05c8934c3a3..68f56a77366 100644 --- a/docs/api/paddle/crop_cn.rst +++ b/docs/api/paddle/crop_cn.rst @@ -70,4 +70,5 @@ crop 代码示例 ::::::::: -COPY-FROM: paddle.crop:code-example1 + +COPY-FROM: paddle.crop diff --git a/docs/api/paddle/full_cn.rst b/docs/api/paddle/full_cn.rst index 1a5cede2625..d6325ed3132 100644 --- a/docs/api/paddle/full_cn.rst +++ b/docs/api/paddle/full_cn.rst @@ -25,4 +25,4 @@ full 代码示例 :::::::::::: -COPY-FROM: paddle.full:code-example1 +COPY-FROM: paddle.full diff --git a/docs/api/paddle/heaviside_cn.rst b/docs/api/paddle/heaviside_cn.rst index f35dada3563..d15a0b5790e 100644 --- a/docs/api/paddle/heaviside_cn.rst +++ b/docs/api/paddle/heaviside_cn.rst @@ -34,4 +34,5 @@ heaviside 代码示例 :::::::::: -COPY-FROM: paddle.heaviside:heaviside-example + +COPY-FROM: paddle.heaviside diff --git a/docs/api/paddle/incubate/autotune/set_config_cn.rst b/docs/api/paddle/incubate/autotune/set_config_cn.rst index 59468ab37f1..c7f28a2dede 100644 --- a/docs/api/paddle/incubate/autotune/set_config_cn.rst +++ b/docs/api/paddle/incubate/autotune/set_config_cn.rst @@ -28,4 +28,4 @@ set_config 代码示例 :::::::::: -COPY-FROM: paddle.incubate.autotune.set_config:auto-tuning +COPY-FROM: paddle.incubate.autotune.set_config diff --git a/docs/api/paddle/linalg/corrcoef_cn.rst b/docs/api/paddle/linalg/corrcoef_cn.rst index e8bcdf99c38..adc63c91406 100644 --- a/docs/api/paddle/linalg/corrcoef_cn.rst +++ b/docs/api/paddle/linalg/corrcoef_cn.rst @@ -27,4 +27,5 @@ corrcoef 代码示例 :::::::::: -COPY-FROM: paddle.linalg.corrcoef:code-example1 + 
+COPY-FROM: paddle.linalg.corrcoef diff --git a/docs/api/paddle/logspace_cn.rst b/docs/api/paddle/logspace_cn.rst index 8d7cd25e7e9..28ba9bcab51 100644 --- a/docs/api/paddle/logspace_cn.rst +++ b/docs/api/paddle/logspace_cn.rst @@ -28,4 +28,5 @@ logspace 代码示例 :::::::::::: -COPY-FROM: paddle.logspace:logspace-example + +COPY-FROM: paddle.logspace diff --git a/docs/api/paddle/multiplex_cn.rst b/docs/api/paddle/multiplex_cn.rst index ec775a79bec..4018ca9e6d2 100644 --- a/docs/api/paddle/multiplex_cn.rst +++ b/docs/api/paddle/multiplex_cn.rst @@ -46,5 +46,4 @@ Tensor,进行 Multiplex 运算后的输出 Tensor。 代码示例 :::::::::::: - -COPY-FROM: paddle.multiplex:code-example1 +COPY-FROM: paddle.multiplex diff --git a/docs/api/paddle/nanmedian_cn.rst b/docs/api/paddle/nanmedian_cn.rst index 60eb64e5b24..6c6367cdba5 100644 --- a/docs/api/paddle/nanmedian_cn.rst +++ b/docs/api/paddle/nanmedian_cn.rst @@ -20,4 +20,5 @@ nanmedian 代码示例 :::::::::: -COPY-FROM: paddle.nanmedian:nanmedian-example + +COPY-FROM: paddle.nanmedian diff --git a/docs/api/paddle/nn/AdaptiveAvgPool1D_cn.rst b/docs/api/paddle/nn/AdaptiveAvgPool1D_cn.rst index 522e4d4d448..79e215d14ec 100755 --- a/docs/api/paddle/nn/AdaptiveAvgPool1D_cn.rst +++ b/docs/api/paddle/nn/AdaptiveAvgPool1D_cn.rst @@ -31,4 +31,5 @@ AdaptiveAvgPool1D 代码示例 ::::::::: -COPY-FROM: paddle.nn.AdaptiveAvgPool1D:AdaptiveAvgPool1D-example + +COPY-FROM: paddle.nn.AdaptiveAvgPool1D diff --git a/docs/api/paddle/nn/ChannelShuffle_cn.rst b/docs/api/paddle/nn/ChannelShuffle_cn.rst index 3864497306a..81279c66c7d 100644 --- a/docs/api/paddle/nn/ChannelShuffle_cn.rst +++ b/docs/api/paddle/nn/ChannelShuffle_cn.rst @@ -24,4 +24,5 @@ ChannelShuffle 代码示例 ::::::::: -COPY-FROM: paddle.nn.ChannelShuffle:ChannelShuffle-example + +COPY-FROM: paddle.nn.ChannelShuffle diff --git a/docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst b/docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst index 7b2325ec80a..b563d1b4800 100644 --- a/docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst +++ 
b/docs/api/paddle/nn/CosineEmbeddingLoss_cn.rst @@ -37,4 +37,5 @@ CosineEmbeddingLoss 代码示例 ::::::::: -COPY-FROM: paddle.nn.CosineEmbeddingLoss:code-example1 + +COPY-FROM: paddle.nn.CosineEmbeddingLoss diff --git a/docs/api/paddle/nn/PixelUnshuffle_cn.rst b/docs/api/paddle/nn/PixelUnshuffle_cn.rst index 381c72010d6..64b0ed03bb7 100644 --- a/docs/api/paddle/nn/PixelUnshuffle_cn.rst +++ b/docs/api/paddle/nn/PixelUnshuffle_cn.rst @@ -25,4 +25,5 @@ PixelUnshuffle 代码示例 ::::::::: -COPY-FROM: paddle.nn.PixelUnshuffle:PixelUnshuffle-example + +COPY-FROM: paddle.nn.PixelUnshuffle diff --git a/docs/api/paddle/nn/RReLU_cn.rst b/docs/api/paddle/nn/RReLU_cn.rst index 1c85588e72b..a19423575dc 100644 --- a/docs/api/paddle/nn/RReLU_cn.rst +++ b/docs/api/paddle/nn/RReLU_cn.rst @@ -48,4 +48,5 @@ RReLU 激活层,应用随机纠正线性单元对神经元激活,参考论 代码示例 ::::::::: -COPY-FROM: paddle.nn.RRelu:RRelu-example + +COPY-FROM: paddle.nn.RRelu diff --git a/docs/api/paddle/nn/functional/adaptive_avg_pool1d_cn.rst b/docs/api/paddle/nn/functional/adaptive_avg_pool1d_cn.rst index 4b4318230f4..3f3b75a3c7e 100755 --- a/docs/api/paddle/nn/functional/adaptive_avg_pool1d_cn.rst +++ b/docs/api/paddle/nn/functional/adaptive_avg_pool1d_cn.rst @@ -25,4 +25,5 @@ Tensor,计算 1D 自适应平均池化的结果,数据类型与输入相同 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.adaptive_avg_pool1d:adaptive_avg_pool1d-example + +COPY-FROM: paddle.nn.functional.adaptive_avg_pool1d diff --git a/docs/api/paddle/nn/functional/channel_shuffle_cn.rst b/docs/api/paddle/nn/functional/channel_shuffle_cn.rst index 56f776fd103..ba9a3f8b9f6 100644 --- a/docs/api/paddle/nn/functional/channel_shuffle_cn.rst +++ b/docs/api/paddle/nn/functional/channel_shuffle_cn.rst @@ -23,4 +23,5 @@ channel_shuffle 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.channel_shuffle:channel_shuffle-example + +COPY-FROM: paddle.nn.functional.channel_shuffle diff --git a/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst b/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst index 
944536ddb7e..cc286b7387b 100644 --- a/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst +++ b/docs/api/paddle/nn/functional/cosine_embedding_loss_cn.rst @@ -38,4 +38,5 @@ cosine_embedding_loss 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.cosine_embedding_loss:code-example1 + +COPY-FROM: paddle.nn.functional.cosine_embedding_loss diff --git a/docs/api/paddle/nn/functional/pixel_unshuffle_cn.rst b/docs/api/paddle/nn/functional/pixel_unshuffle_cn.rst index 43a6916efbf..f5156449b23 100644 --- a/docs/api/paddle/nn/functional/pixel_unshuffle_cn.rst +++ b/docs/api/paddle/nn/functional/pixel_unshuffle_cn.rst @@ -23,4 +23,5 @@ pixel_unshuffle 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.pixel_unshuffle:pixel_unshuffle-example + +COPY-FROM: paddle.nn.functional.pixel_unshuffle diff --git a/docs/api/paddle/nn/functional/rrelu_cn.rst b/docs/api/paddle/nn/functional/rrelu_cn.rst index 4f5af367775..9e6a73ae78e 100644 --- a/docs/api/paddle/nn/functional/rrelu_cn.rst +++ b/docs/api/paddle/nn/functional/rrelu_cn.rst @@ -50,4 +50,5 @@ rrelu 激活函数,应用随机纠正线性单元对神经元激活,参考 代码示例 ::::::::: -COPY-FROM: paddle.nn.functional.rrelu:rrelu-example + +COPY-FROM: paddle.nn.functional.rrelu diff --git a/docs/api/paddle/nn/initializer/Constant_cn.rst b/docs/api/paddle/nn/initializer/Constant_cn.rst index 994d3c1e39b..97721ab1a0a 100644 --- a/docs/api/paddle/nn/initializer/Constant_cn.rst +++ b/docs/api/paddle/nn/initializer/Constant_cn.rst @@ -22,4 +22,4 @@ Constant 代码示例 :::::::::::: -COPY-FROM: paddle.nn.initializer.Constant:code-example1 +COPY-FROM: paddle.nn.initializer.Constant diff --git a/docs/api/paddle/nn/initializer/TruncatedNormal_cn.rst b/docs/api/paddle/nn/initializer/TruncatedNormal_cn.rst index b715f404513..b7ea46afeae 100644 --- a/docs/api/paddle/nn/initializer/TruncatedNormal_cn.rst +++ b/docs/api/paddle/nn/initializer/TruncatedNormal_cn.rst @@ -20,4 +20,5 @@ TruncatedNormal 代码示例 :::::::::::: -COPY-FROM: 
paddle.nn.initializer.TruncatedNormal:initializer_TruncatedNormal-example + +COPY-FROM: paddle.nn.initializer.TruncatedNormal diff --git a/docs/api/paddle/nn/initializer/Uniform_cn.rst b/docs/api/paddle/nn/initializer/Uniform_cn.rst index 544d5e872c7..24f2d981474 100644 --- a/docs/api/paddle/nn/initializer/Uniform_cn.rst +++ b/docs/api/paddle/nn/initializer/Uniform_cn.rst @@ -21,4 +21,5 @@ Uniform 代码示例 :::::::::::: -COPY-FROM: paddle.nn.initializer.Uniform:initializer_Uniform-example + +COPY-FROM: paddle.nn.initializer.Uniform diff --git a/docs/api/paddle/nn/initializer/XavierNormal_cn.rst b/docs/api/paddle/nn/initializer/XavierNormal_cn.rst index 2cd69df4cee..9a66678cda3 100644 --- a/docs/api/paddle/nn/initializer/XavierNormal_cn.rst +++ b/docs/api/paddle/nn/initializer/XavierNormal_cn.rst @@ -28,4 +28,5 @@ XavierNormal 代码示例 :::::::::::: -COPY-FROM: paddle.nn.initializer.XavierNormal:initializer_XavierNormal-example + +COPY-FROM: paddle.nn.initializer.XavierNormal diff --git a/docs/api/paddle/nn/initializer/XavierUniform_cn.rst b/docs/api/paddle/nn/initializer/XavierUniform_cn.rst index 42a4b494f6a..765f3d4fe0d 100644 --- a/docs/api/paddle/nn/initializer/XavierUniform_cn.rst +++ b/docs/api/paddle/nn/initializer/XavierUniform_cn.rst @@ -28,4 +28,5 @@ XavierUniform 代码示例 :::::::::::: -COPY-FROM: paddle.nn.initializer.XavierUniform:initializer_XavierUniform-example + +COPY-FROM: paddle.nn.initializer.XavierUniform diff --git a/docs/api/paddle/nn/initializer/calculate_gain_cn.rst b/docs/api/paddle/nn/initializer/calculate_gain_cn.rst index 3c643b24815..d246a122a34 100644 --- a/docs/api/paddle/nn/initializer/calculate_gain_cn.rst +++ b/docs/api/paddle/nn/initializer/calculate_gain_cn.rst @@ -18,4 +18,5 @@ Python float 数,推荐的增益值。 代码示例 ::::::::: -COPY-FROM: paddle.nn.initializer.calculate_gain:code-example1 + +COPY-FROM: paddle.nn.initializer.calculate_gain diff --git a/docs/api/paddle/ones_cn.rst b/docs/api/paddle/ones_cn.rst index 0eb91a876d9..2dfc89ba211 100644 --- 
a/docs/api/paddle/ones_cn.rst +++ b/docs/api/paddle/ones_cn.rst @@ -23,4 +23,5 @@ Tensor,每个元素都是 1,形状为 ``shape``,数据类型为 ``dtype`` 代码示例 ::::::::: -COPY-FROM: paddle.ones:ones-example + +COPY-FROM: paddle.ones diff --git a/docs/api/paddle/profiler/export_chrome_tracing_cn.rst b/docs/api/paddle/profiler/export_chrome_tracing_cn.rst index d3c639579a9..711a158ad8b 100644 --- a/docs/api/paddle/profiler/export_chrome_tracing_cn.rst +++ b/docs/api/paddle/profiler/export_chrome_tracing_cn.rst @@ -25,4 +25,4 @@ export_chrome_tracing 用于 :ref:`性能分析器 ` 的 on_trace_ready 参数。 -COPY-FROM: paddle.profiler.export_chrome_tracing:code-example1 +COPY-FROM: paddle.profiler.export_chrome_tracing diff --git a/docs/api/paddle/profiler/export_protobuf_cn.rst b/docs/api/paddle/profiler/export_protobuf_cn.rst index c94cf0b4ad9..396f97024c4 100644 --- a/docs/api/paddle/profiler/export_protobuf_cn.rst +++ b/docs/api/paddle/profiler/export_protobuf_cn.rst @@ -25,4 +25,4 @@ export_protobuf 用于 :ref:`性能分析器 ` 的 on_trace_ready 参数。 -COPY-FROM: paddle.profiler.export_protobuf:code-example1 +COPY-FROM: paddle.profiler.export_protobuf diff --git a/docs/api/paddle/profiler/load_profiler_result_cn.rst b/docs/api/paddle/profiler/load_profiler_result_cn.rst index d98761af04e..c02be46abe1 100644 --- a/docs/api/paddle/profiler/load_profiler_result_cn.rst +++ b/docs/api/paddle/profiler/load_profiler_result_cn.rst @@ -20,4 +20,4 @@ ProfilerResult 对象,底层存储性能数据的结构。 代码示例 :::::::::: -COPY-FROM: paddle.profiler.load_profiler_result:code-example1 +COPY-FROM: paddle.profiler.load_profiler_result diff --git a/docs/api/paddle/put_along_axis_cn.rst b/docs/api/paddle/put_along_axis_cn.rst index 4127ddc4384..3004da5beb8 100644 --- a/docs/api/paddle/put_along_axis_cn.rst +++ b/docs/api/paddle/put_along_axis_cn.rst @@ -23,5 +23,4 @@ put_along_axis 代码示例 ::::::::: - -COPY-FROM: paddle.put_along_axis:code-example1 +COPY-FROM: paddle.put_along_axis diff --git a/docs/api/paddle/reshape_cn.rst b/docs/api/paddle/reshape_cn.rst index 
5dcabb8d19c..759e26b8cdd 100644 --- a/docs/api/paddle/reshape_cn.rst +++ b/docs/api/paddle/reshape_cn.rst @@ -39,5 +39,4 @@ reshape 代码示例 :::::::::::: - -COPY-FROM: paddle.reshape:code-example1 +COPY-FROM: paddle.reshape diff --git a/docs/api/paddle/sort_cn.rst b/docs/api/paddle/sort_cn.rst index 851915263e7..eb04c6d6bf9 100644 --- a/docs/api/paddle/sort_cn.rst +++ b/docs/api/paddle/sort_cn.rst @@ -26,4 +26,4 @@ Tensor,排序后的输出(与 ``x`` 维度相同、数据类型相同)。 代码示例 :::::::::::: -COPY-FROM: paddle.sort:code-example1 +COPY-FROM: paddle.sort diff --git a/docs/api/paddle/squeeze_cn.rst b/docs/api/paddle/squeeze_cn.rst index c8f9362fac8..20731220c6c 100644 --- a/docs/api/paddle/squeeze_cn.rst +++ b/docs/api/paddle/squeeze_cn.rst @@ -56,4 +56,5 @@ squeeze 代码示例 ::::::::: -COPY-FROM: paddle.squeeze:code-example1 + +COPY-FROM: paddle.squeeze diff --git a/docs/api/paddle/take_along_axis_cn.rst b/docs/api/paddle/take_along_axis_cn.rst index 9e6fa99c9df..1095c3fc789 100644 --- a/docs/api/paddle/take_along_axis_cn.rst +++ b/docs/api/paddle/take_along_axis_cn.rst @@ -22,4 +22,4 @@ take_along_axis ::::::::: -COPY-FROM: paddle.take_along_axis:code-example1 +COPY-FROM: paddle.take_along_axis diff --git a/docs/api/paddle/topk_cn.rst b/docs/api/paddle/topk_cn.rst index fb5d5926b6b..1fb086c0adb 100644 --- a/docs/api/paddle/topk_cn.rst +++ b/docs/api/paddle/topk_cn.rst @@ -24,4 +24,5 @@ tuple(Tensor), 返回 topk 的结果和结果的索引信息。结果的数据 代码示例 ::::::::: -COPY-FROM: paddle.topk:code-example1 + +COPY-FROM: paddle.topk diff --git a/docs/api/paddle/tril_indices_cn.rst b/docs/api/paddle/tril_indices_cn.rst index 3f0357654f0..2c249102a4a 100644 --- a/docs/api/paddle/tril_indices_cn.rst +++ b/docs/api/paddle/tril_indices_cn.rst @@ -26,4 +26,4 @@ Tensor,二维矩阵的下三角矩阵行坐标和列坐标。数据类型和 代码示例 ::::::::: -COPY-FROM: paddle.tril_indices:tril_indices-example +COPY-FROM: paddle.tril_indices diff --git a/docs/api/paddle/unique_cn.rst b/docs/api/paddle/unique_cn.rst index 38a090a7569..0645a4acaa1 100644 --- 
a/docs/api/paddle/unique_cn.rst +++ b/docs/api/paddle/unique_cn.rst @@ -29,4 +29,5 @@ unique 代码示例 :::::::::::: -COPY-FROM: paddle.unique:code-example1 + +COPY-FROM: paddle.unique diff --git a/docs/api/paddle/vision/ops/PSRoIPool_cn.rst b/docs/api/paddle/vision/ops/PSRoIPool_cn.rst index e017654296b..722bd3304ae 100644 --- a/docs/api/paddle/vision/ops/PSRoIPool_cn.rst +++ b/docs/api/paddle/vision/ops/PSRoIPool_cn.rst @@ -25,4 +25,5 @@ PSRoIPool 代码示例 ::::::::: -COPY-FROM: paddle.vision.ops.PSRoIPool:code-example1 + +COPY-FROM: paddle.vision.ops.PSRoIPool diff --git a/docs/api/paddle/vision/ops/RoIAlign_cn.rst b/docs/api/paddle/vision/ops/RoIAlign_cn.rst index 3458aeee637..d8da4d62acd 100644 --- a/docs/api/paddle/vision/ops/RoIAlign_cn.rst +++ b/docs/api/paddle/vision/ops/RoIAlign_cn.rst @@ -25,4 +25,5 @@ Tensor,形状为(num_boxes, channels, pooled_h, pooled_w)。 代码示例 ::::::::: -COPY-FROM: paddle.vision.ops.RoIAlign:code-example1 + +COPY-FROM: paddle.vision.ops.RoIAlign diff --git a/docs/api/paddle/vision/ops/psroi_pool_cn.rst b/docs/api/paddle/vision/ops/psroi_pool_cn.rst index 59252cd113f..476e319d867 100644 --- a/docs/api/paddle/vision/ops/psroi_pool_cn.rst +++ b/docs/api/paddle/vision/ops/psroi_pool_cn.rst @@ -25,4 +25,5 @@ PSROIPooling 由 R-FCN 提出。更多详细信息,请参阅 https://arxiv.org 代码示例 ::::::::: -COPY-FROM: paddle.vision.ops.psroi_pool:code-example1 + +COPY-FROM: paddle.vision.ops.psroi_pool diff --git a/docs/api/paddle/vision/ops/roi_align_cn.rst b/docs/api/paddle/vision/ops/roi_align_cn.rst index 4ded1ffd983..4f42b712e8a 100644 --- a/docs/api/paddle/vision/ops/roi_align_cn.rst +++ b/docs/api/paddle/vision/ops/roi_align_cn.rst @@ -26,4 +26,5 @@ RoI Align 是在指定输入的感兴趣区域上执行双线性插值以获得 代码示例 ::::::::: -COPY-FROM: paddle.vision.ops.roi_align:code-example1 + +COPY-FROM: paddle.vision.ops.roi_align From 7cb6afffffd2172fc4828320bd69a8cf12d00cf2 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Tue, 2 Aug 2022 11:19:54 +0000 Subject: [PATCH 17/20] more COPY-FROM (try multiple code 
example) --- docs/api/paddle/diag_cn.rst | 42 ++----------------------- docs/api/paddle/diagflat_cn.rst | 54 ++------------------------------- 2 files changed, 4 insertions(+), 92 deletions(-) diff --git a/docs/api/paddle/diag_cn.rst b/docs/api/paddle/diag_cn.rst index b0da9a9c46b..a379ba9fe5b 100644 --- a/docs/api/paddle/diag_cn.rst +++ b/docs/api/paddle/diag_cn.rst @@ -31,47 +31,9 @@ diag 代码示例 1 ::::::::: -.. code-block:: python - - import paddle - - x = paddle.to_tensor([1, 2, 3]) - y = paddle.diag(x) - print(y) - # [[1 0 0] - # [0 2 0] - # [0 0 3]] - - y = paddle.diag(x, offset=1) - print(y) - # [[0 1 0 0] - # [0 0 2 0] - # [0 0 0 3] - # [0 0 0 0]] - - y = paddle.diag(x, padding_value=6) - print(y) - # [[1 6 6] - # [6 2 6] - # [6 6 3]] - +COPY-FROM: paddle.diag:code-example-1 代码示例 2 ::::::::: -.. code-block:: python - - import paddle - - x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]]) - y = paddle.diag(x) - print(y) - # [1 5] - - y = paddle.diag(x, offset=1) - print(y) - # [2 6] - - y = paddle.diag(x, offset=-1) - print(y) - # [4] +COPY-FROM: paddle.diag:code-example-2 diff --git a/docs/api/paddle/diagflat_cn.rst b/docs/api/paddle/diagflat_cn.rst index 226c783cad2..b0176a97ce2 100644 --- a/docs/api/paddle/diagflat_cn.rst +++ b/docs/api/paddle/diagflat_cn.rst @@ -30,59 +30,9 @@ diagflat 代码示例 1 ::::::::: -.. code-block:: python - - import paddle - - x = paddle.to_tensor([1, 2, 3]) - y = paddle.diagflat(x) - print(y.numpy()) - # [[1 0 0] - # [0 2 0] - # [0 0 3]] - - y = paddle.diagflat(x, offset=1) - print(y.numpy()) - # [[0 1 0 0] - # [0 0 2 0] - # [0 0 0 3] - # [0 0 0 0]] - - y = paddle.diagflat(x, offset=-1) - print(y.numpy()) - # [[0 0 0 0] - # [1 0 0 0] - # [0 2 0 0] - # [0 0 3 0]] - +COPY-FROM: paddle.diagflat:code-example-1 代码示例 2 ::::::::: -.. 
code-block:: python - - import paddle - - x = paddle.to_tensor([[1, 2], [3, 4]]) - y = paddle.diagflat(x) - print(y.numpy()) - # [[1 0 0 0] - # [0 2 0 0] - # [0 0 3 0] - # [0 0 0 4]] - - y = paddle.diagflat(x, offset=1) - print(y.numpy()) - # [[0 1 0 0 0] - # [0 0 2 0 0] - # [0 0 0 3 0] - # [0 0 0 0 4] - # [0 0 0 0 0]] - - y = paddle.diagflat(x, offset=-1) - print(y.numpy()) - # [[0 0 0 0 0] - # [1 0 0 0 0] - # [0 2 0 0 0] - # [0 0 3 0 0] - # [0 0 0 4 0]] +COPY-FROM: paddle.diagflat:code-example-2 From effb3804bb70b3cd97803132efa24fc23001dba0 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 3 Aug 2022 12:38:18 +0000 Subject: [PATCH 18/20] restore HDFSClient changes --- .../distributed/fleet/utils/HDFSClient_cn.rst | 202 +++++++++++++++--- 1 file changed, 169 insertions(+), 33 deletions(-) diff --git a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst index c59edbb6372..3e562756f1c 100644 --- a/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst +++ b/docs/api/paddle/distributed/fleet/utils/HDFSClient_cn.rst @@ -4,18 +4,29 @@ HDFSClient ------------------------------- .. py:class:: paddle.distributed.fleet.utils.HDFSClient -一个 HADOOP 文件系统工具类。 +一个HADOOP文件系统工具类。 参数 :::::::::::: - - **hadoop_home** (str):HADOOP HOME 地址。 - - **configs** (dict): HADOOP 文件系统配置。需包含 `fs.default.name` 和 `hadoop.job.ugi` 这两个字段。 + - **hadoop_home** (str):HADOOP HOME地址。 + - **configs** (dict): HADOOP文件系统配置。需包含 `fs.default.name` 和 `hadoop.job.ugi` 这两个字段。 代码示例 :::::::::::: -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient +.. 
code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + hadoop_home = "/home/client/hadoop-client/hadoop/" + + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + client.ls_dir("hdfs:/test_hdfs_client") 方法 :::::::::::: @@ -25,15 +36,26 @@ ls_dir(fs_path) **参数** - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **返回** - - Tuple,一个包含所有子目录和文件名的 2-Tuple,格式形如:([subdirname1, subdirname1, ...], [filename1, filename2, ...])。 + - Tuple,一个包含所有子目录和文件名的2-Tuple,格式形如:([subdirname1, subdirname1, ...], [filename1, filename2, ...])。 **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.ls_dir +.. code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + subdirs, files = client.ls_dir("hdfs:/test_hdfs_client") mkdirs(fs_path) ''''''''' @@ -41,23 +63,45 @@ mkdirs(fs_path) **参数** - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.mkdirs +.. code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + client.mkdirs("hdfs:/test_hdfs_client") delete(fs_path) ''''''''' -删除 HADOOP 文件(或目录)。 +删除HADOOP文件(或目录)。 **参数** - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.delete +.. 
code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + client.delete("hdfs:/test_hdfs_client") is_file(fs_path) ''''''''' @@ -65,7 +109,7 @@ is_file(fs_path) **参数** - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **返回** @@ -73,7 +117,18 @@ is_file(fs_path) **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.is_file +.. code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + ret = client.is_file("hdfs:/test_hdfs_client") is_dir(fs_path) ''''''''' @@ -81,7 +136,7 @@ is_dir(fs_path) **参数** - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **返回** @@ -89,7 +144,18 @@ is_dir(fs_path) **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.is_dir +.. code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + ret = client.is_dir("hdfs:/test_hdfs_client") is_exist(fs_path) ''''''''' @@ -97,7 +163,7 @@ is_exist(fs_path) **参数** - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **返回** @@ -105,68 +171,124 @@ is_exist(fs_path) **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.is_exist +..
code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + ret = client.is_exist("hdfs:/test_hdfs_client") upload(local_path, fs_path) ''''''''' -上传本地文件至 HADOOP 文件系统。 +上传本地文件至HADOOP文件系统。 **参数** - **local_path** (str):本地文件路径。 - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.upload +.. code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + client.upload("test_hdfs_client", "hdfs:/test_hdfs_client") download(fs_path, local_path) ''''''''' -下载 HADOOP 文件至本地文件系统。 +下载HADOOP文件至本地文件系统。 **参数** - **local_path** (str):本地文件路径。 - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.download +.. code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + client.download("hdfs:/test_hdfs_client", "./") + touch(fs_path, exist_ok=True) ''''''''' -创建一个 HADOOP 文件。 +创建一个HADOOP文件。 **参数** - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 - **exist_ok** (bool):路径已存在时程序是否报错。若 `exist_ok = True`,则直接返回,反之则抛出文件存在的异常,默认不抛出异常。 **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.touch +.. 
code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + client.touch("hdfs:/test_hdfs_client") mv(fs_src_path, fs_dst_path, overwrite=False) ''''''''' -HADOOP 系统文件移动。 +HADOOP系统文件移动。 **参数** - **fs_src_path** (str):移动前源文件路径名。 - **fs_dst_path** (str):移动后目标文件路径名。 - **overwrite** (bool):若目标文件已存在,是否删除进行重写,默认不重写并抛出异常。 - + **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.mv +.. code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + client.mv("hdfs:/test_hdfs_client", "hdfs:/test_hdfs_client2") list_dirs(fs_path) ''''''''' -列出 HADOOP 文件路径下所有的子目录。 +列出HADOOP文件路径下所有的子目录。 **参数** - - **fs_path** (str): HADOOP 文件路径。 + - **fs_path** (str): HADOOP文件路径。 **返回** @@ -174,4 +296,18 @@ list_dirs(fs_path) **代码示例** -COPY-FROM: paddle.distributed.fleet.utils.HDFSClient.list_dirs +.. 
code-block:: python + + from paddle.distributed.fleet.utils import HDFSClient + + hadoop_home = "/home/client/hadoop-client/hadoop/" + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + subdirs = client.list_dirs("hdfs:/test_hdfs_client") + + + From 155cc1dde2288299ed5c6d6ee5e073978c667cfc Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 3 Aug 2022 12:40:14 +0000 Subject: [PATCH 19/20] fix style issues from upstream --- .../ops/distribute_fpn_proposals_cn.rst | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/api/paddle/vision/ops/distribute_fpn_proposals_cn.rst b/docs/api/paddle/vision/ops/distribute_fpn_proposals_cn.rst index 783c552d824..515c35e90d1 100644 --- a/docs/api/paddle/vision/ops/distribute_fpn_proposals_cn.rst +++ b/docs/api/paddle/vision/ops/distribute_fpn_proposals_cn.rst @@ -7,33 +7,33 @@ distribute_fpn_proposals -在 Feature Pyramid Networks(FPN)模型中,需要依据proposal的尺度和参考尺度与级别将所有proposal分配到不同的FPN级别中。此外,为了恢复proposals的顺序,我们返回一个数组,该数组表示当前proposals中的原始RoIs索引。计算每个RoI的FPN级别的公式如下: +在 Feature Pyramid Networks(FPN)模型中,需要依据 proposal 的尺度和参考尺度与级别将所有 proposal 分配到不同的 FPN 级别中。此外,为了恢复 proposals 的顺序,我们返回一个数组,该数组表示当前 proposals 中的原始 RoIs 索引。计算每个 RoI 的 FPN 级别的公式如下: .. 
math:: roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}\\ level = floor(&\log(\frac{roi\_scale}{refer\_scale}) + refer\_level) -其中BBoxArea为用来计算每个RoI的区域的方法。 +其中 BBoxArea 为用来计算每个 RoI 的区域的方法。 参数 :::::::::::: - - **fpn_rois** (Tensor) - 输入的FPN RoIs。是形状为[N,4]的2-D Tensor,其中N为检测框的个数,数据类型为float32或float64。 - - **min_level** (int) - 产生proposal的最低级别FPN层。 - - **max_level** (int) - 产生proposal的最高级别FPN层。 - - **refer_level** (int) - 具有指定比例的FPN层的引用级别。 - - **refer_scale** (int) - 具有指定级别的FPN层的引用比例。 - - **pixel_offset** (bool, 可选)- 是否有像素偏移。如果是True, 在计算形状大小时时会偏移1。默认值为False。 - - **rois_num** (Tensor, 可选): 每张图所包含的RoI数量。是形状为[B]的1-D Tensor, 数据类型为int32。其中B是图像数量。如果``rois_num`` 不为None, 将会返回一个形状为[B]的1-D Tensor, 其中每个元素是每张图在对应层级上的RoI数量。默认值为None。 - - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 + - **fpn_rois** (Tensor) - 输入的 FPN RoIs。是形状为[N,4]的 2-D Tensor,其中 N 为检测框的个数,数据类型为 float32 或 float64。 + - **min_level** (int) - 产生 proposal 的最低级别 FPN 层。 + - **max_level** (int) - 产生 proposal 的最高级别 FPN 层。 + - **refer_level** (int) - 具有指定比例的 FPN 层的引用级别。 + - **refer_scale** (int) - 具有指定级别的 FPN 层的引用比例。 + - **pixel_offset** (bool, 可选)- 是否有像素偏移。如果是 True, 在计算形状大小时会偏移 1。默认值为 False。 + - **rois_num** (Tensor, 可选): 每张图所包含的 RoI 数量。是形状为[B]的 1-D Tensor, 数据类型为 int32。其中 B 是图像数量。如果``rois_num`` 不为 None, 将会返回一个形状为[B]的 1-D Tensor, 其中每个元素是每张图在对应层级上的 RoI 数量。默认值为 None。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为 None。 返回 :::::::::::: -- **multi_rois** (List) - 长度为(max_level-min_level+1)的列表,其中元素为Variable,维度为[M, 4]的2-D LoDTensor,M为每个级别proposal的个数,数据类型为float32或float64。表示每个FPN级别包含的proposals。 -- **restore_ind** (Tensor) - 维度为[N,1]的Tensor,N是总rois的数量。数据类型为int32。它用于恢复fpn_rois的顺序。 -- **rois_num_per_level** (List) - 一个包含1-D Tensor的List。其中每个元素是每张图在对应层级上的RoI数量。数据类型为int32。 +- **multi_rois** (List) - 长度为(max_level-min_level+1)的列表,其中元素为 Variable,维度为[M, 4]的 2-D LoDTensor,M 为每个级别 proposal 的个数,数据类型为 float32 或 float64。表示每个 FPN 级别包含的 proposals。 +- **restore_ind** (Tensor) - 维度为[N,1]的 Tensor,N 是总 rois 的数量。数据类型为
int32。它用于恢复 fpn_rois 的顺序。 +- **rois_num_per_level** (List) - 一个包含 1-D Tensor 的 List。其中每个元素是每张图在对应层级上的 RoI 数量。数据类型为 int32。 代码示例 :::::::::::: From 4f2d7982fbf2530f5006ac2fa8d5b558a2e54064 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 3 Aug 2022 16:08:38 +0000 Subject: [PATCH 20/20] use COPY-FROM to avoid ci issue --- docs/api/paddle/nn/SyncBatchNorm_cn.rst | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/docs/api/paddle/nn/SyncBatchNorm_cn.rst b/docs/api/paddle/nn/SyncBatchNorm_cn.rst index a50f7170732..ffc2328681e 100644 --- a/docs/api/paddle/nn/SyncBatchNorm_cn.rst +++ b/docs/api/paddle/nn/SyncBatchNorm_cn.rst @@ -56,18 +56,7 @@ SyncBatchNorm 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import paddle.nn as nn - import numpy as np - x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32') - x = paddle.to_tensor(x) - if paddle.is_compiled_with_cuda(): - sync_batch_norm = nn.SyncBatchNorm(2) - hidden1 = sync_batch_norm(x) - print(hidden1) - # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]] +COPY-FROM: paddle.nn.SyncBatchNorm 方法 ::::::::: @@ -89,9 +78,4 @@ convert_sync_batchnorm(layer) 代码示例 :::::::::::: -.. code-block:: python - - import paddle - import paddle.nn as nn - model = nn.Sequential(nn.Conv2D(3, 5, 3), nn.BatchNorm2D(5)) - sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(model) +COPY-FROM: paddle.nn.SyncBatchNorm.convert_sync_batchnorm