remove nets.py in fluid #51717
Changes from 8 commits
Original file line number | Diff line number | Diff line change
@@ -13,15 +13,12 @@
# limitations under the License.

import paddle
from . import layers
from .data_feeder import check_variable_and_dtype, convert_dtype
from ..utils import deprecated
import paddle

from ...data_feeder import check_variable_and_dtype, convert_dtype

__all__ = [
    "simple_img_conv_pool",
    "sequence_conv_pool",
    "glu",
    "scaled_dot_product_attention",
    "img_conv_group",
]
@@ -356,60 +353,11 @@ def sequence_conv_pool(
    return pool_out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.glu")
def glu(input, dim=-1):
    r"""
    :api_attr: Static Graph

    The Gated Linear Units(GLU) composed by :ref:`api_fluid_layers_split` ,
    :ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul` .
    Specifically, GLU will split the input into two equal-sized parts,
    :math:`a` and :math:`b`, along the given dimension and then compute as
    following:

    .. math::

        {GLU}(a, b) = a \otimes \sigma(b)

    Refer to `Language Modeling with Gated Convolutional Networks
    <https://arxiv.org/pdf/1612.08083.pdf>`_.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
            The supported data types include float32, float64
            and float16 (only for GPU).
        dim (int, optional): The dimension along which to split. If :math:`dim < 0`, the
            dimension to split along is :math:`rank(input) + dim`. Default -1.

    Returns:
        Variable: Variable with half the size and same data type of input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()

            data = fluid.data(
                name="words", shape=[-1, 6, 3, 9], dtype="float32")
            # shape of output: [-1, 3, 3, 9]
            output = fluid.nets.glu(input=data, dim=1)
    """
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64'], "glu"
    )
    a, b = paddle.split(input, num_or_sections=2, axis=dim)
    act_b = paddle.nn.functional.sigmoid(x=b)
    out = paddle.multiply(x=a, y=act_b)
    return out
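For anyone still calling the removed helper, the deprecation decorator above points to paddle.nn.functional.glu. A minimal migration sketch, with an illustrative input shape that is not taken from the PR itself:

```python
import paddle

# Illustrative 4-D tensor; the split axis must have an even size.
x = paddle.randn([2, 6, 3, 9], dtype="float32")

# Public replacement for the removed fluid.nets.glu(input=x, dim=1).
y = paddle.nn.functional.glu(x, axis=1)

# The same computation spelled out, mirroring the removed implementation:
# split into a and b along the axis, then a * sigmoid(b).
a, b = paddle.split(x, num_or_sections=2, axis=1)
y_ref = paddle.multiply(a, paddle.nn.functional.sigmoid(b))

print(y.shape)  # [2, 3, 3, 9]
```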
def scaled_dot_product_attention(
    queries, keys, values, num_heads=1, dropout_rate=0.0
):
    r"""
    :api_attr: Static Graph

    This interface Multi-Head Attention using scaled dot product.
    Attention mechanism can be seen as mapping a query and a set of key-value

Review comment: We need to clarify what each unit test is for, and from that determine what the functions it uses are for. In principle, we do not keep a function just for the sake of a single unit test:
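For the scaled dot-product attention that the remaining docstring describes, the core computation is softmax(Q K^T / sqrt(d)) V. Below is a minimal single-head sketch in plain Paddle with illustrative shapes; it is not the moved function's actual implementation:

```python
import math

import paddle

# Illustrative shapes: batch=2, sequence length=4, feature size=8.
q = paddle.randn([2, 4, 8])
k = paddle.randn([2, 4, 8])
v = paddle.randn([2, 4, 8])

# Scaled dot-product attention: softmax(Q K^T / sqrt(d)) V.
scores = paddle.matmul(q, k, transpose_y=True) / math.sqrt(q.shape[-1])
weights = paddle.nn.functional.softmax(scores, axis=-1)
out = paddle.matmul(weights, v)

print(out.shape)  # [2, 4, 8]
```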
Original file line number | Diff line number | Diff line change
@@ -20,6 +20,7 @@

import paddle
import paddle.fluid as fluid
import paddle.fluid.tests.unittests.nets as nets

BATCH_SIZE = 64

@@ -40,7 +41,7 @@ def convolutional_neural_network(use_py_reader):
        use_double_buffer=False,
    )

    conv_pool_1 = fluid.nets.simple_img_conv_pool(
    conv_pool_1 = nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,

Review comment: Judging from the file name, this unit test targets the executor, so it should not be removed in this PR.
Author reply: When I first submitted the PR there was a conflict, and I found that the whole file test_async_ssa_graph_executor_mnist.py had already been deleted, so I deleted this file to resolve the conflict.

@@ -49,7 +50,7 @@ def convolutional_neural_network(use_py_reader):
        act="relu",
    )
    conv_pool_1 = paddle.static.nn.batch_norm(conv_pool_1)
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
    conv_pool_2 = nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
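For context on what this test keeps using: simple_img_conv_pool is essentially a convolution followed by a pooling layer. A rough dygraph equivalent built from public paddle.nn layers is sketched below; the layer choices and defaults are assumptions, not code from this PR:

```python
import paddle
import paddle.nn as nn


class ConvPool(nn.Layer):
    """Rough stand-in for what nets.simple_img_conv_pool composes:
    a Conv2D, an activation, then a max pooling layer."""

    def __init__(self, in_channels, num_filters, filter_size,
                 pool_size=2, pool_stride=2):
        super().__init__()
        self.conv = nn.Conv2D(in_channels, num_filters, filter_size)
        self.act = nn.ReLU()
        self.pool = nn.MaxPool2D(kernel_size=pool_size, stride=pool_stride)

    def forward(self, x):
        return self.pool(self.act(self.conv(x)))


# Example: MNIST-shaped input, mirroring the test's first conv block.
x = paddle.randn([4, 1, 28, 28])
block = ConvPool(in_channels=1, num_filters=20, filter_size=5)
print(block(x).shape)  # [4, 20, 12, 12]
```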
Original file line number | Diff line number | Diff line change
@@ -41,7 +41,7 @@ def setUp(self):
    def check_identity(self, place):
        with dg.guard(place):
            x_var = dg.to_variable(self.x)
            y_var = fluid.nets.glu(x_var, self.dim)
            y_var = paddle.nn.functional.glu(x_var, self.dim)

            y_np = y_var.numpy()

        np.testing.assert_allclose(y_np, self.out)

Review comment: There is already a test for this below.
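The check_identity test above compares the Paddle result against a precomputed self.out. A plausible NumPy reference for that expected value, sketched here as an assumption rather than the test's actual setUp code:

```python
import numpy as np


def glu_reference(x, dim=-1):
    """NumPy reference for GLU: split x into halves a and b along dim,
    then return a * sigmoid(b)."""
    a, b = np.split(x, 2, axis=dim)
    return a * (1.0 / (1.0 + np.exp(-b)))


x = np.random.randn(5, 20).astype("float32")
out = glu_reference(x, dim=-1)
print(out.shape)  # (5, 10)
```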
Original file line number | Diff line number | Diff line change
@@ -23,7 +23,7 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
import paddle.fluid.tests.unittests.nets as nets
import paddle.nn.functional as F
from paddle.fluid import core
from paddle.fluid.dygraph import base, to_variable

Review comment: Could we directly use … here?
Review comment: There is no need to keep __all__.