-
Notifications
You must be signed in to change notification settings - Fork 5.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Supplement several interfaces of static Variable to be consistent with dygraph Tensor #33330
Changes from 18 commits
e684e65
28e4836
fcf1296
7456c18
32b24d9
6711e56
9b16add
e73f754
e4adeed
2764be8
b2a7a1b
16fdb9a
7227cff
f2314ee
ec409db
48af9a7
6b0f8b3
cc37d2d
67837ee
1850dfc
13aacdb
9f3475d
dd05daa
064cb42
3b1d04c
58a9374
45c0ac4
217459f
9e75f25
58e7fcd
60c63bf
d88cec9
85d4e88
bdbeb60
2493b05
7c1d012
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,75 @@ | ||
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. */ | ||
|
||
#include "paddle/fluid/operators/share_data_op.h" | ||
#include "paddle/fluid/framework/op_registry.h" | ||
|
||
namespace paddle { | ||
namespace operators { | ||
|
||
class ShareDataOp : public framework::OperatorWithKernel { | ||
public: | ||
using framework::OperatorWithKernel::OperatorWithKernel; | ||
|
||
void InferShape(framework::InferShapeContext *ctx) const override { | ||
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "ShareData"); | ||
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ShareData"); | ||
auto in_type = ctx->GetInputsVarType("Input")[0]; | ||
auto out_type = ctx->GetOutputsVarType("Out")[0]; | ||
|
||
PADDLE_ENFORCE_EQ( | ||
in_type == framework::proto::VarType::LOD_TENSOR || | ||
in_type == framework::proto::VarType::SELECTED_ROWS, | ||
true, | ||
platform::errors::InvalidArgument( | ||
"Type of Variable[Input] must be LoDTensor or SelectedRows!")); | ||
PADDLE_ENFORCE_EQ( | ||
in_type, out_type, | ||
platform::errors::InvalidArgument( | ||
"The type of input (Input) and output (Out) are inconsistent.")); | ||
|
||
ctx->ShareDim("Input", "Out"); | ||
} | ||
}; | ||
|
||
// Declares the share_data op's proto: one input, one output, and the
// user-facing documentation string.
class ShareDataOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Input", "The input tensor.");
    AddOutput("Out",
              "The returned tensor, will share data with the input Tensor.");
    // NOTE: doc text rephrased from "share data ... and always doesn't have
    // a Tensor copy" to grammatical English (reviewer-requested wording).
    AddComment(R"DOC(
ShareData Operator.

Return a tensor that shares data with the input tensor,
without a Tensor copy.
)DOC");
  }
};
|
||
} // namespace operators | ||
} // namespace paddle | ||
|
||
namespace ops = paddle::operators;
// share_data intentionally has no gradient op: a detached variable stops
// gradient propagation, so both static and imperative grad makers are empty.
REGISTER_OPERATOR(
    share_data, ops::ShareDataOp, ops::ShareDataOpMaker,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
// CPU kernels for every element type a detached Variable may hold.
REGISTER_OP_CPU_KERNEL(share_data, ops::ShareDataKernel<bool>,
                       ops::ShareDataKernel<int>, ops::ShareDataKernel<int8_t>,
                       ops::ShareDataKernel<uint8_t>,
                       ops::ShareDataKernel<paddle::platform::float16>,
                       ops::ShareDataKernel<int64_t>,
                       ops::ShareDataKernel<float>,
                       ops::ShareDataKernel<double>)
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. You can change in next PR: 2019 -> 2021 |
||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. */ | ||
|
||
#include "paddle/fluid/operators/share_data_op.h" | ||
|
||
// CUDA kernels mirror the CPU registration: the same element-type list,
// since sharing data is type-agnostic and involves no computation.
REGISTER_OP_CUDA_KERNEL(
    share_data, paddle::operators::ShareDataKernel<bool>,
    paddle::operators::ShareDataKernel<int>,
    paddle::operators::ShareDataKernel<int8_t>,
    paddle::operators::ShareDataKernel<uint8_t>,
    paddle::operators::ShareDataKernel<paddle::platform::float16>,
    paddle::operators::ShareDataKernel<int64_t>,
    paddle::operators::ShareDataKernel<float>,
    paddle::operators::ShareDataKernel<double>);
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,44 @@ | ||
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. */ | ||
|
||
#pragma once | ||
#include "paddle/fluid/framework/op_registry.h" | ||
|
||
namespace paddle { | ||
namespace operators { | ||
|
||
template <typename T> | ||
class ShareDataKernel : public framework::OpKernel<T> { | ||
public: | ||
void Compute(const framework::ExecutionContext &ctx) const override { | ||
auto *in_var = ctx.InputVar("Input"); | ||
auto *out_var = ctx.OutputVar("Out"); | ||
if (in_var->IsType<framework::LoDTensor>()) { | ||
const auto &origin_tensor = in_var->Get<framework::LoDTensor>(); | ||
auto *detach_tensor = out_var->GetMutable<framework::LoDTensor>(); | ||
detach_tensor->ShareDataWith(origin_tensor); | ||
detach_tensor->ShareInplaceVersionCounterWith(origin_tensor); | ||
} else { | ||
const auto &origin_selected_rows = in_var->Get<framework::SelectedRows>(); | ||
auto *detach_selected_rows = | ||
out_var->GetMutable<framework::SelectedRows>(); | ||
detach_selected_rows->mutable_value()->ShareDataWith( | ||
origin_selected_rows.value()); | ||
detach_selected_rows->mutable_value()->ShareInplaceVersionCounterWith( | ||
origin_selected_rows.value()); | ||
} | ||
} | ||
}; | ||
} // namespace operators | ||
} // namespace paddle |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -947,35 +947,45 @@ def __init__(self, | |
self._stop_gradient = stop_gradient | ||
self.is_data = is_data | ||
|
||
def detach(self):
    """
    Returns a new Variable, detached from the current graph.
    It will share data with origin Variable without a Tensor copy.
    In addition, the detached Variable doesn't provide gradient propagation.

    Returns:
         ( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.

    Examples:
        .. code-block:: python
            :name: code-example1

            import paddle

            paddle.enable_static()

            # create a static Variable
            x = paddle.static.data(name='x', shape=[3, 2, 1])

            # create a detached Variable
            y = x.detach()
    """
    # Only dense tensors and selected rows can share storage via share_data.
    assert self.type == core.VarDesc.VarType.SELECTED_ROWS or \
        self.type == core.VarDesc.VarType.LOD_TENSOR, \
        "only support a variable with SELECTED_ROWS or LOD_TENSOR to be detached"

    # The detached output mirrors the source's dtype/type/persistable but is
    # always stop_gradient=True: detaching cuts gradient propagation.
    output = self.block.create_var(
        name=unique_name.generate_with_ignorable_key("detach_" + self.name),
        dtype=self.dtype,
        type=self.type,
        persistable=self.persistable,
        stop_gradient=True)

    # share_data aliases the input's storage, so no Tensor copy happens.
    self.block.append_op(
        type='share_data',
        inputs={'Input': [self]},
        outputs={'Out': [output]})
    return output
|
||
@fake_interface_only | ||
def numpy(self): | ||
|
@@ -1810,6 +1820,35 @@ def set_value(self, value, scope=None): | |
|
||
t.set(value, place) | ||
|
||
def size(self):
    """
    Returns the number of elements for current Variable, which is an int64 Variable with shape [1].

    Returns:
        Variable: the number of elements for current Variable

    Examples:
        .. code-block:: python
            :name: code-example1

            import paddle

            paddle.enable_static()

            # create a static Variable
            x = paddle.static.data(name='x', shape=[3, 2, 1])

            # get the number of elements of the Variable
            y = x.size()
    """
    # The element count is itself a Variable (computed at run time by the
    # 'size' op), not a Python int — shapes may contain -1 at build time.
    output = self.block.create_var(
        name=unique_name.generate_with_ignorable_key(self.name + "_size"),
        dtype=core.VarDesc.VarType.INT64)

    self.block.append_op(
        type='size', inputs={'Input': [self]}, outputs={'Out': [output]})
    return output
|
||
|
||
def get_all_op_protos(): | ||
""" | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -270,6 +270,28 @@ def test_astype(self): | |
fetch_list=[b]) | ||
self.assertTrue(numpy.allclose(a_np.astype('float32'), b_np)) | ||
|
||
@prog_scope()
def test_ndim(self):
    # dim(), ndimension() and the ndim property must all report the rank.
    var = paddle.static.data(name="a", shape=[10, 1])
    expected_rank = 2
    self.assertEqual(var.dim(), expected_rank)
    self.assertEqual(var.ndimension(), expected_rank)
    self.assertEqual(var.ndim, expected_rank)
||
@prog_scope()
def test_matmul(self):
    # Static-graph __matmul__ (the @ operator) should match numpy's result.
    a = fluid.layers.data(name='a', shape=[2, 3], dtype='float32')
    b = fluid.layers.data(name='b', shape=[3, 5], dtype='float32')
    c = a @b  # __matmul__

    a_np = numpy.random.uniform(-1, 1, size=[2, 3]).astype('float32')
    b_np = numpy.random.uniform(-1, 1, size=[3, 5]).astype('float32')
    exe = fluid.Executor(fluid.CPUPlace())
    c_np = exe.run(fluid.default_main_program(),
                   feed={"a": a_np,
                         "b": b_np},
                   fetch_list=[c])
    self.assertTrue(numpy.allclose(a_np @b_np, c_np))
|
||
|
||
if __name__ == '__main__': | ||
unittest.main() |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The input tensor of ShareData operator?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
yes, used on static.Variable.detach