[NewIR] Rename feed with place to data (#55778)
* fix bug: feed_with_place should consider variable existence

* fix

* fix build scope

* change method to set feed var name

* rename feed_with_place to placeholder

* fix

* rename to data

* fix

* fix
kangguangli authored Aug 4, 2023
1 parent e3b6e02 commit 274e5e5
Showing 12 changed files with 41 additions and 52 deletions.
6 changes: 3 additions & 3 deletions paddle/fluid/framework/executor_cache.cc
@@ -377,7 +377,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
auto place = in_t.place().GetType();

auto op_desc = block->PrependOp();
op_desc->SetType("feed_with_place");
op_desc->SetType("data");
op_desc->SetAttr("index", 0);
// TODO(phlrain) : using tensor dtype
op_desc->SetAttr("dtype", 0);
@@ -391,7 +391,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
auto place = param.place().GetType();

auto op_desc = local_program.MutableBlock(0)->PrependOp();
op_desc->SetType("feed_with_place");
op_desc->SetType("data");
op_desc->SetAttr("index", 0);
// TODO(phlrain) : using tensor dtype
op_desc->SetAttr("dtype", 0);
@@ -471,7 +471,7 @@ std::unique_ptr<::ir::Program> ConstructBackwardIrProgram(
continue;
}
auto op_desc = local_program.MutableBlock(0)->PrependOp();
op_desc->SetType("feed_with_place");
op_desc->SetType("data");
op_desc->SetAttr("index", 0);
// TODO(phlrain) : using tensor dtype
op_desc->SetAttr("dtype", 0);
@@ -980,7 +980,7 @@ void BuildOpFuncList(
if (op_name == "builtin.combine" || op_name == "pd.feed" ||
op_name == "builtin.set_parameter" ||
op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") {
op_name == "pd.data" || op_name == "pd.shadow_output") {
VLOG(6) << "skip process " << op_name;
continue;
}
2 changes: 1 addition & 1 deletion paddle/fluid/framework/new_executor/new_ir_interpreter.cc
@@ -1619,7 +1619,7 @@ void NewIRInterpreter::BuildInstruction() {
if (op_name == "builtin.combine" || op_name == "pd.feed" ||
op_name == "builtin.set_parameter" ||
op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
op_name == "pd.feed_with_place" || op_name == "pd.shaddow_output") {
op_name == "pd.data" || op_name == "pd.shaddow_output") {
VLOG(6) << "skip process " << op_name;
continue;
}
6 changes: 3 additions & 3 deletions paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc
@@ -264,8 +264,8 @@ void HandleForSpecialOp(
variable_list);
}

if (op_name == "pd.feed_with_place") {
VLOG(6) << "Handle for pd.feed_with_place";
if (op_name == "pd.data") {
VLOG(6) << "Handle for pd.data";
auto var_name =
op->attributes().at("name").dyn_cast<ir::StrAttribute>().AsString();

@@ -492,7 +492,7 @@ void BuildScope(const ir::Block& block,
if (op_name == "pd.feed" || op_name == "pd.fetch" ||
op_name == "builtin.combine" || op_name == "builtin.set_parameter" ||
op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") {
op_name == "pd.data" || op_name == "pd.shadow_output") {
HandleForSpecialOp(op,
inner_scope,
var_name_prefix,
2 changes: 1 addition & 1 deletion paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
@@ -172,7 +172,7 @@ phi::KernelKey GetKernelKey(
op->result(0).type().dyn_cast<DenseTensorType>().dtype())};
}

if (op->name() == "pd.feed_with_place") {
if (op->name() == "pd.data") {
// NOTE, for now feed op don't need a kernel, so the data type from Op
// Result the next op use base program datatype
auto t =
14 changes: 2 additions & 12 deletions paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -986,7 +986,7 @@ struct FeedOpTranscriber : public OpTranscriber {
}
};

- struct FeedWithPlaceOpTranscriber : public OpTranscriber {
+ struct DataOpTranscriber : public FeedOpTranscriber {
ir::AttributeMap TranslateOpAttribute(
ir::IrContext* ctx,
const std::string& normalized_op_name,
@@ -1007,16 +1007,6 @@ struct FeedWithPlaceOpTranscriber : public OpTranscriber {

return attribute_map;
}

- std::vector<ir::OpResult> GenerateOperationInput(
- ir::IrContext* ctx,
- TranslationContext* param_map,
- const OpDesc& op_desc,
- const std::string& normalized_op_name,
- const OpInputInfoList& input_infos,
- ir::Program* program) override {
- return {};
- }
};

struct SplitOpTranscriber : public OpTranscriber {
@@ -1473,7 +1463,7 @@ OpTranslator::OpTranslator() {
special_handlers["assign_value"] = AssignValueOpTranscriber();
special_handlers["cast"] = CastOpTranscriber();
special_handlers["feed"] = FeedOpTranscriber();
special_handlers["feed_with_place"] = FeedWithPlaceOpTranscriber();
special_handlers["data"] = DataOpTranscriber();
special_handlers["fetch_v2"] = FetchOpTranscriber();
special_handlers["increment"] = IncrementOpTranscriber();
special_handlers["lookup_table_v2"] = EmbeddingOpTranscriber();
24 changes: 12 additions & 12 deletions paddle/phi/api/yaml/ops.yaml
@@ -630,6 +630,18 @@
data_type : x
backward : cumsum_grad

+ - op : data
+ args : (int64_t index, DataType dtype, str name, Place place)
+ output : Tensor(out)
+ infer_meta :
+ func : FeedWithPlaceInferMeta
+ param : [index, dtype]
+ kernel:
+ func : data
+ param : [index, dtype]
+ data_type : dtype
+ backend : place

- op : depthwise_conv2d
args : (Tensor input, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
output : Tensor(out)
@@ -838,18 +850,6 @@
inplace: (x -> out)
backward : expm1_grad

- - op : feed_with_place
- args : (int64_t index, DataType dtype, str name, Place place)
- output : Tensor(out)
- infer_meta :
- func : FeedWithPlaceInferMeta
- param : [index, dtype]
- kernel:
- func : feed_with_place
- param : [index, dtype]
- data_type : dtype
- backend : place

- op : fft_c2c
args : (Tensor x, int64_t[] axes, str normalization, bool forward)
output : Tensor
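The `data` entry added to ops.yaml above declares the op's attributes (index, dtype, name, place). As a rough illustration of how those attributes line up with the op construction shown in the executor_cache.cc hunks, here is a hypothetical C++ sketch; the helper name, the `name`/`place` attribute handling, and the concrete values are assumptions, not code from this commit.

#include <string>

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"

// Hypothetical helper: prepend a "data" op to a block, mirroring the yaml
// args (int64_t index, DataType dtype, str name, Place place). Only
// SetType("data") and SetAttr("index"/"dtype") appear in the diff; the rest
// is illustrative.
void PrependDataOp(paddle::framework::BlockDesc* block,
                   const std::string& var_name,
                   int place_type) {
  auto* op_desc = block->PrependOp();
  op_desc->SetType("data");
  op_desc->SetAttr("index", 0);           // int64_t index
  op_desc->SetAttr("dtype", 0);           // DataType dtype (placeholder, see TODO in the diff)
  op_desc->SetAttr("name", var_name);     // str name: variable bound to the output (assumption)
  op_desc->SetAttr("place", place_type);  // Place place, encoded as an int here (assumption)
  op_desc->SetOutput("out", {var_name});
}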
@@ -12,21 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/feed_with_place_kernel.h"
#include "paddle/phi/kernels/data_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/feed_with_place_impl.h"
#include "paddle/phi/kernels/impl/data_impl.h"

#include "paddle/phi/kernels/funcs/tensor_formatter.h"

namespace phi {

template <typename T, typename Context>
- void FeedWithPlaceKernel(const Context& ctx,
- int64_t index,
- phi::DataType data_type,
- DenseTensor* out) {}
+ void DataKernel(const Context& ctx,
+ int64_t index,
+ phi::DataType data_type,
+ DenseTensor* out) {}

template <typename T, typename Context>
void ShadowOutputKernel(const Context& ctx,
@@ -35,8 +35,7 @@ void ShadowOutputKernel(const Context& ctx,

} // namespace phi

- PD_REGISTER_KERNEL(
- feed_with_place, CPU, ALL_LAYOUT, phi::FeedWithPlaceKernel, float) {}
+ PD_REGISTER_KERNEL(data, CPU, ALL_LAYOUT, phi::DataKernel, float) {}

PD_REGISTER_KERNEL(shadow_feed,
CPU,
@@ -19,11 +19,11 @@
namespace phi {

template <typename T, typename Context>
- void FeedWithPlaceKernel(const Context& ctx,
- int64_t index,
- phi::DataType data_type,
- // std::string name,
- DenseTensor* out);
+ void DataKernel(const Context& ctx,
+ int64_t index,
+ phi::DataType data_type,
+ // std::string name,
+ DenseTensor* out);

template <typename T, typename Context>
void ShadowOutputKernel(const Context& ctx,
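For reference, a hypothetical call-site sketch of the renamed kernel declared above. DataKernel is an empty placeholder in this commit, so the snippet only illustrates the argument list; the surrounding function and variable names are made up.

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/data_kernel.h"

// Hypothetical direct invocation of the declared template; "index" and
// "dtype" mirror the attributes set on the op in the executor-side diffs.
void InvokeDataKernelSketch() {
  phi::CPUContext ctx;
  phi::DenseTensor out;
  phi::DataKernel<float, phi::CPUContext>(
      ctx, /*index=*/0, phi::DataType::FLOAT32, &out);
}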
@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/feed_with_place_kernel.h"
#include "paddle/phi/kernels/data_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/feed_with_place_impl.h"
#include "paddle/phi/kernels/impl/data_impl.h"

PD_REGISTER_KERNEL(shadow_feed,
GPU,
File renamed without changes.
@@ -18,12 +18,12 @@
from paddle.fluid.layer_helper import LayerHelper


- def feed_with_place():
- helper = LayerHelper('feed_with_place', **locals())
+ def data():
+ helper = LayerHelper('data', **locals())

out = helper.create_variable_for_type_inference('float32')
helper.append_op(
- type='feed_with_place',
+ type='data',
inputs={},
outputs={'out': out},
attrs={
@@ -46,7 +46,7 @@ def test_with_new_ir(self):
new_scope = paddle.static.Scope()
with paddle.static.scope_guard(new_scope):
with paddle.static.program_guard(main_program):
- out = feed_with_place()
+ out = data()


if __name__ == "__main__":
