Commit 77e8138
Merge branch 'drr' of https://github.com/yuanlehome/Paddle into drr_pass
zyfncg committed Oct 13, 2023
2 parents 3aabfe8 + 4bac4b1
Showing 707 changed files with 15,245 additions and 31,156 deletions.
4 changes: 2 additions & 2 deletions .clang-tidy
@@ -20,7 +20,7 @@ bugprone-integer-division,
bugprone-misplaced-widening-cast,
-bugprone-move-forwarding-reference,
-bugprone-multiple-statement-macro,
-bugprone-narrowing-conversions,
bugprone-narrowing-conversions,
-bugprone-not-null-terminated-result,
-bugprone-parent-virtual-call,
-bugprone-posix-return,
@@ -155,7 +155,7 @@ cppcoreguidelines-avoid-c-arrays,
-cppcoreguidelines-avoid-goto,
cppcoreguidelines-c-copy-assignment-signature,
cppcoreguidelines-explicit-virtual-functions,
-cppcoreguidelines-init-variables,
cppcoreguidelines-init-variables,
cppcoreguidelines-narrowing-conversions,
cppcoreguidelines-no-malloc,
-cppcoreguidelines-pro-type-const-cast,
4 changes: 4 additions & 0 deletions .gitmodules
@@ -106,3 +106,7 @@
path = third_party/jitify
url = https://github.com/NVIDIA/jitify.git
ignore = dirty
[submodule "third_party/cccl"]
path = third_party/cccl
url = https://github.com/NVIDIA/cccl.git
ignore = dirty
4 changes: 3 additions & 1 deletion cmake/external/brpc.cmake
@@ -13,7 +13,9 @@
# limitations under the License.

include(ExternalProject)
set(OPENSSL_USE_STATIC_LIBS ON)
if(NOT WITH_ARM)
set(OPENSSL_USE_STATIC_LIBS ON)
endif()
find_package(OpenSSL REQUIRED)

message(STATUS "ssl:" ${OPENSSL_SSL_LIBRARY})
31 changes: 31 additions & 0 deletions cmake/external/cccl.cmake
@@ -0,0 +1,31 @@
include(ExternalProject)

set(CCCL_PATH
"${THIRD_PARTY_PATH}/cccl"
CACHE STRING "A path setting for external_cccl path.")
set(CCCL_PREFIX_DIR ${CCCL_PATH})
set(CCCL_SOURCE_DIR ${PADDLE_SOURCE_DIR}/third_party/cccl)

# The latest commit has bugs on Windows, so we pin a fixed commit.
set(CCCL_TAG 1f6e4bcae0fbf1bbed87f88544d8d2161c490fc1)
execute_process(COMMAND git --git-dir=${CCCL_SOURCE_DIR}/.git
--work-tree=${CCCL_SOURCE_DIR} checkout ${CCCL_TAG})

set(CCCL_INCLUDE_DIR ${CCCL_SOURCE_DIR})
message("CCCL_INCLUDE_DIR is ${CCCL_INCLUDE_DIR}")
include_directories(${CCCL_INCLUDE_DIR})

ExternalProject_Add(
extern_cccl
${EXTERNAL_PROJECT_LOG_ARGS}
SOURCE_DIR ${CCCL_SOURCE_DIR}
PREFIX ${CCCL_PREFIX_DIR}
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND "")

add_library(cccl INTERFACE)

add_dependencies(cccl extern_cccl)
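
Note: cccl is declared as a header-only INTERFACE library whose headers are exposed globally through include_directories(${CCCL_INCLUDE_DIR}), so a consumer only needs a dependency edge to guarantee the pinned checkout runs first. A minimal sketch of hypothetical downstream usage (the my_kernels target name is illustrative, not part of this commit):

# Hypothetical consumer; "my_kernels" is an illustrative target name.
add_library(my_kernels STATIC my_kernels.cu)
# Ensure the pinned CCCL checkout (extern_cccl -> cccl) completes before this
# target compiles; the headers are already on the global include path.
add_dependencies(my_kernels cccl)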
5 changes: 4 additions & 1 deletion cmake/generic.cmake
@@ -88,7 +88,7 @@
# To build a unit test binary, which is an executable binary with libpaddle.so
# automatically linked:
#
# paddle_test(example SHARED)
# paddle_test(example SRCS example_test.cc)
#

# including binary directory for generated headers.
@@ -1345,6 +1345,9 @@ function(math_library TARGET)
if(WITH_GPU)
if(${CMAKE_CUDA_COMPILER_VERSION} LESS 11.0)
list(APPEND math_common_deps cub)
elseif(${CMAKE_CUDA_COMPILER_VERSION} EQUAL 12.0
OR ${CMAKE_CUDA_COMPILER_VERSION} GREATER 12.0)
list(APPEND math_common_deps cccl)
else()
list(APPEND math_common_deps)
endif()
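Note: the EQUAL/GREATER pair above is an explicit spelling of "CUDA 12.0 or newer", the point from which this commit takes Thrust/CUB headers from the unified CCCL checkout. A behaviorally equivalent sketch using CMake's version comparison operator (assuming CMake >= 3.7, which provides VERSION_GREATER_EQUAL):

if(${CMAKE_CUDA_COMPILER_VERSION} LESS 11.0)
  list(APPEND math_common_deps cub)
elseif(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.0)
  # For CUDA 12.0 and newer, depend on the pinned CCCL checkout instead of cub.
  list(APPEND math_common_deps cccl)
endif()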
12 changes: 12 additions & 0 deletions cmake/third_party.cmake
@@ -247,6 +247,14 @@ if(NOT DEFINED WITH_MKLDNN)
endif()
endif()

if(WIN32)
if(MSVC)
if(MSVC_VERSION LESS 1920)
set(WITH_MKLDNN OFF)
endif()
endif()
endif()
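
Note: MSVC_VERSION 1920 is the first Visual Studio 2019 toolset, so this guard turns MKLDNN off for older MSVC compilers. The nested ifs collapse into a single equivalent condition (a sketch only; the commit keeps the nested form):

# Disable MKLDNN for pre-VS2019 toolsets (MSVC_VERSION < 1920).
if(WIN32 AND MSVC AND MSVC_VERSION LESS 1920)
  set(WITH_MKLDNN OFF)
endif()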

if(WIN32
OR APPLE
OR NOT WITH_GPU
@@ -375,6 +383,10 @@ if(WITH_GPU)
if(${CMAKE_CUDA_COMPILER_VERSION} LESS 11.0)
include(external/cub) # download cub
list(APPEND third_party_deps extern_cub)
elseif(${CMAKE_CUDA_COMPILER_VERSION} EQUAL 12.0
OR ${CMAKE_CUDA_COMPILER_VERSION} GREATER 12.0)
include(external/cccl)
list(APPEND third_party_deps extern_cccl)
endif()
set(URL
"https://paddlepaddledeps.bj.bcebos.com/externalErrorMsg_20210928.tar.gz"
83 changes: 83 additions & 0 deletions paddle/cinn/ast_gen_ius/ast_gen.cc
@@ -19,6 +19,7 @@
#include "paddle/cinn/ir/operation.h"
#include "paddle/cinn/ir/tensor.h"
#include "paddle/cinn/lang/compute.h"
#include "paddle/cinn/optim/replace_var_with_expr.h"

namespace cinn {
namespace ast_gen_ius {
@@ -84,11 +85,75 @@ ir::Expr AstGen::Build(const ir::Tensor& tensor, TensorGroup* tensor_group) {
tensor_group->MarkShareMemBuffer(tensor, init_tensor);
tensor_group->CtrlDepend(tensor, init_tensor);
Expr init_body = ir::Store::Make(init_tensor, init_value, axis_exprs);
// create schedule block itervars, i0,i1...
std::vector<ir::Var> block_vars;
std::vector<ir::Expr> iter_values;
// the reduce body and the reduce init schedule block should have different
// objects for the same axis, so we re-create the objects here
std::vector<Var> axis_vars = common::GenDefaultAxis(axis_len);
for (int i = 0; i < shape.size(); ++i) {
block_vars.push_back(Var(Expr(0),
shape[i],
cinn::UniqName("i" + std::to_string(i)),
/*is_reduce = */ false));
optim::ReplaceVarWithExpr(&init_body, axis[i], block_vars[i]);
axis_vars[i]->is_reduce_axis = false;
if (shape[i] == Expr(1)) {
iter_values.push_back(Expr(0));
} else {
iter_values.push_back(axis_vars[i]);
}
}
init_body = ir::ScheduleBlockRealize::Make(
iter_values,
ir::ScheduleBlock::Make(
block_vars, {}, {}, reduce_init_name, init_body));

// For the remaining reduce axis, make reduce body
const std::vector<ir::Var>& reduce_axis = tensor->reduce_axis;
ir::Expr reduce_body =
ConvertReduceBody(tensor->body(), tensor, axis_exprs);
// create schedule block itervars, i0,i1...
std::vector<ir::Var> reduce_block_vars;
std::vector<ir::Expr> reduce_iter_values;
// the reduce body and the reduce init schedule block should have different
// objects for the same axis, so we re-create the objects here
std::vector<Var> reduce_axis_vars = common::GenDefaultAxis(axis_len);
for (int i = 0; i < shape.size(); ++i) {
reduce_block_vars.push_back(Var(Expr(0),
shape[i],
cinn::UniqName("i" + std::to_string(i)),
/*is_reduce = */ false));
reduce_axis_vars[i]->is_reduce_axis = false;
if (shape[i] == Expr(1)) {
reduce_iter_values.push_back(Expr(0));
} else {
reduce_iter_values.push_back(axis_vars[i]);
}
}
for (int i = 0; i < reduce_axis.size(); ++i) {
int count = shape.size() + i;
reduce_block_vars.push_back(
Var(reduce_axis[i]->lower_bound,
reduce_axis[i]->upper_bound,
cinn::UniqName("i" + std::to_string(count)),
/*is_reduce = */ true));
ir::Var reduce_axis_var = reduce_axis[i];
reduce_axis_var->is_reduce_axis = true;
reduce_iter_values.push_back(reduce_axis_var);
}
for (int i = 0; i < axis.size(); ++i) {
optim::ReplaceVarWithExpr(&reduce_body, axis[i], reduce_block_vars[i]);
}
for (int i = axis.size(); i < reduce_block_vars.size(); ++i) {
optim::ReplaceVarWithExpr(
&reduce_body, reduce_axis[i - axis.size()], reduce_block_vars[i]);
}

reduce_body = ir::ScheduleBlockRealize::Make(
reduce_iter_values,
ir::ScheduleBlock::Make(
reduce_block_vars, {}, {}, tensor->name, reduce_body));
for (int i = static_cast<int>(reduce_axis.size()) - 1; i >= 0; --i) {
reduce_body = ir::For::Make(reduce_axis[i],
reduce_axis[i]->lower_bound,
@@ -114,6 +179,24 @@ ir::Expr AstGen::Build(const ir::Tensor& tensor, TensorGroup* tensor_group) {
return body;
} else {
ir::Expr body = ir::Store::Make(tensor, tensor->body(), axis_exprs);
// create schedule block itervars, i0,i1...
std::vector<ir::Var> block_vars;
std::vector<ir::Expr> iter_values;
std::vector<Var> axis_vars = common::GenDefaultAxis(axis_len);
for (int i = 0; i < shape.size(); ++i) {
block_vars.push_back(Var(
Expr(0), shape[i], cinn::UniqName("i" + std::to_string(i)), false));
optim::ReplaceVarWithExpr(&body, axis[i], block_vars[i]);
axis_vars[i]->is_reduce_axis = false;
if (shape[i] == Expr(1)) {
iter_values.push_back(Expr(0));
} else {
iter_values.push_back(axis_vars[i]);
}
}
body = ir::ScheduleBlockRealize::Make(
iter_values,
ir::ScheduleBlock::Make(block_vars, {}, {}, tensor->name, body));
for (int i = static_cast<int>(axis_len) - 1; i >= 0; --i) {
ir::Var loop_var = axis[i];
ir::Expr loop_extent = shape[i];
94 changes: 81 additions & 13 deletions paddle/cinn/ast_gen_ius/tensor_group.cc
@@ -21,26 +21,37 @@
#include "paddle/cinn/ir/ir_base.h"
#include "paddle/cinn/ir/tensor.h"
#include "paddle/cinn/ir/utils/ir_nodes_collector.h"
#include "paddle/cinn/poly/stage.h"

namespace cinn {
namespace ast_gen_ius {

TensorGroup::TensorGroup(const std::vector<ir::Tensor>& tensors) {
std::set<ir::Tensor> all_tensors(tensors.begin(), tensors.end());

for (auto& tensor : tensors) {
for (const ir::Tensor& tensor : tensors) {
output_tensor_names_.insert(tensor->name);
std::set<ir::Expr> used_tensors = ir::ir_utils::CollectIRNodes(
tensor->body(), [](const Expr* x) { return x->as_tensor(); });
for (const Expr& x : used_tensors) {
const ir::Tensor to_dep = x.as_tensor_ref();
all_tensors.insert(to_dep);
this->CtrlDepend(tensor, to_dep);
this->Insert(tensor);
}
}

void TensorGroup::ShowLog() const {
VLOG(6) << "Showing log for TensorGroup";
for (auto& p : name_to_tensor_) {
VLOG(6) << "Tensor name = " << p.first << " depends on {";
if (ctrl_dep_.count(p.first)) {
for (auto& dep_name : ctrl_dep_.at(p.first)) {
VLOG(6) << dep_name;
}
}
VLOG(6) << "}";
}
}

for (const ir::Tensor& t : all_tensors) {
name_to_tensor_.insert({t->name, t});
TensorGroup::TensorGroup(
const std::unordered_map<std::string, ir::Tensor>& tensor_map) {
for (const auto& map_pair : tensor_map) {
const ir::Tensor& tensor = map_pair.second;
output_tensor_names_.insert(tensor->name);
this->Insert(tensor);
}
}

@@ -51,7 +62,23 @@ bool TensorGroup::Contain(const std::string& name) const {
}

void TensorGroup::Insert(const ir::Tensor& tensor) {
name_to_tensor_.insert({tensor->name, tensor});
if (!name_to_tensor_.count(tensor->name)) {
name_to_tensor_.insert({tensor->name, tensor});
}

// Using set to de-duplicate
std::set<ir::Tensor> dep_tensors;
std::set<ir::Expr> used_tensors = ir::ir_utils::CollectIRNodes(
tensor->body(), [](const Expr* x) { return x->as_tensor(); });
for (const Expr& x : used_tensors) {
const ir::Tensor to_dep = x.as_tensor_ref();
dep_tensors.insert(to_dep);
this->CtrlDepend(tensor, to_dep);
}

for (const ir::Tensor& t : dep_tensors) {
this->Insert(t);
}
}

ir::Tensor TensorGroup::Get(const std::string& name) {
@@ -72,6 +99,8 @@ std::vector<ir::Tensor> TensorGroup::GetGenFuncTopoOrder(
for (const auto& dep_pair : ctrl_dep_) {
const std::unordered_set<std::string>& dep_tensor_names = dep_pair.second;
in_degree[dep_pair.first] = dep_tensor_names.size();
VLOG(6) << "indegree[" << dep_pair.first
<< "] = " << dep_tensor_names.size();
}

std::vector<ir::Tensor> ret;
@@ -95,7 +124,6 @@ std::vector<ir::Tensor> TensorGroup::GetGenFuncTopoOrder(
while (!node_set.empty()) {
const std::string cur = *(node_set.begin());
node_set.erase(node_set.begin());

if (!input_arg_names.count(cur)) {
ret.push_back(name_to_tensor_[cur]);
}
@@ -187,5 +215,45 @@ absl::flat_hash_map<std::string, ir::Tensor> TensorGroup::AllocateBuffers() {
return name_to_tensor_;
}

void StageMapShareMemory(const poly::StageMap& stages) {
absl::flat_hash_map<std::string, ir::_Tensor_*> tensor_map;
for (auto& stage : stages) {
tensor_map[stage.second->tensor()->name] = stage.second->tensor();
}
for (auto& stage : stages) {
if (!stage.second->tensor()->buffer.defined() &&
!stage.second->meta.tensors_to_share_buffer_with.empty()) {
for (auto& str : stage.second->meta.tensors_to_share_buffer_with) {
if (tensor_map[str]->buffer.defined()) {
auto edited_shape = tensor_map[str]->buffer->shape;
stage.second->tensor()->Bind(tensor_map[str]->buffer);
tensor_map[str]->buffer->shape = edited_shape;
VLOG(3) << "Stage Tensor " << stage.second->tensor()->name
<< " bind buffer to " << tensor_map[str]->name << " , "
<< tensor_map[str]->buffer->name;
}
}
}
}
}

TensorGroup ConvertStageMapToTensorGroup(const poly::StageMap& stage_map) {
std::vector<ir::Tensor> stage_tensors;
std::set<ir::Tensor> reshape_tensors;
for (auto iter = stage_map.begin(); iter != stage_map.end(); ++iter) {
if (iter->second->has_expression()) {
const std::string& tensor_name = iter->first;
stage_tensors.push_back(ir::Tensor(iter->second->tensor()));
if (utils::Endswith(tensor_name, "_reshape")) {
reshape_tensors.insert(ir::Tensor(iter->second->tensor()));
}
}
}

ast_gen_ius::TensorGroup tensor_group(stage_tensors);
StageMapShareMemory(stage_map);
return tensor_group;
}

} // namespace ast_gen_ius
} // namespace cinn
15 changes: 15 additions & 0 deletions paddle/cinn/ast_gen_ius/tensor_group.h
@@ -24,6 +24,7 @@
#include "paddle/cinn/ir/ir.h"
#include "paddle/cinn/ir/ir_base.h"
#include "paddle/cinn/ir/tensor.h"
#include "paddle/cinn/poly/stage.h"

namespace cinn {
namespace ast_gen_ius {
@@ -41,11 +42,21 @@ class TensorGroup {
*/
explicit TensorGroup(const std::vector<ir::Tensor>& tensors);

/**
* Constructor for a TensorGroup; the argument tensors should be the output
* tensor arguments of the AST body to be generated. The tensors that the
* output tensors depend on are collected during construction.
*/
explicit TensorGroup(
const std::unordered_map<std::string, ir::Tensor>& tensor_map);

/**
* Destructor.
*/
~TensorGroup();

void ShowLog() const;

/**
* Returns true if TensorGroup collection contains a tensor with input name.
*/
@@ -119,5 +130,9 @@
std::unordered_map<std::string, std::string> share_memory_tensor_;
};

// TODO(zhhsplendid): removing stage_map requires changing all fcompute
// CINNValuePack usages; we will change it in the next PR
TensorGroup ConvertStageMapToTensorGroup(const poly::StageMap& stage_map);

} // namespace ast_gen_ius
} // namespace cinn