Skip to content

Commit

Permalink
Fix communicatoin communication (#60995)
Browse files Browse the repository at this point in the history
  • Loading branch information
co63oc authored Jan 22, 2024
1 parent c644bac commit 8f96b8c
Show file tree
Hide file tree
Showing 12 changed files with 18 additions and 18 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/operators/cinn/cinn_instruction_run_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ Both the input and output of this operator are a set of variables
which are the input and output arguments of the bound cinn instruction respectively.
In addition, there is an attribute named 'cached_index' should be
set necessarily to get the CinnCompiledObject where the instruction is included
and 'instruction_index' is fetch the instruction object from complied runtime prograrm.
and 'instruction_index' is used to fetch the instruction object from the compiled runtime program.
It accomplishes the execution of the instruction according to the following steps:
0. Set the shapes of the output variables at InferShape function with
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/cinn/cinn_launch_context.cc
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ void CinnLaunchContext::BuildVarNameMap(
paddle2cinn_varmap_.size(),
cinn2paddle_varmap_.size(),
platform::errors::PreconditionNotMet(
"Size of variables is not euqal, paddle[%ld] vs cinn[%ld]",
"Size of variables is not equal, paddle[%ld] vs cinn[%ld]",
paddle2cinn_varmap_.size(),
cinn2paddle_varmap_.size()));
}
Expand Down Expand Up @@ -426,7 +426,7 @@ std::unique_ptr<framework::ProgramDesc> CinnLaunchContext::BuildCompiledProgram(
// to the new VarDesc.
// (2) For all variables, the shape, data type of their VarDescs
// are set by values of the corresponding compiled tensors,
// including the in/out variables where the equiality between their tensors
// including the in/out variables where the equality between their tensors
// and the CINN compiled ones is verified in corresponding cinn_launch_op.
for (auto&& arg : cinn_argument_names_) {
const std::string& var_name = cinn2paddle_varmap_.at(arg);
Expand Down
10 changes: 5 additions & 5 deletions paddle/fluid/operators/cinn/cinn_launch_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ class CinnLaunchContext {
// Return whether a Paddle variable used in cinn execution
bool IsVariableUsed(const std::string& var_name) const;

// Check the equiality in type and dimension between the tensor
// Check the equality in type and dimension between the tensor
// in Paddle and the compiled tensor returned by CINN of a same variable
void CheckTensorEquivalent(const std::string& var_name,
const phi::DenseTensor& paddle_tensor);
Expand All @@ -96,7 +96,7 @@ class CinnLaunchContext {
return skip_eager_vars_;
}

// Redirect the name of a Paddle variable to the orignal if it was inplaced
// Redirect the name of a Paddle variable to the original if it was inplaced
std::string RedirectVarName(const std::string& var_name) const;

// Return internal variable names list
Expand Down Expand Up @@ -157,7 +157,7 @@ class CinnLaunchContext {
// a list of internal variable names in Paddle
std::unordered_set<std::string> internal_var_names_;
// In CINN, there are two variables(in/out) mapped to the one inplaced
// variable of Paddle. To resovle this conflict, we add a output counterpart
variable of Paddle. To resolve this conflict, we add an output counterpart
// in Paddle with the name suffixed by @InplaceOut.
// This set stores which Paddle variable names are inplaced.
std::unordered_set<std::string> inplace_var_names_;
Expand All @@ -179,14 +179,14 @@ class CinnLaunchContext {
std::vector<std::string> skip_eager_vars_;

// because a cinn_pod_value_t does not own a cinn_buffer_t object,
// an extra stroage is necessary to keep those objects and they can
// an extra storage is necessary to keep those objects and they can
// not be released until the runtime program finish execution.
std::vector<std::unique_ptr<cinn_buffer_t>> hold_buffers_;
// this map saves all execution arguments with their cinn names as key,
// and it is passed to the Execute interface of a cinn runtime program.
std::map<std::string, cinn_pod_value_t> name2argument_;
// this map saves all execution arguments with paddle variables as key,
// this map conbine name2argument_ and paddle2cinn_varmap_
// this map combines name2argument_ and paddle2cinn_varmap_
std::map<std::string, cinn_pod_value_t> paddle2argument_;
};

Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/cinn/cinn_launch_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ void DebugCinnCompiledResult(const CinnCompiledObject& result) {
const auto& cinn_scope = *(result.scope);
const auto& paddle2cinn_varmap = result.paddle2cinn_varmap;

VLOG(4) << "Compiled runtime_program instrunction size:["
VLOG(4) << "Compiled runtime_program instruction size:["
<< cinn_runtime_program->size() << "]";

std::vector<std::string> infos;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ class CCommInitMultiTrainerOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
CCommInitMultiTrainer operator
Initialize collective communicatoin context within this trainer
Initialize collective communication context within this trainer
)DOC");
AddAttr<int>("ntrainers",
"(int) The number of trainers of distributed trainers");
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/c_gen_xccl_id_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ class CGenXCCLIdOp : public framework::OperatorBase {
class CGenXCCLIdOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddOutput("Out", "Raw variable contains a XCCL UniqueId instaces.");
AddOutput("Out", "Raw variable contains an XCCL UniqueId instance.");
AddComment(R"DOC(
CGenXCCLId operator
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/global_gather_op.cu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ struct GlobalGatherProcessGroupFunctor<phi::GPUContext, T> {
}
};

template <typename T, typename DeivceContext>
template <typename T, typename DeviceContext>
class GlobalGatherOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/controlflow/depend_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ y = opB(x)
if tensor b and tensor x has some inner dependency, for example, x share data with b,
we need to add an explicit dependency for x <- b, otherwise these two operators may
be executed parellel in static graph. We can use depend op as below,
be executed parallel in static graph. We can use depend op as below,
b = opA(a)
x = depend(x, b)
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/controlflow/feed_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ class FeedOp : public framework::OperatorWithKernel {
feed_sparse_tensor.GetIndicesDict());
} else {
PADDLE_THROW(
phi::errors::Unimplemented("Only support DenseTnesor, Strings, and "
phi::errors::Unimplemented("Only support DenseTensor, Strings, and "
"SparseCooTensor for feed op now."));
}
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/detection/bipartite_match_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ class BipartiteMatchOpMaker : public framework::OpProtoAndCheckerMaker {
"represented by each row and each column. For example, assumed one "
"entity is A with shape [K], another entity is B with shape [M]. The "
"DistMat[i][j] is the distance between A[i] and B[j]. The bigger "
"the distance is, the better macthing the pairs are. Please note, "
"the distance is, the better matching the pairs are. Please note, "
"This tensor can contain LoD information to represent a batch of "
"inputs. One instance of this batch can contain different numbers of "
"entities.");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ class CollectFpnProposalsOpMaker : public framework::OpProtoAndCheckerMaker {
"Select post_nms_topN RoIs from"
" all images and all fpn layers");
AddComment(R"DOC(
This operator concats all proposals from different images
This operator concatenates all proposals from different images
and different FPN levels. Then sort all of those proposals
by objectness confidence. Select the post_nms_topN RoIs in
total. Finally, re-sort the RoIs in the order of batch index.
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/detection/density_prior_box_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -83,8 +83,8 @@ class DensityPriorBoxOp : public framework::OperatorWithKernel {
fixed_sizes.size(),
densities.size()));
size_t num_priors = 0;
for (auto densitie : densities) {
num_priors += (fixed_ratios.size()) * (pow(densitie, 2)); // NOLINT
for (auto density : densities) {
num_priors += (fixed_ratios.size()) * (pow(density, 2)); // NOLINT
}
if (!flatten) {
std::vector<int64_t> dim_vec(4);
Expand Down

0 comments on commit 8f96b8c

Please sign in to comment.