From ff0caf909ab3a938cab5a6458a22fa9a9608a9db Mon Sep 17 00:00:00 2001
From: co63oc
Date: Wed, 21 Aug 2024 14:27:51 +0800
Subject: [PATCH] Fix (#66943)

---
 .../collective/c_comm_init_all_op.cc          |  55 ------
 .../collective/c_comm_init_all_op.cu.cc       |  20 --
 .../operators/collective/c_comm_init_all_op.h |  95 ----------
 .../collective/c_comm_init_all_op_xpu.cc      |  20 --
 .../pir/dialect/op_generator/ops_api_gen.py   |   1 +
 paddle/fluid/primitive/codegen/gen.py         |   1 +
 .../core/platform/device/xpu/bkcl_helper.h    | 177 +++++++++++++++++-
 paddle/phi/infermeta/nullary.cc               |   2 +
 paddle/phi/infermeta/nullary.h                |   2 +
 .../phi/kernels/gpu/comm_init_all_kernel.cu   |  42 +++++
 .../phi/kernels/xpu/comm_init_all_kernel.cc   |  76 ++++++++
 .../phi/ops/yaml/inconsistent/static_ops.yaml |  10 +
 paddle/phi/ops/yaml/legacy/static_ops.yaml    |  10 +
 paddle/phi/ops/yaml/op_compat.yaml            |   2 +
 14 files changed, 322 insertions(+), 191 deletions(-)
 delete mode 100644 paddle/fluid/operators/collective/c_comm_init_all_op.cc
 delete mode 100644 paddle/fluid/operators/collective/c_comm_init_all_op.cu.cc
 delete mode 100644 paddle/fluid/operators/collective/c_comm_init_all_op.h
 delete mode 100644 paddle/fluid/operators/collective/c_comm_init_all_op_xpu.cc
 create mode 100644 paddle/phi/kernels/gpu/comm_init_all_kernel.cu
 create mode 100644 paddle/phi/kernels/xpu/comm_init_all_kernel.cc

diff --git a/paddle/fluid/operators/collective/c_comm_init_all_op.cc b/paddle/fluid/operators/collective/c_comm_init_all_op.cc
deleted file mode 100644
index 21729fd438b19..0000000000000
--- a/paddle/fluid/operators/collective/c_comm_init_all_op.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/operators/collective/c_comm_init_all_op.h"
-
-namespace paddle::operators {
-
-class CCommInitAllOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {}
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return phi::KernelKey(framework::proto::VarType::FP32, ctx.GetPlace());
-  }
-};
-
-class CCommInitAllOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddComment(R"DOC(
-CCommInitAll operator
-
-Initialize all collective communication context
-)DOC");
-    AddAttr<std::vector<int>>(
-        "devices",
-        "(std::vector<int>) which devices does the nccl comm initialized on")
-        .SetDefault({});
-    AddAttr<int>("ring_id", "(int default 0) user specified ring id")
-        .SetDefault(0);
-  }
-};
-
-}  // namespace paddle::operators
-
-namespace ops = paddle::operators;
-
-REGISTER_OPERATOR(c_comm_init_all,
-                  ops::CCommInitAllOp,
-                  ops::CCommInitAllOpMaker);
diff --git a/paddle/fluid/operators/collective/c_comm_init_all_op.cu.cc b/paddle/fluid/operators/collective/c_comm_init_all_op.cu.cc
deleted file mode 100644
index 43e40b6389932..0000000000000
--- a/paddle/fluid/operators/collective/c_comm_init_all_op.cu.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/operators/collective/c_comm_init_all_op.h"
-
-namespace ops = paddle::operators;
-
-PD_REGISTER_STRUCT_KERNEL(
-    c_comm_init_all, GPU, ALL_LAYOUT, ops::CCommInitAllKernel, float) {}
diff --git a/paddle/fluid/operators/collective/c_comm_init_all_op.h b/paddle/fluid/operators/collective/c_comm_init_all_op.h
deleted file mode 100644
index 78371975af0fc..0000000000000
--- a/paddle/fluid/operators/collective/c_comm_init_all_op.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include <string>
-
-#include "paddle/fluid/framework/op_info.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/framework/threadpool.h"
-#include "paddle/phi/core/platform/collective_helper.h"
-
-#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
-#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
-#endif
-
-#if defined(PADDLE_WITH_XPU_BKCL)
-#include "paddle/fluid/platform/device/xpu/bkcl_helper.h"
-#endif
-
-namespace paddle {
-namespace operators {
-
-template <typename T, typename DeviceContext>
-class CCommInitAllKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
-    std::vector<int> devices = ctx.Attr<std::vector<int>>("devices");
-    if (devices.empty()) {
-      devices = platform::GetSelectedDevices();
-    }
-
-    int rid = ctx.Attr<int>("ring_id");
-
-    platform::NCCLCommContext::Instance().CreateAllNCCLComms(devices, rid);
-#elif defined(PADDLE_WITH_XPU_BKCL)
-    std::vector<int> devices = ctx.Attr<std::vector<int>>("devices");
-    int ring_id = ctx.Attr<int>("ring_id");
-
-    if (devices.empty()) {
-      int count = platform::GetXPUDeviceCount();
-      for (int i = 0; i < count; ++i) {
-        devices.push_back(i);
-      }
-    }
-
-    if (devices.size() > 1) {
-      std::vector<phi::Place> place_list_;
-      for (size_t i = 0; i < devices.size(); ++i) {
-        auto p = phi::XPUPlace(devices[i]);
-        place_list_.push_back(p);
-      }
-
-      // create pthread to bkcl_init_rank on all devices
-      auto ptr = new platform::BKCLContextMap(place_list_);
-      ptr->init();
-
-      for (size_t i = 0; i < devices.size(); ++i) {
-        platform::BKCLCommContext::Instance().AssignBKCLComm(
-            ptr->contexts_.at(devices[i]).comm_,
-            devices.size(),
-            devices[i],
-            devices[i],
-            ring_id);
-
-        VLOG(0) << "bkcl communicator of rank " << devices[i] << " in ring "
-                << ring_id << " has been created on device " << devices[i];
-
-        // TODO(WorgenZhang): need release comm_map_ when quit
-        // std::call_once(once_flag_, []() {
-        //   std::atexit([]() {
-        //     platform::BKCLCommContext::Instance().ReleaseBKCLComms(); });
-        // });
-      }
-
-      VLOG(0) << "done bkcl_init_rank on all devices";
-    }
-#endif
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/fluid/operators/collective/c_comm_init_all_op_xpu.cc b/paddle/fluid/operators/collective/c_comm_init_all_op_xpu.cc
deleted file mode 100644
index d23f0931c6e73..0000000000000
--- a/paddle/fluid/operators/collective/c_comm_init_all_op_xpu.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/operators/collective/c_comm_init_all_op.h"
-
-namespace ops = paddle::operators;
-
-PD_REGISTER_STRUCT_KERNEL(
-    c_comm_init_all, XPU, ALL_LAYOUT, ops::CCommInitAllKernel, float) {}
diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
index 2b50198ff1a19..8c3fd1f32fc6a 100644
--- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
+++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
@@ -169,6 +169,7 @@
     'c_reduce_sum',
     'c_softmax_with_cross_entropy',
     'c_split',
+    'comm_init_all',
     'decayed_adagrad',
     'distributed_fused_lamb',
     'distributed_fused_lamb_',
diff --git a/paddle/fluid/primitive/codegen/gen.py b/paddle/fluid/primitive/codegen/gen.py
index 85d64ee236798..b9bcabc57d3c9 100644
--- a/paddle/fluid/primitive/codegen/gen.py
+++ b/paddle/fluid/primitive/codegen/gen.py
@@ -62,6 +62,7 @@
     "full",
     "partial_send",
     "push_dense",
+    "comm_init_all",
 ]

 # prim op with one input and one output, with no attribute
diff --git a/paddle/phi/core/platform/device/xpu/bkcl_helper.h b/paddle/phi/core/platform/device/xpu/bkcl_helper.h
index 309ec33746dd4..2271f7d861c52 100644
--- a/paddle/phi/core/platform/device/xpu/bkcl_helper.h
+++ b/paddle/phi/core/platform/device/xpu/bkcl_helper.h
@@ -33,8 +33,183 @@
 #include "xpu/bkcl.h"
 #include "xpu/runtime.h"

+#define BKCL_ID_VARNAME "BKCLID"
+
 namespace paddle {
-namespace platform {}  // namespace platform
+namespace platform {
+
+inline int GetBKCLRankID(BKCLContext_t comm) {
+  return reinterpret_cast<int *>(comm)[0];
+}
+
+inline int GetBKCLDevID(BKCLContext_t comm) {
+  return reinterpret_cast<int *>(comm)[1];
+}
+
+inline int GetBKCLNRanks(BKCLContext_t comm) {
+  return reinterpret_cast<int *>(comm)[2];
+}
+
+class BKCLGroupGuard {
+ public:
+  static std::mutex &BKCLMutex() {
+    static std::mutex mtx;
+    return mtx;
+  }
+
+  inline BKCLGroupGuard() {
+    BKCLMutex().lock();
+    PADDLE_ENFORCE_XPU_SUCCESS(bkcl_group_start());
+  }
+
+  inline ~BKCLGroupGuard() PADDLE_MAY_THROW {
+    PADDLE_ENFORCE_XPU_SUCCESS(bkcl_group_end());
+    BKCLMutex().unlock();
+  }
+};
+
+struct BKCLContext {
+  std::unique_ptr<phi::XPUContext> ctx_;
+  BKCLContext_t comm_;
+
+  explicit BKCLContext(int dev_id)
+      : ctx_(new phi::XPUContext(phi::XPUPlace(dev_id))), comm_{nullptr} {}
+
+  XPUStream stream() const { return ctx_->stream(); }
+  BKCLContext_t comm() const { return comm_; }
+
+  int device_id() const { return ctx_->GetPlace().device; }
+};
+
+struct InitBKCLPara {
+  BKCLUniqueId *bkcl_id;
+  int rank;
+  int nranks;
+  int dev_id;
+  BKCLContext_t *ctx;
+};
+
+static void *init_bkcl_context_func(void *args) {
+  struct InitBKCLPara *para = (struct InitBKCLPara *)args;
+  platform::SetXPUDeviceId(para->dev_id);
+  PADDLE_ENFORCE_XPU_SUCCESS(
+      bkcl_init_rank(para->ctx, para->rank, para->nranks, para->bkcl_id));
+  return nullptr;
+}
+
+struct BKCLContextMap {
+  std::unordered_map<int, BKCLContext> contexts_;
+  std::vector<int> order_;
+  std::vector<phi::Place> places_;
+  size_t num_trainers_;
+  size_t trainer_id_;
+  BKCLUniqueId *bkcl_id_;
+
+  explicit BKCLContextMap(const std::vector<phi::Place> &places,
+                          BKCLUniqueId *bkcl_id = nullptr,
+                          size_t num_trainers = 1,
+                          size_t trainer_id = 0) {
+    places_ = places;
+    bkcl_id_ = bkcl_id;
+    num_trainers_ = num_trainers;
+    trainer_id_ = trainer_id;
+  }
+
+  // bkcl_init_rank blocks until every rank in the ring has joined, so the
+  // per-device initialization below must run concurrently, one thread each.
+  int init() {
+    PADDLE_ENFORCE_EQ(
+        !places_.empty(),
+        true,
+        common::errors::InvalidArgument("The BKCL place should not be empty."));
+    order_.reserve(places_.size());
+    for (auto &p : places_) {
+      int dev_id = p.device;
+      order_.emplace_back(dev_id);
+      contexts_.emplace(dev_id, BKCLContext(dev_id));
+    }
+    PADDLE_ENFORCE_EQ(
+        order_.size(),
+        contexts_.size(),
+        common::errors::Unavailable("BKCLContextMap does not support two or "
+                                    "more contexts on the same device"));
+
+    std::unique_ptr<BKCLContext_t[]> comms(new BKCLContext_t[order_.size()]);
+    std::unique_ptr<InitBKCLPara[]> paras(new InitBKCLPara[order_.size()]);
+    std::unique_ptr<pthread_t[]> pids(new pthread_t[order_.size()]);
+    BKCLResult_t ret;
+    BKCLUniqueId id;
+    // With a single trainer and no external id, create a new local BKCL id.
+    if (num_trainers_ == 1 && bkcl_id_ == nullptr) {
+      ret = bkcl_get_unique_id(&id);
+      PADDLE_ENFORCE_EQ(BKCL_SUCCESS,
+                        ret,
+                        common::errors::PreconditionNotMet(
+                            "bkcl get unique id failed [%d]", ret));
+      bkcl_id_ = &id;
+    }
+    PADDLE_ENFORCE_NOT_NULL(
+        bkcl_id_,
+        common::errors::InvalidArgument("The BKCL id should not be null."));
+    {
+      int nranks = num_trainers_ * order_.size();
+      for (size_t i = 0; i < order_.size(); ++i) {
+        int rank;
+        if (order_.size() > 1) {
+          rank = trainer_id_ * order_.size() + i;
+        } else {
+          rank = trainer_id_;
+        }
+
+        paras[i].rank = rank;
+        paras[i].nranks = nranks;
+        paras[i].dev_id = order_[i];
+        paras[i].bkcl_id = bkcl_id_;
+        paras[i].ctx = &comms[i];
+        PADDLE_ENFORCE_EQ(pthread_create(&pids[i],
+                                         nullptr,
+                                         init_bkcl_context_func,
+                                         reinterpret_cast<void *>(&paras[i])),
+                          0,
+                          common::errors::External("pthread_create failed"));
+      }
+      for (size_t i = 0; i < order_.size(); i++) {
+        pthread_join(pids[i], nullptr);
+      }
+    }
+    int i = 0;
+    for (auto &dev_id : order_) {
+      contexts_.at(dev_id).comm_ = comms[i++];
+    }
+    return 0;
+  }
+
+  BKCLContextMap(const BKCLContextMap &other) = delete;
+  BKCLContextMap &operator=(const BKCLContextMap &other) = delete;
+
+  phi::XPUContext *DevCtx(int dev_id) const { return at(dev_id).ctx_.get(); }
+
+  phi::XPUContext *DevCtx(phi::Place p) const { return DevCtx(p.device); }
+
+  const BKCLContext &at(phi::Place p) const { return this->at(p.device); }
+
+  const BKCLContext &at(int dev_id) const { return contexts_.at(dev_id); }
+
+  void WaitAll() {
+    for (auto &p : contexts_) {
+      p.second.ctx_->Wait();
+    }
+  }
+};
+
+inline std::string GetFlatBKCLVarName(size_t pos) {
+  if (pos == 0) {
+    return BKCL_ID_VARNAME;
+  }
+  return string::Sprintf("%s_%d", BKCL_ID_VARNAME, static_cast<int>(pos));
+}
+
+}  // namespace platform
 }  // namespace paddle

 #endif  // PADDLE_WITH_XPU_BKCL
diff --git a/paddle/phi/infermeta/nullary.cc b/paddle/phi/infermeta/nullary.cc
index a7b6e2e717a2e..f18ed5690f0f3 100644
--- a/paddle/phi/infermeta/nullary.cc
+++ b/paddle/phi/infermeta/nullary.cc
@@ -41,6 +41,8 @@ void AssignValueInferMeta(const std::vector<int>& shape,
   out->set_dtype(dtype);
 }

+void CommInitAllInferMeta(const std::vector<int>& devices, int ring_id) {}
+
 void CreateArrayInferMeta(DataType dtype, MetaTensor* out) {
   out->set_dtype(dtype);
 }
diff --git a/paddle/phi/infermeta/nullary.h b/paddle/phi/infermeta/nullary.h
index 62cd693c2cf10..db2835aef23f7 100644
--- a/paddle/phi/infermeta/nullary.h
+++ b/paddle/phi/infermeta/nullary.h
@@ -42,6 +42,8 @@ void AssignValueInferMeta(const std::vector<int>& shape,
                           DataType dtype,
                           MetaTensor* out);

+void CommInitAllInferMeta(const std::vector<int>& devices, int ring_id);
+
 void CreateVecShapeInferMeta(const std::vector<int64_t>& shape,
                              DataType dtype,
                              MetaTensor* out);
diff --git a/paddle/phi/kernels/gpu/comm_init_all_kernel.cu b/paddle/phi/kernels/gpu/comm_init_all_kernel.cu
new file mode 100644
index 0000000000000..ade7b5a7b42f5
--- /dev/null
+++ b/paddle/phi/kernels/gpu/comm_init_all_kernel.cu
@@ -0,0 +1,42 @@
+// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+#include "glog/logging.h"
+#include "paddle/phi/core/kernel_registry.h"
+#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
+#include "paddle/phi/core/platform/collective_helper.h"
+#endif
+
+namespace phi {
+
+template <typename T, typename Context>
+void CommInitAllKernel(const Context& dev_ctx,
+                       const std::vector<int>& devices_input,
+                       int ring_id) {
+#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
+  std::vector<int> devices = devices_input;
+  if (devices.empty()) {
+    devices = phi::backends::gpu::GetSelectedDevices();
+  }
+
+  paddle::platform::NCCLCommContext::Instance().CreateAllNCCLComms(devices,
+                                                                   ring_id);
+#endif
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    comm_init_all, GPU, ALL_LAYOUT, phi::CommInitAllKernel, float) {}
diff --git a/paddle/phi/kernels/xpu/comm_init_all_kernel.cc b/paddle/phi/kernels/xpu/comm_init_all_kernel.cc
new file mode 100644
index 0000000000000..61402ba2ade51
--- /dev/null
+++ b/paddle/phi/kernels/xpu/comm_init_all_kernel.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+#include "glog/logging.h"
+#include "paddle/phi/core/kernel_registry.h"
+#if defined(PADDLE_WITH_XPU_BKCL)
+#include "paddle/phi/core/platform/collective_helper.h"
+#include "paddle/phi/core/platform/device/xpu/bkcl_helper.h"
+#endif
+
+namespace phi {
+
+template <typename T, typename Context>
+void CommInitAllKernel(const Context& dev_ctx,
+                       const std::vector<int>& devices_input,
+                       int ring_id) {
+#if defined(PADDLE_WITH_XPU_BKCL)
+  std::vector<int> devices = devices_input;
+
+  if (devices.empty()) {
+    int count = phi::backends::xpu::GetXPUDeviceCount();
+    for (int i = 0; i < count; ++i) {
+      devices.push_back(i);
+    }
+  }
+
+  if (devices.size() > 1) {
+    std::vector<phi::Place> place_list_;
+    for (size_t i = 0; i < devices.size(); ++i) {
+      auto p = phi::XPUPlace(devices[i]);
+      place_list_.push_back(p);
+    }
+
+    // Spawn one pthread per device; each calls bkcl_init_rank.
+    auto ptr = new paddle::platform::BKCLContextMap(place_list_);
+    ptr->init();
+
+    for (size_t i = 0; i < devices.size(); ++i) {
+      paddle::platform::BKCLCommContext::Instance().AssignBKCLComm(
+          ptr->contexts_.at(devices[i]).comm_,
+          devices.size(),
+          devices[i],
+          devices[i],
+          ring_id);
+
+      VLOG(0) << "bkcl communicator of rank " << devices[i] << " in ring "
+              << ring_id << " has been created on device " << devices[i];
+
+      // TODO(WorgenZhang): need release comm_map_ when quit
+      // std::call_once(once_flag_, []() {
+      //   std::atexit([]() {
+      //     platform::BKCLCommContext::Instance().ReleaseBKCLComms(); });
+      // });
+    }
+
+    VLOG(0) << "done bkcl_init_rank on all devices";
+  }
+#endif
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    comm_init_all, XPU, ALL_LAYOUT, phi::CommInitAllKernel, float) {}
diff --git a/paddle/phi/ops/yaml/inconsistent/static_ops.yaml b/paddle/phi/ops/yaml/inconsistent/static_ops.yaml
index f40590c61c91c..bee6c9a95e5fc 100644
--- a/paddle/phi/ops/yaml/inconsistent/static_ops.yaml
+++ b/paddle/phi/ops/yaml/inconsistent/static_ops.yaml
@@ -193,6 +193,16 @@
     data_type : dtype
   inplace: (input -> output)

+- op : comm_init_all
+  args : (int[] devices={}, int ring_id=0)
+  output :
+  infer_meta :
+    func : CommInitAllInferMeta
+    param : [devices, ring_id]
+  kernel :
+    func : comm_init_all
+    data_type : DataType::FLOAT32
+
 - op : dequantize_linear
   args : (Tensor x, Tensor scale, Tensor zero_point, Tensor in_accum, Tensor in_state, int quant_axis = 0, int bit_length = 8, int qmin = -128, int qmax = 127, int round_type = 0, bool is_test = true, bool only_observer = false)
   output : Tensor(y), Tensor(out_state), Tensor(out_accum), Tensor(out_scale)
diff --git a/paddle/phi/ops/yaml/legacy/static_ops.yaml b/paddle/phi/ops/yaml/legacy/static_ops.yaml
index 818901ce974f0..1b7b174321e25 100755
--- a/paddle/phi/ops/yaml/legacy/static_ops.yaml
+++ b/paddle/phi/ops/yaml/legacy/static_ops.yaml
@@ -111,6 +111,16 @@
     func : broadcast
     param: [x, root]

+- op : comm_init_all
+  args : (int[] devices={}, int ring_id=0)
+  output :
+  infer_meta :
+    func : CommInitAllInferMeta
+    param : [devices, ring_id]
+  kernel :
+    func : comm_init_all
+    data_type : DataType::FLOAT32
+
 - op : conv2d_transpose
   args : (Tensor x, Tensor filter, Tensor bias, int[] strides={1, 1}, int[] paddings={0, 0}, int[] output_padding={}, IntArray output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
   output : Tensor(out)
diff --git a/paddle/phi/ops/yaml/op_compat.yaml b/paddle/phi/ops/yaml/op_compat.yaml
index 45263bba15cbd..af814df9af4e8 100755
--- a/paddle/phi/ops/yaml/op_compat.yaml
+++ b/paddle/phi/ops/yaml/op_compat.yaml
@@ -4126,6 +4126,8 @@ outputs:
{precision : Precision, recall : Recall, f1_score : F1-Score, num_infer_chunks : NumInferChunks, num_label_chunks : NumLabelChunks, num_correct_chunks : NumCorrectChunks} +- op: comm_init_all(c_comm_init_all) + - op: crf_decoding inputs: emission: Emission
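
A minimal usage sketch, assuming the renamed op stays reachable through the legacy static-graph Block.append_op interface (the patch itself only shows the kernel and YAML side; attribute values here are illustrative and mirror the YAML defaults `(int[] devices={}, int ring_id=0)`):

    # Hedged sketch: drive the comm_init_all op from Paddle's legacy
    # static-graph API. program_guard and Block.append_op are existing
    # Paddle APIs; the op name and attrs come from the YAML above.
    import paddle

    paddle.enable_static()
    startup = paddle.static.Program()
    main = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        # An empty `devices` list means "use every visible device",
        # matching the kernel's fallback to GetSelectedDevices() on GPU
        # builds and GetXPUDeviceCount() on XPU builds.
        startup.global_block().append_op(
            type='comm_init_all',
            attrs={'devices': [], 'ring_id': 0},
        )
    # Running `startup` once (e.g. via paddle.static.Executor) would then
    # create one NCCL/BKCL communicator per device in ring 0; op_compat.yaml
    # maps the old name c_comm_init_all onto this op for compatibility.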