This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

change _Pragma to #pragma #18379

Merged: 7 commits, May 27, 2020
Changes from 3 commits
src/io/batchify.cc: 15 changes (13 additions, 2 deletions)
@@ -37,6 +37,17 @@

namespace mxnet {
namespace io {

#ifdef _MSC_VER
#if _MSC_VER < 1925
#define omp_parallel __pragma(omp parallel for num_threads(bs))
Review comment (Member): Can you make bs an argument of the macro?

#else
#define omp_parallel _Pragma("omp parallel for num_threads(bs)")
#endif
#else
#define omp_parallel _Pragma("omp parallel for num_threads(bs)")
#endif

struct GroupBatchifyParam : public dmlc::Parameter<GroupBatchifyParam> {
mxnet::Tuple<std::intptr_t> functions;
// declare parameters
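
A possible follow-up to the review comment above, sketched here for illustration only (it is not the code merged in this PR, and the helper name MX_OMP_STRINGIZE is invented): the thread count could be passed as a macro argument instead of relying on a variable named bs being in scope at every call site. MSVC's __pragma takes an unquoted token sequence, so the parameter can be spliced in directly, while the standard _Pragma operator takes a string literal, so the clause has to be built by stringizing the substituted argument.

#define MX_OMP_STRINGIZE(x) #x

#if defined(_MSC_VER) && _MSC_VER < 1925
// Older MSVC lacks _Pragma but provides the vendor-specific __pragma keyword,
// which accepts raw tokens, so the parameter is spliced in directly.
#define omp_parallel(nthreads) __pragma(omp parallel for num_threads(nthreads))
#else
// _Pragma requires a string literal, so the clause is stringized after the
// macro argument has been substituted.
#define omp_parallel(nthreads) _Pragma(MX_OMP_STRINGIZE(omp parallel for num_threads(nthreads)))
#endif

The call sites in StackBatchify and PadBatchify would then read omp_parallel(bs) immediately before the parallel for loops.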
@@ -150,7 +161,7 @@ class StackBatchify : public BatchifyFunction {
}
int sbs = static_cast<int>(bs);
MSHADOW_TYPE_SWITCH_WITH_BOOL(dtype, DType, {
_Pragma("omp parallel for num_threads(bs)")
omp_parallel
for (int j = 0; j < sbs; ++j) {
omp_exc_.Run([&] {
// inputs[j][i].WaitToRead();
@@ -276,7 +287,7 @@ class PadBatchify : public BatchifyFunction {
DType *ptr = (*outputs)[i].data().dptr<DType>();
auto asize = ashape.Size();
int sbs = static_cast<int>(bs);
_Pragma("omp parallel for num_threads(bs)")
omp_parallel
for (int j = 0; j < sbs; ++j) {
using namespace mshadow::expr;
auto compact_shapes = CompactShapes(ashape, inputs[j][i].shape());
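
For context on why the compiler check exists at all: the guard _MSC_VER < 1925 in this diff implies that older MSVC releases do not accept the standard _Pragma("...") operator, whereas the vendor-specific __pragma(...) keyword is available there; on GCC and Clang _Pragma works as-is. Below is a minimal, self-contained sketch of the same pattern, independent of this PR's sources, that compiles with or without OpenMP enabled (without it, the pragma is simply ignored and the loop runs serially).

#include <cstdio>
#include <vector>

#if defined(_MSC_VER) && _MSC_VER < 1925
#define omp_parallel __pragma(omp parallel for num_threads(bs))
#else
#define omp_parallel _Pragma("omp parallel for num_threads(bs)")
#endif

int main() {
  const int bs = 4;  // num_threads(bs) inside the pragma refers to this variable in scope
  std::vector<int> out(100, 0);
  const int sbs = static_cast<int>(out.size());
  omp_parallel
  for (int j = 0; j < sbs; ++j) {
    out[j] = j * j;  // independent iterations, safe to run on different threads
  }
  std::printf("out[99] = %d\n", out[99]);
  return 0;
}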