diff --git a/benchmark/python/ffi/benchmark_ffi.py b/benchmark/python/ffi/benchmark_ffi.py
index 67701020b205..01534f1c949a 100644
--- a/benchmark/python/ffi/benchmark_ffi.py
+++ b/benchmark/python/ffi/benchmark_ffi.py
@@ -92,6 +92,9 @@ def prepare_workloads():
     OpArgMngr.add_workload("diff", pool['2x2'], n=1, axis=-1)
     OpArgMngr.add_workload("nonzero", pool['2x2'])
     OpArgMngr.add_workload("tril", pool['2x2'], k=0)
+    OpArgMngr.add_workload("random.choice", pool['2'], size=(2, 2))
+    OpArgMngr.add_workload("take", pool['2'], dnp.array([1,0], dtype='int64'))
+    OpArgMngr.add_workload("clip", pool['2x2'], 0, 1)
     OpArgMngr.add_workload("expand_dims", pool['2x2'], axis=0)
     OpArgMngr.add_workload("broadcast_to", pool['2x2'], (2, 2, 2))
     OpArgMngr.add_workload("full_like", pool['2x2'], 2)
diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py
index 45a71b2daf67..e88796c8158d 100644
--- a/python/mxnet/ndarray/numpy/_op.py
+++ b/python/mxnet/ndarray/numpy/_op.py
@@ -690,9 +690,9 @@ def take(a, indices, axis=None, mode='raise', out=None):
         raise NotImplementedError(
             "function take does not support mode '{}'".format(mode))
     if axis is None:
-        return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)
+        return _api_internal.take(_npi.reshape(a, -1), indices, 0, mode, out)
     else:
-        return _npi.take(a, indices, axis, mode, out)
+        return _api_internal.take(a, indices, axis, mode, out)
 # pylint: enable=redefined-outer-name


@@ -4551,11 +4551,7 @@ def clip(a, a_min, a_max, out=None):
     """
     if a_min is None and a_max is None:
         raise ValueError('array_clip: must set either max or min')
-    if a_min is None:
-        a_min = float('-inf')
-    if a_max is None:
-        a_max = float('inf')
-    return _npi.clip(a, a_min, a_max, out=out)
+    return _api_internal.clip(a, a_min, a_max, out)


 @set_module('mxnet.ndarray.numpy')
diff --git a/python/mxnet/ndarray/numpy/random.py b/python/mxnet/ndarray/numpy/random.py
index 93bc0e8223f5..8449852a6e72 100644
--- a/python/mxnet/ndarray/numpy/random.py
+++ b/python/mxnet/ndarray/numpy/random.py
@@ -535,24 +535,16 @@ def choice(a, size=None, replace=True, p=None, ctx=None, out=None):
     """
     from ...numpy import ndarray as np_ndarray
     if ctx is None:
-        ctx = current_context()
+        ctx = str(current_context())
+    else:
+        ctx = str(ctx)
     if size == ():
         size = None
     if isinstance(a, np_ndarray):
-        ctx = None
-        if p is None:
-            indices = _npi.choice(a, a=None, size=size,
-                                  replace=replace, ctx=ctx, weighted=False)
-            return _npi.take(a, indices)
-        else:
-            indices = _npi.choice(a, p, a=None, size=size,
-                                  replace=replace, ctx=ctx, weighted=True)
-            return _npi.take(a, indices)
+        indices = _api_internal.choice(a, size, replace, p, ctx, out)
+        return _api_internal.take(a, indices, 0, 'raise', out)
     else:
-        if p is None:
-            return _npi.choice(a=a, size=size, replace=replace, ctx=ctx, weighted=False, out=out)
-        else:
-            return _npi.choice(p, a=a, size=size, replace=replace, ctx=ctx, weighted=True, out=out)
+        return _api_internal.choice(a, size, replace, p, ctx, out)


 def exponential(scale=1.0, size=None, ctx=None, out=None):
@@ -834,7 +826,7 @@ def beta(a, b, size=None, dtype=None, ctx=None):
     # use fp64 to prevent precision loss
     X = gamma(a, 1, size=size, dtype='float64', ctx=ctx)
     Y = gamma(b, 1, size=size, dtype='float64', ctx=ctx)
-    out = X/(X + Y)
+    out = X / (X + Y)
     return out.astype(dtype)
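Note: the frontend changes above switch np.take, np.clip and np.random.choice from the legacy _npi calls to the new _api_internal FFI entry points; the workloads added to benchmark_ffi.py exercise exactly these call paths. A minimal usage sketch of what the new workloads measure (array values are illustrative only; assumes a build that includes this change):

    from mxnet import np as dnp

    a = dnp.array([[1., 2.], [3., 4.]])

    # take with axis=None flattens first, i.e. the _api_internal.take(reshape(a, -1), ...) path
    print(dnp.take(a, dnp.array([1, 0], dtype='int64')))

    # clip now forwards a_min/a_max (or None) straight to the C++ handler
    print(dnp.clip(a, 0, 1))

    # choice with an ndarray `a` samples indices, then gathers them with take
    print(dnp.random.choice(dnp.array([1., 2.]), size=(2, 2)))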
diff --git a/src/api/operator/numpy/random/np_choice_op.cc b/src/api/operator/numpy/random/np_choice_op.cc
new file mode 100644
index 000000000000..fe7b54d512c8
--- /dev/null
+++ b/src/api/operator/numpy/random/np_choice_op.cc
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file np_choice_op.cc
+ * \brief Implementation of the API of functions in src/operator/numpy/random/np_choice_op.cc
+ */
+#include <mxnet/api_registry.h>
+#include <mxnet/runtime/packed_func.h>
+#include "../../utils.h"
+#include "../../../../operator/numpy/random/np_choice_op.h"
+
+namespace mxnet {
+
+MXNET_REGISTER_API("_npi.choice")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_choice");
+  nnvm::NodeAttrs attrs;
+  op::NumpyChoiceParam param;
+
+  NDArray* inputs[2];
+  int num_inputs = 0;
+
+  if (args[0].type_code() == kDLInt) {
+    param.a = args[0].operator int();
+  } else if (args[0].type_code() == kNDArrayHandle) {
+    param.a = dmlc::nullopt;
+    inputs[num_inputs] = args[0].operator mxnet::NDArray*();
+    num_inputs++;
+  }
+
+  if (args[1].type_code() == kNull) {
+    param.size = dmlc::nullopt;
+  } else {
+    if (args[1].type_code() == kDLInt) {
+      param.size = mxnet::Tuple<int64_t>(1, args[1].operator int64_t());
+    } else {
+      param.size = mxnet::Tuple<int64_t>(args[1].operator ObjectRef());
+    }
+  }
+
+  if (args[2].type_code() == kNull) {
+    param.replace = true;
+  } else {
+    param.replace = args[2].operator bool();
+  }
+
+  if (args[3].type_code() == kNull) {
+    param.weighted = false;
+  } else if (args[3].type_code() == kNDArrayHandle) {
+    param.weighted = true;
+    inputs[num_inputs] = args[3].operator mxnet::NDArray*();
+    num_inputs++;
+  }
+
+  attrs.parsed = std::move(param);
+  attrs.op = op;
+  if (args[4].type_code() != kNull) {
+    attrs.dict["ctx"] = args[4].operator std::string();
+  }
+  NDArray* out = args[5].operator mxnet::NDArray*();
+  NDArray** outputs = out == nullptr ? nullptr : &out;
+  int num_outputs = out != nullptr;
+  auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
+  if (out) {
+    *ret = PythonArg(5);
+  } else {
+    *ret = ndoutputs[0];
+  }
+});
+
+}  // namespace mxnet
diff --git a/src/api/operator/numpy/random/np_laplace_op.cc b/src/api/operator/numpy/random/np_laplace_op.cc
index 40e79017c0f2..57f770bfa376 100644
--- a/src/api/operator/numpy/random/np_laplace_op.cc
+++ b/src/api/operator/numpy/random/np_laplace_op.cc
@@ -19,7 +19,7 @@

 /*!
  * \file np_laplace_op.cc
- * \brief Implementation of the API of functions in src/operator/numpy/np_laplace_op.cc
+ * \brief Implementation of the API of functions in src/operator/numpy/random/np_laplace_op.cc
  */
 #include <mxnet/api_registry.h>
 #include <mxnet/runtime/packed_func.h>
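Note: the `_npi.choice` handler above expects the positional layout `(a, size, replace, p, ctx, out)` that the updated Python frontend now passes. A short, illustrative sketch of the dispatch paths it covers (assumes this change is applied):

    from mxnet import np as dnp

    # scalar `a`: param.a is set on the C++ side, no NDArray input is passed
    s1 = dnp.random.choice(5, size=(2, 2))

    # ndarray `a`: it arrives as an NDArray input and the frontend gathers the
    # sampled indices with take afterwards
    s2 = dnp.random.choice(dnp.array([0.1, 0.2, 0.7]), size=4)

    # weights: `p` arrives as args[3] and switches the op to weighted sampling
    s3 = dnp.random.choice(3, size=4, p=dnp.array([0.1, 0.2, 0.7]))
    print(s1, s2, s3)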
diff --git a/src/api/operator/tensor/indexing_op.cc b/src/api/operator/tensor/indexing_op.cc
new file mode 100644
index 000000000000..df194018c712
--- /dev/null
+++ b/src/api/operator/tensor/indexing_op.cc
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file indexing_op.cc
+ * \brief Implementation of the API of functions in src/operator/tensor/indexing_op.cc
+ */
+#include <mxnet/api_registry.h>
+#include <mxnet/runtime/packed_func.h>
+#include "../utils.h"
+#include "../../../operator/tensor/indexing_op.h"
+
+namespace mxnet {
+
+MXNET_REGISTER_API("_npi.take")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_take");
+  nnvm::NodeAttrs attrs;
+  op::TakeParam param;
+  NDArray* inputs[2];
+
+  if (args[0].type_code() != kNull) {
+    inputs[0] = args[0].operator mxnet::NDArray *();
+  }
+
+  if (args[1].type_code() != kNull) {
+    inputs[1] = args[1].operator mxnet::NDArray *();
+  }
+
+  if (args[2].type_code() == kDLInt) {
+    param.axis = args[2].operator int();
+  }
+
+  if (args[3].type_code() != kNull) {
+    std::string mode = args[3].operator std::string();
+    if (mode == "raise") {
+      param.mode = op::take_::kRaise;
+    } else if (mode == "clip") {
+      param.mode = op::take_::kClip;
+    } else if (mode == "wrap") {
+      param.mode = op::take_::kWrap;
+    }
+  }
+
+  attrs.parsed = param;
+  attrs.op = op;
+  SetAttrDict<op::TakeParam>(&attrs);
+
+  NDArray* out = args[4].operator mxnet::NDArray*();
+  NDArray** outputs = out == nullptr ? nullptr : &out;
+  // set the number of outputs provided by the `out` argument
+  int num_outputs = out != nullptr;
+  auto ndoutputs = Invoke(op, &attrs, 2, inputs, &num_outputs, outputs);
+  if (out) {
+    *ret = PythonArg(4);
+  } else {
+    *ret = ndoutputs[0];
+  }
+});
+
+}  // namespace mxnet
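Note: the mode strings accepted by the take handler above mirror the three values the Python frontend validates before dispatching. A quick illustrative check (assumes this change is applied):

    from mxnet import np as dnp

    a = dnp.array([1., 2., 3.])
    idx = dnp.array([0, 2, 5], dtype='int64')

    print(dnp.take(a, idx, mode='clip'))   # out-of-range index 5 is clamped to 2
    print(dnp.take(a, idx, mode='wrap'))   # index 5 wraps to 5 % 3 == 2
    print(dnp.take(a, dnp.array([1, 0], dtype='int64'), axis=0, mode='raise'))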
diff --git a/src/api/operator/tensor/matrix_op.cc b/src/api/operator/tensor/matrix_op.cc
new file mode 100644
index 000000000000..ed91b091cc39
--- /dev/null
+++ b/src/api/operator/tensor/matrix_op.cc
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file matrix_op.cc
+ * \brief Implementation of the API of functions in src/operator/tensor/matrix_op.cc
+ */
+#include <mxnet/api_registry.h>
+#include <mxnet/runtime/packed_func.h>
+#include "../utils.h"
+#include "../../../operator/tensor/matrix_op-inl.h"
+
+namespace mxnet {
+
+MXNET_REGISTER_API("_npi.clip")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_clip");
+  nnvm::NodeAttrs attrs;
+  op::ClipParam param;
+  NDArray* inputs[1];
+
+  if (args[0].type_code() != kNull) {
+    inputs[0] = args[0].operator mxnet::NDArray *();
+  }
+
+  if (args[1].type_code() != kNull) {
+    param.a_min = args[1].operator double();
+  } else {
+    param.a_min = -INFINITY;
+  }
+
+  if (args[2].type_code() != kNull) {
+    param.a_max = args[2].operator double();
+  } else {
+    param.a_max = INFINITY;
+  }
+
+  attrs.parsed = param;
+  attrs.op = op;
+  SetAttrDict<op::ClipParam>(&attrs);
+
+  NDArray* out = args[3].operator mxnet::NDArray*();
+  NDArray** outputs = out == nullptr ? nullptr : &out;
+  // set the number of outputs provided by the `out` argument
+  int num_outputs = out != nullptr;
+  auto ndoutputs = Invoke(op, &attrs, 1, inputs, &num_outputs, outputs);
+  if (out) {
+    *ret = PythonArg(3);
+  } else {
+    *ret = ndoutputs[0];
+  }
+});
+
+}  // namespace mxnet
diff --git a/src/operator/numpy/random/np_choice_op.h b/src/operator/numpy/random/np_choice_op.h
index a6a7cecfefd5..bc1e712aeba0 100644
--- a/src/operator/numpy/random/np_choice_op.h
+++ b/src/operator/numpy/random/np_choice_op.h
@@ -53,6 +53,17 @@ struct NumpyChoiceParam : public dmlc::Parameter<NumpyChoiceParam> {
     DMLC_DECLARE_FIELD(replace).set_default(true);
     DMLC_DECLARE_FIELD(weighted).set_default(false);
   }
+  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+    std::ostringstream a_s, size_s, replace_s, weighted_s;
+    a_s << a;
+    size_s << size;
+    replace_s << replace;
+    weighted_s << weighted;
+    (*dict)["a"] = a_s.str();
+    (*dict)["size"] = size_s.str();
+    (*dict)["replace"] = replace_s.str();
+    (*dict)["weighted"] = weighted_s.str();
+  }
 };

 inline bool NumpyChoiceOpType(const nnvm::NodeAttrs &attrs,
diff --git a/src/operator/tensor/indexing_op.h b/src/operator/tensor/indexing_op.h
index 2b048813a464..cd85daa80df3 100644
--- a/src/operator/tensor/indexing_op.h
+++ b/src/operator/tensor/indexing_op.h
@@ -680,6 +680,27 @@ struct TakeParam: public dmlc::Parameter<TakeParam> {
               " \"wrap\" means to wrap around."
               " \"raise\" means to raise an error when index out of range.");
   }
+
+  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+    std::ostringstream axis_s, mode_s;
+    axis_s << axis;
+    mode_s << mode;
+    (*dict)["axis"] = axis_s.str();
+    (*dict)["mode"] = mode_s.str();
+    switch (mode) {
+      case take_::kRaise:
+        (*dict)["mode"] = "raise";
+        break;
+      case take_::kClip:
+        (*dict)["mode"] = "clip";
+        break;
+      case take_::kWrap:
+        (*dict)["mode"] = "wrap";
+        break;
+      default:
+        (*dict)["mode"] = mode_s.str();
+    }
+  }
 };

 inline bool TakeOpShape(const nnvm::NodeAttrs& attrs,
diff --git a/src/operator/tensor/matrix_op-inl.h b/src/operator/tensor/matrix_op-inl.h
index 6efde79f202b..821fa8587081 100644
--- a/src/operator/tensor/matrix_op-inl.h
+++ b/src/operator/tensor/matrix_op-inl.h
@@ -1605,6 +1605,14 @@ struct ClipParam : public dmlc::Parameter<ClipParam> {
     DMLC_DECLARE_FIELD(a_max)
     .describe("Maximum value");
   }
+
+  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+    std::ostringstream a_min_s, a_max_s;
+    a_min_s << a_min;
+    a_max_s << a_max;
+    (*dict)["a_min"] = a_min_s.str();
+    (*dict)["a_max"] = a_max_s.str();
+  }
 };