BinaryAddSubKernel.cu
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>

// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.

namespace at { namespace native {

// Elementwise functor computing a + alpha * b; alpha is captured once on the
// host and reused for every element on the device.
template<typename scalar_t>
struct AddFunctor {
  AddFunctor(scalar_t a): alpha(a) {}
  __device__ __forceinline__ scalar_t operator() (const scalar_t a, const scalar_t b) const {
    return a + alpha * b;
  }
  private:
    scalar_t alpha;
};

void add_kernel_cuda(TensorIteratorBase& iter, Scalar alpha_scalar) {
  // Dispatch on the iterator's common dtype (all types plus Half, Bool,
  // BFloat16, and complex), then launch the generic elementwise loop.
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, iter.common_dtype(), "add_cuda/sub_cuda", [&]() {
    AddFunctor<scalar_t> f(alpha_scalar.to<scalar_t>());
    gpu_kernel_with_scalars(iter, f);
  });
}

// Subtraction reuses the add kernel: a - alpha * b == a + (-alpha) * b.
static void sub_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
  add_kernel_cuda(iter, -alpha_scalar);
}

REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);

}} // namespace at::native
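
To make the functor-plus-generic-launcher pattern easier to see in isolation, here is a minimal, self-contained sketch outside ATen. Every name below (elementwise_kernel, the raw-pointer arguments, the hard-coded launch configuration) is a hypothetical illustration, not PyTorch's API; the real code path goes through TensorIterator and gpu_kernel_with_scalars, which additionally handle broadcasting, type promotion, and CPU scalar operands.

// Hypothetical standalone sketch of the same pattern; not PyTorch code.
#include <cuda_runtime.h>
#include <cstdio>

template <typename scalar_t>
struct AddFunctor {
  scalar_t alpha;
  __device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
    return a + alpha * b;
  }
};

// Generic elementwise launcher: applies any binary functor per element.
// A plain functor (rather than a __device__ lambda) also sidesteps the
// Windows linkage restriction mentioned in the NOTE above.
template <typename scalar_t, typename Functor>
__global__ void elementwise_kernel(const scalar_t* a, const scalar_t* b,
                                   scalar_t* out, int n, Functor f) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = f(a[i], b[i]);
}

int main() {
  const int n = 4;
  float ha[n] = {1, 2, 3, 4}, hb[n] = {10, 20, 30, 40}, hout[n];
  float *da, *db, *dout;
  cudaMalloc(&da, n * sizeof(float));
  cudaMalloc(&db, n * sizeof(float));
  cudaMalloc(&dout, n * sizeof(float));
  cudaMemcpy(da, ha, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(db, hb, n * sizeof(float), cudaMemcpyHostToDevice);

  // add with alpha = 2:  out = a + 2 * b
  elementwise_kernel<<<1, 64>>>(da, db, dout, n, AddFunctor<float>{2.0f});
  // sub is the same kernel with alpha negated:  out = a - 2 * b
  elementwise_kernel<<<1, 64>>>(da, db, dout, n, AddFunctor<float>{-2.0f});

  cudaMemcpy(hout, dout, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%g\n", hout[i]);  // -19, -38, -57, -76
  cudaFree(da); cudaFree(db); cudaFree(dout);
  return 0;
}

Note how subtraction needs no second kernel: a - alpha * b is just a + (-alpha) * b, which is exactly the trick sub_kernel_cuda uses by forwarding to add_kernel_cuda with -alpha_scalar.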