caffe2/operators/clip_op.cu
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/clip_op.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T cuda_min(T x, T y);
template <typename T>
__device__ T cuda_max(T x, T y);
template <>
__device__ float cuda_min(float x, float y) { return fminf(x, y); }
template <>
__device__ float cuda_max(float x, float y) { return fmaxf(x, y); }
// Disabled since we don't use it right now.
/*
template <>
__device__ double cuda_min(double x, double y) { return fmin(x, y); }
template <>
__device__ double cuda_max(double x, double y) { return fmax(x, y); }
*/

// Forward pass: clamp each element of X into [minval, maxval].
template <typename T>
__global__ void ClipKernel(const int N, const T minval, const T maxval,
                           const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = cuda_min<T>(cuda_max<T>(X[i], minval), maxval);
  }
}

// Backward pass: gradient flows through only where the forward output was
// strictly inside (minval, maxval); clamped elements receive zero gradient.
// The boolean predicate converts to T(0) or T(1) in the multiplication.
template <typename T>
__global__ void ClipGradientKernel(const int N, const T minval,
                                   const T maxval, const T* Y,
                                   const T* dY, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);
  }
}

} // namespace

template <>
bool ClipOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  CAFFE_ENFORCE_GE(X.numel(), 0);
  auto* Y = Output(0, X.sizes(), at::dtype<float>());
  // One grid-stride launch over all elements, on this op's CUDA stream.
  ClipKernel<<<
      CAFFE_GET_BLOCKS(X.numel()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      X.numel(), min_, max_, X.data<float>(), Y->template mutable_data<float>());
  return true;
}

template <>
bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
  auto& Y = Input(0);
  auto& dY = Input(1);
  CAFFE_ENFORCE_GE(Y.numel(), 0);
  CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
  auto* dX = Output(0, Y.sizes(), at::dtype<float>());
  ClipGradientKernel<<<
      CAFFE_GET_BLOCKS(Y.numel()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      Y.numel(),
      min_,
      max_,
      Y.data<float>(),
      dY.data<float>(),
      dX->template mutable_data<float>());
  return true;
}

REGISTER_CUDA_OPERATOR(Clip, ClipOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ClipGradient, ClipGradientOp<float, CUDAContext>);
} // namespace caffe2
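
For reference, CUDA_1D_KERNEL_LOOP (defined in caffe2/core/common_gpu.h) expands to a grid-stride loop. The standalone sketch below is not part of the original file: it reproduces ClipKernel's clamp semantics using only the CUDA runtime, with the stride loop written out explicitly and an arbitrary <<<1, 128>>> launch shape standing in for Caffe2's CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS heuristics. The file name clip_demo.cu is hypothetical.

// clip_demo.cu -- hypothetical standalone example; build with `nvcc clip_demo.cu`.
#include <cstdio>
#include <cuda_runtime.h>

// Same clamp as ClipKernel, with the grid-stride loop spelled out.
__global__ void clip(const int n, const float lo, const float hi,
                     const float* x, float* y) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    y[i] = fminf(fmaxf(x[i], lo), hi);
  }
}

int main() {
  const int n = 4;
  const float host_x[n] = {-2.0f, -0.5f, 0.5f, 2.0f};
  float host_y[n];
  float *x = nullptr, *y = nullptr;
  cudaMalloc(&x, n * sizeof(float));
  cudaMalloc(&y, n * sizeof(float));
  cudaMemcpy(x, host_x, n * sizeof(float), cudaMemcpyHostToDevice);
  clip<<<1, 128>>>(n, -1.0f, 1.0f, x, y);  // clamp into [-1, 1]
  cudaMemcpy(host_y, y, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) {
    printf("%g ", host_y[i]);  // prints: -1 -0.5 0.5 1
  }
  printf("\n");
  cudaFree(x);
  cudaFree(y);
  return 0;
}

Note that the elements clamped here (the -2 and 2) are exactly the positions where ClipGradientKernel would zero an incoming gradient: the saved output Y sits on the boundary of [minval, maxval] at those indices rather than strictly inside it.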