From 174b186eab738c349559cf3ef85f1dea6c802c64 Mon Sep 17 00:00:00 2001
From: vishwakftw
Date: Mon, 11 Mar 2019 21:26:31 +0530
Subject: [PATCH] Fix dispatch breakage

---
 maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp          | 2 +-
 maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp               | 2 +-
 maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu         | 4 ++--
 maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu          | 4 ++--
 maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp b/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp
index d35aedf27..cd9fde2ae 100644
--- a/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp
+++ b/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp
@@ -239,7 +239,7 @@ at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
     return output;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
     ROIAlignForward_cpu_kernel<scalar_t>(
         output_size,
         input.data<scalar_t>(),
diff --git a/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp b/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp
index 1153dea04..639ca472e 100644
--- a/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp
+++ b/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp
@@ -68,7 +68,7 @@ at::Tensor nms_cpu(const at::Tensor& dets,
                    const at::Tensor& scores,
                    const float threshold) {
   at::Tensor result;
-  AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] {
+  AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms", [&] {
     result = nms_cpu_kernel<scalar_t>(dets, scores, threshold);
   });
   return result;
diff --git a/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu b/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu
index 1142fb375..170771aa8 100644
--- a/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu
+++ b/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu
@@ -280,7 +280,7 @@ at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
     return output;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
     RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
         output_size,
         input.contiguous().data<scalar_t>(),
@@ -326,7 +326,7 @@ at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
     return grad_input;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
     RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
         grad.contiguous().data<scalar_t>(),
diff --git a/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu b/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu
index 8f072ffc2..cef3beaa4 100644
--- a/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu
+++ b/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu
@@ -134,7 +134,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input,
     return std::make_tuple(output, argmax);
   }
 
-  AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIPool_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIPool_forward", [&] {
     RoIPoolFForward<scalar_t><<<grid, block, 0, stream>>>(
         output_size,
         input.contiguous().data<scalar_t>(),
@@ -182,7 +182,7 @@ at::Tensor ROIPool_backward_cuda(const at::Tensor& grad,
     return grad_input;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIPool_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIPool_backward", [&] {
     RoIPoolFBackward<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
         grad.contiguous().data<scalar_t>(),
diff --git a/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu b/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
index 7d40767bb..cd9b4c96b 100644
--- a/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
+++ b/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
@@ -125,7 +125,7 @@ at::Tensor SigmoidFocalLoss_forward_cuda(
     return losses;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_forward", [&] {
     SigmoidFocalLossForward<scalar_t><<<grid, block, 0, stream>>>(
         losses_size,
         logits.contiguous().data<scalar_t>(),
@@ -169,7 +169,7 @@ at::Tensor SigmoidFocalLoss_backward_cuda(
     return d_logits;
  }
 
-  AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_backward", [&] {
    SigmoidFocalLossBackward<scalar_t><<<grid, block, 0, stream>>>(
        d_logits_size,
        logits.contiguous().data<scalar_t>(),
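
The change above is mechanical: every AT_DISPATCH_FLOATING_TYPES call site
passes the tensor's at::ScalarType via scalar_type() instead of the value
returned by the deprecated type(), which newer PyTorch dispatch macros no
longer accept. Below is a minimal sketch of the idiom the patch migrates to,
assuming a LibTorch/ATen build contemporary with the patch; scale_cpu is a
hypothetical example function, not part of maskrcnn_benchmark.

    #include <ATen/ATen.h>

    // Multiplies every element of a float/double tensor by `factor`,
    // dispatching on the tensor's dtype at runtime.
    at::Tensor scale_cpu(const at::Tensor& input, double factor) {
      at::Tensor input_c = input.contiguous();  // keep the buffer alive
      at::Tensor output = at::empty_like(input_c);
      // scalar_type() yields an at::ScalarType, which is what the dispatch
      // macros expect; the deprecated type() caused the build breakage this
      // patch fixes.
      AT_DISPATCH_FLOATING_TYPES(input_c.scalar_type(), "scale_cpu", [&] {
        // Inside the lambda, the macro defines scalar_t as float or double.
        const scalar_t* in = input_c.data<scalar_t>();
        scalar_t* out = output.data<scalar_t>();
        for (int64_t i = 0; i < input_c.numel(); ++i) {
          out[i] = in[i] * static_cast<scalar_t>(factor);
        }
      });
      return output;
    }

The macro expands to a switch over the dtype, instantiating the lambda once
per floating-point type and raising an error for any other dtype, so e.g.
scale_cpu(at::rand({4}), 2.0) dispatches to the float instantiation.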