From 1db3cc629ef35957c367e5ab6837138b6009f8af Mon Sep 17 00:00:00 2001
From: Miguel Varela Ramos
Date: Tue, 26 Mar 2019 11:36:55 +0100
Subject: [PATCH] Rollback dispatch patch (#603)

* Merge branch 'master' of /home/braincreator/projects/maskrcnn-benchmark with conflicts.

* rolls back the breaking AT dispatch changes (#555)

* revert accidental docker changes

* revert accidental docker changes (2)
---
 maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp          | 2 +-
 maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp               | 2 +-
 maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu         | 4 ++--
 maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu          | 4 ++--
 maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp b/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp
index cd9fde2ae..d35aedf27 100644
--- a/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp
+++ b/maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp
@@ -239,7 +239,7 @@ at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
     return output;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] {
     ROIAlignForward_cpu_kernel<scalar_t>(
         output_size,
         input.data<scalar_t>(),
diff --git a/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp b/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp
index 639ca472e..1153dea04 100644
--- a/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp
+++ b/maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp
@@ -68,7 +68,7 @@ at::Tensor nms_cpu(const at::Tensor& dets,
                    const at::Tensor& scores,
                    const float threshold) {
   at::Tensor result;
-  AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms", [&] {
+  AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] {
     result = nms_cpu_kernel<scalar_t>(dets, scores, threshold);
   });
   return result;
diff --git a/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu b/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu
index 29e7ac6ed..2ff36adee 100644
--- a/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu
+++ b/maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu
@@ -283,7 +283,7 @@ at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
     return output;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] {
     RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
         output_size,
         input.contiguous().data<scalar_t>(),
@@ -330,7 +330,7 @@ at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
     return grad_input;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
     RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
         grad.contiguous().data<scalar_t>(),
diff --git a/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu b/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu
index f79bb71dc..0b2b1758c 100644
--- a/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu
+++ b/maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu
@@ -137,7 +137,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input,
     return std::make_tuple(output, argmax);
   }
 
-  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIPool_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIPool_forward", [&] {
     RoIPoolFForward<scalar_t><<<grid, block, 0, stream>>>(
         output_size,
         input.contiguous().data<scalar_t>(),
@@ -186,7 +186,7 @@ at::Tensor ROIPool_backward_cuda(const at::Tensor& grad,
     return grad_input;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIPool_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIPool_backward", [&] {
     RoIPoolFBackward<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
         grad.contiguous().data<scalar_t>(),
diff --git a/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu b/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
index e25424e00..e0e7d3bff 100644
--- a/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
+++ b/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
@@ -128,7 +128,7 @@ at::Tensor SigmoidFocalLoss_forward_cuda(
     return losses;
   }
 
-  AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_forward", [&] {
     SigmoidFocalLossForward<scalar_t><<<grid, block, 0, stream>>>(
         losses_size,
         logits.contiguous().data<scalar_t>(),
@@ -174,7 +174,7 @@ at::Tensor SigmoidFocalLoss_backward_cuda(
     return d_logits;
  }
 
-  AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_backward", [&] {
     SigmoidFocalLossBackward<scalar_t><<<grid, block, 0, stream>>>(
         d_logits_size,
         logits.contiguous().data<scalar_t>(),
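
Context for the revert: every hunk only swaps the first argument of the dispatch macro. On the PyTorch releases this repository targeted at the time, AT_DISPATCH_FLOATING_TYPES expected the (since-deprecated) at::Type returned by tensor.type(); PR #555 had switched it to the at::ScalarType from tensor.scalar_type(), which newer releases accept, and this patch rolls that back. Below is a minimal sketch of the dispatch pattern using the restored .type() spelling; the helper add_one_cpu and its body are hypothetical and not part of maskrcnn-benchmark.

#include <ATen/ATen.h>

// Hypothetical helper: returns in + 1 for float/double tensors.
// AT_DISPATCH_FLOATING_TYPES inspects the tensor's dtype and runs the
// lambda once with `scalar_t` bound to the concrete C++ element type.
at::Tensor add_one_cpu(const at::Tensor& in) {
  at::Tensor src = in.contiguous();
  at::Tensor out = at::empty_like(src);
  // Pre-#555 spelling restored by this patch: pass the deprecated at::Type
  // from .type(); newer PyTorch releases pass .scalar_type() instead.
  AT_DISPATCH_FLOATING_TYPES(src.type(), "add_one_cpu", [&] {
    const scalar_t* src_ptr = src.data<scalar_t>();
    scalar_t* out_ptr = out.data<scalar_t>();
    for (int64_t i = 0; i < src.numel(); ++i) {
      out_ptr[i] = src_ptr[i] + static_cast<scalar_t>(1);
    }
  });
  return out;
}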