Skip to content
This repository has been archived by the owner on Oct 31, 2023. It is now read-only.

Commit

Permalink
Fix dispatch breakage
Browse files Browse the repository at this point in the history
  • Loading branch information
vishwakftw committed Mar 11, 2019
1 parent 90080e6 commit 8df030c
Show file tree
Hide file tree
Showing 5 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion maskrcnn_benchmark/csrc/cpu/ROIAlign_cpu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,7 @@ at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
return output;
}

AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] {
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
ROIAlignForward_cpu_kernel<scalar_t>(
output_size,
input.data<scalar_t>(),
Expand Down
2 changes: 1 addition & 1 deletion maskrcnn_benchmark/csrc/cpu/nms_cpu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ at::Tensor nms_cpu(const at::Tensor& dets,
const at::Tensor& scores,
const float threshold) {
at::Tensor result;
AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] {
AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms", [&] {
result = nms_cpu_kernel<scalar_t>(dets, scores, threshold);
});
return result;
Expand Down
4 changes: 2 additions & 2 deletions maskrcnn_benchmark/csrc/cuda/ROIAlign_cuda.cu
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
return output;
}

AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] {
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
Expand Down Expand Up @@ -326,7 +326,7 @@ at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
return grad_input;
}

AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
Expand Down
4 changes: 2 additions & 2 deletions maskrcnn_benchmark/csrc/cuda/ROIPool_cuda.cu
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input,
return std::make_tuple(output, argmax);
}

AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIPool_forward", [&] {
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIPool_forward", [&] {
RoIPoolFForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
Expand Down Expand Up @@ -182,7 +182,7 @@ at::Tensor ROIPool_backward_cuda(const at::Tensor& grad,
return grad_input;
}

AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIPool_backward", [&] {
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIPool_backward", [&] {
RoIPoolFBackward<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
Expand Down
4 changes: 2 additions & 2 deletions maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ at::Tensor SigmoidFocalLoss_forward_cuda(
return losses;
}

AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_forward", [&] {
AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_forward", [&] {
SigmoidFocalLossForward<scalar_t><<<grid, block, 0, stream>>>(
losses_size,
logits.contiguous().data<scalar_t>(),
Expand Down Expand Up @@ -169,7 +169,7 @@ at::Tensor SigmoidFocalLoss_backward_cuda(
return d_logits;
}

AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_backward", [&] {
AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_backward", [&] {
SigmoidFocalLossBackward<scalar_t><<<grid, block, 0, stream>>>(
d_logits_size,
logits.contiguous().data<scalar_t>(),
Expand Down

2 comments on commit 8df030c

@Ziqi-Song
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi, this commit causes a gcc compilation error when executing 'python setup.py build develop' for the maskrcnn_benchmark installation.
I tried gcc v4.8.5, v5.2.0 and v7.2.0. None of them worked. The exact cause is the change from 'type' to 'scalar_type'. I finally fixed this by changing 'scalar_type' back to 'type' in each affected file.

@vishwakftw
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you please try updating to the latest PyTorch nightly as specified in the installation instructions?

Please sign in to comment.