From d42fcdbc9621e2cd74ba0cc5a856c0c948783c0a Mon Sep 17 00:00:00 2001
From: Peter Goldsborough
Date: Thu, 29 Mar 2018 13:57:18 -0700
Subject: [PATCH] Add source location information to error messages (#6059)

---
 aten/src/ATen/ATenAssert.h                 |  10 --
 aten/src/ATen/ArrayRef.h                   |   6 +-
 aten/src/ATen/CPUFixedAllocator.h          |   5 +-
 aten/src/ATen/CUDAFixedAllocator.h         |   9 +-
 aten/src/ATen/CheckGenerator.h             |   3 +-
 aten/src/ATen/Context.h                    |   5 +-
 aten/src/ATen/Dispatch.h                   | 102 +++++++++---
 aten/src/ATen/Error.h                      |  94 ++++++++++++++++
 aten/src/ATen/ExpandUtils.h                |   4 +-
 aten/src/ATen/UndefinedTensor.cpp          |  13 +--
 aten/src/ATen/UndefinedType.cpp            |  23 ++--
 aten/src/ATen/Utils.cpp                    |  20 ----
 aten/src/ATen/Utils.h                      |  17 +--
 aten/src/ATen/copy_wrapper.py              |   2 +-
 aten/src/ATen/function_wrapper.py          |   4 +-
 aten/src/ATen/native/Distributions.cpp     |   3 +-
 aten/src/ATen/native/Embedding.cpp         |   4 +-
 aten/src/ATen/native/Indexing.cpp          |  12 +--
 aten/src/ATen/native/LinearAlgebra.cpp     |   8 +-
 aten/src/ATen/native/Memory.cpp            |   5 +-
 aten/src/ATen/native/SparseMM.cpp          |   4 +-
 aten/src/ATen/native/TensorCompare.cpp     |   7 +-
 aten/src/ATen/native/TensorFactories.cpp   |  12 ++-
 aten/src/ATen/native/TensorShape.cpp       |  21 ++--
 aten/src/ATen/native/cuda/Embedding.cu     |   1 +
 aten/src/ATen/native/cuda/SparseMM.cu      |  10 +-
 aten/src/ATen/native/cudnn/RNN.cpp         |   5 +-
 aten/src/ATen/templates/TensorSparse.cpp   |   6 +-
 aten/src/ATen/test/basic.cpp               |  28 +++--
 aten/src/ATen/test/scalar_tensor_test.cpp  |   2 +-
 tools/autograd/templates/VariableType.cpp  |  16 +--
 .../templates/python_torch_functions.cpp   |   8 +-
 torch/csrc/Exceptions.h                    |   7 +-
 torch/csrc/tensor/python_tensor.cpp        |   2 +-
 torch/csrc/utils/tensor_new.cpp            |   3 +-
 35 files changed, 286 insertions(+), 195 deletions(-)
 delete mode 100644 aten/src/ATen/ATenAssert.h
 create mode 100644 aten/src/ATen/Error.h
 delete mode 100644 aten/src/ATen/Utils.cpp

diff --git a/aten/src/ATen/ATenAssert.h b/aten/src/ATen/ATenAssert.h
deleted file mode 100644
index 3e840fe2f1378a..00000000000000
--- a/aten/src/ATen/ATenAssert.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#include "ATenGeneral.h"
-
-namespace at {
-
-#define AT_ASSERT(cond, ...) if (! (cond) ) { at::runtime_error(__VA_ARGS__); }
-
-[[noreturn]]
-AT_API void runtime_error(const char *format, ...);
-
-}
diff --git a/aten/src/ATen/ArrayRef.h b/aten/src/ATen/ArrayRef.h
index fe425adfa5c9d2..156d7505b18516 100644
--- a/aten/src/ATen/ArrayRef.h
+++ b/aten/src/ATen/ArrayRef.h
@@ -14,10 +14,12 @@
 // removed a bunch of slice variants for simplicity...
 
 #pragma once
-#include
+
+#include
+
 #include
+#include
 #include
-#include "ATenAssert.h"
 
 namespace at {
 /// ArrayRef - Represent a constant reference to an array (0 or more elements
diff --git a/aten/src/ATen/CPUFixedAllocator.h b/aten/src/ATen/CPUFixedAllocator.h
index 43baec6a54f978..c7caea5608dec6 100644
--- a/aten/src/ATen/CPUFixedAllocator.h
+++ b/aten/src/ATen/CPUFixedAllocator.h
@@ -1,6 +1,7 @@
 #pragma once
 
 #include "TH/TH.h"
+#include "ATen/Error.h"
 
 // This file creates a fake allocator that just throws exceptions if
 // it is actually used.
@@ -11,11 +12,11 @@
 namespace at {
 
 static cpu_fixed_malloc(void *, ptrdiff_t) {
-  runtime_error("attempting to resize a tensor view of an external blob");
+  AT_ERROR("attempting to resize a tensor view of an external blob");
 }
 
 static cpu_fixed_realloc(void *, void*, ptrdiff_t) {
-  runtime_error("attempting to resize a tensor view of an external blob");
+  AT_ERROR("attempting to resize a tensor view of an external blob");
 }
 
 static cpu_fixed_free(void * state, void * allocation) {
diff --git a/aten/src/ATen/CUDAFixedAllocator.h b/aten/src/ATen/CUDAFixedAllocator.h
index cb80adfd752a82..0d2f6584feb71a 100644
--- a/aten/src/ATen/CUDAFixedAllocator.h
+++ b/aten/src/ATen/CUDAFixedAllocator.h
@@ -1,6 +1,7 @@
 #pragma once
 
 #include "THC/THC.h"
+#include "ATen/Error.h"
 
 // This file creates a fake allocator that just throws exceptions if
 // it is actually used.
@@ -11,11 +12,11 @@
 namespace at {
 
 static cuda_fixed_malloc(void *, void**, size_t, cudaStream_t) {
-  runtime_error("attempting to resize a tensor view of an external blob");
+  AT_ERROR("attempting to resize a tensor view of an external blob");
 }
 
 static cpu_fixed_realloc(void*, void**, size_t, size_t, cudaStream_t) {
-  runtime_error("attempting to resize a tensor view of an external blob");
+  AT_ERROR("attempting to resize a tensor view of an external blob");
 }
 
 static cuda_fixed_free(void * state, void * allocation) {
@@ -25,11 +26,11 @@ static cuda_fixed_free(void * state, void * allocation) {
 }
 
 static cuda_fixed_emptyCache(void*) {
-  runtime_error("?? attempting to resize a tensor view of an external blob");
+  AT_ERROR("?? attempting to resize a tensor view of an external blob");
 }
 
 static cuda_fixed_cacheInfo(void*, int, size_t*, size_t*) {
-  runtime_error("?? attempting to resize a tensor view of an external blob");
+  AT_ERROR("?? attempting to resize a tensor view of an external blob");
 }
diff --git a/aten/src/ATen/CheckGenerator.h b/aten/src/ATen/CheckGenerator.h
index ec7a23d94e49b7..cdb6b5a3f51bf8 100644
--- a/aten/src/ATen/CheckGenerator.h
+++ b/aten/src/ATen/CheckGenerator.h
@@ -1,5 +1,6 @@
 #pragma once
 
+#include "ATen/Error.h"
 #include "ATen/Generator.h"
 #include "ATen/Utils.h"
 
@@ -11,7 +12,7 @@ static inline T * check_generator(Generator * expr, Generator * defaultValue) {
     expr = defaultValue;
   if(auto result = dynamic_cast<T*>(expr))
     return result;
-  runtime_error("Expected a '%s' but found '%s'", typeid(T).name(), typeid(expr).name());
+  AT_ERROR("Expected a '%s' but found '%s'", typeid(T).name(), typeid(expr).name());
 }
 
 } // namespace at
diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h
index 95c227b66fd17c..7504506397db50 100644
--- a/aten/src/ATen/Context.h
+++ b/aten/src/ATen/Context.h
@@ -5,6 +5,7 @@
 #include "ATen/Generator.h"
 #include "ATen/Type.h"
 #include "ATen/Utils.h"
+#include "ATen/Error.h"
 #include
 #include
 
@@ -32,7 +33,7 @@ class AT_API Context {
       auto & undef = type_registry[static_cast<int>(Backend::Undefined)][static_cast<int>(ScalarType::Undefined)];
       if (undef) return *undef;
     }
-    runtime_error("%s%sType is not enabled.",toString(p),toString(s));
+    AT_ERROR("%s%sType is not enabled.",toString(p),toString(s));
   }
   return *type;
 }
@@ -40,7 +41,7 @@ class AT_API Context {
   initCUDAIfNeeded(p);
   auto & generator = generator_registry[static_cast<int>(p)];
   if(!generator)
-    runtime_error("%s backend type not enabled.",toString(p));
+    AT_ERROR("%s backend type not enabled.",toString(p));
   return *generator;
 }
 bool hasMKL() const;
diff --git a/aten/src/ATen/Dispatch.h b/aten/src/ATen/Dispatch.h
index 9b7ef68bb8dff2..c42c2f818cc049 100644
--- a/aten/src/ATen/Dispatch.h
+++ b/aten/src/ATen/Dispatch.h
@@ -1,6 +1,6 @@
 #pragma once
 
-#include
+#include
 #include
 #include
 
@@ -10,62 +10,58 @@
     return __VA_ARGS__(); \
   }
 
-#define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
-  [&] { \
-    const at::Type& the_type = TYPE; \
-    switch (the_type.scalarType()) { \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Double, double, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
-      default: \
-        at::runtime_error( \
-            "%s not implemented for '%s'", (NAME), the_type.toString()); \
-    } \
+#define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
+  [&] { \
+    const at::Type& the_type = TYPE; \
+    switch (the_type.scalarType()) { \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Double, double, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
+      default: \
+        AT_ERROR("%s not implemented for '%s'", (NAME), the_type.toString()); \
+    } \
   }()
 
-#define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) \
-  [&] { \
-    const at::Type& the_type = TYPE; \
-    switch (the_type.scalarType()) { \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Double, double, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Half, Half, __VA_ARGS__) \
-      default: \
-        at::runtime_error( \
-            "%s not implemented for '%s'", (NAME), the_type.toString()); \
-    } \
+#define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) \
+  [&] { \
+    const at::Type& the_type = TYPE; \
+    switch (the_type.scalarType()) { \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Double, double, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Half, Half, __VA_ARGS__) \
+      default: \
+        AT_ERROR("%s not implemented for '%s'", (NAME), the_type.toString()); \
+    } \
  }()
 
-#define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) \
-  [&] { \
-    const at::Type& the_type = TYPE; \
-    switch (the_type.scalarType()) { \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Byte, uint8_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Char, int8_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Double, double, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Int, int32_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Long, int64_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Short, int16_t, __VA_ARGS__) \
-      default: \
-        at::runtime_error( \
-            "%s not implemented for '%s'", (NAME), the_type.toString()); \
-    } \
+#define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) \
+  [&] { \
+    const at::Type& the_type = TYPE; \
+    switch (the_type.scalarType()) { \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Byte, uint8_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Char, int8_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Double, double, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Int, int32_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Long, int64_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Short, int16_t, __VA_ARGS__) \
+      default: \
+        AT_ERROR("%s not implemented for '%s'", (NAME), the_type.toString()); \
+    } \
  }()
 
-#define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) \
-  [&] { \
-    const at::Type& the_type = TYPE; \
-    switch (the_type.scalarType()) { \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Byte, uint8_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Char, int8_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Double, double, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Int, int32_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Long, int64_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Short, int16_t, __VA_ARGS__) \
-      AT_PRIVATE_CASE_TYPE(at::ScalarType::Half, Half, __VA_ARGS__) \
-      default: \
-        at::runtime_error( \
-            "%s not implemented for '%s'", (NAME), the_type.toString()); \
-    } \
+#define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) \
+  [&] { \
+    const at::Type& the_type = TYPE; \
+    switch (the_type.scalarType()) { \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Byte, uint8_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Char, int8_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Double, double, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Int, int32_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Long, int64_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Short, int16_t, __VA_ARGS__) \
+      AT_PRIVATE_CASE_TYPE(at::ScalarType::Half, Half, __VA_ARGS__) \
+      default: \
+        AT_ERROR("%s not implemented for '%s'", (NAME), the_type.toString()); \
+    } \
  }()
diff --git a/aten/src/ATen/Error.h b/aten/src/ATen/Error.h
new file mode 100644
index 00000000000000..7e130bcd2bf479
--- /dev/null
+++ b/aten/src/ATen/Error.h
@@ -0,0 +1,94 @@
+#pragma once
+
+#include <ATen/ATenGeneral.h> // for AT_API
+
+#include <cstdarg>
+#include <cstdio>
+#include <exception>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include <stdint.h>
+
+namespace at {
+namespace detail {
+/// A tiny implementation of static `all_of`.
+template <bool...>
+struct pack;
+template <bool... values>
+struct all_of : std::is_same<pack<values..., true>, pack<true, values...>> {};
+
+/// A printf wrapper that returns an std::string.
+inline std::string format(const char* format_string, ...) {
+  static constexpr size_t kMaximumStringLength = 4096;
+  char buffer[kMaximumStringLength];
+
+  va_list format_args;
+  va_start(format_args, format_string);
+  vsnprintf(buffer, sizeof(buffer), format_string, format_args);
+  va_end(format_args);
+
+  return buffer;
+}
+
+/// Represents a location in source code (for debugging).
+struct SourceLocation {
+  std::string toString() const {
+    return format("%s at %s:%d", function, file, line);
+  }
+
+  const char* function;
+  const char* file;
+  uint32_t line;
+};
+} // namespace detail
+
+/// The primary ATen error class.
+/// Provides a complete error message with source location information via
+/// `what()`, and a more concise message via `what_without_location()`. Should
+/// primarily be used with the `AT_ERROR` macro.
+struct AT_API Error : public std::exception {
+  template <typename... FormatArgs>
+  Error(
+      detail::SourceLocation source_location,
+      const char* format_string,
+      FormatArgs&&... format_args)
+      : what_without_location_(detail::format(
+            format_string,
+            std::forward<FormatArgs>(format_args)...)),
+        what_(
+            what_without_location_ + " (" + source_location.toString() + ")") {
+    // NOTE: A "literal type"
+    // (http://en.cppreference.com/w/cpp/concept/LiteralType) could also be a
+    // constexpr struct, so it's not 100% guaranteed that the `printf` call
+    // inside `format` is safe, but it will catch 99.9% of all errors we'll run
+    // into, such as passing `std::string`.
+    static_assert(
+        detail::all_of<std::is_literal_type<FormatArgs>::value...>::value,
+        "arguments to `format` must be literal types!");
+  }
+
+  /// Returns the complete error message including the source location.
+  const char* what() const noexcept override {
+    return what_.c_str();
+  }
+
+  /// Returns only the error message string, without source location.
+  const char* what_without_location() const noexcept {
+    return what_without_location_.c_str();
+  }
+
+ private:
+  std::string what_without_location_;
+  std::string what_;
+};
+} // namespace at
+
+#define AT_ERROR(...) \
+  throw at::Error({__func__, __FILE__, __LINE__}, __VA_ARGS__)
+
+#define AT_ASSERT(cond, ...) \
+  if (!(cond)) {             \
+    AT_ERROR(__VA_ARGS__);   \
+  }
diff --git a/aten/src/ATen/ExpandUtils.h b/aten/src/ATen/ExpandUtils.h
index 0d3463727c2c3c..8a8223e1367235 100644
--- a/aten/src/ATen/ExpandUtils.h
+++ b/aten/src/ATen/ExpandUtils.h
@@ -1,6 +1,8 @@
 #pragma once
 
 #include "ATen/Tensor.h"
+#include "ATen/Error.h"
+
 #include
 #include
 #include
@@ -14,7 +16,7 @@ std::tuple<std::vector<int64_t>, std::vector<int64_t> > inferExpandGeometry(cons
 inline void check_defined(std::initializer_list<std::reference_wrapper<Tensor>> tensors, const char *api_name) {
   for (auto& t : tensors) {
     if (!t.get().defined()) {
-      runtime_error("%s(...) called with an undefined Tensor", api_name);
+      AT_ERROR("%s(...) called with an undefined Tensor", api_name);
     }
   }
 }
diff --git a/aten/src/ATen/UndefinedTensor.cpp b/aten/src/ATen/UndefinedTensor.cpp
index 52bd909c9d062c..9c9e989417ac1a 100644
--- a/aten/src/ATen/UndefinedTensor.cpp
+++ b/aten/src/ATen/UndefinedTensor.cpp
@@ -1,5 +1,6 @@
 #include "ATen/UndefinedTensor.h"
 #include "ATen/Context.h"
+#include "ATen/Error.h"
 
 namespace at {
 
@@ -13,28 +14,28 @@ const char * UndefinedTensor::toString() const {
 }
 
 IntList UndefinedTensor::sizes() const {
-  runtime_error("sizes() called on undefined Tensor");
+  AT_ERROR("sizes() called on undefined Tensor");
 }
 
 int64_t UndefinedTensor::dim() const {
-  runtime_error("dim() called on undefined Tensor");
+  AT_ERROR("dim() called on undefined Tensor");
 }
 
 const char * UndefinedTensor::typeString() {
   return "UndefinedType";
 }
 void * UndefinedTensor::unsafeGetTH(bool retain) {
-  runtime_error("unsafeGetTH(bool retain) called on undefined Tensor");
+  AT_ERROR("unsafeGetTH(bool retain) called on undefined Tensor");
 }
 std::unique_ptr<Storage> UndefinedTensor::storage() {
-  runtime_error("storage() called on undefined Tensor");
+  AT_ERROR("storage() called on undefined Tensor");
 }
 
 IntList UndefinedTensor::strides() const {
-  runtime_error("strides() called on undefined Tensor");
+  AT_ERROR("strides() called on undefined Tensor");
 }
 Scalar UndefinedTensor::localScalar() {
-  runtime_error("localScalar() called on undefined Tensor");
+  AT_ERROR("localScalar() called on undefined Tensor");
 }
 
 UndefinedTensor UndefinedTensor::_singleton;
diff --git a/aten/src/ATen/UndefinedType.cpp b/aten/src/ATen/UndefinedType.cpp
index 24422c661c6f17..8b3f31ff5c4349 100644
--- a/aten/src/ATen/UndefinedType.cpp
+++ b/aten/src/ATen/UndefinedType.cpp
@@ -1,4 +1,5 @@
 #include "ATen/UndefinedType.h"
+#include "ATen/Error.h"
 
 namespace at {
 
@@ -15,25 +16,25 @@ bool UndefinedType::is_sparse() const { return false; }
 bool UndefinedType::is_distributed() const { return false; }
 
 std::unique_ptr<Storage> UndefinedType::storage() const {
-  runtime_error("storage not defined for UndefinedType");
+  AT_ERROR("storage not defined for UndefinedType");
 }
 std::unique_ptr<Storage> UndefinedType::storage(size_t size) const {
-  runtime_error("storage(size_t) not defined for UndefinedType");
+  AT_ERROR("storage(size_t) not defined for UndefinedType");
 }
 std::unique_ptr<Storage> UndefinedType::storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const {
-  runtime_error("storageFromBlob not defined for UndefinedType");
+  AT_ERROR("storageFromBlob not defined for UndefinedType");
 }
 std::unique_ptr<Storage> UndefinedType::unsafeStorageFromTH(void * th_pointer, bool retain) const {
-  runtime_error("unsafeStorageFromTH not defined for UndefinedType");
+  AT_ERROR("unsafeStorageFromTH not defined for UndefinedType");
 }
 std::unique_ptr<Storage> UndefinedType::storageWithAllocator(int64_t size, std::unique_ptr<Allocator> allocator) const {
-  runtime_error("storageWithAllocator not defined for UndefinedType");
+  AT_ERROR("storageWithAllocator not defined for UndefinedType");
 }
 Tensor UndefinedType::unsafeTensorFromTH(void * th_pointer, bool retain) const {
-  runtime_error("unsafeTensorFromTH not defined for UndefinedType");
+  AT_ERROR("unsafeTensorFromTH not defined for UndefinedType");
 }
 std::unique_ptr<Generator> UndefinedType::generator() const {
-  runtime_error("generator not defined for UndefinedType");
+  AT_ERROR("generator not defined for UndefinedType");
 }
 
 const char * UndefinedType::toString() const {
@@ -44,20 +45,20 @@ TypeID UndefinedType::ID() const {
 }
 
 std::size_t UndefinedType::elementSizeInBytes() const {
-  runtime_error("elementSizeInBytes not defined for UndefinedType");
+  AT_ERROR("elementSizeInBytes not defined for UndefinedType");
 }
 
 Type & UndefinedType::toBackend(Backend b) const {
   if (b == Backend::Undefined) {
     return Type::toBackend(b);
   }
-  runtime_error("toBackend not implemented for UndefinedType to non-UndefinedType");
+  AT_ERROR("toBackend not implemented for UndefinedType to non-UndefinedType");
 }
 Type & UndefinedType::toScalarType(ScalarType s) const {
   if (s == ScalarType::Undefined) {
     return Type::toScalarType(s);
   }
-  runtime_error("toScalarType not implemented for UndefinedType to non-UndefinedType");
+  AT_ERROR("toScalarType not implemented for UndefinedType to non-UndefinedType");
 }
 
 const char * UndefinedType::typeString() {
@@ -65,7 +66,7 @@ const char * UndefinedType::typeString() {
 }
 
 Tensor & UndefinedType::s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const {
-  runtime_error("s_copy not defined for UndefinedType");
+  AT_ERROR("s_copy not defined for UndefinedType");
 }
 
 }
diff --git a/aten/src/ATen/Utils.cpp b/aten/src/ATen/Utils.cpp
deleted file mode 100644
index fbd84eb25f7a98..00000000000000
--- a/aten/src/ATen/Utils.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-#include "ATen/Utils.h"
-#include
-#include
-#include
-
-namespace at {
-
-void runtime_error(const char *format, ...) {
-  static const size_t ERROR_BUF_SIZE = 1024;
-  char error_buf[ERROR_BUF_SIZE];
-
-  va_list fmt_args;
-  va_start(fmt_args, format);
-  vsnprintf(error_buf, ERROR_BUF_SIZE, format, fmt_args);
-  va_end(fmt_args);
-
-  throw std::runtime_error(error_buf);
-}
-
-} // at
diff --git a/aten/src/ATen/Utils.h b/aten/src/ATen/Utils.h
index d87a46a5d66022..6bb93ee4fa77a9 100644
--- a/aten/src/ATen/Utils.h
+++ b/aten/src/ATen/Utils.h
@@ -1,19 +1,20 @@
 #pragma once
 
-#include "ArrayRef.h"
-#include "ATenGeneral.h"
-#include "UndefinedTensor.h"
+#include "ATen/ATenGeneral.h"
+#include "ATen/ArrayRef.h"
+#include "ATen/Error.h"
+#include "ATen/UndefinedTensor.h"
+
 #include
 #include
 #include
-#include "ATenAssert.h"
 
 namespace at {
 
 template <typename T, typename Base>
 static inline T* checked_cast_storage(Base* expr, const char * name, int pos) {
   if (typeid(*expr) != typeid(T))
-    runtime_error("Expected object of type %s but found type %s for argument #%d '%s'",
+    AT_ERROR("Expected object of type %s but found type %s for argument #%d '%s'",
       T::typeString(),expr->type().toString(),pos,name);
   return static_cast<T*>(expr);
 }
@@ -24,7 +25,7 @@ inline T* checked_cast_tensor(Base* expr, const char * name, int pos, bool allow
     return nullptr;
   }
   if (typeid(*expr) != typeid(T))
-    runtime_error("Expected object of type %s but found type %s for argument #%d '%s'",
+    AT_ERROR("Expected object of type %s but found type %s for argument #%d '%s'",
       T::typeString(),expr->type().toString(),pos,name);
   return static_cast<T*>(expr);
 }
@@ -39,7 +40,7 @@ static inline std::vector tensor_list_checked_cast(ArrayRef tensors,
     if (result) {
       casted[i] = result->tensor;
     } else {
-      runtime_error("Expected a Tensor of type %s but found a type %s for sequence element %u "
+      AT_ERROR("Expected a Tensor of type %s but found a type %s for sequence element %u "
                     " in sequence argument at position #%d '%s'",
                     T::typeString(),expr->type().toString(),i,pos,name);
@@ -59,7 +60,7 @@ std::array<int64_t, N> check_intlist(ArrayRef<int64_t> list, const char * name,
     return res;
   }
   if (list.size() != N) {
-    runtime_error("Expected a list of %zd ints but got %zd for argument #%d '%s'",
+    AT_ERROR("Expected a list of %zd ints but got %zd for argument #%d '%s'",
       N, list.size(), pos, name);
   }
   std::copy_n(list.begin(), N, res.begin());
diff --git a/aten/src/ATen/copy_wrapper.py b/aten/src/ATen/copy_wrapper.py
index 78914d3cfff312..40b0624b6aff7d 100644
--- a/aten/src/ATen/copy_wrapper.py
+++ b/aten/src/ATen/copy_wrapper.py
@@ -51,7 +51,7 @@
     switch (src.type().ID()) {
       ${copy_body}
       default:
-        runtime_error("copy does not support %s to %s copy.", src.type().toString(), toString());
+        AT_ERROR("copy does not support %s to %s copy.", src.type().toString(), toString());
         break;
     }
     self.pImpl->setScalar(src.pImpl->isScalar());
diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py
index a1a98a39e5ae1b..76aab3d7ba624f 100644
--- a/aten/src/ATen/function_wrapper.py
+++ b/aten/src/ATen/function_wrapper.py
@@ -63,7 +63,7 @@ def TypedDict(name, attrs, total=True):  # type: ignore
 """)
 TYPE_METHOD_DEFINITION_ABSTRACT = CodeTemplate("""\
 ${return_type} Type::${method_prefix_derived}${api_name}(${type_method_formals}) const {
-    runtime_error("${method_prefix_derived}${api_name} is not implemented for type %s", toString());
+    AT_ERROR("${method_prefix_derived}${api_name} is not implemented for type %s", toString());
 }
 """)
 TYPE_METHOD_DECLARATION_CONCRETE = CodeTemplate("""\
@@ -130,7 +130,7 @@ def TypedDict(name, attrs, total=True):  # type: ignore
 }""")
 ZERO_DIM_ONLY = CodeTemplate("""\
-runtime_error("${api_name} only supports a 0-dimensional ${check_name} tensor, but got tensor " +AT_ERROR("${api_name} only supports a 0-dimensional ${check_name} tensor, but got tensor " "with %" PRId64 " dimension(s)", ${check_name}.dim()); """) diff --git a/aten/src/ATen/native/Distributions.cpp b/aten/src/ATen/native/Distributions.cpp index 31a969704740b0..c418e30830a959 100644 --- a/aten/src/ATen/native/Distributions.cpp +++ b/aten/src/ATen/native/Distributions.cpp @@ -1,6 +1,7 @@ #include "ATen/ATen.h" #include "ATen/CPUApplyUtils.h" #include "ATen/Dispatch.h" +#include "ATen/Error.h" #include "ATen/ExpandUtils.h" #include "ATen/NativeFunctions.h" @@ -196,7 +197,7 @@ Tensor _standard_gamma_grad_cpu(const Tensor& self, const Tensor& output) { } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { - runtime_error("_standard_gamma_grad is not implemented for CUDA types"); + AT_ERROR("_standard_gamma_grad is not implemented for CUDA types"); } Tensor _s_poisson_cpu(const Tensor& lambda, Generator *gen) { diff --git a/aten/src/ATen/native/Embedding.cpp b/aten/src/ATen/native/Embedding.cpp index 9e344d0c373d58..7672257e6f00df 100644 --- a/aten/src/ATen/native/Embedding.cpp +++ b/aten/src/ATen/native/Embedding.cpp @@ -35,7 +35,6 @@ Tensor embedding(const Tensor & weight, const Tensor & indices, Tensor embedding_backward( const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) { - if (sparse) { return at::embedding_sparse_backward( grad, indices, num_weights, padding_idx, scale_grad_by_freq); @@ -55,7 +54,8 @@ Tensor embedding_sparse_backward( // TODO: implement scale_grad_by_freq if (scale_grad_by_freq) { - runtime_error("embedding_backward: scale_grad_by_freq not supported with sparse gradients"); + AT_ERROR( + "embedding_backward: scale_grad_by_freq not supported with sparse gradients"); } Tensor indices = indices_; diff --git a/aten/src/ATen/native/Indexing.cpp b/aten/src/ATen/native/Indexing.cpp index f6f72a59af661c..1197bf27870227 100644 --- a/aten/src/ATen/native/Indexing.cpp +++ b/aten/src/ATen/native/Indexing.cpp @@ -229,7 +229,7 @@ static std::tuple makeLinearIndex(Tensor self, TensorList orig) Tensor index(const Tensor & self, TensorList indices) { if (indices.size() > (size_t)self.dim()) { - runtime_error("too many indices for tensor of dimension %d (got %d)", + AT_ERROR("too many indices for tensor of dimension %d (got %d)", (int)self.dim(), (int)indices.size()); } @@ -240,7 +240,7 @@ Tensor index(const Tensor & self, TensorList indices) { Tensor & index_put_(Tensor & self, TensorList indices, const Tensor & value) { if (indices.size() > (size_t)self.dim()) { - runtime_error("too many indices for tensor of dimension %d (got %d)", + AT_ERROR("too many indices for tensor of dimension %d (got %d)", (int)self.dim(), (int)indices.size()); } @@ -254,23 +254,23 @@ Tensor & index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Ten dim = maybe_wrap_dim(dim, self.dim()); if (index.dim() >= 2) { - runtime_error( + AT_ERROR( "index_copy_(): Index should have dimension 1 or 0 (got %d)", (int)index.dim()); } int64_t numIndices = index.numel(); if (source.dim() == 0 && numIndices != 1) { - runtime_error( + AT_ERROR( "index_copy_(): When source is scalar, index should have one element (got %d)", (int)numIndices); } if (source.dim() > 0 && numIndices != source.size(dim)) { - runtime_error( + AT_ERROR( "index_copy_(): Number of indices (%d) should be equal to source.size(dim) (%d)", 
(int)numIndices, (int)source.size(dim)); } if (index.type().scalarType() != ScalarType::Long) { - runtime_error("index_copy_(): Expected LongTensor for index"); + AT_ERROR("index_copy_(): Expected LongTensor for index"); } // Check that source and destination slices have the same size diff --git a/aten/src/ATen/native/LinearAlgebra.cpp b/aten/src/ATen/native/LinearAlgebra.cpp index 63d7b9691a909c..9dee3393b0213a 100644 --- a/aten/src/ATen/native/LinearAlgebra.cpp +++ b/aten/src/ATen/native/LinearAlgebra.cpp @@ -98,7 +98,7 @@ std::tuple slogdet(const Tensor& self) { static void check_1d(const Tensor& t, const char* arg, const char* fn) { if (t.dim() != 1) { - runtime_error("%s: Expected 1-D argument %s, but got %d-D", fn, arg, t.dim()); + AT_ERROR("%s: Expected 1-D argument %s, but got %d-D", fn, arg, t.dim()); } } @@ -173,10 +173,10 @@ Tensor& addr_out(Tensor &result, const Tensor& self, const Tensor& vec1, const T Tensor dot(const Tensor& self, const Tensor& tensor) { if (self.dim() != 1) { - runtime_error("Expected argument self to have 1 dimension, but has %d", self.dim()); + AT_ERROR("Expected argument self to have 1 dimension, but has %d", self.dim()); } if (tensor.dim() != 1) { - runtime_error("Expected argument tensor to have 1 dimension, but has %d", tensor.dim()); + AT_ERROR("Expected argument tensor to have 1 dimension, but has %d", tensor.dim()); } return self._dot(tensor); } @@ -273,7 +273,7 @@ Tensor matmul(const Tensor & tensor1, const Tensor & tensor2) { return at::_unsafe_view(output, output_shape); } - runtime_error("both arguments to matmul need to be at least 1D, but they are %dD and %dD", + AT_ERROR("both arguments to matmul need to be at least 1D, but they are %dD and %dD", dim_tensor1, dim_tensor2); } diff --git a/aten/src/ATen/native/Memory.cpp b/aten/src/ATen/native/Memory.cpp index 8bb50a988f9b28..c6913e6d00be78 100644 --- a/aten/src/ATen/native/Memory.cpp +++ b/aten/src/ATen/native/Memory.cpp @@ -1,13 +1,14 @@ #include "ATen/ATen.h" -#include "ATen/PinnedMemoryAllocator.h" +#include "ATen/Error.h" #include "ATen/NativeFunctions.h" +#include "ATen/PinnedMemoryAllocator.h" namespace at { namespace native { Tensor pin_memory(const Tensor& self) { if (self.type().backend() != kCPU) { - runtime_error("cannot pin '%s' only CPU memory can be pinned", self.type().toString()); + AT_ERROR("cannot pin '%s' only CPU memory can be pinned", self.type().toString()); } auto allocator = std::unique_ptr(new PinnedMemoryAllocator()); auto tensor = self.type().tensorWithAllocator(self.sizes(), self.strides(), std::move(allocator)); diff --git a/aten/src/ATen/native/SparseMM.cpp b/aten/src/ATen/native/SparseMM.cpp index 77e9addd1bd41b..169b99f93ccb7d 100644 --- a/aten/src/ATen/native/SparseMM.cpp +++ b/aten/src/ATen/native/SparseMM.cpp @@ -19,7 +19,7 @@ namespace native { template void sspaddmm_TH_dispatch(Tensor & result, Scalar beta, const Tensor& self, Scalar alpha, const Tensor& mat1, const Tensor& mat2) { - runtime_error("sspaddmm NYI for types %s %s %s", + AT_ERROR("sspaddmm NYI for types %s %s %s", self.type().toString(), mat1.type().toString(), mat2.type().toString()); } @@ -66,7 +66,7 @@ Tensor& _sspaddmm_out_cpu( // sparse, sparse, sparse, dense, real, real -> sparse Tensor& _sspaddmm_out_only_sparse(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) { - runtime_error("tensor.sspaddmm(...) can only be called on sparse tensors"); + AT_ERROR("tensor.sspaddmm(...) 
can only be called on sparse tensors"); return result; } diff --git a/aten/src/ATen/native/TensorCompare.cpp b/aten/src/ATen/native/TensorCompare.cpp index 6752966679f9d4..10efc9b3079b44 100644 --- a/aten/src/ATen/native/TensorCompare.cpp +++ b/aten/src/ATen/native/TensorCompare.cpp @@ -1,6 +1,7 @@ #include "ATen/ATen.h" #include "ATen/CPUApplyUtils.h" #include "ATen/Dispatch.h" +#include "ATen/Error.h" #include "ATen/ExpandUtils.h" #include "ATen/NativeFunctions.h" @@ -37,7 +38,7 @@ bool allclose(const Tensor& self, const Tensor& other, double rtol, double atol) bool is_nonzero(const Tensor& self) { if (self.numel() != 1) { - runtime_error("bool value of Tensor with more than one value is ambiguous"); + AT_ERROR("bool value of Tensor with more than one value is ambiguous"); } Scalar localScalar = self.pImpl->localScalar(); if (localScalar.isFloatingPoint()) { @@ -45,12 +46,12 @@ bool is_nonzero(const Tensor& self) { } else if (localScalar.isIntegral()){ return localScalar.to() != 0; } - runtime_error("expected non-Tensor backed scalar"); + AT_ERROR("expected non-Tensor backed scalar"); } Tensor where(const Tensor& condition, const Tensor& self, const Tensor& other) { if (condition.type().scalarType() != ScalarType::Byte) { - runtime_error("Expected condition to have ScalarType Byte, but got ScalarType %s", + AT_ERROR("Expected condition to have ScalarType Byte, but got ScalarType %s", toString(condition.type().scalarType())); } Tensor b_condition, b_self, b_other; diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp index 3a84c94a2dee85..01bba34ad85c21 100644 --- a/aten/src/ATen/native/TensorFactories.cpp +++ b/aten/src/ATen/native/TensorFactories.cpp @@ -1,10 +1,12 @@ #include "ATen/ATen.h" -#include "ATen/NativeFunctions.h" -#include "TH/THRandom.h" -#include "ATen/CheckGenerator.h" #include "ATen/CPUGenerator.h" +#include "ATen/CheckGenerator.h" #include "ATen/Dispatch.h" +#include "ATen/Error.h" +#include "ATen/NativeFunctions.h" #include "ATen/ScalarType.h" +#include "TH/THRandom.h" + #include #include @@ -102,7 +104,7 @@ Tensor& eye_out_cpu(Tensor& result, int64_t n, int64_t m) { Tensor full(const Type& dtype, IntList size, Scalar fill_value) { if (dtype.is_sparse()) { - at::runtime_error("full(...) is not implemented for sparse types, got: %s", dtype.toString()); + AT_ERROR("full(...) is not implemented for sparse types, got: %s", dtype.toString()); } auto result = dtype.tensor(size); return result.fill_(fill_value); @@ -110,7 +112,7 @@ Tensor full(const Type& dtype, IntList size, Scalar fill_value) { Tensor& full_out(Tensor& result, IntList size, Scalar fill_value) { if (result.is_sparse()) { - at::runtime_error("full(...) is not implemented for sparse types, got: %s", result.type().toString()); + AT_ERROR("full(...) 
is not implemented for sparse types, got: %s", result.type().toString()); } result.resize_(size); return result.fill_(fill_value); diff --git a/aten/src/ATen/native/TensorShape.cpp b/aten/src/ATen/native/TensorShape.cpp index 681f29146f1714..0fc1b2d704d7b6 100644 --- a/aten/src/ATen/native/TensorShape.cpp +++ b/aten/src/ATen/native/TensorShape.cpp @@ -1,4 +1,5 @@ #include "ATen/ATen.h" +#include "ATen/Error.h" #include "ATen/ExpandUtils.h" #include "ATen/NativeFunctions.h" #include "ATen/WrapDimUtils.h" @@ -13,7 +14,7 @@ static void check_cat_no_zero_dim(TensorList tensors) { for(size_t i = 0; i < tensors.size(); ++i) { auto& t = tensors[i]; if (t.dim() == 0) { - runtime_error("zero-dimensional tensor (at position %zu) cannot be concatenated", i); + AT_ERROR("zero-dimensional tensor (at position %zu) cannot be concatenated", i); } } } @@ -75,10 +76,10 @@ Tensor narrow(const Tensor& self, int64_t dim, int64_t start, int64_t length) { AT_ASSERT(self.dim() > 0, "narrow() cannot be applied to a 0-dim tensor."); auto cur_size = self.size(dim); if (start < 0 || start >= cur_size) { - runtime_error("start out of range"); + AT_ERROR("start out of range"); } if (length <= 0 || start > cur_size - length) { - runtime_error("length out of range"); + AT_ERROR("length out of range"); } return at::native::slice(self, dim, start, start + length, 1); } @@ -86,7 +87,7 @@ Tensor narrow(const Tensor& self, int64_t dim, int64_t start, int64_t length) { Tensor permute(const Tensor& self, IntList dims) { auto nDims = self.dim(); if (dims.size() != (size_t)nDims) { - runtime_error("number of dims don't match in permute"); + AT_ERROR("number of dims don't match in permute"); } auto oldSizes = self.sizes(); auto oldStrides = self.strides(); @@ -96,7 +97,7 @@ Tensor permute(const Tensor& self, IntList dims) { for (int64_t i = 0; i < nDims; i++) { auto dim = maybe_wrap_dim(dims[i], nDims); if (seen[dim]) { - runtime_error("repeated dim in permute"); + AT_ERROR("repeated dim in permute"); } seen[dim] = true; newSizes[i] = oldSizes[dim]; @@ -107,7 +108,7 @@ Tensor permute(const Tensor& self, IntList dims) { Tensor repeat(const Tensor& self, IntList repeats) { if (repeats.size() < (size_t)self.dim()) { - runtime_error("Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor"); + AT_ERROR("Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor"); } // Add new leading dimensions to the tensor if the @@ -149,7 +150,7 @@ static std::vector infer_size(IntList shape, int64_t numel) { } else if (shape[dim] >= 0) { newsize *= shape[dim]; } else { - runtime_error("invalid shape dimension %zd", shape[dim]); + AT_ERROR("invalid shape dimension %zd", shape[dim]); } } @@ -214,7 +215,7 @@ compute_stride(const Tensor& self, IntList newshape) { Tensor reshape(const Tensor& self, IntList proposed_shape) { if (self.type().is_sparse()) { - runtime_error("reshape is not implemented for sparse tensors"); + AT_ERROR("reshape is not implemented for sparse tensors"); } auto shape = infer_size(proposed_shape, self.numel()); if (auto stride = compute_stride(self, shape)) { @@ -361,7 +362,7 @@ Tensor& stack_out(Tensor& result, TensorList tensors, int64_t dim) { static inline Tensor & sparse_transpose_(Tensor & self, int64_t dim0, int64_t dim1) { int64_t ndimI = self._indices().size(0); if (dim0 >= ndimI || dim1 >= ndimI) { - runtime_error( + AT_ERROR( "sparse transpose_: transposed dimensions must be sparse ", "Got nDimI: %llu, d0: %llu, d1: %llu", (long long)ndimI, (long 
@@ -404,7 +405,7 @@ Tensor & transpose_(Tensor & self, int64_t dim0, int64_t dim1) {
 
 Tensor & t_(Tensor & self) {
   if (self.ndimension() != 2) {
-    runtime_error("t_() expects a 2D tensor, but self is %llu",
+    AT_ERROR("t_() expects a 2D tensor, but self is %llu",
       (long long)self.ndimension());
   }
   return self.transpose_(0, 1);
diff --git a/aten/src/ATen/native/cuda/Embedding.cu b/aten/src/ATen/native/cuda/Embedding.cu
index 2dfa00ddbd88fb..5ca7f153e56187 100644
--- a/aten/src/ATen/native/cuda/Embedding.cu
+++ b/aten/src/ATen/native/cuda/Embedding.cu
@@ -1,6 +1,7 @@
 #include "ATen/ATen.h"
 #include "ATen/TensorUtils.h"
 #include "ATen/NativeFunctions.h"
+#include "ATen/Error.h"
 
 #include "ATen/cuda/AccumulateType.cuh"
 #include "ATen/cuda/CUDATensorMethods.cuh"
diff --git a/aten/src/ATen/native/cuda/SparseMM.cu b/aten/src/ATen/native/cuda/SparseMM.cu
index b44f7babcb50ef..8096e4bb8a23fb 100644
--- a/aten/src/ATen/native/cuda/SparseMM.cu
+++ b/aten/src/ATen/native/cuda/SparseMM.cu
@@ -1,13 +1,11 @@
 #include "ATen/ATen.h"
+#include "ATen/Error.h"
 #include "ATen/NativeFunctions.h"
 
-namespace at {
-namespace native {
-
+namespace at { namespace native {
 Tensor& _sspaddmm_out_cuda(Tensor& result, const Tensor& self,
     const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
-  runtime_error("NYI: CUDA sspaddmm is not implemented");
+  AT_ERROR("NYI: CUDA sspaddmm is not implemented");
   return result;
 }
-
-}}
+}} // namespace at::native
diff --git a/aten/src/ATen/native/cudnn/RNN.cpp b/aten/src/ATen/native/cudnn/RNN.cpp
index ca71001bfab2a0..6d2c5c3259cb3a 100644
--- a/aten/src/ATen/native/cudnn/RNN.cpp
+++ b/aten/src/ATen/native/cudnn/RNN.cpp
@@ -1,7 +1,8 @@
 #include
-#include
 #include
+#include
 #include
+#include
 
 #if !AT_CUDNN_ENABLED()
 
@@ -364,7 +365,7 @@ namespace {
       case CUDNN_RNN_TANH:
         return 2;
       default:
-        at::runtime_error("unknown cuDNN RNN mode %d", mode);
+        AT_ERROR("unknown cuDNN RNN mode %d", mode);
     }
   }
diff --git a/aten/src/ATen/templates/TensorSparse.cpp b/aten/src/ATen/templates/TensorSparse.cpp
index 7e88d4fe87afcc..71bb3ea832151c 100644
--- a/aten/src/ATen/templates/TensorSparse.cpp
+++ b/aten/src/ATen/templates/TensorSparse.cpp
@@ -1,10 +1,10 @@
 // included as 'TensorDenseOrSparse' in TensorDerived.cpp
 IntList ${Tensor}::strides() const {
-  runtime_error("Sparse tensors do not have strides.");
+  AT_ERROR("Sparse tensors do not have strides.");
 }
 Scalar ${Tensor}::localScalar() {
-  runtime_error("NYI localScalar() on sparse tensors.");
+  AT_ERROR("NYI localScalar() on sparse tensors.");
 }
 std::unique_ptr<Storage> ${Tensor}::storage() {
-  runtime_error("storage() is not implemented for %s", type().toString());
+  AT_ERROR("storage() is not implemented for %s", type().toString());
 }
diff --git a/aten/src/ATen/test/basic.cpp b/aten/src/ATen/test/basic.cpp
index 4988eccc372766..864feabd4ec179 100644
--- a/aten/src/ATen/test/basic.cpp
+++ b/aten/src/ATen/test/basic.cpp
@@ -16,7 +16,7 @@ extern "C" void THFloatTensor_fill(THFloatTensor *, float v);
 
 using namespace at;
-
+using Catch::Matchers::StartsWith;
 
 static void test(Type & type) {
   SECTION( "resize" ) {
@@ -225,8 +225,7 @@ static void test(Type & type) {
     std::string expect = "1e-07 *";
     REQUIRE(s.str().substr(0,expect.size()) == expect);
   }
-  {
-    // Indexing by Scalar
+  SECTION("indexing by Scalar") {
     Tensor tensor = CPU(kInt).arange(0, 10);
     Tensor one = CPU(kInt).ones({1});
    for (int64_t i = 0; i < tensor.numel(); ++i) {
@@ -244,18 +243,27 @@ static void test(Type & type) {
     for (int8_t i = 0; i < tensor.numel(); ++i) {
       REQUIRE(tensor[i].equal(one * i));
     }
-    REQUIRE_THROWS_WITH(tensor[Scalar(3.14)].equal(one), "Can only index tensors with integral scalars (got CPUDoubleType)");
+    REQUIRE_THROWS_WITH(
+        tensor[Scalar(3.14)].equal(one),
+        StartsWith(
+            "Can only index tensors with integral scalars (got CPUDoubleType)"));
   }
-  {
-    // Indexing by zero-dim tensor
+  SECTION("indexing by zero-dim tensor") {
     Tensor tensor = CPU(kInt).arange(0, 10);
     Tensor one = CPU(kInt).ones({});
     for (int i = 0; i < tensor.numel(); ++i) {
       REQUIRE(tensor[one * i].equal(one * i));
     }
-    REQUIRE_THROWS_WITH(tensor[CPU(kFloat).ones({}) * 3.14].equal(one), "Can only index tensors with integral scalars (got CPUFloatType)");
-    REQUIRE_THROWS_WITH(tensor[Tensor()].equal(one), "Can only index with tensors that are defined");
-    REQUIRE_THROWS_WITH(tensor[CPU(kInt).ones({2, 3, 4})].equal(one), "Can only index with tensors that are scalars (zero-dim)");
+    REQUIRE_THROWS_WITH(
+        tensor[CPU(kFloat).ones({}) * 3.14].equal(one),
+        StartsWith(
+            "Can only index tensors with integral scalars (got CPUFloatType)"));
+    REQUIRE_THROWS_WITH(
+        tensor[Tensor()].equal(one),
+        StartsWith("Can only index with tensors that are defined"));
+    REQUIRE_THROWS_WITH(
+        tensor[CPU(kInt).ones({2, 3, 4})].equal(one),
+        StartsWith("Can only index with tensors that are scalars (zero-dim)"));
   }
 }
 
@@ -272,5 +280,3 @@ TEST_CASE( "basic tests GPU", "[cuda]" ) {
     test(CUDA(kFloat));
   }
 }
-
-
diff --git a/aten/src/ATen/test/scalar_tensor_test.cpp b/aten/src/ATen/test/scalar_tensor_test.cpp
index df5dcbc5143f29..8b8c87598db998 100644
--- a/aten/src/ATen/test/scalar_tensor_test.cpp
+++ b/aten/src/ATen/test/scalar_tensor_test.cpp
@@ -17,7 +17,7 @@ using namespace at;
     fn; \
     _passed = true; \
     els; \
-  } catch (std::runtime_error &e) { \
+  } catch (std::exception &e) { \
     REQUIRE(!_passed); \
     catc; \
   } \
diff --git a/tools/autograd/templates/VariableType.cpp b/tools/autograd/templates/VariableType.cpp
index d3ed48a61000bc..1b9d8860e86687 100644
--- a/tools/autograd/templates/VariableType.cpp
+++ b/tools/autograd/templates/VariableType.cpp
@@ -156,11 +156,11 @@ std::vector VariableType::allTypes() {
 
 Variable & VariableType::checked_cast_variable(const Tensor & t, const char * name, int pos) {
   if (!t.defined()) {
-    runtime_error("Expected a Tensor of type Variable but found an undefined Tensor for argument #%d '%s'",
+    AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #%d '%s'",
       pos, name);
   }
   if (!isVariableType(t.type())) {
-    runtime_error("Expected object of type Variable but found type %s for argument #%d '%s'",
+    AT_ERROR("Expected object of type Variable but found type %s for argument #%d '%s'",
       t.type().toString(), pos, name);
   }
   return as_variable_ref(const_cast<Tensor&>(t));
@@ -186,12 +186,12 @@ std::vector VariableType::unpack(at::TensorList tl, const char *name
   for (size_t i = 0; i < tl.size(); ++i) {
     const auto &t = tl[i];
     if (!t.defined()) {
-      runtime_error("Expected a Tensor of type Variable but found an undefined Tensor at position #%d "
+      AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor at position #%d "
                     "for iterable argument #%d '%s'",
                     i, pos, name);
     }
     if (!isVariableType(t.type())) {
-      runtime_error("Expected object of type Variable but found type %s at position #%d "
+      AT_ERROR("Expected object of type Variable but found type %s at position #%d "
                     "for iterable argument #%d '%s'",
                     t.type().toString(), i, pos, name);
     }
@@ -280,13 +280,13 @@ static void check_no_requires_grad(const Tensor& tensor, const char* name) {
 static void check_inplace(const Tensor& tensor) {
   auto& var = static_cast<const Variable&>(tensor);
   if (var.requires_grad() && var.is_leaf() && GradMode::is_enabled()) {
-    at::runtime_error(
+    AT_ERROR(
       "a leaf Variable that requires grad has been used in an in-place operation.");
   }
 }
 
 static void throw_error_out_requires_grad(const char* name) {
-  at::runtime_error(
+  AT_ERROR(
       "%s(): functions with out=... arguments don't support automatic differentiation, "
       "but one of the arguments requires grad.", name);
 }
@@ -385,7 +385,7 @@ Tensor & VariableType::s_copy_(Tensor & self, const Tensor & src, bool non_block
 Tensor & VariableType::resize_(Tensor & self, IntList size) const {
   auto& self_ = unpack(self, "self", 0);
   if (as_variable_ref(self).requires_grad()) {
-    at::runtime_error("cannot resize variables that require grad");
+    AT_ERROR("cannot resize variables that require grad");
   }
   baseType->resize_(self_, size);
   return self;
@@ -395,7 +395,7 @@ Tensor & VariableType::resize_as_(Tensor & self, const Tensor & the_template) co
   auto& self_ = unpack(self, "self", 0);
   auto& the_template_ = unpack(the_template, "the_template", 1);
   if (as_variable_ref(self).requires_grad()) {
-    at::runtime_error("cannot resize variables that require grad");
+    AT_ERROR("cannot resize variables that require grad");
   }
   baseType->resize_as_(self_, the_template_);
   return self;
diff --git a/tools/autograd/templates/python_torch_functions.cpp b/tools/autograd/templates/python_torch_functions.cpp
index 8af1ab82a49f46..b86cd39c5893eb 100644
--- a/tools/autograd/templates/python_torch_functions.cpp
+++ b/tools/autograd/templates/python_torch_functions.cpp
@@ -32,8 +32,10 @@ static Tensor set_requires_grad(Tensor self, bool requires_grad) {
 
 static void check_out_type_matches(Tensor result, const at::Type &type) {
   if (result.type() != type) {
-    at::runtime_error("type corresponding to %s does not match type of out parameter (%s)",
-                      type.toString(), result.type().toString());
+    AT_ERROR(
+        "type corresponding to %s does not match type of out parameter (%s)",
+        type.toString(),
+        result.type().toString());
   }
 }
 
@@ -94,7 +96,7 @@ static PyObject * THPVariable__promote_types(PyObject* self, PyObject* args, PyO
   const at::Type& t1 = r.type(0);
   const at::Type& t2 = r.type(1);
   if (t1.backend() != t2.backend()) {
-    at::runtime_error("_promote_types only supports types with the same backends. Got %s and %s.",
+    AT_ERROR("_promote_types only supports types with the same backends. Got %s and %s.",
       at::toString(t1.backend()), at::toString(t2.backend()));
   }
   ScalarType promoted = at::promoteTypes(t1.scalarType(), t2.scalarType());
diff --git a/torch/csrc/Exceptions.h b/torch/csrc/Exceptions.h
index 28cafce7269cea..5651edbff53939 100644
--- a/torch/csrc/Exceptions.h
+++ b/torch/csrc/Exceptions.h
@@ -6,6 +6,7 @@
 
 #ifndef NO_PYTHON
 
+#include "ATen/Error.h"
 #include "THP_export.h"
 #include "torch/csrc/utils/object_ptr.h"
 #include "torch/csrc/utils/auto_gil.h"
@@ -16,11 +17,15 @@
 #define END_HANDLE_TH_ERRORS_RET(retval)                           \
   } catch (python_error &e) {                                      \
     return retval;                                                 \
+  } catch (const at::Error &e) {                                   \
+    auto msg = torch::processErrorMsg(e.what_without_location());  \
+    PyErr_SetString(PyExc_RuntimeError, msg.c_str());              \
+    return retval;                                                 \
   } catch (torch::PyTorchError &e) {                               \
     auto msg = torch::processErrorMsg(e.what());                   \
     PyErr_SetString(e.python_type(), msg.c_str());                 \
     return retval;                                                 \
-  } catch (std::exception &e) {                                    \
+  } catch (const std::exception &e) {                              \
    auto msg = torch::processErrorMsg(e.what());                    \
     PyErr_SetString(PyExc_RuntimeError, msg.c_str());              \
     return retval;                                                 \
diff --git a/torch/csrc/tensor/python_tensor.cpp b/torch/csrc/tensor/python_tensor.cpp
index ffbed54e7fb543..5321dc901821af 100644
--- a/torch/csrc/tensor/python_tensor.cpp
+++ b/torch/csrc/tensor/python_tensor.cpp
@@ -150,7 +150,7 @@ static const char* get_module(Backend backend) {
     case kCUDA: return "torch.cuda";
     case kSparseCPU: return "torch.sparse";
     case kSparseCUDA: return "torch.cuda.sparse";
-    default: runtime_error("invalid backend: %s", toString(backend));
+    default: AT_ERROR("invalid backend: %s", toString(backend));
   }
 }
diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp
index 67f5376c799916..a6e1c329756efb 100644
--- a/torch/csrc/utils/tensor_new.cpp
+++ b/torch/csrc/utils/tensor_new.cpp
@@ -2,6 +2,7 @@
 #include "tensor_new.h"
 
 #include
+#include
 #include
 
 #include "torch/csrc/DynamicTypes.h"
@@ -143,7 +144,7 @@ static ScalarType infer_scalar_type(PyObject *obj) {
     }
     return *scalarType;
   }
-  at::runtime_error("Could not infer dtype of %s", Py_TYPE(obj)->tp_name);
+  AT_ERROR("Could not infer dtype of %s", Py_TYPE(obj)->tp_name);
 }
 
 static void recursive_store(char* data, IntList sizes, IntList strides, int64_t dim,