author     Edward Z. Yang <ezyang@mit.edu>  2018-05-01 10:28:31 -0400
committer  GitHub <noreply@github.com>      2018-05-01 10:28:31 -0400
commit     0427afadd1a2a9b0c12906664d010920b6f8c6fb (patch)
tree       8f709e814bfb406c4a78e8aa61500308a601e561 /tools
parent     24461a756a90209df45073193ebb2d310f0a830b (diff)
Make AT_ASSERT/AT_ERROR non-printf based, other tweaks (#7104)
* Make AT_ASSERT/AT_ERROR non-printf based, other tweaks
- AT_ASSERT/AT_ERROR don't take printf strings anymore; instead,
  they take a comma-separated list of things you want to print
  (bringing them in line with Caffe2's conventions).
  Instead of AT_ASSERT(x == 0, "%d is not zero", x)
  you write AT_ASSERT(x == 0, x, " is not zero")
  This is done by way of a new variadic template at::str(), which
  takes a list of arguments and concatenates their string
  representations (as rendered by operator<<); see the sketch after
  this list.
- A bunch of the demangling logic that was in Error.h has been
  moved to Error.cpp (better header hygiene). demangle has also
  been split out into its own helper function, and a new helper
  demangle_type (from Caffe2) has been added.
- A bunch of AT_ASSERT calls were converted into AT_CHECK, to more
  clearly convey which checks can be triggered by user error and
  which indicate a logic error inside ATen.
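For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of what an operator<<-based variadic str() can look like. This is illustrative only, not ATen's actual implementation; the _str helper name and the exact signatures are assumptions, while the overall behavior (concatenating the operator<< renderings of all arguments) follows the description above.

    #include <sstream>
    #include <string>

    namespace at {

    // Base case: nothing left to append.
    inline void _str(std::ostringstream&) {}

    // Recursive case: render the first argument via operator<< and recurse.
    template <typename T, typename... Args>
    void _str(std::ostringstream& ss, const T& t, const Args&... args) {
      ss << t;
      _str(ss, args...);
    }

    // Concatenate the operator<< renderings of all arguments into one string.
    template <typename... Args>
    std::string str(const Args&... args) {
      std::ostringstream ss;
      _str(ss, args...);
      return ss.str();
    }

    } // namespace at

With such a helper, at::str(x, " is not zero") produces "5 is not zero" when x is 5, which is what lets the assertion macros accept a comma-separated argument list instead of a printf format string.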
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
* CR
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
* Fix test failure.
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
* buildfix
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
* More fixes.
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
* One more fix
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
* Try harder
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Diffstat (limited to 'tools')
-rw-r--r--  tools/autograd/templates/Functions.cpp    | 12
-rw-r--r--  tools/autograd/templates/VariableType.cpp | 20
2 files changed, 14 insertions(+), 18 deletions(-)
diff --git a/tools/autograd/templates/Functions.cpp b/tools/autograd/templates/Functions.cpp
index 279f2c3012..aeae0dc792 100644
--- a/tools/autograd/templates/Functions.cpp
+++ b/tools/autograd/templates/Functions.cpp
@@ -36,14 +36,14 @@ struct IndexRangeGenerator {
 };
 
 void copy_range(variable_list& out, IndexRange range, const Tensor & t) {
-  AT_ASSERT(range.second <= out.size(), "range out of bounds");
-  AT_ASSERT(range.second - range.first == 1, "inconsistent range for Tensor output");
+  AT_ASSERT(range.second <= out.size());
+  AT_ASSERTM(range.second - range.first == 1, "inconsistent range for Tensor output");
   out[range.first] = t;
 }
 
 void copy_range(variable_list& out, IndexRange range, at::ArrayRef<Tensor> t) {
-  AT_ASSERT(range.second <= out.size(), "range out of bounds");
-  AT_ASSERT(range.second - range.first == t.size(), "inconsistent range for TensorList output");
+  AT_ASSERT(range.second <= out.size());
+  AT_ASSERTM(range.second - range.first == t.size(), "inconsistent range for TensorList output");
   std::copy(t.begin(), t.end(), out.begin() + range.first);
 }
 
@@ -972,7 +972,7 @@ Tensor logdet_backward(const Tensor & grad, const Tensor& self, const Tensor& lo
 
 Tensor slogdet_backward(const std::vector<torch::autograd::Variable> &grads,
                         const Tensor& self, const Tensor& signdet, const Tensor& logabsdet) {
-  AT_ASSERT(!grads[0].defined(), "slogdet's sign output should never have gradient");
+  AT_ASSERTM(!grads[0].defined(), "slogdet's sign output should never have gradient");
   auto signdet_val = signdet.toCDouble();
   if (signdet_val != 0 /* det != 0, invertible */) {
     return grads[1] * self.inverse().t();
@@ -1275,7 +1275,7 @@ std::tuple<Tensor, Tensor, Tensor> batchnorm_double_backward(
   if (output_mask[0] && !ggO.defined()) ggO = at::zeros_like(gO);
   if (output_mask[1] && !gG.defined()) {
-    AT_ASSERT(affine, "gamma should always be defined when it requires grad");
+    AT_ASSERTM(affine, "gamma should always be defined when it requires grad");
     gG = at::zeros_like(gamma);
   }
   if (output_mask[2] && !gI.defined()) gI = at::zeros_like(input);
diff --git a/tools/autograd/templates/VariableType.cpp b/tools/autograd/templates/VariableType.cpp
index 9846170564..6235fc8fa1 100644
--- a/tools/autograd/templates/VariableType.cpp
+++ b/tools/autograd/templates/VariableType.cpp
@@ -157,12 +157,10 @@ std::vector<at::Type*> VariableType::allTypes() {
 
 Variable & VariableType::checked_cast_variable(const Tensor & t, const char * name, int pos) {
   if (!t.defined()) {
-    AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #%d '%s'",
-             pos, name);
+    AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #", pos, " '", name, "'");
   }
   if (!isVariableType(t.type())) {
-    AT_ERROR("Expected object of type Variable but found type %s for argument #%d '%s'",
-             t.type().toString(), pos, name);
+    AT_ERROR("Expected object of type Variable but found type ", t.type().toString(), " for argument #", pos, " '", name, "'");
   }
   return as_variable_ref(const_cast<Tensor&>(t));
 }
@@ -187,14 +185,12 @@ std::vector<at::Tensor> VariableType::unpack(at::TensorList tl, const char *name
   for (size_t i = 0; i < tl.size(); ++i) {
     const auto &t = tl[i];
     if (!t.defined()) {
-      AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor at position #%d "
-               "for iterable argument #%d '%s'",
-               i, pos, name);
+      AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor at position #", i, " "
+               "for iterable argument #", pos, " '", name, "'");
     }
     if (!isVariableType(t.type())) {
-      AT_ERROR("Expected object of type Variable but found type %s at position #%d "
-               "for iterable argument #%d '%s'",
-               t.type().toString(), i, pos, name);
+      AT_ERROR("Expected object of type Variable but found type ", t.type().toString(), " at position #", i, " "
+               "for iterable argument #", pos, " '", name, "'");
     }
     ret[i] = static_cast<const Variable&>(t).data();
   }
@@ -288,8 +284,8 @@ static void check_inplace(const Tensor& tensor) {
 
 static void throw_error_out_requires_grad(const char* name) {
   AT_ERROR(
-      "%s(): functions with out=... arguments don't support automatic differentiation, "
-      "but one of the arguments requires grad.", name);
+      name, "(): functions with out=... arguments don't support automatic differentiation, "
+      "but one of the arguments requires grad.");
 }
 
 static void rebase_history(Tensor& tensor, std::shared_ptr<Function> grad_fn) {
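As a side note on how the converted call sites above hang together, here is a hedged sketch of how message-taking macros can be layered on top of a str()-style helper. The demo namespace, the DEMO_* macro names, and the use of std::runtime_error are assumptions made to keep the example self-contained; the real AT_ERROR/AT_ASSERTM also record source location and throw ATen's own error type.

    #include <sstream>
    #include <stdexcept>
    #include <string>

    namespace demo {
    // Compact stand-in for an operator<<-based str() (C++17 fold expression).
    template <typename... Args>
    std::string str(const Args&... args) {
      std::ostringstream ss;
      (ss << ... << args);
      return ss.str();
    }
    } // namespace demo

    // Hypothetical macros; they only illustrate the forwarding pattern.
    #define DEMO_ERROR(...) \
      throw std::runtime_error(demo::str(__VA_ARGS__))

    #define DEMO_ASSERTM(cond, ...)                                      \
      do {                                                               \
        if (!(cond)) {                                                   \
          throw std::runtime_error(                                      \
              demo::str("assertion `" #cond "` failed: ", __VA_ARGS__)); \
        }                                                                \
      } while (0)

    // Usage mirroring the converted call sites in the diff:
    //   DEMO_ASSERTM(range.second - range.first == 1, "inconsistent range for Tensor output");
    //   DEMO_ERROR("Expected object of type Variable but found type ", t.type().toString(),
    //              " for argument #", pos, " '", name, "'");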