author     gchanan <gregchanan@gmail.com>  2018-01-12 14:26:38 -0500
committer  GitHub <noreply@github.com>     2018-01-12 14:26:38 -0500
commit     eb857ec36760eac9db02f9d0cd6426a1415f3718 (patch)
tree       4795d67ecbd97bbf0eb902dcc01efb4865533976
parent     a14dd69be825158680c9b7ca213ac451ace9fdf6 (diff)
Introduce a (non-public) autograd scalar method and improve printing (#4586)

* Specialize Variable printing and always print device for GPU tensors/Variables.
* Introduce a (non-public) _scalar_sum() method for autograd scalar testing.
-rw-r--r--  tools/autograd/templates/python_variable_methods.cpp | 27
-rw-r--r--  torch/_tensor_str.py                                  | 16
-rw-r--r--  torch/autograd/variable.py                            | 20
-rw-r--r--  torch/csrc/autograd/python_variable.cpp               |  4
-rw-r--r--  torch/csrc/autograd/python_variable.h                 |  3
5 files changed, 59 insertions(+), 11 deletions(-)
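
In user-facing terms, the commit changes what printing looks like: Variables now build their own footer with a Python-tuple-style size, and CUDA tensors/Variables always include their device. A rough sketch of the intended output on a hypothetical CUDA build (element values and exact spacing will vary):

import torch
from torch.autograd import Variable

v = Variable(torch.randn(2, 3))
print(v)
# Variable containing:
#  ... (element values) ...
# [torch.FloatTensor of size (2,3)]

# On a CUDA build the footer always names the device:
# print(v.cuda())  ->  ... [torch.cuda.FloatTensor of size (2,3) (GPU 0)]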
diff --git a/tools/autograd/templates/python_variable_methods.cpp b/tools/autograd/templates/python_variable_methods.cpp
index c324b4d121..93513876a3 100644
--- a/tools/autograd/templates/python_variable_methods.cpp
+++ b/tools/autograd/templates/python_variable_methods.cpp
@@ -496,6 +496,32 @@ static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwa
END_HANDLE_TH_ERRORS
}
+// FixMe: remove when scalars fully supported
+inline PyObject* _wrap_scalar(at::Tensor tensor) {
+ if (!tensor.sizes().equals({1})) {
+ throw std::runtime_error("tried to wrap scalar of non-scalar size");
+ }
+ auto v = Variable(std::move(tensor));
+ v.data().squeeze_();
+ return THPVariable_Wrap(v, true);
+}
+
+static PyObject * THPVariable__scalar_sum(PyObject* self, PyObject* args, PyObject* kwargs)
+{
+ HANDLE_TH_ERRORS
+ static PythonArgParser parser({
+ "sum()",
+ });
+ auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
+ PyObject* parsed_args[3];
+ auto r = parser.parse(args, kwargs, parsed_args);
+ if (r.idx == 0) {
+ return _wrap_scalar(dispatch_sum(self_));
+ }
+ Py_RETURN_NONE;
+ END_HANDLE_TH_ERRORS
+}
+
// generated methods start here
${py_methods}
@@ -552,6 +578,7 @@ PyMethodDef variable_methods[] = {
{"stride", (PyCFunction)THPVariable_stride, METH_VARARGS | METH_KEYWORDS, NULL},
{"tolist", (PyCFunction)THPVariable_tolist, METH_NOARGS, NULL},
{"type", (PyCFunction)THPVariable_type, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"_scalar_sum", (PyCFunction)THPVariable__scalar_sum, METH_VARARGS | METH_KEYWORDS, NULL},
${py_method_defs}
{NULL}
};
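
The net effect of this hunk on the Python side is a private Variable._scalar_sum() method that returns a zero-dimensional (scalar) Variable, opting out of the usual scalar guard via _wrap_scalar. A hypothetical test sketch (the method is non-public and intended only for autograd scalar testing, so behavior outside that context may differ):

import torch
from torch.autograd import Variable

v = Variable(torch.ones(2, 2), requires_grad=True)
s = v._scalar_sum()  # wraps the 1-element sum, squeezed to a 0-dim Variable
assert s.dim() == 0  # a plain v.sum() still yields a size-(1,) Variable here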
diff --git a/torch/_tensor_str.py b/torch/_tensor_str.py
index 1d93fd10e5..83113b2cdd 100644
--- a/torch/_tensor_str.py
+++ b/torch/_tensor_str.py
@@ -288,9 +288,9 @@ def _vector_str(self):
'\n')
-def _str(self):
+def _str(self, include_footer=True):
if self.ndimension() == 0:
- return '[{} with no dimension]\n'.format(torch.typename(self))
+ strt = ''
elif self.ndimension() == 1:
strt = _vector_str(self)
elif self.ndimension() == 2:
@@ -298,9 +298,11 @@ def _str(self):
else:
strt = _tensor_str(self)
- size_str = 'x'.join(str(size) for size in self.size())
- device_str = '' if not self.is_cuda else \
- ' (GPU {})'.format(self.get_device())
- strt += '[{} of size {}{}]\n'.format(torch.typename(self),
- size_str, device_str)
+ if include_footer:
+ size_str = 'x'.join(str(size) for size in self.size())
+ size_str_prefix = 'of size ' if self.ndimension() > 0 else 'with no dimension'
+ device_str = '' if not self.is_cuda else \
+ ' (GPU {})'.format(self.get_device())
+ strt += '[{} {}{}{}]\n'.format(torch.typename(self), size_str_prefix,
+ size_str, device_str)
return '\n' + strt
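
The rewritten footer logic is small enough to read in isolation. A standalone sketch of what _str now emits, with simplified names (illustrative code, not the actual module):

# Illustrative reconstruction of the footer cases handled above:
def footer(typename, sizes, is_cuda=False, device=0):
    size_str = 'x'.join(str(s) for s in sizes)
    prefix = 'of size ' if len(sizes) > 0 else 'with no dimension'
    device_str = ' (GPU {})'.format(device) if is_cuda else ''
    return '[{} {}{}{}]'.format(typename, prefix, size_str, device_str)

print(footer('torch.FloatTensor', (2, 3)))             # [torch.FloatTensor of size 2x3]
print(footer('torch.cuda.FloatTensor', (2, 3), True))  # [torch.cuda.FloatTensor of size 2x3 (GPU 0)]
print(footer('torch.FloatTensor', ()))                 # [torch.FloatTensor with no dimension]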
diff --git a/torch/autograd/variable.py b/torch/autograd/variable.py
index 4c95bc8760..ce11c38cc2 100644
--- a/torch/autograd/variable.py
+++ b/torch/autograd/variable.py
@@ -70,7 +70,25 @@ class Variable(_C._VariableBase):
self.requires_grad, _, self._backward_hooks = state
def __repr__(self):
- return 'Variable containing:' + self.data.__repr__()
+ strt = 'Variable containing:' + torch._tensor_str._str(self.data, False)
+ # let's make our own Variable-specific footer
+ size_str = '(' + ','.join(str(size) for size in self.size()) + (',)' if len(self.size()) == 1 else ')')
+ device_str = '' if not self.is_cuda else \
+ ' (GPU {})'.format(self.get_device())
+ strt += '[{} of size {}{}]\n'.format(torch.typename(self.data),
+ size_str, device_str)
+
+ # All strings are unicode in Python 3, while we have to encode unicode
+ # strings in Python2. If we can't, let python decide the best
+ # characters to replace unicode characters with.
+ if sys.version_info > (3,):
+ return strt
+ else:
+ if hasattr(sys.stdout, 'encoding'):
+ return strt.encode(
+ sys.stdout.encoding or 'UTF-8', 'replace')
+ else:
+ return strt.encode('UTF-8', 'replace')
def backward(self, gradient=None, retain_graph=None, create_graph=False):
"""Computes the gradient of current variable w.r.t. graph leaves.
diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp
index f541e1343f..9fa3bc9040 100644
--- a/torch/csrc/autograd/python_variable.cpp
+++ b/torch/csrc/autograd/python_variable.cpp
@@ -44,13 +44,13 @@ static PyObject* THPVariable_NewWithVar(PyTypeObject* type, Variable var)
return obj;
}
-PyObject * THPVariable_Wrap(Variable var)
+PyObject * THPVariable_Wrap(Variable var, bool allow_scalar)
{
if (!var.defined()) {
Py_RETURN_NONE;
}
- if (var.dim() == 0) {
+ if (!allow_scalar && var.dim() == 0) {
throw std::runtime_error("Variable API does not support Scalars");
}
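
Because allow_scalar defaults to false, every existing caller of THPVariable_Wrap keeps the old behavior; only _wrap_scalar opts in. A minimal Python model of the guard (illustrative only, the real check lives in the C++ above):

def wrap(var_dim, allow_scalar=False):
    if not allow_scalar and var_dim == 0:
        raise RuntimeError("Variable API does not support Scalars")
    return 'wrapped'

wrap(2)                     # fine: non-scalar Variable
wrap(0, allow_scalar=True)  # fine: the _scalar_sum path opts in
# wrap(0) raises, preserving the existing contract for all other callers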
diff --git a/torch/csrc/autograd/python_variable.h b/torch/csrc/autograd/python_variable.h
index 328d0a6e33..91f6acc3bd 100644
--- a/torch/csrc/autograd/python_variable.h
+++ b/torch/csrc/autograd/python_variable.h
@@ -24,7 +24,8 @@ struct THPVariable {
extern PyObject *THPVariableClass;
bool THPVariable_initModule(PyObject *module);
-PyObject * THPVariable_Wrap(torch::autograd::Variable var);
+// FixMe: remove allow_scalar when scalars are fully supported.
+PyObject * THPVariable_Wrap(torch::autograd::Variable var, bool allow_scalar=false);
PyObject * THPVariable_get_data(THPVariable *self);
inline bool THPVariable_Check(PyObject *obj)