author     gchanan <gregchanan@gmail.com>    2018-01-12 14:26:38 -0500
committer  GitHub <noreply@github.com>       2018-01-12 14:26:38 -0500
commit     eb857ec36760eac9db02f9d0cd6426a1415f3718 (patch)
tree       4795d67ecbd97bbf0eb902dcc01efb4865533976 /torch
parent     a14dd69be825158680c9b7ca213ac451ace9fdf6 (diff)
Introduce a (non-public) autograd scalar method and improve printing (#4586)
* Specialize Variable printing and always print device for GPU tensors/Variables.
* Introduce a (non-public) _scalar_sum() method for autograd scalar testing.
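As a concrete illustration (a sketch based on the footer format strings in the hunks below; the tensor values are made up), the Variable repr now ends with a tuple-style size footer, and CUDA tensors/Variables always name their device:

    >>> import torch
    >>> from torch.autograd import Variable
    >>> print(Variable(torch.randn(2, 3)))

    Variable containing:
    -0.2767  0.5816 -1.0013
     1.1046 -0.3264  0.0481
    [torch.FloatTensor of size (2,3)]

    >>> print(torch.randn(2, 3).cuda())

    -0.1356  0.7421  0.3310
     0.0918 -1.2045  0.6472
    [torch.cuda.FloatTensor of size 2x3 (GPU 0)]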
Diffstat (limited to 'torch')
-rw-r--r--  torch/_tensor_str.py                     | 16
-rw-r--r--  torch/autograd/variable.py               | 20
-rw-r--r--  torch/csrc/autograd/python_variable.cpp  |  4
-rw-r--r--  torch/csrc/autograd/python_variable.h    |  3
4 files changed, 32 insertions, 11 deletions
diff --git a/torch/_tensor_str.py b/torch/_tensor_str.py
index 1d93fd10e5..83113b2cdd 100644
--- a/torch/_tensor_str.py
+++ b/torch/_tensor_str.py
@@ -288,9 +288,9 @@ def _vector_str(self):
             '\n')
 
 
-def _str(self):
+def _str(self, include_footer=True):
     if self.ndimension() == 0:
-        return '[{} with no dimension]\n'.format(torch.typename(self))
+        strt = ''
     elif self.ndimension() == 1:
         strt = _vector_str(self)
     elif self.ndimension() == 2:
@@ -298,9 +298,11 @@ def _str(self):
     else:
         strt = _tensor_str(self)
 
-    size_str = 'x'.join(str(size) for size in self.size())
-    device_str = '' if not self.is_cuda else \
-        ' (GPU {})'.format(self.get_device())
-    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
-                                         size_str, device_str)
+    if include_footer:
+        size_str = 'x'.join(str(size) for size in self.size())
+        size_str_prefix = 'of size ' if self.ndimension() > 0 else 'with no dimension'
+        device_str = '' if not self.is_cuda else \
+            ' (GPU {})'.format(self.get_device())
+        strt += '[{} {}{}{}]\n'.format(torch.typename(self), size_str_prefix,
+                                       size_str, device_str)
     return '\n' + strt
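What the new flag buys, sketched (illustrative; the include_footer=False path exists so Variable.__repr__ below can append its own footer):

    >>> t = torch.randn(2, 3)
    >>> print(torch._tensor_str._str(t))          # footer included by default
    ...
    [torch.FloatTensor of size 2x3]
    >>> torch._tensor_str._str(t, False)          # body only, no footer line

Note that for a tensor with no dimension, strt starts out empty, 'x'.join over an empty size yields '', and the footer degenerates to '[torch.FloatTensor with no dimension]', reproducing exactly the string the deleted early return used to build.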
diff --git a/torch/autograd/variable.py b/torch/autograd/variable.py
index 4c95bc8760..ce11c38cc2 100644
--- a/torch/autograd/variable.py
+++ b/torch/autograd/variable.py
@@ -70,7 +70,25 @@ class Variable(_C._VariableBase):
         self.requires_grad, _, self._backward_hooks = state
 
     def __repr__(self):
-        return 'Variable containing:' + self.data.__repr__()
+        strt = 'Variable containing:' + torch._tensor_str._str(self.data, False)
+        # let's make our own Variable-specific footer
+        size_str = '(' + ','.join(str(size) for size in self.size()) + (',)' if len(self.size()) == 1 else ')')
+        device_str = '' if not self.is_cuda else \
+            ' (GPU {})'.format(self.get_device())
+        strt += '[{} of size {}{}]\n'.format(torch.typename(self.data),
+                                             size_str, device_str)
+
+        # All strings are unicode in Python 3, while we have to encode unicode
+        # strings in Python2. If we can't, let python decide the best
+        # characters to replace unicode characters with.
+        if sys.version_info > (3,):
+            return strt
+        else:
+            if hasattr(sys.stdout, 'encoding'):
+                return strt.encode(
+                    sys.stdout.encoding or 'UTF-8', 'replace')
+            else:
+                return strt.encode('UTF-8', 'replace')
 
     def backward(self, gradient=None, retain_graph=None, create_graph=False):
         """Computes the gradient of current variable w.r.t. graph leaves.
diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp
index f541e1343f..9fa3bc9040 100644
--- a/torch/csrc/autograd/python_variable.cpp
+++ b/torch/csrc/autograd/python_variable.cpp
@@ -44,13 +44,13 @@ static PyObject* THPVariable_NewWithVar(PyTypeObject* type, Variable var)
   return obj;
 }
 
-PyObject * THPVariable_Wrap(Variable var)
+PyObject * THPVariable_Wrap(Variable var, bool allow_scalar)
 {
   if (!var.defined()) {
     Py_RETURN_NONE;
   }
 
-  if (var.dim() == 0) {
+  if (!allow_scalar && var.dim() == 0) {
     throw std::runtime_error("Variable API does not support Scalars");
   }
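The default behavior is unchanged: wrapping a zero-dimensional Variable still throws unless the caller opts in. A Python-side sketch of the intended use (the wiring of _scalar_sum() to allow_scalar is implied by the commit title; its implementation is not part of this diff):

    v = Variable(torch.randn(4))
    s = v._scalar_sum()   # non-public test hook; presumably wraps its 0-dim
                          # result with allow_scalar=true, avoiding the
                          # RuntimeError: Variable API does not support Scalars
                          # that any other 0-dim wrap still raises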
diff --git a/torch/csrc/autograd/python_variable.h b/torch/csrc/autograd/python_variable.h
index 328d0a6e33..91f6acc3bd 100644
--- a/torch/csrc/autograd/python_variable.h
+++ b/torch/csrc/autograd/python_variable.h
@@ -24,7 +24,8 @@ struct THPVariable {
 extern PyObject *THPVariableClass;
 
 bool THPVariable_initModule(PyObject *module);
-PyObject * THPVariable_Wrap(torch::autograd::Variable var);
+// FixMe: remove allow_scalar when scalars are fully supported.
+PyObject * THPVariable_Wrap(torch::autograd::Variable var, bool allow_scalar=false);
 PyObject * THPVariable_get_data(THPVariable *self);
 
 inline bool THPVariable_Check(PyObject *obj)
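Since allow_scalar defaults to false, every existing caller of THPVariable_Wrap keeps its current behavior; only the scalar-testing path passes true, and the FixMe marks the flag as scaffolding to delete once scalars are fully supported. A minimal sketch of the kind of test this unlocks (hypothetical test code, not part of this commit):

    import torch
    from torch.autograd import Variable

    def test_scalar_sum_is_zero_dim():        # hypothetical
        v = Variable(torch.randn(4))
        s = v._scalar_sum()                   # non-public hook from this commit
        assert s.dim() == 0                   # a genuine 0-dim (scalar) Variable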