author     gchanan <gregchanan@gmail.com>  2018-04-27 15:11:45 -0400
committer  GitHub <noreply@github.com>     2018-04-27 15:11:45 -0400
commit     a6bfa16c17c2e2847dbec2ccc1b2b60741ae4c65 (patch)
tree       de8e0bf6da6b26a247f9db13ed60759a89ece06f /test/test_multiprocessing.py
parent     bdd27ea9567675f41eb7000e29f02a841718d25e (diff)
torch.arange: add numpy-style type inference. (#7016)
* torch.arange: add numpy-style type inference. This is a backwards-compatibility breaking change.
* Fix flake8.
* Use at::optional.
* Remove unneeded header files.
* Use reference wrapper.
* Update arange for test.
* Address review comments.
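In context: after this change, torch.arange infers its dtype from its arguments the way numpy.arange does, so the test updates below switch to float literals to keep the tensors floating-point. A minimal sketch of the new behavior (assuming a build that includes this change; the exact floating dtype follows the configured default):

    import torch

    # Integer endpoints now infer an integral dtype, as in numpy.arange:
    print(torch.arange(1, 26).dtype)   # torch.int64 (previously the default float type)

    # A float endpoint keeps the old floating-point result, which is why the
    # updated tests write torch.arange(1., 26) instead of torch.arange(1, 26):
    print(torch.arange(1., 26).dtype)  # torch.float32 under default settings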
Diffstat (limited to 'test/test_multiprocessing.py')
-rw-r--r--  test/test_multiprocessing.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/test/test_multiprocessing.py b/test/test_multiprocessing.py
index 4eec7080f3..a7c0e1befd 100644
--- a/test/test_multiprocessing.py
+++ b/test/test_multiprocessing.py
@@ -92,7 +92,7 @@ def autograd_sharing(queue, ready, master_modified):
     ready.set()
     master_modified.wait()
 
-    expected_var = torch.arange(1, 26).view(5, 5)
+    expected_var = torch.arange(1., 26).view(5, 5)
     expected_var[0, 0] = 1000
     is_ok = var.data.equal(expected_var)
     var.data[:] = torch.ones(5, 5)
@@ -314,7 +314,7 @@ class TestMultiprocessing(TestCase):
         tensors = []
         for i in range(5):
             device = i % 2
-            tensors += [torch.arange(i * 5, (i + 1) * 5).cuda(device)]
+            tensors += [torch.arange(i * 5., (i + 1) * 5).cuda(device)]
 
         inq = ctx.Queue()
         outq = ctx.Queue()
@@ -329,7 +329,7 @@ class TestMultiprocessing(TestCase):
         for i, tensor in enumerate(tensors):
             v, device, tensor_size, storage_size = results[i]
-            self.assertEqual(v, torch.arange(i * 5, (i + 1) * 5).sum())
+            self.assertEqual(v, torch.arange(i * 5., (i + 1) * 5).sum())
             self.assertEqual(device, i % 2)
             self.assertEqual(tensor_size, 5)
             self.assertEqual(storage_size, 5)
@@ -412,12 +412,12 @@ class TestMultiprocessing(TestCase):
     def test_variable_sharing(self):
         for requires_grad in [True, False]:
-            var = Variable(torch.arange(1, 26).view(5, 5),
+            var = Variable(torch.arange(1., 26).view(5, 5),
                            requires_grad=requires_grad)
             self._test_autograd_sharing(var)
 
     def test_parameter_sharing(self):
-        param = Parameter(torch.arange(1, 26).view(5, 5))
+        param = Parameter(torch.arange(1., 26).view(5, 5))
         self._test_autograd_sharing(param)
 
     def test_empty_shared(self):