author | Jerry Zhang <jerryzh@fb.com> | 2019-04-17 16:10:05 -0700 |
---|---|---|
committer | Facebook Github Bot <facebook-github-bot@users.noreply.github.com> | 2019-04-17 16:17:40 -0700 |
commit | ad8f34fcca6aadbe0711227e43f87ae47cf417eb (patch) | |
tree | 2708643a6bab2bea35cf632ac6ace61e7bb2c59e /test | |
parent | 4371cb5e0193d2eaa8d23673eb153874113eab4e (diff) | |
Add empty_quantized (#18960)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18960
empty_affine_quantized creates an empty (uninitialized) affine quantized Tensor from scratch.
We may need this when implementing quantized operators.
Differential Revision: D14810261
fbshipit-source-id: f07d8bf89822d02a202ee81c78a17aa4b3e571cc
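A rough usage sketch, not part of this commit: it simply mirrors the call exercised by the new test below and the dequantization formula from its commented-out checks (whether dequantize() is already usable at this point is an assumption).

    import torch

    scale = 0.5
    zero_point = 10
    numel = 10
    # Allocate an uninitialized affine quantized tensor of 10 elements.
    q = torch.empty_affine_quantized(numel, dtype=torch.qint8,
                                     scale=scale, zero_point=zero_point)
    # Affine dequantization maps a stored integer value v back to float as
    # (v - zero_point) * scale; the contents here are uninitialized, so the
    # resulting values are arbitrary.
    r = q.dequantize()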
Diffstat (limited to 'test')
-rw-r--r-- | test/test_torch.py | 13 |
1 file changed, 13 insertions, 0 deletions
diff --git a/test/test_torch.py b/test/test_torch.py
index cb6db50cb7..39abb94fd9 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -2695,6 +2695,19 @@ class _TestTorchMixin(object):
         print(rqr.numpy())
         self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))

+    def test_qtensor_creation(self):
+        scale = 0.5
+        zero_point = 10
+        val = 100
+        numel = 10
+        q = torch.empty_affine_quantized(numel, dtype=torch.qint8, scale=scale, zero_point=zero_point)
+        # for i in range(numel):
+        #     # wait for th_fill
+        #     q[i] = val
+        # r = q.dequantize()
+        # for i in range(numel):
+        #     self.assertEqual(r[i], (val - zero_point) * scale)
+
     @unittest.skipIf(torch.cuda.device_count() < 2, 'fewer than 2 GPUs detected')
     def test_device_guard(self):
         # verify that all operators with `device_guard: False` behave properly with multiple devices.
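For reference, with the constants used in the commented-out part of the test (val = 100, zero_point = 10, scale = 0.5), the round-trip check would expect each dequantized element to equal (100 - 10) * 0.5 = 45.0 once fill support (th_fill) lands.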