summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJerry Zhang <jerryzh@fb.com>2019-04-23 21:24:40 -0700
committerFacebook Github Bot <facebook-github-bot@users.noreply.github.com>2019-04-23 21:29:31 -0700
commit309c15e2df3ed300e0c09bdbb4fbfe2ba98267ad (patch)
tree635748a6e200b314dd80a56e6b5228214a3b3dcf
parentd902774cadd085c89bd27391d1a3c5a8488235de (diff)
downloadpytorch-309c15e2df3ed300e0c09bdbb4fbfe2ba98267ad.tar.gz
pytorch-309c15e2df3ed300e0c09bdbb4fbfe2ba98267ad.tar.bz2
pytorch-309c15e2df3ed300e0c09bdbb4fbfe2ba98267ad.zip
Enable assignment for QTensor in pytorch frontend (#19530)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/19530 Make copy work with QTensor, enable assignment of QTensor in pytorch frontend. Differential Revision: D15008160 fbshipit-source-id: 5f1166246d768b23f009cde1fa03e8952368a332
-rw-r--r--aten/src/ATen/native/native_functions.yaml3
-rw-r--r--aten/src/ATen/native/quantized/Copy.cpp21
-rw-r--r--c10/core/TensorOptions.h3
-rw-r--r--test/test_torch.py6
-rw-r--r--torch/csrc/autograd/python_variable_indexing.cpp8
5 files changed, 36 insertions, 5 deletions
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 27fc892175..a23d81c3d2 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -445,6 +445,7 @@
dispatch:
CPU: _s_copy__cpu
CUDA: _s_copy__cuda
+ QuantizedCPU: _s_copy__quantized
- func: _s_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
cpu_half: True
@@ -688,7 +689,7 @@
- func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0) -> Tensor
dispatch:
- CPU: empty_affine_quantized_cpu
+ QuantizedCPU: empty_affine_quantized_cpu
- func: resize_(Tensor(a!) self, int[] size) -> Tensor(a!)
variants: method
diff --git a/aten/src/ATen/native/quantized/Copy.cpp b/aten/src/ATen/native/quantized/Copy.cpp
new file mode 100644
index 0000000000..ea353c2405
--- /dev/null
+++ b/aten/src/ATen/native/quantized/Copy.cpp
@@ -0,0 +1,21 @@
+#include <ATen/native/Copy.h>
+
+#include <ATen/ATen.h>
+#include <ATen/quantized/Quantizer.h>
+#include <ATen/CPUApplyUtils.h>
+
+namespace at { namespace native {
+Tensor& _s_copy__quantized(
+ Tensor& self,
+ const Tensor& src,
+ bool non_blocking) {
+ AT_CHECK(self.scalar_type() == at::kQInt8, "Quantized copy only works with kQInt8 as target Tensor");
+ AT_CHECK(src.scalar_type() == at::kFloat, "Quantized copy only works with kFloat as source Tensor");
+ qint8* self_data = self.data<qint8>();
+ float* src_data = src.data<float>();
+ for (int i = 0; i < self.numel(); ++i) {
+ self_data[i] = quantize_uint8(self.q_scale().to<float>(), self.q_zero_point().to<uint8_t>(), src_data[i]);
+ }
+ return self;
+}
+}} // at::native
diff --git a/c10/core/TensorOptions.h b/c10/core/TensorOptions.h
index b2d2a51f5c..b3027eb843 100644
--- a/c10/core/TensorOptions.h
+++ b/c10/core/TensorOptions.h
@@ -360,6 +360,9 @@ struct C10_API TensorOptions {
if (isComplexType(typeMetaToScalarType(dtype()))) {
return ComplexCPUTensorId();
}
+ if (isQIntType(typeMetaToScalarType(dtype()))) {
+ return QuantizedCPUTensorId();
+ }
return CPUTensorId();
case DeviceType::CUDA:
if (isComplexType(typeMetaToScalarType(dtype()))) {
diff --git a/test/test_torch.py b/test/test_torch.py
index 613e1f977c..a40f2c1061 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -2680,9 +2680,9 @@ class _TestTorchMixin(object):
self.assertEqual(qr.item(), 1)
self.assertEqual(qr[0].item(), 1)
# assignment
- # This calls _th_fill_
- # qr[0] = 8 # float assignment
- # self.assertEqual(qr.item(), 8)
+ self.assertTrue(qr[0].is_quantized)
+ qr[0] = 11.3 # float assignment
+ self.assertEqual(qr.item(), 11)
def test_qtensor_quant_dequant(self):
r = np.random.rand(3, 2) * 2 - 4
diff --git a/torch/csrc/autograd/python_variable_indexing.cpp b/torch/csrc/autograd/python_variable_indexing.cpp
index 1415a48544..10467abe78 100644
--- a/torch/csrc/autograd/python_variable_indexing.cpp
+++ b/torch/csrc/autograd/python_variable_indexing.cpp
@@ -15,6 +15,7 @@
#include <ATen/DeviceGuard.h>
#include <ATen/ExpandUtils.h>
#include <c10/core/TensorOptions.h>
+#include <ATen/core/LegacyTypeDispatch.h>
#include <vector>
#include <tuple>
@@ -334,7 +335,12 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
OptionalDeviceGuard device_guard(device_of(self_));
- auto value = valueToTensor(self_.dispatch_type(), self_.scalar_type(), py_value);
+ Variable value;
+ if (isQIntType(self_.scalar_type())) {
+ value = valueToTensor(at::globalContext().getVariableType(at::Backend::CPU, at::kFloat), at::kFloat, py_value);
+ } else {
+ value = valueToTensor(self_.dispatch_type(), self_.scalar_type(), py_value);
+ }
// handle simple types: integers, slices, ellipsis, bool
if (index == Py_False) { // NOLINT(cppcoreguidelines-pro-type-cstyle-cast)