author    | Zhou Chang <achang.zhou@gmail.com> | 2018-04-10 01:39:31 +0800
committer | Sam Gross <colesbury@gmail.com>    | 2018-04-09 13:39:31 -0400
commit    | d0f395f7446a89f21dd8a5d8bce6c509975436f3 (patch)
tree      | 3cbf4df55278c6bdaf2f1aed48aa27e255c27333 /tools
parent    | 57ee202022a11eded50791af5f3d21c4b8815ae3 (diff)
[pytorch] Fix clamp is missing kwarg out (#6028) (#6418)
torch.clamp is excluded from the template-generated bindings, so its missing out= keyword argument has to be added by hand, keeping its behavior consistent with the auto-generated code.
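For context, this is the user-visible behavior the patch restores — a minimal sketch, assuming a PyTorch build that includes this commit (tensor values are illustrative):

import torch

x = torch.tensor([-2.0, 0.5, 3.0])
result = torch.empty(3)

# Before this patch the hand-written clamp binding did not accept out=,
# so this call was rejected; with the patch it writes into `result` in place.
torch.clamp(x, min=0.0, max=1.0, out=result)
print(result)  # tensor([0.0000, 0.5000, 1.0000])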
Diffstat (limited to 'tools')
-rw-r--r-- | tools/autograd/templates/python_torch_functions.cpp        | 40
-rw-r--r-- | tools/autograd/templates/python_torch_functions_dispatch.h | 32
2 files changed, 51 insertions, 21 deletions
diff --git a/tools/autograd/templates/python_torch_functions.cpp b/tools/autograd/templates/python_torch_functions.cpp
index 2dc9e0ee26..e2009bac1f 100644
--- a/tools/autograd/templates/python_torch_functions.cpp
+++ b/tools/autograd/templates/python_torch_functions.cpp
@@ -43,40 +43,38 @@ static void check_out_type_matches(Tensor result, const THPDtype &dtype, const T
   }
 }
 
-static Tensor dispatch_clamp(const Tensor & self, Scalar min, Scalar max) {
-  AutoNoGIL no_gil;
-  AutoGPU auto_gpu(self);
-  return self.clamp(min, max);
-}
-static Tensor dispatch_clamp_min(const Tensor & self, Scalar min) {
-  AutoNoGIL no_gil;
-  AutoGPU auto_gpu(self);
-  return self.clamp_min(min);
-}
-static Tensor dispatch_clamp_max(const Tensor & self, Scalar max) {
-  AutoNoGIL no_gil;
-  AutoGPU auto_gpu(self);
-  return self.clamp_max(max);
-}
-
 // The Python clamp() syntax has to be mapped to one of three C++ functions
 static PyObject * THPVariable_clamp(PyObject* module, PyObject* args, PyObject* kwargs)
 {
   HANDLE_TH_ERRORS
   static PythonArgParser parser({
-    "clamp(Tensor input, Scalar min=None, Scalar max=None)",
+    "clamp(Tensor input, Scalar min=None, Scalar max=None, *, Tensor out=None)",
   });
-  ParsedArgs<3> parsed_args;
+
+  ParsedArgs<4> parsed_args;
   auto r = parser.parse(args, kwargs, parsed_args);
   if (!r.isNone(1) && !r.isNone(2)) {
-    return THPVariable_Wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2)));
+    if (!r.isNone(3)) {
+      return wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2), r.tensor(3)));
+    } else {
+      return wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2)));
+    }
   } else if (!r.isNone(1)) {
-    return THPVariable_Wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1)));
+    if (!r.isNone(3)) {
+      return wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1), r.tensor(3)));
+    } else {
+      return wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1)));
+    }
   } else if (!r.isNone(2)) {
-    return THPVariable_Wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2)));
+    if (!r.isNone(3)) {
+      return wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2), r.tensor(3)));
+    } else {
+      return wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2)));
+    }
   } else {
     throw std::runtime_error("At least one of 'min' or 'max' must not be None");
   }
+  Py_RETURN_NONE;
   END_HANDLE_TH_ERRORS
 }
diff --git a/tools/autograd/templates/python_torch_functions_dispatch.h b/tools/autograd/templates/python_torch_functions_dispatch.h
index dfae2585cf..45c4f79868 100644
--- a/tools/autograd/templates/python_torch_functions_dispatch.h
+++ b/tools/autograd/templates/python_torch_functions_dispatch.h
@@ -32,6 +32,38 @@ static void maybe_initialize_cuda(const at::Type &type) {
   }
 }
 
+// manual dispatch code for clamp
+inline Tensor dispatch_clamp(const Tensor & self, Scalar min, Scalar max) {
+  AutoNoGIL no_gil;
+  AutoGPU auto_gpu(self);
+  return self.clamp(min, max);
+}
+inline Tensor dispatch_clamp_min(const Tensor & self, Scalar min) {
+  AutoNoGIL no_gil;
+  AutoGPU auto_gpu(self);
+  return self.clamp_min(min);
+}
+inline Tensor dispatch_clamp_max(const Tensor & self, Scalar max) {
+  AutoNoGIL no_gil;
+  AutoGPU auto_gpu(self);
+  return self.clamp_max(max);
+}
+inline Tensor & dispatch_clamp(const Tensor & self, Scalar min, Scalar max, Tensor result) {
+  AutoNoGIL no_gil;
+  AutoGPU auto_gpu(result);
+  return at::clamp_out(result, self, min, max);
+}
+inline Tensor & dispatch_clamp_min(const Tensor & self, Scalar min, Tensor result) {
+  AutoNoGIL no_gil;
+  AutoGPU auto_gpu(result);
+  return at::clamp_min_out(result, self, min);
+}
+inline Tensor & dispatch_clamp_max(const Tensor & self, Scalar max, Tensor result) {
+  AutoNoGIL no_gil;
+  AutoGPU auto_gpu(result);
+  return at::clamp_max_out(result, self, max);
+}
+
 ${py_method_dispatch}
 
 }} // namespace torch::autograd
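The binding maps the single Python-level clamp() signature onto three C++ dispatch functions depending on which bounds are supplied, and onto the _out variants when out= is passed (note the AutoGPU guard keys on the result tensor there, not on the input). A sketch of the call shapes each branch handles, assuming a build containing this patch:

import torch

x = torch.tensor([-2.0, 0.5, 3.0])

torch.clamp(x, min=0.0, max=1.0)  # both bounds -> dispatch_clamp
torch.clamp(x, min=0.0)           # min only    -> dispatch_clamp_min
torch.clamp(x, max=1.0)           # max only    -> dispatch_clamp_max

out = torch.empty_like(x)
torch.clamp(x, min=0.0, out=out)  # out= set    -> the Tensor& dispatch_clamp_min overload

# torch.clamp(x)  # neither bound: raises, per the binding's runtime_error branch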