author     Gregory Chanan <gchanan@fb.com>                                      2019-04-02 07:31:54 -0700
committer  Facebook Github Bot <facebook-github-bot@users.noreply.github.com>  2019-04-02 07:34:55 -0700
commit     d5bf6ddc29ac760d62392c3f3208b5ae49e56ff4 (patch)
tree       3b1c3b1a408bc9432762e1087f3d9d4794fd2452
parent     af84371ba812de5ec09f868f656d3b8bb9470a3f (diff)
Kill LegacyBridge functions that don't do multiple dispatch. (#18696)
Summary:
At some point, we needed these functions to deal with autograd dispatching to either the sparse or the TH version of a backward. But we have since rewritten all backward definitions in terms of native functions, so these bridges are no longer necessary.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18696
Differential Revision: D14710834
Pulled By: gchanan
fbshipit-source-id: b22568c58eefc79d672555bd8832398ccd965cb7
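
For context, each deleted bridge function in LegacyBridge.cpp picked an implementation at runtime with a hand-written if/else on _has_native(self), whereas the per-backend dispatch: entries added to native_functions.yaml let the generated code route each call through a backend-keyed table instead. Below is a minimal sketch of that table idea; Backend, Kernel, pow_kernels, and call_pow are hypothetical stand-ins for the real ATen codegen, not its actual API:

#include <functional>
#include <map>
#include <stdexcept>

// Stand-in for ATen's backend keys: the yaml's CPU/CUDA/SparseCPU/SparseCUDA.
enum class Backend { CPU, CUDA, SparseCPU, SparseCUDA };

// Stand-in for a concrete kernel signature such as pow(Tensor, Scalar).
using Kernel = std::function<void()>;

// One table per op, analogous to one "dispatch:" block in native_functions.yaml.
const std::map<Backend, Kernel> pow_kernels = {
    {Backend::CPU,        [] { /* dense CPU pow kernel */ }},
    {Backend::CUDA,       [] { /* dense CUDA pow kernel */ }},
    {Backend::SparseCPU,  [] { /* pow_sparse_scalar */ }},
    {Backend::SparseCUDA, [] { /* pow_sparse_scalar */ }},
};

// A single table lookup replaces the removed if (_has_native(self)) bridge.
void call_pow(Backend b) {
  auto it = pow_kernels.find(b);
  if (it == pow_kernels.end()) {
    throw std::runtime_error("no pow kernel registered for this backend");
  }
  it->second();
}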
aten/src/ATen/native/LegacyBridge.cpp      | 47
aten/src/ATen/native/LegacyDefinitions.cpp | 20
aten/src/ATen/native/native_functions.yaml | 44
3 files changed, 41 insertions(+), 70 deletions(-)
diff --git a/aten/src/ATen/native/LegacyBridge.cpp b/aten/src/ATen/native/LegacyBridge.cpp
index 788b6ec727..62f2984f2b 100644
--- a/aten/src/ATen/native/LegacyBridge.cpp
+++ b/aten/src/ATen/native/LegacyBridge.cpp
@@ -16,53 +16,6 @@ namespace {
   }
 }
 
-// These native operations are not "really" native; they're actually just bridge
-// functions that decide whether or not to call native sparse functions, or
-// TH functions. This file should be temporary; when all of TH gets ported, we
-// can just use the native mechanism straight.
-
-// TODO: Maybe the foo_ variants should call th_foo_
-
-Tensor clone(const Tensor& self) {
-  if (_has_native(self)) {
-    return native_clone(self);
-  } else {
-    return legacy::th::_th_clone(self);
-  }
-}
-
-Tensor& resize_as_(Tensor& self, const Tensor& the_template) {
-  if (_has_native(self)) {
-    return native_resize_as_(self, the_template);
-  } else {
-    return legacy::th::_th_resize_as_(self, the_template);
-  }
-}
-
-Tensor& pow_out(Tensor& result, const Tensor& self, Scalar exponent) {
-  if (_has_native(self)) {
-    return native_pow_out(result, self, exponent);
-  } else {
-    return legacy::th::_th_pow_out(result, self, exponent);
-  }
-}
-
-Tensor pow(const Tensor& self, Scalar exponent) {
-  if (_has_native(self)) {
-    return native_pow(self, exponent);
-  } else {
-    return legacy::th::_th_pow(self, exponent);
-  }
-}
-
-Tensor& zero_(Tensor& self) {
-  if (_has_native(self)) {
-    return native_zero_(self);
-  } else {
-    return legacy::th::_th_zero_(self);
-  }
-}
-
 // Note [Multiple dispatch to sparse]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // In an ideal world, we would use direct support for multiple dispatch to
diff --git a/aten/src/ATen/native/LegacyDefinitions.cpp b/aten/src/ATen/native/LegacyDefinitions.cpp
index 69aac691a8..42efd87ab9 100644
--- a/aten/src/ATen/native/LegacyDefinitions.cpp
+++ b/aten/src/ATen/native/LegacyDefinitions.cpp
@@ -30,6 +30,26 @@ bool is_set_to(const Tensor& self, const Tensor & tensor) {
   return at::legacy::th::_th_is_set_to(self, tensor);
 }
 
+Tensor clone(const Tensor& self) {
+  return legacy::th::_th_clone(self);
+}
+
+Tensor& resize_as_(Tensor& self, const Tensor& the_template) {
+  return legacy::th::_th_resize_as_(self, the_template);
+}
+
+Tensor& pow_out(Tensor& result, const Tensor& self, Scalar exponent) {
+  return legacy::th::_th_pow_out(result, self, exponent);
+}
+
+Tensor pow(const Tensor& self, Scalar exponent) {
+  return legacy::th::_th_pow(self, exponent);
+}
+
+Tensor& zero_(Tensor& self) {
+  return legacy::th::_th_zero_(self);
+}
+
 Tensor & masked_fill_(Tensor& self, const Tensor & mask, Scalar value) {
   return at::legacy::th::_th_masked_fill_(self, mask, value);
 }
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index e4200ab385..ba49a531be 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -2531,56 +2531,54 @@
   matches_jit_signature: True
   variants: function
 
-- func: native_clone(Tensor self) -> Tensor
+- func: clone(Tensor self) -> Tensor
   matches_jit_signature: True
+  variants: function, method
+  cpu_half: True
+  cpu_bool: True
   dispatch:
+    CPU: clone
+    CUDA: clone
     SparseCPU: clone_sparse
     SparseCUDA: clone_sparse
 
-- func: clone(Tensor self) -> Tensor
+- func: resize_as_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
   matches_jit_signature: True
   variants: function, method
-
-- func: native_resize_as_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
-  matches_jit_signature: True
   dispatch:
+    CPU: resize_as_
+    CUDA: resize_as_
     SparseCPU: resize_as_sparse_
     SparseCUDA: resize_as_sparse_
 
-- func: resize_as_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
-  matches_jit_signature: True
-  variants: function, method
-
-- func: native_pow(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
+- func: pow(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
   matches_jit_signature: True
   dispatch:
+    CPU: pow_out
+    CUDA: pow_out
     SparseCPU: pow_out_sparse_scalar
     SparseCUDA: pow_out_sparse_scalar
 
-- func: native_pow(Tensor self, Scalar exponent) -> Tensor
+- func: pow(Tensor self, Scalar exponent) -> Tensor
   matches_jit_signature: True
+  variants: function, method
   dispatch:
+    CPU: pow
+    CUDA: pow
     SparseCPU: pow_sparse_scalar
     SparseCUDA: pow_sparse_scalar
 
-- func: pow(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
-  matches_jit_signature: True
-
-- func: pow(Tensor self, Scalar exponent) -> Tensor
+- func: zero_(Tensor(a!) self) -> Tensor(a!)
   matches_jit_signature: True
-  variants: function, method
-
-- func: native_zero_(Tensor(a!) self) -> Tensor(a!)
-  matches_jit_signature: True
+  variants: method, function
+  cpu_half: True
+  cpu_bool: True
   dispatch:
+    CPU: zero_
+    CUDA: zero_
     SparseCPU: zero_sparse_
     SparseCUDA: zero_sparse_
 
-- func: zero_(Tensor(a!) self) -> Tensor(a!)
-  matches_jit_signature: True
-  variants: method, function
-
 - func: sub(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
   matches_jit_signature: True