summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--aten/README.md121
-rw-r--r--aten/doc/Functions.h2483
-rw-r--r--aten/doc/Tensor.h1586
-rw-r--r--aten/doc/Type.h901
-rw-r--r--aten/src/ATen/templates/Type.h6
-rwxr-xr-xaten/tools/update_doc.sh3
6 files changed, 5097 insertions, 3 deletions
diff --git a/aten/README.md b/aten/README.md
new file mode 100644
index 0000000000..6d2ddd5f50
--- /dev/null
+++ b/aten/README.md
@@ -0,0 +1,121 @@
+# ATen: A TENsor library
+
+ATen is a simple tensor library that exposes the Tensor operations in Torch
+and PyTorch directly in C++11. The wrapper respects the semantics of operators
+in PyTorch, except minor details due to differences between C++ and Python in
+the way default arguments are handled. See the [documentation for tensors](http://pytorch.org/docs/tensors.html) in PyTorch for what these operations do.
+ATen's API is auto-generated from the same declarations PyTorch uses so the
+two APIs will track each other over time.
+
+Tensor types are resolved dynamically, such that the API is generic and
+does not include templates. That is, there is one `Tensor` type. It can hold a
+CPU or CUDA Tensor, and the tensor may have Doubles, Float, Ints, etc. This design
+makes it easy to write generic code without templating everything.
+
+See the _generated_ [`Tensor.h` file](doc/Tensor.h) and [`Functions.h` file](doc/Functions.h) for the provided API. Excerpt:
+```c++
+Tensor atan2(const Tensor & other) const;
+Tensor & atan2_(const Tensor & other);
+Tensor pow(Scalar exponent) const;
+Tensor pow(const Tensor & exponent) const;
+Tensor & pow_(Scalar exponent);
+Tensor & pow_(const Tensor & exponent);
+Tensor lerp(const Tensor & end, Scalar weight) const;
+Tensor & lerp_(const Tensor & end, Scalar weight);
+Tensor histc() const;
+Tensor histc(int64_t bins) const;
+Tensor histc(int64_t bins, Scalar min) const;
+Tensor histc(int64_t bins, Scalar min, Scalar max) const;
+```
+
+Inplace operations are also provided, and always suffixed by `_` to indicate they will modify the Tensor.
+
+### Installation
+
+TH/THC/THNN/THCUNN are provided (as git subtrees), so the repo is standalone. You will need a C++11 compiler, cmake, and the pyyaml python package.
+```
+
+# Install pyyaml used by python code generation to read API declarations
+
+# OSX: if you don't have pip
+sudo easy_install pip
+# Ubuntu: if you don't have pip
+apt-get -y install python-pip
+
+# if you don't have pyyaml
+sudo pip install pyyaml
+
+mkdir build
+cd build
+cmake .. -DCMAKE_INSTALL_PREFIX=/where/you/want # specify your dest directory
+make install
+```
+
+### Example usage
+
+Here is a simple example; again, the syntax follows Torch semantics.
+
+```c++
+using namespace at; // assumed in the following
+
+Tensor d = CPU(kFloat).ones({3, 4});
+Tensor r = CPU(kFloat).zeros({3,4});
+for(auto i = 0; i < 100000; i++) {
+ r = r.add(d);
+ // equivalently
+ r = r + d;
+ // or
+ r += d;
+}
+```
+
+Want this running on the GPU?
+```c++
+using namespace at; // assumed in the following
+
+Tensor d = CUDA(kFloat).ones({3, 4});
+Tensor r = CUDA(kFloat).zeros({3,4});
+for(auto i = 0; i < 100000; i++) {
+ r = r.add(d);
+ // equivalently
+ r = r + d;
+ // or
+ r += d;
+}
+```
+
+Expressions like `CUDA(kFloat)` are first-class `at::Type` objects that represent
+the type of a Tensor and are used to create Tensors when their type cannot be
+inferred. See the _generated_ [Type header](doc/Type.h) for its API.
+
+See more in [sample files](src/ATen/test).
+
+### Creating your kernel
+
+It is easy to create new kernels, thanks to the `dispatch<>()` templated function. Example:
+```c++
+
+// a simple sum kernel (for CPU only)
+template<typename T>
+struct sum_op {
+ // dispatch handles variable arguments for you
+ Tensor CPU(const Type & t, Tensor & x_)
+ {
+ Tensor x = x_.contiguous();
+ auto x_p = x.data<T>();
+ int64_t size = x.numel();
+ T sum = 0;
+ for(int64_t i = 0; i < size; i++) {
+ sum += x_p[i];
+ }
+ return sum;
+ };
+ Tensor CUDA(Tensor& x) {
+ throw std::invalid_argument("device not supported");
+ };
+};
+
+Tensor a = CPU(kFloat).rand({3, 7});
+std::cout << a << std::endl;
+std::cout << dispatch<sum_op>(a.type(),a) << " == " << a.sum() << std::endl;
+```
diff --git a/aten/doc/Functions.h b/aten/doc/Functions.h
new file mode 100644
index 0000000000..bb7e1f6dcf
--- /dev/null
+++ b/aten/doc/Functions.h
@@ -0,0 +1,2483 @@
+#pragma once
+
+#include "ATen/Scalar.h"
+#include "ATen/Type.h"
+#include "ATen/Tensor.h"
+#include "ATen/Storage.h"
+#include "ATen/Generator.h"
+
+
+
+namespace at {
+
+static inline Tensor & copy_out(const Tensor & src, Tensor & dst) {
+ dst.resize_(src.sizes());
+ dst.type().copy(src,dst);
+}
+
+static inline Tensor & zeros_out(IntList size, Tensor & result);
+static inline Tensor & ones_out(IntList size, Tensor & result);
+static inline int64_t numel(const Tensor & self);
+static inline Tensor & masked_select_out(const Tensor & self, const Tensor & mask, Tensor & result);
+static inline Tensor masked_select(const Tensor & self, const Tensor & mask);
+static inline Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1);
+static inline Tensor t(const Tensor & self);
+static inline Tensor & squeeze_out(const Tensor & self, int64_t dim, Tensor & result);
+static inline Tensor squeeze(const Tensor & self, int64_t dim);
+static inline Tensor & squeeze_out(const Tensor & self, Tensor & result);
+static inline Tensor squeeze(const Tensor & self);
+static inline Tensor & unsqueeze_out(const Tensor & self, int64_t dim, Tensor & result);
+static inline Tensor unsqueeze(const Tensor & self, int64_t dim);
+static inline Tensor & nonzero_out(const Tensor & self, Tensor & result);
+static inline Tensor nonzero(const Tensor & self);
+static inline Tensor & index_select_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & result);
+static inline Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index);
+static inline Tensor & range_out(Scalar start, Scalar end, Scalar step, Tensor & result);
+static inline Tensor & range_out(Scalar start, Scalar end, Tensor & result);
+static inline Tensor & gather_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & result);
+static inline Tensor gather(const Tensor & self, int64_t dim, const Tensor & index);
+static inline bool equal(const Tensor & self, const Tensor & other);
+static inline Tensor & __and___out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor __and__(const Tensor & self, Scalar value);
+static inline Tensor & __and___out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor __and__(const Tensor & self, const Tensor & other);
+static inline Tensor & __iand__(Tensor & self, Scalar value);
+static inline Tensor & __iand__(Tensor & self, const Tensor & other);
+static inline Tensor & __or___out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor __or__(const Tensor & self, Scalar value);
+static inline Tensor & __or___out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor __or__(const Tensor & self, const Tensor & other);
+static inline Tensor & __ior__(Tensor & self, Scalar value);
+static inline Tensor & __ior__(Tensor & self, const Tensor & other);
+static inline Tensor & __xor___out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor __xor__(const Tensor & self, Scalar value);
+static inline Tensor & __xor___out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor __xor__(const Tensor & self, const Tensor & other);
+static inline Tensor & __ixor__(Tensor & self, Scalar value);
+static inline Tensor & __ixor__(Tensor & self, const Tensor & other);
+static inline Tensor & __lshift___out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor __lshift__(const Tensor & self, Scalar value);
+static inline Tensor & __lshift___out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor __lshift__(const Tensor & self, const Tensor & other);
+static inline Tensor & __ilshift__(Tensor & self, Scalar value);
+static inline Tensor & __ilshift__(Tensor & self, const Tensor & other);
+static inline Tensor & __rshift___out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor __rshift__(const Tensor & self, Scalar value);
+static inline Tensor & __rshift___out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor __rshift__(const Tensor & self, const Tensor & other);
+static inline Tensor & __irshift__(Tensor & self, Scalar value);
+static inline Tensor & __irshift__(Tensor & self, const Tensor & other);
+static inline Tensor & lt_out(const Tensor & tensor, Scalar value, Tensor & result);
+static inline Tensor lt(const Tensor & tensor, Scalar value);
+static inline Tensor & lt_out(const Tensor & tensor, const Tensor & other, Tensor & result);
+static inline Tensor lt(const Tensor & tensor, const Tensor & other);
+static inline Tensor & gt_out(const Tensor & tensor, Scalar value, Tensor & result);
+static inline Tensor gt(const Tensor & tensor, Scalar value);
+static inline Tensor & gt_out(const Tensor & tensor, const Tensor & other, Tensor & result);
+static inline Tensor gt(const Tensor & tensor, const Tensor & other);
+static inline Tensor & le_out(const Tensor & tensor, Scalar value, Tensor & result);
+static inline Tensor le(const Tensor & tensor, Scalar value);
+static inline Tensor & le_out(const Tensor & tensor, const Tensor & other, Tensor & result);
+static inline Tensor le(const Tensor & tensor, const Tensor & other);
+static inline Tensor & ge_out(const Tensor & tensor, Scalar value, Tensor & result);
+static inline Tensor ge(const Tensor & tensor, Scalar value);
+static inline Tensor & ge_out(const Tensor & tensor, const Tensor & other, Tensor & result);
+static inline Tensor ge(const Tensor & tensor, const Tensor & other);
+static inline Tensor & eq_out(const Tensor & tensor, Scalar value, Tensor & result);
+static inline Tensor eq(const Tensor & tensor, Scalar value);
+static inline Tensor & eq_out(const Tensor & tensor, const Tensor & other, Tensor & result);
+static inline Tensor eq(const Tensor & tensor, const Tensor & other);
+static inline Tensor & ne_out(const Tensor & tensor, Scalar value, Tensor & result);
+static inline Tensor ne(const Tensor & tensor, Scalar value);
+static inline Tensor & ne_out(const Tensor & tensor, const Tensor & other, Tensor & result);
+static inline Tensor ne(const Tensor & tensor, const Tensor & other);
+static inline std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & min, Tensor & min_indices);
+static inline std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim, bool keepdim);
+static inline std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, int64_t dim, Tensor & min, Tensor & min_indices);
+static inline std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim);
+static inline Tensor & min_out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor min(const Tensor & self, const Tensor & other);
+static inline Scalar min(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> max_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & max, Tensor & max_indices);
+static inline std::tuple<Tensor,Tensor> max(const Tensor & self, int64_t dim, bool keepdim);
+static inline std::tuple<Tensor &,Tensor &> max_out(const Tensor & self, int64_t dim, Tensor & max, Tensor & max_indices);
+static inline std::tuple<Tensor,Tensor> max(const Tensor & self, int64_t dim);
+static inline Tensor & max_out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor max(const Tensor & self, const Tensor & other);
+static inline Scalar max(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, bool keepdim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, bool keepdim);
+static inline std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k);
+static inline std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, int64_t dim, bool keepdim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim);
+static inline std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, int64_t dim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, int64_t dim);
+static inline std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, bool keepdim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> mode(const Tensor & self, bool keepdim);
+static inline std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> mode(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim, bool keepdim);
+static inline std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim);
+static inline std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, bool keepdim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> median(const Tensor & self, bool keepdim);
+static inline std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> median(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim);
+static inline std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim, bool keepdim);
+static inline std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> sort(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim);
+static inline std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim, bool descending);
+static inline std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k);
+static inline std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted);
+static inline std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, bool largest, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim, bool largest);
+static inline std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, Tensor & values, Tensor & indices);
+static inline std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim);
+static inline Tensor & abs_out(const Tensor & self, Tensor & destination);
+static inline Tensor abs(const Tensor & self);
+static inline Tensor & sigmoid_out(const Tensor & self, Tensor & result);
+static inline Tensor sigmoid(const Tensor & self);
+static inline Tensor & log_out(const Tensor & self, Tensor & result);
+static inline Tensor log(const Tensor & self);
+static inline Tensor & log1p_out(const Tensor & self, Tensor & result);
+static inline Tensor log1p(const Tensor & self);
+static inline Tensor & lgamma_out(const Tensor & self, Tensor & result);
+static inline Tensor lgamma(const Tensor & self);
+static inline Tensor & exp_out(const Tensor & self, Tensor & result);
+static inline Tensor exp(const Tensor & self);
+static inline Tensor & cos_out(const Tensor & self, Tensor & result);
+static inline Tensor cos(const Tensor & self);
+static inline Tensor & acos_out(const Tensor & self, Tensor & result);
+static inline Tensor acos(const Tensor & self);
+static inline Tensor & cosh_out(const Tensor & self, Tensor & result);
+static inline Tensor cosh(const Tensor & self);
+static inline Tensor & sin_out(const Tensor & self, Tensor & result);
+static inline Tensor sin(const Tensor & self);
+static inline Tensor & asin_out(const Tensor & self, Tensor & result);
+static inline Tensor asin(const Tensor & self);
+static inline Tensor & sinh_out(const Tensor & self, Tensor & result);
+static inline Tensor sinh(const Tensor & self);
+static inline Tensor & tan_out(const Tensor & self, Tensor & result);
+static inline Tensor tan(const Tensor & self);
+static inline Tensor & atan_out(const Tensor & self, Tensor & result);
+static inline Tensor atan(const Tensor & self);
+static inline Tensor & tanh_out(const Tensor & self, Tensor & result);
+static inline Tensor tanh(const Tensor & self);
+static inline Tensor & sqrt_out(const Tensor & self, Tensor & result);
+static inline Tensor sqrt(const Tensor & self);
+static inline Tensor & rsqrt_out(const Tensor & self, Tensor & result);
+static inline Tensor rsqrt(const Tensor & self);
+static inline Tensor & ceil_out(const Tensor & self, Tensor & result);
+static inline Tensor ceil(const Tensor & self);
+static inline Tensor & floor_out(const Tensor & self, Tensor & result);
+static inline Tensor floor(const Tensor & self);
+static inline Tensor & round_out(const Tensor & self, Tensor & result);
+static inline Tensor round(const Tensor & self);
+static inline Tensor & trunc_out(const Tensor & self, Tensor & result);
+static inline Tensor trunc(const Tensor & self);
+static inline Tensor & frac_out(const Tensor & self, Tensor & result);
+static inline Tensor frac(const Tensor & self);
+static inline Tensor & mean_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination);
+static inline Tensor mean(const Tensor & self, int64_t dim, bool keepdim);
+static inline Tensor & mean_out(const Tensor & self, int64_t dim, Tensor & destination);
+static inline Tensor mean(const Tensor & self, int64_t dim);
+static inline Scalar mean(const Tensor & self);
+static inline Tensor & var_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination);
+static inline Tensor var(const Tensor & self, int64_t dim, bool keepdim);
+static inline Tensor & var_out(const Tensor & self, int64_t dim, Tensor & destination);
+static inline Tensor var(const Tensor & self, int64_t dim);
+static inline Scalar var(const Tensor & self);
+static inline Tensor & std_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination);
+static inline Tensor std(const Tensor & self, int64_t dim, bool keepdim);
+static inline Tensor & std_out(const Tensor & self, int64_t dim, Tensor & destination);
+static inline Tensor std(const Tensor & self, int64_t dim);
+static inline Scalar std(const Tensor & self);
+static inline Tensor & norm_out(const Tensor & self, Scalar p, int64_t dim, bool keepdim, Tensor & destination);
+static inline Tensor norm(const Tensor & self, Scalar p, int64_t dim, bool keepdim);
+static inline Tensor & norm_out(const Tensor & self, Scalar p, int64_t dim, Tensor & destination);
+static inline Tensor norm(const Tensor & self, Scalar p, int64_t dim);
+static inline Scalar norm(const Tensor & self, Scalar p);
+static inline Scalar norm(const Tensor & self);
+static inline Tensor & renorm_out(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm, Tensor & destination);
+static inline Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm);
+static inline Scalar dist(const Tensor & self, const Tensor & other, Scalar p);
+static inline Scalar dist(const Tensor & self, const Tensor & other);
+static inline Tensor & reciprocal_out(const Tensor & self, Tensor & destination);
+static inline Tensor reciprocal(const Tensor & self);
+static inline Tensor & neg_out(const Tensor & self, Tensor & destination);
+static inline Tensor neg(const Tensor & self);
+static inline Tensor & atan2_out(const Tensor & self, const Tensor & other, Tensor & destination);
+static inline Tensor atan2(const Tensor & self, const Tensor & other);
+static inline Tensor & pow_out(const Tensor & self, Scalar exponent, Tensor & destination);
+static inline Tensor pow(const Tensor & self, Scalar exponent);
+static inline Tensor & pow_out(const Tensor & self, const Tensor & exponent, Tensor & destination);
+static inline Tensor pow(const Tensor & self, const Tensor & exponent);
+static inline Tensor & lerp_out(const Tensor & self, const Tensor & end, Scalar weight, Tensor & destination);
+static inline Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight);
+static inline Tensor & linspace_out(Scalar start, Scalar end, int64_t steps, Tensor & result);
+static inline Tensor & linspace_out(Scalar start, Scalar end, Tensor & result);
+static inline Tensor & logspace_out(Scalar start, Scalar end, int64_t steps, Tensor & result);
+static inline Tensor & logspace_out(Scalar start, Scalar end, Tensor & result);
+static inline Tensor & histc_out(const Tensor & self, Tensor & destination);
+static inline Tensor histc(const Tensor & self);
+static inline Tensor & histc_out(const Tensor & self, int64_t bins, Tensor & destination);
+static inline Tensor histc(const Tensor & self, int64_t bins);
+static inline Tensor & histc_out(const Tensor & self, int64_t bins, Scalar min, Tensor & destination);
+static inline Tensor histc(const Tensor & self, int64_t bins, Scalar min);
+static inline Tensor & histc_out(const Tensor & self, int64_t bins, Scalar min, Scalar max, Tensor & destination);
+static inline Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max);
+static inline Tensor & sum_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & result);
+static inline Tensor sum(const Tensor & self, int64_t dim, bool keepdim);
+static inline Tensor & sum_out(const Tensor & self, int64_t dim, Tensor & result);
+static inline Tensor sum(const Tensor & self, int64_t dim);
+static inline Scalar sum(const Tensor & self);
+static inline Tensor & prod_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & result);
+static inline Tensor prod(const Tensor & self, int64_t dim, bool keepdim);
+static inline Tensor & prod_out(const Tensor & self, int64_t dim, Tensor & result);
+static inline Tensor prod(const Tensor & self, int64_t dim);
+static inline Scalar prod(const Tensor & self);
+static inline Tensor & cumsum_out(const Tensor & self, int64_t dim, Tensor & result);
+static inline Tensor cumsum(const Tensor & self, int64_t dim);
+static inline Tensor & cumprod_out(const Tensor & self, int64_t dim, Tensor & result);
+static inline Tensor cumprod(const Tensor & self, int64_t dim);
+static inline Tensor & sign_out(const Tensor & self, Tensor & result);
+static inline Tensor sign(const Tensor & self);
+static inline Scalar trace(const Tensor & self);
+static inline Tensor & add_out(const Tensor & self, Scalar value, const Tensor & other, Tensor & result);
+static inline Tensor add(const Tensor & self, Scalar value, const Tensor & other);
+static inline Tensor & add_out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor add(const Tensor & self, Scalar value);
+static inline Tensor & add_out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor add(const Tensor & self, const Tensor & other);
+static inline Tensor & sub_out(const Tensor & self, Scalar value, const Tensor & other, Tensor & result);
+static inline Tensor sub(const Tensor & self, Scalar value, const Tensor & other);
+static inline Tensor & sub_out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor sub(const Tensor & self, Scalar value);
+static inline Tensor & sub_out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor sub(const Tensor & self, const Tensor & other);
+static inline Tensor & mul_out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor mul(const Tensor & self, Scalar value);
+static inline Tensor & mul_out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor mul(const Tensor & self, const Tensor & other);
+static inline Tensor & div_out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor div(const Tensor & self, Scalar value);
+static inline Tensor & div_out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor div(const Tensor & self, const Tensor & other);
+static inline Tensor & fmod_out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor fmod(const Tensor & self, Scalar value);
+static inline Tensor & fmod_out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor fmod(const Tensor & self, const Tensor & other);
+static inline Tensor & remainder_out(const Tensor & self, Scalar value, Tensor & result);
+static inline Tensor remainder(const Tensor & self, Scalar value);
+static inline Tensor & remainder_out(const Tensor & self, const Tensor & other, Tensor & result);
+static inline Tensor remainder(const Tensor & self, const Tensor & other);
+static inline Tensor & clamp_out(const Tensor & self, Scalar min, Scalar max, Tensor & destination);
+static inline Tensor clamp(const Tensor & self, Scalar min, Scalar max);
+static inline Tensor & clamp_out(const Tensor & self, Scalar min, Tensor & result);
+static inline Tensor clamp(const Tensor & self, Scalar min);
+static inline Scalar dot(const Tensor & self, const Tensor & tensor);
+static inline Tensor & tril_out(const Tensor & self, int64_t k, Tensor & destination);
+static inline Tensor tril(const Tensor & self, int64_t k);
+static inline Tensor & tril_out(const Tensor & self, Tensor & destination);
+static inline Tensor tril(const Tensor & self);
+static inline Tensor & triu_out(const Tensor & self, int64_t k, Tensor & destination);
+static inline Tensor triu(const Tensor & self, int64_t k);
+static inline Tensor & triu_out(const Tensor & self, Tensor & destination);
+static inline Tensor triu(const Tensor & self);
+static inline Tensor & cross_out(const Tensor & self, const Tensor & other, int64_t dim, Tensor & destination);
+static inline Tensor cross(const Tensor & self, const Tensor & other, int64_t dim);
+static inline Tensor & cross_out(const Tensor & self, const Tensor & other, Tensor & destination);
+static inline Tensor cross(const Tensor & self, const Tensor & other);
+static inline Tensor & eye_out(int64_t n, Tensor & result);
+static inline Tensor & eye_out(int64_t n, int64_t m, Tensor & result);
+static inline Tensor & diag_out(const Tensor & self, int64_t diagonal, Tensor & result);
+static inline Tensor diag(const Tensor & self, int64_t diagonal);
+static inline Tensor & diag_out(const Tensor & self, Tensor & result);
+static inline Tensor diag(const Tensor & self);
+static inline Tensor & addmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat1, const Tensor & mat2, Tensor & result);
+static inline Tensor addmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat1, const Tensor & mat2);
+static inline Tensor & addmm_out(Scalar beta, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Tensor & result);
+static inline Tensor addmm(Scalar beta, const Tensor & self, const Tensor & mat1, const Tensor & mat2);
+static inline Tensor & addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Tensor & result);
+static inline Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2);
+static inline Tensor & addmv_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat, const Tensor & vec, Tensor & result);
+static inline Tensor addmv(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat, const Tensor & vec);
+static inline Tensor & addmv_out(Scalar beta, const Tensor & self, const Tensor & mat, const Tensor & vec, Tensor & result);
+static inline Tensor addmv(Scalar beta, const Tensor & self, const Tensor & mat, const Tensor & vec);
+static inline Tensor & addmv_out(const Tensor & self, const Tensor & mat, const Tensor & vec, Tensor & result);
+static inline Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec);
+static inline Tensor & addr_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & vec1, const Tensor & vec2, Tensor & result);
+static inline Tensor addr(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & vec1, const Tensor & vec2);
+static inline Tensor & addr_out(Scalar beta, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Tensor & result);
+static inline Tensor addr(Scalar beta, const Tensor & self, const Tensor & vec1, const Tensor & vec2);
+static inline Tensor & addr_out(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Tensor & result);
+static inline Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2);
+// ---- BLAS-style operations (generated bindings) -------------------------
+// NOTE(review): this header is auto-generated (see aten/tools/update_doc.sh);
+// fix issues in the generator, not by hand-editing this file.
+// Convention visible in the signatures: `_out` variants take the destination
+// as the trailing `Tensor & result` parameter and return `Tensor &`; the
+// non-`_out` forms return a Tensor by value. Several ops appear as multiple
+// overloads because C++ cannot express PyTorch's Python-style defaults for
+// the leading Scalar coefficients (beta/alpha) mixed with Tensor arguments,
+// so one overload is emitted per omitted argument.
+static inline Tensor & ger_out(const Tensor & self, const Tensor & vec2, Tensor & result);
+static inline Tensor ger(const Tensor & self, const Tensor & vec2);
+static inline Tensor & mv_out(const Tensor & self, const Tensor & vec, Tensor & result);
+static inline Tensor mv(const Tensor & self, const Tensor & vec);
+static inline Tensor & mm_out(const Tensor & self, const Tensor & mat2, Tensor & result);
+static inline Tensor mm(const Tensor & self, const Tensor & mat2);
+static inline Tensor & bmm_out(const Tensor & self, const Tensor & mat2, Tensor & result);
+static inline Tensor bmm(const Tensor & self, const Tensor & mat2);
+// addbmm/baddbmm: overloads drop `alpha`, then `beta` (Scalar coefficients).
+static inline Tensor & addbmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2, Tensor & result);
+static inline Tensor addbmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2);
+static inline Tensor & addbmm_out(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result);
+static inline Tensor addbmm(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2);
+static inline Tensor & addbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result);
+static inline Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2);
+static inline Tensor & baddbmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2, Tensor & result);
+static inline Tensor baddbmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2);
+static inline Tensor & baddbmm_out(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result);
+static inline Tensor baddbmm(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2);
+static inline Tensor & baddbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result);
+static inline Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2);
+// addcmul/addcdiv: overloads drop the optional `value` multiplier.
+static inline Tensor & addcmul_out(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2, Tensor & result);
+static inline Tensor addcmul(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2);
+static inline Tensor & addcmul_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Tensor & result);
+static inline Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2);
+static inline Tensor & addcdiv_out(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2, Tensor & result);
+static inline Tensor addcdiv(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2);
+static inline Tensor & addcdiv_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Tensor & result);
+static inline Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2);
+// ---- LAPACK-style solvers and factorizations ----------------------------
+// Tuple-returning ops pair the primary result with auxiliary outputs
+// (e.g. solution + LU factors, eigenvalues + eigenvectors, factor + pivots).
+// As above, stacked overloads stand in for Python default arguments
+// (upper/transpose/unitriangular, eigenvectors, tol, ...).
+static inline std::tuple<Tensor &,Tensor &> gesv_out(const Tensor & self, const Tensor & A, Tensor & solution, Tensor & lu);
+static inline std::tuple<Tensor,Tensor> gesv(const Tensor & self, const Tensor & A);
+static inline std::tuple<Tensor &,Tensor &> gels_out(const Tensor & self, const Tensor & A, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> gels(const Tensor & self, const Tensor & A);
+static inline std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular);
+static inline std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose);
+static inline std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper);
+static inline std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A);
+static inline std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, bool eigenvectors, bool upper, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors, bool upper);
+static inline std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, bool eigenvectors, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors);
+static inline std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> symeig(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> eig_out(const Tensor & self, bool eigenvectors, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> eig(const Tensor & self, bool eigenvectors);
+static inline std::tuple<Tensor &,Tensor &> eig_out(const Tensor & self, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> eig(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &,Tensor &> svd_out(const Tensor & self, bool some, Tensor & res1, Tensor & res2, Tensor & res3);
+static inline std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self, bool some);
+static inline std::tuple<Tensor &,Tensor &,Tensor &> svd_out(const Tensor & self, Tensor & res1, Tensor & res2, Tensor & res3);
+static inline std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self);
+static inline Tensor & inverse_out(const Tensor & self, Tensor & output);
+static inline Tensor inverse(const Tensor & self);
+// Cholesky family: potrf (factorize), potrs (solve), potri (inverse),
+// pstrf (pivoted factorization).
+static inline Tensor & potrf_out(const Tensor & self, bool upper, Tensor & output);
+static inline Tensor potrf(const Tensor & self, bool upper);
+static inline Tensor & potrf_out(const Tensor & self, Tensor & output);
+static inline Tensor potrf(const Tensor & self);
+static inline Tensor & potrs_out(const Tensor & self, const Tensor & input2, bool upper, Tensor & result);
+static inline Tensor potrs(const Tensor & self, const Tensor & input2, bool upper);
+static inline Tensor & potrs_out(const Tensor & self, const Tensor & input2, Tensor & result);
+static inline Tensor potrs(const Tensor & self, const Tensor & input2);
+static inline Tensor & potri_out(const Tensor & self, bool upper, Tensor & output);
+static inline Tensor potri(const Tensor & self, bool upper);
+static inline Tensor & potri_out(const Tensor & self, Tensor & output);
+static inline Tensor potri(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, bool upper, Scalar tol, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> pstrf(const Tensor & self, bool upper, Scalar tol);
+static inline std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, bool upper, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> pstrf(const Tensor & self, bool upper);
+static inline std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, Scalar tol, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> pstrf(const Tensor & self, Scalar tol);
+static inline std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> pstrf(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> qr_out(const Tensor & self, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> qr(const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> geqrf_out(const Tensor & self, Tensor & res1, Tensor & res2);
+static inline std::tuple<Tensor,Tensor> geqrf(const Tensor & self);
+// NOTE(review): orgqr/ormqr return std::tuple<Tensor, const Tensor &> — a
+// reference member inside a by-value tuple is unusual for this API (every
+// sibling op returns value-only tuples) and risks dangling if it ever binds
+// to a temporary. Looks like a generator artifact; confirm against the
+// declaration generator rather than patching this file.
+static inline std::tuple<Tensor &,const Tensor &> orgqr_out(const Tensor & self, const Tensor & input2, Tensor & result);
+static inline std::tuple<Tensor,const Tensor &> orgqr(const Tensor & self, const Tensor & input2);
+static inline std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose, Tensor & result);
+static inline std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose);
+static inline std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, Tensor & result);
+static inline std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left);
+static inline std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, Tensor & result);
+static inline std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3);
+// Batched LU: btrifact factors, btrisolve solves with LU_data/LU_pivots.
+// NOTE(review): the first btrifact overload takes a leading `const Tensor &
+// info` — presumably a status/info tensor as in torch.btrifact; can't tell
+// from here whether it is read or written — verify against the TH docs.
+static inline std::tuple<Tensor &,Tensor &> btrifact_out(const Tensor & info, const Tensor & self, Tensor & result, Tensor & pivots);
+static inline std::tuple<Tensor,Tensor> btrifact(const Tensor & info, const Tensor & self);
+static inline std::tuple<Tensor &,Tensor &> btrifact_out(const Tensor & self, Tensor & result, Tensor & pivots);
+static inline std::tuple<Tensor,Tensor> btrifact(const Tensor & self);
+static inline Tensor & btrisolve_out(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots, Tensor & result);
+static inline Tensor btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots);
+// ---- Random sampling and indexing ---------------------------------------
+// Overloads with a leading `Generator &` draw from the supplied RNG; the
+// Generator-less forms presumably fall back to a default generator — confirm
+// in the Type implementation.
+static inline Tensor & randperm_out(Generator & generator, int64_t n, Tensor & result);
+static inline Tensor & randperm_out(int64_t n, Tensor & result);
+static inline Tensor & multinomial_out(Generator & generator, const Tensor & self, int64_t num_samples, bool replacement, Tensor & result);
+static inline Tensor multinomial(Generator & generator, const Tensor & self, int64_t num_samples, bool replacement);
+static inline Tensor & multinomial_out(Generator & generator, const Tensor & self, int64_t num_samples, Tensor & result);
+static inline Tensor multinomial(Generator & generator, const Tensor & self, int64_t num_samples);
+static inline Tensor & multinomial_out(const Tensor & self, int64_t num_samples, bool replacement, Tensor & result);
+static inline Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement);
+static inline Tensor & multinomial_out(const Tensor & self, int64_t num_samples, Tensor & result);
+static inline Tensor multinomial(const Tensor & self, int64_t num_samples);
+static inline Tensor & rand_out(Generator & generator, IntList size, Tensor & result);
+static inline Tensor & rand_out(IntList size, Tensor & result);
+static inline Tensor & randn_out(Generator & generator, IntList size, Tensor & result);
+static inline Tensor & randn_out(IntList size, Tensor & result);
+// NOTE(review): select takes `int dim` while other ops in this header use
+// int64_t for dimension-like arguments — likely a generator inconsistency;
+// worth verifying upstream (interface change, so not fixable here).
+static inline Tensor & select_out(const Tensor & self, int dim, int64_t sliceIndex, Tensor & result);
+static inline Tensor select(const Tensor & self, int dim, int64_t sliceIndex);
+// ---- THNN kernel bindings -----------------------------------------------
+// By THNN naming convention (confirm against the THNN headers):
+//   <Module>_updateOutput       — forward pass, results written into `output`
+//   <Module>_updateGradInput    — gradient w.r.t. input, into `gradInput`
+//   <Module>_accGradParameters  — accumulate parameter grads, scaled by `scale`
+// Overloads that drop tensors (bias, weights, ...) correspond to THNN's
+// optional arguments; all results are returned via the Tensor& out-params,
+// hence the `void` return types.
+static inline void Abs_updateOutput(const Tensor & input, const Tensor & output);
+static inline void Abs_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput);
+// Loss ("Criterion") kernels take (input, target) plus a sizeAverage flag.
+static inline void AbsCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage);
+static inline void AbsCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage);
+static inline void BCECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights);
+static inline void BCECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage);
+static inline void BCECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights);
+static inline void BCECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage);
+static inline void ClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights, const Tensor & total_weight, int64_t ignore_index);
+static inline void ClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & total_weight, int64_t ignore_index);
+static inline void ClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights, const Tensor & total_weight, int64_t ignore_index);
+static inline void ClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & total_weight, int64_t ignore_index);
+static inline void SpatialClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights, const Tensor & total_weight);
+static inline void SpatialClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & total_weight);
+static inline void SpatialClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights, const Tensor & total_weight);
+static inline void SpatialClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & total_weight);
+// Activation kernels; `inplace` flags request in-place computation.
+static inline void ELU_updateOutput(const Tensor & input, const Tensor & output, Scalar alpha, bool inplace);
+static inline void ELU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output, Scalar alpha, bool inplace);
+static inline void DistKLDivCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage);
+static inline void DistKLDivCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage);
+static inline void GatedLinear_updateOutput(const Tensor & input, const Tensor & output, int dim);
+static inline void GatedLinear_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int dim);
+static inline void HardShrink_updateOutput(const Tensor & input, const Tensor & output, Scalar lambda);
+static inline void HardShrink_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar lambda);
+static inline void HardTanh_updateOutput(const Tensor & input, const Tensor & output, Scalar min_val, Scalar max_val, bool inplace);
+static inline void HardTanh_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar min_val, Scalar max_val, bool inplace);
+static inline void L1Cost_updateOutput(const Tensor & input, const Tensor & output);
+static inline void L1Cost_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput);
+static inline void L1Cost_updateGradInput(const Tensor & input, const Tensor & gradInput);
+static inline void LeakyReLU_updateOutput(const Tensor & input, const Tensor & output, Scalar negval, bool inplace);
+static inline void LeakyReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar negval, bool inplace);
+// Fused RNN cell kernels (GRU/LSTM); overloads drop the optional biases.
+static inline void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & bias2, const Tensor & hx, const Tensor & output, const Tensor & storage);
+static inline void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & hx, const Tensor & output, const Tensor & storage);
+static inline void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & hx, const Tensor & output, const Tensor & storage);
+static inline void GRUFused_updateGradInput(const Tensor & gradInInput, const Tensor & gradInHidden, const Tensor & gradOutput, const Tensor & gradInputHx, const Tensor & storage);
+static inline void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & bias2, const Tensor & cell, const Tensor & output, const Tensor & outputCell);
+static inline void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & cell, const Tensor & output, const Tensor & outputCell);
+static inline void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & cell, const Tensor & output, const Tensor & outputCell);
+static inline void LSTMFused_updateGradInput(const Tensor & storage, const Tensor & gradInGates, const Tensor & cx, const Tensor & cy, const Tensor & gradOutput, const Tensor & gradOutputCell, const Tensor & gradInputCx);
+static inline void LogSigmoid_updateOutput(const Tensor & input, const Tensor & output, const Tensor & buffer);
+static inline void LogSigmoid_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & buffer);
+static inline void LogSoftMax_updateOutput(const Tensor & input, const Tensor & output);
+static inline void LogSoftMax_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output);
+static inline void MarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, Scalar margin);
+static inline void MarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, Scalar margin);
+static inline void SoftMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage);
+static inline void SoftMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage);
+static inline void MSECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage);
+static inline void MSECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage);
+static inline void MultiLabelMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, const Tensor & isTarget, bool sizeAverage);
+static inline void MultiLabelMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, const Tensor & isTarget, bool sizeAverage);
+static inline void MultiMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, int p, const Tensor & weights, Scalar margin);
+static inline void MultiMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, int p, Scalar margin);
+static inline void MultiMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, int p, const Tensor & weights, Scalar margin);
+static inline void MultiMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, int p, Scalar margin);
+static inline void PReLU_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, int64_t nOutputPlane);
+static inline void PReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int64_t nOutputPlane);
+static inline void PReLU_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradWeight, const Tensor & gradWeightBuf, const Tensor & gradWeightBuf2, int64_t nOutputPlane, Scalar scale);
+static inline void Linear_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & addBuffer);
+static inline void Linear_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight);
+static inline void Linear_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & addBuffer, Scalar scale);
+static inline void RReLU_updateOutput(const Tensor & input, const Tensor & output, const Tensor & noise, Scalar lower, Scalar upper, bool train, bool inplace, Generator & generator);
+static inline void RReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & noise, Scalar lower, Scalar upper, bool train, bool inplace);
+static inline void Sigmoid_updateOutput(const Tensor & input, const Tensor & output);
+// NOTE(review): Sigmoid/Tanh have updateGradInput overloads without `input`
+// — presumably because their gradients depend only on `output`; confirm in
+// the THNN sources.
+static inline void Sigmoid_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output);
+static inline void Sigmoid_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output);
+static inline void SmoothL1Criterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage);
+static inline void SmoothL1Criterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage);
+static inline void SoftMax_updateOutput(const Tensor & input, const Tensor & output);
+static inline void SoftMax_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output);
+static inline void SoftPlus_updateOutput(const Tensor & input, const Tensor & output, Scalar beta, Scalar threshold);
+static inline void SoftPlus_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output, Scalar beta, Scalar threshold);
+static inline void SoftShrink_updateOutput(const Tensor & input, const Tensor & output, Scalar lambda);
+static inline void SoftShrink_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar lambda);
+static inline void IndexLinear_updateOutput(const Tensor & keys, int64_t keysOffset, const Tensor & values, const Tensor & sizes, const Tensor & cumSumSizes, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & normalizedValues, int train);
+static inline void IndexLinear_accGradParameters(const Tensor & keys, int64_t keysOffset, const Tensor & values, const Tensor & sizes, const Tensor & cumSumSizes, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & bias, const Tensor & valuesBuffer, Scalar weightDecay, Scalar scale);
+static inline void SparseLinear_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias);
+static inline void SparseLinear_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & bias, Scalar weightDecay, Scalar scale);
+static inline void Sqrt_updateOutput(const Tensor & input, const Tensor & output, Scalar eps);
+static inline void Sqrt_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output);
+static inline void Square_updateOutput(const Tensor & input, const Tensor & output);
+static inline void Square_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput);
+static inline void Tanh_updateOutput(const Tensor & input, const Tensor & output);
+static inline void Tanh_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output);
+static inline void Tanh_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output);
+static inline void Threshold_updateOutput(const Tensor & input, const Tensor & output, Scalar threshold, Scalar val, bool inplace);
+static inline void Threshold_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar threshold, Scalar val, bool inplace);
+// Temporal (1-D) convolution / pooling kernels; kW/dW are kernel and stride.
+static inline void TemporalConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int dW, int inputFrameSize, int outputFrameSize);
+static inline void TemporalConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int dW);
+static inline void TemporalConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int dW, Scalar scale);
+static inline void TemporalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int dW);
+static inline void TemporalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int dW);
+static inline void TemporalSubSampling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int dW, int inputFrameSize);
+static inline void TemporalSubSampling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int dW);
+static inline void TemporalSubSampling_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int dW, Scalar scale);
+static inline void TemporalRowConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst);
+static inline void TemporalRowConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst);
+static inline void TemporalRowConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst, Scalar scale);
+// Batch normalization; successive overloads drop optional weight/bias (and,
+// in the backward pass, optional gradient outputs).
+static inline void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps);
+static inline void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps);
+static inline void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps);
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps);
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps);
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps);
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps);
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps);
+// Spatial (2-D) convolution and pooling kernels; kW/kH kernel, dW/dH stride,
+// padW/padH padding.
+static inline void SpatialConvolutionMap_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH);
+static inline void SpatialConvolutionMap_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH);
+static inline void SpatialConvolutionMap_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH, Scalar scale);
+static inline void SpatialConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH);
+static inline void SpatialConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH);
+static inline void SpatialConvolutionMM_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH);
+static inline void SpatialConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale);
+static inline void SpatialConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale);
+static inline void SpatialDepthWiseConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH);
+static inline void SpatialDepthWiseConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH);
+static inline void SpatialDepthWiseConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH);
+static inline void SpatialDepthWiseConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale);
+static inline void SpatialDepthWiseConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale);
+static inline void SpatialConvolutionLocal_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight);
+static inline void SpatialConvolutionLocal_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight);
+static inline void SpatialConvolutionLocal_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight, Scalar scale);
+static inline void SpatialAdaptiveMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int owidth, int oheight);
+static inline void SpatialAdaptiveMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices);
+static inline void SpatialAdaptiveAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int owidth, int oheight);
+static inline void SpatialAdaptiveAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput);
+static inline void SpatialAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad);
+static inline void SpatialAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad);
+static inline void SpatialFractionalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, int outputW, int outputH, int poolSizeW, int poolSizeH, const Tensor & indices, const Tensor & randomSamples);
+static inline void SpatialFractionalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int outputW, int outputH, int poolSizeW, int poolSizeH, const Tensor & indices);
+static inline void SpatialFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH);
+static inline void SpatialFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH);
+static inline void SpatialFullConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH);
+static inline void SpatialFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH, Scalar scale);
+static inline void SpatialFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH, Scalar scale);
+static inline void SpatialFullConvolutionMap_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH);
+static inline void SpatialFullConvolutionMap_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH);
+static inline void SpatialFullConvolutionMap_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH, Scalar scale);
+static inline void SpatialDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH);
+static inline void SpatialDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH);
+static inline void SpatialDilatedConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH);
+static inline void SpatialDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, Scalar scale);
+static inline void SpatialDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, Scalar scale);
+static inline void SpatialMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode);
+static inline void SpatialMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode);
+static inline void SpatialDilatedMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, bool ceil_mode);
+static inline void SpatialDilatedMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, bool ceil_mode);
+static inline void SpatialMaxUnpooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int owidth, int oheight);
+static inline void SpatialMaxUnpooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int owidth, int oheight);
+static inline void SpatialSubSampling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int kH, int dW, int dH);
+static inline void SpatialSubSampling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int kH, int dW, int dH);
+static inline void SpatialSubSampling_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int kH, int dW, int dH, Scalar scale);
+static inline void SpatialUpSamplingNearest_updateOutput(const Tensor & input, const Tensor & output, int scale_factor);
+static inline void SpatialUpSamplingNearest_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int scale_factor);
+static inline void SpatialUpSamplingBilinear_updateOutput(const Tensor & input, const Tensor & output, int outputHeight, int outputWidth);
+static inline void SpatialUpSamplingBilinear_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, int nbatch, int nchannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth);
+static inline void VolumetricAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int kT, int kW, int kH, int dT, int dW, int dH);
+static inline void VolumetricAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int kT, int kW, int kH, int dT, int dW, int dH);
+static inline void VolumetricConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH);
+static inline void VolumetricConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH);
+static inline void VolumetricConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, int dT, int dW, int dH, int pT, int pW, int pH);
+static inline void VolumetricConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale);
+static inline void VolumetricConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale);
+static inline void VolumetricConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH);
+static inline void VolumetricConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH);
+static inline void VolumetricConvolutionMM_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH);
+static inline void VolumetricConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale);
+static inline void VolumetricConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale);
+static inline void VolumetricFractionalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, int outputT, int outputW, int outputH, int poolSizeT, int poolSizeW, int poolSizeH, const Tensor & indices, const Tensor & randomSamples);
+static inline void VolumetricFractionalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int outputT, int outputW, int outputH, int poolSizeT, int poolSizeW, int poolSizeH, const Tensor & indices);
+static inline void VolumetricFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH);
+static inline void VolumetricFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH);
+static inline void VolumetricFullConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH);
+static inline void VolumetricFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH, Scalar scale);
+static inline void VolumetricFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH, Scalar scale);
+static inline void VolumetricDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH);
+static inline void VolumetricDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH);
+static inline void VolumetricDilatedConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH);
+static inline void VolumetricDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, Scalar scale);
+static inline void VolumetricDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, Scalar scale);
+static inline void VolumetricMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, bool ceilMode);
+static inline void VolumetricMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, bool ceilMode);
+static inline void VolumetricDilatedMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, int dilationT, int dilationW, int dilationH, bool ceilMode);
+static inline void VolumetricDilatedMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, int dilationT, int dilationW, int dilationH, bool ceilMode);
+static inline void VolumetricMaxUnpooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH);
+static inline void VolumetricMaxUnpooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH);
+static inline void SpatialReflectionPadding_updateOutput(const Tensor & input, const Tensor & output, int pad_l, int pad_r, int pad_t, int pad_b);
+static inline void SpatialReflectionPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pad_l, int pad_r, int pad_t, int pad_b);
+static inline void SpatialReplicationPadding_updateOutput(const Tensor & input, const Tensor & output, int pad_l, int pad_r, int pad_t, int pad_b);
+static inline void SpatialReplicationPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pad_l, int pad_r, int pad_t, int pad_b);
+static inline void VolumetricReplicationPadding_updateOutput(const Tensor & input, const Tensor & output, int pleft, int pright, int ptop, int pbottom, int pfront, int pback);
+static inline void VolumetricReplicationPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback);
+static inline void VolumetricUpSamplingNearest_updateOutput(const Tensor & input, const Tensor & output, int scale_factor);
+static inline void VolumetricUpSamplingNearest_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int scale_factor);
+static inline void VolumetricUpSamplingTrilinear_updateOutput(const Tensor & input, const Tensor & output, int outputDepth, int outputHeight, int outputWidth);
+static inline void VolumetricUpSamplingTrilinear_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, int nbatch, int nchannels, int inputDepth, int inputHeight, int inputWidth, int outputDepth, int outputHeight, int outputWidth);
+static inline void SpatialCrossMapLRN_updateOutput(const Tensor & input, const Tensor & output, const Tensor & scale, int size, Scalar alpha, Scalar beta, Scalar k);
+static inline void SpatialCrossMapLRN_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & scale, const Tensor & output, int size, Scalar alpha, Scalar beta, Scalar k);
+
+// The function definitions below are all static inline because each is a
+// one-line wrapper: it statically dispatches to a Type method, which performs
+// the actual dynamic dispatch based on the appropriate argument's type().
+// Factory-style out variants: there is no `self` tensor, so dispatch goes
+// through the caller-provided `result` tensor's Type.
+static inline Tensor & zeros_out(IntList size, Tensor & result) {
+    return result.type().zeros_out(size, result);
+}
+static inline Tensor & ones_out(IntList size, Tensor & result) {
+    return result.type().ones_out(size, result);
+}
+// Generic tensor functions. Each wrapper forwards to the virtual method on
+// self.type(), so the same free function works for any backend / scalar type
+// the tensor holds. `*_out` overloads write into a caller-provided `result`;
+// the plain overloads return a new Tensor.
+static inline int64_t numel(const Tensor & self) {
+    return self.type().numel(self);
+}
+static inline Tensor & masked_select_out(const Tensor & self, const Tensor & mask, Tensor & result) {
+    return self.type().masked_select_out(self, mask, result);
+}
+static inline Tensor masked_select(const Tensor & self, const Tensor & mask) {
+    return self.type().masked_select(self, mask);
+}
+static inline Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1) {
+    return self.type().transpose(self, dim0, dim1);
+}
+static inline Tensor t(const Tensor & self) {
+    return self.type().t(self);
+}
+static inline Tensor & squeeze_out(const Tensor & self, int64_t dim, Tensor & result) {
+    return self.type().squeeze_out(self, dim, result);
+}
+static inline Tensor squeeze(const Tensor & self, int64_t dim) {
+    return self.type().squeeze(self, dim);
+}
+static inline Tensor & squeeze_out(const Tensor & self, Tensor & result) {
+    return self.type().squeeze_out(self, result);
+}
+static inline Tensor squeeze(const Tensor & self) {
+    return self.type().squeeze(self);
+}
+static inline Tensor & unsqueeze_out(const Tensor & self, int64_t dim, Tensor & result) {
+    return self.type().unsqueeze_out(self, dim, result);
+}
+static inline Tensor unsqueeze(const Tensor & self, int64_t dim) {
+    return self.type().unsqueeze(self, dim);
+}
+static inline Tensor & nonzero_out(const Tensor & self, Tensor & result) {
+    return self.type().nonzero_out(self, result);
+}
+static inline Tensor nonzero(const Tensor & self) {
+    return self.type().nonzero(self);
+}
+static inline Tensor & index_select_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & result) {
+    return self.type().index_select_out(self, dim, index, result);
+}
+static inline Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index) {
+    return self.type().index_select(self, dim, index);
+}
+// range_out takes only Scalars as inputs, so — like the factory functions —
+// it dispatches via the result tensor's Type. The two-argument overload
+// covers the default-step case (Python default arguments become C++
+// overloads in this generated API).
+static inline Tensor & range_out(Scalar start, Scalar end, Scalar step, Tensor & result) {
+    return result.type().range_out(start, end, step, result);
+}
+static inline Tensor & range_out(Scalar start, Scalar end, Tensor & result) {
+    return result.type().range_out(start, end, result);
+}
+// gather selects values along `dim` using the integer `index` tensor;
+// equal returns a bool (whole-tensor comparison, not element-wise).
+static inline Tensor & gather_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & result) {
+    return self.type().gather_out(self, dim, index, result);
+}
+static inline Tensor gather(const Tensor & self, int64_t dim, const Tensor & index) {
+    return self.type().gather(self, dim, index);
+}
+static inline bool equal(const Tensor & self, const Tensor & other) {
+    return self.type().equal(self, other);
+}
+// Bitwise and shift operators. For each operator `op` the pattern is:
+//   __op___out — writes into the caller-provided `result`
+//   __op__     — allocating variant, returns a new Tensor
+//   __iop__    — in-place, mutates and returns `self`
+// and each exists in a Scalar overload and a Tensor (`other`) overload.
+static inline Tensor & __and___out(const Tensor & self, Scalar value, Tensor & result) {
+    return self.type().__and___out(self, value, result);
+}
+static inline Tensor __and__(const Tensor & self, Scalar value) {
+    return self.type().__and__(self, value);
+}
+static inline Tensor & __and___out(const Tensor & self, const Tensor & other, Tensor & result) {
+    return self.type().__and___out(self, other, result);
+}
+static inline Tensor __and__(const Tensor & self, const Tensor & other) {
+    return self.type().__and__(self, other);
+}
+static inline Tensor & __iand__(Tensor & self, Scalar value) {
+    return self.type().__iand__(self, value);
+}
+static inline Tensor & __iand__(Tensor & self, const Tensor & other) {
+    return self.type().__iand__(self, other);
+}
+static inline Tensor & __or___out(const Tensor & self, Scalar value, Tensor & result) {
+    return self.type().__or___out(self, value, result);
+}
+static inline Tensor __or__(const Tensor & self, Scalar value) {
+    return self.type().__or__(self, value);
+}
+static inline Tensor & __or___out(const Tensor & self, const Tensor & other, Tensor & result) {
+    return self.type().__or___out(self, other, result);
+}
+static inline Tensor __or__(const Tensor & self, const Tensor & other) {
+    return self.type().__or__(self, other);
+}
+static inline Tensor & __ior__(Tensor & self, Scalar value) {
+    return self.type().__ior__(self, value);
+}
+static inline Tensor & __ior__(Tensor & self, const Tensor & other) {
+    return self.type().__ior__(self, other);
+}
+static inline Tensor & __xor___out(const Tensor & self, Scalar value, Tensor & result) {
+    return self.type().__xor___out(self, value, result);
+}
+static inline Tensor __xor__(const Tensor & self, Scalar value) {
+    return self.type().__xor__(self, value);
+}
+static inline Tensor & __xor___out(const Tensor & self, const Tensor & other, Tensor & result) {
+    return self.type().__xor___out(self, other, result);
+}
+static inline Tensor __xor__(const Tensor & self, const Tensor & other) {
+    return self.type().__xor__(self, other);
+}
+static inline Tensor & __ixor__(Tensor & self, Scalar value) {
+    return self.type().__ixor__(self, value);
+}
+static inline Tensor & __ixor__(Tensor & self, const Tensor & other) {
+    return self.type().__ixor__(self, other);
+}
+static inline Tensor & __lshift___out(const Tensor & self, Scalar value, Tensor & result) {
+    return self.type().__lshift___out(self, value, result);
+}
+static inline Tensor __lshift__(const Tensor & self, Scalar value) {
+    return self.type().__lshift__(self, value);
+}
+static inline Tensor & __lshift___out(const Tensor & self, const Tensor & other, Tensor & result) {
+    return self.type().__lshift___out(self, other, result);
+}
+static inline Tensor __lshift__(const Tensor & self, const Tensor & other) {
+    return self.type().__lshift__(self, other);
+}
+static inline Tensor & __ilshift__(Tensor & self, Scalar value) {
+    return self.type().__ilshift__(self, value);
+}
+static inline Tensor & __ilshift__(Tensor & self, const Tensor & other) {
+    return self.type().__ilshift__(self, other);
+}
+static inline Tensor & __rshift___out(const Tensor & self, Scalar value, Tensor & result) {
+    return self.type().__rshift___out(self, value, result);
+}
+static inline Tensor __rshift__(const Tensor & self, Scalar value) {
+    return self.type().__rshift__(self, value);
+}
+static inline Tensor & __rshift___out(const Tensor & self, const Tensor & other, Tensor & result) {
+    return self.type().__rshift___out(self, other, result);
+}
+static inline Tensor __rshift__(const Tensor & self, const Tensor & other) {
+    return self.type().__rshift__(self, other);
+}
+static inline Tensor & __irshift__(Tensor & self, Scalar value) {
+    return self.type().__irshift__(self, value);
+}
+static inline Tensor & __irshift__(Tensor & self, const Tensor & other) {
+    return self.type().__irshift__(self, other);
+}
+// Comparison operators (lt/gt/le/ge/eq/ne), each with Scalar and Tensor
+// overloads plus an `_out` variant. Note the generated parameter is named
+// `tensor` rather than `self` here, and dispatch goes through tensor.type().
+static inline Tensor & lt_out(const Tensor & tensor, Scalar value, Tensor & result) {
+    return tensor.type().lt_out(tensor, value, result);
+}
+static inline Tensor lt(const Tensor & tensor, Scalar value) {
+    return tensor.type().lt(tensor, value);
+}
+static inline Tensor & lt_out(const Tensor & tensor, const Tensor & other, Tensor & result) {
+    return tensor.type().lt_out(tensor, other, result);
+}
+static inline Tensor lt(const Tensor & tensor, const Tensor & other) {
+    return tensor.type().lt(tensor, other);
+}
+static inline Tensor & gt_out(const Tensor & tensor, Scalar value, Tensor & result) {
+    return tensor.type().gt_out(tensor, value, result);
+}
+static inline Tensor gt(const Tensor & tensor, Scalar value) {
+    return tensor.type().gt(tensor, value);
+}
+static inline Tensor & gt_out(const Tensor & tensor, const Tensor & other, Tensor & result) {
+    return tensor.type().gt_out(tensor, other, result);
+}
+static inline Tensor gt(const Tensor & tensor, const Tensor & other) {
+    return tensor.type().gt(tensor, other);
+}
+static inline Tensor & le_out(const Tensor & tensor, Scalar value, Tensor & result) {
+    return tensor.type().le_out(tensor, value, result);
+}
+static inline Tensor le(const Tensor & tensor, Scalar value) {
+    return tensor.type().le(tensor, value);
+}
+static inline Tensor & le_out(const Tensor & tensor, const Tensor & other, Tensor & result) {
+    return tensor.type().le_out(tensor, other, result);
+}
+static inline Tensor le(const Tensor & tensor, const Tensor & other) {
+    return tensor.type().le(tensor, other);
+}
+static inline Tensor & ge_out(const Tensor & tensor, Scalar value, Tensor & result) {
+    return tensor.type().ge_out(tensor, value, result);
+}
+static inline Tensor ge(const Tensor & tensor, Scalar value) {
+    return tensor.type().ge(tensor, value);
+}
+static inline Tensor & ge_out(const Tensor & tensor, const Tensor & other, Tensor & result) {
+    return tensor.type().ge_out(tensor, other, result);
+}
+static inline Tensor ge(const Tensor & tensor, const Tensor & other) {
+    return tensor.type().ge(tensor, other);
+}
+static inline Tensor & eq_out(const Tensor & tensor, Scalar value, Tensor & result) {
+    return tensor.type().eq_out(tensor, value, result);
+}
+static inline Tensor eq(const Tensor & tensor, Scalar value) {
+    return tensor.type().eq(tensor, value);
+}
+static inline Tensor & eq_out(const Tensor & tensor, const Tensor & other, Tensor & result) {
+    return tensor.type().eq_out(tensor, other, result);
+}
+static inline Tensor eq(const Tensor & tensor, const Tensor & other) {
+    return tensor.type().eq(tensor, other);
+}
+static inline Tensor & ne_out(const Tensor & tensor, Scalar value, Tensor & result) {
+    return tensor.type().ne_out(tensor, value, result);
+}
+static inline Tensor ne(const Tensor & tensor, Scalar value) {
+    return tensor.type().ne(tensor, value);
+}
+static inline Tensor & ne_out(const Tensor & tensor, const Tensor & other, Tensor & result) {
+    return tensor.type().ne_out(tensor, other, result);
+}
+static inline Tensor ne(const Tensor & tensor, const Tensor & other) {
+    return tensor.type().ne(tensor, other);
+}
+// min/max families:
+//   (self, dim[, keepdim]) — reduction along `dim`, returns a
+//                            (values, indices) tuple; the keepdim-less
+//                            overload is the default-argument form
+//   (self, other)          — element-wise binary min/max
+//   (self)                 — full reduction to a single Scalar
+static inline std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & min, Tensor & min_indices) {
+    return self.type().min_out(self, dim, keepdim, min, min_indices);
+}
+static inline std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim, bool keepdim) {
+    return self.type().min(self, dim, keepdim);
+}
+static inline std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, int64_t dim, Tensor & min, Tensor & min_indices) {
+    return self.type().min_out(self, dim, min, min_indices);
+}
+static inline std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim) {
+    return self.type().min(self, dim);
+}
+static inline Tensor & min_out(const Tensor & self, const Tensor & other, Tensor & result) {
+    return self.type().min_out(self, other, result);
+}
+static inline Tensor min(const Tensor & self, const Tensor & other) {
+    return self.type().min(self, other);
+}
+static inline Scalar min(const Tensor & self) {
+    return self.type().min(self);
+}
+static inline std::tuple<Tensor &,Tensor &> max_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & max, Tensor & max_indices) {
+    return self.type().max_out(self, dim, keepdim, max, max_indices);
+}
+static inline std::tuple<Tensor,Tensor> max(const Tensor & self, int64_t dim, bool keepdim) {
+    return self.type().max(self, dim, keepdim);
+}
+static inline std::tuple<Tensor &,Tensor &> max_out(const Tensor & self, int64_t dim, Tensor & max, Tensor & max_indices) {
+    return self.type().max_out(self, dim, max, max_indices);
+}
+static inline std::tuple<Tensor,Tensor> max(const Tensor & self, int64_t dim) {
+    return self.type().max(self, dim);
+}
+static inline Tensor & max_out(const Tensor & self, const Tensor & other, Tensor & result) {
+    return self.type().max_out(self, other, result);
+}
+static inline Tensor max(const Tensor & self, const Tensor & other) {
+    return self.type().max(self, other);
+}
+static inline Scalar max(const Tensor & self) {
+    return self.type().max(self);
+}
+// kthvalue/mode/median: each returns a (values, indices) tuple; `*_out`
+// variants fill the caller-provided `values`/`indices` tensors and return
+// references to them. Overloads with fewer arguments correspond to the
+// Python API's default arguments (dim/keepdim).
+static inline std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, bool keepdim, Tensor & values, Tensor & indices) {
+    return self.type().kthvalue_out(self, k, keepdim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, bool keepdim) {
+    return self.type().kthvalue(self, k, keepdim);
+}
+static inline std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, Tensor & values, Tensor & indices) {
+    return self.type().kthvalue_out(self, k, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k) {
+    return self.type().kthvalue(self, k);
+}
+static inline std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) {
+    return self.type().kthvalue_out(self, k, dim, keepdim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) {
+    return self.type().kthvalue(self, k, dim, keepdim);
+}
+static inline std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, int64_t dim, Tensor & values, Tensor & indices) {
+    return self.type().kthvalue_out(self, k, dim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, int64_t dim) {
+    return self.type().kthvalue(self, k, dim);
+}
+static inline std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, bool keepdim, Tensor & values, Tensor & indices) {
+    return self.type().mode_out(self, keepdim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> mode(const Tensor & self, bool keepdim) {
+    return self.type().mode(self, keepdim);
+}
+static inline std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, Tensor & values, Tensor & indices) {
+    return self.type().mode_out(self, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> mode(const Tensor & self) {
+    return self.type().mode(self);
+}
+static inline std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) {
+    return self.type().mode_out(self, dim, keepdim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim, bool keepdim) {
+    return self.type().mode(self, dim, keepdim);
+}
+static inline std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices) {
+    return self.type().mode_out(self, dim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim) {
+    return self.type().mode(self, dim);
+}
+static inline std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, bool keepdim, Tensor & values, Tensor & indices) {
+    return self.type().median_out(self, keepdim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> median(const Tensor & self, bool keepdim) {
+    return self.type().median(self, keepdim);
+}
+static inline std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, Tensor & values, Tensor & indices) {
+    return self.type().median_out(self, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> median(const Tensor & self) {
+    return self.type().median(self);
+}
+static inline std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices) {
+    return self.type().median_out(self, dim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim) {
+    return self.type().median(self, dim);
+}
+static inline std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) {
+    return self.type().median_out(self, dim, keepdim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim, bool keepdim) {
+    return self.type().median(self, dim, keepdim);
+}
+static inline std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, Tensor & values, Tensor & indices) {
+ return self.type().sort_out(self, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> sort(const Tensor & self) {
+ return self.type().sort(self);
+}
+static inline std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices) {
+ return self.type().sort_out(self, dim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim) {
+ return self.type().sort(self, dim);
+}
+static inline std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) {
+ return self.type().sort_out(self, dim, descending, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim, bool descending) {
+ return self.type().sort(self, dim, descending);
+}
+static inline std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, Tensor & values, Tensor & indices) {
+ return self.type().topk_out(self, k, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k) {
+ return self.type().topk(self, k);
+}
+static inline std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, Tensor & values, Tensor & indices) {
+ return self.type().topk_out(self, k, dim, largest, sorted, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
+ return self.type().topk(self, k, dim, largest, sorted);
+}
+static inline std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, bool largest, Tensor & values, Tensor & indices) {
+ return self.type().topk_out(self, k, dim, largest, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim, bool largest) {
+ return self.type().topk(self, k, dim, largest);
+}
+static inline std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, Tensor & values, Tensor & indices) {
+ return self.type().topk_out(self, k, dim, values, indices);
+}
+static inline std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim) {
+ return self.type().topk(self, k, dim);
+}
+// Elementwise unary math functions. Each `op_out(self, result)` writes into
+// the caller-provided output tensor and returns it; each `op(self)` returns a
+// new tensor. All dispatch through self.type(); semantics follow the PyTorch
+// ops of the same name (see the PyTorch tensor docs referenced in the README).
+// Note: abs_out names its output `destination` while the rest use `result` —
+// presumably an artifact of the generating declarations; behavior-identical.
+static inline Tensor & abs_out(const Tensor & self, Tensor & destination) {
+ return self.type().abs_out(self, destination);
+}
+static inline Tensor abs(const Tensor & self) {
+ return self.type().abs(self);
+}
+static inline Tensor & sigmoid_out(const Tensor & self, Tensor & result) {
+ return self.type().sigmoid_out(self, result);
+}
+static inline Tensor sigmoid(const Tensor & self) {
+ return self.type().sigmoid(self);
+}
+static inline Tensor & log_out(const Tensor & self, Tensor & result) {
+ return self.type().log_out(self, result);
+}
+static inline Tensor log(const Tensor & self) {
+ return self.type().log(self);
+}
+static inline Tensor & log1p_out(const Tensor & self, Tensor & result) {
+ return self.type().log1p_out(self, result);
+}
+static inline Tensor log1p(const Tensor & self) {
+ return self.type().log1p(self);
+}
+static inline Tensor & lgamma_out(const Tensor & self, Tensor & result) {
+ return self.type().lgamma_out(self, result);
+}
+static inline Tensor lgamma(const Tensor & self) {
+ return self.type().lgamma(self);
+}
+static inline Tensor & exp_out(const Tensor & self, Tensor & result) {
+ return self.type().exp_out(self, result);
+}
+static inline Tensor exp(const Tensor & self) {
+ return self.type().exp(self);
+}
+static inline Tensor & cos_out(const Tensor & self, Tensor & result) {
+ return self.type().cos_out(self, result);
+}
+static inline Tensor cos(const Tensor & self) {
+ return self.type().cos(self);
+}
+static inline Tensor & acos_out(const Tensor & self, Tensor & result) {
+ return self.type().acos_out(self, result);
+}
+static inline Tensor acos(const Tensor & self) {
+ return self.type().acos(self);
+}
+static inline Tensor & cosh_out(const Tensor & self, Tensor & result) {
+ return self.type().cosh_out(self, result);
+}
+static inline Tensor cosh(const Tensor & self) {
+ return self.type().cosh(self);
+}
+static inline Tensor & sin_out(const Tensor & self, Tensor & result) {
+ return self.type().sin_out(self, result);
+}
+static inline Tensor sin(const Tensor & self) {
+ return self.type().sin(self);
+}
+static inline Tensor & asin_out(const Tensor & self, Tensor & result) {
+ return self.type().asin_out(self, result);
+}
+static inline Tensor asin(const Tensor & self) {
+ return self.type().asin(self);
+}
+static inline Tensor & sinh_out(const Tensor & self, Tensor & result) {
+ return self.type().sinh_out(self, result);
+}
+static inline Tensor sinh(const Tensor & self) {
+ return self.type().sinh(self);
+}
+static inline Tensor & tan_out(const Tensor & self, Tensor & result) {
+ return self.type().tan_out(self, result);
+}
+static inline Tensor tan(const Tensor & self) {
+ return self.type().tan(self);
+}
+static inline Tensor & atan_out(const Tensor & self, Tensor & result) {
+ return self.type().atan_out(self, result);
+}
+static inline Tensor atan(const Tensor & self) {
+ return self.type().atan(self);
+}
+static inline Tensor & tanh_out(const Tensor & self, Tensor & result) {
+ return self.type().tanh_out(self, result);
+}
+static inline Tensor tanh(const Tensor & self) {
+ return self.type().tanh(self);
+}
+static inline Tensor & sqrt_out(const Tensor & self, Tensor & result) {
+ return self.type().sqrt_out(self, result);
+}
+static inline Tensor sqrt(const Tensor & self) {
+ return self.type().sqrt(self);
+}
+static inline Tensor & rsqrt_out(const Tensor & self, Tensor & result) {
+ return self.type().rsqrt_out(self, result);
+}
+static inline Tensor rsqrt(const Tensor & self) {
+ return self.type().rsqrt(self);
+}
+static inline Tensor & ceil_out(const Tensor & self, Tensor & result) {
+ return self.type().ceil_out(self, result);
+}
+static inline Tensor ceil(const Tensor & self) {
+ return self.type().ceil(self);
+}
+static inline Tensor & floor_out(const Tensor & self, Tensor & result) {
+ return self.type().floor_out(self, result);
+}
+static inline Tensor floor(const Tensor & self) {
+ return self.type().floor(self);
+}
+static inline Tensor & round_out(const Tensor & self, Tensor & result) {
+ return self.type().round_out(self, result);
+}
+static inline Tensor round(const Tensor & self) {
+ return self.type().round(self);
+}
+static inline Tensor & trunc_out(const Tensor & self, Tensor & result) {
+ return self.type().trunc_out(self, result);
+}
+static inline Tensor trunc(const Tensor & self) {
+ return self.type().trunc(self);
+}
+static inline Tensor & frac_out(const Tensor & self, Tensor & result) {
+ return self.type().frac_out(self, result);
+}
+static inline Tensor frac(const Tensor & self) {
+ return self.type().frac(self);
+}
+// Statistical reductions (mean / var / std / norm). The dimension-reducing
+// overloads (taking `dim`, optionally `keepdim`) return a Tensor; the
+// full-tensor reductions (no `dim`) return a Scalar. All dispatch through
+// self.type(); semantics follow the corresponding PyTorch tensor ops.
+static inline Tensor & mean_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination) {
+ return self.type().mean_out(self, dim, keepdim, destination);
+}
+static inline Tensor mean(const Tensor & self, int64_t dim, bool keepdim) {
+ return self.type().mean(self, dim, keepdim);
+}
+static inline Tensor & mean_out(const Tensor & self, int64_t dim, Tensor & destination) {
+ return self.type().mean_out(self, dim, destination);
+}
+static inline Tensor mean(const Tensor & self, int64_t dim) {
+ return self.type().mean(self, dim);
+}
+static inline Scalar mean(const Tensor & self) {
+ return self.type().mean(self);
+}
+static inline Tensor & var_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination) {
+ return self.type().var_out(self, dim, keepdim, destination);
+}
+static inline Tensor var(const Tensor & self, int64_t dim, bool keepdim) {
+ return self.type().var(self, dim, keepdim);
+}
+static inline Tensor & var_out(const Tensor & self, int64_t dim, Tensor & destination) {
+ return self.type().var_out(self, dim, destination);
+}
+static inline Tensor var(const Tensor & self, int64_t dim) {
+ return self.type().var(self, dim);
+}
+static inline Scalar var(const Tensor & self) {
+ return self.type().var(self);
+}
+static inline Tensor & std_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination) {
+ return self.type().std_out(self, dim, keepdim, destination);
+}
+static inline Tensor std(const Tensor & self, int64_t dim, bool keepdim) {
+ return self.type().std(self, dim, keepdim);
+}
+static inline Tensor & std_out(const Tensor & self, int64_t dim, Tensor & destination) {
+ return self.type().std_out(self, dim, destination);
+}
+static inline Tensor std(const Tensor & self, int64_t dim) {
+ return self.type().std(self, dim);
+}
+static inline Scalar std(const Tensor & self) {
+ return self.type().std(self);
+}
+// norm: `p` (the norm order) is optional as well as `dim`/`keepdim`.
+static inline Tensor & norm_out(const Tensor & self, Scalar p, int64_t dim, bool keepdim, Tensor & destination) {
+ return self.type().norm_out(self, p, dim, keepdim, destination);
+}
+static inline Tensor norm(const Tensor & self, Scalar p, int64_t dim, bool keepdim) {
+ return self.type().norm(self, p, dim, keepdim);
+}
+static inline Tensor & norm_out(const Tensor & self, Scalar p, int64_t dim, Tensor & destination) {
+ return self.type().norm_out(self, p, dim, destination);
+}
+static inline Tensor norm(const Tensor & self, Scalar p, int64_t dim) {
+ return self.type().norm(self, p, dim);
+}
+static inline Scalar norm(const Tensor & self, Scalar p) {
+ return self.type().norm(self, p);
+}
+static inline Scalar norm(const Tensor & self) {
+ return self.type().norm(self);
+}
+// renorm / dist, pointwise negation and binary ops (atan2, pow, lerp),
+// range factories (linspace, logspace) and histc. Same dispatch pattern as
+// above; note that the factory functions have no `self` tensor, so they
+// dispatch through result.type() instead — the output tensor determines the
+// backend and scalar type.
+static inline Tensor & renorm_out(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm, Tensor & destination) {
+ return self.type().renorm_out(self, p, dim, maxnorm, destination);
+}
+static inline Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
+ return self.type().renorm(self, p, dim, maxnorm);
+}
+static inline Scalar dist(const Tensor & self, const Tensor & other, Scalar p) {
+ return self.type().dist(self, other, p);
+}
+static inline Scalar dist(const Tensor & self, const Tensor & other) {
+ return self.type().dist(self, other);
+}
+static inline Tensor & reciprocal_out(const Tensor & self, Tensor & destination) {
+ return self.type().reciprocal_out(self, destination);
+}
+static inline Tensor reciprocal(const Tensor & self) {
+ return self.type().reciprocal(self);
+}
+static inline Tensor & neg_out(const Tensor & self, Tensor & destination) {
+ return self.type().neg_out(self, destination);
+}
+static inline Tensor neg(const Tensor & self) {
+ return self.type().neg(self);
+}
+static inline Tensor & atan2_out(const Tensor & self, const Tensor & other, Tensor & destination) {
+ return self.type().atan2_out(self, other, destination);
+}
+static inline Tensor atan2(const Tensor & self, const Tensor & other) {
+ return self.type().atan2(self, other);
+}
+// pow: the exponent may be a Scalar or a Tensor (separate overloads).
+static inline Tensor & pow_out(const Tensor & self, Scalar exponent, Tensor & destination) {
+ return self.type().pow_out(self, exponent, destination);
+}
+static inline Tensor pow(const Tensor & self, Scalar exponent) {
+ return self.type().pow(self, exponent);
+}
+static inline Tensor & pow_out(const Tensor & self, const Tensor & exponent, Tensor & destination) {
+ return self.type().pow_out(self, exponent, destination);
+}
+static inline Tensor pow(const Tensor & self, const Tensor & exponent) {
+ return self.type().pow(self, exponent);
+}
+static inline Tensor & lerp_out(const Tensor & self, const Tensor & end, Scalar weight, Tensor & destination) {
+ return self.type().lerp_out(self, end, weight, destination);
+}
+static inline Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight) {
+ return self.type().lerp(self, end, weight);
+}
+// linspace/logspace: no input tensor — dispatch via the output's type.
+static inline Tensor & linspace_out(Scalar start, Scalar end, int64_t steps, Tensor & result) {
+ return result.type().linspace_out(start, end, steps, result);
+}
+static inline Tensor & linspace_out(Scalar start, Scalar end, Tensor & result) {
+ return result.type().linspace_out(start, end, result);
+}
+static inline Tensor & logspace_out(Scalar start, Scalar end, int64_t steps, Tensor & result) {
+ return result.type().logspace_out(start, end, steps, result);
+}
+static inline Tensor & logspace_out(Scalar start, Scalar end, Tensor & result) {
+ return result.type().logspace_out(start, end, result);
+}
+// histc: optional `bins`, `min`, `max` arguments (see README excerpt).
+static inline Tensor & histc_out(const Tensor & self, Tensor & destination) {
+ return self.type().histc_out(self, destination);
+}
+static inline Tensor histc(const Tensor & self) {
+ return self.type().histc(self);
+}
+static inline Tensor & histc_out(const Tensor & self, int64_t bins, Tensor & destination) {
+ return self.type().histc_out(self, bins, destination);
+}
+static inline Tensor histc(const Tensor & self, int64_t bins) {
+ return self.type().histc(self, bins);
+}
+static inline Tensor & histc_out(const Tensor & self, int64_t bins, Scalar min, Tensor & destination) {
+ return self.type().histc_out(self, bins, min, destination);
+}
+static inline Tensor histc(const Tensor & self, int64_t bins, Scalar min) {
+ return self.type().histc(self, bins, min);
+}
+static inline Tensor & histc_out(const Tensor & self, int64_t bins, Scalar min, Scalar max, Tensor & destination) {
+ return self.type().histc_out(self, bins, min, max, destination);
+}
+static inline Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) {
+ return self.type().histc(self, bins, min, max);
+}
+// sum / prod (per-dim returns Tensor, full reduction returns Scalar),
+// cumulative sum/product along a dim, elementwise sign, and trace (Scalar).
+// Same dynamic-dispatch pattern through self.type() as the rest of the file.
+static inline Tensor & sum_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & result) {
+ return self.type().sum_out(self, dim, keepdim, result);
+}
+static inline Tensor sum(const Tensor & self, int64_t dim, bool keepdim) {
+ return self.type().sum(self, dim, keepdim);
+}
+static inline Tensor & sum_out(const Tensor & self, int64_t dim, Tensor & result) {
+ return self.type().sum_out(self, dim, result);
+}
+static inline Tensor sum(const Tensor & self, int64_t dim) {
+ return self.type().sum(self, dim);
+}
+static inline Scalar sum(const Tensor & self) {
+ return self.type().sum(self);
+}
+static inline Tensor & prod_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & result) {
+ return self.type().prod_out(self, dim, keepdim, result);
+}
+static inline Tensor prod(const Tensor & self, int64_t dim, bool keepdim) {
+ return self.type().prod(self, dim, keepdim);
+}
+static inline Tensor & prod_out(const Tensor & self, int64_t dim, Tensor & result) {
+ return self.type().prod_out(self, dim, result);
+}
+static inline Tensor prod(const Tensor & self, int64_t dim) {
+ return self.type().prod(self, dim);
+}
+static inline Scalar prod(const Tensor & self) {
+ return self.type().prod(self);
+}
+static inline Tensor & cumsum_out(const Tensor & self, int64_t dim, Tensor & result) {
+ return self.type().cumsum_out(self, dim, result);
+}
+static inline Tensor cumsum(const Tensor & self, int64_t dim) {
+ return self.type().cumsum(self, dim);
+}
+static inline Tensor & cumprod_out(const Tensor & self, int64_t dim, Tensor & result) {
+ return self.type().cumprod_out(self, dim, result);
+}
+static inline Tensor cumprod(const Tensor & self, int64_t dim) {
+ return self.type().cumprod(self, dim);
+}
+static inline Tensor & sign_out(const Tensor & self, Tensor & result) {
+ return self.type().sign_out(self, result);
+}
+static inline Tensor sign(const Tensor & self) {
+ return self.type().sign(self);
+}
+static inline Scalar trace(const Tensor & self) {
+ return self.type().trace(self);
+}
+// Binary arithmetic. add/sub take either (Scalar value, Tensor other),
+// (Scalar value) or (Tensor other); mul/div/fmod/remainder take a Scalar or
+// a Tensor operand. clamp has (min, max) and (min-only) overloads, and dot
+// returns a Scalar. All dispatch through self.type().
+static inline Tensor & add_out(const Tensor & self, Scalar value, const Tensor & other, Tensor & result) {
+ return self.type().add_out(self, value, other, result);
+}
+static inline Tensor add(const Tensor & self, Scalar value, const Tensor & other) {
+ return self.type().add(self, value, other);
+}
+static inline Tensor & add_out(const Tensor & self, Scalar value, Tensor & result) {
+ return self.type().add_out(self, value, result);
+}
+static inline Tensor add(const Tensor & self, Scalar value) {
+ return self.type().add(self, value);
+}
+static inline Tensor & add_out(const Tensor & self, const Tensor & other, Tensor & result) {
+ return self.type().add_out(self, other, result);
+}
+static inline Tensor add(const Tensor & self, const Tensor & other) {
+ return self.type().add(self, other);
+}
+static inline Tensor & sub_out(const Tensor & self, Scalar value, const Tensor & other, Tensor & result) {
+ return self.type().sub_out(self, value, other, result);
+}
+static inline Tensor sub(const Tensor & self, Scalar value, const Tensor & other) {
+ return self.type().sub(self, value, other);
+}
+static inline Tensor & sub_out(const Tensor & self, Scalar value, Tensor & result) {
+ return self.type().sub_out(self, value, result);
+}
+static inline Tensor sub(const Tensor & self, Scalar value) {
+ return self.type().sub(self, value);
+}
+static inline Tensor & sub_out(const Tensor & self, const Tensor & other, Tensor & result) {
+ return self.type().sub_out(self, other, result);
+}
+static inline Tensor sub(const Tensor & self, const Tensor & other) {
+ return self.type().sub(self, other);
+}
+static inline Tensor & mul_out(const Tensor & self, Scalar value, Tensor & result) {
+ return self.type().mul_out(self, value, result);
+}
+static inline Tensor mul(const Tensor & self, Scalar value) {
+ return self.type().mul(self, value);
+}
+static inline Tensor & mul_out(const Tensor & self, const Tensor & other, Tensor & result) {
+ return self.type().mul_out(self, other, result);
+}
+static inline Tensor mul(const Tensor & self, const Tensor & other) {
+ return self.type().mul(self, other);
+}
+static inline Tensor & div_out(const Tensor & self, Scalar value, Tensor & result) {
+ return self.type().div_out(self, value, result);
+}
+static inline Tensor div(const Tensor & self, Scalar value) {
+ return self.type().div(self, value);
+}
+static inline Tensor & div_out(const Tensor & self, const Tensor & other, Tensor & result) {
+ return self.type().div_out(self, other, result);
+}
+static inline Tensor div(const Tensor & self, const Tensor & other) {
+ return self.type().div(self, other);
+}
+static inline Tensor & fmod_out(const Tensor & self, Scalar value, Tensor & result) {
+ return self.type().fmod_out(self, value, result);
+}
+static inline Tensor fmod(const Tensor & self, Scalar value) {
+ return self.type().fmod(self, value);
+}
+static inline Tensor & fmod_out(const Tensor & self, const Tensor & other, Tensor & result) {
+ return self.type().fmod_out(self, other, result);
+}
+static inline Tensor fmod(const Tensor & self, const Tensor & other) {
+ return self.type().fmod(self, other);
+}
+static inline Tensor & remainder_out(const Tensor & self, Scalar value, Tensor & result) {
+ return self.type().remainder_out(self, value, result);
+}
+static inline Tensor remainder(const Tensor & self, Scalar value) {
+ return self.type().remainder(self, value);
+}
+static inline Tensor & remainder_out(const Tensor & self, const Tensor & other, Tensor & result) {
+ return self.type().remainder_out(self, other, result);
+}
+static inline Tensor remainder(const Tensor & self, const Tensor & other) {
+ return self.type().remainder(self, other);
+}
+static inline Tensor & clamp_out(const Tensor & self, Scalar min, Scalar max, Tensor & destination) {
+ return self.type().clamp_out(self, min, max, destination);
+}
+static inline Tensor clamp(const Tensor & self, Scalar min, Scalar max) {
+ return self.type().clamp(self, min, max);
+}
+static inline Tensor & clamp_out(const Tensor & self, Scalar min, Tensor & result) {
+ return self.type().clamp_out(self, min, result);
+}
+static inline Tensor clamp(const Tensor & self, Scalar min) {
+ return self.type().clamp(self, min);
+}
+static inline Scalar dot(const Tensor & self, const Tensor & tensor) {
+ return self.type().dot(self, tensor);
+}
+// Matrix-structure operations: tril/triu (optional diagonal offset `k`),
+// cross product (optional `dim`), identity-matrix factory `eye_out`
+// (dispatches via result.type() since it has no input tensor), and diag
+// (optional `diagonal` offset). Same dispatch pattern as above.
+static inline Tensor & tril_out(const Tensor & self, int64_t k, Tensor & destination) {
+ return self.type().tril_out(self, k, destination);
+}
+static inline Tensor tril(const Tensor & self, int64_t k) {
+ return self.type().tril(self, k);
+}
+static inline Tensor & tril_out(const Tensor & self, Tensor & destination) {
+ return self.type().tril_out(self, destination);
+}
+static inline Tensor tril(const Tensor & self) {
+ return self.type().tril(self);
+}
+static inline Tensor & triu_out(const Tensor & self, int64_t k, Tensor & destination) {
+ return self.type().triu_out(self, k, destination);
+}
+static inline Tensor triu(const Tensor & self, int64_t k) {
+ return self.type().triu(self, k);
+}
+static inline Tensor & triu_out(const Tensor & self, Tensor & destination) {
+ return self.type().triu_out(self, destination);
+}
+static inline Tensor triu(const Tensor & self) {
+ return self.type().triu(self);
+}
+static inline Tensor & cross_out(const Tensor & self, const Tensor & other, int64_t dim, Tensor & destination) {
+ return self.type().cross_out(self, other, dim, destination);
+}
+static inline Tensor cross(const Tensor & self, const Tensor & other, int64_t dim) {
+ return self.type().cross(self, other, dim);
+}
+static inline Tensor & cross_out(const Tensor & self, const Tensor & other, Tensor & destination) {
+ return self.type().cross_out(self, other, destination);
+}
+static inline Tensor cross(const Tensor & self, const Tensor & other) {
+ return self.type().cross(self, other);
+}
+static inline Tensor & eye_out(int64_t n, Tensor & result) {
+ return result.type().eye_out(n, result);
+}
+static inline Tensor & eye_out(int64_t n, int64_t m, Tensor & result) {
+ return result.type().eye_out(n, m, result);
+}
+static inline Tensor & diag_out(const Tensor & self, int64_t diagonal, Tensor & result) {
+ return self.type().diag_out(self, diagonal, result);
+}
+static inline Tensor diag(const Tensor & self, int64_t diagonal) {
+ return self.type().diag(self, diagonal);
+}
+static inline Tensor & diag_out(const Tensor & self, Tensor & result) {
+ return self.type().diag_out(self, result);
+}
+static inline Tensor diag(const Tensor & self) {
+ return self.type().diag(self);
+}
+// BLAS-style fused multiply-add operations. Each op has three generated
+// overloads covering the optional scaling factors: (beta, alpha), (beta
+// only), and neither. `beta` scales `self`; `alpha` scales the product term
+// (semantics per the PyTorch ops of the same name). All dispatch through
+// self.type().
+static inline Tensor & addmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat1, const Tensor & mat2, Tensor & result) {
+ return self.type().addmm_out(beta, self, alpha, mat1, mat2, result);
+}
+static inline Tensor addmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat1, const Tensor & mat2) {
+ return self.type().addmm(beta, self, alpha, mat1, mat2);
+}
+static inline Tensor & addmm_out(Scalar beta, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Tensor & result) {
+ return self.type().addmm_out(beta, self, mat1, mat2, result);
+}
+static inline Tensor addmm(Scalar beta, const Tensor & self, const Tensor & mat1, const Tensor & mat2) {
+ return self.type().addmm(beta, self, mat1, mat2);
+}
+static inline Tensor & addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Tensor & result) {
+ return self.type().addmm_out(self, mat1, mat2, result);
+}
+static inline Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2) {
+ return self.type().addmm(self, mat1, mat2);
+}
+static inline Tensor & addmv_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat, const Tensor & vec, Tensor & result) {
+ return self.type().addmv_out(beta, self, alpha, mat, vec, result);
+}
+static inline Tensor addmv(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat, const Tensor & vec) {
+ return self.type().addmv(beta, self, alpha, mat, vec);
+}
+static inline Tensor & addmv_out(Scalar beta, const Tensor & self, const Tensor & mat, const Tensor & vec, Tensor & result) {
+ return self.type().addmv_out(beta, self, mat, vec, result);
+}
+static inline Tensor addmv(Scalar beta, const Tensor & self, const Tensor & mat, const Tensor & vec) {
+ return self.type().addmv(beta, self, mat, vec);
+}
+static inline Tensor & addmv_out(const Tensor & self, const Tensor & mat, const Tensor & vec, Tensor & result) {
+ return self.type().addmv_out(self, mat, vec, result);
+}
+static inline Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec) {
+ return self.type().addmv(self, mat, vec);
+}
+static inline Tensor & addr_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & vec1, const Tensor & vec2, Tensor & result) {
+ return self.type().addr_out(beta, self, alpha, vec1, vec2, result);
+}
+static inline Tensor addr(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & vec1, const Tensor & vec2) {
+ return self.type().addr(beta, self, alpha, vec1, vec2);
+}
+static inline Tensor & addr_out(Scalar beta, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Tensor & result) {
+ return self.type().addr_out(beta, self, vec1, vec2, result);
+}
+static inline Tensor addr(Scalar beta, const Tensor & self, const Tensor & vec1, const Tensor & vec2) {
+ return self.type().addr(beta, self, vec1, vec2);
+}
+static inline Tensor & addr_out(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Tensor & result) {
+ return self.type().addr_out(self, vec1, vec2, result);
+}
+static inline Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2) {
+ return self.type().addr(self, vec1, vec2);
+}
+// Plain products: outer (ger), matrix-vector (mv), matrix-matrix (mm),
+// batched matrix-matrix (bmm).
+static inline Tensor & ger_out(const Tensor & self, const Tensor & vec2, Tensor & result) {
+ return self.type().ger_out(self, vec2, result);
+}
+static inline Tensor ger(const Tensor & self, const Tensor & vec2) {
+ return self.type().ger(self, vec2);
+}
+static inline Tensor & mv_out(const Tensor & self, const Tensor & vec, Tensor & result) {
+ return self.type().mv_out(self, vec, result);
+}
+static inline Tensor mv(const Tensor & self, const Tensor & vec) {
+ return self.type().mv(self, vec);
+}
+static inline Tensor & mm_out(const Tensor & self, const Tensor & mat2, Tensor & result) {
+ return self.type().mm_out(self, mat2, result);
+}
+static inline Tensor mm(const Tensor & self, const Tensor & mat2) {
+ return self.type().mm(self, mat2);
+}
+static inline Tensor & bmm_out(const Tensor & self, const Tensor & mat2, Tensor & result) {
+ return self.type().bmm_out(self, mat2, result);
+}
+static inline Tensor bmm(const Tensor & self, const Tensor & mat2) {
+ return self.type().bmm(self, mat2);
+}
+static inline Tensor & addbmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2, Tensor & result) {
+ return self.type().addbmm_out(beta, self, alpha, batch1, batch2, result);
+}
+static inline Tensor addbmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2) {
+ return self.type().addbmm(beta, self, alpha, batch1, batch2);
+}
+static inline Tensor & addbmm_out(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result) {
+ return self.type().addbmm_out(beta, self, batch1, batch2, result);
+}
+static inline Tensor addbmm(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2) {
+ return self.type().addbmm(beta, self, batch1, batch2);
+}
+static inline Tensor & addbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result) {
+ return self.type().addbmm_out(self, batch1, batch2, result);
+}
+static inline Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2) {
+ return self.type().addbmm(self, batch1, batch2);
+}
+static inline Tensor & baddbmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2, Tensor & result) {
+ return self.type().baddbmm_out(beta, self, alpha, batch1, batch2, result);
+}
+static inline Tensor baddbmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2) {
+ return self.type().baddbmm(beta, self, alpha, batch1, batch2);
+}
+static inline Tensor & baddbmm_out(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result) {
+ return self.type().baddbmm_out(beta, self, batch1, batch2, result);
+}
+static inline Tensor baddbmm(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2) {
+ return self.type().baddbmm(beta, self, batch1, batch2);
+}
+static inline Tensor & baddbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result) {
+ return self.type().baddbmm_out(self, batch1, batch2, result);
+}
+static inline Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2) {
+ return self.type().baddbmm(self, batch1, batch2);
+}
+// addcmul/addcdiv: optional Scalar `value` multiplier on the tensor1*tensor2
+// (or tensor1/tensor2) term.
+static inline Tensor & addcmul_out(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2, Tensor & result) {
+ return self.type().addcmul_out(self, value, tensor1, tensor2, result);
+}
+static inline Tensor addcmul(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2) {
+ return self.type().addcmul(self, value, tensor1, tensor2);
+}
+static inline Tensor & addcmul_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Tensor & result) {
+ return self.type().addcmul_out(self, tensor1, tensor2, result);
+}
+static inline Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2) {
+ return self.type().addcmul(self, tensor1, tensor2);
+}
+static inline Tensor & addcdiv_out(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2, Tensor & result) {
+ return self.type().addcdiv_out(self, value, tensor1, tensor2, result);
+}
+static inline Tensor addcdiv(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2) {
+ return self.type().addcdiv(self, value, tensor1, tensor2);
+}
+static inline Tensor & addcdiv_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Tensor & result) {
+ return self.type().addcdiv_out(self, tensor1, tensor2, result);
+}
+static inline Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2) {
+ return self.type().addcdiv(self, tensor1, tensor2);
+}
+// LAPACK-style linear-algebra routines returning two tensors: gesv (linear
+// system solve), gels (least squares), trtrs (triangular solve; optional
+// `upper`/`transpose`/`unitriangular` flags) and symeig (symmetric
+// eigendecomposition; optional `eigenvectors`/`upper` flags). Same dispatch
+// pattern through self.type(); `_out` variants fill caller-provided outputs.
+static inline std::tuple<Tensor &,Tensor &> gesv_out(const Tensor & self, const Tensor & A, Tensor & solution, Tensor & lu) {
+ return self.type().gesv_out(self, A, solution, lu);
+}
+static inline std::tuple<Tensor,Tensor> gesv(const Tensor & self, const Tensor & A) {
+ return self.type().gesv(self, A);
+}
+static inline std::tuple<Tensor &,Tensor &> gels_out(const Tensor & self, const Tensor & A, Tensor & res1, Tensor & res2) {
+ return self.type().gels_out(self, A, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> gels(const Tensor & self, const Tensor & A) {
+ return self.type().gels(self, A);
+}
+static inline std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular, Tensor & res1, Tensor & res2) {
+ return self.type().trtrs_out(self, A, upper, transpose, unitriangular, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) {
+ return self.type().trtrs(self, A, upper, transpose, unitriangular);
+}
+static inline std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, Tensor & res1, Tensor & res2) {
+ return self.type().trtrs_out(self, A, upper, transpose, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose) {
+ return self.type().trtrs(self, A, upper, transpose);
+}
+static inline std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, Tensor & res1, Tensor & res2) {
+ return self.type().trtrs_out(self, A, upper, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper) {
+ return self.type().trtrs(self, A, upper);
+}
+static inline std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, Tensor & res1, Tensor & res2) {
+ return self.type().trtrs_out(self, A, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A) {
+ return self.type().trtrs(self, A);
+}
+static inline std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, bool eigenvectors, bool upper, Tensor & res1, Tensor & res2) {
+ return self.type().symeig_out(self, eigenvectors, upper, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors, bool upper) {
+ return self.type().symeig(self, eigenvectors, upper);
+}
+static inline std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, bool eigenvectors, Tensor & res1, Tensor & res2) {
+ return self.type().symeig_out(self, eigenvectors, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors) {
+ return self.type().symeig(self, eigenvectors);
+}
+static inline std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, Tensor & res1, Tensor & res2) {
+ return self.type().symeig_out(self, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> symeig(const Tensor & self) {
+ return self.type().symeig(self);
+}
+static inline std::tuple<Tensor &,Tensor &> eig_out(const Tensor & self, bool eigenvectors, Tensor & res1, Tensor & res2) {
+ return self.type().eig_out(self, eigenvectors, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> eig(const Tensor & self, bool eigenvectors) {
+ return self.type().eig(self, eigenvectors);
+}
+static inline std::tuple<Tensor &,Tensor &> eig_out(const Tensor & self, Tensor & res1, Tensor & res2) {
+ return self.type().eig_out(self, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> eig(const Tensor & self) {
+ return self.type().eig(self);
+}
+static inline std::tuple<Tensor &,Tensor &,Tensor &> svd_out(const Tensor & self, bool some, Tensor & res1, Tensor & res2, Tensor & res3) {
+ return self.type().svd_out(self, some, res1, res2, res3);
+}
+static inline std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self, bool some) {
+ return self.type().svd(self, some);
+}
+static inline std::tuple<Tensor &,Tensor &,Tensor &> svd_out(const Tensor & self, Tensor & res1, Tensor & res2, Tensor & res3) {
+ return self.type().svd_out(self, res1, res2, res3);
+}
+static inline std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self) {
+ return self.type().svd(self);
+}
+static inline Tensor & inverse_out(const Tensor & self, Tensor & output) {
+ return self.type().inverse_out(self, output);
+}
+static inline Tensor inverse(const Tensor & self) {
+ return self.type().inverse(self);
+}
+static inline Tensor & potrf_out(const Tensor & self, bool upper, Tensor & output) {
+ return self.type().potrf_out(self, upper, output);
+}
+static inline Tensor potrf(const Tensor & self, bool upper) {
+ return self.type().potrf(self, upper);
+}
+static inline Tensor & potrf_out(const Tensor & self, Tensor & output) {
+ return self.type().potrf_out(self, output);
+}
+static inline Tensor potrf(const Tensor & self) {
+ return self.type().potrf(self);
+}
+static inline Tensor & potrs_out(const Tensor & self, const Tensor & input2, bool upper, Tensor & result) {
+ return self.type().potrs_out(self, input2, upper, result);
+}
+static inline Tensor potrs(const Tensor & self, const Tensor & input2, bool upper) {
+ return self.type().potrs(self, input2, upper);
+}
+static inline Tensor & potrs_out(const Tensor & self, const Tensor & input2, Tensor & result) {
+ return self.type().potrs_out(self, input2, result);
+}
+static inline Tensor potrs(const Tensor & self, const Tensor & input2) {
+ return self.type().potrs(self, input2);
+}
+static inline Tensor & potri_out(const Tensor & self, bool upper, Tensor & output) {
+ return self.type().potri_out(self, upper, output);
+}
+static inline Tensor potri(const Tensor & self, bool upper) {
+ return self.type().potri(self, upper);
+}
+static inline Tensor & potri_out(const Tensor & self, Tensor & output) {
+ return self.type().potri_out(self, output);
+}
+static inline Tensor potri(const Tensor & self) {
+ return self.type().potri(self);
+}
+static inline std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, bool upper, Scalar tol, Tensor & res1, Tensor & res2) {
+ return self.type().pstrf_out(self, upper, tol, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> pstrf(const Tensor & self, bool upper, Scalar tol) {
+ return self.type().pstrf(self, upper, tol);
+}
+static inline std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, bool upper, Tensor & res1, Tensor & res2) {
+ return self.type().pstrf_out(self, upper, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> pstrf(const Tensor & self, bool upper) {
+ return self.type().pstrf(self, upper);
+}
+static inline std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, Scalar tol, Tensor & res1, Tensor & res2) {
+ return self.type().pstrf_out(self, tol, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> pstrf(const Tensor & self, Scalar tol) {
+ return self.type().pstrf(self, tol);
+}
+static inline std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, Tensor & res1, Tensor & res2) {
+ return self.type().pstrf_out(self, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> pstrf(const Tensor & self) {
+ return self.type().pstrf(self);
+}
+static inline std::tuple<Tensor &,Tensor &> qr_out(const Tensor & self, Tensor & res1, Tensor & res2) {
+ return self.type().qr_out(self, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> qr(const Tensor & self) {
+ return self.type().qr(self);
+}
+static inline std::tuple<Tensor &,Tensor &> geqrf_out(const Tensor & self, Tensor & res1, Tensor & res2) {
+ return self.type().geqrf_out(self, res1, res2);
+}
+static inline std::tuple<Tensor,Tensor> geqrf(const Tensor & self) {
+ return self.type().geqrf(self);
+}
+static inline std::tuple<Tensor &,const Tensor &> orgqr_out(const Tensor & self, const Tensor & input2, Tensor & result) {
+ return self.type().orgqr_out(self, input2, result);
+}
+static inline std::tuple<Tensor,const Tensor &> orgqr(const Tensor & self, const Tensor & input2) {
+ return self.type().orgqr(self, input2);
+}
+static inline std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose, Tensor & result) {
+ return self.type().ormqr_out(self, input2, input3, left, transpose, result);
+}
+static inline std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) {
+ return self.type().ormqr(self, input2, input3, left, transpose);
+}
+static inline std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, Tensor & result) {
+ return self.type().ormqr_out(self, input2, input3, left, result);
+}
+static inline std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left) {
+ return self.type().ormqr(self, input2, input3, left);
+}
+static inline std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, Tensor & result) {
+ return self.type().ormqr_out(self, input2, input3, result);
+}
+static inline std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3) {
+ return self.type().ormqr(self, input2, input3);
+}
+static inline std::tuple<Tensor &,Tensor &> btrifact_out(const Tensor & info, const Tensor & self, Tensor & result, Tensor & pivots) {
+ return self.type().btrifact_out(info, self, result, pivots);
+}
+static inline std::tuple<Tensor,Tensor> btrifact(const Tensor & info, const Tensor & self) {
+ return self.type().btrifact(info, self);
+}
+static inline std::tuple<Tensor &,Tensor &> btrifact_out(const Tensor & self, Tensor & result, Tensor & pivots) {
+ return self.type().btrifact_out(self, result, pivots);
+}
+static inline std::tuple<Tensor,Tensor> btrifact(const Tensor & self) {
+ return self.type().btrifact(self);
+}
+static inline Tensor & btrisolve_out(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots, Tensor & result) {
+ return self.type().btrisolve_out(self, LU_data, LU_pivots, result);
+}
+static inline Tensor btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) {
+ return self.type().btrisolve(self, LU_data, LU_pivots);
+}
+// --- Creation / sampling dispatch wrappers (GENERATED FILE).
+// randperm/rand/randn take no input tensor, so they dispatch on the type of the
+// caller-supplied `result` tensor instead of a `self`. Overloads with a
+// Generator parameter draw from that generator; the others mirror the
+// Python-side default generator argument.
+static inline Tensor & randperm_out(Generator & generator, int64_t n, Tensor & result) {
+ return result.type().randperm_out(generator, n, result);
+}
+static inline Tensor & randperm_out(int64_t n, Tensor & result) {
+ return result.type().randperm_out(n, result);
+}
+// multinomial has a `self` (the probability tensor) and dispatches on it.
+static inline Tensor & multinomial_out(Generator & generator, const Tensor & self, int64_t num_samples, bool replacement, Tensor & result) {
+ return self.type().multinomial_out(generator, self, num_samples, replacement, result);
+}
+static inline Tensor multinomial(Generator & generator, const Tensor & self, int64_t num_samples, bool replacement) {
+ return self.type().multinomial(generator, self, num_samples, replacement);
+}
+static inline Tensor & multinomial_out(Generator & generator, const Tensor & self, int64_t num_samples, Tensor & result) {
+ return self.type().multinomial_out(generator, self, num_samples, result);
+}
+static inline Tensor multinomial(Generator & generator, const Tensor & self, int64_t num_samples) {
+ return self.type().multinomial(generator, self, num_samples);
+}
+static inline Tensor & multinomial_out(const Tensor & self, int64_t num_samples, bool replacement, Tensor & result) {
+ return self.type().multinomial_out(self, num_samples, replacement, result);
+}
+static inline Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement) {
+ return self.type().multinomial(self, num_samples, replacement);
+}
+static inline Tensor & multinomial_out(const Tensor & self, int64_t num_samples, Tensor & result) {
+ return self.type().multinomial_out(self, num_samples, result);
+}
+static inline Tensor multinomial(const Tensor & self, int64_t num_samples) {
+ return self.type().multinomial(self, num_samples);
+}
+static inline Tensor & rand_out(Generator & generator, IntList size, Tensor & result) {
+ return result.type().rand_out(generator, size, result);
+}
+static inline Tensor & rand_out(IntList size, Tensor & result) {
+ return result.type().rand_out(size, result);
+}
+static inline Tensor & randn_out(Generator & generator, IntList size, Tensor & result) {
+ return result.type().randn_out(generator, size, result);
+}
+static inline Tensor & randn_out(IntList size, Tensor & result) {
+ return result.type().randn_out(size, result);
+}
+// NOTE(review): `dim` is a plain int here while most generated signatures use
+// int64_t — inherited from the generator's declaration; fix belongs upstream.
+static inline Tensor & select_out(const Tensor & self, int dim, int64_t sliceIndex, Tensor & result) {
+ return self.type().select_out(self, dim, sliceIndex, result);
+}
+static inline Tensor select(const Tensor & self, int dim, int64_t sliceIndex) {
+ return self.type().select(self, dim, sliceIndex);
+}
+static inline void Abs_updateOutput(const Tensor & input, const Tensor & output) {
+ return input.type().Abs_updateOutput(input, output);
+}
+static inline void Abs_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput) {
+ return input.type().Abs_updateGradInput(input, gradOutput, gradInput);
+}
+static inline void AbsCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) {
+ return input.type().AbsCriterion_updateOutput(input, target, output, sizeAverage);
+}
+static inline void AbsCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) {
+ return input.type().AbsCriterion_updateGradInput(input, target, gradInput, sizeAverage);
+}
+static inline void BCECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights) {
+ return input.type().BCECriterion_updateOutput(input, target, output, sizeAverage, weights);
+}
+static inline void BCECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) {
+ return input.type().BCECriterion_updateOutput(input, target, output, sizeAverage);
+}
+static inline void BCECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights) {
+ return input.type().BCECriterion_updateGradInput(input, target, gradInput, sizeAverage, weights);
+}
+static inline void BCECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) {
+ return input.type().BCECriterion_updateGradInput(input, target, gradInput, sizeAverage);
+}
+static inline void ClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights, const Tensor & total_weight, int64_t ignore_index) {
+ return input.type().ClassNLLCriterion_updateOutput(input, target, output, sizeAverage, weights, total_weight, ignore_index);
+}
+static inline void ClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & total_weight, int64_t ignore_index) {
+ return input.type().ClassNLLCriterion_updateOutput(input, target, output, sizeAverage, total_weight, ignore_index);
+}
+static inline void ClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights, const Tensor & total_weight, int64_t ignore_index) {
+ return input.type().ClassNLLCriterion_updateGradInput(input, target, gradInput, sizeAverage, weights, total_weight, ignore_index);
+}
+static inline void ClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & total_weight, int64_t ignore_index) {
+ return input.type().ClassNLLCriterion_updateGradInput(input, target, gradInput, sizeAverage, total_weight, ignore_index);
+}
+static inline void SpatialClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights, const Tensor & total_weight) {
+ return input.type().SpatialClassNLLCriterion_updateOutput(input, target, output, sizeAverage, weights, total_weight);
+}
+static inline void SpatialClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & total_weight) {
+ return input.type().SpatialClassNLLCriterion_updateOutput(input, target, output, sizeAverage, total_weight);
+}
+static inline void SpatialClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights, const Tensor & total_weight) {
+ return input.type().SpatialClassNLLCriterion_updateGradInput(input, target, gradInput, sizeAverage, weights, total_weight);
+}
+static inline void SpatialClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & total_weight) {
+ return input.type().SpatialClassNLLCriterion_updateGradInput(input, target, gradInput, sizeAverage, total_weight);
+}
+static inline void ELU_updateOutput(const Tensor & input, const Tensor & output, Scalar alpha, bool inplace) {
+ return input.type().ELU_updateOutput(input, output, alpha, inplace);
+}
+static inline void ELU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output, Scalar alpha, bool inplace) {
+ return input.type().ELU_updateGradInput(input, gradOutput, gradInput, output, alpha, inplace);
+}
+static inline void DistKLDivCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) {
+ return input.type().DistKLDivCriterion_updateOutput(input, target, output, sizeAverage);
+}
+static inline void DistKLDivCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) {
+ return input.type().DistKLDivCriterion_updateGradInput(input, target, gradInput, sizeAverage);
+}
+static inline void GatedLinear_updateOutput(const Tensor & input, const Tensor & output, int dim) {
+ return input.type().GatedLinear_updateOutput(input, output, dim);
+}
+static inline void GatedLinear_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int dim) {
+ return input.type().GatedLinear_updateGradInput(input, gradOutput, gradInput, dim);
+}
+static inline void HardShrink_updateOutput(const Tensor & input, const Tensor & output, Scalar lambda) {
+ return input.type().HardShrink_updateOutput(input, output, lambda);
+}
+static inline void HardShrink_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar lambda) {
+ return input.type().HardShrink_updateGradInput(input, gradOutput, gradInput, lambda);
+}
+static inline void HardTanh_updateOutput(const Tensor & input, const Tensor & output, Scalar min_val, Scalar max_val, bool inplace) {
+ return input.type().HardTanh_updateOutput(input, output, min_val, max_val, inplace);
+}
+static inline void HardTanh_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar min_val, Scalar max_val, bool inplace) {
+ return input.type().HardTanh_updateGradInput(input, gradOutput, gradInput, min_val, max_val, inplace);
+}
+static inline void L1Cost_updateOutput(const Tensor & input, const Tensor & output) {
+ return input.type().L1Cost_updateOutput(input, output);
+}
+static inline void L1Cost_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput) {
+ return input.type().L1Cost_updateGradInput(input, gradOutput, gradInput);
+}
+static inline void L1Cost_updateGradInput(const Tensor & input, const Tensor & gradInput) {
+ return input.type().L1Cost_updateGradInput(input, gradInput);
+}
+static inline void LeakyReLU_updateOutput(const Tensor & input, const Tensor & output, Scalar negval, bool inplace) {
+ return input.type().LeakyReLU_updateOutput(input, output, negval, inplace);
+}
+static inline void LeakyReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar negval, bool inplace) {
+ return input.type().LeakyReLU_updateGradInput(input, gradOutput, gradInput, negval, inplace);
+}
+static inline void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & bias2, const Tensor & hx, const Tensor & output, const Tensor & storage) {
+ return input.type().GRUFused_updateOutput(input, hidden, bias1, bias2, hx, output, storage);
+}
+static inline void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & hx, const Tensor & output, const Tensor & storage) {
+ return input.type().GRUFused_updateOutput(input, hidden, bias1, hx, output, storage);
+}
+static inline void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & hx, const Tensor & output, const Tensor & storage) {
+ return input.type().GRUFused_updateOutput(input, hidden, hx, output, storage);
+}
+static inline void GRUFused_updateGradInput(const Tensor & gradInInput, const Tensor & gradInHidden, const Tensor & gradOutput, const Tensor & gradInputHx, const Tensor & storage) {
+ return gradInInput.type().GRUFused_updateGradInput(gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
+}
+static inline void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & bias2, const Tensor & cell, const Tensor & output, const Tensor & outputCell) {
+ return input.type().LSTMFused_updateOutput(input, hidden, bias1, bias2, cell, output, outputCell);
+}
+static inline void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & cell, const Tensor & output, const Tensor & outputCell) {
+ return input.type().LSTMFused_updateOutput(input, hidden, bias1, cell, output, outputCell);
+}
+static inline void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & cell, const Tensor & output, const Tensor & outputCell) {
+ return input.type().LSTMFused_updateOutput(input, hidden, cell, output, outputCell);
+}
+static inline void LSTMFused_updateGradInput(const Tensor & storage, const Tensor & gradInGates, const Tensor & cx, const Tensor & cy, const Tensor & gradOutput, const Tensor & gradOutputCell, const Tensor & gradInputCx) {
+ return storage.type().LSTMFused_updateGradInput(storage, gradInGates, cx, cy, gradOutput, gradOutputCell, gradInputCx);
+}
+static inline void LogSigmoid_updateOutput(const Tensor & input, const Tensor & output, const Tensor & buffer) {
+ return input.type().LogSigmoid_updateOutput(input, output, buffer);
+}
+static inline void LogSigmoid_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & buffer) {
+ return input.type().LogSigmoid_updateGradInput(input, gradOutput, gradInput, buffer);
+}
+static inline void LogSoftMax_updateOutput(const Tensor & input, const Tensor & output) {
+ return input.type().LogSoftMax_updateOutput(input, output);
+}
+static inline void LogSoftMax_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) {
+ return input.type().LogSoftMax_updateGradInput(input, gradOutput, gradInput, output);
+}
+static inline void MarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, Scalar margin) {
+ return input.type().MarginCriterion_updateOutput(input, target, output, sizeAverage, margin);
+}
+static inline void MarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, Scalar margin) {
+ return input.type().MarginCriterion_updateGradInput(input, target, gradInput, sizeAverage, margin);
+}
+static inline void SoftMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) {
+ return input.type().SoftMarginCriterion_updateOutput(input, target, output, sizeAverage);
+}
+static inline void SoftMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) {
+ return input.type().SoftMarginCriterion_updateGradInput(input, target, gradInput, sizeAverage);
+}
+static inline void MSECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) {
+ return input.type().MSECriterion_updateOutput(input, target, output, sizeAverage);
+}
+static inline void MSECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) {
+ return input.type().MSECriterion_updateGradInput(input, target, gradInput, sizeAverage);
+}
+static inline void MultiLabelMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, const Tensor & isTarget, bool sizeAverage) {
+ return input.type().MultiLabelMarginCriterion_updateOutput(input, target, output, isTarget, sizeAverage);
+}
+static inline void MultiLabelMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, const Tensor & isTarget, bool sizeAverage) {
+ return input.type().MultiLabelMarginCriterion_updateGradInput(input, target, gradInput, isTarget, sizeAverage);
+}
+static inline void MultiMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, int p, const Tensor & weights, Scalar margin) {
+ return input.type().MultiMarginCriterion_updateOutput(input, target, output, sizeAverage, p, weights, margin);
+}
+static inline void MultiMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, int p, Scalar margin) {
+ return input.type().MultiMarginCriterion_updateOutput(input, target, output, sizeAverage, p, margin);
+}
+static inline void MultiMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, int p, const Tensor & weights, Scalar margin) {
+ return input.type().MultiMarginCriterion_updateGradInput(input, target, gradInput, sizeAverage, p, weights, margin);
+}
+static inline void MultiMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, int p, Scalar margin) {
+ return input.type().MultiMarginCriterion_updateGradInput(input, target, gradInput, sizeAverage, p, margin);
+}
+static inline void PReLU_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, int64_t nOutputPlane) {
+ return input.type().PReLU_updateOutput(input, output, weight, nOutputPlane);
+}
+static inline void PReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int64_t nOutputPlane) {
+ return input.type().PReLU_updateGradInput(input, gradOutput, gradInput, weight, nOutputPlane);
+}
+static inline void PReLU_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradWeight, const Tensor & gradWeightBuf, const Tensor & gradWeightBuf2, int64_t nOutputPlane, Scalar scale) {
+ return input.type().PReLU_accGradParameters(input, gradOutput, gradInput, weight, gradWeight, gradWeightBuf, gradWeightBuf2, nOutputPlane, scale);
+}
+static inline void Linear_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & addBuffer) {
+ return input.type().Linear_updateOutput(input, output, weight, bias, addBuffer);
+}
+static inline void Linear_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight) {
+ return input.type().Linear_updateGradInput(input, gradOutput, gradInput, weight);
+}
+static inline void Linear_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & addBuffer, Scalar scale) {
+ return input.type().Linear_accGradParameters(input, gradOutput, gradInput, weight, bias, gradWeight, gradBias, addBuffer, scale);
+}
+static inline void RReLU_updateOutput(const Tensor & input, const Tensor & output, const Tensor & noise, Scalar lower, Scalar upper, bool train, bool inplace, Generator & generator) {
+ return input.type().RReLU_updateOutput(input, output, noise, lower, upper, train, inplace, generator);
+}
+static inline void RReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & noise, Scalar lower, Scalar upper, bool train, bool inplace) {
+ return input.type().RReLU_updateGradInput(input, gradOutput, gradInput, noise, lower, upper, train, inplace);
+}
+static inline void Sigmoid_updateOutput(const Tensor & input, const Tensor & output) {
+ return input.type().Sigmoid_updateOutput(input, output);
+}
+static inline void Sigmoid_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) {
+ return input.type().Sigmoid_updateGradInput(input, gradOutput, gradInput, output);
+}
+static inline void Sigmoid_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) {
+ return gradOutput.type().Sigmoid_updateGradInput(gradOutput, gradInput, output);
+}
+static inline void SmoothL1Criterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) {
+ return input.type().SmoothL1Criterion_updateOutput(input, target, output, sizeAverage);
+}
+static inline void SmoothL1Criterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) {
+ return input.type().SmoothL1Criterion_updateGradInput(input, target, gradInput, sizeAverage);
+}
+static inline void SoftMax_updateOutput(const Tensor & input, const Tensor & output) {
+ return input.type().SoftMax_updateOutput(input, output);
+}
+static inline void SoftMax_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) {
+ return input.type().SoftMax_updateGradInput(input, gradOutput, gradInput, output);
+}
+static inline void SoftPlus_updateOutput(const Tensor & input, const Tensor & output, Scalar beta, Scalar threshold) {
+ return input.type().SoftPlus_updateOutput(input, output, beta, threshold);
+}
+static inline void SoftPlus_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output, Scalar beta, Scalar threshold) {
+ return input.type().SoftPlus_updateGradInput(input, gradOutput, gradInput, output, beta, threshold);
+}
+static inline void SoftShrink_updateOutput(const Tensor & input, const Tensor & output, Scalar lambda) {
+ return input.type().SoftShrink_updateOutput(input, output, lambda);
+}
+static inline void SoftShrink_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar lambda) {
+ return input.type().SoftShrink_updateGradInput(input, gradOutput, gradInput, lambda);
+}
+static inline void IndexLinear_updateOutput(const Tensor & keys, int64_t keysOffset, const Tensor & values, const Tensor & sizes, const Tensor & cumSumSizes, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & normalizedValues, int train) {
+ return values.type().IndexLinear_updateOutput(keys, keysOffset, values, sizes, cumSumSizes, output, weight, bias, normalizedValues, train);
+}
+static inline void IndexLinear_accGradParameters(const Tensor & keys, int64_t keysOffset, const Tensor & values, const Tensor & sizes, const Tensor & cumSumSizes, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & bias, const Tensor & valuesBuffer, Scalar weightDecay, Scalar scale) {
+ return values.type().IndexLinear_accGradParameters(keys, keysOffset, values, sizes, cumSumSizes, gradOutput, gradWeight, gradBias, weight, bias, valuesBuffer, weightDecay, scale);
+}
+// SparseLinear NN wrappers: forward and parameter-gradient accumulation,
+// dispatched on input's dynamic Type.
+static inline void SparseLinear_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias) {
+ return input.type().SparseLinear_updateOutput(input, output, weight, bias);
+}
+static inline void SparseLinear_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & bias, Scalar weightDecay, Scalar scale) {
+ return input.type().SparseLinear_accGradParameters(input, gradOutput, gradWeight, gradBias, weight, bias, weightDecay, scale);
+}
+// Sqrt / Square elementwise NN wrappers, dispatched on input's dynamic Type.
+// Sqrt's backward takes the saved forward `output`; Square's does not need it.
+static inline void Sqrt_updateOutput(const Tensor & input, const Tensor & output, Scalar eps) {
+ return input.type().Sqrt_updateOutput(input, output, eps);
+}
+static inline void Sqrt_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) {
+ return input.type().Sqrt_updateGradInput(input, gradOutput, gradInput, output);
+}
+static inline void Square_updateOutput(const Tensor & input, const Tensor & output) {
+ return input.type().Square_updateOutput(input, output);
+}
+static inline void Square_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput) {
+ return input.type().Square_updateGradInput(input, gradOutput, gradInput);
+}
+// Tanh / Threshold NN wrappers. The three-argument Tanh_updateGradInput
+// overload (no `input` tensor) dispatches on gradOutput's Type instead,
+// because only the saved forward `output` is needed for the tanh gradient.
+static inline void Tanh_updateOutput(const Tensor & input, const Tensor & output) {
+ return input.type().Tanh_updateOutput(input, output);
+}
+static inline void Tanh_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) {
+ return input.type().Tanh_updateGradInput(input, gradOutput, gradInput, output);
+}
+static inline void Tanh_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) {
+ return gradOutput.type().Tanh_updateGradInput(gradOutput, gradInput, output);
+}
+// Threshold with optional in-place operation (`inplace` flag forwarded as-is).
+static inline void Threshold_updateOutput(const Tensor & input, const Tensor & output, Scalar threshold, Scalar val, bool inplace) {
+ return input.type().Threshold_updateOutput(input, output, threshold, val, inplace);
+}
+static inline void Threshold_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar threshold, Scalar val, bool inplace) {
+ return input.type().Threshold_updateGradInput(input, gradOutput, gradInput, threshold, val, inplace);
+}
+// Temporal (1-D) NN wrappers: convolution, max pooling, sub-sampling and row
+// convolution. kW/dW are kernel width and stride; all dispatch on input's
+// dynamic Type and write into caller-provided output/grad tensors.
+static inline void TemporalConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int dW, int inputFrameSize, int outputFrameSize) {
+ return input.type().TemporalConvolution_updateOutput(input, output, weight, bias, kW, dW, inputFrameSize, outputFrameSize);
+}
+static inline void TemporalConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int dW) {
+ return input.type().TemporalConvolution_updateGradInput(input, gradOutput, gradInput, weight, kW, dW);
+}
+static inline void TemporalConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int dW, Scalar scale) {
+ return input.type().TemporalConvolution_accGradParameters(input, gradOutput, gradWeight, gradBias, kW, dW, scale);
+}
+static inline void TemporalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int dW) {
+ return input.type().TemporalMaxPooling_updateOutput(input, output, indices, kW, dW);
+}
+static inline void TemporalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int dW) {
+ return input.type().TemporalMaxPooling_updateGradInput(input, gradOutput, gradInput, indices, kW, dW);
+}
+static inline void TemporalSubSampling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int dW, int inputFrameSize) {
+ return input.type().TemporalSubSampling_updateOutput(input, output, weight, bias, kW, dW, inputFrameSize);
+}
+static inline void TemporalSubSampling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int dW) {
+ return input.type().TemporalSubSampling_updateGradInput(input, gradOutput, gradInput, weight, kW, dW);
+}
+static inline void TemporalSubSampling_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int dW, Scalar scale) {
+ return input.type().TemporalSubSampling_accGradParameters(input, gradOutput, gradWeight, gradBias, kW, dW, scale);
+}
+// Row convolution additionally threads finput/fgradInput workspace buffers.
+static inline void TemporalRowConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst) {
+ return input.type().TemporalRowConvolution_updateOutput(input, output, weight, bias, finput, fgradInput, kW, dW, padW, featFirst);
+}
+static inline void TemporalRowConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst) {
+ return input.type().TemporalRowConvolution_updateGradInput(input, gradOutput, gradInput, weight, finput, fgradInput, kW, dW, padW, featFirst);
+}
+static inline void TemporalRowConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst, Scalar scale) {
+ return input.type().TemporalRowConvolution_accGradParameters(input, gradOutput, gradWeight, gradBias, finput, fgradInput, kW, dW, padW, featFirst, scale);
+}
+// BatchNormalization wrappers. The multiple overloads of each entry point
+// differ only by progressively dropping tensor arguments (weight, bias,
+// gradWeight, gradBias, save_mean/save_std) — presumably the generator's
+// expansion of optional-tensor parameters; verify against the declarations.
+static inline void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps) {
+ return input.type().BatchNormalization_updateOutput(input, output, weight, bias, running_mean, running_var, save_mean, save_std, train, momentum, eps);
+}
+static inline void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps) {
+ return input.type().BatchNormalization_updateOutput(input, output, weight, running_mean, running_var, save_mean, save_std, train, momentum, eps);
+}
+static inline void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps) {
+ return input.type().BatchNormalization_updateOutput(input, output, running_mean, running_var, save_mean, save_std, train, momentum, eps);
+}
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) {
+ return input.type().BatchNormalization_backward(input, gradOutput, gradInput, gradWeight, gradBias, weight, running_mean, running_var, save_mean, save_std, train, scale, eps);
+}
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) {
+ return input.type().BatchNormalization_backward(input, gradOutput, gradInput, gradWeight, gradBias, running_mean, running_var, save_mean, save_std, train, scale, eps);
+}
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) {
+ return input.type().BatchNormalization_backward(input, gradOutput, gradInput, gradWeight, running_mean, running_var, save_mean, save_std, train, scale, eps);
+}
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) {
+ return input.type().BatchNormalization_backward(input, gradOutput, gradInput, running_mean, running_var, save_mean, save_std, train, scale, eps);
+}
+static inline void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) {
+ return input.type().BatchNormalization_backward(input, gradOutput, running_mean, running_var, save_mean, save_std, train, scale, eps);
+}
+// Spatial (2-D) convolution wrappers: connection-table (Map), im2col-based
+// (MM, with finput/fgradInput workspaces), depth-wise, and locally-connected
+// variants. kW/kH, dW/dH, padW/padH are kernel, stride and padding; overloads
+// without `bias`/`gradBias` mirror the optional-bias declarations.
+static inline void SpatialConvolutionMap_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH) {
+ return input.type().SpatialConvolutionMap_updateOutput(input, output, weight, bias, connTable, nInputPlane, nOutputPlane, dW, dH);
+}
+static inline void SpatialConvolutionMap_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH) {
+ return input.type().SpatialConvolutionMap_updateGradInput(input, gradOutput, gradInput, weight, bias, connTable, nInputPlane, nOutputPlane, dW, dH);
+}
+static inline void SpatialConvolutionMap_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH, Scalar scale) {
+ return input.type().SpatialConvolutionMap_accGradParameters(input, gradOutput, gradWeight, gradBias, connTable, nInputPlane, nOutputPlane, dW, dH, scale);
+}
+static inline void SpatialConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) {
+ return input.type().SpatialConvolutionMM_updateOutput(input, output, weight, bias, finput, fgradInput, kW, kH, dW, dH, padW, padH);
+}
+static inline void SpatialConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) {
+ return input.type().SpatialConvolutionMM_updateOutput(input, output, weight, finput, fgradInput, kW, kH, dW, dH, padW, padH);
+}
+static inline void SpatialConvolutionMM_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) {
+ return input.type().SpatialConvolutionMM_updateGradInput(input, gradOutput, gradInput, weight, finput, fgradInput, kW, kH, dW, dH, padW, padH);
+}
+static inline void SpatialConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale) {
+ return input.type().SpatialConvolutionMM_accGradParameters(input, gradOutput, gradWeight, gradBias, finput, fgradInput, kW, kH, dW, dH, padW, padH, scale);
+}
+static inline void SpatialConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale) {
+ return input.type().SpatialConvolutionMM_accGradParameters(input, gradOutput, gradWeight, finput, fgradInput, kW, kH, dW, dH, padW, padH, scale);
+}
+static inline void SpatialDepthWiseConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) {
+ return input.type().SpatialDepthWiseConvolution_updateOutput(input, output, weight, bias, finput, fgradInput, kW, kH, dW, dH, padW, padH);
+}
+static inline void SpatialDepthWiseConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) {
+ return input.type().SpatialDepthWiseConvolution_updateOutput(input, output, weight, finput, fgradInput, kW, kH, dW, dH, padW, padH);
+}
+static inline void SpatialDepthWiseConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) {
+ return input.type().SpatialDepthWiseConvolution_updateGradInput(input, gradOutput, gradInput, weight, finput, fgradInput, kW, kH, dW, dH, padW, padH);
+}
+static inline void SpatialDepthWiseConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale) {
+ return input.type().SpatialDepthWiseConvolution_accGradParameters(input, gradOutput, gradWeight, gradBias, finput, fgradInput, kW, kH, dW, dH, padW, padH, scale);
+}
+static inline void SpatialDepthWiseConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale) {
+ return input.type().SpatialDepthWiseConvolution_accGradParameters(input, gradOutput, gradWeight, finput, fgradInput, kW, kH, dW, dH, padW, padH, scale);
+}
+static inline void SpatialConvolutionLocal_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight) {
+ return input.type().SpatialConvolutionLocal_updateOutput(input, output, weight, bias, finput, fgradInput, kW, kH, dW, dH, padW, padH, inputWidth, inputHeight, outputWidth, outputHeight);
+}
+static inline void SpatialConvolutionLocal_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight) {
+ return input.type().SpatialConvolutionLocal_updateGradInput(input, gradOutput, gradInput, weight, finput, fgradInput, kW, kH, dW, dH, padW, padH, inputWidth, inputHeight, outputWidth, outputHeight);
+}
+static inline void SpatialConvolutionLocal_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight, Scalar scale) {
+ return input.type().SpatialConvolutionLocal_accGradParameters(input, gradOutput, gradWeight, gradBias, finput, fgradInput, kW, kH, dW, dH, padW, padH, inputWidth, inputHeight, outputWidth, outputHeight, scale);
+}
+// Spatial pooling wrappers: adaptive max/average (target owidth/oheight),
+// plain average pooling, and fractional max pooling (randomSamples supplies
+// the stochastic region offsets; indices records max locations for backward).
+static inline void SpatialAdaptiveMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int owidth, int oheight) {
+ return input.type().SpatialAdaptiveMaxPooling_updateOutput(input, output, indices, owidth, oheight);
+}
+static inline void SpatialAdaptiveMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices) {
+ return input.type().SpatialAdaptiveMaxPooling_updateGradInput(input, gradOutput, gradInput, indices);
+}
+static inline void SpatialAdaptiveAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int owidth, int oheight) {
+ return input.type().SpatialAdaptiveAveragePooling_updateOutput(input, output, owidth, oheight);
+}
+static inline void SpatialAdaptiveAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput) {
+ return input.type().SpatialAdaptiveAveragePooling_updateGradInput(input, gradOutput, gradInput);
+}
+static inline void SpatialAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad) {
+ return input.type().SpatialAveragePooling_updateOutput(input, output, kW, kH, dW, dH, padW, padH, ceil_mode, count_include_pad);
+}
+static inline void SpatialAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad) {
+ return input.type().SpatialAveragePooling_updateGradInput(input, gradOutput, gradInput, kW, kH, dW, dH, padW, padH, ceil_mode, count_include_pad);
+}
+static inline void SpatialFractionalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, int outputW, int outputH, int poolSizeW, int poolSizeH, const Tensor & indices, const Tensor & randomSamples) {
+ return input.type().SpatialFractionalMaxPooling_updateOutput(input, output, outputW, outputH, poolSizeW, poolSizeH, indices, randomSamples);
+}
+static inline void SpatialFractionalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int outputW, int outputH, int poolSizeW, int poolSizeH, const Tensor & indices) {
+ return input.type().SpatialFractionalMaxPooling_updateGradInput(input, gradOutput, gradInput, outputW, outputH, poolSizeW, poolSizeH, indices);
+}
+// Full (transposed) and dilated spatial convolution wrappers. columns/ones
+// and gradColumns are workspace buffers; adjW/adjH are output-size
+// adjustments for the transposed variant, dilationW/dilationH the dilation
+// factors. Overloads without bias/gradBias mirror optional-bias declarations.
+static inline void SpatialFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH) {
+ return input.type().SpatialFullConvolution_updateOutput(input, output, weight, bias, columns, ones, kW, kH, dW, dH, padW, padH, adjW, adjH);
+}
+static inline void SpatialFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH) {
+ return input.type().SpatialFullConvolution_updateOutput(input, output, weight, columns, ones, kW, kH, dW, dH, padW, padH, adjW, adjH);
+}
+static inline void SpatialFullConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH) {
+ return input.type().SpatialFullConvolution_updateGradInput(input, gradOutput, gradInput, weight, gradColumns, kW, kH, dW, dH, padW, padH, adjW, adjH);
+}
+static inline void SpatialFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH, Scalar scale) {
+ return input.type().SpatialFullConvolution_accGradParameters(input, gradOutput, gradWeight, gradBias, columns, ones, kW, kH, dW, dH, padW, padH, adjW, adjH, scale);
+}
+static inline void SpatialFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH, Scalar scale) {
+ return input.type().SpatialFullConvolution_accGradParameters(input, gradOutput, gradWeight, columns, ones, kW, kH, dW, dH, padW, padH, adjW, adjH, scale);
+}
+static inline void SpatialFullConvolutionMap_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH) {
+ return input.type().SpatialFullConvolutionMap_updateOutput(input, output, weight, bias, connTable, nInputPlane, nOutputPlane, dW, dH);
+}
+static inline void SpatialFullConvolutionMap_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH) {
+ return input.type().SpatialFullConvolutionMap_updateGradInput(input, gradOutput, gradInput, weight, bias, connTable, nInputPlane, nOutputPlane, dW, dH);
+}
+static inline void SpatialFullConvolutionMap_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH, Scalar scale) {
+ return input.type().SpatialFullConvolutionMap_accGradParameters(input, gradOutput, gradWeight, gradBias, connTable, nInputPlane, nOutputPlane, dW, dH, scale);
+}
+static inline void SpatialDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) {
+ return input.type().SpatialDilatedConvolution_updateOutput(input, output, weight, bias, columns, ones, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
+}
+static inline void SpatialDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) {
+ return input.type().SpatialDilatedConvolution_updateOutput(input, output, weight, columns, ones, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
+}
+static inline void SpatialDilatedConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) {
+ return input.type().SpatialDilatedConvolution_updateGradInput(input, gradOutput, gradInput, weight, gradColumns, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
+}
+static inline void SpatialDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, Scalar scale) {
+ return input.type().SpatialDilatedConvolution_accGradParameters(input, gradOutput, gradWeight, gradBias, columns, ones, kW, kH, dW, dH, padW, padH, dilationW, dilationH, scale);
+}
+static inline void SpatialDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, Scalar scale) {
+ return input.type().SpatialDilatedConvolution_accGradParameters(input, gradOutput, gradWeight, columns, ones, kW, kH, dW, dH, padW, padH, dilationW, dilationH, scale);
+}
+// Spatial max pooling (plain and dilated), max unpooling (scatter back via
+// saved indices) and learned sub-sampling wrappers; all dispatch on input's
+// dynamic Type.
+static inline void SpatialMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode) {
+ return input.type().SpatialMaxPooling_updateOutput(input, output, indices, kW, kH, dW, dH, padW, padH, ceil_mode);
+}
+static inline void SpatialMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode) {
+ return input.type().SpatialMaxPooling_updateGradInput(input, gradOutput, gradInput, indices, kW, kH, dW, dH, padW, padH, ceil_mode);
+}
+static inline void SpatialDilatedMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, bool ceil_mode) {
+ return input.type().SpatialDilatedMaxPooling_updateOutput(input, output, indices, kW, kH, dW, dH, padW, padH, dilationW, dilationH, ceil_mode);
+}
+static inline void SpatialDilatedMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, bool ceil_mode) {
+ return input.type().SpatialDilatedMaxPooling_updateGradInput(input, gradOutput, gradInput, indices, kW, kH, dW, dH, padW, padH, dilationW, dilationH, ceil_mode);
+}
+static inline void SpatialMaxUnpooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int owidth, int oheight) {
+ return input.type().SpatialMaxUnpooling_updateOutput(input, output, indices, owidth, oheight);
+}
+static inline void SpatialMaxUnpooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int owidth, int oheight) {
+ return input.type().SpatialMaxUnpooling_updateGradInput(input, gradOutput, gradInput, indices, owidth, oheight);
+}
+static inline void SpatialSubSampling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int kH, int dW, int dH) {
+ return input.type().SpatialSubSampling_updateOutput(input, output, weight, bias, kW, kH, dW, dH);
+}
+static inline void SpatialSubSampling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int kH, int dW, int dH) {
+ return input.type().SpatialSubSampling_updateGradInput(input, gradOutput, gradInput, weight, kW, kH, dW, dH);
+}
+static inline void SpatialSubSampling_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int kH, int dW, int dH, Scalar scale) {
+ return input.type().SpatialSubSampling_accGradParameters(input, gradOutput, gradWeight, gradBias, kW, kH, dW, dH, scale);
+}
+// Spatial upsampling wrappers (nearest-neighbor and bilinear). The bilinear
+// backward takes no `input` tensor — only explicit geometry — so it
+// dispatches on gradOutput's dynamic Type instead.
+static inline void SpatialUpSamplingNearest_updateOutput(const Tensor & input, const Tensor & output, int scale_factor) {
+ return input.type().SpatialUpSamplingNearest_updateOutput(input, output, scale_factor);
+}
+static inline void SpatialUpSamplingNearest_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int scale_factor) {
+ return input.type().SpatialUpSamplingNearest_updateGradInput(input, gradOutput, gradInput, scale_factor);
+}
+static inline void SpatialUpSamplingBilinear_updateOutput(const Tensor & input, const Tensor & output, int outputHeight, int outputWidth) {
+ return input.type().SpatialUpSamplingBilinear_updateOutput(input, output, outputHeight, outputWidth);
+}
+static inline void SpatialUpSamplingBilinear_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, int nbatch, int nchannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth) {
+ return gradOutput.type().SpatialUpSamplingBilinear_updateGradInput(gradOutput, gradInput, nbatch, nchannels, inputHeight, inputWidth, outputHeight, outputWidth);
+}
+// Volumetric (3-D) wrappers: average pooling, convolution (direct and MM
+// variants with finput/fgradInput workspaces), and fractional max pooling.
+// kT/dT/pT are the temporal (depth) kernel, stride and padding; overloads
+// without bias/gradBias mirror the optional-bias declarations.
+static inline void VolumetricAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int kT, int kW, int kH, int dT, int dW, int dH) {
+ return input.type().VolumetricAveragePooling_updateOutput(input, output, kT, kW, kH, dT, dW, dH);
+}
+static inline void VolumetricAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int kT, int kW, int kH, int dT, int dW, int dH) {
+ return input.type().VolumetricAveragePooling_updateGradInput(input, gradOutput, gradInput, kT, kW, kH, dT, dW, dH);
+}
+static inline void VolumetricConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH) {
+ return input.type().VolumetricConvolution_updateOutput(input, output, weight, bias, finput, fgradInput, dT, dW, dH, pT, pW, pH);
+}
+static inline void VolumetricConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH) {
+ return input.type().VolumetricConvolution_updateOutput(input, output, weight, finput, fgradInput, dT, dW, dH, pT, pW, pH);
+}
+static inline void VolumetricConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, int dT, int dW, int dH, int pT, int pW, int pH) {
+ return input.type().VolumetricConvolution_updateGradInput(input, gradOutput, gradInput, weight, finput, dT, dW, dH, pT, pW, pH);
+}
+static inline void VolumetricConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale) {
+ return input.type().VolumetricConvolution_accGradParameters(input, gradOutput, gradWeight, gradBias, finput, fgradInput, dT, dW, dH, pT, pW, pH, scale);
+}
+static inline void VolumetricConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale) {
+ return input.type().VolumetricConvolution_accGradParameters(input, gradOutput, gradWeight, finput, fgradInput, dT, dW, dH, pT, pW, pH, scale);
+}
+static inline void VolumetricConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) {
+ return input.type().VolumetricConvolutionMM_updateOutput(input, output, weight, bias, finput, kT, kW, kH, dT, dW, dH, pT, pW, pH);
+}
+static inline void VolumetricConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) {
+ return input.type().VolumetricConvolutionMM_updateOutput(input, output, weight, finput, kT, kW, kH, dT, dW, dH, pT, pW, pH);
+}
+static inline void VolumetricConvolutionMM_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) {
+ return input.type().VolumetricConvolutionMM_updateGradInput(input, gradOutput, gradInput, weight, finput, fgradInput, kT, kW, kH, dT, dW, dH, pT, pW, pH);
+}
+static inline void VolumetricConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale) {
+ return input.type().VolumetricConvolutionMM_accGradParameters(input, gradOutput, gradWeight, gradBias, finput, kT, kW, kH, dT, dW, dH, pT, pW, pH, scale);
+}
+static inline void VolumetricConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale) {
+ return input.type().VolumetricConvolutionMM_accGradParameters(input, gradOutput, gradWeight, finput, kT, kW, kH, dT, dW, dH, pT, pW, pH, scale);
+}
+static inline void VolumetricFractionalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, int outputT, int outputW, int outputH, int poolSizeT, int poolSizeW, int poolSizeH, const Tensor & indices, const Tensor & randomSamples) {
+ return input.type().VolumetricFractionalMaxPooling_updateOutput(input, output, outputT, outputW, outputH, poolSizeT, poolSizeW, poolSizeH, indices, randomSamples);
+}
+static inline void VolumetricFractionalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int outputT, int outputW, int outputH, int poolSizeT, int poolSizeW, int poolSizeH, const Tensor & indices) {
+ return input.type().VolumetricFractionalMaxPooling_updateGradInput(input, gradOutput, gradInput, outputT, outputW, outputH, poolSizeT, poolSizeW, poolSizeH, indices);
+}
+static inline void VolumetricFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH) {
+ return input.type().VolumetricFullConvolution_updateOutput(input, output, weight, bias, finput, fgradInput, dT, dW, dH, pT, pW, pH, aT, aW, aH);
+}
+static inline void VolumetricFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH) {
+ return input.type().VolumetricFullConvolution_updateOutput(input, output, weight, finput, fgradInput, dT, dW, dH, pT, pW, pH, aT, aW, aH);
+}
+static inline void VolumetricFullConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH) {
+ return input.type().VolumetricFullConvolution_updateGradInput(input, gradOutput, gradInput, weight, finput, fgradInput, dT, dW, dH, pT, pW, pH, aT, aW, aH);
+}
+static inline void VolumetricFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH, Scalar scale) {
+ return input.type().VolumetricFullConvolution_accGradParameters(input, gradOutput, gradWeight, gradBias, finput, fgradInput, dT, dW, dH, pT, pW, pH, aT, aW, aH, scale);
+}
+static inline void VolumetricFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH, Scalar scale) {
+ return input.type().VolumetricFullConvolution_accGradParameters(input, gradOutput, gradWeight, finput, fgradInput, dT, dW, dH, pT, pW, pH, aT, aW, aH, scale);
+}
+static inline void VolumetricDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH) {
+ return input.type().VolumetricDilatedConvolution_updateOutput(input, output, weight, bias, columns, ones, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH);
+}
+static inline void VolumetricDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH) {
+ return input.type().VolumetricDilatedConvolution_updateOutput(input, output, weight, columns, ones, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH);
+}
+static inline void VolumetricDilatedConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH) {
+ return input.type().VolumetricDilatedConvolution_updateGradInput(input, gradOutput, gradInput, weight, gradColumns, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH);
+}
+static inline void VolumetricDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, Scalar scale) {
+ return input.type().VolumetricDilatedConvolution_accGradParameters(input, gradOutput, gradWeight, gradBias, columns, ones, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, scale);
+}
+static inline void VolumetricDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, Scalar scale) {
+ return input.type().VolumetricDilatedConvolution_accGradParameters(input, gradOutput, gradWeight, columns, ones, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH, scale);
+}
+static inline void VolumetricMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, bool ceilMode) {
+ return input.type().VolumetricMaxPooling_updateOutput(input, output, indices, kT, kW, kH, dT, dW, dH, pT, pW, pH, ceilMode);
+}
+static inline void VolumetricMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, bool ceilMode) {
+ return input.type().VolumetricMaxPooling_updateGradInput(input, gradOutput, gradInput, indices, kT, kW, kH, dT, dW, dH, pT, pW, pH, ceilMode);
+}
+static inline void VolumetricDilatedMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, int dilationT, int dilationW, int dilationH, bool ceilMode) {
+ return input.type().VolumetricDilatedMaxPooling_updateOutput(input, output, indices, kT, kW, kH, dT, dW, dH, pT, pW, pH, dilationT, dilationW, dilationH, ceilMode);
+}
+static inline void VolumetricDilatedMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, int dilationT, int dilationW, int dilationH, bool ceilMode) {
+ return input.type().VolumetricDilatedMaxPooling_updateGradInput(input, gradOutput, gradInput, indices, kT, kW, kH, dT, dW, dH, pT, pW, pH, dilationT, dilationW, dilationH, ceilMode);
+}
+static inline void VolumetricMaxUnpooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) {
+ return input.type().VolumetricMaxUnpooling_updateOutput(input, output, indices, oT, oW, oH, dT, dW, dH, pT, pW, pH);
+}
+static inline void VolumetricMaxUnpooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) {
+ return input.type().VolumetricMaxUnpooling_updateGradInput(input, gradOutput, gradInput, indices, oT, oW, oH, dT, dW, dH, pT, pW, pH);
+}
+static inline void SpatialReflectionPadding_updateOutput(const Tensor & input, const Tensor & output, int pad_l, int pad_r, int pad_t, int pad_b) {
+ return input.type().SpatialReflectionPadding_updateOutput(input, output, pad_l, pad_r, pad_t, pad_b);
+}
+static inline void SpatialReflectionPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pad_l, int pad_r, int pad_t, int pad_b) {
+ return input.type().SpatialReflectionPadding_updateGradInput(input, gradOutput, gradInput, pad_l, pad_r, pad_t, pad_b);
+}
+static inline void SpatialReplicationPadding_updateOutput(const Tensor & input, const Tensor & output, int pad_l, int pad_r, int pad_t, int pad_b) {
+ return input.type().SpatialReplicationPadding_updateOutput(input, output, pad_l, pad_r, pad_t, pad_b);
+}
+static inline void SpatialReplicationPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pad_l, int pad_r, int pad_t, int pad_b) {
+ return input.type().SpatialReplicationPadding_updateGradInput(input, gradOutput, gradInput, pad_l, pad_r, pad_t, pad_b);
+}
+static inline void VolumetricReplicationPadding_updateOutput(const Tensor & input, const Tensor & output, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) {
+ return input.type().VolumetricReplicationPadding_updateOutput(input, output, pleft, pright, ptop, pbottom, pfront, pback);
+}
+static inline void VolumetricReplicationPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) {
+ return input.type().VolumetricReplicationPadding_updateGradInput(input, gradOutput, gradInput, pleft, pright, ptop, pbottom, pfront, pback);
+}
+static inline void VolumetricUpSamplingNearest_updateOutput(const Tensor & input, const Tensor & output, int scale_factor) {
+ return input.type().VolumetricUpSamplingNearest_updateOutput(input, output, scale_factor);
+}
+static inline void VolumetricUpSamplingNearest_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int scale_factor) {
+ return input.type().VolumetricUpSamplingNearest_updateGradInput(input, gradOutput, gradInput, scale_factor);
+}
+static inline void VolumetricUpSamplingTrilinear_updateOutput(const Tensor & input, const Tensor & output, int outputDepth, int outputHeight, int outputWidth) {
+ return input.type().VolumetricUpSamplingTrilinear_updateOutput(input, output, outputDepth, outputHeight, outputWidth);
+}
+static inline void VolumetricUpSamplingTrilinear_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, int nbatch, int nchannels, int inputDepth, int inputHeight, int inputWidth, int outputDepth, int outputHeight, int outputWidth) {
+ return gradOutput.type().VolumetricUpSamplingTrilinear_updateGradInput(gradOutput, gradInput, nbatch, nchannels, inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth);
+}
+static inline void SpatialCrossMapLRN_updateOutput(const Tensor & input, const Tensor & output, const Tensor & scale, int size, Scalar alpha, Scalar beta, Scalar k) {
+ return input.type().SpatialCrossMapLRN_updateOutput(input, output, scale, size, alpha, beta, k);
+}
+static inline void SpatialCrossMapLRN_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & scale, const Tensor & output, int size, Scalar alpha, Scalar beta, Scalar k) {
+ return input.type().SpatialCrossMapLRN_updateGradInput(input, gradOutput, gradInput, scale, output, size, alpha, beta, k);
+}
+
+}
diff --git a/aten/doc/Tensor.h b/aten/doc/Tensor.h
new file mode 100644
index 0000000000..f090122cbc
--- /dev/null
+++ b/aten/doc/Tensor.h
@@ -0,0 +1,1586 @@
+#pragma once
+
+#include "ATen/Scalar.h"
+#include "ATen/Type.h"
+#include "ATen/TensorImpl.h"
+#include "ATen/Utils.h"
+#include "ATen/TensorAccessor.h"
+
+namespace at {
+class Type;
+
+// Tensor: a reference-counted handle to a TensorImpl.  Copying a Tensor
+// copies the handle (retain/release on the shared impl), never the data.
+// Backend (CPU/CUDA) and scalar type are resolved dynamically through
+// type(), so this single class covers every tensor kind without templates.
+// A default-constructed Tensor is "undefined"; test with defined() before
+// dereferencing (most accessors assume pImpl != nullptr).
+struct Tensor {
+
+ // Undefined tensor: holds no impl.
+ Tensor()
+ : pImpl(nullptr){}
+ // Wrap an existing impl.  retain=true (default) adds a reference;
+ // retain=false adopts a reference the caller already owns.
+ explicit Tensor(TensorImpl * self, bool retain = true)
+ : pImpl(self) {
+ if(pImpl != nullptr && retain)
+ pImpl->retain();
+ }
+ Tensor(Tensor const & rhs)
+ : pImpl(rhs.pImpl) {
+ if(pImpl != nullptr)
+ pImpl->retain();
+ }
+ // Move: steal the impl; rhs becomes undefined.
+ Tensor(Tensor && rhs)
+ : pImpl(rhs.pImpl) {
+ rhs.pImpl = nullptr;
+ }
+ ~Tensor() {
+ if(pImpl != nullptr)
+ pImpl->release();
+ }
+ // Move-assign via swap: our old impl is released when rhs is destroyed.
+ Tensor & operator=(Tensor && rhs) {
+ rhs.swap(*this);
+ return *this;
+ }
+ Tensor & operator=(Tensor const & rhs) {
+ //Tensor ctor retains original rhs.pImpl
+ //then rhs.pImpl is swapped with this->pImpl
+ //finally Tensor dtor releases rhs.pImpl, which was originally this->pImpl
+ Tensor(rhs).swap(*this);
+ return *this;
+ }
+ // Drop our reference; become undefined.
+ void reset() {
+ Tensor().swap(*this);
+ }
+ void reset(TensorImpl * rhs) {
+ Tensor(rhs).swap(*this);
+ }
+ void reset(TensorImpl * rhs, bool retain) {
+ Tensor(rhs, retain).swap(*this);
+ }
+ // Non-owning access to the impl (may be nullptr).
+ TensorImpl * get() {
+ return pImpl;
+ }
+ // Release ownership to the caller without changing the refcount.
+ TensorImpl * detach() {
+ TensorImpl * ret = pImpl;
+ pImpl = nullptr;
+ return ret;
+ }
+ bool defined() const {
+ return pImpl != nullptr;
+ }
+ void swap(Tensor & rhs) {
+ TensorImpl * tmp = pImpl;
+ pImpl = rhs.pImpl;
+ rhs.pImpl = tmp;
+ }
+ const char * toString() const {
+ return pImpl->toString();
+ }
+ IntList sizes() const {
+ return pImpl->sizes();
+ }
+ IntList strides() const {
+ return pImpl->strides();
+ }
+ Type & type() const {
+ return pImpl->type();
+ }
+ // No-op (returns *this, no copy) when the tensor already has type t;
+ // otherwise performs a copying conversion.
+ Tensor toType(Type & t) const {
+ if(type().ID() == t.ID())
+ return *this;
+ return t.copy(*this);
+ }
+ // In-place copy from src; resizes *this to src's sizes first.
+ Tensor & copy_(const Tensor & src) {
+ resize_(src.sizes());
+ type().copy(src,*this);
+ return *this;
+ }
+ Tensor toType(ScalarType t) {
+ return toType(type().toScalarType(t));
+ }
+ Tensor toBackend(Backend b) {
+ return toType(type().toBackend(b));
+ }
+ int64_t dim() const {
+ return ndimension();
+ }
+ // Typed pointer to the underlying data; T must match the scalar type.
+ template<typename T>
+ T * data() const;
+
+ // Statically-dimensioned view for fast elementwise indexing; checks that
+ // N matches dim() at runtime.
+ template<typename T, size_t N>
+ TensorAccessor<T,N> accessor() {
+ static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data<T>()");
+ AT_ASSERT(dim() == N, "expected %d dims but tensor has %d",N,dim());
+ return TensorAccessor<T,N>(data<T>(),sizes().data(),strides().data());
+ }
+
+ Tensor operator-() {
+ return neg();
+ }
+ // Compound assignment: forward to the in-place op and return *this.
+ // FIX(review): these previously fell off the end of a Tensor&-returning
+ // function without a return statement — undefined behavior per
+ // [stmt.return].  Each in-place op returns *this, so return it.
+ Tensor& operator+=(const Tensor & other) {
+ return add_(other);
+ }
+ Tensor& operator+=(Scalar other) {
+ return add_(other);
+ }
+ Tensor& operator-=(const Tensor & other) {
+ return sub_(other);
+ }
+ Tensor& operator-=(Scalar other) {
+ return sub_(other);
+ }
+ Tensor& operator*=(const Tensor & other) {
+ return mul_(other);
+ }
+ Tensor& operator*=(Scalar other) {
+ return mul_(other);
+ }
+ Tensor& operator/=(const Tensor & other) {
+ return div_(other);
+ }
+ Tensor& operator/=(Scalar other) {
+ return div_(other);
+ }
+ // Index along the first dimension (a view, not a copy).
+ Tensor operator[](int64_t idx) {
+ return select(0,idx);
+ }
+
+ // Generated method declarations follow; bodies are static-inline
+ // dispatchers defined below the class.  Trailing '_' means in-place.
+ //example
+ //Tensor * add(Tensor & b);
+ int64_t storage_offset() const;
+ int64_t ndimension() const;
+ Tensor & resize_(IntList size);
+ int64_t numel() const;
+ Tensor & set_(Storage & storage);
+ Tensor & set_(Storage & sourceStorage, int64_t storage_offset, IntList size, IntList stride);
+ Tensor & set_(Storage & sourceStorage, int64_t storage_offset, IntList size);
+ Tensor & set_(const Tensor & source);
+ Tensor & set_();
+ Tensor & fill_(Scalar value);
+ bool is_same_size(const Tensor & other) const;
+ bool is_contiguous() const;
+ bool is_set_to(const Tensor & tensor) const;
+ Tensor & masked_fill_(const Tensor & mask, Scalar value);
+ Tensor & masked_scatter_(const Tensor & mask, const Tensor & source);
+ Tensor masked_select(const Tensor & mask) const;
+ Tensor transpose(int64_t dim0, int64_t dim1) const;
+ Tensor & transpose_(int64_t dim0, int64_t dim1);
+ Tensor t() const;
+ Tensor & t_();
+ Tensor squeeze(int64_t dim) const;
+ Tensor squeeze() const;
+ Tensor & squeeze_(int64_t dim);
+ Tensor & squeeze_();
+ Tensor unsqueeze(int64_t dim) const;
+ Tensor & unsqueeze_(int64_t dim);
+ Tensor nonzero() const;
+ Tensor contiguous() const;
+ Tensor clone() const;
+ Tensor view(IntList size) const;
+ Tensor expand(IntList size) const;
+ Tensor & resize_as_(const Tensor & the_template);
+ Tensor index_select(int64_t dim, const Tensor & index) const;
+ Tensor & index_copy_(int64_t dim, const Tensor & index, const Tensor & source);
+ Tensor & index_add_(int64_t dim, const Tensor & index, const Tensor & source);
+ Tensor & index_fill_(int64_t dim, const Tensor & index, Scalar value);
+ Tensor narrow(int64_t dimension, int64_t start, int64_t length) const;
+ Tensor unfold(int64_t dimension, int64_t size, int64_t step) const;
+ Tensor & scatter_(int64_t dim, const Tensor & index, const Tensor & src);
+ Tensor & scatter_(int64_t dim, const Tensor & index, Scalar value);
+ Tensor & scatter_add_(int64_t dim, const Tensor & index, const Tensor & src);
+ Tensor gather(int64_t dim, const Tensor & index) const;
+ void* data_ptr() const;
+ bool equal(const Tensor & other) const;
+ // Bitwise / shift operators (Python dunder-style names from the generator).
+ Tensor __and__(Scalar value) const;
+ Tensor __and__(const Tensor & other) const;
+ Tensor & __iand__(Scalar value);
+ Tensor & __iand__(const Tensor & other);
+ Tensor __or__(Scalar value) const;
+ Tensor __or__(const Tensor & other) const;
+ Tensor & __ior__(Scalar value);
+ Tensor & __ior__(const Tensor & other);
+ Tensor __xor__(Scalar value) const;
+ Tensor __xor__(const Tensor & other) const;
+ Tensor & __ixor__(Scalar value);
+ Tensor & __ixor__(const Tensor & other);
+ Tensor __lshift__(Scalar value) const;
+ Tensor __lshift__(const Tensor & other) const;
+ Tensor & __ilshift__(Scalar value);
+ Tensor & __ilshift__(const Tensor & other);
+ Tensor __rshift__(Scalar value) const;
+ Tensor __rshift__(const Tensor & other) const;
+ Tensor & __irshift__(Scalar value);
+ Tensor & __irshift__(const Tensor & other);
+ // Element-wise comparisons.
+ Tensor lt(Scalar value) const;
+ Tensor lt(const Tensor & other) const;
+ Tensor & lt_(Scalar value);
+ Tensor & lt_(const Tensor & other);
+ Tensor gt(Scalar value) const;
+ Tensor gt(const Tensor & other) const;
+ Tensor & gt_(Scalar value);
+ Tensor & gt_(const Tensor & other);
+ Tensor le(Scalar value) const;
+ Tensor le(const Tensor & other) const;
+ Tensor & le_(Scalar value);
+ Tensor & le_(const Tensor & other);
+ Tensor ge(Scalar value) const;
+ Tensor ge(const Tensor & other) const;
+ Tensor & ge_(Scalar value);
+ Tensor & ge_(const Tensor & other);
+ Tensor eq(Scalar value) const;
+ Tensor eq(const Tensor & other) const;
+ Tensor & eq_(Scalar value);
+ Tensor & eq_(const Tensor & other);
+ Tensor ne(Scalar value) const;
+ Tensor ne(const Tensor & other) const;
+ Tensor & ne_(Scalar value);
+ Tensor & ne_(const Tensor & other);
+ // Reductions returning (values, indices) pairs where applicable.
+ std::tuple<Tensor,Tensor> min(int64_t dim, bool keepdim) const;
+ std::tuple<Tensor,Tensor> min(int64_t dim) const;
+ Tensor min(const Tensor & other) const;
+ Scalar min() const;
+ std::tuple<Tensor,Tensor> max(int64_t dim, bool keepdim) const;
+ std::tuple<Tensor,Tensor> max(int64_t dim) const;
+ Tensor max(const Tensor & other) const;
+ Scalar max() const;
+ std::tuple<Tensor,Tensor> kthvalue(int64_t k, bool keepdim) const;
+ std::tuple<Tensor,Tensor> kthvalue(int64_t k) const;
+ std::tuple<Tensor,Tensor> kthvalue(int64_t k, int64_t dim, bool keepdim) const;
+ std::tuple<Tensor,Tensor> kthvalue(int64_t k, int64_t dim) const;
+ std::tuple<Tensor,Tensor> mode(bool keepdim) const;
+ std::tuple<Tensor,Tensor> mode() const;
+ std::tuple<Tensor,Tensor> mode(int64_t dim, bool keepdim) const;
+ std::tuple<Tensor,Tensor> mode(int64_t dim) const;
+ std::tuple<Tensor,Tensor> median(bool keepdim) const;
+ std::tuple<Tensor,Tensor> median() const;
+ std::tuple<Tensor,Tensor> median(int64_t dim) const;
+ std::tuple<Tensor,Tensor> median(int64_t dim, bool keepdim) const;
+ std::tuple<Tensor,Tensor> sort() const;
+ std::tuple<Tensor,Tensor> sort(int64_t dim) const;
+ std::tuple<Tensor,Tensor> sort(int64_t dim, bool descending) const;
+ std::tuple<Tensor,Tensor> topk(int64_t k) const;
+ std::tuple<Tensor,Tensor> topk(int64_t k, int64_t dim, bool largest, bool sorted) const;
+ std::tuple<Tensor,Tensor> topk(int64_t k, int64_t dim, bool largest) const;
+ std::tuple<Tensor,Tensor> topk(int64_t k, int64_t dim) const;
+ bool all() const;
+ bool any() const;
+ int64_t get_device() const;
+ // Pointwise math (out-of-place returns a new tensor; '_' mutates).
+ Tensor abs() const;
+ Tensor & abs_();
+ Tensor & sigmoid_();
+ Tensor sigmoid() const;
+ Tensor & log_();
+ Tensor log() const;
+ Tensor & log1p_();
+ Tensor log1p() const;
+ Tensor lgamma() const;
+ Tensor & lgamma_();
+ Tensor & exp_();
+ Tensor exp() const;
+ Tensor & cos_();
+ Tensor cos() const;
+ Tensor & acos_();
+ Tensor acos() const;
+ Tensor & cosh_();
+ Tensor cosh() const;
+ Tensor & sin_();
+ Tensor sin() const;
+ Tensor & asin_();
+ Tensor asin() const;
+ Tensor & sinh_();
+ Tensor sinh() const;
+ Tensor & tan_();
+ Tensor tan() const;
+ Tensor & atan_();
+ Tensor atan() const;
+ Tensor & tanh_();
+ Tensor tanh() const;
+ Tensor & sqrt_();
+ Tensor sqrt() const;
+ Tensor & rsqrt_();
+ Tensor rsqrt() const;
+ Tensor & ceil_();
+ Tensor ceil() const;
+ Tensor & floor_();
+ Tensor floor() const;
+ Tensor & round_();
+ Tensor round() const;
+ Tensor & trunc_();
+ Tensor trunc() const;
+ Tensor & frac_();
+ Tensor frac() const;
+ // Statistical reductions.
+ Tensor mean(int64_t dim, bool keepdim) const;
+ Tensor mean(int64_t dim) const;
+ Scalar mean() const;
+ Tensor var(int64_t dim, bool keepdim) const;
+ Tensor var(int64_t dim) const;
+ Scalar var() const;
+ Tensor std(int64_t dim, bool keepdim) const;
+ Tensor std(int64_t dim) const;
+ Scalar std() const;
+ Tensor norm(Scalar p, int64_t dim, bool keepdim) const;
+ Tensor norm(Scalar p, int64_t dim) const;
+ Scalar norm(Scalar p) const;
+ Scalar norm() const;
+ Tensor renorm(Scalar p, int64_t dim, Scalar maxnorm) const;
+ Tensor & renorm_(Scalar p, int64_t dim, Scalar maxnorm);
+ Scalar dist(const Tensor & other, Scalar p) const;
+ Scalar dist(const Tensor & other) const;
+ Tensor reciprocal() const;
+ Tensor & reciprocal_();
+ Tensor neg() const;
+ Tensor & neg_();
+ Tensor atan2(const Tensor & other) const;
+ Tensor & atan2_(const Tensor & other);
+ Tensor pow(Scalar exponent) const;
+ Tensor pow(const Tensor & exponent) const;
+ Tensor & pow_(Scalar exponent);
+ Tensor & pow_(const Tensor & exponent);
+ Tensor lerp(const Tensor & end, Scalar weight) const;
+ Tensor & lerp_(const Tensor & end, Scalar weight);
+ Tensor histc() const;
+ Tensor histc(int64_t bins) const;
+ Tensor histc(int64_t bins, Scalar min) const;
+ Tensor histc(int64_t bins, Scalar min, Scalar max) const;
+ Tensor & zero_();
+ Tensor sum(int64_t dim, bool keepdim) const;
+ Tensor sum(int64_t dim) const;
+ Scalar sum() const;
+ Tensor prod(int64_t dim, bool keepdim) const;
+ Tensor prod(int64_t dim) const;
+ Scalar prod() const;
+ Tensor cumsum(int64_t dim) const;
+ Tensor cumprod(int64_t dim) const;
+ Tensor sign() const;
+ Tensor & sign_();
+ Scalar trace() const;
+ // Arithmetic; overloads with a leading Scalar apply it as a multiplier
+ // on `other` (PyTorch alpha/value semantics from the generator).
+ Tensor add(Scalar value, const Tensor & other) const;
+ Tensor add(Scalar value) const;
+ Tensor add(const Tensor & other) const;
+ Tensor & add_(Scalar value, const Tensor & other);
+ Tensor & add_(Scalar value);
+ Tensor & add_(const Tensor & other);
+ Tensor sub(Scalar value, const Tensor & other) const;
+ Tensor sub(Scalar value) const;
+ Tensor sub(const Tensor & other) const;
+ Tensor & sub_(Scalar value, const Tensor & other);
+ Tensor & sub_(Scalar value);
+ Tensor & sub_(const Tensor & other);
+ Tensor mul(Scalar value) const;
+ Tensor mul(const Tensor & other) const;
+ Tensor & mul_(Scalar value);
+ Tensor & mul_(const Tensor & other);
+ Tensor div(Scalar value) const;
+ Tensor div(const Tensor & other) const;
+ Tensor & div_(Scalar value);
+ Tensor & div_(const Tensor & other);
+ Tensor fmod(Scalar value) const;
+ Tensor fmod(const Tensor & other) const;
+ Tensor & fmod_(Scalar value);
+ Tensor & fmod_(const Tensor & other);
+ Tensor remainder(Scalar value) const;
+ Tensor remainder(const Tensor & other) const;
+ Tensor & remainder_(Scalar value);
+ Tensor & remainder_(const Tensor & other);
+ Tensor clamp(Scalar min, Scalar max) const;
+ Tensor clamp(Scalar min) const;
+ Tensor & clamp_(Scalar min, Scalar max);
+ Tensor & clamp_(Scalar min);
+ Scalar dot(const Tensor & tensor) const;
+ Tensor tril(int64_t k) const;
+ Tensor tril() const;
+ Tensor & tril_(int64_t k);
+ Tensor & tril_();
+ Tensor triu(int64_t k) const;
+ Tensor triu() const;
+ Tensor & triu_(int64_t k);
+ Tensor & triu_();
+ Tensor cross(const Tensor & other, int64_t dim) const;
+ Tensor cross(const Tensor & other) const;
+ Tensor diag(int64_t diagonal) const;
+ Tensor diag() const;
+ // BLAS-style fused ops: add{mm,mv,r,bmm}/baddbmm with optional beta/alpha.
+ Tensor addmm(Scalar beta, Scalar alpha, const Tensor & mat1, const Tensor & mat2) const;
+ Tensor addmm(Scalar beta, const Tensor & mat1, const Tensor & mat2) const;
+ Tensor addmm(const Tensor & mat1, const Tensor & mat2) const;
+ Tensor & addmm_(Scalar beta, Scalar alpha, const Tensor & mat1, const Tensor & mat2);
+ Tensor & addmm_(Scalar beta, const Tensor & mat1, const Tensor & mat2);
+ Tensor & addmm_(const Tensor & mat1, const Tensor & mat2);
+ Tensor addmv(Scalar beta, Scalar alpha, const Tensor & mat, const Tensor & vec) const;
+ Tensor addmv(Scalar beta, const Tensor & mat, const Tensor & vec) const;
+ Tensor addmv(const Tensor & mat, const Tensor & vec) const;
+ Tensor & addmv_(Scalar beta, Scalar alpha, const Tensor & mat, const Tensor & vec);
+ Tensor & addmv_(Scalar beta, const Tensor & mat, const Tensor & vec);
+ Tensor & addmv_(const Tensor & mat, const Tensor & vec);
+ Tensor addr(Scalar beta, Scalar alpha, const Tensor & vec1, const Tensor & vec2) const;
+ Tensor addr(Scalar beta, const Tensor & vec1, const Tensor & vec2) const;
+ Tensor addr(const Tensor & vec1, const Tensor & vec2) const;
+ Tensor & addr_(Scalar beta, Scalar alpha, const Tensor & vec1, const Tensor & vec2);
+ Tensor & addr_(Scalar beta, const Tensor & vec1, const Tensor & vec2);
+ Tensor & addr_(const Tensor & vec1, const Tensor & vec2);
+ Tensor ger(const Tensor & vec2) const;
+ Tensor mv(const Tensor & vec) const;
+ Tensor mm(const Tensor & mat2) const;
+ Tensor bmm(const Tensor & mat2) const;
+ Tensor addbmm(Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2) const;
+ Tensor addbmm(Scalar beta, const Tensor & batch1, const Tensor & batch2) const;
+ Tensor addbmm(const Tensor & batch1, const Tensor & batch2) const;
+ Tensor & addbmm_(Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2);
+ Tensor & addbmm_(Scalar beta, const Tensor & batch1, const Tensor & batch2);
+ Tensor & addbmm_(const Tensor & batch1, const Tensor & batch2);
+ Tensor baddbmm(Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2) const;
+ Tensor baddbmm(Scalar beta, const Tensor & batch1, const Tensor & batch2) const;
+ Tensor baddbmm(const Tensor & batch1, const Tensor & batch2) const;
+ Tensor & baddbmm_(Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2);
+ Tensor & baddbmm_(Scalar beta, const Tensor & batch1, const Tensor & batch2);
+ Tensor & baddbmm_(const Tensor & batch1, const Tensor & batch2);
+ Tensor addcmul(Scalar value, const Tensor & tensor1, const Tensor & tensor2) const;
+ Tensor addcmul(const Tensor & tensor1, const Tensor & tensor2) const;
+ Tensor & addcmul_(Scalar value, const Tensor & tensor1, const Tensor & tensor2);
+ Tensor & addcmul_(const Tensor & tensor1, const Tensor & tensor2);
+ Tensor addcdiv(Scalar value, const Tensor & tensor1, const Tensor & tensor2) const;
+ Tensor addcdiv(const Tensor & tensor1, const Tensor & tensor2) const;
+ Tensor & addcdiv_(Scalar value, const Tensor & tensor1, const Tensor & tensor2);
+ Tensor & addcdiv_(const Tensor & tensor1, const Tensor & tensor2);
+ // LAPACK-style decompositions and solvers.
+ std::tuple<Tensor,Tensor> gesv(const Tensor & A) const;
+ std::tuple<Tensor,Tensor> gels(const Tensor & A) const;
+ std::tuple<Tensor,Tensor> trtrs(const Tensor & A, bool upper, bool transpose, bool unitriangular) const;
+ std::tuple<Tensor,Tensor> trtrs(const Tensor & A, bool upper, bool transpose) const;
+ std::tuple<Tensor,Tensor> trtrs(const Tensor & A, bool upper) const;
+ std::tuple<Tensor,Tensor> trtrs(const Tensor & A) const;
+ std::tuple<Tensor,Tensor> symeig(bool eigenvectors, bool upper) const;
+ std::tuple<Tensor,Tensor> symeig(bool eigenvectors) const;
+ std::tuple<Tensor,Tensor> symeig() const;
+ std::tuple<Tensor,Tensor> eig(bool eigenvectors) const;
+ std::tuple<Tensor,Tensor> eig() const;
+ std::tuple<Tensor,Tensor,Tensor> svd(bool some) const;
+ std::tuple<Tensor,Tensor,Tensor> svd() const;
+ Tensor inverse() const;
+ Tensor potrf(bool upper) const;
+ Tensor potrf() const;
+ Tensor potrs(const Tensor & input2, bool upper) const;
+ Tensor potrs(const Tensor & input2) const;
+ Tensor potri(bool upper) const;
+ Tensor potri() const;
+ std::tuple<Tensor,Tensor> pstrf(bool upper, Scalar tol) const;
+ std::tuple<Tensor,Tensor> pstrf(bool upper) const;
+ std::tuple<Tensor,Tensor> pstrf(Scalar tol) const;
+ std::tuple<Tensor,Tensor> pstrf() const;
+ std::tuple<Tensor,Tensor> qr() const;
+ std::tuple<Tensor,Tensor> geqrf() const;
+ std::tuple<Tensor,const Tensor &> orgqr(const Tensor & input2) const;
+ std::tuple<Tensor,const Tensor &> ormqr(const Tensor & input2, const Tensor & input3, bool left, bool transpose) const;
+ std::tuple<Tensor,const Tensor &> ormqr(const Tensor & input2, const Tensor & input3, bool left) const;
+ std::tuple<Tensor,const Tensor &> ormqr(const Tensor & input2, const Tensor & input3) const;
+ std::tuple<Tensor,Tensor> btrifact(const Tensor & info) const;
+ std::tuple<Tensor,Tensor> btrifact() const;
+ Tensor btrisolve(const Tensor & LU_data, const Tensor & LU_pivots) const;
+ // Random sampling; overloads taking a Generator use it, others use the
+ // type's default generator.
+ Tensor multinomial(Generator & generator, int64_t num_samples, bool replacement) const;
+ Tensor multinomial(Generator & generator, int64_t num_samples) const;
+ Tensor multinomial(int64_t num_samples, bool replacement) const;
+ Tensor multinomial(int64_t num_samples) const;
+ Tensor & uniform_(Generator & generator, double from, double to);
+ Tensor & uniform_(Generator & generator, double from);
+ Tensor & uniform_(double from, double to);
+ Tensor & uniform_(Generator & generator);
+ Tensor & uniform_(double from);
+ Tensor & uniform_();
+ Tensor & cauchy_(Generator & generator, double median, double sigma);
+ Tensor & cauchy_(Generator & generator, double median);
+ Tensor & cauchy_(double median, double sigma);
+ Tensor & cauchy_(Generator & generator);
+ Tensor & cauchy_(double median);
+ Tensor & cauchy_();
+ Tensor & log_normal_(Generator & generator, double mean, double std);
+ Tensor & log_normal_(Generator & generator, double mean);
+ Tensor & log_normal_(double mean, double std);
+ Tensor & log_normal_(Generator & generator);
+ Tensor & log_normal_(double mean);
+ Tensor & log_normal_();
+ Tensor & geometric_(Generator & generator, double p);
+ Tensor & geometric_(double p);
+ int64_t size(int64_t dim) const;
+ int64_t stride(int64_t dim) const;
+ Tensor select(int dim, int64_t sliceIndex) const;
+
+ friend class Type;
+
+//TODO(zach): sort out friend classes
+public:
+ TensorImpl * pImpl;
+};
+
+// all static inline to allow for inlining of the non-dynamic part of dispatch
+// Each definition forwards through type() to the dynamically-resolved
+// implementation; the `m_` prefix on Type hooks marks method (as opposed to
+// free-function) variants in the generated Type interface.  This block is
+// generated output — keep in sync with the generator, do not hand-edit.
+inline int64_t Tensor::storage_offset() const {
+ return type().m_storage_offset(*this);
+}
+inline int64_t Tensor::ndimension() const {
+ return type().m_ndimension(*this);
+}
+inline Tensor & Tensor::resize_(IntList size) {
+ return type().m_resize_(*this, size);
+}
+inline int64_t Tensor::numel() const {
+ return type().numel(*this);
+}
+inline Tensor & Tensor::set_(Storage & storage) {
+ return type().m_set_(*this, storage);
+}
+inline Tensor & Tensor::set_(Storage & sourceStorage, int64_t storage_offset, IntList size, IntList stride) {
+ return type().m_set_(*this, sourceStorage, storage_offset, size, stride);
+}
+inline Tensor & Tensor::set_(Storage & sourceStorage, int64_t storage_offset, IntList size) {
+ return type().m_set_(*this, sourceStorage, storage_offset, size);
+}
+inline Tensor & Tensor::set_(const Tensor & source) {
+ return type().m_set_(*this, source);
+}
+inline Tensor & Tensor::set_() {
+ return type().m_set_(*this);
+}
+inline Tensor & Tensor::fill_(Scalar value) {
+ return type().m_fill_(*this, value);
+}
+inline bool Tensor::is_same_size(const Tensor & other) const {
+ return type().m_is_same_size(*this, other);
+}
+inline bool Tensor::is_contiguous() const {
+ return type().m_is_contiguous(*this);
+}
+inline bool Tensor::is_set_to(const Tensor & tensor) const {
+ return type().m_is_set_to(*this, tensor);
+}
+inline Tensor & Tensor::masked_fill_(const Tensor & mask, Scalar value) {
+ return type().m_masked_fill_(*this, mask, value);
+}
+inline Tensor & Tensor::masked_scatter_(const Tensor & mask, const Tensor & source) {
+ return type().m_masked_scatter_(*this, mask, source);
+}
+inline Tensor Tensor::masked_select(const Tensor & mask) const {
+ return type().masked_select(*this, mask);
+}
+inline Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const {
+ return type().transpose(*this, dim0, dim1);
+}
+inline Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) {
+ return type().m_transpose_(*this, dim0, dim1);
+}
+inline Tensor Tensor::t() const {
+ return type().t(*this);
+}
+inline Tensor & Tensor::t_() {
+ return type().m_t_(*this);
+}
+inline Tensor Tensor::squeeze(int64_t dim) const {
+ return type().squeeze(*this, dim);
+}
+inline Tensor Tensor::squeeze() const {
+ return type().squeeze(*this);
+}
+inline Tensor & Tensor::squeeze_(int64_t dim) {
+ return type().m_squeeze_(*this, dim);
+}
+inline Tensor & Tensor::squeeze_() {
+ return type().m_squeeze_(*this);
+}
+inline Tensor Tensor::unsqueeze(int64_t dim) const {
+ return type().unsqueeze(*this, dim);
+}
+inline Tensor & Tensor::unsqueeze_(int64_t dim) {
+ return type().m_unsqueeze_(*this, dim);
+}
+inline Tensor Tensor::nonzero() const {
+ return type().nonzero(*this);
+}
+inline Tensor Tensor::contiguous() const {
+ return type().m_contiguous(*this);
+}
+inline Tensor Tensor::clone() const {
+ return type().m_clone(*this);
+}
+inline Tensor Tensor::view(IntList size) const {
+ return type().m_view(*this, size);
+}
+inline Tensor Tensor::expand(IntList size) const {
+ return type().m_expand(*this, size);
+}
+inline Tensor & Tensor::resize_as_(const Tensor & the_template) {
+ return type().m_resize_as_(*this, the_template);
+}
+inline Tensor Tensor::index_select(int64_t dim, const Tensor & index) const {
+ return type().index_select(*this, dim, index);
+}
+inline Tensor & Tensor::index_copy_(int64_t dim, const Tensor & index, const Tensor & source) {
+ return type().m_index_copy_(*this, dim, index, source);
+}
+inline Tensor & Tensor::index_add_(int64_t dim, const Tensor & index, const Tensor & source) {
+ return type().m_index_add_(*this, dim, index, source);
+}
+inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, Scalar value) {
+ return type().m_index_fill_(*this, dim, index, value);
+}
+inline Tensor Tensor::narrow(int64_t dimension, int64_t start, int64_t length) const {
+ return type().m_narrow(*this, dimension, start, length);
+}
+inline Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) const {
+ return type().m_unfold(*this, dimension, size, step);
+}
+inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, const Tensor & src) {
+ return type().m_scatter_(*this, dim, index, src);
+}
+inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, Scalar value) {
+ return type().m_scatter_(*this, dim, index, value);
+}
+inline Tensor & Tensor::scatter_add_(int64_t dim, const Tensor & index, const Tensor & src) {
+ return type().m_scatter_add_(*this, dim, index, src);
+}
+inline Tensor Tensor::gather(int64_t dim, const Tensor & index) const {
+ return type().gather(*this, dim, index);
+}
+inline void* Tensor::data_ptr() const {
+ return type().m_data_ptr(*this);
+}
+inline bool Tensor::equal(const Tensor & other) const {
+ return type().equal(*this, other);
+}
+inline Tensor Tensor::__and__(Scalar value) const {
+ return type().__and__(*this, value);
+}
+inline Tensor Tensor::__and__(const Tensor & other) const {
+ return type().__and__(*this, other);
+}
+inline Tensor & Tensor::__iand__(Scalar value) {
+ return type().__iand__(*this, value);
+}
+inline Tensor & Tensor::__iand__(const Tensor & other) {
+ return type().__iand__(*this, other);
+}
+inline Tensor Tensor::__or__(Scalar value) const {
+ return type().__or__(*this, value);
+}
+inline Tensor Tensor::__or__(const Tensor & other) const {
+ return type().__or__(*this, other);
+}
+inline Tensor & Tensor::__ior__(Scalar value) {
+ return type().__ior__(*this, value);
+}
+inline Tensor & Tensor::__ior__(const Tensor & other) {
+ return type().__ior__(*this, other);
+}
+inline Tensor Tensor::__xor__(Scalar value) const {
+ return type().__xor__(*this, value);
+}
+inline Tensor Tensor::__xor__(const Tensor & other) const {
+ return type().__xor__(*this, other);
+}
+inline Tensor & Tensor::__ixor__(Scalar value) {
+ return type().__ixor__(*this, value);
+}
+inline Tensor & Tensor::__ixor__(const Tensor & other) {
+ return type().__ixor__(*this, other);
+}
+inline Tensor Tensor::__lshift__(Scalar value) const {
+ return type().__lshift__(*this, value);
+}
+inline Tensor Tensor::__lshift__(const Tensor & other) const {
+ return type().__lshift__(*this, other);
+}
+inline Tensor & Tensor::__ilshift__(Scalar value) {
+ return type().__ilshift__(*this, value);
+}
+inline Tensor & Tensor::__ilshift__(const Tensor & other) {
+ return type().__ilshift__(*this, other);
+}
+inline Tensor Tensor::__rshift__(Scalar value) const {
+ return type().__rshift__(*this, value);
+}
+inline Tensor Tensor::__rshift__(const Tensor & other) const {
+ return type().__rshift__(*this, other);
+}
+inline Tensor & Tensor::__irshift__(Scalar value) {
+ return type().__irshift__(*this, value);
+}
+inline Tensor & Tensor::__irshift__(const Tensor & other) {
+ return type().__irshift__(*this, other);
+}
+inline Tensor Tensor::lt(Scalar value) const {
+ return type().m_lt(*this, value);
+}
+inline Tensor Tensor::lt(const Tensor & other) const {
+ return type().m_lt(*this, other);
+}
+inline Tensor & Tensor::lt_(Scalar value) {
+ return type().m_lt_(*this, value);
+}
+inline Tensor & Tensor::lt_(const Tensor & other) {
+ return type().m_lt_(*this, other);
+}
+inline Tensor Tensor::gt(Scalar value) const {
+ return type().m_gt(*this, value);
+}
+inline Tensor Tensor::gt(const Tensor & other) const {
+ return type().m_gt(*this, other);
+}
+inline Tensor & Tensor::gt_(Scalar value) {
+ return type().m_gt_(*this, value);
+}
+inline Tensor & Tensor::gt_(const Tensor & other) {
+ return type().m_gt_(*this, other);
+}
+inline Tensor Tensor::le(Scalar value) const {
+ return type().m_le(*this, value);
+}
+inline Tensor Tensor::le(const Tensor & other) const {
+ return type().m_le(*this, other);
+}
+inline Tensor & Tensor::le_(Scalar value) {
+ return type().m_le_(*this, value);
+}
+inline Tensor & Tensor::le_(const Tensor & other) {
+ return type().m_le_(*this, other);
+}
+inline Tensor Tensor::ge(Scalar value) const {
+ return type().m_ge(*this, value);
+}
+inline Tensor Tensor::ge(const Tensor & other) const {
+ return type().m_ge(*this, other);
+}
+inline Tensor & Tensor::ge_(Scalar value) {
+ return type().m_ge_(*this, value);
+}
+inline Tensor & Tensor::ge_(const Tensor & other) {
+ return type().m_ge_(*this, other);
+}
+inline Tensor Tensor::eq(Scalar value) const {
+ return type().m_eq(*this, value);
+}
+inline Tensor Tensor::eq(const Tensor & other) const {
+ return type().m_eq(*this, other);
+}
+inline Tensor & Tensor::eq_(Scalar value) {
+ return type().m_eq_(*this, value);
+}
+inline Tensor & Tensor::eq_(const Tensor & other) {
+ return type().m_eq_(*this, other);
+}
+inline Tensor Tensor::ne(Scalar value) const {
+ return type().m_ne(*this, value);
+}
+inline Tensor Tensor::ne(const Tensor & other) const {
+ return type().m_ne(*this, other);
+}
+inline Tensor & Tensor::ne_(Scalar value) {
+ return type().m_ne_(*this, value);
+}
+inline Tensor & Tensor::ne_(const Tensor & other) {
+ return type().m_ne_(*this, other);
+}
+inline std::tuple<Tensor,Tensor> Tensor::min(int64_t dim, bool keepdim) const {
+ return type().min(*this, dim, keepdim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::min(int64_t dim) const {
+ return type().min(*this, dim);
+}
+inline Tensor Tensor::min(const Tensor & other) const {
+ return type().min(*this, other);
+}
+inline Scalar Tensor::min() const {
+ return type().min(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::max(int64_t dim, bool keepdim) const {
+ return type().max(*this, dim, keepdim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::max(int64_t dim) const {
+ return type().max(*this, dim);
+}
+inline Tensor Tensor::max(const Tensor & other) const {
+ return type().max(*this, other);
+}
+inline Scalar Tensor::max() const {
+ return type().max(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::kthvalue(int64_t k, bool keepdim) const {
+ return type().kthvalue(*this, k, keepdim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::kthvalue(int64_t k) const {
+ return type().kthvalue(*this, k);
+}
+inline std::tuple<Tensor,Tensor> Tensor::kthvalue(int64_t k, int64_t dim, bool keepdim) const {
+ return type().kthvalue(*this, k, dim, keepdim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::kthvalue(int64_t k, int64_t dim) const {
+ return type().kthvalue(*this, k, dim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::mode(bool keepdim) const {
+ return type().mode(*this, keepdim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::mode() const {
+ return type().mode(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::mode(int64_t dim, bool keepdim) const {
+ return type().mode(*this, dim, keepdim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::mode(int64_t dim) const {
+ return type().mode(*this, dim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::median(bool keepdim) const {
+ return type().median(*this, keepdim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::median() const {
+ return type().median(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::median(int64_t dim) const {
+ return type().median(*this, dim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::median(int64_t dim, bool keepdim) const {
+ return type().median(*this, dim, keepdim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::sort() const {
+ return type().sort(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::sort(int64_t dim) const {
+ return type().sort(*this, dim);
+}
+inline std::tuple<Tensor,Tensor> Tensor::sort(int64_t dim, bool descending) const {
+ return type().sort(*this, dim, descending);
+}
+inline std::tuple<Tensor,Tensor> Tensor::topk(int64_t k) const {
+ return type().topk(*this, k);
+}
+inline std::tuple<Tensor,Tensor> Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const {
+ return type().topk(*this, k, dim, largest, sorted);
+}
+inline std::tuple<Tensor,Tensor> Tensor::topk(int64_t k, int64_t dim, bool largest) const {
+ return type().topk(*this, k, dim, largest);
+}
+inline std::tuple<Tensor,Tensor> Tensor::topk(int64_t k, int64_t dim) const {
+ return type().topk(*this, k, dim);
+}
+inline bool Tensor::all() const {
+ return type().m_all(*this);
+}
+inline bool Tensor::any() const {
+ return type().m_any(*this);
+}
+inline int64_t Tensor::get_device() const {
+ return type().m_get_device(*this);
+}
+inline Tensor Tensor::abs() const {
+ return type().abs(*this);
+}
+inline Tensor & Tensor::abs_() {
+ return type().m_abs_(*this);
+}
+inline Tensor & Tensor::sigmoid_() {
+ return type().m_sigmoid_(*this);
+}
+inline Tensor Tensor::sigmoid() const {
+ return type().sigmoid(*this);
+}
+inline Tensor & Tensor::log_() {
+ return type().m_log_(*this);
+}
+inline Tensor Tensor::log() const {
+ return type().log(*this);
+}
+inline Tensor & Tensor::log1p_() {
+ return type().m_log1p_(*this);
+}
+inline Tensor Tensor::log1p() const {
+ return type().log1p(*this);
+}
+inline Tensor Tensor::lgamma() const {
+ return type().lgamma(*this);
+}
+inline Tensor & Tensor::lgamma_() {
+ return type().m_lgamma_(*this);
+}
+inline Tensor & Tensor::exp_() {
+ return type().m_exp_(*this);
+}
+inline Tensor Tensor::exp() const {
+ return type().exp(*this);
+}
+inline Tensor & Tensor::cos_() {
+ return type().m_cos_(*this);
+}
+inline Tensor Tensor::cos() const {
+ return type().cos(*this);
+}
+inline Tensor & Tensor::acos_() {
+ return type().m_acos_(*this);
+}
+inline Tensor Tensor::acos() const {
+ return type().acos(*this);
+}
+inline Tensor & Tensor::cosh_() {
+ return type().m_cosh_(*this);
+}
+inline Tensor Tensor::cosh() const {
+ return type().cosh(*this);
+}
+inline Tensor & Tensor::sin_() {
+ return type().m_sin_(*this);
+}
+inline Tensor Tensor::sin() const {
+ return type().sin(*this);
+}
+inline Tensor & Tensor::asin_() {
+ return type().m_asin_(*this);
+}
+inline Tensor Tensor::asin() const {
+ return type().asin(*this);
+}
+inline Tensor & Tensor::sinh_() {
+ return type().m_sinh_(*this);
+}
+inline Tensor Tensor::sinh() const {
+ return type().sinh(*this);
+}
+inline Tensor & Tensor::tan_() {
+ return type().m_tan_(*this);
+}
+inline Tensor Tensor::tan() const {
+ return type().tan(*this);
+}
+inline Tensor & Tensor::atan_() {
+ return type().m_atan_(*this);
+}
+inline Tensor Tensor::atan() const {
+ return type().atan(*this);
+}
+inline Tensor & Tensor::tanh_() {
+ return type().m_tanh_(*this);
+}
+inline Tensor Tensor::tanh() const {
+ return type().tanh(*this);
+}
+inline Tensor & Tensor::sqrt_() {
+ return type().m_sqrt_(*this);
+}
+inline Tensor Tensor::sqrt() const {
+ return type().sqrt(*this);
+}
+inline Tensor & Tensor::rsqrt_() {
+ return type().m_rsqrt_(*this);
+}
+inline Tensor Tensor::rsqrt() const {
+ return type().rsqrt(*this);
+}
+inline Tensor & Tensor::ceil_() {
+ return type().m_ceil_(*this);
+}
+inline Tensor Tensor::ceil() const {
+ return type().ceil(*this);
+}
+inline Tensor & Tensor::floor_() {
+ return type().m_floor_(*this);
+}
+inline Tensor Tensor::floor() const {
+ return type().floor(*this);
+}
+inline Tensor & Tensor::round_() {
+ return type().m_round_(*this);
+}
+inline Tensor Tensor::round() const {
+ return type().round(*this);
+}
+inline Tensor & Tensor::trunc_() {
+ return type().m_trunc_(*this);
+}
+inline Tensor Tensor::trunc() const {
+ return type().trunc(*this);
+}
+inline Tensor & Tensor::frac_() {
+ return type().m_frac_(*this);
+}
+inline Tensor Tensor::frac() const {
+ return type().frac(*this);
+}
+inline Tensor Tensor::mean(int64_t dim, bool keepdim) const {
+ return type().mean(*this, dim, keepdim);
+}
+inline Tensor Tensor::mean(int64_t dim) const {
+ return type().mean(*this, dim);
+}
+inline Scalar Tensor::mean() const {
+ return type().mean(*this);
+}
+inline Tensor Tensor::var(int64_t dim, bool keepdim) const {
+ return type().var(*this, dim, keepdim);
+}
+inline Tensor Tensor::var(int64_t dim) const {
+ return type().var(*this, dim);
+}
+inline Scalar Tensor::var() const {
+ return type().var(*this);
+}
+inline Tensor Tensor::std(int64_t dim, bool keepdim) const {
+ return type().std(*this, dim, keepdim);
+}
+inline Tensor Tensor::std(int64_t dim) const {
+ return type().std(*this, dim);
+}
+inline Scalar Tensor::std() const {
+ return type().std(*this);
+}
+inline Tensor Tensor::norm(Scalar p, int64_t dim, bool keepdim) const {
+ return type().norm(*this, p, dim, keepdim);
+}
+inline Tensor Tensor::norm(Scalar p, int64_t dim) const {
+ return type().norm(*this, p, dim);
+}
+inline Scalar Tensor::norm(Scalar p) const {
+ return type().norm(*this, p);
+}
+inline Scalar Tensor::norm() const {
+ return type().norm(*this);
+}
+inline Tensor Tensor::renorm(Scalar p, int64_t dim, Scalar maxnorm) const {
+ return type().renorm(*this, p, dim, maxnorm);
+}
+inline Tensor & Tensor::renorm_(Scalar p, int64_t dim, Scalar maxnorm) {
+ return type().m_renorm_(*this, p, dim, maxnorm);
+}
+inline Scalar Tensor::dist(const Tensor & other, Scalar p) const {
+ return type().dist(*this, other, p);
+}
+inline Scalar Tensor::dist(const Tensor & other) const {
+ return type().dist(*this, other);
+}
+inline Tensor Tensor::reciprocal() const {
+ return type().reciprocal(*this);
+}
+inline Tensor & Tensor::reciprocal_() {
+ return type().m_reciprocal_(*this);
+}
+inline Tensor Tensor::neg() const {
+ return type().neg(*this);
+}
+inline Tensor & Tensor::neg_() {
+ return type().m_neg_(*this);
+}
+inline Tensor Tensor::atan2(const Tensor & other) const {
+ return type().atan2(*this, other);
+}
+inline Tensor & Tensor::atan2_(const Tensor & other) {
+ return type().m_atan2_(*this, other);
+}
+inline Tensor Tensor::pow(Scalar exponent) const {
+ return type().pow(*this, exponent);
+}
+inline Tensor Tensor::pow(const Tensor & exponent) const {
+ return type().pow(*this, exponent);
+}
+inline Tensor & Tensor::pow_(Scalar exponent) {
+ return type().m_pow_(*this, exponent);
+}
+inline Tensor & Tensor::pow_(const Tensor & exponent) {
+ return type().m_pow_(*this, exponent);
+}
+inline Tensor Tensor::lerp(const Tensor & end, Scalar weight) const {
+ return type().lerp(*this, end, weight);
+}
+inline Tensor & Tensor::lerp_(const Tensor & end, Scalar weight) {
+ return type().m_lerp_(*this, end, weight);
+}
+inline Tensor Tensor::histc() const {
+ return type().histc(*this);
+}
+inline Tensor Tensor::histc(int64_t bins) const {
+ return type().histc(*this, bins);
+}
+inline Tensor Tensor::histc(int64_t bins, Scalar min) const {
+ return type().histc(*this, bins, min);
+}
+inline Tensor Tensor::histc(int64_t bins, Scalar min, Scalar max) const {
+ return type().histc(*this, bins, min, max);
+}
+inline Tensor & Tensor::zero_() {
+ return type().m_zero_(*this);
+}
+inline Tensor Tensor::sum(int64_t dim, bool keepdim) const {
+ return type().sum(*this, dim, keepdim);
+}
+inline Tensor Tensor::sum(int64_t dim) const {
+ return type().sum(*this, dim);
+}
+inline Scalar Tensor::sum() const {
+ return type().sum(*this);
+}
+inline Tensor Tensor::prod(int64_t dim, bool keepdim) const {
+ return type().prod(*this, dim, keepdim);
+}
+inline Tensor Tensor::prod(int64_t dim) const {
+ return type().prod(*this, dim);
+}
+inline Scalar Tensor::prod() const {
+ return type().prod(*this);
+}
+inline Tensor Tensor::cumsum(int64_t dim) const {
+ return type().cumsum(*this, dim);
+}
+inline Tensor Tensor::cumprod(int64_t dim) const {
+ return type().cumprod(*this, dim);
+}
+inline Tensor Tensor::sign() const {
+ return type().sign(*this);
+}
+inline Tensor & Tensor::sign_() {
+ return type().m_sign_(*this);
+}
+inline Scalar Tensor::trace() const {
+ return type().trace(*this);
+}
+inline Tensor Tensor::add(Scalar value, const Tensor & other) const {
+ return type().add(*this, value, other);
+}
+inline Tensor Tensor::add(Scalar value) const {
+ return type().add(*this, value);
+}
+inline Tensor Tensor::add(const Tensor & other) const {
+ return type().add(*this, other);
+}
+inline Tensor & Tensor::add_(Scalar value, const Tensor & other) {
+ return type().m_add_(*this, value, other);
+}
+inline Tensor & Tensor::add_(Scalar value) {
+ return type().m_add_(*this, value);
+}
+inline Tensor & Tensor::add_(const Tensor & other) {
+ return type().m_add_(*this, other);
+}
+inline Tensor Tensor::sub(Scalar value, const Tensor & other) const {
+ return type().sub(*this, value, other);
+}
+inline Tensor Tensor::sub(Scalar value) const {
+ return type().sub(*this, value);
+}
+inline Tensor Tensor::sub(const Tensor & other) const {
+ return type().sub(*this, other);
+}
+inline Tensor & Tensor::sub_(Scalar value, const Tensor & other) {
+ return type().m_sub_(*this, value, other);
+}
+inline Tensor & Tensor::sub_(Scalar value) {
+ return type().m_sub_(*this, value);
+}
+inline Tensor & Tensor::sub_(const Tensor & other) {
+ return type().m_sub_(*this, other);
+}
+inline Tensor Tensor::mul(Scalar value) const {
+ return type().mul(*this, value);
+}
+inline Tensor Tensor::mul(const Tensor & other) const {
+ return type().mul(*this, other);
+}
+inline Tensor & Tensor::mul_(Scalar value) {
+ return type().m_mul_(*this, value);
+}
+inline Tensor & Tensor::mul_(const Tensor & other) {
+ return type().m_mul_(*this, other);
+}
+inline Tensor Tensor::div(Scalar value) const {
+ return type().div(*this, value);
+}
+inline Tensor Tensor::div(const Tensor & other) const {
+ return type().div(*this, other);
+}
+inline Tensor & Tensor::div_(Scalar value) {
+ return type().m_div_(*this, value);
+}
+inline Tensor & Tensor::div_(const Tensor & other) {
+ return type().m_div_(*this, other);
+}
+inline Tensor Tensor::fmod(Scalar value) const {
+ return type().fmod(*this, value);
+}
+inline Tensor Tensor::fmod(const Tensor & other) const {
+ return type().fmod(*this, other);
+}
+inline Tensor & Tensor::fmod_(Scalar value) {
+ return type().m_fmod_(*this, value);
+}
+inline Tensor & Tensor::fmod_(const Tensor & other) {
+ return type().m_fmod_(*this, other);
+}
+inline Tensor Tensor::remainder(Scalar value) const {
+ return type().remainder(*this, value);
+}
+inline Tensor Tensor::remainder(const Tensor & other) const {
+ return type().remainder(*this, other);
+}
+inline Tensor & Tensor::remainder_(Scalar value) {
+ return type().m_remainder_(*this, value);
+}
+inline Tensor & Tensor::remainder_(const Tensor & other) {
+ return type().m_remainder_(*this, other);
+}
+inline Tensor Tensor::clamp(Scalar min, Scalar max) const {
+ return type().clamp(*this, min, max);
+}
+inline Tensor Tensor::clamp(Scalar min) const {
+ return type().clamp(*this, min);
+}
+inline Tensor & Tensor::clamp_(Scalar min, Scalar max) {
+ return type().m_clamp_(*this, min, max);
+}
+inline Tensor & Tensor::clamp_(Scalar min) {
+ return type().m_clamp_(*this, min);
+}
+inline Scalar Tensor::dot(const Tensor & tensor) const {
+ return type().dot(*this, tensor);
+}
+inline Tensor Tensor::tril(int64_t k) const {
+ return type().tril(*this, k);
+}
+inline Tensor Tensor::tril() const {
+ return type().tril(*this);
+}
+inline Tensor & Tensor::tril_(int64_t k) {
+ return type().m_tril_(*this, k);
+}
+inline Tensor & Tensor::tril_() {
+ return type().m_tril_(*this);
+}
+inline Tensor Tensor::triu(int64_t k) const {
+ return type().triu(*this, k);
+}
+inline Tensor Tensor::triu() const {
+ return type().triu(*this);
+}
+inline Tensor & Tensor::triu_(int64_t k) {
+ return type().m_triu_(*this, k);
+}
+inline Tensor & Tensor::triu_() {
+ return type().m_triu_(*this);
+}
+inline Tensor Tensor::cross(const Tensor & other, int64_t dim) const {
+ return type().cross(*this, other, dim);
+}
+inline Tensor Tensor::cross(const Tensor & other) const {
+ return type().cross(*this, other);
+}
+inline Tensor Tensor::diag(int64_t diagonal) const {
+ return type().diag(*this, diagonal);
+}
+inline Tensor Tensor::diag() const {
+ return type().diag(*this);
+}
+inline Tensor Tensor::addmm(Scalar beta, Scalar alpha, const Tensor & mat1, const Tensor & mat2) const {
+ return type().addmm(beta, *this, alpha, mat1, mat2);
+}
+inline Tensor Tensor::addmm(Scalar beta, const Tensor & mat1, const Tensor & mat2) const {
+ return type().addmm(beta, *this, mat1, mat2);
+}
+inline Tensor Tensor::addmm(const Tensor & mat1, const Tensor & mat2) const {
+ return type().addmm(*this, mat1, mat2);
+}
+inline Tensor & Tensor::addmm_(Scalar beta, Scalar alpha, const Tensor & mat1, const Tensor & mat2) {
+ return type().m_addmm_(*this, beta, alpha, mat1, mat2);
+}
+inline Tensor & Tensor::addmm_(Scalar beta, const Tensor & mat1, const Tensor & mat2) {
+ return type().m_addmm_(*this, beta, mat1, mat2);
+}
+inline Tensor & Tensor::addmm_(const Tensor & mat1, const Tensor & mat2) {
+ return type().m_addmm_(*this, mat1, mat2);
+}
+inline Tensor Tensor::addmv(Scalar beta, Scalar alpha, const Tensor & mat, const Tensor & vec) const {
+ return type().addmv(beta, *this, alpha, mat, vec);
+}
+inline Tensor Tensor::addmv(Scalar beta, const Tensor & mat, const Tensor & vec) const {
+ return type().addmv(beta, *this, mat, vec);
+}
+inline Tensor Tensor::addmv(const Tensor & mat, const Tensor & vec) const {
+ return type().addmv(*this, mat, vec);
+}
+inline Tensor & Tensor::addmv_(Scalar beta, Scalar alpha, const Tensor & mat, const Tensor & vec) {
+ return type().m_addmv_(*this, beta, alpha, mat, vec);
+}
+inline Tensor & Tensor::addmv_(Scalar beta, const Tensor & mat, const Tensor & vec) {
+ return type().m_addmv_(*this, beta, mat, vec);
+}
+inline Tensor & Tensor::addmv_(const Tensor & mat, const Tensor & vec) {
+ return type().m_addmv_(*this, mat, vec);
+}
+inline Tensor Tensor::addr(Scalar beta, Scalar alpha, const Tensor & vec1, const Tensor & vec2) const {
+ return type().addr(beta, *this, alpha, vec1, vec2);
+}
+inline Tensor Tensor::addr(Scalar beta, const Tensor & vec1, const Tensor & vec2) const {
+ return type().addr(beta, *this, vec1, vec2);
+}
+inline Tensor Tensor::addr(const Tensor & vec1, const Tensor & vec2) const {
+ return type().addr(*this, vec1, vec2);
+}
+inline Tensor & Tensor::addr_(Scalar beta, Scalar alpha, const Tensor & vec1, const Tensor & vec2) {
+ return type().m_addr_(*this, beta, alpha, vec1, vec2);
+}
+inline Tensor & Tensor::addr_(Scalar beta, const Tensor & vec1, const Tensor & vec2) {
+ return type().m_addr_(*this, beta, vec1, vec2);
+}
+inline Tensor & Tensor::addr_(const Tensor & vec1, const Tensor & vec2) {
+ return type().m_addr_(*this, vec1, vec2);
+}
+inline Tensor Tensor::ger(const Tensor & vec2) const {
+ return type().ger(*this, vec2);
+}
+inline Tensor Tensor::mv(const Tensor & vec) const {
+ return type().mv(*this, vec);
+}
+inline Tensor Tensor::mm(const Tensor & mat2) const {
+ return type().mm(*this, mat2);
+}
+inline Tensor Tensor::bmm(const Tensor & mat2) const {
+ return type().bmm(*this, mat2);
+}
+inline Tensor Tensor::addbmm(Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2) const {
+ return type().addbmm(beta, *this, alpha, batch1, batch2);
+}
+inline Tensor Tensor::addbmm(Scalar beta, const Tensor & batch1, const Tensor & batch2) const {
+ return type().addbmm(beta, *this, batch1, batch2);
+}
+inline Tensor Tensor::addbmm(const Tensor & batch1, const Tensor & batch2) const {
+ return type().addbmm(*this, batch1, batch2);
+}
+inline Tensor & Tensor::addbmm_(Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2) {
+ return type().m_addbmm_(*this, beta, alpha, batch1, batch2);
+}
+inline Tensor & Tensor::addbmm_(Scalar beta, const Tensor & batch1, const Tensor & batch2) {
+ return type().m_addbmm_(*this, beta, batch1, batch2);
+}
+inline Tensor & Tensor::addbmm_(const Tensor & batch1, const Tensor & batch2) {
+ return type().m_addbmm_(*this, batch1, batch2);
+}
+inline Tensor Tensor::baddbmm(Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2) const {
+ return type().baddbmm(beta, *this, alpha, batch1, batch2);
+}
+inline Tensor Tensor::baddbmm(Scalar beta, const Tensor & batch1, const Tensor & batch2) const {
+ return type().baddbmm(beta, *this, batch1, batch2);
+}
+inline Tensor Tensor::baddbmm(const Tensor & batch1, const Tensor & batch2) const {
+ return type().baddbmm(*this, batch1, batch2);
+}
+inline Tensor & Tensor::baddbmm_(Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2) {
+ return type().m_baddbmm_(*this, beta, alpha, batch1, batch2);
+}
+inline Tensor & Tensor::baddbmm_(Scalar beta, const Tensor & batch1, const Tensor & batch2) {
+ return type().m_baddbmm_(*this, beta, batch1, batch2);
+}
+inline Tensor & Tensor::baddbmm_(const Tensor & batch1, const Tensor & batch2) {
+ return type().m_baddbmm_(*this, batch1, batch2);
+}
+inline Tensor Tensor::addcmul(Scalar value, const Tensor & tensor1, const Tensor & tensor2) const {
+ return type().addcmul(*this, value, tensor1, tensor2);
+}
+inline Tensor Tensor::addcmul(const Tensor & tensor1, const Tensor & tensor2) const {
+ return type().addcmul(*this, tensor1, tensor2);
+}
+inline Tensor & Tensor::addcmul_(Scalar value, const Tensor & tensor1, const Tensor & tensor2) {
+ return type().m_addcmul_(*this, value, tensor1, tensor2);
+}
+inline Tensor & Tensor::addcmul_(const Tensor & tensor1, const Tensor & tensor2) {
+ return type().m_addcmul_(*this, tensor1, tensor2);
+}
+inline Tensor Tensor::addcdiv(Scalar value, const Tensor & tensor1, const Tensor & tensor2) const {
+ return type().addcdiv(*this, value, tensor1, tensor2);
+}
+inline Tensor Tensor::addcdiv(const Tensor & tensor1, const Tensor & tensor2) const {
+ return type().addcdiv(*this, tensor1, tensor2);
+}
+inline Tensor & Tensor::addcdiv_(Scalar value, const Tensor & tensor1, const Tensor & tensor2) {
+ return type().m_addcdiv_(*this, value, tensor1, tensor2);
+}
+inline Tensor & Tensor::addcdiv_(const Tensor & tensor1, const Tensor & tensor2) {
+ return type().m_addcdiv_(*this, tensor1, tensor2);
+}
+inline std::tuple<Tensor,Tensor> Tensor::gesv(const Tensor & A) const {
+ return type().gesv(*this, A);
+}
+inline std::tuple<Tensor,Tensor> Tensor::gels(const Tensor & A) const {
+ return type().gels(*this, A);
+}
+inline std::tuple<Tensor,Tensor> Tensor::trtrs(const Tensor & A, bool upper, bool transpose, bool unitriangular) const {
+ return type().trtrs(*this, A, upper, transpose, unitriangular);
+}
+inline std::tuple<Tensor,Tensor> Tensor::trtrs(const Tensor & A, bool upper, bool transpose) const {
+ return type().trtrs(*this, A, upper, transpose);
+}
+inline std::tuple<Tensor,Tensor> Tensor::trtrs(const Tensor & A, bool upper) const {
+ return type().trtrs(*this, A, upper);
+}
+inline std::tuple<Tensor,Tensor> Tensor::trtrs(const Tensor & A) const {
+ return type().trtrs(*this, A);
+}
+inline std::tuple<Tensor,Tensor> Tensor::symeig(bool eigenvectors, bool upper) const {
+ return type().symeig(*this, eigenvectors, upper);
+}
+inline std::tuple<Tensor,Tensor> Tensor::symeig(bool eigenvectors) const {
+ return type().symeig(*this, eigenvectors);
+}
+inline std::tuple<Tensor,Tensor> Tensor::symeig() const {
+ return type().symeig(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::eig(bool eigenvectors) const {
+ return type().eig(*this, eigenvectors);
+}
+inline std::tuple<Tensor,Tensor> Tensor::eig() const {
+ return type().eig(*this);
+}
+inline std::tuple<Tensor,Tensor,Tensor> Tensor::svd(bool some) const {
+ return type().svd(*this, some);
+}
+inline std::tuple<Tensor,Tensor,Tensor> Tensor::svd() const {
+ return type().svd(*this);
+}
+inline Tensor Tensor::inverse() const {
+ return type().inverse(*this);
+}
+inline Tensor Tensor::potrf(bool upper) const {
+ return type().potrf(*this, upper);
+}
+inline Tensor Tensor::potrf() const {
+ return type().potrf(*this);
+}
+inline Tensor Tensor::potrs(const Tensor & input2, bool upper) const {
+ return type().potrs(*this, input2, upper);
+}
+inline Tensor Tensor::potrs(const Tensor & input2) const {
+ return type().potrs(*this, input2);
+}
+inline Tensor Tensor::potri(bool upper) const {
+ return type().potri(*this, upper);
+}
+inline Tensor Tensor::potri() const {
+ return type().potri(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::pstrf(bool upper, Scalar tol) const {
+ return type().pstrf(*this, upper, tol);
+}
+inline std::tuple<Tensor,Tensor> Tensor::pstrf(bool upper) const {
+ return type().pstrf(*this, upper);
+}
+inline std::tuple<Tensor,Tensor> Tensor::pstrf(Scalar tol) const {
+ return type().pstrf(*this, tol);
+}
+inline std::tuple<Tensor,Tensor> Tensor::pstrf() const {
+ return type().pstrf(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::qr() const {
+ return type().qr(*this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::geqrf() const {
+ return type().geqrf(*this);
+}
+inline std::tuple<Tensor,const Tensor &> Tensor::orgqr(const Tensor & input2) const {
+ return type().orgqr(*this, input2);
+}
+inline std::tuple<Tensor,const Tensor &> Tensor::ormqr(const Tensor & input2, const Tensor & input3, bool left, bool transpose) const {
+ return type().ormqr(*this, input2, input3, left, transpose);
+}
+inline std::tuple<Tensor,const Tensor &> Tensor::ormqr(const Tensor & input2, const Tensor & input3, bool left) const {
+ return type().ormqr(*this, input2, input3, left);
+}
+inline std::tuple<Tensor,const Tensor &> Tensor::ormqr(const Tensor & input2, const Tensor & input3) const {
+ return type().ormqr(*this, input2, input3);
+}
+inline std::tuple<Tensor,Tensor> Tensor::btrifact(const Tensor & info) const {
+ return type().btrifact(info, *this);
+}
+inline std::tuple<Tensor,Tensor> Tensor::btrifact() const {
+ return type().btrifact(*this);
+}
+inline Tensor Tensor::btrisolve(const Tensor & LU_data, const Tensor & LU_pivots) const {
+ return type().btrisolve(*this, LU_data, LU_pivots);
+}
+inline Tensor Tensor::multinomial(Generator & generator, int64_t num_samples, bool replacement) const {
+ return type().multinomial(generator, *this, num_samples, replacement);
+}
+inline Tensor Tensor::multinomial(Generator & generator, int64_t num_samples) const {
+ return type().multinomial(generator, *this, num_samples);
+}
+inline Tensor Tensor::multinomial(int64_t num_samples, bool replacement) const {
+ return type().multinomial(*this, num_samples, replacement);
+}
+inline Tensor Tensor::multinomial(int64_t num_samples) const {
+ return type().multinomial(*this, num_samples);
+}
+inline Tensor & Tensor::uniform_(Generator & generator, double from, double to) {
+ return type().m_uniform_(*this, generator, from, to);
+}
+inline Tensor & Tensor::uniform_(Generator & generator, double from) {
+ return type().m_uniform_(*this, generator, from);
+}
+inline Tensor & Tensor::uniform_(double from, double to) {
+ return type().m_uniform_(*this, from, to);
+}
+inline Tensor & Tensor::uniform_(Generator & generator) {
+ return type().m_uniform_(*this, generator);
+}
+inline Tensor & Tensor::uniform_(double from) {
+ return type().m_uniform_(*this, from);
+}
+inline Tensor & Tensor::uniform_() {
+ return type().m_uniform_(*this);
+}
+inline Tensor & Tensor::cauchy_(Generator & generator, double median, double sigma) {
+ return type().m_cauchy_(*this, generator, median, sigma);
+}
+inline Tensor & Tensor::cauchy_(Generator & generator, double median) {
+ return type().m_cauchy_(*this, generator, median);
+}
+inline Tensor & Tensor::cauchy_(double median, double sigma) {
+ return type().m_cauchy_(*this, median, sigma);
+}
+inline Tensor & Tensor::cauchy_(Generator & generator) {
+ return type().m_cauchy_(*this, generator);
+}
+inline Tensor & Tensor::cauchy_(double median) {
+ return type().m_cauchy_(*this, median);
+}
+inline Tensor & Tensor::cauchy_() {
+ return type().m_cauchy_(*this);
+}
+inline Tensor & Tensor::log_normal_(Generator & generator, double mean, double std) {
+ return type().m_log_normal_(*this, generator, mean, std);
+}
+inline Tensor & Tensor::log_normal_(Generator & generator, double mean) {
+ return type().m_log_normal_(*this, generator, mean);
+}
+inline Tensor & Tensor::log_normal_(double mean, double std) {
+ return type().m_log_normal_(*this, mean, std);
+}
+inline Tensor & Tensor::log_normal_(Generator & generator) {
+ return type().m_log_normal_(*this, generator);
+}
+inline Tensor & Tensor::log_normal_(double mean) {
+ return type().m_log_normal_(*this, mean);
+}
+inline Tensor & Tensor::log_normal_() {
+ return type().m_log_normal_(*this);
+}
+inline Tensor & Tensor::geometric_(Generator & generator, double p) {
+ return type().m_geometric_(*this, generator, p);
+}
+inline Tensor & Tensor::geometric_(double p) {
+ return type().m_geometric_(*this, p);
+}
+inline int64_t Tensor::size(int64_t dim) const {
+ return type().m_size(*this, dim);
+}
+inline int64_t Tensor::stride(int64_t dim) const {
+ return type().m_stride(*this, dim);
+}
+inline Tensor Tensor::select(int dim, int64_t sliceIndex) const {
+ return type().select(*this, dim, sliceIndex);
+}
+
+template<typename T>
+inline T* Tensor::data() const {
+ runtime_error("data() cast to unexpected type.");
+}
+#define DEFINE_CAST(T,name,_) \
+template<> \
+inline T* Tensor::data() const { \
+ AT_ASSERT(type().scalarType() == ScalarType::name, \
+    "expected scalar type %s but found %s", #name, \
+ at::toString(type().scalarType())); \
+ return static_cast<T*>(this->data_ptr()); \
+}
+
+AT_FORALL_SCALAR_TYPES(DEFINE_CAST)
+#undef DEFINE_CAST
+
+} //namespace at
diff --git a/aten/doc/Type.h b/aten/doc/Type.h
new file mode 100644
index 0000000000..7105558b61
--- /dev/null
+++ b/aten/doc/Type.h
@@ -0,0 +1,901 @@
+#pragma once
+
+#include <memory>
+
+#include "ATen/Scalar.h"
+#include "ATen/ArrayRef.h"
+
+namespace at {
+
+class Context;
+class Storage;
+class Tensor;
+class TensorRef;
+class Generator;
+
+enum class ScalarType {
+#define DEFINE_ENUM(_1,n,_2) \
+ n,
+ AT_FORALL_SCALAR_TYPES(DEFINE_ENUM)
+#undef DEFINE_ENUM
+ NumOptions
+};
+
+enum class Backend {
+ CPU,
+ CUDA,
+ NumOptions
+};
+
+constexpr Backend kCPU = Backend::CPU;
+constexpr Backend kCUDA = Backend::CUDA;
+
+static inline const char * toString(Backend b) {
+ switch(b) {
+ case Backend::CPU: return "CPU";
+ case Backend::CUDA: return "CUDA";
+ default: return "UNKNOWN_BACKEND";
+ }
+}
+
+#define DEFINE_CONSTANT(_,name,_2) \
+constexpr ScalarType k##name = ScalarType::name;
+
+AT_FORALL_SCALAR_TYPES(DEFINE_CONSTANT)
+#undef DEFINE_CONSTANT
+
+static inline const char * toString(ScalarType t) {
+#define DEFINE_CASE(_,name,_2) \
+ case ScalarType:: name : return #name;
+
+ switch(t) {
+ AT_FORALL_SCALAR_TYPES(DEFINE_CASE)
+ default:
+ return "UNKNOWN_SCALAR_TYPE";
+ }
+#undef DEFINE_CASE
+}
+
+struct CPUTag {
+ static constexpr Backend value = Backend::CPU;
+};
+struct CUDATag {
+ static constexpr Backend value = Backend::CUDA;
+};
+
+enum class TypeID {
+ CPUByte,
+ CPUChar,
+ CPUDouble,
+ CPUFloat,
+ CPUInt,
+ CPULong,
+ CPUShort,
+ CPUHalf,
+ CUDAByte,
+ CUDAChar,
+ CUDADouble,
+ CUDAFloat,
+ CUDAInt,
+ CUDALong,
+ CUDAShort,
+ CUDAHalf,
+ NumOptions
+};
+
+
+typedef ArrayRef<int64_t> IntList;
+
+struct Type {
+ Type(Context * context)
+ : context(context) {}
+ virtual ScalarType scalarType() = 0;
+ virtual Backend backend() = 0;
+ virtual bool isSparse() = 0;
+ virtual bool isDistributed() = 0;
+ static void registerAll(Context * context);
+ virtual std::unique_ptr<Storage> storage() = 0;
+ virtual std::unique_ptr<Storage> storage(size_t size) = 0;
+ virtual std::unique_ptr<Generator> generator() = 0;
+ virtual const char * toString() const = 0;
+ Type & toBackend(Backend b);
+ Type & toScalarType(ScalarType s);
+
+  // contiguous IDs for all types in the system
+ // for external dispatch
+ virtual TypeID ID() const = 0;
+
+ // example
+ // virtual Tensor * add(Tensor & a, Tensor & b) = 0;
+ virtual void copy(const Tensor & src, Tensor & dst) = 0;
+ Tensor copy(const Tensor & src);
+ virtual int64_t m_storage_offset(const Tensor & self) ;
+ virtual int64_t m_ndimension(const Tensor & self) ;
+ virtual Tensor & m_resize_(Tensor & self, IntList size) ;
+ virtual Tensor & zeros_out(IntList size, Tensor & result) ;
+ virtual Tensor zeros(IntList size) ;
+ virtual Tensor & ones_out(IntList size, Tensor & result) ;
+ virtual Tensor ones(IntList size) ;
+ virtual int64_t numel(const Tensor & self) ;
+ virtual Tensor & m_set_(Tensor & self, Storage & storage) ;
+ virtual Tensor & m_set_(Tensor & self, Storage & sourceStorage, int64_t storage_offset, IntList size, IntList stride) ;
+ virtual Tensor & m_set_(Tensor & self, Storage & sourceStorage, int64_t storage_offset, IntList size) ;
+ virtual Tensor & m_set_(Tensor & self, const Tensor & source) ;
+ virtual Tensor & m_set_(Tensor & self) ;
+ virtual Tensor & m_fill_(Tensor & self, Scalar value) ;
+ virtual bool m_is_same_size(const Tensor & self, const Tensor & other) ;
+ virtual bool m_is_contiguous(const Tensor & self) ;
+ virtual bool m_is_set_to(const Tensor & self, const Tensor & tensor) ;
+ virtual Tensor & m_masked_fill_(Tensor & self, const Tensor & mask, Scalar value) ;
+ virtual Tensor & m_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) ;
+ virtual Tensor & masked_select_out(const Tensor & self, const Tensor & mask, Tensor & result) ;
+ virtual Tensor masked_select(const Tensor & self, const Tensor & mask) ;
+ virtual Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1) ;
+ virtual Tensor & m_transpose_(Tensor & self, int64_t dim0, int64_t dim1) ;
+ virtual Tensor t(const Tensor & self) ;
+ virtual Tensor & m_t_(Tensor & self) ;
+ virtual Tensor & squeeze_out(const Tensor & self, int64_t dim, Tensor & result) ;
+ virtual Tensor squeeze(const Tensor & self, int64_t dim) ;
+ virtual Tensor & squeeze_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor squeeze(const Tensor & self) ;
+ virtual Tensor & m_squeeze_(Tensor & self, int64_t dim) ;
+ virtual Tensor & m_squeeze_(Tensor & self) ;
+ virtual Tensor & unsqueeze_out(const Tensor & self, int64_t dim, Tensor & result) ;
+ virtual Tensor unsqueeze(const Tensor & self, int64_t dim) ;
+ virtual Tensor & m_unsqueeze_(Tensor & self, int64_t dim) ;
+ virtual Tensor & nonzero_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor nonzero(const Tensor & self) ;
+ virtual Tensor m_contiguous(const Tensor & self) ;
+ virtual Tensor m_clone(const Tensor & self) ;
+ virtual Tensor m_view(const Tensor & self, IntList size) ;
+ virtual Tensor m_expand(const Tensor & self, IntList size) ;
+ virtual Tensor & m_resize_as_(Tensor & self, const Tensor & the_template) ;
+ virtual Tensor & index_select_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & result) ;
+ virtual Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index) ;
+ virtual Tensor & m_index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) ;
+ virtual Tensor & m_index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) ;
+ virtual Tensor & m_index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) ;
+ virtual Tensor m_narrow(const Tensor & self, int64_t dimension, int64_t start, int64_t length) ;
+ virtual Tensor m_unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step) ;
+ virtual Tensor & range_out(Scalar start, Scalar end, Scalar step, Tensor & result) ;
+ virtual Tensor range(Scalar start, Scalar end, Scalar step) ;
+ virtual Tensor & range_out(Scalar start, Scalar end, Tensor & result) ;
+ virtual Tensor range(Scalar start, Scalar end) ;
+ virtual Tensor & m_scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) ;
+ virtual Tensor & m_scatter_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) ;
+ virtual Tensor & m_scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) ;
+ virtual Tensor & gather_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & result) ;
+ virtual Tensor gather(const Tensor & self, int64_t dim, const Tensor & index) ;
+ virtual void* m_data_ptr(const Tensor & self) ;
+ virtual bool equal(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & __and___out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor __and__(const Tensor & self, Scalar value) ;
+ virtual Tensor & __and___out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor __and__(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & __iand__(Tensor & self, Scalar value) ;
+ virtual Tensor & __iand__(Tensor & self, const Tensor & other) ;
+ virtual Tensor & __or___out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor __or__(const Tensor & self, Scalar value) ;
+ virtual Tensor & __or___out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor __or__(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & __ior__(Tensor & self, Scalar value) ;
+ virtual Tensor & __ior__(Tensor & self, const Tensor & other) ;
+ virtual Tensor & __xor___out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor __xor__(const Tensor & self, Scalar value) ;
+ virtual Tensor & __xor___out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor __xor__(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & __ixor__(Tensor & self, Scalar value) ;
+ virtual Tensor & __ixor__(Tensor & self, const Tensor & other) ;
+ virtual Tensor & __lshift___out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor __lshift__(const Tensor & self, Scalar value) ;
+ virtual Tensor & __lshift___out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor __lshift__(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & __ilshift__(Tensor & self, Scalar value) ;
+ virtual Tensor & __ilshift__(Tensor & self, const Tensor & other) ;
+ virtual Tensor & __rshift___out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor __rshift__(const Tensor & self, Scalar value) ;
+ virtual Tensor & __rshift___out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor __rshift__(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & __irshift__(Tensor & self, Scalar value) ;
+ virtual Tensor & __irshift__(Tensor & self, const Tensor & other) ;
+ virtual Tensor m_lt(const Tensor & self, Scalar value) ;
+ virtual Tensor m_lt(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_lt_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_lt_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & lt_out(const Tensor & tensor, Scalar value, Tensor & result) ;
+ virtual Tensor lt(const Tensor & tensor, Scalar value) ;
+ virtual Tensor & lt_out(const Tensor & tensor, const Tensor & other, Tensor & result) ;
+ virtual Tensor lt(const Tensor & tensor, const Tensor & other) ;
+ virtual Tensor m_gt(const Tensor & self, Scalar value) ;
+ virtual Tensor m_gt(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_gt_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_gt_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & gt_out(const Tensor & tensor, Scalar value, Tensor & result) ;
+ virtual Tensor gt(const Tensor & tensor, Scalar value) ;
+ virtual Tensor & gt_out(const Tensor & tensor, const Tensor & other, Tensor & result) ;
+ virtual Tensor gt(const Tensor & tensor, const Tensor & other) ;
+ virtual Tensor m_le(const Tensor & self, Scalar value) ;
+ virtual Tensor m_le(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_le_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_le_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & le_out(const Tensor & tensor, Scalar value, Tensor & result) ;
+ virtual Tensor le(const Tensor & tensor, Scalar value) ;
+ virtual Tensor & le_out(const Tensor & tensor, const Tensor & other, Tensor & result) ;
+ virtual Tensor le(const Tensor & tensor, const Tensor & other) ;
+ virtual Tensor m_ge(const Tensor & self, Scalar value) ;
+ virtual Tensor m_ge(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_ge_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_ge_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & ge_out(const Tensor & tensor, Scalar value, Tensor & result) ;
+ virtual Tensor ge(const Tensor & tensor, Scalar value) ;
+ virtual Tensor & ge_out(const Tensor & tensor, const Tensor & other, Tensor & result) ;
+ virtual Tensor ge(const Tensor & tensor, const Tensor & other) ;
+ virtual Tensor m_eq(const Tensor & self, Scalar value) ;
+ virtual Tensor m_eq(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_eq_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_eq_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & eq_out(const Tensor & tensor, Scalar value, Tensor & result) ;
+ virtual Tensor eq(const Tensor & tensor, Scalar value) ;
+ virtual Tensor & eq_out(const Tensor & tensor, const Tensor & other, Tensor & result) ;
+ virtual Tensor eq(const Tensor & tensor, const Tensor & other) ;
+ virtual Tensor m_ne(const Tensor & self, Scalar value) ;
+ virtual Tensor m_ne(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_ne_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_ne_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & ne_out(const Tensor & tensor, Scalar value, Tensor & result) ;
+ virtual Tensor ne(const Tensor & tensor, Scalar value) ;
+ virtual Tensor & ne_out(const Tensor & tensor, const Tensor & other, Tensor & result) ;
+ virtual Tensor ne(const Tensor & tensor, const Tensor & other) ;
+ virtual std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & min, Tensor & min_indices) ;
+ virtual std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, int64_t dim, Tensor & min, Tensor & min_indices) ;
+ virtual std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim) ;
+ virtual Tensor & min_out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor min(const Tensor & self, const Tensor & other) ;
+ virtual Scalar min(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> max_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & max, Tensor & max_indices) ;
+ virtual std::tuple<Tensor,Tensor> max(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual std::tuple<Tensor &,Tensor &> max_out(const Tensor & self, int64_t dim, Tensor & max, Tensor & max_indices) ;
+ virtual std::tuple<Tensor,Tensor> max(const Tensor & self, int64_t dim) ;
+ virtual Tensor & max_out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor max(const Tensor & self, const Tensor & other) ;
+ virtual Scalar max(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, bool keepdim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, bool keepdim) ;
+ virtual std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k) ;
+ virtual std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) ;
+ virtual std::tuple<Tensor &,Tensor &> kthvalue_out(const Tensor & self, int64_t k, int64_t dim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> kthvalue(const Tensor & self, int64_t k, int64_t dim) ;
+ virtual std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, bool keepdim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> mode(const Tensor & self, bool keepdim) ;
+ virtual std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> mode(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim) ;
+ virtual std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, bool keepdim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> median(const Tensor & self, bool keepdim) ;
+ virtual std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> median(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim) ;
+ virtual std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> sort(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim) ;
+ virtual std::tuple<Tensor &,Tensor &> sort_out(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim, bool descending) ;
+ virtual std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k) ;
+ virtual std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) ;
+ virtual std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, bool largest, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim, bool largest) ;
+ virtual std::tuple<Tensor &,Tensor &> topk_out(const Tensor & self, int64_t k, int64_t dim, Tensor & values, Tensor & indices) ;
+ virtual std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim) ;
+ virtual bool m_all(const Tensor & self) ;
+ virtual bool m_any(const Tensor & self) ;
+ virtual int64_t m_get_device(const Tensor & self) ;
+ virtual Tensor & abs_out(const Tensor & self, Tensor & destination) ;
+ virtual Tensor abs(const Tensor & self) ;
+ virtual Tensor & m_abs_(Tensor & self) ;
+ virtual Tensor & m_sigmoid_(Tensor & self) ;
+ virtual Tensor & sigmoid_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor sigmoid(const Tensor & self) ;
+ virtual Tensor & m_log_(Tensor & self) ;
+ virtual Tensor & log_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor log(const Tensor & self) ;
+ virtual Tensor & m_log1p_(Tensor & self) ;
+ virtual Tensor & log1p_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor log1p(const Tensor & self) ;
+ virtual Tensor & lgamma_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor lgamma(const Tensor & self) ;
+ virtual Tensor & m_lgamma_(Tensor & self) ;
+ virtual Tensor & m_exp_(Tensor & self) ;
+ virtual Tensor & exp_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor exp(const Tensor & self) ;
+ virtual Tensor & m_cos_(Tensor & self) ;
+ virtual Tensor & cos_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor cos(const Tensor & self) ;
+ virtual Tensor & m_acos_(Tensor & self) ;
+ virtual Tensor & acos_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor acos(const Tensor & self) ;
+ virtual Tensor & m_cosh_(Tensor & self) ;
+ virtual Tensor & cosh_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor cosh(const Tensor & self) ;
+ virtual Tensor & m_sin_(Tensor & self) ;
+ virtual Tensor & sin_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor sin(const Tensor & self) ;
+ virtual Tensor & m_asin_(Tensor & self) ;
+ virtual Tensor & asin_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor asin(const Tensor & self) ;
+ virtual Tensor & m_sinh_(Tensor & self) ;
+ virtual Tensor & sinh_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor sinh(const Tensor & self) ;
+ virtual Tensor & m_tan_(Tensor & self) ;
+ virtual Tensor & tan_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor tan(const Tensor & self) ;
+ virtual Tensor & m_atan_(Tensor & self) ;
+ virtual Tensor & atan_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor atan(const Tensor & self) ;
+ virtual Tensor & m_tanh_(Tensor & self) ;
+ virtual Tensor & tanh_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor tanh(const Tensor & self) ;
+ virtual Tensor & m_sqrt_(Tensor & self) ;
+ virtual Tensor & sqrt_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor sqrt(const Tensor & self) ;
+ virtual Tensor & m_rsqrt_(Tensor & self) ;
+ virtual Tensor & rsqrt_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor rsqrt(const Tensor & self) ;
+ virtual Tensor & m_ceil_(Tensor & self) ;
+ virtual Tensor & ceil_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor ceil(const Tensor & self) ;
+ virtual Tensor & m_floor_(Tensor & self) ;
+ virtual Tensor & floor_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor floor(const Tensor & self) ;
+ virtual Tensor & m_round_(Tensor & self) ;
+ virtual Tensor & round_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor round(const Tensor & self) ;
+ virtual Tensor & m_trunc_(Tensor & self) ;
+ virtual Tensor & trunc_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor trunc(const Tensor & self) ;
+ virtual Tensor & m_frac_(Tensor & self) ;
+ virtual Tensor & frac_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor frac(const Tensor & self) ;
+ virtual Tensor & mean_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination) ;
+ virtual Tensor mean(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual Tensor & mean_out(const Tensor & self, int64_t dim, Tensor & destination) ;
+ virtual Tensor mean(const Tensor & self, int64_t dim) ;
+ virtual Scalar mean(const Tensor & self) ;
+ virtual Tensor & var_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination) ;
+ virtual Tensor var(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual Tensor & var_out(const Tensor & self, int64_t dim, Tensor & destination) ;
+ virtual Tensor var(const Tensor & self, int64_t dim) ;
+ virtual Scalar var(const Tensor & self) ;
+ virtual Tensor & std_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & destination) ;
+ virtual Tensor std(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual Tensor & std_out(const Tensor & self, int64_t dim, Tensor & destination) ;
+ virtual Tensor std(const Tensor & self, int64_t dim) ;
+ virtual Scalar std(const Tensor & self) ;
+ virtual Tensor & norm_out(const Tensor & self, Scalar p, int64_t dim, bool keepdim, Tensor & destination) ;
+ virtual Tensor norm(const Tensor & self, Scalar p, int64_t dim, bool keepdim) ;
+ virtual Tensor & norm_out(const Tensor & self, Scalar p, int64_t dim, Tensor & destination) ;
+ virtual Tensor norm(const Tensor & self, Scalar p, int64_t dim) ;
+ virtual Scalar norm(const Tensor & self, Scalar p) ;
+ virtual Scalar norm(const Tensor & self) ;
+ virtual Tensor & renorm_out(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm, Tensor & destination) ;
+ virtual Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) ;
+ virtual Tensor & m_renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) ;
+ virtual Scalar dist(const Tensor & self, const Tensor & other, Scalar p) ;
+ virtual Scalar dist(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & reciprocal_out(const Tensor & self, Tensor & destination) ;
+ virtual Tensor reciprocal(const Tensor & self) ;
+ virtual Tensor & m_reciprocal_(Tensor & self) ;
+ virtual Tensor & neg_out(const Tensor & self, Tensor & destination) ;
+ virtual Tensor neg(const Tensor & self) ;
+ virtual Tensor & m_neg_(Tensor & self) ;
+ virtual Tensor & atan2_out(const Tensor & self, const Tensor & other, Tensor & destination) ;
+ virtual Tensor atan2(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_atan2_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & pow_out(const Tensor & self, Scalar exponent, Tensor & destination) ;
+ virtual Tensor pow(const Tensor & self, Scalar exponent) ;
+ virtual Tensor & pow_out(const Tensor & self, const Tensor & exponent, Tensor & destination) ;
+ virtual Tensor pow(const Tensor & self, const Tensor & exponent) ;
+ virtual Tensor & m_pow_(Tensor & self, Scalar exponent) ;
+ virtual Tensor & m_pow_(Tensor & self, const Tensor & exponent) ;
+ virtual Tensor & lerp_out(const Tensor & self, const Tensor & end, Scalar weight, Tensor & destination) ;
+ virtual Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight) ;
+ virtual Tensor & m_lerp_(Tensor & self, const Tensor & end, Scalar weight) ;
+ virtual Tensor & linspace_out(Scalar start, Scalar end, int64_t steps, Tensor & result) ;
+ virtual Tensor linspace(Scalar start, Scalar end, int64_t steps) ;
+ virtual Tensor & linspace_out(Scalar start, Scalar end, Tensor & result) ;
+ virtual Tensor linspace(Scalar start, Scalar end) ;
+ virtual Tensor & logspace_out(Scalar start, Scalar end, int64_t steps, Tensor & result) ;
+ virtual Tensor logspace(Scalar start, Scalar end, int64_t steps) ;
+ virtual Tensor & logspace_out(Scalar start, Scalar end, Tensor & result) ;
+ virtual Tensor logspace(Scalar start, Scalar end) ;
+ virtual Tensor & histc_out(const Tensor & self, Tensor & destination) ;
+ virtual Tensor histc(const Tensor & self) ;
+ virtual Tensor & histc_out(const Tensor & self, int64_t bins, Tensor & destination) ;
+ virtual Tensor histc(const Tensor & self, int64_t bins) ;
+ virtual Tensor & histc_out(const Tensor & self, int64_t bins, Scalar min, Tensor & destination) ;
+ virtual Tensor histc(const Tensor & self, int64_t bins, Scalar min) ;
+ virtual Tensor & histc_out(const Tensor & self, int64_t bins, Scalar min, Scalar max, Tensor & destination) ;
+ virtual Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) ;
+ virtual Tensor & m_zero_(Tensor & self) ;
+ virtual Tensor & sum_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & result) ;
+ virtual Tensor sum(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual Tensor & sum_out(const Tensor & self, int64_t dim, Tensor & result) ;
+ virtual Tensor sum(const Tensor & self, int64_t dim) ;
+ virtual Scalar sum(const Tensor & self) ;
+ virtual Tensor & prod_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & result) ;
+ virtual Tensor prod(const Tensor & self, int64_t dim, bool keepdim) ;
+ virtual Tensor & prod_out(const Tensor & self, int64_t dim, Tensor & result) ;
+ virtual Tensor prod(const Tensor & self, int64_t dim) ;
+ virtual Scalar prod(const Tensor & self) ;
+ virtual Tensor & cumsum_out(const Tensor & self, int64_t dim, Tensor & result) ;
+ virtual Tensor cumsum(const Tensor & self, int64_t dim) ;
+ virtual Tensor & cumprod_out(const Tensor & self, int64_t dim, Tensor & result) ;
+ virtual Tensor cumprod(const Tensor & self, int64_t dim) ;
+ virtual Tensor & sign_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor sign(const Tensor & self) ;
+ virtual Tensor & m_sign_(Tensor & self) ;
+ virtual Scalar trace(const Tensor & self) ;
+ virtual Tensor & add_out(const Tensor & self, Scalar value, const Tensor & other, Tensor & result) ;
+ virtual Tensor add(const Tensor & self, Scalar value, const Tensor & other) ;
+ virtual Tensor & add_out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor add(const Tensor & self, Scalar value) ;
+ virtual Tensor & add_out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor add(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_add_(Tensor & self, Scalar value, const Tensor & other) ;
+ virtual Tensor & m_add_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_add_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & sub_out(const Tensor & self, Scalar value, const Tensor & other, Tensor & result) ;
+ virtual Tensor sub(const Tensor & self, Scalar value, const Tensor & other) ;
+ virtual Tensor & sub_out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor sub(const Tensor & self, Scalar value) ;
+ virtual Tensor & sub_out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor sub(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_sub_(Tensor & self, Scalar value, const Tensor & other) ;
+ virtual Tensor & m_sub_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_sub_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & mul_out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor mul(const Tensor & self, Scalar value) ;
+ virtual Tensor & mul_out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor mul(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_mul_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_mul_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & div_out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor div(const Tensor & self, Scalar value) ;
+ virtual Tensor & div_out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor div(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_div_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_div_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & fmod_out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor fmod(const Tensor & self, Scalar value) ;
+ virtual Tensor & fmod_out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor fmod(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_fmod_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_fmod_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & remainder_out(const Tensor & self, Scalar value, Tensor & result) ;
+ virtual Tensor remainder(const Tensor & self, Scalar value) ;
+ virtual Tensor & remainder_out(const Tensor & self, const Tensor & other, Tensor & result) ;
+ virtual Tensor remainder(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & m_remainder_(Tensor & self, Scalar value) ;
+ virtual Tensor & m_remainder_(Tensor & self, const Tensor & other) ;
+ virtual Tensor & clamp_out(const Tensor & self, Scalar min, Scalar max, Tensor & destination) ;
+ virtual Tensor clamp(const Tensor & self, Scalar min, Scalar max) ;
+ virtual Tensor & clamp_out(const Tensor & self, Scalar min, Tensor & result) ;
+ virtual Tensor clamp(const Tensor & self, Scalar min) ;
+ virtual Tensor & m_clamp_(Tensor & self, Scalar min, Scalar max) ;
+ virtual Tensor & m_clamp_(Tensor & self, Scalar min) ;
+ virtual Scalar dot(const Tensor & self, const Tensor & tensor) ;
+ virtual Tensor & tril_out(const Tensor & self, int64_t k, Tensor & destination) ;
+ virtual Tensor tril(const Tensor & self, int64_t k) ;
+ virtual Tensor & tril_out(const Tensor & self, Tensor & destination) ;
+ virtual Tensor tril(const Tensor & self) ;
+ virtual Tensor & m_tril_(Tensor & self, int64_t k) ;
+ virtual Tensor & m_tril_(Tensor & self) ;
+ virtual Tensor & triu_out(const Tensor & self, int64_t k, Tensor & destination) ;
+ virtual Tensor triu(const Tensor & self, int64_t k) ;
+ virtual Tensor & triu_out(const Tensor & self, Tensor & destination) ;
+ virtual Tensor triu(const Tensor & self) ;
+ virtual Tensor & m_triu_(Tensor & self, int64_t k) ;
+ virtual Tensor & m_triu_(Tensor & self) ;
+ virtual Tensor & cross_out(const Tensor & self, const Tensor & other, int64_t dim, Tensor & destination) ;
+ virtual Tensor cross(const Tensor & self, const Tensor & other, int64_t dim) ;
+ virtual Tensor & cross_out(const Tensor & self, const Tensor & other, Tensor & destination) ;
+ virtual Tensor cross(const Tensor & self, const Tensor & other) ;
+ virtual Tensor & eye_out(int64_t n, Tensor & result) ;
+ virtual Tensor eye(int64_t n) ;
+ virtual Tensor & eye_out(int64_t n, int64_t m, Tensor & result) ;
+ virtual Tensor eye(int64_t n, int64_t m) ;
+ virtual Tensor & diag_out(const Tensor & self, int64_t diagonal, Tensor & result) ;
+ virtual Tensor diag(const Tensor & self, int64_t diagonal) ;
+ virtual Tensor & diag_out(const Tensor & self, Tensor & result) ;
+ virtual Tensor diag(const Tensor & self) ;
+ virtual Tensor & addmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat1, const Tensor & mat2, Tensor & result) ;
+ virtual Tensor addmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat1, const Tensor & mat2) ;
+ virtual Tensor & addmm_out(Scalar beta, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Tensor & result) ;
+ virtual Tensor addmm(Scalar beta, const Tensor & self, const Tensor & mat1, const Tensor & mat2) ;
+ virtual Tensor & addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Tensor & result) ;
+ virtual Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2) ;
+ virtual Tensor & m_addmm_(Tensor & self, Scalar beta, Scalar alpha, const Tensor & mat1, const Tensor & mat2) ;
+ virtual Tensor & m_addmm_(Tensor & self, Scalar beta, const Tensor & mat1, const Tensor & mat2) ;
+ virtual Tensor & m_addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2) ;
+ virtual Tensor & addmv_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat, const Tensor & vec, Tensor & result) ;
+ virtual Tensor addmv(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & mat, const Tensor & vec) ;
+ virtual Tensor & addmv_out(Scalar beta, const Tensor & self, const Tensor & mat, const Tensor & vec, Tensor & result) ;
+ virtual Tensor addmv(Scalar beta, const Tensor & self, const Tensor & mat, const Tensor & vec) ;
+ virtual Tensor & addmv_out(const Tensor & self, const Tensor & mat, const Tensor & vec, Tensor & result) ;
+ virtual Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec) ;
+ virtual Tensor & m_addmv_(Tensor & self, Scalar beta, Scalar alpha, const Tensor & mat, const Tensor & vec) ;
+ virtual Tensor & m_addmv_(Tensor & self, Scalar beta, const Tensor & mat, const Tensor & vec) ;
+ virtual Tensor & m_addmv_(Tensor & self, const Tensor & mat, const Tensor & vec) ;
+ virtual Tensor & addr_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & vec1, const Tensor & vec2, Tensor & result) ;
+ virtual Tensor addr(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & vec1, const Tensor & vec2) ;
+ virtual Tensor & addr_out(Scalar beta, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Tensor & result) ;
+ virtual Tensor addr(Scalar beta, const Tensor & self, const Tensor & vec1, const Tensor & vec2) ;
+ virtual Tensor & addr_out(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Tensor & result) ;
+ virtual Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2) ;
+ virtual Tensor & m_addr_(Tensor & self, Scalar beta, Scalar alpha, const Tensor & vec1, const Tensor & vec2) ;
+ virtual Tensor & m_addr_(Tensor & self, Scalar beta, const Tensor & vec1, const Tensor & vec2) ;
+ virtual Tensor & m_addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2) ;
+ virtual Tensor & ger_out(const Tensor & self, const Tensor & vec2, Tensor & result) ;
+ virtual Tensor ger(const Tensor & self, const Tensor & vec2) ;
+ virtual Tensor & mv_out(const Tensor & self, const Tensor & vec, Tensor & result) ;
+ virtual Tensor mv(const Tensor & self, const Tensor & vec) ;
+ virtual Tensor & mm_out(const Tensor & self, const Tensor & mat2, Tensor & result) ;
+ virtual Tensor mm(const Tensor & self, const Tensor & mat2) ;
+ virtual Tensor & bmm_out(const Tensor & self, const Tensor & mat2, Tensor & result) ;
+ virtual Tensor bmm(const Tensor & self, const Tensor & mat2) ;
+ virtual Tensor & addbmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2, Tensor & result) ;
+ virtual Tensor addbmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & addbmm_out(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result) ;
+ virtual Tensor addbmm(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & addbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result) ;
+ virtual Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & m_addbmm_(Tensor & self, Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & m_addbmm_(Tensor & self, Scalar beta, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & m_addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & baddbmm_out(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2, Tensor & result) ;
+ virtual Tensor baddbmm(Scalar beta, const Tensor & self, Scalar alpha, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & baddbmm_out(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result) ;
+ virtual Tensor baddbmm(Scalar beta, const Tensor & self, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & baddbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Tensor & result) ;
+ virtual Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & m_baddbmm_(Tensor & self, Scalar beta, Scalar alpha, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & m_baddbmm_(Tensor & self, Scalar beta, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & m_baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2) ;
+ virtual Tensor & addcmul_out(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2, Tensor & result) ;
+ virtual Tensor addcmul(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2) ;
+ virtual Tensor & addcmul_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Tensor & result) ;
+ virtual Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2) ;
+ virtual Tensor & m_addcmul_(Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2) ;
+ virtual Tensor & m_addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2) ;
+ virtual Tensor & addcdiv_out(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2, Tensor & result) ;
+ virtual Tensor addcdiv(const Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2) ;
+ virtual Tensor & addcdiv_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Tensor & result) ;
+ virtual Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2) ;
+ virtual Tensor & m_addcdiv_(Tensor & self, Scalar value, const Tensor & tensor1, const Tensor & tensor2) ;
+ virtual Tensor & m_addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2) ;
+ virtual std::tuple<Tensor &,Tensor &> gesv_out(const Tensor & self, const Tensor & A, Tensor & solution, Tensor & lu) ;
+ virtual std::tuple<Tensor,Tensor> gesv(const Tensor & self, const Tensor & A) ;
+ virtual std::tuple<Tensor &,Tensor &> gels_out(const Tensor & self, const Tensor & A, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> gels(const Tensor & self, const Tensor & A) ;
+ virtual std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) ;
+ virtual std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose) ;
+ virtual std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, bool upper, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper) ;
+ virtual std::tuple<Tensor &,Tensor &> trtrs_out(const Tensor & self, const Tensor & A, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A) ;
+ virtual std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, bool eigenvectors, bool upper, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors, bool upper) ;
+ virtual std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, bool eigenvectors, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors) ;
+ virtual std::tuple<Tensor &,Tensor &> symeig_out(const Tensor & self, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> symeig(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> eig_out(const Tensor & self, bool eigenvectors, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> eig(const Tensor & self, bool eigenvectors) ;
+ virtual std::tuple<Tensor &,Tensor &> eig_out(const Tensor & self, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> eig(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &,Tensor &> svd_out(const Tensor & self, bool some, Tensor & res1, Tensor & res2, Tensor & res3) ;
+ virtual std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self, bool some) ;
+ virtual std::tuple<Tensor &,Tensor &,Tensor &> svd_out(const Tensor & self, Tensor & res1, Tensor & res2, Tensor & res3) ;
+ virtual std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self) ;
+ virtual Tensor & inverse_out(const Tensor & self, Tensor & output) ;
+ virtual Tensor inverse(const Tensor & self) ;
+ virtual Tensor & potrf_out(const Tensor & self, bool upper, Tensor & output) ;
+ virtual Tensor potrf(const Tensor & self, bool upper) ;
+ virtual Tensor & potrf_out(const Tensor & self, Tensor & output) ;
+ virtual Tensor potrf(const Tensor & self) ;
+ virtual Tensor & potrs_out(const Tensor & self, const Tensor & input2, bool upper, Tensor & result) ;
+ virtual Tensor potrs(const Tensor & self, const Tensor & input2, bool upper) ;
+ virtual Tensor & potrs_out(const Tensor & self, const Tensor & input2, Tensor & result) ;
+ virtual Tensor potrs(const Tensor & self, const Tensor & input2) ;
+ virtual Tensor & potri_out(const Tensor & self, bool upper, Tensor & output) ;
+ virtual Tensor potri(const Tensor & self, bool upper) ;
+ virtual Tensor & potri_out(const Tensor & self, Tensor & output) ;
+ virtual Tensor potri(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, bool upper, Scalar tol, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> pstrf(const Tensor & self, bool upper, Scalar tol) ;
+ virtual std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, bool upper, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> pstrf(const Tensor & self, bool upper) ;
+ virtual std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, Scalar tol, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> pstrf(const Tensor & self, Scalar tol) ;
+ virtual std::tuple<Tensor &,Tensor &> pstrf_out(const Tensor & self, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> pstrf(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> qr_out(const Tensor & self, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> qr(const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> geqrf_out(const Tensor & self, Tensor & res1, Tensor & res2) ;
+ virtual std::tuple<Tensor,Tensor> geqrf(const Tensor & self) ;
+ virtual std::tuple<Tensor &,const Tensor &> orgqr_out(const Tensor & self, const Tensor & input2, Tensor & result) ;
+ virtual std::tuple<Tensor,const Tensor &> orgqr(const Tensor & self, const Tensor & input2) ;
+ virtual std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose, Tensor & result) ;
+ virtual std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) ;
+ virtual std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, Tensor & result) ;
+ virtual std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left) ;
+ virtual std::tuple<Tensor &,const Tensor &> ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, Tensor & result) ;
+ virtual std::tuple<Tensor,const Tensor &> ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3) ;
+ virtual std::tuple<Tensor &,Tensor &> btrifact_out(const Tensor & info, const Tensor & self, Tensor & result, Tensor & pivots) ;
+ virtual std::tuple<Tensor,Tensor> btrifact(const Tensor & info, const Tensor & self) ;
+ virtual std::tuple<Tensor &,Tensor &> btrifact_out(const Tensor & self, Tensor & result, Tensor & pivots) ;
+ virtual std::tuple<Tensor,Tensor> btrifact(const Tensor & self) ;
+ virtual Tensor & btrisolve_out(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots, Tensor & result) ;
+ virtual Tensor btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) ;
+ virtual Tensor & randperm_out(Generator & generator, int64_t n, Tensor & result) ;
+ virtual Tensor randperm(Generator & generator, int64_t n) ;
+ virtual Tensor & randperm_out(int64_t n, Tensor & result) ;
+ virtual Tensor randperm(int64_t n) ;
+ virtual Tensor & multinomial_out(Generator & generator, const Tensor & self, int64_t num_samples, bool replacement, Tensor & result) ;
+ virtual Tensor multinomial(Generator & generator, const Tensor & self, int64_t num_samples, bool replacement) ;
+ virtual Tensor & multinomial_out(Generator & generator, const Tensor & self, int64_t num_samples, Tensor & result) ;
+ virtual Tensor multinomial(Generator & generator, const Tensor & self, int64_t num_samples) ;
+ virtual Tensor & multinomial_out(const Tensor & self, int64_t num_samples, bool replacement, Tensor & result) ;
+ virtual Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement) ;
+ virtual Tensor & multinomial_out(const Tensor & self, int64_t num_samples, Tensor & result) ;
+ virtual Tensor multinomial(const Tensor & self, int64_t num_samples) ;
+ virtual Tensor & m_uniform_(Tensor & self, Generator & generator, double from, double to) ;
+ virtual Tensor & m_uniform_(Tensor & self, Generator & generator, double from) ;
+ virtual Tensor & m_uniform_(Tensor & self, double from, double to) ;
+ virtual Tensor & m_uniform_(Tensor & self, Generator & generator) ;
+ virtual Tensor & m_uniform_(Tensor & self, double from) ;
+ virtual Tensor & m_uniform_(Tensor & self) ;
+ virtual Tensor & m_cauchy_(Tensor & self, Generator & generator, double median, double sigma) ;
+ virtual Tensor & m_cauchy_(Tensor & self, Generator & generator, double median) ;
+ virtual Tensor & m_cauchy_(Tensor & self, double median, double sigma) ;
+ virtual Tensor & m_cauchy_(Tensor & self, Generator & generator) ;
+ virtual Tensor & m_cauchy_(Tensor & self, double median) ;
+ virtual Tensor & m_cauchy_(Tensor & self) ;
+ virtual Tensor & m_log_normal_(Tensor & self, Generator & generator, double mean, double std) ;
+ virtual Tensor & m_log_normal_(Tensor & self, Generator & generator, double mean) ;
+ virtual Tensor & m_log_normal_(Tensor & self, double mean, double std) ;
+ virtual Tensor & m_log_normal_(Tensor & self, Generator & generator) ;
+ virtual Tensor & m_log_normal_(Tensor & self, double mean) ;
+ virtual Tensor & m_log_normal_(Tensor & self) ;
+ virtual Tensor & rand_out(Generator & generator, IntList size, Tensor & result) ;
+ virtual Tensor rand(Generator & generator, IntList size) ;
+ virtual Tensor & rand_out(IntList size, Tensor & result) ;
+ virtual Tensor rand(IntList size) ;
+ virtual Tensor & randn_out(Generator & generator, IntList size, Tensor & result) ;
+ virtual Tensor randn(Generator & generator, IntList size) ;
+ virtual Tensor & randn_out(IntList size, Tensor & result) ;
+ virtual Tensor randn(IntList size) ;
+ virtual Tensor & m_geometric_(Tensor & self, Generator & generator, double p) ;
+ virtual Tensor & m_geometric_(Tensor & self, double p) ;
+ virtual int64_t m_size(const Tensor & self, int64_t dim) ;
+ virtual int64_t m_stride(const Tensor & self, int64_t dim) ;
+ virtual Tensor tensor() ;
+ virtual Tensor & select_out(const Tensor & self, int dim, int64_t sliceIndex, Tensor & result) ;
+ virtual Tensor select(const Tensor & self, int dim, int64_t sliceIndex) ;
+ virtual void Abs_updateOutput(const Tensor & input, const Tensor & output) ;
+ virtual void Abs_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput) ;
+ virtual void AbsCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) ;
+ virtual void AbsCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) ;
+ virtual void BCECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights) ;
+ virtual void BCECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) ;
+ virtual void BCECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights) ;
+ virtual void BCECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) ;
+ virtual void ClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights, const Tensor & total_weight, int64_t ignore_index) ;
+ virtual void ClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & total_weight, int64_t ignore_index) ;
+ virtual void ClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights, const Tensor & total_weight, int64_t ignore_index) ;
+ virtual void ClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & total_weight, int64_t ignore_index) ;
+ virtual void SpatialClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & weights, const Tensor & total_weight) ;
+ virtual void SpatialClassNLLCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, const Tensor & total_weight) ;
+ virtual void SpatialClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & weights, const Tensor & total_weight) ;
+ virtual void SpatialClassNLLCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, const Tensor & total_weight) ;
+ virtual void ELU_updateOutput(const Tensor & input, const Tensor & output, Scalar alpha, bool inplace) ;
+ virtual void ELU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output, Scalar alpha, bool inplace) ;
+ virtual void DistKLDivCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) ;
+ virtual void DistKLDivCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) ;
+ virtual void GatedLinear_updateOutput(const Tensor & input, const Tensor & output, int dim) ;
+ virtual void GatedLinear_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int dim) ;
+ virtual void HardShrink_updateOutput(const Tensor & input, const Tensor & output, Scalar lambda) ;
+ virtual void HardShrink_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar lambda) ;
+ virtual void HardTanh_updateOutput(const Tensor & input, const Tensor & output, Scalar min_val, Scalar max_val, bool inplace) ;
+ virtual void HardTanh_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar min_val, Scalar max_val, bool inplace) ;
+ virtual void L1Cost_updateOutput(const Tensor & input, const Tensor & output) ;
+ virtual void L1Cost_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput) ;
+ virtual void L1Cost_updateGradInput(const Tensor & input, const Tensor & gradInput) ;
+ virtual void LeakyReLU_updateOutput(const Tensor & input, const Tensor & output, Scalar negval, bool inplace) ;
+ virtual void LeakyReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar negval, bool inplace) ;
+ virtual void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & bias2, const Tensor & hx, const Tensor & output, const Tensor & storage) ;
+ virtual void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & hx, const Tensor & output, const Tensor & storage) ;
+ virtual void GRUFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & hx, const Tensor & output, const Tensor & storage) ;
+ virtual void GRUFused_updateGradInput(const Tensor & gradInInput, const Tensor & gradInHidden, const Tensor & gradOutput, const Tensor & gradInputHx, const Tensor & storage) ;
+ virtual void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & bias2, const Tensor & cell, const Tensor & output, const Tensor & outputCell) ;
+ virtual void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & bias1, const Tensor & cell, const Tensor & output, const Tensor & outputCell) ;
+ virtual void LSTMFused_updateOutput(const Tensor & input, const Tensor & hidden, const Tensor & cell, const Tensor & output, const Tensor & outputCell) ;
+ virtual void LSTMFused_updateGradInput(const Tensor & storage, const Tensor & gradInGates, const Tensor & cx, const Tensor & cy, const Tensor & gradOutput, const Tensor & gradOutputCell, const Tensor & gradInputCx) ;
+ virtual void LogSigmoid_updateOutput(const Tensor & input, const Tensor & output, const Tensor & buffer) ;
+ virtual void LogSigmoid_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & buffer) ;
+ virtual void LogSoftMax_updateOutput(const Tensor & input, const Tensor & output) ;
+ virtual void LogSoftMax_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) ;
+ virtual void MarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, Scalar margin) ;
+ virtual void MarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, Scalar margin) ;
+ virtual void SoftMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) ;
+ virtual void SoftMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) ;
+ virtual void MSECriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) ;
+ virtual void MSECriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) ;
+ virtual void MultiLabelMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, const Tensor & isTarget, bool sizeAverage) ;
+ virtual void MultiLabelMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, const Tensor & isTarget, bool sizeAverage) ;
+ virtual void MultiMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, int p, const Tensor & weights, Scalar margin) ;
+ virtual void MultiMarginCriterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage, int p, Scalar margin) ;
+ virtual void MultiMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, int p, const Tensor & weights, Scalar margin) ;
+ virtual void MultiMarginCriterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage, int p, Scalar margin) ;
+ virtual void PReLU_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, int64_t nOutputPlane) ;
+ virtual void PReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int64_t nOutputPlane) ;
+ virtual void PReLU_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradWeight, const Tensor & gradWeightBuf, const Tensor & gradWeightBuf2, int64_t nOutputPlane, Scalar scale) ;
+ virtual void Linear_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & addBuffer) ;
+ virtual void Linear_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight) ;
+ virtual void Linear_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & addBuffer, Scalar scale) ;
+ virtual void RReLU_updateOutput(const Tensor & input, const Tensor & output, const Tensor & noise, Scalar lower, Scalar upper, bool train, bool inplace, Generator & generator) ;
+ virtual void RReLU_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & noise, Scalar lower, Scalar upper, bool train, bool inplace) ;
+ virtual void Sigmoid_updateOutput(const Tensor & input, const Tensor & output) ;
+ virtual void Sigmoid_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) ;
+ virtual void Sigmoid_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) ;
+ virtual void SmoothL1Criterion_updateOutput(const Tensor & input, const Tensor & target, const Tensor & output, bool sizeAverage) ;
+ virtual void SmoothL1Criterion_updateGradInput(const Tensor & input, const Tensor & target, const Tensor & gradInput, bool sizeAverage) ;
+ virtual void SoftMax_updateOutput(const Tensor & input, const Tensor & output) ;
+ virtual void SoftMax_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) ;
+ virtual void SoftPlus_updateOutput(const Tensor & input, const Tensor & output, Scalar beta, Scalar threshold) ;
+ virtual void SoftPlus_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output, Scalar beta, Scalar threshold) ;
+ virtual void SoftShrink_updateOutput(const Tensor & input, const Tensor & output, Scalar lambda) ;
+ virtual void SoftShrink_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar lambda) ;
+ virtual void IndexLinear_updateOutput(const Tensor & keys, int64_t keysOffset, const Tensor & values, const Tensor & sizes, const Tensor & cumSumSizes, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & normalizedValues, int train) ;
+ virtual void IndexLinear_accGradParameters(const Tensor & keys, int64_t keysOffset, const Tensor & values, const Tensor & sizes, const Tensor & cumSumSizes, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & bias, const Tensor & valuesBuffer, Scalar weightDecay, Scalar scale) ;
+ virtual void SparseLinear_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias) ;
+ virtual void SparseLinear_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & bias, Scalar weightDecay, Scalar scale) ;
+ virtual void Sqrt_updateOutput(const Tensor & input, const Tensor & output, Scalar eps) ;
+ virtual void Sqrt_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) ;
+ virtual void Square_updateOutput(const Tensor & input, const Tensor & output) ;
+ virtual void Square_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput) ;
+ virtual void Tanh_updateOutput(const Tensor & input, const Tensor & output) ;
+ virtual void Tanh_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) ;
+ virtual void Tanh_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, const Tensor & output) ;
+ virtual void Threshold_updateOutput(const Tensor & input, const Tensor & output, Scalar threshold, Scalar val, bool inplace) ;
+ virtual void Threshold_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, Scalar threshold, Scalar val, bool inplace) ;
+ virtual void TemporalConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int dW, int inputFrameSize, int outputFrameSize) ;
+ virtual void TemporalConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int dW) ;
+ virtual void TemporalConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int dW, Scalar scale) ;
+ virtual void TemporalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int dW) ;
+ virtual void TemporalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int dW) ;
+ virtual void TemporalSubSampling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int dW, int inputFrameSize) ;
+ virtual void TemporalSubSampling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int dW) ;
+ virtual void TemporalSubSampling_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int dW, Scalar scale) ;
+ virtual void TemporalRowConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst) ;
+ virtual void TemporalRowConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst) ;
+ virtual void TemporalRowConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int dW, int padW, bool featFirst, Scalar scale) ;
+ virtual void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps) ;
+ virtual void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps) ;
+ virtual void BatchNormalization_updateOutput(const Tensor & input, const Tensor & output, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double momentum, double eps) ;
+ virtual void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) ;
+ virtual void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) ;
+ virtual void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & gradWeight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) ;
+ virtual void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) ;
+ virtual void BatchNormalization_backward(const Tensor & input, const Tensor & gradOutput, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_std, bool train, double scale, double eps) ;
+ virtual void SpatialConvolutionMap_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH) ;
+ virtual void SpatialConvolutionMap_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH) ;
+ virtual void SpatialConvolutionMap_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH, Scalar scale) ;
+ virtual void SpatialConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) ;
+ virtual void SpatialConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) ;
+ virtual void SpatialConvolutionMM_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) ;
+ virtual void SpatialConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale) ;
+ virtual void SpatialConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale) ;
+ virtual void SpatialDepthWiseConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) ;
+ virtual void SpatialDepthWiseConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) ;
+ virtual void SpatialDepthWiseConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH) ;
+ virtual void SpatialDepthWiseConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale) ;
+ virtual void SpatialDepthWiseConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, Scalar scale) ;
+ virtual void SpatialConvolutionLocal_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight) ;
+ virtual void SpatialConvolutionLocal_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight) ;
+ virtual void SpatialConvolutionLocal_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, int64_t inputWidth, int64_t inputHeight, int64_t outputWidth, int64_t outputHeight, Scalar scale) ;
+ virtual void SpatialAdaptiveMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int owidth, int oheight) ;
+ virtual void SpatialAdaptiveMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices) ;
+ virtual void SpatialAdaptiveAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int owidth, int oheight) ;
+ virtual void SpatialAdaptiveAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput) ;
+ virtual void SpatialAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad) ;
+ virtual void SpatialAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad) ;
+ virtual void SpatialFractionalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, int outputW, int outputH, int poolSizeW, int poolSizeH, const Tensor & indices, const Tensor & randomSamples) ;
+ virtual void SpatialFractionalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int outputW, int outputH, int poolSizeW, int poolSizeH, const Tensor & indices) ;
+ virtual void SpatialFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH) ;
+ virtual void SpatialFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH) ;
+ virtual void SpatialFullConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH) ;
+ virtual void SpatialFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH, Scalar scale) ;
+ virtual void SpatialFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH, Scalar scale) ;
+ virtual void SpatialFullConvolutionMap_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH) ;
+ virtual void SpatialFullConvolutionMap_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & bias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH) ;
+ virtual void SpatialFullConvolutionMap_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & connTable, int nInputPlane, int nOutputPlane, int dW, int dH, Scalar scale) ;
+ virtual void SpatialDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) ;
+ virtual void SpatialDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) ;
+ virtual void SpatialDilatedConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH) ;
+ virtual void SpatialDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, Scalar scale) ;
+ virtual void SpatialDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, Scalar scale) ;
+ virtual void SpatialMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode) ;
+ virtual void SpatialMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode) ;
+ virtual void SpatialDilatedMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, bool ceil_mode) ;
+ virtual void SpatialDilatedMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, bool ceil_mode) ;
+ virtual void SpatialMaxUnpooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int owidth, int oheight) ;
+ virtual void SpatialMaxUnpooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int owidth, int oheight) ;
+ virtual void SpatialSubSampling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, int kW, int kH, int dW, int dH) ;
+ virtual void SpatialSubSampling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, int kW, int kH, int dW, int dH) ;
+ virtual void SpatialSubSampling_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, int kW, int kH, int dW, int dH, Scalar scale) ;
+ virtual void SpatialUpSamplingNearest_updateOutput(const Tensor & input, const Tensor & output, int scale_factor) ;
+ virtual void SpatialUpSamplingNearest_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int scale_factor) ;
+ virtual void SpatialUpSamplingBilinear_updateOutput(const Tensor & input, const Tensor & output, int outputHeight, int outputWidth) ;
+ virtual void SpatialUpSamplingBilinear_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, int nbatch, int nchannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth) ;
+ virtual void VolumetricAveragePooling_updateOutput(const Tensor & input, const Tensor & output, int kT, int kW, int kH, int dT, int dW, int dH) ;
+ virtual void VolumetricAveragePooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int kT, int kW, int kH, int dT, int dW, int dH) ;
+ virtual void VolumetricConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH) ;
+ virtual void VolumetricConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH) ;
+ virtual void VolumetricConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, int dT, int dW, int dH, int pT, int pW, int pH) ;
+ virtual void VolumetricConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale) ;
+ virtual void VolumetricConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale) ;
+ virtual void VolumetricConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) ;
+ virtual void VolumetricConvolutionMM_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) ;
+ virtual void VolumetricConvolutionMM_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH) ;
+ virtual void VolumetricConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale) ;
+ virtual void VolumetricConvolutionMM_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, Scalar scale) ;
+ virtual void VolumetricFractionalMaxPooling_updateOutput(const Tensor & input, const Tensor & output, int outputT, int outputW, int outputH, int poolSizeT, int poolSizeW, int poolSizeH, const Tensor & indices, const Tensor & randomSamples) ;
+ virtual void VolumetricFractionalMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int outputT, int outputW, int outputH, int poolSizeT, int poolSizeW, int poolSizeH, const Tensor & indices) ;
+ virtual void VolumetricFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH) ;
+ virtual void VolumetricFullConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH) ;
+ virtual void VolumetricFullConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH) ;
+ virtual void VolumetricFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH, Scalar scale) ;
+ virtual void VolumetricFullConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & finput, const Tensor & fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, int aT, int aW, int aH, Scalar scale) ;
+ virtual void VolumetricDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & bias, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH) ;
+ virtual void VolumetricDilatedConvolution_updateOutput(const Tensor & input, const Tensor & output, const Tensor & weight, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH) ;
+ virtual void VolumetricDilatedConvolution_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & weight, const Tensor & gradColumns, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH) ;
+ virtual void VolumetricDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & gradBias, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, Scalar scale) ;
+ virtual void VolumetricDilatedConvolution_accGradParameters(const Tensor & input, const Tensor & gradOutput, const Tensor & gradWeight, const Tensor & columns, const Tensor & ones, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, int dilationT, int dilationW, int dilationH, Scalar scale) ;
+ virtual void VolumetricMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, bool ceilMode) ;
+ virtual void VolumetricMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, bool ceilMode) ;
+ virtual void VolumetricDilatedMaxPooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, int dilationT, int dilationW, int dilationH, bool ceilMode) ;
+ virtual void VolumetricDilatedMaxPooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, int dilationT, int dilationW, int dilationH, bool ceilMode) ;
+ virtual void VolumetricMaxUnpooling_updateOutput(const Tensor & input, const Tensor & output, const Tensor & indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) ;
+ virtual void VolumetricMaxUnpooling_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH) ;
+ virtual void SpatialReflectionPadding_updateOutput(const Tensor & input, const Tensor & output, int pad_l, int pad_r, int pad_t, int pad_b) ;
+ virtual void SpatialReflectionPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pad_l, int pad_r, int pad_t, int pad_b) ;
+ virtual void SpatialReplicationPadding_updateOutput(const Tensor & input, const Tensor & output, int pad_l, int pad_r, int pad_t, int pad_b) ;
+ virtual void SpatialReplicationPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pad_l, int pad_r, int pad_t, int pad_b) ;
+ virtual void VolumetricReplicationPadding_updateOutput(const Tensor & input, const Tensor & output, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) ;
+ virtual void VolumetricReplicationPadding_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) ;
+ virtual void VolumetricUpSamplingNearest_updateOutput(const Tensor & input, const Tensor & output, int scale_factor) ;
+ virtual void VolumetricUpSamplingNearest_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, int scale_factor) ;
+ virtual void VolumetricUpSamplingTrilinear_updateOutput(const Tensor & input, const Tensor & output, int outputDepth, int outputHeight, int outputWidth) ;
+ virtual void VolumetricUpSamplingTrilinear_updateGradInput(const Tensor & gradOutput, const Tensor & gradInput, int nbatch, int nchannels, int inputDepth, int inputHeight, int inputWidth, int outputDepth, int outputHeight, int outputWidth) ;
+ virtual void SpatialCrossMapLRN_updateOutput(const Tensor & input, const Tensor & output, const Tensor & scale, int size, Scalar alpha, Scalar beta, Scalar k) ;
+ virtual void SpatialCrossMapLRN_updateGradInput(const Tensor & input, const Tensor & gradOutput, const Tensor & gradInput, const Tensor & scale, const Tensor & output, int size, Scalar alpha, Scalar beta, Scalar k) ;
+protected:
+ Context* context;
+};
+
+
+}
diff --git a/aten/src/ATen/templates/Type.h b/aten/src/ATen/templates/Type.h
index a60f4db524..18316b9ee3 100644
--- a/aten/src/ATen/templates/Type.h
+++ b/aten/src/ATen/templates/Type.h
@@ -10,7 +10,6 @@ namespace at {
class Context;
class Storage;
class Tensor;
-class TensorRef;
class Generator;
enum class ScalarType {
@@ -90,10 +89,11 @@ struct Type {
// for external dispatch
virtual TypeID ID() const = 0;
- // example
- // virtual Tensor * add(Tensor & a, Tensor & b) = 0;
virtual void copy(const Tensor & src, Tensor & dst) = 0;
Tensor copy(const Tensor & src);
+
+ // example
+ // virtual Tensor * add(Tensor & a, Tensor & b) = 0;
${type_method_declarations}
protected:
Context* context;
diff --git a/aten/tools/update_doc.sh b/aten/tools/update_doc.sh
new file mode 100755
index 0000000000..f8fb6c3e56
--- /dev/null
+++ b/aten/tools/update_doc.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+cp build/src/ATen/ATen/{Tensor,Type,Functions}.h doc
+