diff options
author | Sam Gross <colesbury@gmail.com> | 2017-12-13 15:40:34 -0500 |
---|---|---|
committer | gchanan <gregchanan@gmail.com> | 2017-12-13 15:40:34 -0500 |
commit | d41b6c7daa11a93b6d8d17e7c814a9f37c9de825 (patch) | |
tree | 36862bd376a61adce1512f455784645cf17a954d /torch/autograd/variable.py | |
parent | 28890b20461453d21564142295e727dec58044c1 (diff) | |
download | pytorch-d41b6c7daa11a93b6d8d17e7c814a9f37c9de825.tar.gz pytorch-d41b6c7daa11a93b6d8d17e7c814a9f37c9de825.tar.bz2 pytorch-d41b6c7daa11a93b6d8d17e7c814a9f37c9de825.zip |
Implement remaining random methods through ATen (#4137)
* Implement remaining random methods through ATen
* Change test_bernoulli on Tensor to avoid broadcasting
The new ATen-dispatched bernoulli_ supports broadcasting. The old
Tensor.bernoulli_ bindings instead require the tensors to have the same
number of elements. I haven't changed the old code because it will be
deleted soon.
Diffstat (limited to 'torch/autograd/variable.py')
-rw-r--r-- | torch/autograd/variable.py | 15 |
1 files changed, 2 insertions, 13 deletions
diff --git a/torch/autograd/variable.py b/torch/autograd/variable.py index a5ad611811..436aa621d6 100644 --- a/torch/autograd/variable.py +++ b/torch/autograd/variable.py @@ -307,12 +307,6 @@ class Variable(_C._VariableBase): def expand_as(self, tensor): return self.expand(tensor.size()) - def multinomial(self, num_samples=1, replacement=False): - return Variable(torch.multinomial(self.data, num_samples, replacement)) - - def bernoulli(self): - return Variable(torch.bernoulli(self.data)) - def __rsub__(self, other): return -self + other @@ -375,13 +369,8 @@ class Variable(_C._VariableBase): return Variable.from_numpy(array) class _torch(object): - @staticmethod - def normal(means, std=1): - if isinstance(means, Variable): - means = means.data - if isinstance(std, Variable): - std = std.data - return Variable(torch.normal(means, std)) + pass + for method in dir(Variable): # This will also wrap some methods that normally aren't part of the |