path: root/torch/_utils.py
author    Adam Paszke <adam.paszke@gmail.com>  2016-10-11 11:47:37 -0700
committer Adam Paszke <adam.paszke@gmail.com>  2016-10-13 17:17:49 -0700
commit    0325e2f646279229890f368d54a09a99a14c7074 (patch)
tree      34508f14c384b34b6ef4c1abaacca646b3b8f1ed /torch/_utils.py
parent    93b8b5631fd3e15633fbc1d57afaa48d77eed935 (diff)
Major autograd refactor
Improves autograd performance by more than 2x and fixes a couple of bugs. All core functions have been moved to C.
Diffstat (limited to 'torch/_utils.py')
-rw-r--r--  torch/_utils.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/torch/_utils.py b/torch/_utils.py
index b9a325081c..76adab2ce4 100644
--- a/torch/_utils.py
+++ b/torch/_utils.py
@@ -1,3 +1,4 @@
+import torch
 def _type(self, new_type=None, async=False):
     if new_type is None:
@@ -10,10 +11,7 @@ def _type(self, new_type=None, async=False):
     return new_type(self.size()).copy_(self, async)
 
 def _cuda(self, idx=None, async=False):
-    import torch.cuda
-    # This already is a CUDA tensor.
-    # Let's check if it needs to be transfered to another GPU.
-    if hasattr(self, 'get_device'):
+    if self.is_cuda:
         target_device = idx if idx else torch.cuda.current_device()
         if self.get_device() != target_device:
             with torch.cuda.device(target_device):
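For context, the patch makes _cuda short-circuit on the is_cuda attribute instead of probing for get_device with hasattr, and it drops the per-call import of torch.cuda now that torch is imported at module top. A minimal sketch of the resulting control flow follows; the hunk above is truncated, so the copy inside the with block and the CPU-to-GPU branch are illustrative assumptions, not quoted from the commit. (async was a valid parameter name in 2016; it became a reserved keyword in Python 3.7, so this sketch targets the Python of that era.)

import torch

def _cuda(self, idx=None, async=False):
    if self.is_cuda:
        # Already a CUDA tensor; move it only if a different GPU was requested.
        target_device = idx if idx else torch.cuda.current_device()
        if self.get_device() != target_device:
            with torch.cuda.device(target_device):
                # Assumed body: allocate on the target GPU and copy,
                # mirroring the copy_ pattern used in _type above.
                return type(self)(self.size()).copy_(self, async)
        return self
    # Assumed branch: convert a CPU tensor to its torch.cuda counterpart.
    with torch.cuda.device(idx if idx is not None else torch.cuda.current_device()):
        return self.type(getattr(torch.cuda, self.__class__.__name__), async)

Replacing the hasattr probe with a plain attribute check is one of the small per-call savings this refactor makes; note the quoted line keeps the pre-existing idx-if-idx idiom, which treats device 0 the same as None.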