author     Roy Li <royboy@fb.com>                                              2019-04-18 00:18:35 -0700
committer  Facebook Github Bot <facebook-github-bot@users.noreply.github.com>  2019-04-18 00:21:43 -0700
commit     fbf505cba730fc96a842f1764ad5d5f4daf9f823 (patch)
tree       a5ba70e9c137eea577f3dff46c8cad4529218a37 /torch
parent     a64cce326ffd923373fbe129d3296cc232019ff2 (diff)
download   pytorch-fbf505cba730fc96a842f1764ad5d5f4daf9f823.tar.gz
           pytorch-fbf505cba730fc96a842f1764ad5d5f4daf9f823.tar.bz2
           pytorch-fbf505cba730fc96a842f1764ad5d5f4daf9f823.zip
Remove copy and copy_ special case on Type (#18972)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18972
ghimport-source-id: b5d3012b00530145fa24ab0cab693a7e80cb5989
Differential Revision: D14816530
Pulled By: li-roy
fbshipit-source-id: 9c7a166abb22d2cd1f81f352e44d9df1541b1774
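
Note on the change: the copy entry points move off Type and onto Tensor. VariableType::s_copy_ and the _s_copy_from stub are collapsed into a single VariableType::copy_, and out-of-place cross-device/dtype copies are expressed with Tensor::to(..., copy=true). The sketch below illustrates the resulting call pattern; it is not code from this PR, and assumes only stock ATen C++ API (at::randn, at::hasCUDA, Tensor::to, Tensor::copy_).

#include <ATen/ATen.h>

int main() {
  at::Tensor cpu = at::randn({2, 3});  // float32 tensor on CPU

  // Old style (removed by this change): a Type-level call such as
  //   gpu_type.copy(cpu, /*non_blocking=*/true)
  // New style: device and dtype conversion expressed directly on the Tensor.
  if (at::hasCUDA()) {
    at::Tensor gpu = cpu.to(at::kCUDA, at::kFloat,
                            /*non_blocking=*/true, /*copy=*/true);
  }

  // In-place copies go through the unified copy_ path; no dense/sparse
  // branching is needed at the call site.
  at::Tensor dst = at::empty({2, 3});
  dst.copy_(cpu, /*non_blocking=*/false);
  return 0;
}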
Diffstat (limited to 'torch')
-rw-r--r--  torch/csrc/autograd/VariableTypeManual.cpp  | 11
-rw-r--r--  torch/csrc/autograd/functions/tensor.cpp    |  6
-rw-r--r--  torch/csrc/cuda/comm.cpp                     |  7
3 files changed, 13 insertions, 11 deletions
diff --git a/torch/csrc/autograd/VariableTypeManual.cpp b/torch/csrc/autograd/VariableTypeManual.cpp
index 2a7414caac..e3d0af500f 100644
--- a/torch/csrc/autograd/VariableTypeManual.cpp
+++ b/torch/csrc/autograd/VariableTypeManual.cpp
@@ -229,7 +229,8 @@ void VariableType::backward(
 void VariableType::set_data(Tensor & self, Tensor new_data) const {
   as_variable_ref(self).set_data(new_data);
 }
-Tensor & VariableType::s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const {
+
+Tensor & VariableType::copy_(Tensor & self, const Tensor & src, bool non_blocking) const {
   jit::Value* output = nullptr;
   if(torch::jit::tracer::isTracing()) {
     const jit::tracer::TracingState& state = *jit::tracer::getTracingState();
@@ -265,9 +266,7 @@ Tensor & VariableType::s_copy_(Tensor & self, const Tensor & src, bool non_block
   }
   {
     at::AutoNonVariableTypeMode non_var_type_mode(true);
-    if (self.is_sparse() && src.is_sparse()) baseType->copy_sparse_to_sparse_(self_, src_, non_blocking);
-    else if (!self.is_sparse() && !src.is_sparse()) baseType->s_copy_(self_, src_, non_blocking);
-    else AT_ERROR("copy_() between dense and sparse Tensors is not implemented! Found self type = ", self.type(), " and src type = ", src.type());
+    baseType->copy_(self_, src_, non_blocking);
   }
   increment_version(self);
   rebase_history(as_variable_ref( self ), std::move(grad_fn));
@@ -277,10 +276,6 @@ Tensor & VariableType::s_copy_(Tensor & self, const Tensor & src, bool non_block
   return self;
 }
 
-Tensor VariableType::_s_copy_from(const Tensor & self, const Tensor & dst, bool non_blocking) const {
-  AT_ERROR("copy_from does not support automatic differentiation; use copy_ instead");
-}
-
 Tensor & VariableType::resize_(Tensor & self, IntArrayRef size) const {
   auto& self_ = unpack(self, "self", 0);
   if (as_variable_ref(self).requires_grad()) {
diff --git a/torch/csrc/autograd/functions/tensor.cpp b/torch/csrc/autograd/functions/tensor.cpp
index af4f4ec41c..fe2bea3dbe 100644
--- a/torch/csrc/autograd/functions/tensor.cpp
+++ b/torch/csrc/autograd/functions/tensor.cpp
@@ -27,7 +27,11 @@ auto CopyBackwards::apply(variable_list&& grads) -> variable_list {
   // TODO: What if !grad.is_cuda(), but src_device is CUDA?
   // This code is kind of weirdly asymmetric.
   if (grad.is_cuda() && grad.device() != src_device) {
-    grad_inputs[1] = src_type->copy(grad);
+    grad_inputs[1] = grad.to(
+        src_type->device_type(),
+        src_type->scalarType(),
+        /*non_blocking*/false,
+        /*copy*/true);
   } else {
     grad_inputs[1] = grad.toType(*src_type);
   }
diff --git a/torch/csrc/cuda/comm.cpp b/torch/csrc/cuda/comm.cpp
index c1f1b43414..53faa6baa5 100644
--- a/torch/csrc/cuda/comm.cpp
+++ b/torch/csrc/cuda/comm.cpp
@@ -66,14 +66,17 @@ std::vector<Tensor> broadcast(const Tensor& tensor, IntArrayRef devices) {
 #else
   {
 #endif
-    auto & gpu_type = type.toBackend(type.is_sparse() ? at::Backend::SparseCUDA : at::Backend::CUDA);
     if (type.is_cuda()) {
       tensors.push_back(tensor);
     }
     IntArrayRef loop_devices = type.is_cuda() ? devices.slice(1) : devices;
     for (auto device : loop_devices) {
       _device_guard.set_index(device);
-      tensors.push_back(gpu_type.copy(tensor, true));
+      tensors.push_back(tensor.to(
+          kCUDA,
+          type.scalarType(),
+          /*non_blocking*/true,
+          /*copy*/true));
     }
   }
   return tensors;
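
Note on the comm.cpp hunk: broadcast() no longer materializes a CUDA Type via toBackend and calls gpu_type.copy; each replica comes from Tensor::to with copy=true while a device guard selects the target GPU. A minimal sketch of that pattern follows; the helper name replicate_to_devices is hypothetical, and c10::cuda::OptionalCUDAGuard is an assumed stand-in for the guard used in comm.cpp.

#include <ATen/ATen.h>
#include <c10/cuda/CUDAGuard.h>
#include <vector>

// Hypothetical helper (not part of the PR): copy `src` onto each listed CUDA
// device the way the updated broadcast() loop does, via Tensor::to(copy=true).
std::vector<at::Tensor> replicate_to_devices(const at::Tensor& src,
                                             const std::vector<int>& device_indices) {
  std::vector<at::Tensor> replicas;
  replicas.reserve(device_indices.size());
  c10::cuda::OptionalCUDAGuard device_guard;
  for (int index : device_indices) {
    device_guard.set_index(index);  // make this GPU current for the copy below
    replicas.push_back(src.to(at::kCUDA, src.scalar_type(),
                              /*non_blocking=*/true, /*copy=*/true));
  }
  return replicas;
}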