author | Peter Goldsborough <peter@goldsborough.me> | 2018-02-03 17:57:39 -0800
committer | Soumith Chintala <soumith@gmail.com> | 2018-02-03 20:57:39 -0500
commit | 61b5ea85d4e85b5ec1fd4c513e103bab845993ff (patch)
tree | 7a392c1256402b40261ee5dca97763029fbec5c9 /tools
parent | f8388d2aea210b6cf19330e948be2456f5fa9bd8 (diff)
Remove FunctionFlags (#5018)
Diffstat (limited to 'tools')
-rw-r--r-- | tools/autograd/gen_variable_type.py | 2
-rw-r--r-- | tools/autograd/templates/VariableType.cpp | 8
2 files changed, 2 insertions, 8 deletions
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index 8f86898283..eddde451c2 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -93,7 +93,7 @@ if (compute_requires_grad( ${args_with_derivatives} )) {
 ASSIGN_GRAD_FN = CodeTemplate("""\
 grad_fn = std::make_shared<${op}>(${op_ctor});
-grad_fn->next_functions = compute_next_functions( ${args_with_derivatives} );
+grad_fn->next_functions = get_next_functions( ${args_with_derivatives} );
 """)
 
 CALL_VIA_TYPE = CodeTemplate("""\
diff --git a/tools/autograd/templates/VariableType.cpp b/tools/autograd/templates/VariableType.cpp
index 69cbf26823..68fda72e53 100644
--- a/tools/autograd/templates/VariableType.cpp
+++ b/tools/autograd/templates/VariableType.cpp
@@ -283,12 +283,6 @@ static void check_no_requires_grad(const Tensor& tensor, const char* name) {
   }
 }
 
-// NB: This should be called with Tensor/TensorList arguments (not Variables)
-template <typename... Args>
-static function_list compute_next_functions(Args&&... args) {
-  return Function::tensor_flags(std::forward<Args>(args)...).next_functions;
-}
-
 static void check_inplace(const Tensor& tensor) {
   auto& var = static_cast<const Variable&>(tensor);
   if (var.requires_grad() && var.is_leaf() && GradMode::is_enabled()) {
@@ -387,7 +381,7 @@ Tensor & VariableType::s_copy_(Tensor & self, const Tensor & src, bool non_block
   requires_grad &= isFloatingPoint(self.type().scalarType());
   if (requires_grad) {
     grad_fn = std::make_shared<CopyBackwards>();
-    grad_fn->next_functions = compute_next_functions( self, src );
+    grad_fn->next_functions = get_next_functions(self, src);
     grad_fn->num_inputs = 1;
     grad_fn->src_type = &src.type();
     grad_fn->src_device = src.is_cuda() ? src.get_device() : -1;
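The removed compute_next_functions() template recovered the graph edges indirectly via Function::tensor_flags(); after this change the generated code calls get_next_functions() on the inputs instead. Below is a minimal, self-contained C++17 sketch of that pattern: a backward node whose next_functions edges are gathered straight from the input variables. The Variable and Function structs and the variadic helper here are simplified stand-ins written for illustration, not PyTorch's actual types or API.

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Simplified stand-ins for the autograd types referenced in the diff.
struct Function;
using function_list = std::vector<std::shared_ptr<Function>>;

struct Function {
  std::string name;
  function_list next_functions;  // edges to the nodes that produced the inputs
  explicit Function(std::string n) : name(std::move(n)) {}
};

struct Variable {
  bool requires_grad = false;
  std::shared_ptr<Function> grad_fn;  // history node, if any
};

// Hypothetical helper playing the role of get_next_functions(): collect the
// grad_fn of every input that participates in autograd, directly from the
// variables, with no FunctionFlags-style intermediate object.
template <typename... Vars>
static function_list get_next_functions(const Vars&... vars) {
  function_list next;
  auto add_edge = [&next](const Variable& v) {
    if (v.requires_grad && v.grad_fn) {
      next.push_back(v.grad_fn);
    }
  };
  (add_edge(vars), ...);  // C++17 fold: visit each input in order
  return next;
}

int main() {
  Variable self, src;
  self.requires_grad = true;
  self.grad_fn = std::make_shared<Function>("AddBackward");
  src.requires_grad = false;  // no history, so it contributes no edge

  // Mirrors the shape of the generated code after the change: build the
  // backward node, then wire its next_functions straight from the inputs.
  auto grad_fn = std::make_shared<Function>("CopyBackwards");
  grad_fn->next_functions = get_next_functions(self, src);

  std::cout << grad_fn->name << " has " << grad_fn->next_functions.size()
            << " next function(s)\n";  // prints: CopyBackwards has 1 next function(s)
}
```

Run as a plain `g++ -std=c++17` program, the sketch reports one edge, since only `self` carries gradient history; `src` is skipped exactly as a non-differentiable input would be.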