author    Edward Yang <ezyang@fb.com>  2019-02-05 14:39:43 -0800
committer Facebook Github Bot <facebook-github-bot@users.noreply.github.com>  2019-02-05 14:54:34 -0800
commit    4404762d7dd955383acee92e6f06b48144a0742e
tree      ff8daf228be7b12c4c6a82d554689571c92d581d /caffe2/core
parent    e2d3a3fd6a248a788e3d548bb1caff9019c585ef
Rename IntList to IntArrayRef. (#16751)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16751
This was made more complicated by the fact that ivalue::IntList
is a thing. So I had to fix, post facto, all of the sites where we
were referring to the IValue type (see the sketch after the codemod
list below).
The following codemods were run, in this order:
```
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntList IntArrayRef
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntArrayRef::create IntList::create
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in ivalue::IntArrayRef ivalue::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in Tag::IntArrayRef Tag::IntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in isIntArrayRef isIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in toIntArrayRef toIntList
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'Shared<IntArrayRef>' 'Shared<IntList>'
codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'intrusive_ptr<IntArrayRef>' 'intrusive_ptr<IntList>'
```
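To make the reversals above concrete: at::IntArrayRef (formerly at::IntList) is a non-owning view over a contiguous int64_t sequence, while ivalue::IntList is the owning, refcounted list payload inside IValue, so only the former spelling changes and the subsequent codemods restore the IValue-side names. A minimal sketch of the view semantics (illustration only, not part of this patch):
```
#include <ATen/ATen.h>

#include <vector>

// at::IntArrayRef is an alias for ArrayRef<int64_t>: a pointer plus a
// length. It never owns its storage, so it is cheap to pass by value.
int64_t product(at::IntArrayRef dims) {
  int64_t n = 1;
  for (int64_t d : dims) {
    n *= d;
  }
  return n;
}

void demo() {
  std::vector<int64_t> dims = {2, 3, 4};
  product(dims);    // wraps the vector's buffer, no copy
  product({5, 6});  // wraps a temporary initializer list for this call only
}
```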
Some manual fixups were done afterwards; they can be reviewed separately
at https://github.com/pytorch/pytorch/pull/16752
Reviewed By: dzhulgakov
Differential Revision: D13954363
fbshipit-source-id: b5c40aacba042402155a2f5a229fa6db7992ac64
Diffstat (limited to 'caffe2/core')
-rw-r--r-- caffe2/core/blob.h     |  6
-rw-r--r-- caffe2/core/operator.h |  8
-rw-r--r-- caffe2/core/tensor.cc  |  4
-rw-r--r-- caffe2/core/tensor.h   | 14
4 files changed, 16 insertions, 16 deletions
diff --git a/caffe2/core/blob.h b/caffe2/core/blob.h
index 4613aee6a5..f919a7ac16 100644
--- a/caffe2/core/blob.h
+++ b/caffe2/core/blob.h
@@ -30,7 +30,7 @@ inline Tensor* BlobSetTensor(Blob* blob, Tensor&& tensor) {
 
 inline Tensor GetSizedTensorWithOptions(
     Tensor&& previous_tensor,
-    at::IntList dims,
+    at::IntArrayRef dims,
     at::TensorOptions options) {
   Tensor tensor = std::move(previous_tensor);
   if (!tensor.defined()) {
@@ -57,7 +57,7 @@ inline Tensor GetSizedTensorWithOptions(
 // need to keep both functions that returns Tensor* and the one
 // returns Tensor for clangr codemod
 inline Tensor*
-BlobGetMutableTensor(Blob* blob, at::IntList dims, at::TensorOptions options) {
+BlobGetMutableTensor(Blob* blob, at::IntArrayRef dims, at::TensorOptions options) {
   if (blob->IsType<Tensor>()) {
     Tensor* tensor = blob->GetMutable<Tensor>();
     if (*tensor) {
@@ -86,7 +86,7 @@ BlobGetMutableTensor(Blob* blob, at::IntList dims, at::TensorOptions options) {
 }
 
 inline Tensor
-XBlobGetMutableTensor(Blob* blob, at::IntList dims, at::TensorOptions options) {
+XBlobGetMutableTensor(Blob* blob, at::IntArrayRef dims, at::TensorOptions options) {
   return BlobGetMutableTensor(blob, dims, options)->UnsafeSharedInstance();
 }
diff --git a/caffe2/core/operator.h b/caffe2/core/operator.h
index 423a1b73f2..eea3a58d29 100644
--- a/caffe2/core/operator.h
+++ b/caffe2/core/operator.h
@@ -196,7 +196,7 @@ class CAFFE2_API OperatorBase : public Observable<OperatorBase> {
   }
 
   inline Tensor
-  XOutputTensor(int idx, at::IntList dims, at::TensorOptions options) {
+  XOutputTensor(int idx, at::IntArrayRef dims, at::TensorOptions options) {
     CAFFE_ENFORCE_WITH_CALLER(
         options.device_opt() != c10::nullopt,
         "device must be provided in option.");
@@ -214,7 +214,7 @@ class CAFFE2_API OperatorBase : public Observable<OperatorBase> {
   }
 
   inline Tensor*
-  OutputTensor(int idx, at::IntList dims, at::TensorOptions options) {
+  OutputTensor(int idx, at::IntArrayRef dims, at::TensorOptions options) {
     if (isLegacyOperator()) {
       CAFFE_ENFORCE_WITH_CALLER(
           options.device_opt() != c10::nullopt,
@@ -667,7 +667,7 @@ class Operator : public OperatorBase {
     return OperatorBase::template Input<Tensor>(idx, type);
   }
 
-  Tensor XOutput(int idx, at::IntList dims, at::TensorOptions options) {
+  Tensor XOutput(int idx, at::IntArrayRef dims, at::TensorOptions options) {
     // We'll default device to the device of the current Operator Context
     if (options.device_opt() == c10::nullopt) {
       return OperatorBase::XOutputTensor(
@@ -724,7 +724,7 @@ class Operator : public OperatorBase {
   /// helpful, as we may be able to fit the output in the same
   /// space that was previously used.
   ///
-  Tensor* Output(int idx, at::IntList dims, at::TensorOptions options) {
+  Tensor* Output(int idx, at::IntArrayRef dims, at::TensorOptions options) {
     // We'll default device to the device of the current Operator Context
     if (options.device_opt() == c10::nullopt) {
       return OperatorBase::OutputTensor(
diff --git a/caffe2/core/tensor.cc b/caffe2/core/tensor.cc
index 8bd5190710..afcae812be 100644
--- a/caffe2/core/tensor.cc
+++ b/caffe2/core/tensor.cc
@@ -117,7 +117,7 @@ void TensorVectorResize(
   }
 }
 
-Tensor empty(at::IntList dims, at::TensorOptions options) {
+Tensor empty(at::IntArrayRef dims, at::TensorOptions options) {
   // TODO: merge this with at::empty after Tensor is merged
   auto tensor = Tensor(dims, options.device());
   tensor.raw_mutable_data(options.dtype());
@@ -126,7 +126,7 @@ Tensor empty(at::IntList dims, at::TensorOptions options) {
 
 void ReinitializeTensor(
     Tensor* tensor,
-    at::IntList dims,
+    at::IntArrayRef dims,
     at::TensorOptions options) {
   CAFFE_ENFORCE(options.device_opt() != c10::nullopt);
   if (*tensor) {
diff --git a/caffe2/core/tensor.h b/caffe2/core/tensor.h
index 5e042a5977..f86f93c053 100644
--- a/caffe2/core/tensor.h
+++ b/caffe2/core/tensor.h
@@ -83,7 +83,7 @@ class CAFFE2_API Tensor final {
    * Note that the actual data allocation is not going to be carried out until
    * the first time mutable_data() is called.
    */
-  explicit Tensor(at::IntList dims, DeviceType type) : Tensor(type) {
+  explicit Tensor(at::IntArrayRef dims, DeviceType type) : Tensor(type) {
     // TODO: here, we create a Storage
     // and immediately discard it in Resize() since
     // reset_tensor will be true and FreeMemory will be called,
@@ -92,7 +92,7 @@ class CAFFE2_API Tensor final {
   }
 
   // we want to preserve index information
-  explicit Tensor(at::IntList dims, at::Device device): Tensor(device) {
+  explicit Tensor(at::IntArrayRef dims, at::Device device): Tensor(device) {
     Resize(dims);
   }
 
@@ -500,7 +500,7 @@ class CAFFE2_API Tensor final {
     return impl_->numel() * itemsize();
   }
 
-  inline at::IntList sizes() const {
+  inline at::IntArrayRef sizes() const {
     return impl_.get()->sizes();
   }
 
@@ -535,7 +535,7 @@ class CAFFE2_API Tensor final {
     return impl_.get()->stride(dim);
   }
 
-  inline at::IntList strides() const {
+  inline at::IntArrayRef strides() const {
     return impl_.get()->strides();
   }
 
@@ -614,7 +614,7 @@ class CAFFE2_API Tensor final {
  * this will not do anything if the
  * Tensor already has correct size and data type
 */
-CAFFE2_API void ReinitializeTensor(Tensor* t, at::IntList dims, at::TensorOptions options);
+CAFFE2_API void ReinitializeTensor(Tensor* t, at::IntArrayRef dims, at::TensorOptions options);
 
 CAFFE2_API void ReinitializeAndCopyFrom(
     Tensor* t,
@@ -651,7 +651,7 @@ void TensorVectorResize(
     DeviceType type);
 
 // Tensor factory function
-CAFFE2_API Tensor empty(at::IntList dims, at::TensorOptions options);
+CAFFE2_API Tensor empty(at::IntArrayRef dims, at::TensorOptions options);
 
 /**
  * @brief Creates a CPU tensor, and fills its contents with the given values.
 */
@@ -660,7 +660,7 @@ CAFFE2_API Tensor empty(at::IntList dims, at::TensorOptions options);
 // TODO: can be unified with at::from_blob when Tensor is merged and string
 // types are supported
 template <typename T>
-Tensor TensorCPUFromValues(at::IntList dims, at::ArrayRef<T> values) {
+Tensor TensorCPUFromValues(at::IntArrayRef dims, at::ArrayRef<T> values) {
   Tensor r = empty(dims, at::device(CPU).dtype<T>());
   CAFFE_ENFORCE_EQ(values.size(), r.numel());
   CPUContext context;
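For context, a minimal call-site sketch against the renamed caffe2::empty declared above (illustration only, not part of this patch; it mirrors the TensorCPUFromValues body in the diff and assumes caffe2's CPU DeviceType constant is available). A braced dimension list converts implicitly to at::IntArrayRef, which is why existing call sites compile unchanged across the rename:
```
#include "caffe2/core/tensor.h"

void example() {
  // {2, 3} binds to the at::IntArrayRef parameter of the renamed factory;
  // at::device(...).dtype<float>() builds the TensorOptions, as in
  // TensorCPUFromValues above.
  caffe2::Tensor t =
      caffe2::empty({2, 3}, at::device(caffe2::CPU).dtype<float>());
}
```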