summaryrefslogtreecommitdiff
path: root/caffe2
diff options
context:
space:
mode:
authorJerry Zhang <jerryzh@fb.com>2018-10-24 16:27:41 -0700
committerFacebook Github Bot <facebook-github-bot@users.noreply.github.com>2018-10-24 16:32:51 -0700
commitb790fcaf3940e569d4ab843f35cbe5fbc57508f5 (patch)
tree2af1847e2773132869abba671106ef062c1acefc /caffe2
parenta4475d529d8a80c81fb0c1e0bc0f02f40ac53282 (diff)
downloadpytorch-b790fcaf3940e569d4ab843f35cbe5fbc57508f5.tar.gz
pytorch-b790fcaf3940e569d4ab843f35cbe5fbc57508f5.tar.bz2
pytorch-b790fcaf3940e569d4ab843f35cbe5fbc57508f5.zip
Renaming dims() to sizes() (caffe2/caffe2) - 4/4
Summary: Codemod generated with clangr shard mode, 25 files per diff, for renaming dims() to sizes(). Reviewed By: ezyang. Differential Revision: D10842900. fbshipit-source-id: 8d58ed4d403fb0308a8fa286659f8e830b040bec
Diffstat (limited to 'caffe2')
-rw-r--r--caffe2/python/pybind_state.h2
-rw-r--r--caffe2/python/pybind_state_dlpack.h2
-rw-r--r--caffe2/python/pybind_state_gpu.cc2
-rw-r--r--caffe2/python/pybind_state_int8.cc4
-rw-r--r--caffe2/queue/queue_ops.h2
-rw-r--r--caffe2/queue/rebatching_queue.cc8
-rw-r--r--caffe2/sgd/adagrad_op.h2
-rw-r--r--caffe2/sgd/adam_op.h2
8 files changed, 12 insertions, 12 deletions
diff --git a/caffe2/python/pybind_state.h b/caffe2/python/pybind_state.h
index 340fee2eff..3205e0ac08 100644
--- a/caffe2/python/pybind_state.h
+++ b/caffe2/python/pybind_state.h
@@ -128,7 +128,7 @@ class TensorFetcher : public BlobFetcherBase {
tensor.meta().name(),
".");
std::vector<npy_intp> npy_dims;
- for (const auto dim : tensor.dims()) {
+ for (const auto dim : tensor.sizes()) {
npy_dims.push_back(dim);
}
result.copied = force_copy || NeedsCopy(&tensor, tensor.meta());
diff --git a/caffe2/python/pybind_state_dlpack.h b/caffe2/python/pybind_state_dlpack.h
index 6db4ae42b8..68383d8f8f 100644
--- a/caffe2/python/pybind_state_dlpack.h
+++ b/caffe2/python/pybind_state_dlpack.h
@@ -57,7 +57,7 @@ class DLPackWrapper {
dlTensor.ctx = tensor_context;
dlTensor.ndim = tensor->ndim();
dlTensor.dtype = tensor_type;
- dlTensor.shape = const_cast<int64_t*>(&(tensor->dims()[0]));
+ dlTensor.shape = const_cast<int64_t*>(&(tensor->sizes()[0]));
dlTensor.strides = nullptr;
dlTensor.byte_offset = 0;
diff --git a/caffe2/python/pybind_state_gpu.cc b/caffe2/python/pybind_state_gpu.cc
index 3893be96ff..d5a561a7dc 100644
--- a/caffe2/python/pybind_state_gpu.cc
+++ b/caffe2/python/pybind_state_gpu.cc
@@ -143,7 +143,7 @@ void addCUDAObjectMethods(py::module& m) {
"Copy data from given DLPack tensor into this tensor.")
.def_property_readonly(
"_shape",
- [](const DLPackWrapper<CUDAContext>& t) { return t.tensor->dims(); })
+ [](const DLPackWrapper<CUDAContext>& t) { return t.tensor->sizes(); })
.def(
"_reshape",
[](DLPackWrapper<CUDAContext>* t, std::vector<int64_t> dims) {
diff --git a/caffe2/python/pybind_state_int8.cc b/caffe2/python/pybind_state_int8.cc
index f60e4e3cc5..5f77cf99dc 100644
--- a/caffe2/python/pybind_state_int8.cc
+++ b/caffe2/python/pybind_state_int8.cc
@@ -38,11 +38,11 @@ class Int8TensorFetcher : public BlobFetcherBase {
const int numpy_type = CaffeToNumpyType(src.t.meta());
CAFFE_ENFORCE(numpy_type != -1, "Int8Tensor contains unknown type data");
std::vector<npy_intp> npy_dims;
- for (const auto dim : src.t.dims()) {
+ for (const auto dim : src.t.sizes()) {
npy_dims.push_back(dim);
}
auto data_array = pybind11::reinterpret_steal<pybind11::object>(
- PyArray_SimpleNew(src.t.dims().size(), npy_dims.data(), numpy_type));
+ PyArray_SimpleNew(src.t.sizes().size(), npy_dims.data(), numpy_type));
void* ptr = static_cast<void*>(
PyArray_DATA(reinterpret_cast<PyArrayObject*>(data_array.ptr())));
CPUContext context;
diff --git a/caffe2/queue/queue_ops.h b/caffe2/queue/queue_ops.h
index 8e924176a0..dac7caecd3 100644
--- a/caffe2/queue/queue_ops.h
+++ b/caffe2/queue/queue_ops.h
@@ -160,7 +160,7 @@ class SafeDequeueBlobsOp final : public Operator<Context> {
size,
" total columns");
- out->Extend(in.dims()[0], kTensorGrowthPct, &context_);
+ out->Extend(in.sizes()[0], kTensorGrowthPct, &context_);
auto* dst =
(char*)out->raw_mutable_data() + oldSize * in.meta().itemsize();
context_.template CopyItems<Context, Context>(
diff --git a/caffe2/queue/rebatching_queue.cc b/caffe2/queue/rebatching_queue.cc
index 6131d974c4..52b304006a 100644
--- a/caffe2/queue/rebatching_queue.cc
+++ b/caffe2/queue/rebatching_queue.cc
@@ -42,7 +42,7 @@ void concat(
CAFFE_ENFORCE_EQ(inputZero[j].itemsize(), input.itemsize());
CAFFE_ENFORCE_EQ(inputZero[j].ndim(), input.ndim());
for (int k = 0; k < input.ndim(); ++k) {
- CAFFE_ENFORCE_EQ(input.dims()[k], inputZero[j].dims()[k]);
+ CAFFE_ENFORCE_EQ(input.sizes()[k], inputZero[j].dims()[k]);
}
// Skip empty tensors
@@ -68,7 +68,7 @@ std::vector<std::vector<TensorCPU>> split(
const std::vector<const TensorCPU*>& inputs) {
CAFFE_ENFORCE(!inputs.empty());
- const auto outputSize = inputs[0]->dims().at(0);
+ const auto outputSize = inputs[0]->sizes().at(0);
std::vector<std::vector<TensorCPU>> outputs(outputSize);
for (const auto* inputPtr : inputs) {
@@ -78,10 +78,10 @@ std::vector<std::vector<TensorCPU>> split(
const auto innerSize = input.size_from_dim(1);
const auto itemSize = input.meta().itemsize();
- auto outputDims = input.dims().vec();
+ auto outputDims = input.sizes().vec();
CAFFE_ENFORCE(!outputDims.empty());
outputDims.erase(outputDims.begin());
- CAFFE_ENFORCE_EQ(input.dims().at(0), outputSize);
+ CAFFE_ENFORCE_EQ(input.sizes().at(0), outputSize);
for (int i = 0; i < outputSize; ++i) {
outputs[i].push_back(Tensor(outputDims, CPU));
diff --git a/caffe2/sgd/adagrad_op.h b/caffe2/sgd/adagrad_op.h
index ec88a124d4..de330b2ad0 100644
--- a/caffe2/sgd/adagrad_op.h
+++ b/caffe2/sgd/adagrad_op.h
@@ -250,7 +250,7 @@ class RowWiseSparseAdagradOp final : public Operator<Context> {
bool RunOnDevice() override {
// Enforce shapes
- CAFFE_ENFORCE_EQ(Input(PARAM).dims()[0], Input(MOMENT_1).size());
+ CAFFE_ENFORCE_EQ(Input(PARAM).sizes()[0], Input(MOMENT_1).size());
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
diff --git a/caffe2/sgd/adam_op.h b/caffe2/sgd/adam_op.h
index 699ba7aa5d..0cf2f417cd 100644
--- a/caffe2/sgd/adam_op.h
+++ b/caffe2/sgd/adam_op.h
@@ -333,7 +333,7 @@ class RowWiseSparseAdamOp final : public Operator<Context> {
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());
- CAFFE_ENFORCE_EQ(Input(PARAM).dims()[0], Input(MOMENT_2).size());
+ CAFFE_ENFORCE_EQ(Input(PARAM).sizes()[0], Input(MOMENT_2).size());
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));