Author:    Jerry Zhang <jerryzh@fb.com>  2018-10-24 16:27:41 -0700
Committer: Facebook Github Bot <facebook-github-bot@users.noreply.github.com>  2018-10-24 16:32:51 -0700
Commit:    b790fcaf3940e569d4ab843f35cbe5fbc57508f5 (patch)
Tree:      2af1847e2773132869abba671106ef062c1acefc /caffe2/queue
Parent:    a4475d529d8a80c81fb0c1e0bc0f02f40ac53282 (diff)
Renaming dims() to sizes() (caffe2/caffe2) - 4/4
Summary: Codemod generated with clangr shard mode, 25 files per diff, for renaming dims() to sizes().

Reviewed By: ezyang

Differential Revision: D10842900

fbshipit-source-id: 8d58ed4d403fb0308a8fa286659f8e830b040bec
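For context, the call-site change is mechanical: every read of a tensor's shape through dims() becomes a read through sizes(), with the same indexing behavior. Below is a minimal, hedged sketch of the before/after pattern. FakeTensor is a stand-in written for illustration, not the real caffe2::Tensor; only the accessor names dims()/sizes() mirror what this diff actually touches.

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Stand-in for the shape-reporting part of a tensor (illustrative only).
    class FakeTensor {
     public:
      explicit FakeTensor(std::vector<int64_t> shape) : shape_(std::move(shape)) {}

      // Old accessor name, kept here as an alias for the migration window.
      const std::vector<int64_t>& dims() const { return sizes(); }
      // New accessor name that the codemod switches call sites to.
      const std::vector<int64_t>& sizes() const { return shape_; }

     private:
      std::vector<int64_t> shape_;
    };

    int main() {
      FakeTensor t({4, 3, 2});
      // Before the codemod a call site read t.dims()[0]; after, t.sizes()[0].
      // Both return the leading (batch) dimension, which is what the queue
      // code uses to extend or split along axis 0.
      std::cout << "batch size = " << t.sizes()[0] << "\n";  // prints 4
      std::cout << "same value = " << t.dims()[0] << "\n";   // alias, prints 4
      return 0;
    }

Since the two accessors return the same data, a sharded codemod like this one can land incrementally without breaking callers that still use the old name.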
Diffstat (limited to 'caffe2/queue')
-rw-r--r--  caffe2/queue/queue_ops.h          | 2 +-
-rw-r--r--  caffe2/queue/rebatching_queue.cc  | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/caffe2/queue/queue_ops.h b/caffe2/queue/queue_ops.h
index 8e924176a0..dac7caecd3 100644
--- a/caffe2/queue/queue_ops.h
+++ b/caffe2/queue/queue_ops.h
@@ -160,7 +160,7 @@ class SafeDequeueBlobsOp final : public Operator<Context> {
size,
" total columns");
- out->Extend(in.dims()[0], kTensorGrowthPct, &context_);
+ out->Extend(in.sizes()[0], kTensorGrowthPct, &context_);
auto* dst =
(char*)out->raw_mutable_data() + oldSize * in.meta().itemsize();
context_.template CopyItems<Context, Context>(
diff --git a/caffe2/queue/rebatching_queue.cc b/caffe2/queue/rebatching_queue.cc
index 6131d974c4..52b304006a 100644
--- a/caffe2/queue/rebatching_queue.cc
+++ b/caffe2/queue/rebatching_queue.cc
@@ -42,7 +42,7 @@ void concat(
CAFFE_ENFORCE_EQ(inputZero[j].itemsize(), input.itemsize());
CAFFE_ENFORCE_EQ(inputZero[j].ndim(), input.ndim());
for (int k = 0; k < input.ndim(); ++k) {
- CAFFE_ENFORCE_EQ(input.dims()[k], inputZero[j].dims()[k]);
+ CAFFE_ENFORCE_EQ(input.sizes()[k], inputZero[j].dims()[k]);
}
// Skip empty tensors
@@ -68,7 +68,7 @@ std::vector<std::vector<TensorCPU>> split(
const std::vector<const TensorCPU*>& inputs) {
CAFFE_ENFORCE(!inputs.empty());
- const auto outputSize = inputs[0]->dims().at(0);
+ const auto outputSize = inputs[0]->sizes().at(0);
std::vector<std::vector<TensorCPU>> outputs(outputSize);
for (const auto* inputPtr : inputs) {
@@ -78,10 +78,10 @@ std::vector<std::vector<TensorCPU>> split(
const auto innerSize = input.size_from_dim(1);
const auto itemSize = input.meta().itemsize();
- auto outputDims = input.dims().vec();
+ auto outputDims = input.sizes().vec();
CAFFE_ENFORCE(!outputDims.empty());
outputDims.erase(outputDims.begin());
- CAFFE_ENFORCE_EQ(input.dims().at(0), outputSize);
+ CAFFE_ENFORCE_EQ(input.sizes().at(0), outputSize);
for (int i = 0; i < outputSize; ++i) {
outputs[i].push_back(Tensor(outputDims, CPU));