diff options
author | James Reed <jamesreed@fb.com> | 2019-01-31 14:13:45 -0800 |
---|---|---|
committer | Facebook Github Bot <facebook-github-bot@users.noreply.github.com> | 2019-01-31 14:35:56 -0800 |
commit | dfb081a7e4d8cbef53084eb17968e837a825b248 (patch) | |
tree | f201d81c69511a1fdb12d5d73cba664ce74f1e40 /aten | |
parent | 3f8fd19a868eb4aa1154744fb484e7e2cb555aec (diff) | |
download | pytorch-dfb081a7e4d8cbef53084eb17968e837a825b248.tar.gz pytorch-dfb081a7e4d8cbef53084eb17968e837a825b248.tar.bz2 pytorch-dfb081a7e4d8cbef53084eb17968e837a825b248.zip |
Fix a lot of C++ build warnings (#16411)
Summary:
I went through my build log and made what I thought were reasonable fixes to all the C++ compilation warnings that came up.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16411
Differential Revision: D13901006
Pulled By: jamesr66a
fbshipit-source-id: 02df4e3e5a5c8dd9e69ac9f065cd3f2a80645033
Diffstat (limited to 'aten')
-rw-r--r-- | aten/src/ATen/SparseTensorImpl.h | 4 | ||||
-rw-r--r-- | aten/src/ATen/core/jit_type.h | 8 | ||||
-rw-r--r-- | aten/src/ATen/core/stack.h | 2 | ||||
-rw-r--r-- | aten/src/ATen/test/basic.cpp | 3 |
4 files changed, 8 insertions, 9 deletions
diff --git a/aten/src/ATen/SparseTensorImpl.h b/aten/src/ATen/SparseTensorImpl.h index 84ca155b3c..7afc03d31f 100644 --- a/aten/src/ATen/SparseTensorImpl.h +++ b/aten/src/ATen/SparseTensorImpl.h @@ -88,7 +88,7 @@ public: // (this could make some of the stored indices out-of-bound and thus unsafe). void resize_(int64_t sparse_dim, int64_t dense_dim, IntList size) { AT_CHECK(allow_tensor_metadata_change(), "resize_ is not allowed on Tensor created from .data or .detach()"); - AT_CHECK(sparse_dim + dense_dim == size.size(), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size()); + AT_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size()); if (nnz() > 0) { auto alt_options_msg = "You could try the following options:\n\ 1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\ @@ -146,7 +146,7 @@ public: // NOTE: this function will resize the sparse tensor and also set `indices` and `values` to empty. 
void resize_and_clear_(int64_t sparse_dim, int64_t dense_dim, IntList size) { AT_CHECK(allow_tensor_metadata_change(), "resize_and_clear_ is not allowed on Tensor created from .data or .detach()"); - AT_CHECK(sparse_dim + dense_dim == size.size(), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size()); + AT_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size()); sizes_ = size.vec(); sparse_dim_ = sparse_dim; diff --git a/aten/src/ATen/core/jit_type.h b/aten/src/ATen/core/jit_type.h index 54b1d02126..bca3f3b46e 100644 --- a/aten/src/ATen/core/jit_type.h +++ b/aten/src/ATen/core/jit_type.h @@ -309,7 +309,7 @@ struct CAFFE2_API TensorType : public DynamicType { at::ScalarType scalarType() const { return scalar_type_; } at::Device device() const { return device_; } - int dim() const { return dim_; } + int64_t dim() const { return dim_; } bool requires_grad() const override { return requires_grad_; } TensorTypePtr toScalarType(at::ScalarType type){ @@ -317,7 +317,7 @@ struct CAFFE2_API TensorType : public DynamicType { t->scalar_type_ = type; return t; } - TensorTypePtr withDim(int new_dim) { + TensorTypePtr withDim(size_t new_dim) { auto t = TensorType::create(*this); t->dim_ = new_dim; return t; @@ -360,7 +360,7 @@ protected: tensor.dim(), tensor.is_variable() && tensor.requires_grad(), kind) {} - TensorType(at::ScalarType scalar_type, at::Device device, int dim, bool requires_grad=true, TypeKind kind=TypeKind::TensorType) + TensorType(at::ScalarType scalar_type, at::Device device, int64_t dim, bool requires_grad=true, TypeKind kind=TypeKind::TensorType) : DynamicType(kind) , scalar_type_(scalar_type) , requires_grad_(at::isFloatingType(scalar_type) && requires_grad) @@ -370,7 +370,7 @@ protected: at::ScalarType scalar_type_; bool requires_grad_; at::Device device_; - int dim_; + 
int64_t dim_; }; struct CompleteTensorType; diff --git a/aten/src/ATen/core/stack.h b/aten/src/ATen/core/stack.h index 42d1b58bd2..32c07a4d53 100644 --- a/aten/src/ATen/core/stack.h +++ b/aten/src/ATen/core/stack.h @@ -67,7 +67,7 @@ static inline void pop(Stack& stack, Types&... args) { } template <typename... Types> static inline void push(Stack& stack, Types&&... args) { - std::initializer_list<int>{(stack.emplace_back(std::forward<Types>(args)), 0)...}; + (void)std::initializer_list<int>{(stack.emplace_back(std::forward<Types>(args)), 0)...}; } // The packer here is carefully written not to make any unnecessary diff --git a/aten/src/ATen/test/basic.cpp b/aten/src/ATen/test/basic.cpp index 7d301b4dba..83a2340dd4 100644 --- a/aten/src/ATen/test/basic.cpp +++ b/aten/src/ATen/test/basic.cpp @@ -207,8 +207,7 @@ void TestTensorFromTH() { int a = 4; THFloatTensor* t = THFloatTensor_newWithSize2d(a, a); THFloatTensor_fill(t, a); - Tensor tt = CPU(kFloat).unsafeTensorFromTH(t, false); - ASSERT_NO_THROW(tt); + ASSERT_NO_THROW(CPU(kFloat).unsafeTensorFromTH(t, false)); } void TestToCFloat() { |