 Makefile                                                 | 29
 include/caffe/blob.hpp                                   |  4
 include/caffe/common.hpp                                 |  5
 include/caffe/data_layers.hpp                            |  4
 src/caffe/common.cpp                                     |  7
 src/caffe/layers/data_layer.cpp                          |  2
 src/caffe/layers/hdf5_data_layer.cpp                     |  1
 src/caffe/layers/pooling_layer.cpp                       |  8
 src/caffe/layers/tanh_layer.cpp                          |  1
 src/caffe/layers/window_data_layer.cpp                   |  7
 src/caffe/net.cpp                                        |  3
 src/caffe/test/test_data_layer.cpp                       |  6
 src/caffe/test/test_hdf5data_layer.cpp                   |  7
 src/caffe/test/test_image_data_layer.cpp                 |  6
 src/caffe/test/test_lrn_layer.cpp                        |  7
 src/caffe/test/test_memory_data_layer.cpp                |  7
 src/caffe/test/test_neuron_layer.cpp                     |  2
 src/caffe/test/test_random_number_generator.cpp          |  4
 src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp |  1
 src/caffe/util/io.cpp                                    |  4
 src/gtest/gtest-all.cpp                                  |  1
 tools/convert_imageset.cpp                               |  2
 tools/dump_network.cpp                                   |  2
 tools/extract_features.cpp                               |  1
 24 files changed, 63 insertions(+), 58 deletions(-)
diff --git a/Makefile b/Makefile
index 943165a4..77e2ff5e 100644
--- a/Makefile
+++ b/Makefile
@@ -134,7 +134,7 @@ LIBRARIES := cudart cublas curand \
hdf5_hl hdf5 \
opencv_core opencv_highgui opencv_imgproc
PYTHON_LIBRARIES := boost_python python2.7
-WARNINGS := -Wall
+WARNINGS := -Wall -Wno-sign-compare
##############################
# Set build directories
@@ -168,6 +168,11 @@ endif
ifeq ($(LINUX), 1)
CXX := /usr/bin/g++
+ GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.)
+ # older versions of gcc cannot build boost cleanly with -Wuninitialized
+ ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1)
+ WARNINGS += -Wno-uninitialized
+ endif
endif
# OS X:
@@ -175,8 +180,11 @@ endif
# libstdc++ instead of libc++ for CUDA compatibility on 10.9
ifeq ($(OSX), 1)
CXX := /usr/bin/clang++
+ # clang throws this warning for cuda headers
+ WARNINGS += -Wno-unneeded-internal-declaration
ifneq ($(findstring 10.9, $(shell sw_vers -productVersion)),)
CXXFLAGS += -stdlib=libstdc++
+ LINKFLAGS += -stdlib=libstdc++
endif
endif
@@ -218,8 +226,11 @@ LIBRARY_DIRS += $(BLAS_LIB)
# Complete build flags.
COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))
-CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS)
+CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)
NVCCFLAGS := -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)
+# mex may invoke an older gcc that is too liberal with -Wuninitialized
+MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized
+LINKFLAGS += -fPIC $(COMMON_FLAGS) $(WARNINGS)
LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) \
$(foreach library,$(LIBRARIES),-l$(library))
PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library))
@@ -269,7 +280,7 @@ py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY)
$(PY$(PROJECT)_SO): $(STATIC_NAME) $(PY$(PROJECT)_SRC)
$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \
- $(STATIC_NAME) $(CXXFLAGS) $(PYTHON_LDFLAGS)
+ $(STATIC_NAME) $(LINKFLAGS) $(PYTHON_LDFLAGS)
@ echo
mat$(PROJECT): mat
@@ -283,7 +294,7 @@ $(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)
exit 1; \
fi
$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) $(STATIC_NAME) \
- CXXFLAGS="\$$CXXFLAGS $(CXXFLAGS) $(WARNINGS)" \
+ CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \
CXXLIBS="\$$CXXLIBS $(LDFLAGS)" -o $@
@ echo
@@ -306,7 +317,7 @@ $(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK)
@ mkdir -p $@
$(NAME): $(PROTO_OBJS) $(OBJS) | $(LIB_BUILD_DIR)
- $(CXX) -shared -o $@ $(OBJS) $(CXXFLAGS) $(LDFLAGS) $(WARNINGS)
+ $(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS)
@ echo
$(STATIC_NAME): $(PROTO_OBJS) $(OBJS) | $(LIB_BUILD_DIR)
@@ -321,21 +332,21 @@ $(TEST_BUILD_DIR)/%.o: src/$(PROJECT)/test/%.cpp $(HXX_SRCS) $(TEST_HDRS) \
$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) $(STATIC_NAME) \
| $(TEST_BIN_DIR)
$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) $(STATIC_NAME) \
- -o $@ $(CXXFLAGS) $(LDFLAGS) $(WARNINGS)
+ -o $@ $(LINKFLAGS) $(LDFLAGS)
@ echo
$(TEST_BIN_DIR)/%.testbin: $(TEST_BUILD_DIR)/%.o $(GTEST_OBJ) $(STATIC_NAME) \
| $(TEST_BIN_DIR)
$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) $(STATIC_NAME) \
- -o $@ $(CXXFLAGS) $(LDFLAGS) $(WARNINGS)
+ -o $@ $(LINKFLAGS) $(LDFLAGS)
@ echo
$(TOOL_BINS): %.bin : %.o $(STATIC_NAME)
- $(CXX) $< $(STATIC_NAME) -o $@ $(CXXFLAGS) $(LDFLAGS) $(WARNINGS)
+ $(CXX) $< $(STATIC_NAME) -o $@ $(LINKFLAGS) $(LDFLAGS)
@ echo
$(EXAMPLE_BINS): %.bin : %.o $(STATIC_NAME)
- $(CXX) $< $(STATIC_NAME) -o $@ $(CXXFLAGS) $(LDFLAGS) $(WARNINGS)
+ $(CXX) $< $(STATIC_NAME) -o $@ $(LINKFLAGS) $(LDFLAGS)
@ echo
$(LAYER_BUILD_DIR)/%.o: src/$(PROJECT)/layers/%.cpp $(HXX_SRCS) \
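Background on the new -Wno-sign-compare flag: -Wall implies -Wsign-compare, which fires on the ubiquitous idiom of indexing a container with a signed int, so silencing it is what makes enabling -Wall across the whole tree practical. A minimal sketch (not from this patch) of the code shape that would otherwise warn:

#include <vector>

int count_positive(const std::vector<double>& v) {
  int n = 0;
  // v.size() is size_t (unsigned); comparing it against a signed int
  // triggers -Wsign-compare under -Wall.
  for (int i = 0; i < v.size(); ++i) {
    if (v[i] > 0) ++n;
  }
  return n;
}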
diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp
index 75101462..c04375a1 100644
--- a/include/caffe/blob.hpp
+++ b/include/caffe/blob.hpp
@@ -13,8 +13,8 @@ template <typename Dtype>
class Blob {
public:
Blob()
- : num_(0), channels_(0), height_(0), width_(0), count_(0), data_(),
- diff_() {}
+ : data_(), diff_(), num_(0), channels_(0), height_(0), width_(0),
+ count_(0) {}
explicit Blob(const int num, const int channels, const int height,
const int width);
void Reshape(const int num, const int channels, const int height,
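The reordering above is for -Wreorder (implied by -Wall for C++): members are initialized in declaration order, not in the order written in the initializer list, so the list is rewritten to match the declarations in blob.hpp. A hypothetical illustration of the hazard the warning guards against:

class Example {
 public:
  // b_ is declared after a_, so a_ is always initialized first,
  // whatever order the initializer list uses; writing ': b_(0), a_(b_)'
  // would read b_ before it is set, and draws -Wreorder.
  Example() : a_(0), b_(a_ + 1) {}
 private:
  int a_;
  int b_;
};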
diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp
index 7bfa5d40..bd4e39f1 100644
--- a/include/caffe/common.hpp
+++ b/include/caffe/common.hpp
@@ -56,11 +56,6 @@ private:\
// CUDA: check for error after kernel execution and exit loudly if there is one.
#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError())
-// Define not supported status for pre-6.0 compatibility.
-#if CUDA_VERSION < 6000
-#define CUBLAS_STATUS_NOT_SUPPORTED 831486
-#endif
-
namespace caffe {
// We will use the boost shared_ptr instead of the new C++11 one mainly
diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp
index 2b4c278e..fc93daac 100644
--- a/include/caffe/data_layers.hpp
+++ b/include/caffe/data_layers.hpp
@@ -248,8 +248,8 @@ class MemoryDataLayer : public Layer<Dtype> {
virtual inline LayerParameter_LayerType type() const {
return LayerParameter_LayerType_MEMORY_DATA;
}
- virtual inline int ExactNumBottomBlobs() { return 0; }
- virtual inline int ExactNumTopBlobs() { return 2; }
+ virtual inline int ExactNumBottomBlobs() const { return 0; }
+ virtual inline int ExactNumTopBlobs() const { return 2; }
// Reset should accept const pointers, but can't, because the memory
// will be given to Blob, which is mutable
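Adding the const qualifier is more than style: a non-const ExactNumTopBlobs() in the derived class declares a new function that hides the base class's const virtual instead of overriding it, so calls through a base pointer silently take the base implementation. A simplified sketch with stand-in names (the real base is Layer<Dtype>):

class Layer {
 public:
  virtual ~Layer() {}
  virtual int ExactNumTopBlobs() const { return -1; }
};

class MemoryData : public Layer {
 public:
  // Without 'const' this would be an unrelated overload, and
  // layer->ExactNumTopBlobs() through a Layer* would return -1.
  virtual int ExactNumTopBlobs() const { return 2; }
};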
diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp
index 5a8300aa..631c8afd 100644
--- a/src/caffe/common.cpp
+++ b/src/caffe/common.cpp
@@ -22,9 +22,8 @@ int64_t cluster_seedgen(void) {
Caffe::Caffe()
- : mode_(Caffe::CPU), phase_(Caffe::TRAIN), cublas_handle_(NULL),
- curand_generator_(NULL),
- random_generator_() {
+ : cublas_handle_(NULL), curand_generator_(NULL), random_generator_(),
+ mode_(Caffe::CPU), phase_(Caffe::TRAIN) {
// Try to create a cublas handler, and report an error if failed (but we will
// keep the program running as one might just want to run CPU code).
if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) {
@@ -162,8 +161,10 @@ const char* cublasGetErrorString(cublasStatus_t error) {
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
+#if CUDA_VERSION >= 6000
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
+#endif
}
return "Unknown cublas status";
}
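Guarding the case on CUDA_VERSION replaces the sentinel definition deleted from common.hpp above: the enumerator is compiled only when the CUDA 6.0+ headers actually declare it, rather than back-defining a fake value for older toolkits. The pattern in isolation, assuming the CUDA headers are on the include path:

#include <cuda.h>       // defines CUDA_VERSION
#include <cublas_v2.h>  // defines cublasStatus_t

const char* status_name(cublasStatus_t s) {
  switch (s) {
    case CUBLAS_STATUS_SUCCESS:
      return "CUBLAS_STATUS_SUCCESS";
#if CUDA_VERSION >= 6000
    // Only CUDA 6.0+ headers declare this enumerator.
    case CUBLAS_STATUS_NOT_SUPPORTED:
      return "CUBLAS_STATUS_NOT_SUPPORTED";
#endif
    default:
      return "unknown cublas status";
  }
}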
diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp
index f12ae1c1..29c4fec8 100644
--- a/src/caffe/layers/data_layer.cpp
+++ b/src/caffe/layers/data_layer.cpp
@@ -26,7 +26,7 @@ void* DataLayerPrefetch(void* layer_pointer) {
Datum datum;
CHECK(layer->prefetch_data_);
Dtype* top_data = layer->prefetch_data_->mutable_cpu_data();
- Dtype* top_label;
+ Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (layer->output_labels_) {
top_label = layer->prefetch_label_->mutable_cpu_data();
}
diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp
index d5c64f05..50863b8d 100644
--- a/src/caffe/layers/hdf5_data_layer.cpp
+++ b/src/caffe/layers/hdf5_data_layer.cpp
@@ -45,6 +45,7 @@ void HDF5DataLayer<Dtype>::LoadHDF5FileData(const char* filename) {
file_id, "label", MIN_LABEL_DIM, MAX_LABEL_DIM, &label_blob_);
herr_t status = H5Fclose(file_id);
+ CHECK_GE(status, 0) << "Failed to close HDF5 file " << filename;
CHECK_EQ(data_blob_.num(), label_blob_.num());
LOG(INFO) << "Successully loaded " << data_blob_.num() << " rows";
}
diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp
index 8f5f82d6..ba84edea 100644
--- a/src/caffe/layers/pooling_layer.cpp
+++ b/src/caffe/layers/pooling_layer.cpp
@@ -87,8 +87,8 @@ Dtype PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const int top_count = (*top)[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top->size() > 1;
- int* mask;
- Dtype* top_mask;
+ int* mask = NULL; // suppress warnings about uninitialized variables
+ Dtype* top_mask = NULL;
// Different pooling methods. We explicitly do the switch outside the for
// loop to save time, although this results in more code.
switch (this->layer_param_.pooling_param().pool()) {
@@ -195,8 +195,8 @@ void PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
caffe_set((*bottom)[0]->count(), Dtype(0), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
- const int* mask;
- const Dtype* top_mask;
+ const int* mask = NULL; // suppress warnings about uninitialized variables
+ const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// The main loop
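The NULL initializations here and in data_layer.cpp address -Wuninitialized false positives: the pointers are assigned and dereferenced only on matching branches, but older gcc cannot prove the branches correlate. The generic shape of the fix, as a hypothetical sketch:

#include <cstddef>

void example(bool use_mask, int* storage) {
  int* mask = NULL;  // initialized so -Wuninitialized stays quiet
  if (use_mask) {
    mask = storage;
  }
  // ... other work ...
  if (use_mask) {
    mask[0] = 1;  // only reached on paths where mask was assigned above
  }
}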
diff --git a/src/caffe/layers/tanh_layer.cpp b/src/caffe/layers/tanh_layer.cpp
index 66f530f8..77b44104 100644
--- a/src/caffe/layers/tanh_layer.cpp
+++ b/src/caffe/layers/tanh_layer.cpp
@@ -33,7 +33,6 @@ void TanHLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
const int count = (*bottom)[0]->count();
- Dtype exp2x;
Dtype tanhx;
for (int i = 0; i < count; ++i) {
tanhx = top_data[i];
diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp
index e08bed7d..fd4860f9 100644
--- a/src/caffe/layers/window_data_layer.cpp
+++ b/src/caffe/layers/window_data_layer.cpp
@@ -291,7 +291,10 @@ void WindowDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
string hashtag;
int image_index, channels;
- while (infile >> hashtag >> image_index) {
+ if (!(infile >> hashtag >> image_index)) {
+ LOG(FATAL) << "Window file is empty";
+ }
+ do {
CHECK_EQ(hashtag, "#");
// read image path
string image_path;
@@ -347,7 +350,7 @@ void WindowDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
<< image_size[2] << " "
<< "windows to process: " << num_windows;
}
- }
+ } while (infile >> hashtag >> image_index);
LOG(INFO) << "Number of images: " << image_index+1;
diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp
index 41ae7520..d76d284d 100644
--- a/src/caffe/net.cpp
+++ b/src/caffe/net.cpp
@@ -42,7 +42,6 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
name_ = param.name();
map<string, int> blob_name_to_idx;
set<string> available_blobs;
- int num_layers = param.layers_size();
CHECK_EQ(param.input_size() * 4, param.input_dim_size())
<< "Incorrect input blob dimension specifications.";
memory_used_ = 0;
@@ -58,7 +57,6 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
bottom_id_vecs_.resize(param.layers_size());
top_id_vecs_.resize(param.layers_size());
for (int layer_id = 0; layer_id < param.layers_size(); ++layer_id) {
- bool in_place = false;
const LayerParameter& layer_param = param.layers(layer_id);
layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
layer_names_.push_back(layer_param.name());
@@ -198,7 +196,6 @@ int Net<Dtype>::AppendBottom(const NetParameter& param,
bottom_vecs_[layer_id].push_back(blobs_[blob_id].get());
bottom_id_vecs_[layer_id].push_back(blob_id);
available_blobs->erase(blob_name);
- bool need_backward = param.force_backward() || blob_need_backward_[blob_id];
return blob_id;
}
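Here, and in the test and tool hunks below, the change is simply deleting locals that were computed but never read; each one draws -Wunused-variable once -Wall is on. A one-function reproduction:

int sum(const int* data, int n) {
  int unused = n;  // -Wall: warning: unused variable 'unused'
  int total = 0;
  for (int i = 0; i < n; ++i) total += data[i];
  return total;
}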
diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp
index 68f2618d..8ba5f29f 100644
--- a/src/caffe/test/test_data_layer.cpp
+++ b/src/caffe/test/test_data_layer.cpp
@@ -24,10 +24,10 @@ template <typename Dtype>
class DataLayerTest : public ::testing::Test {
protected:
DataLayerTest()
- : blob_top_data_(new Blob<Dtype>()),
- blob_top_label_(new Blob<Dtype>()),
+ : backend_(DataParameter_DB_LEVELDB),
filename_(new string(tmpnam(NULL))),
- backend_(DataParameter_DB_LEVELDB),
+ blob_top_data_(new Blob<Dtype>()),
+ blob_top_label_(new Blob<Dtype>()),
seed_(1701) {}
virtual void SetUp() {
blob_top_vec_.push_back(blob_top_data_);
diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp
index a0ed113b..1cbca009 100644
--- a/src/caffe/test/test_hdf5data_layer.cpp
+++ b/src/caffe/test/test_hdf5data_layer.cpp
@@ -24,9 +24,9 @@ template <typename Dtype>
class HDF5DataLayerTest : public ::testing::Test {
protected:
HDF5DataLayerTest()
- : blob_top_data_(new Blob<Dtype>()),
- blob_top_label_(new Blob<Dtype>()),
- filename(NULL) {}
+ : filename(NULL),
+ blob_top_data_(new Blob<Dtype>()),
+ blob_top_label_(new Blob<Dtype>()) {}
virtual void SetUp() {
blob_top_vec_.push_back(blob_top_data_);
blob_top_vec_.push_back(blob_top_label_);
@@ -61,7 +61,6 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) {
int batch_size = 5;
hdf5_data_param->set_batch_size(batch_size);
hdf5_data_param->set_source(*(this->filename));
- int num_rows = 10;
int num_cols = 8;
int height = 5;
int width = 5;
diff --git a/src/caffe/test/test_image_data_layer.cpp b/src/caffe/test/test_image_data_layer.cpp
index fea5793d..2278f4e2 100644
--- a/src/caffe/test/test_image_data_layer.cpp
+++ b/src/caffe/test/test_image_data_layer.cpp
@@ -27,10 +27,10 @@ template <typename Dtype>
class ImageDataLayerTest : public ::testing::Test {
protected:
ImageDataLayerTest()
- : blob_top_data_(new Blob<Dtype>()),
- blob_top_label_(new Blob<Dtype>()),
+ : seed_(1701),
filename_(new string(tmpnam(NULL))),
- seed_(1701) {}
+ blob_top_data_(new Blob<Dtype>()),
+ blob_top_label_(new Blob<Dtype>()) {}
virtual void SetUp() {
blob_top_vec_.push_back(blob_top_data_);
blob_top_vec_.push_back(blob_top_label_);
diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp
index 1923128d..7f0a211a 100644
--- a/src/caffe/test/test_lrn_layer.cpp
+++ b/src/caffe/test/test_lrn_layer.cpp
@@ -25,9 +25,9 @@ template <typename Dtype>
class LRNLayerTest : public ::testing::Test {
protected:
LRNLayerTest()
- : blob_bottom_(new Blob<Dtype>()),
- blob_top_(new Blob<Dtype>()),
- epsilon_(Dtype(1e-5)) {}
+ : epsilon_(Dtype(1e-5)),
+ blob_bottom_(new Blob<Dtype>()),
+ blob_top_(new Blob<Dtype>()) {}
virtual void SetUp() {
Caffe::set_random_seed(1701);
blob_bottom_->Reshape(2, 7, 3, 3);
@@ -55,7 +55,6 @@ void LRNLayerTest<Dtype>::ReferenceLRNForward(
Blob<Dtype>* blob_top) {
blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
blob_bottom.height(), blob_bottom.width());
- const Dtype* bottom_data = blob_bottom.cpu_data();
Dtype* top_data = blob_top->mutable_cpu_data();
LRNParameter lrn_param = layer_param.lrn_param();
Dtype alpha = lrn_param.alpha();
diff --git a/src/caffe/test/test_memory_data_layer.cpp b/src/caffe/test/test_memory_data_layer.cpp
index 15f01bd4..9781439f 100644
--- a/src/caffe/test/test_memory_data_layer.cpp
+++ b/src/caffe/test/test_memory_data_layer.cpp
@@ -12,9 +12,10 @@ template <typename Dtype>
class MemoryDataLayerTest : public ::testing::Test {
protected:
MemoryDataLayerTest()
- : data_blob_(new Blob<Dtype>()),
- label_blob_(new Blob<Dtype>()),
- data_(new Blob<Dtype>()), labels_(new Blob<Dtype>()) {}
+ : data_(new Blob<Dtype>()),
+ labels_(new Blob<Dtype>()),
+ data_blob_(new Blob<Dtype>()),
+ label_blob_(new Blob<Dtype>()) {}
virtual void SetUp() {
batch_size_ = 8;
batches_ = 12;
diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp
index 2210b461..ca70e1ac 100644
--- a/src/caffe/test/test_neuron_layer.cpp
+++ b/src/caffe/test/test_neuron_layer.cpp
@@ -189,7 +189,6 @@ TYPED_TEST(NeuronLayerTest, TestDropoutCPUTestPhase) {
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
- float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
if (top_data[i] != 0) {
EXPECT_EQ(top_data[i], bottom_data[i]);
@@ -244,7 +243,6 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGPUTestPhase) {
// Now, check values
const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
const TypeParam* top_data = this->blob_top_->cpu_data();
- float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio());
for (int i = 0; i < this->blob_bottom_->count(); ++i) {
if (top_data[i] != 0) {
EXPECT_EQ(top_data[i], bottom_data[i]);
diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp
index 62daf608..4116de4c 100644
--- a/src/caffe/test/test_random_number_generator.cpp
+++ b/src/caffe/test/test_random_number_generator.cpp
@@ -16,9 +16,9 @@ template <typename Dtype>
class RandomNumberGeneratorTest : public ::testing::Test {
protected:
RandomNumberGeneratorTest()
- : sample_size_(10000),
+ : mean_bound_multiplier_(3.8), // ~99.99% confidence for test failure.
+ sample_size_(10000),
seed_(1701),
- mean_bound_multiplier_(3.8), // ~99.99% confidence for test failure.
data_(new SyncedMemory(sample_size_ * sizeof(Dtype))),
data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype))),
int_data_(new SyncedMemory(sample_size_ * sizeof(int))),
diff --git a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp
index d8018be0..089d5950 100644
--- a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp
+++ b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp
@@ -69,7 +69,6 @@ class SigmoidCrossEntropyLossLayerTest : public ::testing::Test {
targets_filler_param.set_max(1.0);
UniformFiller<Dtype> targets_filler(targets_filler_param);
Dtype eps = 2e-2;
- int num_inf = 0;
for (int i = 0; i < 100; ++i) {
// Fill the data vector
data_filler.Fill(this->blob_bottom_data_);
diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp
index 65f82547..ff65bd64 100644
--- a/src/caffe/util/io.cpp
+++ b/src/caffe/util/io.cpp
@@ -123,6 +123,7 @@ void hdf5_load_nd_dataset_helper(
herr_t status;
int ndims;
status = H5LTget_dataset_ndims(file_id, dataset_name_, &ndims);
+ CHECK_GE(status, 0) << "Failed to get dataset ndims for " << dataset_name_;
CHECK_GE(ndims, min_dim);
CHECK_LE(ndims, max_dim);
@@ -131,6 +132,7 @@ void hdf5_load_nd_dataset_helper(
H5T_class_t class_;
status = H5LTget_dataset_info(
file_id, dataset_name_, dims.data(), &class_, NULL);
+ CHECK_GE(status, 0) << "Failed to get dataset info for " << dataset_name_;
CHECK_EQ(class_, H5T_FLOAT) << "Expected float or double data";
blob->Reshape(
@@ -146,6 +148,7 @@ void hdf5_load_nd_dataset<float>(hid_t file_id, const char* dataset_name_,
hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob);
herr_t status = H5LTread_dataset_float(
file_id, dataset_name_, blob->mutable_cpu_data());
+ CHECK_GE(status, 0) << "Failed to read float dataset " << dataset_name_;
}
template <>
@@ -154,6 +157,7 @@ void hdf5_load_nd_dataset<double>(hid_t file_id, const char* dataset_name_,
hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob);
herr_t status = H5LTread_dataset_double(
file_id, dataset_name_, blob->mutable_cpu_data());
+ CHECK_GE(status, 0) << "Failed to read double dataset " << dataset_name_;
}
template <>
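The H5LT calls return a negative herr_t on failure; previously each status was stored and ignored, which both masked I/O errors and (with -Wall) drew unused-variable warnings. The check pattern, sketched outside of glog with std::abort standing in for CHECK_GE:

#include <cstdlib>
#include <hdf5.h>
#include <hdf5_hl.h>

// Read a dataset's rank, treating a negative herr_t as fatal.
int dataset_ndims(hid_t file_id, const char* name) {
  int ndims = 0;
  herr_t status = H5LTget_dataset_ndims(file_id, name, &ndims);
  if (status < 0) {  // CHECK_GE(status, 0) << "..." in Caffe
    std::abort();
  }
  return ndims;
}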
diff --git a/src/gtest/gtest-all.cpp b/src/gtest/gtest-all.cpp
index 5ced66a9..92619741 100644
--- a/src/gtest/gtest-all.cpp
+++ b/src/gtest/gtest-all.cpp
@@ -601,7 +601,6 @@ class GTestFlagSaver {
bool list_tests_;
String output_;
bool print_time_;
- bool pretty_;
internal::Int32 random_seed_;
internal::Int32 repeat_;
bool shuffle_;
diff --git a/tools/convert_imageset.cpp b/tools/convert_imageset.cpp
index aa951551..c4bc0b32 100644
--- a/tools/convert_imageset.cpp
+++ b/tools/convert_imageset.cpp
@@ -2,7 +2,7 @@
// This program converts a set of images to a leveldb by storing them as Datum
// proto buffers.
// Usage:
-// convert_imageset [-g] ROOTFOLDER/ LISTFILE DB_NAME RANDOM_SHUFFLE[0 or 1] \
+// convert_imageset [-g] ROOTFOLDER/ LISTFILE DB_NAME RANDOM_SHUFFLE[0 or 1]
// [resize_height] [resize_width]
// where ROOTFOLDER is the root folder that holds all the images, and LISTFILE
// should be a list of files as well as their labels, in the format as
diff --git a/tools/dump_network.cpp b/tools/dump_network.cpp
index f29e150b..8ed8edac 100644
--- a/tools/dump_network.cpp
+++ b/tools/dump_network.cpp
@@ -4,7 +4,7 @@
// all the intermediate blobs produced by the net to individual binary
// files stored in protobuffer binary formats.
// Usage:
-// dump_network input_net_param trained_net_param \
+// dump_network input_net_param trained_net_param
// input_blob output_prefix 0/1
// if input_net_param is 'none', we will directly load the network from
// trained_net_param. If the last argv is 1, we will do a forward-backward pass
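The trailing backslashes removed from these usage comments matter: a backslash at the end of a // comment splices the following source line into the comment. Here the continuation happened to be harmless, since the next line was also a comment, but -Wcomment (part of -Wall) flags the multi-line comment. Schematically:

// Usage: my_tool input_param trained_param \
int swallowed = 1;   // this whole line is still inside the comment above

int compiled = 2;    // this line compiles normally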
diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp
index cdad6676..3a670d96 100644
--- a/tools/extract_features.cpp
+++ b/tools/extract_features.cpp
@@ -116,7 +116,6 @@ int feature_extraction_pipeline(int argc, char** argv) {
leveldb::WriteBatch* batch = new leveldb::WriteBatch();
const int kMaxKeyStrLength = 100;
char key_str[kMaxKeyStrLength];
- int num_bytes_of_binary_code = sizeof(Dtype);
vector<Blob<float>*> input_vec;
int image_index = 0;
for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) {