-rw-r--r--  Makefile.icsi                              111
-rw-r--r--  include/caffe/layer.hpp                      3
-rw-r--r--  include/caffe/layer_register.hpp            86
-rw-r--r--  include/caffe/vision_layers.hpp              2
-rw-r--r--  src/caffe/common.cpp                         4
-rw-r--r--  src/caffe/layer_factory.cpp                 62
-rw-r--r--  src/caffe/layers/conv_layer.cpp              2
-rw-r--r--  src/caffe/layers/data_layer.cpp              2
-rw-r--r--  src/caffe/layers/dropout_layer.cu            1
-rw-r--r--  src/caffe/layers/im2col_layer.cpp            1
-rw-r--r--  src/caffe/layers/inner_product_layer.cpp     1
-rw-r--r--  src/caffe/layers/loss_layer.cu               4
-rw-r--r--  src/caffe/layers/lrn_layer.cpp               1
-rw-r--r--  src/caffe/layers/padding_layer.cu            1
-rw-r--r--  src/caffe/layers/pooling_layer.cpp           1
-rw-r--r--  src/caffe/layers/relu_layer.cu               2
-rw-r--r--  src/caffe/layers/softmax_layer.cu            1
-rw-r--r--  src/caffe/layers/softmax_loss_layer.cu       1
-rw-r--r--  src/caffe/net.cpp                            4
19 files changed, 221 insertions, 69 deletions
diff --git a/Makefile.icsi b/Makefile.icsi
new file mode 100644
index 00000000..4c8b730a
--- /dev/null
+++ b/Makefile.icsi
@@ -0,0 +1,111 @@
+# The makefile for Caffe. Extremely hacky.
+PROJECT := caffe
+TEST_GPUID := 1
+
+# The target shared library and static library names
+NAME := lib$(PROJECT).so
+STATIC_NAME := lib$(PROJECT).a
+# All source files
+CXX_SRCS := $(shell find src/caffe ! -name "test_*.cpp" -name "*.cpp")
+CU_SRCS := $(shell find src/caffe -name "*.cu")
+TEST_SRCS := $(shell find src/caffe -name "test_*.cpp")
+GTEST_SRC := src/gtest/gtest-all.cpp
+EXAMPLE_SRCS := $(shell find examples -name "*.cpp")
+PROTO_SRCS := $(wildcard src/caffe/proto/*.proto)
+# The generated files for protocol buffers
+PROTO_GEN_HEADER := ${PROTO_SRCS:.proto=.pb.h}
+PROTO_GEN_CC := ${PROTO_SRCS:.proto=.pb.cc}
+PROTO_GEN_PY := ${PROTO_SRCS:.proto=_pb2.py}
+# The objects that are needed to generate the library
+CXX_OBJS := ${CXX_SRCS:.cpp=.o}
+CU_OBJS := ${CU_SRCS:.cu=.cuo}
+PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o}
+OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS)
+# program and test objects
+EXAMPLE_OBJS := ${EXAMPLE_SRCS:.cpp=.o}
+TEST_OBJS := ${TEST_SRCS:.cpp=.o}
+GTEST_OBJ := ${GTEST_SRC:.cpp=.o}
+# program and test bins
+EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin}
+TEST_BINS := ${TEST_OBJS:.o=.testbin}
+
+# define third-party library paths
+CUDA_DIR := /u/vis/software/cuda-5.5
+CUDA_ARCH := -arch=sm_30
+MKL_DIR := /u/vis/software/dist/intel/composerxe-2011.3.174/mkl
+
+CUDA_INCLUDE_DIR := $(CUDA_DIR)/include
+CUDA_LIB_DIR := $(CUDA_DIR)/lib64 $(CUDA_DIR)/lib
+MKL_INCLUDE_DIR := $(MKL_DIR)/include
+MKL_LIB_DIR := $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64
+
+# define includes and libraries
+# We put src here just for gtest
+INCLUDE_DIRS := ./src ./include /u/vis/software/include /usr/local/include $(CUDA_INCLUDE_DIR) $(MKL_INCLUDE_DIR)
+LIBRARY_DIRS := /usr/lib /u/vis/software/lib /usr/local/lib $(CUDA_LIB_DIR) $(MKL_LIB_DIR)
+LIBRARIES := cudart cublas protobuf glog mkl_rt curand \
+ leveldb snappy pthread
+WARNINGS := -Wall
+
+COMMON_FLAGS := $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))
+CXXFLAGS += -pthread -fPIC -O2 $(COMMON_FLAGS)
+NVCCFLAGS := -Xcompiler -fPIC -O2 $(COMMON_FLAGS)
+LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) \
+ $(foreach library,$(LIBRARIES),-l$(library))
+
+NVCC = $(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CPPFLAGS) $(CUDA_ARCH)
+
+.PHONY: all test clean distclean linecount examples distribute
+
+all: $(NAME) $(STATIC_NAME) test examples
+
+linecount: clean
+ cloc --read-lang-def=caffe.cloc src/caffe/
+
+test: $(TEST_BINS)
+
+examples: $(EXAMPLE_BINS)
+
+$(NAME): $(PROTO_OBJS) $(OBJS)
+ $(CXX) -shared $(OBJS) -o $(NAME) $(LDFLAGS) $(WARNINGS)
+
+$(STATIC_NAME): $(PROTO_OBJS) $(OBJS)
+ ar rcs $(STATIC_NAME) $(PROTO_OBJS) $(OBJS)
+
+runtest: test
+ for testbin in $(TEST_BINS); do $$testbin $(TEST_GPUID); done
+
+$(TEST_BINS): %.testbin : %.o $(GTEST_OBJ) $(STATIC_NAME)
+ $(CXX) $< $(GTEST_OBJ) $(STATIC_NAME) -o $@ $(LDFLAGS) $(WARNINGS)
+
+$(EXAMPLE_BINS): %.bin : %.o $(STATIC_NAME)
+ $(CXX) $< $(STATIC_NAME) -o $@ $(LDFLAGS) $(WARNINGS)
+
+$(OBJS): $(PROTO_GEN_CC)
+
+$(EXAMPLE_OBJS): $(PROTO_GEN_CC)
+
+$(CU_OBJS): %.cuo: %.cu
+ $(NVCC) -c $< -o $@
+
+$(PROTO_GEN_CC): $(PROTO_SRCS)
+ protoc --proto_path=src --cpp_out=src --python_out=src $(PROTO_SRCS)
+ mkdir -p include/caffe/proto
+ cp $(PROTO_GEN_HEADER) include/caffe/proto/
+
+clean:
+ @- $(RM) $(NAME) $(STATIC_NAME) $(TEST_BINS) $(EXAMPLE_BINS)
+ @- $(RM) $(OBJS) $(TEST_OBJS) $(EXAMPLE_OBJS)
+ @- $(RM) $(PROTO_GEN_HEADER) $(PROTO_GEN_CC) $(PROTO_GEN_PY)
+ @- $(RM) -rf build
+
+distclean: clean
+
+distribute: all
+ mkdir build
+ cp -r include build/
+ mkdir build/bin
+ cp $(EXAMPLE_BINS) build/bin
+ mkdir build/lib
+ cp $(NAME) build/lib
+ cp $(STATIC_NAME) build/lib
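
Note: the link rules for TEST_BINS and EXAMPLE_BINS above are GNU Make static pattern rules ($(targets): target-pattern : prereq-pattern), and the variable definitions rely on substitution references such as ${TEST_OBJS:.o=.testbin}. A minimal sketch of both constructs with illustrative names (a.c and b.c are assumptions, not files in this tree; recipe lines must be indented with a literal tab):

    SRCS := a.c b.c
    OBJS := $(SRCS:.c=.o)        # substitution reference: a.o b.o
    BINS := $(OBJS:.o=.bin)      # a.bin b.bin

    all: $(BINS)

    # Static pattern rule: each target in $(BINS) gets its prerequisite by
    # matching %.bin against the target name and substituting into %.o.
    $(BINS): %.bin : %.o
    	$(CC) $< -o $@
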
diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp
index adc63657..48365ebb 100644
--- a/include/caffe/layer.hpp
+++ b/include/caffe/layer.hpp
@@ -127,9 +127,6 @@ void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
}
}
-// The layer factory function
-template <typename Dtype>
-Layer<Dtype>* GetLayer(const LayerParameter& param);
} // namespace caffe
diff --git a/include/caffe/layer_register.hpp b/include/caffe/layer_register.hpp
new file mode 100644
index 00000000..0d251235
--- /dev/null
+++ b/include/caffe/layer_register.hpp
@@ -0,0 +1,86 @@
+// This header implements a registry that maps layer names to layer creators.
+// Copyright Yangqing Jia 2013
+
+#ifndef CAFFE_LAYER_REGISTER_HPP_
+#define CAFFE_LAYER_REGISTER_HPP_
+
+#include <string>
+#include <map>
+
+#include "caffe/layer.hpp"
+#include "caffe/proto/caffe.pb.h"
+
+using std::string;
+
+namespace caffe {
+
+// Internal: the layer registry
+template <typename Dtype>
+class LayerRegistry {
+ public:
+ typedef Layer<Dtype>* (*Creator)(const LayerParameter&);
+
+ LayerRegistry() : layer_map_() {}
+ ~LayerRegistry() {}
+
+ void AddCreator(string name, Creator creator) {
+ layer_map_[name] = creator;
+ }
+
+ inline Layer<Dtype>* CreateLayer(const string& name, const LayerParameter& param) {
+ typename LayerMap::const_iterator it = layer_map_.find(name);
+ if (it == layer_map_.end()) {
+ LOG(FATAL) << "Unknown layer: " << name;
+ }
+ return (it->second)(param);
+ }
+
+ private:
+ typedef std::map<string, Creator> LayerMap;
+ LayerMap layer_map_;
+};
+
+
+// Internal: the function to get the layer registry.
+template <typename Dtype>
+inline LayerRegistry<Dtype>& GetLayerRegistry() {
+ static LayerRegistry<Dtype> registry;
+ return registry;
+}
+
+
+// Internal: the registerer class that registers a layer creator.
+template <typename Dtype>
+class LayerCreatorRegisterer {
+ public:
+ explicit LayerCreatorRegisterer(const string& name,
+ typename LayerRegistry<Dtype>::Creator creator) {
+ GetLayerRegistry<Dtype>().AddCreator(name, creator);
+ }
+ ~LayerCreatorRegisterer() {}
+};
+
+
+// The macro used to register a layer. For example, if you have a
+// ConvolutionLayer and want to register it under the name "conv", write
+// REGISTER_LAYER("conv", ConvolutionLayer)
+#define REGISTER_LAYER(name, DerivedLayer) \
+ template <typename Dtype> \
+ Layer<Dtype>* Create##DerivedLayer(const LayerParameter& param) { \
+ return new DerivedLayer<Dtype>(param); \
+ } \
+ LayerCreatorRegisterer<float> g_creator_float_##DerivedLayer( \
+ name, &Create##DerivedLayer<float>); \
+ LayerCreatorRegisterer<double> g_creator_double_##DerivedLayer( \
+ name, &Create##DerivedLayer<double>)
+
+
+// The function to call to create a layer from a LayerParameter.
+template <typename Dtype>
+Layer<Dtype>* CreateLayer(const LayerParameter& param) {
+ return GetLayerRegistry<Dtype>().CreateLayer(param.type(), param);
+}
+
+} // namespace caffe
+
+#endif  // CAFFE_LAYER_REGISTER_HPP_
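
For readers new to this idiom, here is a minimal self-contained sketch of the self-registration pattern that layer_register.hpp implements (simplified, non-template types; ToyLayer and every other name below are illustrative, not part of the Caffe API). It demonstrates the two key points: the registry is a function-local static, so it is constructed on first use and can safely be filled by registerer objects in any translation unit during static initialization, before main() runs; and the macro expands to a creator function plus a file-scope registerer whose constructor performs the registration.

    #include <iostream>
    #include <map>
    #include <string>

    struct Layer {                        // stand-in for caffe::Layer<Dtype>
      virtual ~Layer() {}
      virtual std::string type() const = 0;
    };

    typedef Layer* (*Creator)();          // stand-in for LayerRegistry::Creator

    // Function-local static: built on first use, immune to the static
    // initialization order problem across translation units.
    std::map<std::string, Creator>& Registry() {
      static std::map<std::string, Creator> registry;
      return registry;
    }

    struct Registerer {                   // stand-in for LayerCreatorRegisterer
      Registerer(const std::string& name, Creator creator) {
        Registry()[name] = creator;
      }
    };

    #define REGISTER(name, Type)                      \
      Layer* Create##Type() { return new Type; }      \
      Registerer g_creator_##Type(name, &Create##Type)

    struct ToyLayer : public Layer {
      virtual std::string type() const { return "toy"; }
    };
    REGISTER("toy", ToyLayer);            // constructor runs before main()

    int main() {
      Layer* layer = Registry()["toy"]();       // lookup, then create
      std::cout << layer->type() << std::endl;  // prints "toy"
      delete layer;
      return 0;
    }

REGISTER_LAYER above does the same thing twice, once per floating-point type. This is why the hand-maintained if/else chain in layer_factory.cpp (deleted below) becomes unnecessary: each layer now registers itself from its own source file, and adding a layer no longer requires editing a central factory.
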
diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp
index 432d7ffe..6ce0cf75 100644
--- a/include/caffe/vision_layers.hpp
+++ b/include/caffe/vision_layers.hpp
@@ -9,6 +9,7 @@
#include <vector>
#include "caffe/layer.hpp"
+#include "caffe/layer_register.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
@@ -400,6 +401,7 @@ class AccuracyLayer : public Layer<Dtype> {
}
};
+
} // namespace caffe
#endif // CAFFE_VISION_LAYERS_HPP_
diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp
index 1fce86a2..c254d70e 100644
--- a/src/caffe/common.cpp
+++ b/src/caffe/common.cpp
@@ -36,13 +36,13 @@ Caffe::Caffe()
// Try to create a curand handler.
if (curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT)
!= CURAND_STATUS_SUCCESS ||
- curandSetPseudoRandomGeneratorSeed(curand_generator_, 1701ULL)
+ curandSetPseudoRandomGeneratorSeed(curand_generator_, time(NULL))
!= CURAND_STATUS_SUCCESS) {
LOG(ERROR) << "Cannot create Curand generator. Curand won't be available.";
}
// Try to create a vsl stream. This should almost always work, but we will
// check it anyway.
- if (vslNewStream(&vsl_stream_, VSL_BRNG_MT19937, 1701) != VSL_STATUS_OK) {
+ if (vslNewStream(&vsl_stream_, VSL_BRNG_MT19937, time(NULL)) != VSL_STATUS_OK) {
LOG(ERROR) << "Cannot create vsl stream. VSL random number generator "
<< "won't be available.";
}
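
Note on this hunk: the fixed seed 1701 made every run draw the same random stream, which keeps experiments reproducible; seeding from time(NULL) gives a different stream per run, at the cost of reproducibility, and two processes started within the same second will still share a stream. The same tradeoff in plain standard C++ (an illustrative sketch only; the curand and VSL calls above behave analogously):

    #include <cstdlib>
    #include <ctime>

    int main() {
      // std::srand(1701);             // fixed seed: identical sequence
      //                               // on every run (reproducible)
      std::srand(static_cast<unsigned>(std::time(NULL)));  // per-run seed
      return std::rand() % 100;        // differs from run to run
    }
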
diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp
deleted file mode 100644
index 6961bb3f..00000000
--- a/src/caffe/layer_factory.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2013 Yangqing Jia
-
-#ifndef CAFFE_LAYER_FACTORY_HPP_
-#define CAFFE_LAYER_FACTORY_HPP_
-
-#include <string>
-
-#include "caffe/layer.hpp"
-#include "caffe/vision_layers.hpp"
-#include "caffe/proto/caffe.pb.h"
-
-
-namespace caffe {
-
-
-// A function to get a specific layer from the specification given in
-// LayerParameter. Ideally this would be replaced by a factory pattern,
-// but we will leave it this way for now.
-template <typename Dtype>
-Layer<Dtype>* GetLayer(const LayerParameter& param) {
- const std::string& type = param.type();
- if (type == "accuracy") {
- return new AccuracyLayer<Dtype>(param);
- } else if (type == "conv") {
- return new ConvolutionLayer<Dtype>(param);
- } else if (type == "data") {
- return new DataLayer<Dtype>(param);
- } else if (type == "dropout") {
- return new DropoutLayer<Dtype>(param);
- } else if (type == "euclidean_loss") {
- return new EuclideanLossLayer<Dtype>(param);
- } else if (type == "im2col") {
- return new Im2colLayer<Dtype>(param);
- } else if (type == "innerproduct") {
- return new InnerProductLayer<Dtype>(param);
- } else if (type == "lrn") {
- return new LRNLayer<Dtype>(param);
- } else if (type == "padding") {
- return new PaddingLayer<Dtype>(param);
- } else if (type == "pool") {
- return new PoolingLayer<Dtype>(param);
- } else if (type == "relu") {
- return new ReLULayer<Dtype>(param);
- } else if (type == "softmax") {
- return new SoftmaxLayer<Dtype>(param);
- } else if (type == "softmax_loss") {
- return new SoftmaxWithLossLayer<Dtype>(param);
- } else if (type == "multinomial_logistic_loss") {
- return new MultinomialLogisticLossLayer<Dtype>(param);
- } else {
- LOG(FATAL) << "Unknown layer name: " << type;
- }
- // just to suppress old compiler warnings.
- return (Layer<Dtype>*)(NULL);
-}
-
-template Layer<float>* GetLayer(const LayerParameter& param);
-template Layer<double>* GetLayer(const LayerParameter& param);
-
-} // namespace caffe
-
-#endif // CAFFE_LAYER_FACTORY_HPP_
diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp
index f2608be2..5531b2d4 100644
--- a/src/caffe/layers/conv_layer.cpp
+++ b/src/caffe/layers/conv_layer.cpp
@@ -250,5 +250,7 @@ Dtype ConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(ConvolutionLayer);
+REGISTER_LAYER("conv", ConvolutionLayer);
+
} // namespace caffe
diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp
index 9ed95165..a1b55c20 100644
--- a/src/caffe/layers/data_layer.cpp
+++ b/src/caffe/layers/data_layer.cpp
@@ -225,5 +225,7 @@ Dtype DataLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(DataLayer);
+REGISTER_LAYER("data", DataLayer);
+
} // namespace caffe
diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu
index df94f2de..8b98dbc8 100644
--- a/src/caffe/layers/dropout_layer.cu
+++ b/src/caffe/layers/dropout_layer.cu
@@ -119,6 +119,7 @@ Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(DropoutLayer);
+REGISTER_LAYER("dropout", DropoutLayer);
} // namespace caffe
diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp
index 976c8441..d0fa4e34 100644
--- a/src/caffe/layers/im2col_layer.cpp
+++ b/src/caffe/layers/im2col_layer.cpp
@@ -71,5 +71,6 @@ Dtype Im2colLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(Im2colLayer);
+REGISTER_LAYER("im2col", Im2colLayer);
} // namespace caffe
diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp
index 18f1df0d..e3a21f99 100644
--- a/src/caffe/layers/inner_product_layer.cpp
+++ b/src/caffe/layers/inner_product_layer.cpp
@@ -140,5 +140,6 @@ Dtype InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(InnerProductLayer);
+REGISTER_LAYER("innerproduct", InnerProductLayer);
} // namespace caffe
diff --git a/src/caffe/layers/loss_layer.cu b/src/caffe/layers/loss_layer.cu
index 18a8023d..f8437f9c 100644
--- a/src/caffe/layers/loss_layer.cu
+++ b/src/caffe/layers/loss_layer.cu
@@ -117,7 +117,11 @@ void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
}
INSTANTIATE_CLASS(MultinomialLogisticLossLayer);
+REGISTER_LAYER("multinomial_logistic_loss", MultinomialLogisticLossLayer);
INSTANTIATE_CLASS(EuclideanLossLayer);
+REGISTER_LAYER("euclidean_loss", EuclideanLossLayer);
INSTANTIATE_CLASS(AccuracyLayer);
+REGISTER_LAYER("accuracy", AccuracyLayer);
+
} // namespace caffe
diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp
index 337b77b7..0876f51e 100644
--- a/src/caffe/layers/lrn_layer.cpp
+++ b/src/caffe/layers/lrn_layer.cpp
@@ -130,6 +130,7 @@ Dtype LRNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(LRNLayer);
+REGISTER_LAYER("lrn", LRNLayer);
} // namespace caffe
diff --git a/src/caffe/layers/padding_layer.cu b/src/caffe/layers/padding_layer.cu
index 90f5508b..b0b21f6d 100644
--- a/src/caffe/layers/padding_layer.cu
+++ b/src/caffe/layers/padding_layer.cu
@@ -134,6 +134,7 @@ Dtype PaddingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(PaddingLayer);
+REGISTER_LAYER("padding", PaddingLayer);
} // namespace caffe
diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp
index 59ce3fe7..498abab2 100644
--- a/src/caffe/layers/pooling_layer.cpp
+++ b/src/caffe/layers/pooling_layer.cpp
@@ -182,6 +182,7 @@ Dtype PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
INSTANTIATE_CLASS(PoolingLayer);
+REGISTER_LAYER("pool", PoolingLayer);
} // namespace caffe
diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu
index b0fc46ef..79945b58 100644
--- a/src/caffe/layers/relu_layer.cu
+++ b/src/caffe/layers/relu_layer.cu
@@ -84,6 +84,6 @@ Dtype ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(ReLULayer);
-
+REGISTER_LAYER("relu", ReLULayer);
} // namespace caffe
diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu
index a7659697..635de685 100644
--- a/src/caffe/layers/softmax_layer.cu
+++ b/src/caffe/layers/softmax_layer.cu
@@ -176,6 +176,7 @@ Dtype SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(SoftmaxLayer);
+REGISTER_LAYER("softmax", SoftmaxLayer);
} // namespace caffe
diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu
index 3a001c08..a90968a4 100644
--- a/src/caffe/layers/softmax_loss_layer.cu
+++ b/src/caffe/layers/softmax_loss_layer.cu
@@ -68,6 +68,7 @@ Dtype SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
}
INSTANTIATE_CLASS(SoftmaxWithLossLayer);
+REGISTER_LAYER("softmax_loss", SoftmaxWithLossLayer);
} // namespace caffe
diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp
index 38a806df..f25dba24 100644
--- a/src/caffe/net.cpp
+++ b/src/caffe/net.cpp
@@ -7,6 +7,7 @@
#include "caffe/proto/caffe.pb.h"
#include "caffe/layer.hpp"
+#include "caffe/layer_register.hpp"
#include "caffe/net.hpp"
using std::pair;
@@ -47,7 +48,8 @@ Net<Dtype>::Net(const NetParameter& param,
for (int i = 0; i < param.layers_size(); ++i) {
const LayerConnection& layer_connection = param.layers(i);
const LayerParameter& layer_param = layer_connection.layer();
- layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
+ layers_.push_back(
+ shared_ptr<Layer<Dtype> >(CreateLayer<Dtype>(layer_param)));
layer_names_.push_back(layer_param.name());
LOG(INFO) << "Creating Layer " << layer_param.name();
bool need_backward = false;