-rw-r--r--  .github/ISSUE_TEMPLATE.md  37
-rw-r--r--  CMakeLists.txt  3
-rw-r--r--  CONTRIBUTING.md  56
-rw-r--r--  Makefile  14
-rw-r--r--  Makefile.config.example  2
-rw-r--r--  README.md  2
-rw-r--r--  cmake/ConfigGen.cmake  12
-rw-r--r--  cmake/Cuda.cmake  6
-rw-r--r--  cmake/Dependencies.cmake  8
-rw-r--r--  cmake/Modules/FindMKL.cmake  2
-rw-r--r--  cmake/ProtoBuf.cmake  2
-rw-r--r--  cmake/Summary.cmake  2
-rw-r--r--  docs/tutorial/interfaces.md  2
-rw-r--r--  docs/tutorial/layers.md  1
-rw-r--r--  docs/tutorial/layers/clip.md  20
-rw-r--r--  include/caffe/filler.hpp  12
-rw-r--r--  include/caffe/layers/clip_layer.hpp  75
-rw-r--r--  include/caffe/layers/pooling_layer.hpp  1
-rw-r--r--  include/caffe/layers/swish_layer.hpp  96
-rw-r--r--  include/caffe/net.hpp  6
-rw-r--r--  include/caffe/sgd_solvers.hpp  5
-rw-r--r--  include/caffe/solver.hpp  7
-rw-r--r--  include/caffe/syncedmem.hpp  4
-rw-r--r--  include/caffe/util/hdf5.hpp  2
-rw-r--r--  include/caffe/util/signal_handler.h  2
-rw-r--r--  python/caffe/_caffe.cpp  22
-rw-r--r--  python/caffe/draw.py  144
-rw-r--r--  python/caffe/test/test_solver.py  11
-rwxr-xr-x  python/draw_net.py  6
-rw-r--r--  python/train.py  2
-rw-r--r--  src/caffe/layer_factory.cpp  1
-rw-r--r--  src/caffe/layers/clip_layer.cpp  51
-rw-r--r--  src/caffe/layers/clip_layer.cu  67
-rw-r--r--  src/caffe/layers/embed_layer.cu  5
-rw-r--r--  src/caffe/layers/hdf5_data_layer.cpp  2
-rw-r--r--  src/caffe/layers/hdf5_data_layer.cu  2
-rw-r--r--  src/caffe/layers/hdf5_output_layer.cpp  2
-rw-r--r--  src/caffe/layers/hdf5_output_layer.cu  2
-rw-r--r--  src/caffe/layers/inner_product_layer.cpp  2
-rw-r--r--  src/caffe/layers/pooling_layer.cpp  23
-rw-r--r--  src/caffe/layers/swish_layer.cpp  68
-rw-r--r--  src/caffe/layers/swish_layer.cu  54
-rw-r--r--  src/caffe/net.cpp  21
-rw-r--r--  src/caffe/proto/caffe.proto  31
-rw-r--r--  src/caffe/solver.cpp  10
-rw-r--r--  src/caffe/solvers/sgd_solver.cpp  16
-rw-r--r--  src/caffe/test/test_filler.cpp  447
-rw-r--r--  src/caffe/test/test_hdf5_output_layer.cpp  2
-rw-r--r--  src/caffe/test/test_hdf5data_layer.cpp  2
-rw-r--r--  src/caffe/test/test_neuron_layer.cpp  140
-rw-r--r--  src/caffe/test/test_syncedmem.cpp  4
-rw-r--r--  src/caffe/util/hdf5.cpp  2
-rw-r--r--  src/caffe/util/signal_handler.cpp  2
53 files changed, 1322 insertions(+), 198 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index d78a3dc3..c981f62f 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,19 +1,34 @@
-Please use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) for usage, installation, or modeling questions, or other requests for help.
-_Do not post such requests to Issues._ Doing so interferes with the development of Caffe.
+## Important - read before submitting
-Please read the [guidelines for contributing](https://github.com/BVLC/caffe/blob/master/CONTRIBUTING.md) before submitting this issue.
+*Please read the [guidelines for contributing](https://github.com/BVLC/caffe/blob/master/CONTRIBUTING.md) before submitting this issue!*
+
+*Please do not post installation, build, usage, or modeling questions, or other requests for help to Issues.*
+Use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) instead.
+This helps developers maintain a clear, uncluttered, and efficient view of the state of Caffe.
### Issue summary
### Steps to reproduce
-If you are having difficulty building Caffe or training a model, please ask the caffe-users mailing list. If you are reporting a build error that seems to be due to a bug in Caffe, please attach your build configuration (either Makefile.config or CMakeCache.txt) and the output of the make (or cmake) command.
-### Your system configuration
-Operating system:
-Compiler:
-CUDA version (if applicable):
-CUDNN version (if applicable):
-BLAS:
-Python or MATLAB version (for pycaffe and matcaffe respectively):
+### Tried solutions
+
+
+### System configuration
+
+* Operating system:
+* Compiler:
+* CUDA version (if applicable):
+* CUDNN version (if applicable):
+* BLAS:
+* Python version (if using pycaffe):
+* MATLAB version (if using matcaffe):
+
+### Issue checklist
+
+- [ ] read the guidelines and removed the first paragraph
+- [ ] written a short summary and detailed steps to reproduce
+- [ ] explained how solutions to related problems failed (tick if you found none)
+- [ ] filled in the system configuration
+- [ ] attached relevant logs/config files (tick if not applicable)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 08f56a33..27d172f9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -42,6 +42,9 @@ caffe_option(USE_LMDB "Build with lmdb" ON)
caffe_option(ALLOW_LMDB_NOLOCK "Allow MDB_NOLOCK when reading LMDB files (only if necessary)" OFF)
caffe_option(USE_OPENMP "Link with OpenMP (when your BLAS wants OpenMP and you get linker errors)" OFF)
+# This code is taken from https://github.com/sh1r0/caffe-android-lib
+caffe_option(USE_HDF5 "Build with hdf5" ON)
+
# ---[ Dependencies
include(cmake/Dependencies.cmake)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 8cd5e56c..45f7e186 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,21 +1,63 @@
# Contributing
+Below you will find a collection of guidelines for submitting issues as well as contributing code to the Caffe repository.
+Please read those before starting an issue or a pull request.
+
## Issues
Specific Caffe design and development issues, bugs, and feature requests are maintained by GitHub Issues.
-_Please do not post usage, installation, or modeling questions, or other requests for help to Issues._
-Use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) instead. This helps developers maintain a clear, uncluttered, and efficient view of the state of Caffe.
-
-When reporting a bug, it's most helpful to provide the following information, where applicable:
+*Please do not post installation, build, usage, or modeling questions, or other requests for help to Issues.*
+Use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) instead.
+This helps developers maintain a clear, uncluttered, and efficient view of the state of Caffe.
+See the section [caffe-users](#caffe-users) below for guidance on posting to the users list.
-* What steps reproduce the bug?
-* Can you reproduce the bug using the latest [master](https://github.com/BVLC/caffe/tree/master), compiled with the `DEBUG` make option?
-* What hardware and operating system/distribution are you running?
+When reporting an issue, it's most helpful to provide the following information, where applicable:
+* What does the problem look like and what steps reproduce it?
+* Can you reproduce it using the latest [master](https://github.com/BVLC/caffe/tree/master), compiled with the `DEBUG` make option?
+* What hardware and software are you running? In particular:
+ * GPU make and model, if relevant,
+ * operating system/distribution,
+ * compiler; please also post which version (for example, with GCC run `gcc --version` to check),
+ * CUDA version, if applicable (run `nvcc --version` to check),
+ * cuDNN version, if applicable (version number is stored in `cudnn.h`, look for lines containing `CUDNN_MAJOR`, `CUDNN_MINOR` and `CUDNN_PATCHLEVEL`),
+ * BLAS library,
+ * Python version, if relevant,
+ * MATLAB version, if relevant.
+* **What have you already tried** to solve the problem? How did it fail? Are there any other issues related to yours?
+* If this is not a build-related issue, does your installation pass `make runtest`?
* If the bug is a crash, provide the backtrace (usually printed by Caffe; always obtainable with `gdb`).
+* If you are reporting a build error that seems to be due to a bug in Caffe, please attach your build configuration (either Makefile.config or CMakeCache.txt) and the output of the make (or cmake) command.
+
+If only a small portion of the code/log is relevant to your issue, you may paste it directly into the post, preferably using Markdown syntax for code blocks: triple backtick ( \`\`\` ) to open/close a block.
+In other cases (multiple files, or long files), please **attach** them to the post - this greatly improves readability.
+
+If the problem arises during a complex operation (e.g. large script using pycaffe, long network prototxt), please reduce the example to the minimal size that still causes the error.
+Also, minimize the influence of external modules, data, etc. - this way it will be easier for others to understand and reproduce your issue, and eventually help you.
+Sometimes you will find the root cause yourself in the process.
Try to give your issue a title that is succinct and specific. The devs will rename issues as needed to keep track of them.
+## Caffe-users
+
+Before you post to the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users), make sure you look for existing solutions.
+The Caffe community has encountered and found solutions to countless problems - benefit from the collective experience.
+Recommended places to look:
+* the [users list](https://groups.google.com/forum/#!forum/caffe-users) itself,
+* [`caffe`](https://stackoverflow.com/questions/tagged/caffe) tag on StackOverflow,
+* [GitHub issues](https://github.com/BVLC/caffe/issues) tracker (some problems have been answered there),
+* the public [wiki](https://github.com/BVLC/caffe/wiki),
+* the official [documentation](http://caffe.berkeleyvision.org/).
+
+Found a post/issue with your exact problem, but with no answer?
+Don't just leave a "me too" message - provide the details of your case.
+Problems with more available information are easier to solve and attract good attention.
+
+When posting to the list, make sure you provide as much relevant information as possible - recommendations for an issue report (see above) are a good starting point.
+*Please make it very clear which version of Caffe you are using, especially if it is a fork not maintained by BVLC.*
+
+Formatting recommendations hold: paste short logs/code fragments into the post (use fixed-width text for them), **attach** long logs or multiple files.
+
## Pull Requests
Caffe welcomes all contributions.
diff --git a/Makefile b/Makefile
index c85c695a..b7660e85 100644
--- a/Makefile
+++ b/Makefile
@@ -178,11 +178,13 @@ ifneq ($(CPU_ONLY), 1)
LIBRARIES := cudart cublas curand
endif
-LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_hl hdf5
+LIBRARIES += glog gflags protobuf boost_system boost_filesystem m
# handle IO dependencies
USE_LEVELDB ?= 1
USE_LMDB ?= 1
+# This code is taken from https://github.com/sh1r0/caffe-android-lib
+USE_HDF5 ?= 1
USE_OPENCV ?= 1
ifeq ($(USE_LEVELDB), 1)
@@ -191,6 +193,10 @@ endif
ifeq ($(USE_LMDB), 1)
LIBRARIES += lmdb
endif
+# This code is taken from https://github.com/sh1r0/caffe-android-lib
+ifeq ($(USE_HDF5), 1)
+ LIBRARIES += hdf5_hl hdf5
+endif
ifeq ($(USE_OPENCV), 1)
LIBRARIES += opencv_core opencv_highgui opencv_imgproc
@@ -347,6 +353,10 @@ ifeq ($(ALLOW_LMDB_NOLOCK), 1)
COMMON_FLAGS += -DALLOW_LMDB_NOLOCK
endif
endif
+# This code is taken from https://github.com/sh1r0/caffe-android-lib
+ifeq ($(USE_HDF5), 1)
+ COMMON_FLAGS += -DUSE_HDF5
+endif
# CPU-only configuration
ifeq ($(CPU_ONLY), 1)
@@ -641,7 +651,7 @@ $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \
$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \
$(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR)
@ echo PROTOC \(python\) $<
- $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $<
+ $(Q)protoc --proto_path=src --python_out=python $<
$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)
touch $(PY_PROTO_INIT)
diff --git a/Makefile.config.example b/Makefile.config.example
index 79905935..24ca6327 100644
--- a/Makefile.config.example
+++ b/Makefile.config.example
@@ -11,6 +11,8 @@
# USE_OPENCV := 0
# USE_LEVELDB := 0
# USE_LMDB := 0
+# This code is taken from https://github.com/sh1r0/caffe-android-lib
+# USE_HDF5 := 0
# uncomment to allow MDB_NOLOCK when reading LMDB files (only if necessary)
# You should not set this flag if you will be reading LMDBs with any
diff --git a/README.md b/README.md
index fe259535..3705c55a 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ and step-by-step examples.
## Custom distributions
- - [Intel Caffe](https://github.com/BVLC/caffe/tree/intel) (Optimized for CPU and support for multi-node), in particular Xeon processors (HSW, BDW, SKX, Xeon Phi).
+ - [Intel Caffe](https://github.com/BVLC/caffe/tree/intel) (optimized for CPU, with multi-node support), in particular for Intel® Xeon processors.
- [OpenCL Caffe](https://github.com/BVLC/caffe/tree/opencl) e.g. for AMD or Intel devices.
- [Windows Caffe](https://github.com/BVLC/caffe/tree/windows)
diff --git a/cmake/ConfigGen.cmake b/cmake/ConfigGen.cmake
index 09bb09b4..69889c24 100644
--- a/cmake/ConfigGen.cmake
+++ b/cmake/ConfigGen.cmake
@@ -24,6 +24,18 @@ function(caffe_generate_export_configs)
set(HAVE_CUDA FALSE)
endif()
+ set(HDF5_IMPORTED OFF)
+ foreach(_lib ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES})
+ if(TARGET ${_lib})
+ set(HDF5_IMPORTED ON)
+ endif()
+ endforeach()
+
+ # This code is taken from https://github.com/sh1r0/caffe-android-lib
+ if(USE_HDF5)
+ list(APPEND Caffe_DEFINITIONS -DUSE_HDF5)
+ endif()
+
if(NOT HAVE_CUDNN)
set(HAVE_CUDNN FALSE)
endif()
diff --git a/cmake/Cuda.cmake b/cmake/Cuda.cmake
index 54e26fd5..e03feabf 100644
--- a/cmake/Cuda.cmake
+++ b/cmake/Cuda.cmake
@@ -109,6 +109,12 @@ function(caffe_select_nvcc_arch_flags out_variable)
set(__nvcc_flags "")
set(__nvcc_archs_readable "")
+ string(COMPARE LESS "${CUDA_VERSION}" "9.0" iscudaolderthan90)
+ if(NOT iscudaolderthan90)
+ string(REPLACE "21(20)" "" __cuda_arch_bin "${__cuda_arch_bin}")
+ string(REPLACE "20" "" __cuda_arch_bin "${__cuda_arch_bin}")
+ endif()
+
# Tell NVCC to add binaries for the specified GPUs
foreach(__arch ${__cuda_arch_bin})
if(__arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index c48255c8..ca2e3ad9 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -47,6 +47,14 @@ find_package(HDF5 COMPONENTS HL REQUIRED)
list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${HDF5_INCLUDE_DIRS})
list(APPEND Caffe_LINKER_LIBS PUBLIC ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES})
+# This code is taken from https://github.com/sh1r0/caffe-android-lib
+if(USE_HDF5)
+ find_package(HDF5 COMPONENTS HL REQUIRED)
+ include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR})
+ list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES})
+ add_definitions(-DUSE_HDF5)
+endif()
+
# ---[ LMDB
if(USE_LMDB)
find_package(LMDB REQUIRED)
diff --git a/cmake/Modules/FindMKL.cmake b/cmake/Modules/FindMKL.cmake
index 5ab93b2d..ef0c3bf1 100644
--- a/cmake/Modules/FindMKL.cmake
+++ b/cmake/Modules/FindMKL.cmake
@@ -9,7 +9,7 @@
# This module defines the following variables:
#
# MKL_FOUND : True mkl is found
-# MKL_INCLUDE_DIR : unclude directory
+# MKL_INCLUDE_DIR : include directory
# MKL_LIBRARIES : the libraries to link against.
diff --git a/cmake/ProtoBuf.cmake b/cmake/ProtoBuf.cmake
index 8005b448..72ea3230 100644
--- a/cmake/ProtoBuf.cmake
+++ b/cmake/ProtoBuf.cmake
@@ -78,7 +78,7 @@ function(caffe_protobuf_generate_cpp_py output_dir srcs_var hdrs_var python_var)
"${output_dir}/${fil_we}_pb2.py"
COMMAND ${CMAKE_COMMAND} -E make_directory "${output_dir}"
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out ${output_dir} ${_protoc_include} ${abs_fil}
- COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${output_dir} ${_protoc_include} ${abs_fil}
+ COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${PROJECT_BINARY_DIR}/include --proto_path ${PROJECT_SOURCE_DIR}/src ${_protoc_include} ${abs_fil}
DEPENDS ${abs_fil}
COMMENT "Running C++/Python protocol buffer compiler on ${fil}" VERBATIM )
endforeach()
diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake
index ed8c2526..40b8c2f2 100644
--- a/cmake/Summary.cmake
+++ b/cmake/Summary.cmake
@@ -119,6 +119,8 @@ function(caffe_print_configuration_summary)
caffe_status(" USE_LMDB : ${USE_LMDB}")
caffe_status(" USE_NCCL : ${USE_NCCL}")
caffe_status(" ALLOW_LMDB_NOLOCK : ${ALLOW_LMDB_NOLOCK}")
+ # This code is taken from https://github.com/sh1r0/caffe-android-lib
+ caffe_status(" USE_HDF5 : ${USE_HDF5}")
caffe_status("")
caffe_status("Dependencies:")
caffe_status(" BLAS : " APPLE THEN "Yes (vecLib)" ELSE "Yes (${BLAS})")
diff --git a/docs/tutorial/interfaces.md b/docs/tutorial/interfaces.md
index b5a4f1ad..2578af5d 100644
--- a/docs/tutorial/interfaces.md
+++ b/docs/tutorial/interfaces.md
@@ -129,8 +129,8 @@ Use CPU:
Use GPU and specify its gpu_id:
- caffe.set_mode_gpu();
caffe.set_device(gpu_id);
+ caffe.set_mode_gpu();
#### Create a network and access its layers and blobs
diff --git a/docs/tutorial/layers.md b/docs/tutorial/layers.md
index 78a46f3a..5036d4fd 100644
--- a/docs/tutorial/layers.md
+++ b/docs/tutorial/layers.md
@@ -93,6 +93,7 @@ Layers:
* [Log](layers/log.html) - f(x) = log(x).
* [BNLL](layers/bnll.html) - f(x) = log(1 + exp(x)).
* [Threshold](layers/threshold.html) - performs step function at user defined threshold.
+* [Clip](layers/clip.html) - clips a blob between a fixed minimum and maximum value.
* [Bias](layers/bias.html) - adds a bias to a blob that can either be learned or fixed.
* [Scale](layers/scale.html) - scales a blob by an amount that can either be learned or fixed.
diff --git a/docs/tutorial/layers/clip.md b/docs/tutorial/layers/clip.md
new file mode 100644
index 00000000..d6a20f5f
--- /dev/null
+++ b/docs/tutorial/layers/clip.md
@@ -0,0 +1,20 @@
+---
+title: Clip Layer
+---
+
+# Clip Layer
+
+* Layer type: `Clip`
+* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ClipLayer.html)
+* Header: [`./include/caffe/layers/clip_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/clip_layer.hpp)
+* CPU implementation: [`./src/caffe/layers/clip_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/clip_layer.cpp)
+* CUDA GPU implementation: [`./src/caffe/layers/clip_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/clip_layer.cu)
+
+## Parameters
+
+* Parameters (`ClipParameter clip_param`)
+* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto):
+
+{% highlight Protobuf %}
+{% include proto/ClipParameter.txt %}
+{% endhighlight %}
diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp
index e3e86a52..a4477361 100644
--- a/include/caffe/filler.hpp
+++ b/include/caffe/filler.hpp
@@ -148,8 +148,10 @@ class XavierFiller : public Filler<Dtype> {
virtual void Fill(Blob<Dtype>* blob) {
CHECK(blob->count());
int fan_in = blob->count() / blob->shape(0);
- // Compatible for ND Convolution
- int fan_out = blob->count() / blob->shape(1);
+ // Compatibility with ND blobs
+ int fan_out = blob->num_axes() > 1 ?
+ blob->count() / blob->shape(1) :
+ blob->count();
Dtype n = fan_in; // default to fan_in
if (this->filler_param_.variance_norm() ==
FillerParameter_VarianceNorm_AVERAGE) {
@@ -191,8 +193,10 @@ class MSRAFiller : public Filler<Dtype> {
virtual void Fill(Blob<Dtype>* blob) {
CHECK(blob->count());
int fan_in = blob->count() / blob->shape(0);
- // Compatible for ND Convolution
- int fan_out = blob->count() / blob->shape(1);
+ // Compatibility with ND blobs
+ int fan_out = blob->num_axes() > 1 ?
+ blob->count() / blob->shape(1) :
+ blob->count();
Dtype n = fan_in; // default to fan_in
if (this->filler_param_.variance_norm() ==
FillerParameter_VarianceNorm_AVERAGE) {
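The ternary guard above keeps `fan_out` well-defined for blobs with fewer than two axes (e.g. a 1-D bias blob), falling back to the full count. A minimal NumPy sketch of the fan computation, with an illustrative `xavier_scale` helper that is not part of Caffe:

```python
import numpy as np

def xavier_scale(shape, variance_norm="FAN_IN"):
    """Mirror the filler's fan_in/fan_out logic for an N-D blob shape."""
    count = int(np.prod(shape))
    fan_in = count // shape[0]
    # Compatibility with ND blobs: a single-axis blob has no shape(1).
    fan_out = count // shape[1] if len(shape) > 1 else count
    n = fan_in  # default to fan_in
    if variance_norm == "AVERAGE":
        n = (fan_in + fan_out) / 2.0
    elif variance_norm == "FAN_OUT":
        n = fan_out
    return np.sqrt(3.0 / n)  # XavierFiller draws uniform in [-scale, scale]

print(xavier_scale((64, 3, 3, 3)))  # 4-D conv weights: fan_in = 27
print(xavier_scale((10,)))          # 1-D blob: fan_out falls back to count
```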
diff --git a/include/caffe/layers/clip_layer.hpp b/include/caffe/layers/clip_layer.hpp
new file mode 100644
index 00000000..2788193e
--- /dev/null
+++ b/include/caffe/layers/clip_layer.hpp
@@ -0,0 +1,75 @@
+#ifndef CAFFE_CLIP_LAYER_HPP_
+#define CAFFE_CLIP_LAYER_HPP_
+
+#include <vector>
+
+#include "caffe/blob.hpp"
+#include "caffe/layer.hpp"
+#include "caffe/proto/caffe.pb.h"
+
+#include "caffe/layers/neuron_layer.hpp"
+
+namespace caffe {
+
+/**
+ * @brief Clip: @f$ y = \max(min, \min(max, x)) @f$.
+ */
+template <typename Dtype>
+class ClipLayer : public NeuronLayer<Dtype> {
+ public:
+ /**
+ * @param param provides ClipParameter clip_param,
+ * with ClipLayer options:
+ * - min
+ * - max
+ */
+ explicit ClipLayer(const LayerParameter& param)
+ : NeuronLayer<Dtype>(param) {}
+
+ virtual inline const char* type() const { return "Clip"; }
+
+ protected:
+ /**
+ * @param bottom input Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the inputs @f$ x @f$
+ * @param top output Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the computed outputs @f$
+ * y = \max(min, \min(max, x))
+ * @f$
+ */
+ virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top);
+ virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top);
+
+ /**
+ * @brief Computes the error gradient w.r.t. the clipped inputs.
+ *
+ * @param top output Blob vector (length 1), providing the error gradient with
+ * respect to the outputs
+ * -# @f$ (N \times C \times H \times W) @f$
+ * containing error gradients @f$ \frac{\partial E}{\partial y} @f$
+ * with respect to computed outputs @f$ y @f$
+ * @param propagate_down see Layer::Backward.
+ * @param bottom input Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the inputs @f$ x @f$; Backward fills their diff with
+ * gradients @f$
+ * \frac{\partial E}{\partial x} = \left\{
+ * \begin{array}{lr}
+ * 0 & \mathrm{if} \; x < min \vee x > max \\
+ * \frac{\partial E}{\partial y} & \mathrm{if} \; x \ge min \wedge x \le max
+ * \end{array} \right.
+ * @f$
+ */
+ virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+ virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+};
+
+} // namespace caffe
+
+#endif // CAFFE_CLIP_LAYER_HPP_
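A NumPy sketch of the forward/backward pair documented in the header above (illustrative only, not the layer's code path): the gradient is the identity inside `[min, max]` and zero where the input was clipped:

```python
import numpy as np

def clip_forward(x, lo, hi):
    # y = max(min, min(max, x))
    return np.maximum(lo, np.minimum(x, hi))

def clip_backward(x, top_diff, lo, hi):
    # dE/dx = dE/dy where lo <= x <= hi, and 0 elsewhere
    return top_diff * ((x >= lo) & (x <= hi))

x = np.array([-2.0, -0.5, 0.5, 2.0])
print(clip_forward(x, -1.0, 1.0))                    # [-1.  -0.5  0.5  1. ]
print(clip_backward(x, np.ones_like(x), -1.0, 1.0))  # [ 0.   1.   1.   0. ]
```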
diff --git a/include/caffe/layers/pooling_layer.hpp b/include/caffe/layers/pooling_layer.hpp
index f4d6803b..38a43283 100644
--- a/include/caffe/layers/pooling_layer.hpp
+++ b/include/caffe/layers/pooling_layer.hpp
@@ -51,6 +51,7 @@ class PoolingLayer : public Layer<Dtype> {
int height_, width_;
int pooled_height_, pooled_width_;
bool global_pooling_;
+ PoolingParameter_RoundMode round_mode_;
Blob<Dtype> rand_idx_;
Blob<int> max_idx_;
};
diff --git a/include/caffe/layers/swish_layer.hpp b/include/caffe/layers/swish_layer.hpp
new file mode 100644
index 00000000..d538ff6d
--- /dev/null
+++ b/include/caffe/layers/swish_layer.hpp
@@ -0,0 +1,96 @@
+#ifndef CAFFE_SWISH_LAYER_HPP_
+#define CAFFE_SWISH_LAYER_HPP_
+
+#include <vector>
+
+#include "caffe/blob.hpp"
+#include "caffe/layer.hpp"
+#include "caffe/proto/caffe.pb.h"
+
+#include "caffe/layers/neuron_layer.hpp"
+#include "caffe/layers/sigmoid_layer.hpp"
+
+namespace caffe {
+
+/**
+ * @brief Swish non-linearity @f$ y = x \sigma (\beta x) @f$.
+ * A novel activation function that tends to work better than ReLU [1].
+ *
+ * [1] Prajit Ramachandran, Barret Zoph, Quoc V. Le. "Searching for
+ * Activation Functions". arXiv preprint arXiv:1710.05941v2 (2017).
+ */
+template <typename Dtype>
+class SwishLayer : public NeuronLayer<Dtype> {
+ public:
+ /**
+ * @param param provides SwishParameter swish_param,
+ * with SwishLayer options:
+ * - beta (\b optional, default 1).
+ * the value @f$ \beta @f$ in @f$ y = x \sigma (\beta x) @f$.
+ */
+ explicit SwishLayer(const LayerParameter& param)
+ : NeuronLayer<Dtype>(param),
+ sigmoid_layer_(new SigmoidLayer<Dtype>(param)),
+ sigmoid_input_(new Blob<Dtype>()),
+ sigmoid_output_(new Blob<Dtype>()) {}
+ virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top);
+ virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top);
+
+ virtual inline const char* type() const { return "Swish"; }
+
+ protected:
+ /**
+ * @param bottom input Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the inputs @f$ x @f$
+ * @param top output Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the computed outputs @f$
+ * y = x \sigma (\beta x)
+ * @f$.
+ */
+ virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top);
+ virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top);
+
+ /**
+ * @brief Computes the error gradient w.r.t. the sigmoid inputs.
+ *
+ * @param top output Blob vector (length 1), providing the error gradient with
+ * respect to the outputs
+ * -# @f$ (N \times C \times H \times W) @f$
+ * containing error gradients @f$ \frac{\partial E}{\partial y} @f$
+ * with respect to computed outputs @f$ y @f$
+ * @param propagate_down see Layer::Backward.
+ * @param bottom input Blob vector (length 1)
+ * -# @f$ (N \times C \times H \times W) @f$
+ * the inputs @f$ x @f$; Backward fills their diff with
+ * gradients @f$
+ * \frac{\partial E}{\partial x}
+ * = \frac{\partial E}{\partial y}(\beta y +
+ * \sigma (\beta x)(1 - \beta y))
+ * @f$ if propagate_down[0]
+ */
+ virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+ virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
+
+ /// The internal SigmoidLayer
+ shared_ptr<SigmoidLayer<Dtype> > sigmoid_layer_;
+ /// sigmoid_input_ stores the input of the SigmoidLayer.
+ shared_ptr<Blob<Dtype> > sigmoid_input_;
+ /// sigmoid_output_ stores the output of the SigmoidLayer.
+ shared_ptr<Blob<Dtype> > sigmoid_output_;
+ /// bottom vector holder to call the underlying SigmoidLayer::Forward
+ vector<Blob<Dtype>*> sigmoid_bottom_vec_;
+ /// top vector holder to call the underlying SigmoidLayer::Forward
+ vector<Blob<Dtype>*> sigmoid_top_vec_;
+};
+
+} // namespace caffe
+
+#endif // CAFFE_SWISH_LAYER_HPP_
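As a sanity check on the backward formula documented above, here is an illustrative NumPy version of both passes with a central-difference gradient check (assuming the default β = 1):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def swish_forward(x, beta=1.0):
    return x * sigmoid(beta * x)  # y = x * sigma(beta * x)

def swish_backward(x, top_diff, beta=1.0):
    y = swish_forward(x, beta)
    # dE/dx = dE/dy * (beta*y + sigma(beta*x) * (1 - beta*y))
    return top_diff * (beta * y + sigmoid(beta * x) * (1.0 - beta * y))

x, eps = np.linspace(-3.0, 3.0, 13), 1e-5
numeric = (swish_forward(x + eps) - swish_forward(x - eps)) / (2 * eps)
assert np.allclose(swish_backward(x, np.ones_like(x)), numeric, atol=1e-8)
```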
diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp
index d3c9306e..143d5d28 100644
--- a/include/caffe/net.hpp
+++ b/include/caffe/net.hpp
@@ -111,9 +111,9 @@ class Net {
* another Net.
*/
void CopyTrainedLayersFrom(const NetParameter& param);
- void CopyTrainedLayersFrom(const string trained_filename);
- void CopyTrainedLayersFromBinaryProto(const string trained_filename);
- void CopyTrainedLayersFromHDF5(const string trained_filename);
+ void CopyTrainedLayersFrom(const string& trained_filename);
+ void CopyTrainedLayersFromBinaryProto(const string& trained_filename);
+ void CopyTrainedLayersFromHDF5(const string& trained_filename);
/// @brief Writes the net to a proto.
void ToProto(NetParameter* param, bool write_diff = false) const;
/// @brief Writes the net to an HDF5 file.
diff --git a/include/caffe/sgd_solvers.hpp b/include/caffe/sgd_solvers.hpp
index 1fc52d87..925ff783 100644
--- a/include/caffe/sgd_solvers.hpp
+++ b/include/caffe/sgd_solvers.hpp
@@ -23,10 +23,11 @@ class SGDSolver : public Solver<Dtype> {
const vector<shared_ptr<Blob<Dtype> > >& history() { return history_; }
+ virtual void ApplyUpdate();
+ Dtype GetLearningRate();
+
protected:
void PreSolve();
- Dtype GetLearningRate();
- virtual void ApplyUpdate();
virtual void Normalize(int param_id);
virtual void Regularize(int param_id);
virtual void ComputeUpdateValue(int param_id, Dtype rate);
diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp
index a28d8cb8..7a0d7777 100644
--- a/include/caffe/solver.hpp
+++ b/include/caffe/solver.hpp
@@ -55,7 +55,7 @@ class Solver {
// The main entry of the solver function. In default, iter will be zero. Pass
// in a non-zero iter number to resume training for a pre-trained net.
virtual void Solve(const char* resume_file = NULL);
- inline void Solve(const string resume_file) { Solve(resume_file.c_str()); }
+ inline void Solve(const string& resume_file) { Solve(resume_file.c_str()); }
void Step(int iters);
// The Restore method simply dispatches to one of the
// RestoreSolverStateFrom___ protected methods. You should implement these
@@ -94,10 +94,11 @@ class Solver {
*/
virtual inline const char* type() const { return ""; }
- protected:
// Make and apply the update value for the current iteration.
virtual void ApplyUpdate() = 0;
- string SnapshotFilename(const string extension);
+
+ protected:
+ string SnapshotFilename(const string& extension);
string SnapshotToBinaryProto();
string SnapshotToHDF5();
// The test routine
diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp
index 317ce29a..8d650a34 100644
--- a/include/caffe/syncedmem.hpp
+++ b/include/caffe/syncedmem.hpp
@@ -66,8 +66,8 @@ class SyncedMemory {
void* mutable_cpu_data();
void* mutable_gpu_data();
enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED };
- SyncedHead head() { return head_; }
- size_t size() { return size_; }
+ SyncedHead head() const { return head_; }
+ size_t size() const { return size_; }
#ifndef CPU_ONLY
void async_gpu_push(const cudaStream_t& stream);
diff --git a/include/caffe/util/hdf5.hpp b/include/caffe/util/hdf5.hpp
index 71549c1c..dbd8bb6c 100644
--- a/include/caffe/util/hdf5.hpp
+++ b/include/caffe/util/hdf5.hpp
@@ -1,3 +1,4 @@
+#ifdef USE_HDF5
#ifndef CAFFE_UTIL_HDF5_H_
#define CAFFE_UTIL_HDF5_H_
@@ -37,3 +38,4 @@ string hdf5_get_name_by_idx(hid_t loc_id, int idx);
} // namespace caffe
#endif // CAFFE_UTIL_HDF5_H_
+#endif // USE_HDF5
diff --git a/include/caffe/util/signal_handler.h b/include/caffe/util/signal_handler.h
index fb84c65b..52463325 100644
--- a/include/caffe/util/signal_handler.h
+++ b/include/caffe/util/signal_handler.h
@@ -8,7 +8,7 @@ namespace caffe {
class SignalHandler {
public:
- // Contructor. Specify what action to take when a signal is received.
+ // Constructor. Specify what action to take when a signal is received.
SignalHandler(SolverAction::Enum SIGINT_action,
SolverAction::Enum SIGHUP_action);
~SignalHandler();
diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp
index 72659a4f..82bf21e6 100644
--- a/python/caffe/_caffe.cpp
+++ b/python/caffe/_caffe.cpp
@@ -416,7 +416,7 @@ BOOST_PYTHON_MODULE(_caffe) {
.def("reshape", &Net<Dtype>::Reshape)
.def("clear_param_diffs", &Net<Dtype>::ClearParamDiffs)
// The cast is to select a particular overload.
- .def("copy_from", static_cast<void (Net<Dtype>::*)(const string)>(
+ .def("copy_from", static_cast<void (Net<Dtype>::*)(const string&)>(
&Net<Dtype>::CopyTrainedLayersFrom))
.def("share_with", &Net<Dtype>::ShareTrainedLayersWith)
.add_property("_blob_loss_weights", bp::make_function(
@@ -490,7 +490,9 @@ BOOST_PYTHON_MODULE(_caffe) {
bp::class_<SolverParameter>("SolverParameter", bp::no_init)
.add_property("max_iter", &SolverParameter::max_iter)
.add_property("display", &SolverParameter::display)
- .add_property("layer_wise_reduce", &SolverParameter::layer_wise_reduce);
+ .add_property("layer_wise_reduce", &SolverParameter::layer_wise_reduce)
+ .add_property("base_lr", &SolverParameter::base_lr,
+ &SolverParameter::set_base_lr);
bp::class_<LayerParameter>("LayerParameter", bp::no_init);
bp::class_<Solver<Dtype>, shared_ptr<Solver<Dtype> >, boost::noncopyable>(
@@ -507,26 +509,28 @@ BOOST_PYTHON_MODULE(_caffe) {
.def("restore", &Solver<Dtype>::Restore)
.def("snapshot", &Solver<Dtype>::Snapshot)
.def("share_weights", &share_weights)
+ .def("apply_update", &Solver<Dtype>::ApplyUpdate)
.add_property("param", bp::make_function(&Solver<Dtype>::param,
- bp::return_value_policy<bp::copy_const_reference>()));
+ bp::return_internal_reference<>()));
BP_REGISTER_SHARED_PTR_TO_PYTHON(Solver<Dtype>);
bp::class_<SGDSolver<Dtype>, bp::bases<Solver<Dtype> >,
shared_ptr<SGDSolver<Dtype> >, boost::noncopyable>(
- "SGDSolver", bp::init<string>());
- bp::class_<NesterovSolver<Dtype>, bp::bases<Solver<Dtype> >,
+ "SGDSolver", bp::init<string>())
+ .add_property("lr", &SGDSolver<Dtype>::GetLearningRate);
+ bp::class_<NesterovSolver<Dtype>, bp::bases<SGDSolver<Dtype> >,
shared_ptr<NesterovSolver<Dtype> >, boost::noncopyable>(
"NesterovSolver", bp::init<string>());
- bp::class_<AdaGradSolver<Dtype>, bp::bases<Solver<Dtype> >,
+ bp::class_<AdaGradSolver<Dtype>, bp::bases<SGDSolver<Dtype> >,
shared_ptr<AdaGradSolver<Dtype> >, boost::noncopyable>(
"AdaGradSolver", bp::init<string>());
- bp::class_<RMSPropSolver<Dtype>, bp::bases<Solver<Dtype> >,
+ bp::class_<RMSPropSolver<Dtype>, bp::bases<SGDSolver<Dtype> >,
shared_ptr<RMSPropSolver<Dtype> >, boost::noncopyable>(
"RMSPropSolver", bp::init<string>());
- bp::class_<AdaDeltaSolver<Dtype>, bp::bases<Solver<Dtype> >,
+ bp::class_<AdaDeltaSolver<Dtype>, bp::bases<SGDSolver<Dtype> >,
shared_ptr<AdaDeltaSolver<Dtype> >, boost::noncopyable>(
"AdaDeltaSolver", bp::init<string>());
- bp::class_<AdamSolver<Dtype>, bp::bases<Solver<Dtype> >,
+ bp::class_<AdamSolver<Dtype>, bp::bases<SGDSolver<Dtype> >,
shared_ptr<AdamSolver<Dtype> >, boost::noncopyable>(
"AdamSolver", bp::init<string>());
diff --git a/python/caffe/draw.py b/python/caffe/draw.py
index 8411a41d..0061f490 100644
--- a/python/caffe/draw.py
+++ b/python/caffe/draw.py
@@ -59,18 +59,60 @@ def get_edge_label(layer):
return edge_label
-def get_layer_label(layer, rankdir):
+def get_layer_lr_mult(layer):
+ """Get the learning rate multipliers.
+
+ Get the learning rate multipliers for the given layer. Assumes a
+ Convolution/Deconvolution/InnerProduct layer.
+
+ Parameters
+ ----------
+ layer : caffe_pb2.LayerParameter
+ A Convolution, Deconvolution, or InnerProduct layer.
+
+ Returns
+ -------
+ learning_rates : tuple of floats
+ the learning rate multipliers for the weights and biases.
+ """
+ if layer.type not in ['Convolution', 'Deconvolution', 'InnerProduct']:
+ raise ValueError("%s layers do not have a "
+ "learning rate multiplier" % layer.type)
+
+ if not hasattr(layer, 'param'):
+ return (1.0, 1.0)
+
+ params = getattr(layer, 'param')
+
+ if len(params) == 0:
+ return (1.0, 1.0)
+
+ if len(params) == 1:
+ lrm0 = getattr(params[0], 'lr_mult', 1.0)
+ return (lrm0, 1.0)
+
+ if len(params) == 2:
+ lrm0, lrm1 = [getattr(p, 'lr_mult', 1.0) for p in params]
+ return (lrm0, lrm1)
+
+ raise ValueError("Could not parse the learning rate multiplier")
+
+
+def get_layer_label(layer, rankdir, display_lrm=False):
"""Define node label based on layer type.
Parameters
----------
- layer : ?
+ layer : caffe_pb2.LayerParameter
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
+ display_lrm : boolean, optional
+ If True include the learning rate multipliers in the label (default is
+ False).
Returns
-------
- string :
+ node_label : string
A label for the current layer
"""
@@ -81,36 +123,54 @@ def get_layer_label(layer, rankdir):
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
- separator = '\\n'
-
- if layer.type == 'Convolution' or layer.type == 'Deconvolution':
- # Outer double quotes needed or else colon characters don't parse
- # properly
- node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
- (layer.name,
- separator,
- layer.type,
- separator,
- layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size) else 1,
- separator,
- layer.convolution_param.stride[0] if len(layer.convolution_param.stride) else 1,
- separator,
- layer.convolution_param.pad[0] if len(layer.convolution_param.pad) else 0)
- elif layer.type == 'Pooling':
+ separator = r'\n'
+
+ # Initializes a list of descriptors that will be concatenated into the
+ # `node_label`
+ descriptors_list = []
+ # Add the layer's name
+ descriptors_list.append(layer.name)
+ # Add layer's type
+ if layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
- node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
- (layer.name,
- separator,
- pooling_types_dict[layer.pooling_param.pool],
- layer.type,
- separator,
- layer.pooling_param.kernel_size,
- separator,
- layer.pooling_param.stride,
- separator,
- layer.pooling_param.pad)
+ layer_type = '(%s %s)' % (layer.type,
+ pooling_types_dict[layer.pooling_param.pool])
else:
- node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
+ layer_type = '(%s)' % layer.type
+ descriptors_list.append(layer_type)
+
+ # Describe parameters for spatial operation layers
+ if layer.type in ['Convolution', 'Deconvolution', 'Pooling']:
+ if layer.type == 'Pooling':
+ kernel_size = layer.pooling_param.kernel_size
+ stride = layer.pooling_param.stride
+ padding = layer.pooling_param.pad
+ else:
+ kernel_size = layer.convolution_param.kernel_size[0] if \
+ len(layer.convolution_param.kernel_size) else 1
+ stride = layer.convolution_param.stride[0] if \
+ len(layer.convolution_param.stride) else 1
+ padding = layer.convolution_param.pad[0] if \
+ len(layer.convolution_param.pad) else 0
+ spatial_descriptor = separator.join([
+ "kernel size: %d" % kernel_size,
+ "stride: %d" % stride,
+ "pad: %d" % padding,
+ ])
+ descriptors_list.append(spatial_descriptor)
+
+ # Add LR multiplier for learning layers
+ if display_lrm and layer.type in ['Convolution', 'Deconvolution', 'InnerProduct']:
+ lrm0, lrm1 = get_layer_lr_mult(layer)
+ if any([lrm0, lrm1]):
+ lr_mult = "lr mult: %.1f, %.1f" % (lrm0, lrm1)
+ descriptors_list.append(lr_mult)
+
+ # Concatenate the descriptors into one label
+ node_label = separator.join(descriptors_list)
+ # Outer double quotes needed or else colon characters don't parse
+ # properly
+ node_label = '"%s"' % node_label
return node_label
@@ -127,7 +187,7 @@ def choose_color_by_layertype(layertype):
return color
-def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
+def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None, display_lrm=False):
"""Create a data structure which represents the `caffe_net`.
Parameters
@@ -140,6 +200,9 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
+ display_lrm : boolean, optional
+ If True display the learning rate multipliers when relevant (default is
+ False).
Returns
-------
@@ -164,7 +227,7 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
included = included and not layer_phase.phase == phase
if not included:
continue
- node_label = get_layer_label(layer, rankdir)
+ node_label = get_layer_label(layer, rankdir, display_lrm=display_lrm)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
@@ -202,7 +265,7 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
return pydot_graph
-def draw_net(caffe_net, rankdir, ext='png', phase=None):
+def draw_net(caffe_net, rankdir, ext='png', phase=None, display_lrm=False):
"""Draws a caffe net and returns the image string encoded using the given
extension.
@@ -214,16 +277,20 @@ def draw_net(caffe_net, rankdir, ext='png', phase=None):
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
+ display_lrm : boolean, optional
+ If True display the learning rate multipliers for the learning layers
+ (default is False).
Returns
-------
string :
Postscript representation of the graph.
"""
- return get_pydot_graph(caffe_net, rankdir, phase=phase).create(format=ext)
+ return get_pydot_graph(caffe_net, rankdir, phase=phase,
+ display_lrm=display_lrm).create(format=ext)
-def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None):
+def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None, display_lrm=False):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
@@ -238,7 +305,10 @@ def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None):
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
+ display_lrm : boolean, optional
+ If True display the learning rate multipliers for the learning layers
+ (default is False).
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
- fid.write(draw_net(caffe_net, rankdir, ext, phase))
+ fid.write(draw_net(caffe_net, rankdir, ext, phase, display_lrm))
diff --git a/python/caffe/test/test_solver.py b/python/caffe/test/test_solver.py
index f618fded..50c9d541 100644
--- a/python/caffe/test/test_solver.py
+++ b/python/caffe/test/test_solver.py
@@ -38,6 +38,17 @@ class TestSolver(unittest.TestCase):
self.solver.solve()
self.assertEqual(self.solver.iter, 100)
+ def test_apply_update(self):
+ net = self.solver.net
+ data = net.layers[1].blobs[0].data[...]
+ # Reset the weights of that layer to 0
+ data[...] = 0
+ net.layers[1].blobs[0].diff[...] = 1
+ # Apply the update, the initial learning rate should be 0.01
+ self.solver.apply_update()
+ # Check that the new weights are -0.01, with a precision of 1e-7
+ self.assertTrue((data - -0.01 * np.ones(data.shape)).max() < 1e-7)
+
def test_net_memory(self):
"""Check that nets survive after the solver is destroyed."""
diff --git a/python/draw_net.py b/python/draw_net.py
index dfe70d26..23cae30a 100755
--- a/python/draw_net.py
+++ b/python/draw_net.py
@@ -33,6 +33,10 @@ def parse_args():
'TEST, or ALL. If ALL, then all layers are drawn '
'regardless of phase.'),
default="ALL")
+ parser.add_argument('--display_lrm', action='store_true',
+ help=('Use this flag to visualize the learning rate '
+ 'multiplier, when non-zero, for the learning '
+ 'layers (Convolution, Deconvolution, InnerProduct).'))
args = parser.parse_args()
return args
@@ -51,7 +55,7 @@ def main():
elif args.phase != "ALL":
raise ValueError("Unknown phase: " + args.phase)
caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir,
- phase)
+ phase, args.display_lrm)
if __name__ == '__main__':
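The same switch is reachable from the Python API; a hedged example of rendering a net with the multipliers shown (the prototxt path is illustrative):

```python
from google.protobuf import text_format
import caffe.draw
from caffe.proto import caffe_pb2

net = caffe_pb2.NetParameter()
with open('examples/mnist/lenet_train_test.prototxt') as f:
    text_format.Merge(f.read(), net)

# phase=None draws all layers; display_lrm=True appends "lr mult: w, b"
# to Convolution/Deconvolution/InnerProduct nodes.
caffe.draw.draw_net_to_file(net, 'lenet.png', rankdir='LR',
                            phase=None, display_lrm=True)
```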
diff --git a/python/train.py b/python/train.py
index 5897f5dc..14a38b8c 100644
--- a/python/train.py
+++ b/python/train.py
@@ -63,8 +63,8 @@ def time(solver, nccl):
def solve(proto, snapshot, gpus, timing, uid, rank):
- caffe.set_mode_gpu()
caffe.set_device(gpus[rank])
+ caffe.set_mode_gpu()
caffe.set_solver_count(len(gpus))
caffe.set_solver_rank(rank)
caffe.set_multiprocess(True)
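The swap here (and in `docs/tutorial/interfaces.md` above) is deliberate: switching to GPU mode before selecting a device can leave a CUDA context on the default GPU. The safe ordering, sketched for a single process:

```python
import caffe

gpu_id = 1                # illustrative device index
caffe.set_device(gpu_id)  # pick the device first...
caffe.set_mode_gpu()      # ...then switch modes, so no context lands on GPU 0
```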
diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp
index 9f9026b1..d9984431 100644
--- a/src/caffe/layer_factory.cpp
+++ b/src/caffe/layer_factory.cpp
@@ -7,6 +7,7 @@
#include "caffe/layer.hpp"
#include "caffe/layer_factory.hpp"
+#include "caffe/layers/clip_layer.hpp"
#include "caffe/layers/conv_layer.hpp"
#include "caffe/layers/deconv_layer.hpp"
#include "caffe/layers/lrn_layer.hpp"
diff --git a/src/caffe/layers/clip_layer.cpp b/src/caffe/layers/clip_layer.cpp
new file mode 100644
index 00000000..9d9a5967
--- /dev/null
+++ b/src/caffe/layers/clip_layer.cpp
@@ -0,0 +1,51 @@
+#include <algorithm>
+#include <vector>
+
+#include "caffe/layers/clip_layer.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void ClipLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top) {
+ const Dtype* bottom_data = bottom[0]->cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
+ const int count = bottom[0]->count();
+
+ Dtype min = this->layer_param_.clip_param().min();
+ Dtype max = this->layer_param_.clip_param().max();
+
+ for (int i = 0; i < count; ++i) {
+ top_data[i] = std::max(min, std::min(bottom_data[i], max));
+ }
+}
+
+template <typename Dtype>
+void ClipLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down,
+ const vector<Blob<Dtype>*>& bottom) {
+ if (propagate_down[0]) {
+ const Dtype* bottom_data = bottom[0]->cpu_data();
+ const Dtype* top_diff = top[0]->cpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ const int count = bottom[0]->count();
+
+ Dtype min = this->layer_param_.clip_param().min();
+ Dtype max = this->layer_param_.clip_param().max();
+
+ for (int i = 0; i < count; ++i) {
+ bottom_diff[i] = top_diff[i] * (
+ bottom_data[i] >= min && bottom_data[i] <= max);
+ }
+ }
+}
+
+
+#ifdef CPU_ONLY
+STUB_GPU(ClipLayer);
+#endif
+
+INSTANTIATE_CLASS(ClipLayer);
+REGISTER_LAYER_CLASS(Clip);
+
+} // namespace caffe
diff --git a/src/caffe/layers/clip_layer.cu b/src/caffe/layers/clip_layer.cu
new file mode 100644
index 00000000..56f3be32
--- /dev/null
+++ b/src/caffe/layers/clip_layer.cu
@@ -0,0 +1,67 @@
+#include <vector>
+
+#include "caffe/layers/clip_layer.hpp"
+#include "caffe/util/math_functions.hpp"
+
+namespace caffe {
+
+__global__ void ClipForward(const int n, const float* in, float* out,
+ float p_min, float p_max) {
+ CUDA_KERNEL_LOOP(index, n) {
+ out[index] = fmaxf(p_min, fminf(in[index], p_max));
+ }
+}
+
+__global__ void ClipForward(const int n, const double* in, double* out,
+ double p_min, double p_max) {
+ CUDA_KERNEL_LOOP(index, n) {
+ out[index] = fmax(p_min, fmin(in[index], p_max));
+ }
+}
+
+template <typename Dtype>
+void ClipLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top) {
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
+ const int count = bottom[0]->count();
+ Dtype p_min = this->layer_param_.clip_param().min();
+ Dtype p_max = this->layer_param_.clip_param().max();
+ // NOLINT_NEXT_LINE(whitespace/operators)
+ ClipForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+ count, bottom_data, top_data, p_min, p_max);
+ CUDA_POST_KERNEL_CHECK;
+}
+
+template <typename Dtype>
+__global__ void ClipBackward(const int n, const Dtype* in_diff,
+ const Dtype* in_data, Dtype* out_diff, Dtype p_min, Dtype p_max) {
+ CUDA_KERNEL_LOOP(index, n) {
+ out_diff[index] = in_diff[index] * (
+ in_data[index] >= p_min && in_data[index] <= p_max);
+ }
+}
+
+template <typename Dtype>
+void ClipLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down,
+ const vector<Blob<Dtype>*>& bottom) {
+ if (propagate_down[0]) {
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ const Dtype* top_diff = top[0]->gpu_diff();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+ const int count = bottom[0]->count();
+ Dtype p_min = this->layer_param_.clip_param().min();
+ Dtype p_max = this->layer_param_.clip_param().max();
+ // NOLINT_NEXT_LINE(whitespace/operators)
+ ClipBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+ count, top_diff, bottom_data, bottom_diff, p_min, p_max);
+ CUDA_POST_KERNEL_CHECK;
+ }
+}
+
+
+INSTANTIATE_LAYER_GPU_FUNCS(ClipLayer);
+
+
+} // namespace caffe
diff --git a/src/caffe/layers/embed_layer.cu b/src/caffe/layers/embed_layer.cu
index 6324a3a8..3cf39fd9 100644
--- a/src/caffe/layers/embed_layer.cu
+++ b/src/caffe/layers/embed_layer.cu
@@ -15,6 +15,11 @@ __global__ void EmbedForward(const int nthreads, const Dtype* bottom_data,
const int n = top_index / N;
const int d = top_index % N;
const int index = static_cast<int>(bottom_data[n]);
+ #ifdef DEBUG
+ assert(index >= 0);
+ assert(index < K);
+ assert(static_cast<Dtype>(index) == bottom_data[n]);
+ #endif
const int weight_index = index * N + d;
top_data[top_index] = weight[weight_index];
}
diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp
index 00716a92..7668854c 100644
--- a/src/caffe/layers/hdf5_data_layer.cpp
+++ b/src/caffe/layers/hdf5_data_layer.cpp
@@ -1,3 +1,4 @@
+#ifdef USE_HDF5
/*
TODO:
- load file in a separate thread ("prefetch")
@@ -184,3 +185,4 @@ INSTANTIATE_CLASS(HDF5DataLayer);
REGISTER_LAYER_CLASS(HDF5Data);
} // namespace caffe
+#endif // USE_HDF5
diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu
index 33eebd41..70cd9f32 100644
--- a/src/caffe/layers/hdf5_data_layer.cu
+++ b/src/caffe/layers/hdf5_data_layer.cu
@@ -1,3 +1,4 @@
+#ifdef USE_HDF5
/*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
@@ -34,3 +35,4 @@ void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
INSTANTIATE_LAYER_GPU_FUNCS(HDF5DataLayer);
} // namespace caffe
+#endif // USE_HDF5
diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp
index f8f1edcd..28c453a2 100644
--- a/src/caffe/layers/hdf5_output_layer.cpp
+++ b/src/caffe/layers/hdf5_output_layer.cpp
@@ -1,3 +1,4 @@
+#ifdef USE_HDF5
#include <vector>
#include "hdf5.h"
@@ -72,3 +73,4 @@ INSTANTIATE_CLASS(HDF5OutputLayer);
REGISTER_LAYER_CLASS(HDF5Output);
} // namespace caffe
+#endif // USE_HDF5
diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu
index c1685cd3..891aea03 100644
--- a/src/caffe/layers/hdf5_output_layer.cu
+++ b/src/caffe/layers/hdf5_output_layer.cu
@@ -1,3 +1,4 @@
+#ifdef USE_HDF5
#include <vector>
#include "hdf5.h"
@@ -37,3 +38,4 @@ void HDF5OutputLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
INSTANTIATE_LAYER_GPU_FUNCS(HDF5OutputLayer);
} // namespace caffe
+#endif // USE_HDF5
diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp
index e65349f0..57fdbe1f 100644
--- a/src/caffe/layers/inner_product_layer.cpp
+++ b/src/caffe/layers/inner_product_layer.cpp
@@ -42,7 +42,7 @@ void InnerProductLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
this->layer_param_.inner_product_param().weight_filler()));
weight_filler->Fill(this->blobs_[0].get());
- // If necessary, intiialize and fill the bias term
+ // If necessary, initialize and fill the bias term
if (bias_term_) {
vector<int> bias_shape(1, N_);
this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp
index 90897db0..32dc0482 100644
--- a/src/caffe/layers/pooling_layer.cpp
+++ b/src/caffe/layers/pooling_layer.cpp
@@ -35,6 +35,7 @@ void PoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
|| (!pool_param.has_stride_h() && !pool_param.has_stride_w()))
<< "Stride is stride OR stride_h and stride_w are required.";
global_pooling_ = pool_param.global_pooling();
+ round_mode_ = pool_param.round_mode();
if (global_pooling_) {
kernel_h_ = bottom[0]->height();
kernel_w_ = bottom[0]->width();
@@ -87,10 +88,22 @@ void PoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
kernel_h_ = bottom[0]->height();
kernel_w_ = bottom[0]->width();
}
- pooled_height_ = static_cast<int>(ceil(static_cast<float>(
- height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
- pooled_width_ = static_cast<int>(ceil(static_cast<float>(
- width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
+ switch (round_mode_) {
+ case PoolingParameter_RoundMode_CEIL:
+ pooled_height_ = static_cast<int>(ceil(static_cast<float>(
+ height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
+ pooled_width_ = static_cast<int>(ceil(static_cast<float>(
+ width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
+ break;
+ case PoolingParameter_RoundMode_FLOOR:
+ pooled_height_ = static_cast<int>(floor(static_cast<float>(
+ height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
+ pooled_width_ = static_cast<int>(floor(static_cast<float>(
+ width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
+ break;
+ default:
+ LOG(FATAL) << "Unknown rounding mode.";
+ }
if (pad_h_ || pad_w_) {
// If we have padding, ensure that the last pooling starts strictly
// inside the image (instead of at the padding); otherwise clip the last.
@@ -132,7 +145,7 @@ void PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const int top_count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
- int* mask = NULL; // suppress warnings about uninitalized variables
+ int* mask = NULL; // suppress warnings about uninitialized variables
Dtype* top_mask = NULL;
// Different pooling methods. We explicitly do the switch outside the for
// loop to save time, although this results in more code.
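The two rounding modes only differ when `(dim + 2*pad - kernel)` is not a multiple of the stride; a quick sketch of the output-size arithmetic above:

```python
import math

def pooled_dim(dim, pad, kernel, stride, round_mode="CEIL"):
    """Output size along one spatial axis, as in PoolingLayer::Reshape."""
    r = math.ceil if round_mode == "CEIL" else math.floor
    return int(r(float(dim + 2 * pad - kernel) / stride)) + 1

print(pooled_dim(112, 0, 3, 2, "CEIL"))   # 56 -- Caffe's historical default
print(pooled_dim(112, 0, 3, 2, "FLOOR"))  # 55 -- the new option, matching the
                                          # convention of most other frameworks
```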
diff --git a/src/caffe/layers/swish_layer.cpp b/src/caffe/layers/swish_layer.cpp
new file mode 100644
index 00000000..28935679
--- /dev/null
+++ b/src/caffe/layers/swish_layer.cpp
@@ -0,0 +1,68 @@
+#include <cmath>
+#include <vector>
+
+#include "caffe/layers/swish_layer.hpp"
+#include "caffe/util/math_functions.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void SwishLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top) {
+ NeuronLayer<Dtype>::LayerSetUp(bottom, top);
+ sigmoid_bottom_vec_.clear();
+ sigmoid_bottom_vec_.push_back(sigmoid_input_.get());
+ sigmoid_top_vec_.clear();
+ sigmoid_top_vec_.push_back(sigmoid_output_.get());
+ sigmoid_layer_->SetUp(sigmoid_bottom_vec_, sigmoid_top_vec_);
+}
+
+template <typename Dtype>
+void SwishLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top) {
+ NeuronLayer<Dtype>::Reshape(bottom, top);
+ sigmoid_input_->ReshapeLike(*bottom[0]);
+ sigmoid_layer_->Reshape(sigmoid_bottom_vec_, sigmoid_top_vec_);
+}
+
+template <typename Dtype>
+void SwishLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top) {
+ const Dtype* bottom_data = bottom[0]->cpu_data();
+ Dtype* sigmoid_input_data = sigmoid_input_->mutable_cpu_data();
+ Dtype* top_data = top[0]->mutable_cpu_data();
+ const int count = bottom[0]->count();
+ Dtype beta = this->layer_param_.swish_param().beta();
+ caffe_copy(count, bottom_data, sigmoid_input_data);
+ caffe_scal(count, beta, sigmoid_input_data);
+ sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
+ caffe_mul(count, bottom_data, sigmoid_output_->cpu_data(), top_data);
+}
+
+template <typename Dtype>
+void SwishLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down,
+ const vector<Blob<Dtype>*>& bottom) {
+ if (propagate_down[0]) {
+ const Dtype* top_data = top[0]->cpu_data();
+ const Dtype* top_diff = top[0]->cpu_diff();
+ const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+ const int count = bottom[0]->count();
+ Dtype beta = this->layer_param_.swish_param().beta();
+ for (int i = 0; i < count; ++i) {
+ const Dtype swish_x = top_data[i];
+ bottom_diff[i] = top_diff[i] * (beta * swish_x + sigmoid_output_data[i]
+ * (1. - beta * swish_x));
+ }
+ }
+}
+
+#ifdef CPU_ONLY
+STUB_GPU(SwishLayer);
+#endif
+
+INSTANTIATE_CLASS(SwishLayer);
+REGISTER_LAYER_CLASS(Swish);
+
+} // namespace caffe
diff --git a/src/caffe/layers/swish_layer.cu b/src/caffe/layers/swish_layer.cu
new file mode 100644
index 00000000..c4fef53b
--- /dev/null
+++ b/src/caffe/layers/swish_layer.cu
@@ -0,0 +1,54 @@
+#include <cmath>
+#include <vector>
+
+#include "caffe/layers/swish_layer.hpp"
+#include "caffe/util/math_functions.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void SwishLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ const vector<Blob<Dtype>*>& top) {
+ const Dtype* bottom_data = bottom[0]->gpu_data();
+ Dtype* sigmoid_input_data = sigmoid_input_->mutable_gpu_data();
+ Dtype* top_data = top[0]->mutable_gpu_data();
+ const int count = bottom[0]->count();
+ Dtype beta = this->layer_param_.swish_param().beta();
+ caffe_copy(count, bottom_data, sigmoid_input_data);
+ caffe_gpu_scal(count, beta, sigmoid_input_data);
+ sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
+ caffe_gpu_mul(count, bottom_data, sigmoid_output_->gpu_data(), top_data);
+}
+
+template <typename Dtype>
+__global__ void SwishBackward(const int n, const Dtype* in_diff,
+ const Dtype* out_data, const Dtype* sigmoid_output_data, Dtype* out_diff,
+ const Dtype beta) {
+ CUDA_KERNEL_LOOP(index, n) {
+ const Dtype swish_x = out_data[index];
+ out_diff[index] = in_diff[index] * (beta * swish_x
+ + sigmoid_output_data[index] * (1 - beta * swish_x));
+ }
+}
+
+template <typename Dtype>
+void SwishLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const vector<bool>& propagate_down,
+ const vector<Blob<Dtype>*>& bottom) {
+ if (propagate_down[0]) {
+ const Dtype* top_data = top[0]->gpu_data();
+ const Dtype* top_diff = top[0]->gpu_diff();
+ const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
+ Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+ const int count = bottom[0]->count();
+ Dtype beta = this->layer_param_.swish_param().beta();
+ // NOLINT_NEXT_LINE(whitespace/operators)
+ SwishBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+ count, top_diff, top_data, sigmoid_output_data, bottom_diff, beta);
+ CUDA_POST_KERNEL_CHECK;
+ }
+}
+
+INSTANTIATE_LAYER_GPU_FUNCS(SwishLayer);
+
+} // namespace caffe
diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp
index 353c2f95..5e844b03 100644
--- a/src/caffe/net.cpp
+++ b/src/caffe/net.cpp
@@ -5,7 +5,9 @@
#include <utility>
#include <vector>
+#ifdef USE_HDF5
#include "hdf5.h"
+#endif // USE_HDF5
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
@@ -164,7 +166,7 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
// loss. We can skip backward computation for blobs that don't contribute
// to the loss.
// Also checks if all bottom blobs don't need backward computation (possible
- // because the skip_propagate_down param) and so we can skip bacward
+ // because the skip_propagate_down param) and so we can skip backward
// computation for the entire layer
set<string> blobs_under_loss;
set<string> blobs_skip_backp;
@@ -768,7 +770,7 @@ void Net<Dtype>::CopyTrainedLayersFrom(const NetParameter& param) {
}
template <typename Dtype>
-void Net<Dtype>::CopyTrainedLayersFrom(const string trained_filename) {
+void Net<Dtype>::CopyTrainedLayersFrom(const string& trained_filename) {
if (H5Fis_hdf5(trained_filename.c_str())) {
CopyTrainedLayersFromHDF5(trained_filename);
} else {
@@ -778,14 +780,15 @@ void Net<Dtype>::CopyTrainedLayersFrom(const string trained_filename) {
template <typename Dtype>
void Net<Dtype>::CopyTrainedLayersFromBinaryProto(
- const string trained_filename) {
+ const string& trained_filename) {
NetParameter param;
ReadNetParamsFromBinaryFileOrDie(trained_filename, &param);
CopyTrainedLayersFrom(param);
}
template <typename Dtype>
-void Net<Dtype>::CopyTrainedLayersFromHDF5(const string trained_filename) {
+void Net<Dtype>::CopyTrainedLayersFromHDF5(const string& trained_filename) {
+#ifdef USE_HDF5
hid_t file_hid = H5Fopen(trained_filename.c_str(), H5F_ACC_RDONLY,
H5P_DEFAULT);
CHECK_GE(file_hid, 0) << "Couldn't open " << trained_filename;
@@ -832,6 +835,10 @@ void Net<Dtype>::CopyTrainedLayersFromHDF5(const string trained_filename) {
}
H5Gclose(data_hid);
H5Fclose(file_hid);
+#else
+ LOG(FATAL) << "CopyTrainedLayersFromHDF5 requires hdf5;"
+ << " compile with USE_HDF5.";
+#endif // USE_HDF5
}
template <typename Dtype>
@@ -848,6 +855,8 @@ void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) const {
template <typename Dtype>
void Net<Dtype>::ToHDF5(const string& filename, bool write_diff) const {
+// This code is taken from https://github.com/sh1r0/caffe-android-lib
+#ifdef USE_HDF5
hid_t file_hid = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT,
H5P_DEFAULT);
CHECK_GE(file_hid, 0)
@@ -901,6 +910,10 @@ void Net<Dtype>::ToHDF5(const string& filename, bool write_diff) const {
H5Gclose(diff_hid);
}
H5Fclose(file_hid);
+// This code is taken from https://github.com/sh1r0/caffe-android-lib
+#else
+ LOG(FATAL) << "ToHDF5 requires hdf5; compile with USE_HDF5.";
+#endif // USE_HDF5
}
template <typename Dtype>
diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto
index 22764abc..3dcad697 100644
--- a/src/caffe/proto/caffe.proto
+++ b/src/caffe/proto/caffe.proto
@@ -187,7 +187,7 @@ message SolverParameter {
optional int32 snapshot = 14 [default = 0]; // The snapshot interval
// The prefix for the snapshot.
- // If not set then is replaced by prototxt file path without extention.
+ // If not set then is replaced by prototxt file path without extension.
// If is set to directory then is augmented by prototxt file name
  // without extension.
optional string snapshot_prefix = 15;
@@ -248,8 +248,8 @@ message SolverParameter {
// Path to caffemodel file(s) with pretrained weights to initialize finetuning.
  // The same as the command line --weights parameter for the caffe train command.
- // If command line --weights parameter if specified, it has higher priority
- // and owerwrites this one(s).
+ // If command line --weights parameter is specified, it has higher priority
+ // and overwrites this one(s).
// If --snapshot command line parameter is specified, this one(s) are ignored.
  // If several model files are expected, they can be listed in one
// weights parameter separated by ',' (like in a command string) or
@@ -322,7 +322,7 @@ message ParamSpec {
// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
-// LayerParameter next available layer-specific ID: 147 (last added: recurrent_param)
+// LayerParameter next available layer-specific ID: 149 (last added: clip_param)
message LayerParameter {
optional string name = 1; // the layer name
optional string type = 2; // the layer type
@@ -378,6 +378,7 @@ message LayerParameter {
optional ArgMaxParameter argmax_param = 103;
optional BatchNormParameter batch_norm_param = 139;
optional BiasParameter bias_param = 141;
+ optional ClipParameter clip_param = 148;
optional ConcatParameter concat_param = 104;
optional ContrastiveLossParameter contrastive_loss_param = 105;
optional ConvolutionParameter convolution_param = 106;
@@ -415,6 +416,7 @@ message LayerParameter {
optional SoftmaxParameter softmax_param = 125;
optional SPPParameter spp_param = 132;
optional SliceParameter slice_param = 126;
+ optional SwishParameter swish_param = 147;
optional TanHParameter tanh_param = 127;
optional ThresholdParameter threshold_param = 128;
optional TileParameter tile_param = 138;
@@ -504,6 +506,12 @@ message ArgMaxParameter {
optional int32 axis = 3;
}
+// Message that stores parameters used by ClipLayer
+message ClipParameter {
+ required float min = 1;
+ required float max = 2;
+}
+
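
Element-wise, the layer this message configures clamps each input into
[min, max]; a minimal scalar sketch (the free function clip and its signature
are illustrative, not part of the layer API):

    #include <algorithm>

    // Scalar sketch of ClipLayer's forward pass: clamp x into [min_val, max_val].
    template <typename Dtype>
    Dtype clip(Dtype x, Dtype min_val, Dtype max_val) {
      return std::max(min_val, std::min(max_val, x));
    }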
message ConcatParameter {
// The axis along which to concatenate -- may be negative to index from the
// end (e.g., -1 for the last axis). Other axes must have the
@@ -935,6 +943,12 @@ message PoolingParameter {
// If global_pooling then it will pool over the size of the bottom by doing
// kernel_h = bottom->height and kernel_w = bottom->width
optional bool global_pooling = 12 [default = false];
+ // How to calculate the output size - using ceil (default) or floor rounding.
+ enum RoundMode {
+ CEIL = 0;
+ FLOOR = 1;
+ }
+ optional RoundMode round_mode = 13 [default = CEIL];
}
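
A quick worked example of what round_mode changes, assuming the standard
pooled-size formula output = (H + 2 * pad - kernel) / stride + 1 with the
division rounded per round_mode: for H = 6, kernel = 3, stride = 2, pad = 0,
CEIL yields 3 outputs while FLOOR yields 2. A sketch (the helper pooled_size
is illustrative):

    #include <cmath>

    // Pooled output size along one axis under each rounding mode.
    int pooled_size(int h, int kernel, int stride, int pad, bool ceil_mode) {
      const double span = static_cast<double>(h + 2 * pad - kernel) / stride;
      return (ceil_mode ? static_cast<int>(std::ceil(span))
                        : static_cast<int>(std::floor(span))) + 1;
    }
    // pooled_size(6, 3, 2, 0, /*ceil_mode=*/true)  == 3
    // pooled_size(6, 3, 2, 0, /*ceil_mode=*/false) == 2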
message PowerParameter {
@@ -1156,6 +1170,15 @@ message SoftmaxParameter {
optional int32 axis = 2 [default = 1];
}
+// Message that stores parameters used by SwishLayer
+message SwishParameter {
+ // Beta parameter for the Swish activation function
+ // Described in:
+ // Prajit Ramachandran, Barret Zoph, Quoc V. Le. (2017). Searching for
+ // Activation Functions. https://arxiv.org/abs/1710.05941v2
+ optional float beta = 1 [default = 1];
+}
+
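
A minimal scalar reference for the activation this message parameterizes,
following the definition in the paper cited above, f(x) = x * sigmoid(beta * x);
the free function swish below is illustrative only:

    #include <cmath>

    // f(x) = x * sigmoid(beta * x) = x / (1 + exp(-beta * x))
    double swish(double x, double beta) {
      return x / (1.0 + std::exp(-beta * x));
    }
    // beta = 1 is the default; beta = 0 degenerates to x / 2
    // (see TestSwishAsLinear in test_neuron_layer.cpp later in this diff).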
message TanHParameter {
enum Engine {
DEFAULT = 0;
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp
index d229acff..842312e0 100644
--- a/src/caffe/solver.cpp
+++ b/src/caffe/solver.cpp
@@ -78,7 +78,7 @@ template <typename Dtype>
void Solver<Dtype>::InitTrainNet() {
const int num_train_nets = param_.has_net() + param_.has_net_param() +
param_.has_train_net() + param_.has_train_net_param();
- const string& field_names = "net, net_param, train_net, train_net_param";
+ const string field_names = "net, net_param, train_net, train_net_param";
CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net "
<< "using one of these fields: " << field_names;
CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than "
@@ -266,10 +266,6 @@ void Solver<Dtype>::Step(int iters) {
}
ApplyUpdate();
- // Increment the internal iter_ counter -- its value should always indicate
- // the number of times the weights have been updated.
- ++iter_;
-
SolverAction::Enum request = GetRequestedAction();
// Save a snapshot if needed.
@@ -451,13 +447,13 @@ void Solver<Dtype>::CheckSnapshotWritePermissions() {
} else {
LOG(FATAL) << "Cannot write to snapshot prefix '"
<< param_.snapshot_prefix() << "'. Make sure "
- << "that the directory exists and is writeable.";
+ << "that the directory exists and is writable.";
}
}
}
template <typename Dtype>
-string Solver<Dtype>::SnapshotFilename(const string extension) {
+string Solver<Dtype>::SnapshotFilename(const string& extension) {
return param_.snapshot_prefix() + "_iter_" + caffe::format_int(iter_)
+ extension;
}
diff --git a/src/caffe/solvers/sgd_solver.cpp b/src/caffe/solvers/sgd_solver.cpp
index 1d52beb0..081c47eb 100644
--- a/src/caffe/solvers/sgd_solver.cpp
+++ b/src/caffe/solvers/sgd_solver.cpp
@@ -120,6 +120,10 @@ void SGDSolver<Dtype>::ApplyUpdate() {
ComputeUpdateValue(param_id, rate);
}
this->net_->Update();
+
+ // Increment the internal iter_ counter -- its value should always indicate
+ // the number of times the weights have been updated.
+ ++this->iter_;
}
template <typename Dtype>
@@ -285,6 +289,8 @@ void SGDSolver<Dtype>::SnapshotSolverStateToBinaryProto(
template <typename Dtype>
void SGDSolver<Dtype>::SnapshotSolverStateToHDF5(
const string& model_filename) {
+// This code is taken from https://github.com/sh1r0/caffe-android-lib
+#ifdef USE_HDF5
string snapshot_filename =
Solver<Dtype>::SnapshotFilename(".solverstate.h5");
LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename;
@@ -306,6 +312,11 @@ void SGDSolver<Dtype>::SnapshotSolverStateToHDF5(
}
H5Gclose(history_hid);
H5Fclose(file_hid);
+// This code is taken from https://github.com/sh1r0/caffe-android-lib
+#else
+ LOG(FATAL) << "SnapshotSolverStateToHDF5 requires hdf5;"
+ << " compile with USE_HDF5.";
+#endif // USE_HDF5
}
template <typename Dtype>
@@ -330,6 +341,7 @@ void SGDSolver<Dtype>::RestoreSolverStateFromBinaryProto(
template <typename Dtype>
void SGDSolver<Dtype>::RestoreSolverStateFromHDF5(const string& state_file) {
+#ifdef USE_HDF5
hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file;
this->iter_ = hdf5_load_int(file_hid, "iter");
@@ -351,6 +363,10 @@ void SGDSolver<Dtype>::RestoreSolverStateFromHDF5(const string& state_file) {
}
H5Gclose(history_hid);
H5Fclose(file_hid);
+#else
+ LOG(FATAL) << "RestoreSolverStateFromHDF5 requires hdf5;"
+ << " compile with USE_HDF5.";
+#endif // USE_HDF5
}
INSTANTIATE_CLASS(SGDSolver);
diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp
index f84d707b..34f7007d 100644
--- a/src/caffe/test/test_filler.cpp
+++ b/src/caffe/test/test_filler.cpp
@@ -1,3 +1,5 @@
+#include <vector>
+
#include "gtest/gtest.h"
#include "caffe/filler.hpp"
@@ -10,11 +12,20 @@ template <typename Dtype>
class ConstantFillerTest : public ::testing::Test {
protected:
ConstantFillerTest()
- : blob_(new Blob<Dtype>(2, 3, 4, 5)),
+ : blob_(new Blob<Dtype>()),
filler_param_() {
filler_param_.set_value(10.);
filler_.reset(new ConstantFiller<Dtype>(filler_param_));
+ }
+ virtual void test_params(const vector<int>& shape) {
+ EXPECT_TRUE(blob_);
+ blob_->Reshape(shape);
filler_->Fill(blob_);
+ const int count = blob_->count();
+ const Dtype* data = blob_->cpu_data();
+ for (int i = 0; i < count; ++i) {
+ EXPECT_EQ(data[i], filler_param_.value());
+ }
}
virtual ~ConstantFillerTest() { delete blob_; }
Blob<Dtype>* const blob_;
@@ -25,12 +36,34 @@ class ConstantFillerTest : public ::testing::Test {
TYPED_TEST_CASE(ConstantFillerTest, TestDtypes);
TYPED_TEST(ConstantFillerTest, TestFill) {
- EXPECT_TRUE(this->blob_);
- const int count = this->blob_->count();
- const TypeParam* data = this->blob_->cpu_data();
- for (int i = 0; i < count; ++i) {
- EXPECT_EQ(data[i], this->filler_param_.value());
- }
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(ConstantFillerTest, TestFill1D) {
+ vector<int> blob_shape(1, 15);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(ConstantFillerTest, TestFill2D) {
+ vector<int> blob_shape;
+ blob_shape.push_back(8);
+ blob_shape.push_back(3);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(ConstantFillerTest, TestFill5D) {
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ blob_shape.push_back(2);
+ this->test_params(blob_shape);
}
@@ -38,12 +71,22 @@ template <typename Dtype>
class UniformFillerTest : public ::testing::Test {
protected:
UniformFillerTest()
- : blob_(new Blob<Dtype>(2, 3, 4, 5)),
+ : blob_(new Blob<Dtype>()),
filler_param_() {
filler_param_.set_min(1.);
filler_param_.set_max(2.);
filler_.reset(new UniformFiller<Dtype>(filler_param_));
+ }
+ virtual void test_params(const vector<int>& shape) {
+ EXPECT_TRUE(blob_);
+ blob_->Reshape(shape);
filler_->Fill(blob_);
+ const int count = blob_->count();
+ const Dtype* data = blob_->cpu_data();
+ for (int i = 0; i < count; ++i) {
+ EXPECT_GE(data[i], filler_param_.min());
+ EXPECT_LE(data[i], filler_param_.max());
+ }
}
virtual ~UniformFillerTest() { delete blob_; }
Blob<Dtype>* const blob_;
@@ -54,23 +97,64 @@ class UniformFillerTest : public ::testing::Test {
TYPED_TEST_CASE(UniformFillerTest, TestDtypes);
TYPED_TEST(UniformFillerTest, TestFill) {
- EXPECT_TRUE(this->blob_);
- const int count = this->blob_->count();
- const TypeParam* data = this->blob_->cpu_data();
- for (int i = 0; i < count; ++i) {
- EXPECT_GE(data[i], this->filler_param_.min());
- EXPECT_LE(data[i], this->filler_param_.max());
- }
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(UniformFillerTest, TestFill1D) {
+ vector<int> blob_shape(1, 15);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(UniformFillerTest, TestFill2D) {
+ vector<int> blob_shape;
+ blob_shape.push_back(8);
+ blob_shape.push_back(3);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(UniformFillerTest, TestFill5D) {
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ blob_shape.push_back(2);
+ this->test_params(blob_shape);
}
template <typename Dtype>
class PositiveUnitballFillerTest : public ::testing::Test {
protected:
PositiveUnitballFillerTest()
- : blob_(new Blob<Dtype>(2, 3, 4, 5)),
+ : blob_(new Blob<Dtype>()),
filler_param_() {
filler_.reset(new PositiveUnitballFiller<Dtype>(filler_param_));
+ }
+ virtual void test_params(const vector<int>& shape) {
+ EXPECT_TRUE(blob_);
+ blob_->Reshape(shape);
filler_->Fill(blob_);
+ const int num = blob_->shape(0);
+ const int count = blob_->count();
+ const int dim = count / num;
+ const Dtype* data = blob_->cpu_data();
+ for (int i = 0; i < count; ++i) {
+ EXPECT_GE(data[i], 0);
+ EXPECT_LE(data[i], 1);
+ }
+ for (int i = 0; i < num; ++i) {
+ Dtype sum = Dtype(0);
+ for (int j = 0; j < dim; ++j) {
+ sum += data[i * dim + j];
+ }
+ EXPECT_GE(sum, 0.999);
+ EXPECT_LE(sum, 1.001);
+ }
}
virtual ~PositiveUnitballFillerTest() { delete blob_; }
Blob<Dtype>* const blob_;
@@ -81,35 +165,78 @@ class PositiveUnitballFillerTest : public ::testing::Test {
TYPED_TEST_CASE(PositiveUnitballFillerTest, TestDtypes);
TYPED_TEST(PositiveUnitballFillerTest, TestFill) {
- EXPECT_TRUE(this->blob_);
- const int num = this->blob_->num();
- const int count = this->blob_->count();
- const int dim = count / num;
- const TypeParam* data = this->blob_->cpu_data();
- for (int i = 0; i < count; ++i) {
- EXPECT_GE(data[i], 0);
- EXPECT_LE(data[i], 1);
- }
- for (int i = 0; i < num; ++i) {
- TypeParam sum = 0;
- for (int j = 0; j < dim; ++j) {
- sum += data[i * dim + j];
- }
- EXPECT_GE(sum, 0.999);
- EXPECT_LE(sum, 1.001);
- }
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(PositiveUnitballFillerTest, TestFill1D) {
+ vector<int> blob_shape(1, 15);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(PositiveUnitballFillerTest, TestFill2D) {
+ vector<int> blob_shape;
+ blob_shape.push_back(8);
+ blob_shape.push_back(3);
+ this->test_params(blob_shape);
+}
+
+TYPED_TEST(PositiveUnitballFillerTest, TestFill5D) {
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ blob_shape.push_back(2);
+ this->test_params(blob_shape);
}
template <typename Dtype>
class GaussianFillerTest : public ::testing::Test {
protected:
GaussianFillerTest()
- : blob_(new Blob<Dtype>(2, 3, 4, 5)),
+ : blob_(new Blob<Dtype>()),
filler_param_() {
filler_param_.set_mean(10.);
filler_param_.set_std(0.1);
filler_.reset(new GaussianFiller<Dtype>(filler_param_));
+ }
+ virtual void test_params(const vector<int>& shape,
+ const Dtype tolerance = Dtype(5), const int repetitions = 100) {
+ // Tests for statistical properties should be run multiple times.
+ EXPECT_TRUE(blob_);
+ blob_->Reshape(shape);
+ for (int i = 0; i < repetitions; ++i) {
+ test_params_iter(shape, tolerance);
+ }
+ }
+ virtual void test_params_iter(const vector<int>& shape,
+ const Dtype tolerance) {
+ // The tolerance here is configurable; the default of 5.0 (matching the
+ // original hardcoded check) is very loose, but making it a parameter
+ // allows some tuning (e.g. for tests on smaller blobs the actual variance
+ // will be larger than desired, so the tolerance can be increased to
+ // account for that).
filler_->Fill(blob_);
+ const int count = blob_->count();
+ const Dtype* data = blob_->cpu_data();
+ Dtype mean = Dtype(0);
+ Dtype var = Dtype(0);
+ for (int i = 0; i < count; ++i) {
+ mean += data[i];
+ var += data[i] * data[i];
+ }
+ mean /= count;
+ var /= count;
+ var -= mean*mean;
+ EXPECT_GE(mean, filler_param_.mean() - filler_param_.std() * tolerance);
+ EXPECT_LE(mean, filler_param_.mean() + filler_param_.std() * tolerance);
+ Dtype target_var = filler_param_.std() * filler_param_.std();
+ EXPECT_GE(var, target_var / tolerance);
+ EXPECT_LE(var, target_var * tolerance);
}
virtual ~GaussianFillerTest() { delete blob_; }
Blob<Dtype>* const blob_;
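
The accumulation in test_params_iter uses the standard shortcut for the
sample variance:

    \operatorname{Var}(x) \approx \frac{1}{N}\sum_i x_i^2
        - \Bigl(\frac{1}{N}\sum_i x_i\Bigr)^2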
@@ -120,41 +247,62 @@ class GaussianFillerTest : public ::testing::Test {
TYPED_TEST_CASE(GaussianFillerTest, TestDtypes);
TYPED_TEST(GaussianFillerTest, TestFill) {
- EXPECT_TRUE(this->blob_);
- const int count = this->blob_->count();
- const TypeParam* data = this->blob_->cpu_data();
- TypeParam mean = 0.;
- TypeParam var = 0.;
- for (int i = 0; i < count; ++i) {
- mean += data[i];
- var += (data[i] - this->filler_param_.mean()) *
- (data[i] - this->filler_param_.mean());
- }
- mean /= count;
- var /= count;
- // Very loose test.
- EXPECT_GE(mean, this->filler_param_.mean() - this->filler_param_.std() * 5);
- EXPECT_LE(mean, this->filler_param_.mean() + this->filler_param_.std() * 5);
- TypeParam target_var = this->filler_param_.std() * this->filler_param_.std();
- EXPECT_GE(var, target_var / 5.);
- EXPECT_LE(var, target_var * 5.);
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ const TypeParam tolerance = TypeParam(3); // enough for a 120-element blob
+ this->test_params(blob_shape, tolerance);
+}
+
+TYPED_TEST(GaussianFillerTest, TestFill1D) {
+ vector<int> blob_shape(1, 125);
+ const TypeParam tolerance = TypeParam(3);
+ this->test_params(blob_shape, tolerance);
+}
+
+TYPED_TEST(GaussianFillerTest, TestFill2D) {
+ vector<int> blob_shape;
+ blob_shape.push_back(8);
+ blob_shape.push_back(15);
+ const TypeParam tolerance = TypeParam(3);
+ this->test_params(blob_shape, tolerance);
+}
+
+TYPED_TEST(GaussianFillerTest, TestFill5D) {
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ blob_shape.push_back(2);
+ const TypeParam tolerance = TypeParam(2);
+ this->test_params(blob_shape, tolerance);
}
template <typename Dtype>
class XavierFillerTest : public ::testing::Test {
protected:
XavierFillerTest()
- : blob_(new Blob<Dtype>(1000, 2, 4, 5)),
+ : blob_(new Blob<Dtype>()),
filler_param_() {
}
virtual void test_params(FillerParameter_VarianceNorm variance_norm,
+ Dtype n, const vector<int>& shape, const int repetitions = 100) {
+ EXPECT_TRUE(blob_);
+ blob_->Reshape(shape);
+ for (int i = 0; i < repetitions; ++i) {
+ test_params_iter(variance_norm, n);
+ }
+ }
+ virtual void test_params_iter(FillerParameter_VarianceNorm variance_norm,
Dtype n) {
- this->filler_param_.set_variance_norm(variance_norm);
- this->filler_.reset(new XavierFiller<Dtype>(this->filler_param_));
- this->filler_->Fill(blob_);
- EXPECT_TRUE(this->blob_);
- const int count = this->blob_->count();
- const Dtype* data = this->blob_->cpu_data();
+ filler_param_.set_variance_norm(variance_norm);
+ filler_.reset(new XavierFiller<Dtype>(filler_param_));
+ filler_->Fill(blob_);
+ const int count = blob_->count();
+ const Dtype* data = blob_->cpu_data();
Dtype mean = 0.;
Dtype ex2 = 0.;
for (int i = 0; i < count; ++i) {
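
For reference on what n means here: Caffe's Xavier filler draws uniformly
with variance 1/n, where n is selected by variance_norm (fan-in, fan-out, or
their average), i.e. approximately:

    x \sim \mathcal{U}\!\left(-\sqrt{3/n},\ \sqrt{3/n}\right),
    \qquad \operatorname{Var}(x) = \tfrac{1}{n}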
@@ -177,33 +325,92 @@ class XavierFillerTest : public ::testing::Test {
TYPED_TEST_CASE(XavierFillerTest, TestDtypes);
TYPED_TEST(XavierFillerTest, TestFillFanIn) {
+ vector<int> blob_shape;
+ blob_shape.push_back(1000);
+ blob_shape.push_back(2);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
TypeParam n = 2*4*5;
- this->test_params(FillerParameter_VarianceNorm_FAN_IN, n);
+ this->test_params(FillerParameter_VarianceNorm_FAN_IN, n, blob_shape);
}
+
TYPED_TEST(XavierFillerTest, TestFillFanOut) {
+ vector<int> blob_shape;
+ blob_shape.push_back(1000);
+ blob_shape.push_back(2);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
TypeParam n = 1000*4*5;
- this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n);
+ this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n, blob_shape);
}
+
TYPED_TEST(XavierFillerTest, TestFillAverage) {
+ vector<int> blob_shape;
+ blob_shape.push_back(1000);
+ blob_shape.push_back(2);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
TypeParam n = (2*4*5 + 1000*4*5) / 2.0;
- this->test_params(FillerParameter_VarianceNorm_AVERAGE, n);
+ this->test_params(FillerParameter_VarianceNorm_AVERAGE, n, blob_shape);
+}
+
+TYPED_TEST(XavierFillerTest, TestFill1D) {
+ // This makes little sense, but at least we will know that we can fill it.
+ EXPECT_TRUE(this->blob_);
+ vector<int> blob_shape(1, 25);
+ this->blob_->Reshape(blob_shape);
+ this->filler_param_.set_variance_norm(FillerParameter_VarianceNorm_AVERAGE);
+ this->filler_.reset(new XavierFiller<TypeParam>(this->filler_param_));
+ this->filler_->Fill(this->blob_);
+}
+
+TYPED_TEST(XavierFillerTest, TestFill2D) {
+ EXPECT_TRUE(this->blob_);
+ vector<int> blob_shape;
+ blob_shape.push_back(8);
+ blob_shape.push_back(3);
+ this->blob_->Reshape(blob_shape);
+ this->filler_param_.set_variance_norm(FillerParameter_VarianceNorm_AVERAGE);
+ this->filler_.reset(new XavierFiller<TypeParam>(this->filler_param_));
+ this->filler_->Fill(this->blob_);
+}
+
+TYPED_TEST(XavierFillerTest, TestFill5D) {
+ EXPECT_TRUE(this->blob_);
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ blob_shape.push_back(2);
+ this->blob_->Reshape(blob_shape);
+ this->filler_param_.set_variance_norm(FillerParameter_VarianceNorm_AVERAGE);
+ this->filler_.reset(new XavierFiller<TypeParam>(this->filler_param_));
+ this->filler_->Fill(this->blob_);
}
template <typename Dtype>
class MSRAFillerTest : public ::testing::Test {
protected:
MSRAFillerTest()
- : blob_(new Blob<Dtype>(1000, 2, 4, 5)),
+ : blob_(new Blob<Dtype>()),
filler_param_() {
}
virtual void test_params(FillerParameter_VarianceNorm variance_norm,
+ Dtype n, const vector<int>& shape, const int repetitions = 100) {
+ EXPECT_TRUE(blob_);
+ blob_->Reshape(shape);
+ for (int i = 0; i < repetitions; ++i) {
+ test_params_iter(variance_norm, n);
+ }
+ }
+ virtual void test_params_iter(FillerParameter_VarianceNorm variance_norm,
Dtype n) {
- this->filler_param_.set_variance_norm(variance_norm);
- this->filler_.reset(new MSRAFiller<Dtype>(this->filler_param_));
- this->filler_->Fill(blob_);
- EXPECT_TRUE(this->blob_);
- const int count = this->blob_->count();
- const Dtype* data = this->blob_->cpu_data();
+ filler_param_.set_variance_norm(variance_norm);
+ filler_.reset(new MSRAFiller<Dtype>(filler_param_));
+ filler_->Fill(blob_);
+ const int count = blob_->count();
+ const Dtype* data = blob_->cpu_data();
Dtype mean = 0.;
Dtype ex2 = 0.;
for (int i = 0; i < count; ++i) {
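
Analogously, the MSRA filler draws from a zero-mean Gaussian whose variance
is 2/n:

    x \sim \mathcal{N}(0, \sigma^2), \qquad \sigma = \sqrt{2/n}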
@@ -226,36 +433,92 @@ class MSRAFillerTest : public ::testing::Test {
TYPED_TEST_CASE(MSRAFillerTest, TestDtypes);
TYPED_TEST(MSRAFillerTest, TestFillFanIn) {
+ vector<int> blob_shape;
+ blob_shape.push_back(1000);
+ blob_shape.push_back(2);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
TypeParam n = 2*4*5;
- this->test_params(FillerParameter_VarianceNorm_FAN_IN, n);
+ this->test_params(FillerParameter_VarianceNorm_FAN_IN, n, blob_shape);
}
+
TYPED_TEST(MSRAFillerTest, TestFillFanOut) {
+ vector<int> blob_shape;
+ blob_shape.push_back(1000);
+ blob_shape.push_back(2);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
TypeParam n = 1000*4*5;
- this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n);
+ this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n, blob_shape);
}
+
TYPED_TEST(MSRAFillerTest, TestFillAverage) {
+ vector<int> blob_shape;
+ blob_shape.push_back(1000);
+ blob_shape.push_back(2);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
TypeParam n = (2*4*5 + 1000*4*5) / 2.0;
- this->test_params(FillerParameter_VarianceNorm_AVERAGE, n);
+ this->test_params(FillerParameter_VarianceNorm_AVERAGE, n, blob_shape);
+}
+
+TYPED_TEST(MSRAFillerTest, TestFill1D) {
+ // As with Xavier: no correctness check, just that the blob can be filled.
+ EXPECT_TRUE(this->blob_);
+ vector<int> blob_shape(1, 25);
+ this->blob_->Reshape(blob_shape);
+ this->filler_param_.set_variance_norm(FillerParameter_VarianceNorm_AVERAGE);
+ this->filler_.reset(new MSRAFiller<TypeParam>(this->filler_param_));
+ this->filler_->Fill(this->blob_);
+}
+
+TYPED_TEST(MSRAFillerTest, TestFill2D) {
+ EXPECT_TRUE(this->blob_);
+ vector<int> blob_shape;
+ blob_shape.push_back(8);
+ blob_shape.push_back(3);
+ this->blob_->Reshape(blob_shape);
+ this->filler_param_.set_variance_norm(FillerParameter_VarianceNorm_AVERAGE);
+ this->filler_.reset(new MSRAFiller<TypeParam>(this->filler_param_));
+ this->filler_->Fill(this->blob_);
+}
+
+TYPED_TEST(MSRAFillerTest, TestFill5D) {
+ EXPECT_TRUE(this->blob_);
+ vector<int> blob_shape;
+ blob_shape.push_back(2);
+ blob_shape.push_back(3);
+ blob_shape.push_back(4);
+ blob_shape.push_back(5);
+ blob_shape.push_back(2);
+ this->blob_->Reshape(blob_shape);
+ this->filler_param_.set_variance_norm(FillerParameter_VarianceNorm_AVERAGE);
+ this->filler_.reset(new MSRAFiller<TypeParam>(this->filler_param_));
+ this->filler_->Fill(this->blob_);
}
template <typename Dtype>
class BilinearFillerTest : public ::testing::Test {
protected:
- BilinearFillerTest() : filler_param_() {}
- virtual void test_params(const int n) {
- this->blob_ = new Blob<Dtype>(1000, 2, n, n);
- this->filler_.reset(new BilinearFiller<Dtype>(this->filler_param_));
- this->filler_->Fill(blob_);
- EXPECT_TRUE(this->blob_);
- const int outer_num = this->blob_->count(0, 2);
- const int inner_num = this->blob_->count(2, 4);
- const Dtype* data = this->blob_->cpu_data();
- int f = ceil(this->blob_->width() / 2.);
- Dtype c = (this->blob_->width() - 1) / (2. * f);
+ BilinearFillerTest()
+ : blob_(new Blob<Dtype>()),
+ filler_param_() {
+ }
+ virtual void test_params(const vector<int>& shape) {
+ EXPECT_TRUE(blob_);
+ blob_->Reshape(shape);
+ filler_.reset(new BilinearFiller<Dtype>(filler_param_));
+ filler_->Fill(blob_);
+ CHECK_EQ(blob_->num_axes(), 4);
+ const int outer_num = blob_->count(0, 2);
+ const int inner_num = blob_->count(2, 4);
+ const Dtype* data = blob_->cpu_data();
+ int f = ceil(blob_->shape(3) / 2.);
+ Dtype c = (blob_->shape(3) - 1) / (2. * f);
for (int i = 0; i < outer_num; ++i) {
for (int j = 0; j < inner_num; ++j) {
- Dtype x = j % this->blob_->width();
- Dtype y = (j / this->blob_->width()) % this->blob_->height();
+ Dtype x = j % blob_->shape(3);
+ Dtype y = (j / blob_->shape(3)) % blob_->shape(2);
Dtype expected_value = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c));
const Dtype actual_value = data[i * inner_num + j];
EXPECT_NEAR(expected_value, actual_value, 0.01);
@@ -272,11 +535,21 @@ TYPED_TEST_CASE(BilinearFillerTest, TestDtypes);
TYPED_TEST(BilinearFillerTest, TestFillOdd) {
const int n = 7;
- this->test_params(n);
+ vector<int> blob_shape;
+ blob_shape.push_back(1000);
+ blob_shape.push_back(2);
+ blob_shape.push_back(n);
+ blob_shape.push_back(n);
+ this->test_params(blob_shape);
}
TYPED_TEST(BilinearFillerTest, TestFillEven) {
const int n = 6;
- this->test_params(n);
+ vector<int> blob_shape;
+ blob_shape.push_back(1000);
+ blob_shape.push_back(2);
+ blob_shape.push_back(n);
+ blob_shape.push_back(n);
+ this->test_params(blob_shape);
}
} // namespace caffe
diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp
index f94dd57e..11d52310 100644
--- a/src/caffe/test/test_hdf5_output_layer.cpp
+++ b/src/caffe/test/test_hdf5_output_layer.cpp
@@ -1,3 +1,4 @@
+#ifdef USE_HDF5
#include <string>
#include <vector>
@@ -120,3 +121,4 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) {
}
} // namespace caffe
+#endif // USE_HDF5
diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp
index 3977c486..0e5c398f 100644
--- a/src/caffe/test/test_hdf5data_layer.cpp
+++ b/src/caffe/test/test_hdf5data_layer.cpp
@@ -1,3 +1,4 @@
+#ifdef USE_HDF5
#include <string>
#include <vector>
@@ -163,3 +164,4 @@ TYPED_TEST(HDF5DataLayerTest, TestSkip) {
}
} // namespace caffe
+#endif // USE_HDF5
diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp
index 180871a2..d1ecc37b 100644
--- a/src/caffe/test/test_neuron_layer.cpp
+++ b/src/caffe/test/test_neuron_layer.cpp
@@ -10,6 +10,7 @@
#include "caffe/layers/absval_layer.hpp"
#include "caffe/layers/bnll_layer.hpp"
+#include "caffe/layers/clip_layer.hpp"
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/layers/elu_layer.hpp"
#include "caffe/layers/exp_layer.hpp"
@@ -19,6 +20,7 @@
#include "caffe/layers/prelu_layer.hpp"
#include "caffe/layers/relu_layer.hpp"
#include "caffe/layers/sigmoid_layer.hpp"
+#include "caffe/layers/swish_layer.hpp"
#include "caffe/layers/tanh_layer.hpp"
#include "caffe/layers/threshold_layer.hpp"
@@ -205,6 +207,66 @@ TYPED_TEST(NeuronLayerTest, TestAbsGradient) {
this->blob_top_vec_);
}
+TYPED_TEST(NeuronLayerTest, TestClip) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ CHECK(google::protobuf::TextFormat::ParseFromString(
+ "clip_param { min: -1, max: 2 }", &layer_param));
+ ClipLayer<Dtype> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+ // Now, check values
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ EXPECT_GE(top_data[i], -1);
+ EXPECT_LE(top_data[i], 2);
+ EXPECT_TRUE(bottom_data[i] > -1 || top_data[i] == -1);
+ EXPECT_TRUE(bottom_data[i] < 2 || top_data[i] == 2);
+ EXPECT_TRUE(!(bottom_data[i] >= -1 && bottom_data[i] <= 2)
+ || top_data[i] == bottom_data[i]);
+ }
+}
+
+TYPED_TEST(NeuronLayerTest, TestClipGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ CHECK(google::protobuf::TextFormat::ParseFromString(
+ "clip_param { min: -1, max: 2 }", &layer_param));
+ ClipLayer<Dtype> layer(layer_param);
+ // Unfortunately, it might happen that an input value lands exactly within
+ // the discontinuity region of the Clip function. In this case the numeric
+ // gradient is likely to differ significantly (i.e. by more than the
+ // checker tolerance) from the computed gradient. To handle such cases, we
+ // eliminate such values from the input blob before the gradient check.
+ const Dtype epsilon = 1e-2;
+ const Dtype min_range_start = layer_param.clip_param().min() - epsilon;
+ const Dtype min_range_end = layer_param.clip_param().min() + epsilon;
+ const Dtype max_range_start = layer_param.clip_param().max() - epsilon;
+ const Dtype max_range_end = layer_param.clip_param().max() + epsilon;
+ // The input blob is owned by the NeuronLayerTest object, so we begin by
+ // creating a temporary blob and copying the input data there.
+ Blob<Dtype> temp_bottom;
+ temp_bottom.ReshapeLike(*this->blob_bottom_);
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ Dtype* temp_data_mutable = temp_bottom.mutable_cpu_data();
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ if (bottom_data[i] >= min_range_start &&
+ bottom_data[i] <= min_range_end) {
+ temp_data_mutable[i] = bottom_data[i] - epsilon;
+ } else if (bottom_data[i] >= max_range_start &&
+ bottom_data[i] <= max_range_end) {
+ temp_data_mutable[i] = bottom_data[i] + epsilon;
+ } else {
+ temp_data_mutable[i] = bottom_data[i];
+ }
+ }
+ vector<Blob<Dtype>*> temp_bottom_vec;
+ temp_bottom_vec.push_back(&temp_bottom);
+ GradientChecker<Dtype> checker(epsilon, 1e-3);
+ checker.CheckGradientEltwise(&layer, temp_bottom_vec, this->blob_top_vec_);
+}
+
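
The nudging above works because the clip (sub)gradient is piecewise constant
and undefined only at the two kinks:

    \frac{\partial y}{\partial x} =
    \begin{cases}
      1, & \min < x < \max \\
      0, & x < \min \ \text{or}\ x > \max
    \end{cases}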
TYPED_TEST(NeuronLayerTest, TestReLU) {
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
@@ -344,6 +406,84 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) {
this->blob_top_vec_);
}
+TYPED_TEST(NeuronLayerTest, TestSwish) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ SwishLayer<Dtype> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+ // Now, check values
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ EXPECT_FLOAT_EQ(top_data[i], bottom_data[i] / (1. + exp(-bottom_data[i])));
+ }
+}
+
+TYPED_TEST(NeuronLayerTest, TestSwishWithBeta) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ CHECK(google::protobuf::TextFormat::ParseFromString(
+ "swish_param { beta: 1.5 }", &layer_param));
+ SwishLayer<Dtype> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+ // Now, check values
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ EXPECT_FLOAT_EQ(top_data[i], bottom_data[i] / (1. + exp(-1.5 *
+ bottom_data[i])));
+ }
+}
+
+TYPED_TEST(NeuronLayerTest, TestSwishAsLinear) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ CHECK(google::protobuf::TextFormat::ParseFromString(
+ "swish_param { beta: 0.0 }", &layer_param));
+ SwishLayer<Dtype> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+ // Now, check values
+ const Dtype* bottom_data = this->blob_bottom_->cpu_data();
+ const Dtype* top_data = this->blob_top_->cpu_data();
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ EXPECT_FLOAT_EQ(top_data[i], bottom_data[i] / 2.0);
+ }
+}
+
+TYPED_TEST(NeuronLayerTest, TestSwishGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ SwishLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
+}
+
+TYPED_TEST(NeuronLayerTest, TestSwishWithBetaGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ CHECK(google::protobuf::TextFormat::ParseFromString(
+ "swish_param { beta: 1.5 }", &layer_param));
+ SwishLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
+}
+
+TYPED_TEST(NeuronLayerTest, TestSwishAsLinearGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ CHECK(google::protobuf::TextFormat::ParseFromString(
+ "swish_param { beta: 0.0 }", &layer_param));
+ SwishLayer<Dtype> layer(layer_param);
+ GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
+ checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+ this->blob_top_vec_);
+}
+
TYPED_TEST(NeuronLayerTest, TestTanH) {
typedef typename TypeParam::Dtype Dtype;
LayerParameter layer_param;
diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp
index 16dfb582..2ca9ca2f 100644
--- a/src/caffe/test/test_syncedmem.cpp
+++ b/src/caffe/test/test_syncedmem.cpp
@@ -80,7 +80,7 @@ TEST_F(SyncedMemoryTest, TestGPURead) {
char* recovered_value = new char[10];
caffe_gpu_memcpy(10, gpu_data, recovered_value);
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((static_cast<char*>(recovered_value))[i], 1);
+ EXPECT_EQ(recovered_value[i], 1);
}
// do another round
cpu_data = mem.mutable_cpu_data();
@@ -94,7 +94,7 @@ TEST_F(SyncedMemoryTest, TestGPURead) {
// check if values are the same
caffe_gpu_memcpy(10, gpu_data, recovered_value);
for (int i = 0; i < mem.size(); ++i) {
- EXPECT_EQ((static_cast<char*>(recovered_value))[i], 2);
+ EXPECT_EQ(recovered_value[i], 2);
}
delete[] recovered_value;
}
diff --git a/src/caffe/util/hdf5.cpp b/src/caffe/util/hdf5.cpp
index ed737429..cefd853d 100644
--- a/src/caffe/util/hdf5.cpp
+++ b/src/caffe/util/hdf5.cpp
@@ -1,3 +1,4 @@
+#ifdef USE_HDF5
#include "caffe/util/hdf5.hpp"
#include <string>
@@ -207,3 +208,4 @@ string hdf5_get_name_by_idx(hid_t loc_id, int idx) {
}
} // namespace caffe
+#endif // USE_HDF5
diff --git a/src/caffe/util/signal_handler.cpp b/src/caffe/util/signal_handler.cpp
index 5d764ec5..9658fb39 100644
--- a/src/caffe/util/signal_handler.cpp
+++ b/src/caffe/util/signal_handler.cpp
@@ -48,7 +48,7 @@ namespace {
void UnhookHandler() {
if (already_hooked_up) {
struct sigaction sa;
- // Setup the sighub handler
+ // Set up the sighup handler
sa.sa_handler = SIG_DFL;
// Restart the system call, if at all possible
sa.sa_flags = SA_RESTART;