path: root/include/caffe/layer.hpp
author    Ronghang Hu <huronghang@hotmail.com>  2015-08-11 21:38:06 -0700
committer Ronghang Hu <huronghang@hotmail.com>  2015-08-12 10:51:45 -0700
commit    0d34d5ba0fbdc09ac8f372cb581ccaec599f10bc (patch)
tree      1d6aa3258483de57074730ba7e55e1fb5870e793 /include/caffe/layer.hpp
parent    8771d0f4317fc0081d86b7637f5f5ceef5b92dfb (diff)
Data Layers Parallel for Multi-GPU
Allow data layers (and PythonLayer when used as a data layer) to be shared among the worker solvers' training nets, and also among test nets to stay future-proof if one wants to do multi-GPU testing. Data layers are locked during forward to ensure sequential forward passes.
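
As a hedged illustration (not part of this commit), the sketch below shows how a hypothetical custom data layer could opt into this sharing by overriding the ShareInParallel() hook introduced in the diff further down. The class name MyDataLayer and its stubbed methods are assumptions for illustration only, not Caffe code.

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"

namespace caffe {

// Hypothetical data layer (illustration only). Returning true from
// ShareInParallel() lets every worker solver's net reuse this single
// instance, so the underlying data stream is consumed sequentially.
template <typename Dtype>
class MyDataLayer : public Layer<Dtype> {
 public:
  explicit MyDataLayer(const LayerParameter& param) : Layer<Dtype>(param) {}
  virtual inline bool ShareInParallel() const { return true; }

 protected:
  // Minimal stubs for the pure-virtual Layer interface; a real data layer
  // would fill the top blobs with a batch here.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {}
};

}  // namespace caffe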
Diffstat (limited to 'include/caffe/layer.hpp')
-rw-r--r--  include/caffe/layer.hpp | 15 +++++++++++++++
1 file changed, 15 insertions, 0 deletions
diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp
index 0771b6a8..d82197a9 100644
--- a/include/caffe/layer.hpp
+++ b/include/caffe/layer.hpp
@@ -1,6 +1,7 @@
#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_
+#include <boost/thread.hpp>
#include <algorithm>
#include <string>
#include <vector>
@@ -86,6 +87,14 @@ class Layer {
const vector<Blob<Dtype>*>& top) {}
/**
+ * @brief Whether a layer should be shared by multiple nets during data
+ * parallelism. By default, all layers except data layers should not be
+ * shared. Data layers should be shared so that each worker solver
+ * accesses the data sequentially during data parallelism.
+ */
+ virtual inline bool ShareInParallel() const { return false; }
+
+ /**
* @brief Adjust the shapes of top blobs and internal buffers to accommodate
* the shapes of the bottom blobs.
*
@@ -396,6 +405,10 @@ class Layer {
}
}
+ private:
+ // Mutex to lock this layer so that its forward pass runs sequentially
+ boost::mutex forward_mutex_;
+
DISABLE_COPY_AND_ASSIGN(Layer);
}; // class Layer
@@ -405,6 +418,8 @@ class Layer {
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
+ // Lock during forward to ensure a sequential forward pass when the layer is shared
+ boost::mutex::scoped_lock lock(forward_mutex_);
Dtype loss = 0;
Reshape(bottom, top);
switch (Caffe::mode()) {
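
For context, here is a hedged usage sketch (not from the commit) of what the new lock buys: two worker threads calling Forward() on the same shared layer instance are serialized by forward_mutex_, so each batch is handed out exactly once. The helper WorkerForward and the pre-built shared_layer, bottom, and top containers are assumptions for illustration.

#include <boost/thread.hpp>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"

// Illustration only: each worker runs Forward on the *same* layer object.
// The scoped_lock on forward_mutex_ inside Layer::Forward serializes the
// calls, so a shared data layer produces batches sequentially.
void WorkerForward(caffe::Layer<float>* shared_layer,
    const std::vector<caffe::Blob<float>*>* bottom,
    const std::vector<caffe::Blob<float>*>* top) {
  shared_layer->Forward(*bottom, *top);
}

// In a hypothetical multi-GPU driver (shared_layer, bottom0/1, top0/1 are
// assumed to be set up elsewhere):
//   boost::thread w0(WorkerForward, shared_layer, &bottom0, &top0);
//   boost::thread w1(WorkerForward, shared_layer, &bottom1, &top1);
//   w0.join();
//   w1.join();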