author     Jeff Donahue <jeff.donahue@gmail.com>  2014-07-09 14:47:46 -0700
committer  Jeff Donahue <jeff.donahue@gmail.com>  2014-07-15 13:48:09 -0700
commit     b2a9a3c7fea3022cccecf1cd729b44db3fd9dffd (patch)
tree       5b0f26bb64db8e8e4470c3423479831835b02297 /src/caffe
parent     8a0ba0fd405ebdd1aa247695edf20b991b9e7b98 (diff)
Add param_propagate_down_ vector to layer, populate according to
blobs_lr in Net::Init
Diffstat (limited to 'src/caffe')
-rw-r--r--  src/caffe/net.cpp  12
1 file changed, 11 insertions(+), 1 deletion(-)
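The diffstat above is limited to src/caffe, so the Layer-side declarations that Net::Init calls into are not part of this page. As a rough, hypothetical sketch of what the new member and accessors are assumed to look like (names taken only from the calls in the diff below; the real declarations would live in the Layer class header):

  #include <vector>

  template <typename Dtype>
  class Layer {
   public:
    // Returns whether the diff for parameter blob param_id should be computed.
    bool param_propagate_down(const int param_id) const {
      return param_id < static_cast<int>(param_propagate_down_.size()) ?
          param_propagate_down_[param_id] : false;
    }
    // Sets whether the diff for parameter blob param_id should be computed;
    // Net::Init calls this per parameter blob based on its blobs_lr value.
    void set_param_propagate_down(const int param_id, const bool value) {
      if (static_cast<int>(param_propagate_down_.size()) <= param_id) {
        param_propagate_down_.resize(param_id + 1, true);
      }
      param_propagate_down_[param_id] = value;
    }

   protected:
    // One flag per parameter blob, populated by Net::Init from blobs_lr.
    std::vector<bool> param_propagate_down_;
  };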
diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp
index 0b9cc841..edea726f 100644
--- a/src/caffe/net.cpp
+++ b/src/caffe/net.cpp
@@ -96,12 +96,18 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
     if (blobs_lr_size) {
       // Check if this layer needs backward operation itself
       for (int param_id = 0; param_id < blobs_lr_size; ++param_id) {
-        need_backward |= layer_param.blobs_lr(param_id) > 0;
+        const bool param_need_backward = layer_param.blobs_lr(param_id) > 0;
+        need_backward |= param_need_backward;
+        layers_[layer_id]->set_param_propagate_down(param_id,
+                                                    param_need_backward);
       }
     } else if (layers_[layer_id]->blobs().size()) {
       // catch: if a layer param does not specify blobs_lr, we should assume the
       // learning rate to be 1. Thus we will need to perform backward.
       need_backward = true;
+      for (int param_id = 0; param_id < blobs_lr_size; ++param_id) {
+        layers_[layer_id]->set_param_propagate_down(param_id, true);
+      }
     }
     const int param_size = layer_param.param_size();
     CHECK(param_size == num_param_blobs || param_size == 0)
@@ -139,6 +145,10 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
             blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] ||
             bottom_need_backward_[layer_id][bottom_id];
       }
+      for (int param_id = 0; param_id < layers_[layer_id]->blobs().size();
+           ++param_id) {
+        layers_[layer_id]->set_param_propagate_down(param_id, true);
+      }
     }
   }
   // In the end, all remaining blobs are considered output blobs.
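An illustrative consumer of these flags (again only a sketch, reusing the hypothetical Layer class above rather than anything in this commit): once Net::Init has populated the flags from blobs_lr, a layer's backward pass can check param_propagate_down(param_id) and skip the gradient computation for any parameter blob frozen with blobs_lr: 0.

  #include <cstdio>

  int main() {
    Layer<float> layer;
    // What Net::Init would do for a layer declaring blobs_lr: 1 and blobs_lr: 0.
    layer.set_param_propagate_down(0, true);   // weight blob: learn
    layer.set_param_propagate_down(1, false);  // bias blob: frozen
    for (int param_id = 0; param_id < 2; ++param_id) {
      if (layer.param_propagate_down(param_id)) {
        std::printf("param %d: compute diff\n", param_id);
      } else {
        std::printf("param %d: skip diff\n", param_id);
      }
    }
    return 0;
  }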