1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
|
// Copyright Yangqing Jia 2013
#include <map>
#include <set>
#include <string>
#include <vector>
#include "caffe/proto/caffe.pb.h"
#include "caffe/layer.hpp"
#include "caffe/net.hpp"
using std::pair;
using std::map;
using std::set;
namespace caffe {
// Build the network described by `param`: create every layer, wire up
// bottom/top blobs by name, and gather the learnable parameters together
// with their learning-rate multipliers. `bottom` supplies the external
// input blobs whose shapes seed the network's own input blobs.
template <typename Dtype>
Net<Dtype>::Net(const NetParameter& param,
const vector<Blob<Dtype>* >& bottom) {
// Basically, build all the layers and set up its connections.
name_ = param.name();
// Maps a blob name to its index in blobs_ / blob_names_.
map<string, int> blob_name_to_idx;
// Names of blobs that have been produced but not yet consumed; whatever
// remains after all layers are wired is treated as a network output.
set<string> available_blobs;
// NOTE(review): num_layers is unused below.
int num_layers = param.layers_size();
CHECK_EQ(bottom.size(), param.input_size())
<< "Incorrect bottom blob size.";
// set the input blobs
for (int i = 0; i < param.input_size(); ++i) {
const string& blob_name = param.input(i);
CHECK_GT(bottom[i]->count(), 0);
// Allocate the network input blob with the same shape as the caller's.
shared_ptr<Blob<Dtype> > blob_pointer(
new Blob<Dtype>(bottom[i]->num(), bottom[i]->channels(),
bottom[i]->height(), bottom[i]->width()));
blobs_.push_back(blob_pointer);
blob_names_.push_back(blob_name);
net_input_blob_indices_.push_back(i);
blob_name_to_idx[blob_name] = i;
available_blobs.insert(blob_name);
}
// For each layer, set up their input and output
bottom_vecs_.resize(param.layers_size());
top_vecs_.resize(param.layers_size());
bottom_id_vecs_.resize(param.layers_size());
top_id_vecs_.resize(param.layers_size());
for (int i = 0; i < param.layers_size(); ++i) {
const LayerConnection& layer_connection = param.layers(i);
const LayerParameter& layer_param = layer_connection.layer();
// GetLayer acts as the layer factory; ownership is held in layers_.
layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
layer_names_.push_back(layer_param.name());
LOG(INFO) << "Creating Layer " << layer_param.name();
// Figure out this layer's input and output
for (int j = 0; j < layer_connection.bottom_size(); ++j) {
const string& blob_name = layer_connection.bottom(j);
// A bottom blob must already exist (network input or an earlier
// layer's top) and must not have been consumed yet.
if (available_blobs.find(blob_name) == available_blobs.end()) {
LOG(FATAL) << "Unknown blob input " << blob_name <<
" to layer" << j;
}
LOG(INFO) << layer_param.name() << " <- " << blob_name;
bottom_vecs_[i].push_back(
blobs_[blob_name_to_idx[blob_name]].get());
bottom_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
// Mark the blob as consumed: a second consumer would hit the
// "Unknown blob input" fatal above.
available_blobs.erase(blob_name);
}
for (int j = 0; j < layer_connection.top_size(); ++j) {
const string& blob_name = layer_connection.top(j);
// Check if we are doing in-place computation
if (layer_connection.bottom_size() > j &&
blob_name == layer_connection.bottom(j)) {
// In-place computation
// (top j reuses bottom j's existing blob; no new blob is created).
LOG(INFO) << layer_param.name() << " -> " << blob_name << " (in-place)";
available_blobs.insert(blob_name);
top_vecs_[i].push_back(
blobs_[blob_name_to_idx[blob_name]].get());
top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
} else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
// If we are not doing in-place computation but has duplicated blobs,
// raise an error.
LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
} else {
// Normal output.
LOG(INFO) << layer_param.name() << " -> " << blob_name;
// The blob is created unshaped; the layer's SetUp below shapes it.
shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
blobs_.push_back(blob_pointer);
blob_names_.push_back(blob_name);
blob_name_to_idx[blob_name] = blob_names_.size() - 1;
available_blobs.insert(blob_name);
top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
top_id_vecs_[i].push_back(blob_names_.size() - 1);
}
}
}
// In the end, all remaining blobs are considered output blobs.
for (set<string>::iterator it = available_blobs.begin();
it != available_blobs.end(); ++it) {
LOG(ERROR) << "This network produces output " << *it;
net_output_blob_indices_.push_back(blob_name_to_idx[*it]);
net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
}
LOG(INFO) << "Setting up the layers.";
// Now that the graph is wired, let each layer shape its top blobs and
// create its internal parameter blobs.
for (int i = 0; i < layers_.size(); ++i) {
LOG(INFO) << "Setting up " << layer_names_[i];
layers_[i]->SetUp(bottom_vecs_[i], &top_vecs_[i]);
// Collect this layer's learnable blobs into the net-wide parameter list.
vector<shared_ptr<Blob<Dtype> > >& layer_blobs = layers_[i]->blobs();
for (int j = 0; j < layer_blobs.size(); ++j) {
params_.push_back(layer_blobs[j]);
}
// push the learning rate mutlipliers
// (one per parameter blob; defaults to 1 when the layer specifies none).
if (layers_[i]->layer_param().blobs_lr_size()) {
CHECK_EQ(layers_[i]->layer_param().blobs_lr_size(), layer_blobs.size());
for (int j = 0; j < layer_blobs.size(); ++j) {
float local_lr = layers_[i]->layer_param().blobs_lr(j);
CHECK_GT(local_lr, 0.);
params_lr_.push_back(local_lr);
}
} else {
for (int j = 0; j < layer_blobs.size(); ++j) {
params_lr_.push_back(1.);
}
}
// NOTE(review): this log omits num() and prints only
// channels/height/width -- presumably intentional, but confirm.
for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->channels() << " "
<< top_vecs_[i][topid]->height() << " "
<< top_vecs_[i][topid]->width();
}
}
LOG(INFO) << "Network initialization done.";
}
// Run a forward pass: copy the caller-provided input blobs into the
// network's input blobs, run every layer in construction order, and
// return the network's output blobs.
template <typename Dtype>
const vector<Blob<Dtype>*>& Net<Dtype>::Forward(
    const vector<Blob<Dtype>*> & bottom) {
  // Guard against a caller supplying the wrong number of input blobs.
  // The constructor validates this at build time, but Forward can be
  // called with any vector: too few inputs would silently leave stale
  // data in the remaining input blobs, too many would index out of range.
  CHECK_EQ(bottom.size(), net_input_blob_indices_.size())
      << "Incorrect bottom blob size.";
  // Copy bottom to internal bottom
  for (int i = 0; i < bottom.size(); ++i) {
    blobs_[net_input_blob_indices_[i]]->CopyFrom(*bottom[i]);
  }
  // Run the layers in the order they were constructed.
  for (int i = 0; i < layers_.size(); ++i) {
    layers_[i]->Forward(bottom_vecs_[i], &top_vecs_[i]);
  }
  return net_output_blobs_;
}
// Run a backward pass over all layers in reverse order and return the
// sum of the per-layer losses.
template <typename Dtype>
Dtype Net<Dtype>::Backward() {
  // TODO(Yangqing): figure out those layers that do not need backward.
  Dtype total_loss = 0;
  for (int layer_id = static_cast<int>(layers_.size()) - 1; layer_id >= 0;
       --layer_id) {
    // Each layer reports its own contribution to the loss.
    total_loss += layers_[layer_id]->Backward(
        top_vecs_[layer_id], true, &bottom_vecs_[layer_id]);
  }
  return total_loss;
}
// Copy trained parameter blobs from `param` into this net. Layers are
// matched by name; source layers with no same-named layer here are
// skipped, and matched layers must agree in blob count and shape.
template <typename Dtype>
void Net<Dtype>::CopyTrainedLayersFrom(const NetParameter& param) {
  for (int source_id = 0; source_id < param.layers_size(); ++source_id) {
    const LayerParameter& source_layer = param.layers(source_id).layer();
    const string& source_layer_name = source_layer.name();
    // Linear search for one of our layers with a matching name.
    int target_layer_id = 0;
    for (; target_layer_id != layer_names_.size(); ++target_layer_id) {
      if (layer_names_[target_layer_id] == source_layer_name) {
        break;
      }
    }
    if (target_layer_id == layer_names_.size()) {
      // No layer of that name in this net: skip the source layer.
      LOG(INFO) << "Ignoring source layer " << source_layer_name;
      continue;
    }
    LOG(INFO) << "Loading source layer " << source_layer_name;
    vector<shared_ptr<Blob<Dtype> > >& target_blobs =
        layers_[target_layer_id]->blobs();
    CHECK_EQ(target_blobs.size(), source_layer.blobs_size())
        << "Incompatible number of blobs for layer " << source_layer_name;
    for (int j = 0; j < target_blobs.size(); ++j) {
      // Shapes must match exactly before the data is copied over.
      CHECK_EQ(target_blobs[j]->num(), source_layer.blobs(j).num());
      CHECK_EQ(target_blobs[j]->channels(), source_layer.blobs(j).channels());
      CHECK_EQ(target_blobs[j]->height(), source_layer.blobs(j).height());
      CHECK_EQ(target_blobs[j]->width(), source_layer.blobs(j).width());
      target_blobs[j]->FromProto(source_layer.blobs(j));
    }
  }
}
// Serialize the network into `param`: its name, the names of its input
// blobs, and one LayerConnection per layer (bottom/top blob names plus
// the layer's own parameters; diffs included when `write_diff` is set).
template <typename Dtype>
void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) {
  param->Clear();
  param->set_name(name_);
  // Record the names of the network-level input blobs.
  for (int i = 0; i < net_input_blob_indices_.size(); ++i) {
    param->add_input(blob_names_[net_input_blob_indices_[i]]);
  }
  LOG(INFO) << "Serializing " << layers_.size() << " layers";
  for (int i = 0; i < layers_.size(); ++i) {
    LayerConnection* connection = param->add_layers();
    // Bottom and top blob names for this layer's connection.
    for (int b = 0; b < bottom_id_vecs_[i].size(); ++b) {
      connection->add_bottom(blob_names_[bottom_id_vecs_[i][b]]);
    }
    for (int t = 0; t < top_id_vecs_[i].size(); ++t) {
      connection->add_top(blob_names_[top_id_vecs_[i][t]]);
    }
    // The layer itself serializes its parameters (and optionally diffs).
    layers_[i]->ToProto(connection->mutable_layer(), write_diff);
  }
}
// Apply the accumulated diff of every learnable parameter blob.
template <typename Dtype>
void Net<Dtype>::Update() {
  const int num_params = params_.size();
  for (int param_id = 0; param_id < num_params; ++param_id) {
    params_[param_id]->Update();
  }
}
INSTANTIATE_CLASS(Net);
} // namespace caffe
|