author     Jonathan L Long <jonlong@cs.berkeley.edu>  2015-01-06 20:25:53 -0800
committer  Jonathan L Long <jonlong@cs.berkeley.edu>  2015-01-09 00:03:47 -0800
commit     2377b68dcc3c20685ec388097dbe9e9c6b3b0e92
tree       24219da4918fff3c68ead7258e83b137a91b52e3
parent     c6a88bf549677446fd90c3f838d5ecf5b91da8a1
improve const-ness of Net
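
Marking these Net member functions const lets read-only code hold a const Net<Dtype>& instead of a mutable pointer. A minimal sketch of the kind of caller this enables (LogBlobInfo is hypothetical, not part of this patch):

    template <typename Dtype>
    void LogBlobInfo(const caffe::Net<Dtype>& net, const std::string& blob_name) {
      // has_blob() and blob_by_name() are const after this patch, so both
      // calls compile through a const reference.
      if (net.has_blob(blob_name)) {
        const boost::shared_ptr<caffe::Blob<Dtype> > blob = net.blob_by_name(blob_name);
        LOG(INFO) << blob_name << " has " << blob->count() << " elements";
      }
    }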
Diffstat (limited to 'src/caffe')
-rw-r--r--  src/caffe/net.cpp    | 16
-rw-r--r--  src/caffe/solver.cpp | 23
2 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp
index e4492cfd..47fc8446 100644
--- a/src/caffe/net.cpp
+++ b/src/caffe/net.cpp
@@ -636,7 +636,7 @@ void Net<Dtype>::UpdateDebugInfo(const int param_id) {
}
template <typename Dtype>
-void Net<Dtype>::ShareTrainedLayersWith(Net* other) {
+void Net<Dtype>::ShareTrainedLayersWith(const Net* other) {
int num_source_layers = other->layers().size();
for (int i = 0; i < num_source_layers; ++i) {
Layer<Dtype>* source_layer = other->layers()[i].get();
@@ -726,7 +726,7 @@ void Net<Dtype>::CopyTrainedLayersFrom(const string trained_filename) {
}
template <typename Dtype>
-void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) {
+void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) const {
param->Clear();
param->set_name(name_);
// Add bottom and top
@@ -785,16 +785,16 @@ void Net<Dtype>::Update() {
}
template <typename Dtype>
-bool Net<Dtype>::has_blob(const string& blob_name) {
+bool Net<Dtype>::has_blob(const string& blob_name) const {
return blob_names_index_.find(blob_name) != blob_names_index_.end();
}
template <typename Dtype>
const shared_ptr<Blob<Dtype> > Net<Dtype>::blob_by_name(
- const string& blob_name) {
+ const string& blob_name) const {
shared_ptr<Blob<Dtype> > blob_ptr;
if (has_blob(blob_name)) {
- blob_ptr = blobs_[blob_names_index_[blob_name]];
+ blob_ptr = blobs_[blob_names_index_.find(blob_name)->second];
} else {
blob_ptr.reset((Blob<Dtype>*)(NULL));
LOG(WARNING) << "Unknown blob name " << blob_name;
@@ -803,16 +803,16 @@ const shared_ptr<Blob<Dtype> > Net<Dtype>::blob_by_name(
}
template <typename Dtype>
-bool Net<Dtype>::has_layer(const string& layer_name) {
+bool Net<Dtype>::has_layer(const string& layer_name) const {
return layer_names_index_.find(layer_name) != layer_names_index_.end();
}
template <typename Dtype>
const shared_ptr<Layer<Dtype> > Net<Dtype>::layer_by_name(
- const string& layer_name) {
+ const string& layer_name) const {
shared_ptr<Layer<Dtype> > layer_ptr;
if (has_layer(layer_name)) {
- layer_ptr = layers_[layer_names_index_[layer_name]];
+ layer_ptr = layers_[layer_names_index_.find(layer_name)->second];
} else {
layer_ptr.reset((Layer<Dtype>*)(NULL));
LOG(WARNING) << "Unknown layer name " << layer_name;
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp
index ab9c00ee..3fa0e2d1 100644
--- a/src/caffe/solver.cpp
+++ b/src/caffe/solver.cpp
@@ -418,7 +418,7 @@ Dtype SGDSolver<Dtype>::GetLearningRate() {
template <typename Dtype>
void SGDSolver<Dtype>::PreSolve() {
// Initialize the history
- vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+ const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
history_.clear();
update_.clear();
temp_.clear();
@@ -439,9 +439,10 @@ void SGDSolver<Dtype>::PreSolve() {
template <typename Dtype>
void SGDSolver<Dtype>::ComputeUpdateValue() {
- vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
- vector<float>& net_params_lr = this->net_->params_lr();
- vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
+ const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+ const vector<float>& net_params_lr = this->net_->params_lr();
+ const vector<float>& net_params_weight_decay =
+ this->net_->params_weight_decay();
// get the learning rate
Dtype rate = GetLearningRate();
if (this->param_.display() && this->iter_ % this->param_.display() == 0) {
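
The solver hunks all make the same adjustment: a non-const reference cannot bind to a const-qualified return value, so once Net's accessors are const (the matching header change lives outside this src/caffe-filtered diff), the solver's locals must become const references too. A reduced illustration with placeholder types:

    #include <vector>

    struct NetLike {
      const std::vector<float>& params_lr() const { return lr_; }
      std::vector<float> lr_;
    };

    void Example(const NetLike& net) {
      // std::vector<float>& bad = net.params_lr();   // error: discards const
      const std::vector<float>& lr = net.params_lr(); // OK: read-only alias, no copy
      (void)lr;
    }

The const reference keeps the zero-copy behavior of the original code while documenting that the solver never modifies the vectors themselves; the Blobs they point to can still be updated through the non-const shared_ptr elements.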
@@ -552,9 +553,10 @@ void SGDSolver<Dtype>::RestoreSolverState(const SolverState& state) {
template <typename Dtype>
void NesterovSolver<Dtype>::ComputeUpdateValue() {
- vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
- vector<float>& net_params_lr = this->net_->params_lr();
- vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
+ const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+ const vector<float>& net_params_lr = this->net_->params_lr();
+ const vector<float>& net_params_weight_decay =
+ this->net_->params_weight_decay();
// get the learning rate
Dtype rate = this->GetLearningRate();
if (this->param_.display() && this->iter_ % this->param_.display() == 0) {
@@ -667,9 +669,10 @@ void NesterovSolver<Dtype>::ComputeUpdateValue() {
template <typename Dtype>
void AdaGradSolver<Dtype>::ComputeUpdateValue() {
- vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
- vector<float>& net_params_lr = this->net_->params_lr();
- vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
+ const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
+ const vector<float>& net_params_lr = this->net_->params_lr();
+ const vector<float>& net_params_weight_decay =
+ this->net_->params_weight_decay();
// get the learning rate
Dtype rate = this->GetLearningRate();
Dtype delta = this->param_.delta();
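
With ShareTrainedLayersWith now taking a const Net*, a deploy-phase net can borrow learned weights through a read-only handle on the training net. A usage sketch, assuming a hypothetical deploy.prototxt and the single-argument Net constructor:

    #include "caffe/net.hpp"

    void BuildTestNet(const caffe::Net<float>& train_net) {
      caffe::Net<float> test_net("deploy.prototxt");  // placeholder model definition
      test_net.ShareTrainedLayersWith(&train_net);    // compiles against a const Net
      // test_net now forwards with train_net's parameters, without copying them.
    }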