// tools/net_speed_benchmark.cpp
// Copyright 2013 Yangqing Jia

#include <cstdlib>  // atoi
#include <cstring>  // strcmp
#include <ctime>    // clock
#include <string>
#include <vector>

#include "cuda_runtime.h"
#include "fcntl.h"
#include "google/protobuf/text_format.h"

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/net.hpp"
#include "caffe/filler.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/io.hpp"
#include "caffe/solver.hpp"

using boost::shared_ptr;

using namespace caffe;  // NOLINT(build/namespaces)

int main(int argc, char** argv) {
  int total_iter = 50;

  if (argc < 2) {
    LOG(ERROR) << "Usage: net_speed_benchmark net_proto [iterations=50] "
        << "[CPU/GPU] [device_id=0]";
    return 1;
  }
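
  // Example invocation (the prototxt path here is purely illustrative):
  //   net_speed_benchmark examples/lenet/lenet_train.prototxt 50 GPU 0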

  if (argc >= 3) {
    total_iter = atoi(argv[2]);
  }

  LOG(ERROR) << "Testing for " << total_iter << "Iterations.";

  if (argc >= 4 && strcmp(argv[3], "GPU") == 0) {
    LOG(ERROR) << "Using GPU";
    int device_id = 0;
    if (argc >= 5) {
      device_id = atoi(argv[4]);
    }
    LOG(ERROR) << "Using device_id=" << device_id;
    Caffe::SetDevice(device_id);
    Caffe::set_mode(Caffe::GPU);
  } else {
    LOG(ERROR) << "Using CPU";
    Caffe::set_mode(Caffe::CPU);
  }

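  // Benchmark in TRAIN phase so layers with phase-dependent behavior
  // (dropout, for example) take their training code path, and the timings
  // reflect per-iteration training cost.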
  Caffe::set_phase(Caffe::TRAIN);
  NetParameter net_param;
  ReadProtoFromTextFile(argv[1], &net_param);
  Net<float> caffe_net(net_param);

  // Run the network without training.
  LOG(ERROR) << "Performing Forward";
  // Note that for the speed benchmark, we will assume that the network does
  // not take any input blobs.
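  // This first pass also serves as a warm-up: one-time costs such as lazy
  // GPU memory allocation happen here rather than inside the timed loops.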
  caffe_net.Forward(vector<Blob<float>*>());
  LOG(ERROR) << "Performing Backward";
  LOG(ERROR) << "Initial loss: " << caffe_net.Backward();

  const vector<shared_ptr<Layer<float> > >& layers = caffe_net.layers();
  vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs();
  vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs();
  LOG(ERROR) << "*** Benchmark begins ***";
  if (Caffe::mode() == Caffe::GPU) {
    cudaDeviceSynchronize();
  }
  clock_t forward_start = clock();
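  // Each per-layer figure below is the total over total_iter repetitions,
  // not a per-iteration average. clock() measures CPU time, which tracks
  // wall time here because cudaDeviceSynchronize typically busy-waits.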
  for (size_t i = 0; i < layers.size(); ++i) {
    const string& layername = layers[i]->layer_param().name();
    if (Caffe::mode() == Caffe::GPU) {
      cudaDeviceSynchronize();
    }
    clock_t start = clock();
    for (int j = 0; j < total_iter; ++j) {
      layers[i]->Forward(bottom_vecs[i], &top_vecs[i]);
    }
    if (Caffe::mode() == Caffe::GPU) {
      cudaDeviceSynchronize();
    }
    LOG(ERROR) << layername << "\tforward: "
        << static_cast<float>(clock() - start) / CLOCKS_PER_SEC
        << " seconds.";
  }
  LOG(ERROR) << "Forward pass: "
      << static_cast<float>(clock() - forward_start) / CLOCKS_PER_SEC
      << " seconds.";
  clock_t backward_start = clock();
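  // Walk the layers in reverse; the `true` argument asks each layer to
  // propagate gradients to its bottom blobs, as in a real training step.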
  for (int i = static_cast<int>(layers.size()) - 1; i >= 0; --i) {
    const string& layername = layers[i]->layer_param().name();
    if (Caffe::mode() == Caffe::GPU) {
      cudaDeviceSynchronize();
    }
    clock_t start = clock();
    for (int j = 0; j < total_iter; ++j) {
      layers[i]->Backward(top_vecs[i], true, &bottom_vecs[i]);
    }
    if (Caffe::mode() == Caffe::GPU) {
      cudaDeviceSynchronize();
    }
    LOG(ERROR) << layername << "\tbackward: "
        << static_cast<float>(clock() - start) / CLOCKS_PER_SEC
        << " seconds.";
  }
  LOG(ERROR) << "Backward pass: "
      << static_cast<float>(clock() - backward_start) / CLOCKS_PER_SEC
      << " seconds.";
  LOG(ERROR) << "Total Time: "
      << static_cast<float>(clock() - forward_start) / CLOCKS_PER_SEC
      << " seconds.";
  LOG(ERROR) << "*** Benchmark ends ***";
  return 0;
}