summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEvan Shelhamer <shelhamer@imaginarynumber.net>2014-05-22 00:56:35 -0700
committerEvan Shelhamer <shelhamer@imaginarynumber.net>2014-05-22 00:59:06 -0700
commit5d0958c173ac4d4632ea4146c538a35585a3ddc4 (patch)
tree2cc91a7c5601cd6045b215cd9dbb44337c3c9b33
parent2cb1359d74e1891892f0a1dadc0e975272451ef6 (diff)
downloadcaffe-5d0958c173ac4d4632ea4146c538a35585a3ddc4.tar.gz
caffe-5d0958c173ac4d4632ea4146c538a35585a3ddc4.tar.bz2
caffe-5d0958c173ac4d4632ea4146c538a35585a3ddc4.zip
release v1 model defs + weights
- Caffe reference ImageNet model - AlexNet. Note that one can upgrade the weights locally with `upgrade_net_proto_binary.bin` to avoid re-downloading.
-rw-r--r--examples/imagenet/alexnet_deploy.prototxt224
-rw-r--r--examples/imagenet/alexnet_train.prototxt242
-rw-r--r--examples/imagenet/alexnet_val.prototxt180
-rwxr-xr-xexamples/imagenet/get_caffe_alexnet_model.sh2
-rwxr-xr-xexamples/imagenet/get_caffe_reference_imagenet_model.sh4
5 files changed, 301 insertions, 351 deletions
diff --git a/examples/imagenet/alexnet_deploy.prototxt b/examples/imagenet/alexnet_deploy.prototxt
index 4059fd5d..d010753f 100644
--- a/examples/imagenet/alexnet_deploy.prototxt
+++ b/examples/imagenet/alexnet_deploy.prototxt
@@ -5,32 +5,30 @@ input_dim: 3
input_dim: 227
input_dim: 227
layers {
- layer {
- name: "conv1"
- type: "conv"
+ name: "conv1"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 96
- kernelsize: 11
+ kernel_size: 11
stride: 4
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "data"
top: "conv1"
}
layers {
- layer {
- name: "relu1"
- type: "relu"
- }
+ name: "relu1"
+ type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
- layer {
- name: "norm1"
- type: "lrn"
+ name: "norm1"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
@@ -39,44 +37,42 @@ layers {
top: "norm1"
}
layers {
- layer {
- name: "pool1"
- type: "pool"
+ name: "pool1"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm1"
top: "pool1"
}
layers {
- layer {
- name: "conv2"
- type: "conv"
+ name: "conv2"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 5
pad: 2
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
+ kernel_size: 5
+ group: 2
}
bottom: "pool1"
top: "conv2"
}
layers {
- layer {
- name: "relu2"
- type: "relu"
- }
+ name: "relu2"
+ type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
- layer {
- name: "norm2"
- type: "lrn"
+ name: "norm2"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
@@ -85,176 +81,164 @@ layers {
top: "norm2"
}
layers {
- layer {
- name: "pool2"
- type: "pool"
+ name: "pool2"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm2"
top: "pool2"
}
layers {
- layer {
- name: "conv3"
- type: "conv"
+ name: "conv3"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 384
- kernelsize: 3
pad: 1
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
+ kernel_size: 3
}
bottom: "pool2"
top: "conv3"
}
layers {
- layer {
- name: "relu3"
- type: "relu"
- }
+ name: "relu3"
+ type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
- layer {
- name: "conv4"
- type: "conv"
+ name: "conv4"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 384
- group: 2
- kernelsize: 3
pad: 1
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
+ kernel_size: 3
+ group: 2
}
bottom: "conv3"
top: "conv4"
}
layers {
- layer {
- name: "relu4"
- type: "relu"
- }
+ name: "relu4"
+ type: RELU
bottom: "conv4"
top: "conv4"
}
layers {
- layer {
- name: "conv5"
- type: "conv"
+ name: "conv5"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 3
pad: 1
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
+ kernel_size: 3
+ group: 2
}
bottom: "conv4"
top: "conv5"
}
layers {
- layer {
- name: "relu5"
- type: "relu"
- }
+ name: "relu5"
+ type: RELU
bottom: "conv5"
top: "conv5"
}
layers {
- layer {
- name: "pool5"
- type: "pool"
- kernelsize: 3
+ name: "pool5"
+ type: POOLING
+ pooling_param {
pool: MAX
+ kernel_size: 3
stride: 2
}
bottom: "conv5"
top: "pool5"
}
layers {
- layer {
- name: "fc6"
- type: "innerproduct"
+ name: "fc6"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 4096
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "pool5"
top: "fc6"
}
layers {
- layer {
- name: "relu6"
- type: "relu"
- }
+ name: "relu6"
+ type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "drop6"
- type: "dropout"
+ name: "drop6"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "fc7"
- type: "innerproduct"
+ name: "fc7"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 4096
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "fc6"
top: "fc7"
}
layers {
- layer {
- name: "relu7"
- type: "relu"
- }
+ name: "relu7"
+ type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "drop7"
- type: "dropout"
+ name: "drop7"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "fc8"
- type: "innerproduct"
+ name: "fc8"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 1000
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "fc7"
top: "fc8"
}
layers {
- layer {
- name: "prob"
- type: "softmax"
- }
+ name: "prob"
+ type: SOFTMAX
bottom: "fc8"
top: "prob"
}
diff --git a/examples/imagenet/alexnet_train.prototxt b/examples/imagenet/alexnet_train.prototxt
index c5394dc5..32a96cfd 100644
--- a/examples/imagenet/alexnet_train.prototxt
+++ b/examples/imagenet/alexnet_train.prototxt
@@ -1,23 +1,27 @@
name: "AlexNet"
layers {
- layer {
- name: "data"
- type: "data"
+ name: "data"
+ type: DATA
+ data_param {
source: "ilsvrc12_train_leveldb"
- meanfile: "../../data/ilsvrc12/imagenet_mean.binaryproto"
- batchsize: 256
- cropsize: 227
+ mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
+ batch_size: 256
+ crop_size: 227
mirror: true
}
top: "data"
top: "label"
}
layers {
- layer {
- name: "conv1"
- type: "conv"
+ name: "conv1"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 96
- kernelsize: 11
+ kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
@@ -25,28 +29,22 @@ layers {
}
bias_filler {
type: "constant"
- value: 0.
+ value: 0
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "data"
top: "conv1"
}
layers {
- layer {
- name: "relu1"
- type: "relu"
- }
+ name: "relu1"
+ type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
- layer {
- name: "norm1"
- type: "lrn"
+ name: "norm1"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
@@ -55,24 +53,28 @@ layers {
top: "norm1"
}
layers {
- layer {
- name: "pool1"
- type: "pool"
+ name: "pool1"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm1"
top: "pool1"
}
layers {
- layer {
- name: "conv2"
- type: "conv"
+ name: "conv2"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 5
pad: 2
+ kernel_size: 5
+ group: 2
weight_filler {
type: "gaussian"
std: 0.01
@@ -81,26 +83,20 @@ layers {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "pool1"
top: "conv2"
}
layers {
- layer {
- name: "relu2"
- type: "relu"
- }
+ name: "relu2"
+ type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
- layer {
- name: "norm2"
- type: "lrn"
+ name: "norm2"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
@@ -109,55 +105,57 @@ layers {
top: "norm2"
}
layers {
- layer {
- name: "pool2"
- type: "pool"
+ name: "pool2"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm2"
top: "pool2"
}
layers {
- layer {
- name: "conv3"
- type: "conv"
+ name: "conv3"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 384
- kernelsize: 3
pad: 1
+ kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
- value: 0.
+ value: 0
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "pool2"
top: "conv3"
}
layers {
- layer {
- name: "relu3"
- type: "relu"
- }
+ name: "relu3"
+ type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
- layer {
- name: "conv4"
- type: "conv"
+ name: "conv4"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 384
- group: 2
- kernelsize: 3
pad: 1
+ kernel_size: 3
+ group: 2
weight_filler {
type: "gaussian"
std: 0.01
@@ -166,30 +164,28 @@ layers {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "conv3"
top: "conv4"
}
layers {
- layer {
- name: "relu4"
- type: "relu"
- }
+ name: "relu4"
+ type: RELU
bottom: "conv4"
top: "conv4"
}
layers {
- layer {
- name: "conv5"
- type: "conv"
+ name: "conv5"
+ type: CONVOLUTION
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 3
pad: 1
+ kernel_size: 3
+ group: 2
weight_filler {
type: "gaussian"
std: 0.01
@@ -198,37 +194,35 @@ layers {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "conv4"
top: "conv5"
}
layers {
- layer {
- name: "relu5"
- type: "relu"
- }
+ name: "relu5"
+ type: RELU
bottom: "conv5"
top: "conv5"
}
layers {
- layer {
- name: "pool5"
- type: "pool"
- kernelsize: 3
+ name: "pool5"
+ type: POOLING
+ pooling_param {
pool: MAX
+ kernel_size: 3
stride: 2
}
bottom: "conv5"
top: "pool5"
}
layers {
- layer {
- name: "fc6"
- type: "innerproduct"
+ name: "fc6"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
@@ -238,35 +232,33 @@ layers {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "pool5"
top: "fc6"
}
layers {
- layer {
- name: "relu6"
- type: "relu"
- }
+ name: "relu6"
+ type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "drop6"
- type: "dropout"
+ name: "drop6"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "fc7"
- type: "innerproduct"
+ name: "fc7"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
@@ -276,35 +268,33 @@ layers {
type: "constant"
value: 0.1
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "fc6"
top: "fc7"
}
layers {
- layer {
- name: "relu7"
- type: "relu"
- }
+ name: "relu7"
+ type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "drop7"
- type: "dropout"
+ name: "drop7"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "fc8"
- type: "innerproduct"
+ name: "fc8"
+ type: INNER_PRODUCT
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
num_output: 1000
weight_filler {
type: "gaussian"
@@ -312,21 +302,15 @@ layers {
}
bias_filler {
type: "constant"
- value: 0.
+ value: 0
}
- blobs_lr: 1.
- blobs_lr: 2.
- weight_decay: 1.
- weight_decay: 0.
}
bottom: "fc7"
top: "fc8"
}
layers {
- layer {
- name: "loss"
- type: "softmax_loss"
- }
+ name: "loss"
+ type: SOFTMAX_LOSS
bottom: "fc8"
bottom: "label"
}
diff --git a/examples/imagenet/alexnet_val.prototxt b/examples/imagenet/alexnet_val.prototxt
index aff33d01..3fd6296e 100644
--- a/examples/imagenet/alexnet_val.prototxt
+++ b/examples/imagenet/alexnet_val.prototxt
@@ -1,40 +1,38 @@
name: "AlexNet"
layers {
- layer {
- name: "data"
- type: "data"
+ name: "data"
+ type: DATA
+ data_param {
source: "ilsvrc12_val_leveldb"
- meanfile: "../../data/ilsvrc12/imagenet_mean.binaryproto"
- batchsize: 50
- cropsize: 227
+ mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
+ batch_size: 50
+ crop_size: 227
mirror: false
}
top: "data"
top: "label"
}
layers {
- layer {
- name: "conv1"
- type: "conv"
+ name: "conv1"
+ type: CONVOLUTION
+ convolution_param {
num_output: 96
- kernelsize: 11
+ kernel_size: 11
stride: 4
}
bottom: "data"
top: "conv1"
}
layers {
- layer {
- name: "relu1"
- type: "relu"
- }
+ name: "relu1"
+ type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
- layer {
- name: "norm1"
- type: "lrn"
+ name: "norm1"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
@@ -43,40 +41,38 @@ layers {
top: "norm1"
}
layers {
- layer {
- name: "pool1"
- type: "pool"
+ name: "pool1"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm1"
top: "pool1"
}
layers {
- layer {
- name: "conv2"
- type: "conv"
+ name: "conv2"
+ type: CONVOLUTION
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 5
pad: 2
+ kernel_size: 5
+ group: 2
}
bottom: "pool1"
top: "conv2"
}
layers {
- layer {
- name: "relu2"
- type: "relu"
- }
+ name: "relu2"
+ type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
- layer {
- name: "norm2"
- type: "lrn"
+ name: "norm2"
+ type: LRN
+ lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
@@ -85,161 +81,147 @@ layers {
top: "norm2"
}
layers {
- layer {
- name: "pool2"
- type: "pool"
+ name: "pool2"
+ type: POOLING
+ pooling_param {
pool: MAX
- kernelsize: 3
+ kernel_size: 3
stride: 2
}
bottom: "norm2"
top: "pool2"
}
layers {
- layer {
- name: "conv3"
- type: "conv"
+ name: "conv3"
+ type: CONVOLUTION
+ convolution_param {
num_output: 384
- kernelsize: 3
pad: 1
+ kernel_size: 3
}
bottom: "pool2"
top: "conv3"
}
layers {
- layer {
- name: "relu3"
- type: "relu"
- }
+ name: "relu3"
+ type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
- layer {
- name: "conv4"
- type: "conv"
+ name: "conv4"
+ type: CONVOLUTION
+ convolution_param {
num_output: 384
- group: 2
- kernelsize: 3
pad: 1
+ kernel_size: 3
+ group: 2
}
bottom: "conv3"
top: "conv4"
}
layers {
- layer {
- name: "relu4"
- type: "relu"
- }
+ name: "relu4"
+ type: RELU
bottom: "conv4"
top: "conv4"
}
layers {
- layer {
- name: "conv5"
- type: "conv"
+ name: "conv5"
+ type: CONVOLUTION
+ convolution_param {
num_output: 256
- group: 2
- kernelsize: 3
pad: 1
+ kernel_size: 3
+ group: 2
}
bottom: "conv4"
top: "conv5"
}
layers {
- layer {
- name: "relu5"
- type: "relu"
- }
+ name: "relu5"
+ type: RELU
bottom: "conv5"
top: "conv5"
}
layers {
- layer {
- name: "pool5"
- type: "pool"
- kernelsize: 3
+ name: "pool5"
+ type: POOLING
+ pooling_param {
pool: MAX
+ kernel_size: 3
stride: 2
}
bottom: "conv5"
top: "pool5"
}
layers {
- layer {
- name: "fc6"
- type: "innerproduct"
+ name: "fc6"
+ type: INNER_PRODUCT
+ inner_product_param {
num_output: 4096
}
bottom: "pool5"
top: "fc6"
}
layers {
- layer {
- name: "relu6"
- type: "relu"
- }
+ name: "relu6"
+ type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "drop6"
- type: "dropout"
+ name: "drop6"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc6"
top: "fc6"
}
layers {
- layer {
- name: "fc7"
- type: "innerproduct"
+ name: "fc7"
+ type: INNER_PRODUCT
+ inner_product_param {
num_output: 4096
}
bottom: "fc6"
top: "fc7"
}
layers {
- layer {
- name: "relu7"
- type: "relu"
- }
+ name: "relu7"
+ type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "drop7"
- type: "dropout"
+ name: "drop7"
+ type: DROPOUT
+ dropout_param {
dropout_ratio: 0.5
}
bottom: "fc7"
top: "fc7"
}
layers {
- layer {
- name: "fc8"
- type: "innerproduct"
+ name: "fc8"
+ type: INNER_PRODUCT
+ inner_product_param {
num_output: 1000
}
bottom: "fc7"
top: "fc8"
}
layers {
- layer {
- name: "prob"
- type: "softmax"
- }
+ name: "prob"
+ type: SOFTMAX
bottom: "fc8"
top: "prob"
}
layers {
- layer {
- name: "accuracy"
- type: "accuracy"
- }
+ top: "accuracy"
+ name: "accuracy"
+ type: ACCURACY
bottom: "prob"
bottom: "label"
- top: "accuracy"
}
diff --git a/examples/imagenet/get_caffe_alexnet_model.sh b/examples/imagenet/get_caffe_alexnet_model.sh
index 167c9371..399e2a05 100755
--- a/examples/imagenet/get_caffe_alexnet_model.sh
+++ b/examples/imagenet/get_caffe_alexnet_model.sh
@@ -3,7 +3,7 @@
# for ilsvrc image classification and deep feature extraction
MODEL=caffe_alexnet_model
-CHECKSUM=91df0e19290ef78324de9eecb258a77f
+CHECKSUM=29eb495b11613825c1900382f5286963
if [ -f $MODEL ]; then
echo "Model already exists. Checking md5..."
diff --git a/examples/imagenet/get_caffe_reference_imagenet_model.sh b/examples/imagenet/get_caffe_reference_imagenet_model.sh
index 7a85613e..2381dbd7 100755
--- a/examples/imagenet/get_caffe_reference_imagenet_model.sh
+++ b/examples/imagenet/get_caffe_reference_imagenet_model.sh
@@ -3,7 +3,7 @@
# for ilsvrc image classification and deep feature extraction
MODEL=caffe_reference_imagenet_model
-CHECKSUM=bf44bac4a59aa7792b296962fe483f2b
+CHECKSUM=af678f0bd3cdd2437e35679d88665170
if [ -f $MODEL ]; then
echo "Model already exists. Checking md5..."
@@ -23,6 +23,6 @@ fi
echo "Downloading..."
-wget --no-check-certificate https://www.dropbox.com/s/n3jups0gr7uj0dv/$MODEL
+wget --no-check-certificate https://www.dropbox.com/s/7qkokvr7x0esljl/$MODEL
echo "Done. Please run this command again to verify that checksum = $CHECKSUM."