summaryrefslogtreecommitdiff
path: root/models/bvlc_alexnet/alexnet_train_val.prototxt
diff options
context:
space:
mode:
Diffstat (limited to 'models/bvlc_alexnet/alexnet_train_val.prototxt')
-rw-r--r-- models/bvlc_alexnet/alexnet_train_val.prototxt | 344
1 file changed, 344 insertions, 0 deletions
diff --git a/models/bvlc_alexnet/alexnet_train_val.prototxt b/models/bvlc_alexnet/alexnet_train_val.prototxt
new file mode 100644
index 00000000..69b8916d
--- /dev/null
+++ b/models/bvlc_alexnet/alexnet_train_val.prototxt
@@ -0,0 +1,344 @@
+name: "AlexNet"
+layers {
+ name: "data"
+ type: DATA
+ top: "data"
+ top: "label"
+ data_param {
+ source: "examples/imagenet/ilsvrc12_train_leveldb"
+ batch_size: 256
+ }
+ transform_param {
+ crop_size: 227
+ mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
+ mirror: true
+ }
+ include: { phase: TRAIN }
+}
+layers {
+ name: "data"
+ type: DATA
+ top: "data"
+ top: "label"
+ data_param {
+ source: "examples/imagenet/ilsvrc12_val_leveldb"
+ batch_size: 50
+ }
+ transform_param {
+ crop_size: 227
+ mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
+ mirror: false
+ }
+ include: { phase: TEST }
+}
+layers {
+ name: "conv1"
+ type: CONVOLUTION
+ bottom: "data"
+ top: "conv1"
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
+ num_output: 96
+ kernel_size: 11
+ stride: 4
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layers {
+ name: "relu1"
+ type: RELU
+ bottom: "conv1"
+ top: "conv1"
+}
+layers {
+ name: "norm1"
+ type: LRN
+ bottom: "conv1"
+ top: "norm1"
+ lrn_param {
+ local_size: 5
+ alpha: 0.0001
+ beta: 0.75
+ }
+}
+layers {
+ name: "pool1"
+ type: POOLING
+ bottom: "norm1"
+ top: "pool1"
+ pooling_param {
+ pool: MAX
+ kernel_size: 3
+ stride: 2
+ }
+}
+layers {
+ name: "conv2"
+ type: CONVOLUTION
+ bottom: "pool1"
+ top: "conv2"
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
+ num_output: 256
+ pad: 2
+ kernel_size: 5
+ group: 2
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0.1
+ }
+ }
+}
+layers {
+ name: "relu2"
+ type: RELU
+ bottom: "conv2"
+ top: "conv2"
+}
+layers {
+ name: "norm2"
+ type: LRN
+ bottom: "conv2"
+ top: "norm2"
+ lrn_param {
+ local_size: 5
+ alpha: 0.0001
+ beta: 0.75
+ }
+}
+layers {
+ name: "pool2"
+ type: POOLING
+ bottom: "norm2"
+ top: "pool2"
+ pooling_param {
+ pool: MAX
+ kernel_size: 3
+ stride: 2
+ }
+}
+layers {
+ name: "conv3"
+ type: CONVOLUTION
+ bottom: "pool2"
+ top: "conv3"
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
+ num_output: 384
+ pad: 1
+ kernel_size: 3
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layers {
+ name: "relu3"
+ type: RELU
+ bottom: "conv3"
+ top: "conv3"
+}
+layers {
+ name: "conv4"
+ type: CONVOLUTION
+ bottom: "conv3"
+ top: "conv4"
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
+ num_output: 384
+ pad: 1
+ kernel_size: 3
+ group: 2
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0.1
+ }
+ }
+}
+layers {
+ name: "relu4"
+ type: RELU
+ bottom: "conv4"
+ top: "conv4"
+}
+layers {
+ name: "conv5"
+ type: CONVOLUTION
+ bottom: "conv4"
+ top: "conv5"
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ convolution_param {
+ num_output: 256
+ pad: 1
+ kernel_size: 3
+ group: 2
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0.1
+ }
+ }
+}
+layers {
+ name: "relu5"
+ type: RELU
+ bottom: "conv5"
+ top: "conv5"
+}
+layers {
+ name: "pool5"
+ type: POOLING
+ bottom: "conv5"
+ top: "pool5"
+ pooling_param {
+ pool: MAX
+ kernel_size: 3
+ stride: 2
+ }
+}
+layers {
+ name: "fc6"
+ type: INNER_PRODUCT
+ bottom: "pool5"
+ top: "fc6"
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
+ num_output: 4096
+ weight_filler {
+ type: "gaussian"
+ std: 0.005
+ }
+ bias_filler {
+ type: "constant"
+ value: 0.1
+ }
+ }
+}
+layers {
+ name: "relu6"
+ type: RELU
+ bottom: "fc6"
+ top: "fc6"
+}
+layers {
+ name: "drop6"
+ type: DROPOUT
+ bottom: "fc6"
+ top: "fc6"
+ dropout_param {
+ dropout_ratio: 0.5
+ }
+}
+layers {
+ name: "fc7"
+ type: INNER_PRODUCT
+ bottom: "fc6"
+ top: "fc7"
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
+ num_output: 4096
+ weight_filler {
+ type: "gaussian"
+ std: 0.005
+ }
+ bias_filler {
+ type: "constant"
+ value: 0.1
+ }
+ }
+}
+layers {
+ name: "relu7"
+ type: RELU
+ bottom: "fc7"
+ top: "fc7"
+}
+layers {
+ name: "drop7"
+ type: DROPOUT
+ bottom: "fc7"
+ top: "fc7"
+ dropout_param {
+ dropout_ratio: 0.5
+ }
+}
+layers {
+ name: "fc8"
+ type: INNER_PRODUCT
+ bottom: "fc7"
+ top: "fc8"
+ blobs_lr: 1
+ blobs_lr: 2
+ weight_decay: 1
+ weight_decay: 0
+ inner_product_param {
+ num_output: 1000
+ weight_filler {
+ type: "gaussian"
+ std: 0.01
+ }
+ bias_filler {
+ type: "constant"
+ value: 0
+ }
+ }
+}
+layers {
+ name: "accuracy"
+ type: ACCURACY
+ bottom: "fc8"
+ bottom: "label"
+ top: "accuracy"
+ include: { phase: TEST }
+}
+layers {
+ name: "loss"
+ type: SOFTMAX_LOSS
+ bottom: "fc8"
+ bottom: "label"
+ top: "loss"
+}