author    | Jeff Donahue <jeff.donahue@gmail.com> | 2015-02-05 15:17:24 -0800
committer | Jeff Donahue <jeff.donahue@gmail.com> | 2015-02-05 15:17:24 -0800
commit    | e6c80dac40d4bb13390b56bc18294e6e91b00436 (patch)
tree      | ce9356561d9ac4270b14a71f2272b48c96cceb36 /examples
parent    | 11a4c1655842a625a8fbb93f8c8ccd3629321a3f (diff)
Upgrade existing nets using the upgrade_net_proto_text tool
Restore comments afterwards
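This commit mechanically ports every example net definition from the old V1 prototxt schema to the current one; per the message above, the files were regenerated with the upgrade_net_proto_text tool (which, assuming the stock Caffe tool, takes an input and an output net definition file) and the comments were then restored by hand. The core pattern, sketched here from the conv1 layer of cifar10_full.prototxt as it appears in the diff below: `layers` becomes `layer`, enum layer types such as CONVOLUTION become strings such as "Convolution", and the positional blobs_lr / weight_decay multipliers move into structured param blocks as lr_mult / decay_mult.

    # V1 schema (before):
    layers {
      name: "conv1"
      type: CONVOLUTION
      bottom: "data"
      top: "conv1"
      blobs_lr: 1   # learning-rate multiplier for the weights
      blobs_lr: 2   # learning-rate multiplier for the bias
      convolution_param {
        num_output: 32
        pad: 2
        kernel_size: 5
        stride: 1
      }
    }

    # Current schema (after):
    layer {
      name: "conv1"
      type: "Convolution"
      bottom: "data"
      top: "conv1"
      param {
        lr_mult: 1   # weights
      }
      param {
        lr_mult: 2   # bias
      }
      convolution_param {
        num_output: 32
        pad: 2
        kernel_size: 5
        stride: 1
      }
    }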
Diffstat (limited to 'examples')
-rw-r--r-- | examples/cifar10/cifar10_full.prototxt | 84
-rw-r--r-- | examples/cifar10/cifar10_full_train_test.prototxt | 120
-rw-r--r-- | examples/cifar10/cifar10_quick.prototxt | 88
-rw-r--r-- | examples/cifar10/cifar10_quick_train_test.prototxt | 124
-rw-r--r-- | examples/feature_extraction/imagenet_val.prototxt | 114
-rw-r--r-- | examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt | 230
-rw-r--r-- | examples/hdf5_classification/train_val.prototxt | 44
-rw-r--r-- | examples/hdf5_classification/train_val2.prototxt | 64
-rw-r--r-- | examples/imagenet/bvlc_caffenet_full_conv.prototxt | 92
-rw-r--r-- | examples/mnist/lenet.prototxt | 64
-rw-r--r-- | examples/mnist/lenet_train_test.prototxt | 105
-rw-r--r-- | examples/mnist/mnist_autoencoder.prototxt | 260
-rw-r--r-- | examples/siamese/mnist_siamese.prototxt | 74
-rw-r--r-- | examples/siamese/mnist_siamese_train_test.prototxt | 260
14 files changed, 993 insertions, 730 deletions
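Two further patterns recur throughout the diff below, shown here as a sketch taken from the cifar10 train/test definition: the old inline `include: { phase: TRAIN }` becomes a regular `include { phase: TRAIN }` block placed at the top of the layer, the block order is normalized to include / transform_param / data_param, and data-transformation fields that V1 tolerated inside data_param (such as `scale` in the siamese nets) now live in transform_param.

    # V1 schema (before):
    layers {
      name: "cifar"
      type: DATA
      top: "data"
      top: "label"
      data_param {
        source: "examples/cifar10/cifar10_train_lmdb"
        batch_size: 100
        backend: LMDB
      }
      transform_param {
        mean_file: "examples/cifar10/mean.binaryproto"
      }
      include: { phase: TRAIN }
    }

    # Current schema (after):
    layer {
      name: "cifar"
      type: "Data"
      top: "data"
      top: "label"
      include {
        phase: TRAIN
      }
      transform_param {
        mean_file: "examples/cifar10/mean.binaryproto"
      }
      data_param {
        source: "examples/cifar10/cifar10_train_lmdb"
        batch_size: 100
        backend: LMDB
      }
    }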
diff --git a/examples/cifar10/cifar10_full.prototxt b/examples/cifar10/cifar10_full.prototxt index 8bbd3000..c16f7dca 100644 --- a/examples/cifar10/cifar10_full.prototxt +++ b/examples/cifar10/cifar10_full.prototxt @@ -6,13 +6,17 @@ input_dim: 1 input_dim: 3 input_dim: 32 input_dim: 32 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -20,9 +24,9 @@ layers { stride: 1 } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -31,31 +35,35 @@ layers { stride: 2 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "pool1" top: "pool1" } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { - norm_region: WITHIN_CHANNEL local_size: 3 alpha: 5e-05 beta: 0.75 + norm_region: WITHIN_CHANNEL } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -63,15 +71,15 @@ layers { stride: 1 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -80,21 +88,21 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { - norm_region: WITHIN_CHANNEL local_size: 3 alpha: 5e-05 beta: 0.75 + norm_region: WITHIN_CHANNEL } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -104,15 +112,15 @@ layers { stride: 1 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "pool3" - type: POOLING + type: "Pooling" bottom: "conv3" top: "pool3" pooling_param { @@ -121,22 +129,26 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool3" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 250 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 250 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 10 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "ip1" top: "prob" } diff --git a/examples/cifar10/cifar10_full_train_test.prototxt b/examples/cifar10/cifar10_full_train_test.prototxt index 38cc04f4..d45fc61e 100644 --- a/examples/cifar10/cifar10_full_train_test.prototxt +++ b/examples/cifar10/cifar10_full_train_test.prototxt @@ -1,41 +1,49 @@ name: "CIFAR10_full" -layers { +layer { name: "cifar" - type: DATA + type: "Data" top: "data" top: "label" + include { + phase: TRAIN + } + transform_param { + mean_file: "examples/cifar10/mean.binaryproto" + } data_param { source: "examples/cifar10/cifar10_train_lmdb" batch_size: 100 backend: LMDB } - transform_param { - mean_file: "examples/cifar10/mean.binaryproto" - } - include: { phase: TRAIN } } -layers { +layer { name: "cifar" - type: DATA + type: "Data" top: "data" top: "label" + include { + phase: TEST + } + transform_param { + mean_file: "examples/cifar10/mean.binaryproto" + } data_param { source: "examples/cifar10/cifar10_test_lmdb" batch_size: 100 backend: LMDB } - transform_param { - mean_file: 
"examples/cifar10/mean.binaryproto" - } - include: { phase: TEST } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -50,9 +58,9 @@ layers { } } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -61,31 +69,35 @@ layers { stride: 2 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "pool1" top: "pool1" } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { - norm_region: WITHIN_CHANNEL local_size: 3 alpha: 5e-05 beta: 0.75 + norm_region: WITHIN_CHANNEL } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -100,15 +112,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -117,21 +129,21 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { - norm_region: WITHIN_CHANNEL local_size: 3 alpha: 5e-05 beta: 0.75 + norm_region: WITHIN_CHANNEL } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -148,15 +160,15 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "pool3" - type: POOLING + type: "Pooling" bottom: "conv3" top: "pool3" pooling_param { @@ -165,15 +177,19 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool3" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 250 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 250 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 10 weight_filler { @@ -185,17 +201,19 @@ layers { } } } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "ip1" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "ip1" bottom: "label" top: "loss" diff --git a/examples/cifar10/cifar10_quick.prototxt b/examples/cifar10/cifar10_quick.prototxt index 505158f7..1ad190e1 100644 --- a/examples/cifar10/cifar10_quick.prototxt +++ b/examples/cifar10/cifar10_quick.prototxt @@ -4,13 +4,17 @@ input_dim: 1 input_dim: 3 input_dim: 32 input_dim: 32 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -18,9 +22,9 @@ layers { stride: 1 } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -29,19 +33,23 @@ layers { stride: 2 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "pool1" top: "pool1" } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } 
convolution_param { num_output: 32 pad: 2 @@ -49,15 +57,15 @@ layers { stride: 1 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -66,13 +74,17 @@ layers { stride: 2 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "pool2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 64 pad: 2 @@ -80,15 +92,15 @@ layers { stride: 1 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "pool3" - type: POOLING + type: "Pooling" bottom: "conv3" top: "pool3" pooling_param { @@ -97,31 +109,39 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool3" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 64 } } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "ip2" top: "prob" } diff --git a/examples/cifar10/cifar10_quick_train_test.prototxt b/examples/cifar10/cifar10_quick_train_test.prototxt index 074bb001..23177393 100644 --- a/examples/cifar10/cifar10_quick_train_test.prototxt +++ b/examples/cifar10/cifar10_quick_train_test.prototxt @@ -1,41 +1,49 @@ name: "CIFAR10_quick" -layers { +layer { name: "cifar" - type: DATA + type: "Data" top: "data" top: "label" + include { + phase: TRAIN + } + transform_param { + mean_file: "examples/cifar10/mean.binaryproto" + } data_param { source: "examples/cifar10/cifar10_train_lmdb" batch_size: 100 backend: LMDB } - transform_param { - mean_file: "examples/cifar10/mean.binaryproto" - } - include: { phase: TRAIN } } -layers { +layer { name: "cifar" - type: DATA + type: "Data" top: "data" top: "label" + include { + phase: TEST + } + transform_param { + mean_file: "examples/cifar10/mean.binaryproto" + } data_param { source: "examples/cifar10/cifar10_test_lmdb" batch_size: 100 backend: LMDB } - transform_param { - mean_file: "examples/cifar10/mean.binaryproto" - } - include: { phase: TEST } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -50,9 +58,9 @@ layers { } } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -61,19 +69,23 @@ layers { stride: 2 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "pool1" top: "pool1" } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 32 pad: 2 @@ -88,15 +100,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -105,13 +117,17 @@ layers { stride: 2 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: 
"Convolution" bottom: "pool2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 64 pad: 2 @@ -126,15 +142,15 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "pool3" - type: POOLING + type: "Pooling" bottom: "conv3" top: "pool3" pooling_param { @@ -143,13 +159,17 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool3" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 64 weight_filler { @@ -161,13 +181,17 @@ layers { } } } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -179,17 +203,19 @@ layers { } } } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "ip2" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "ip2" bottom: "label" top: "loss" diff --git a/examples/feature_extraction/imagenet_val.prototxt b/examples/feature_extraction/imagenet_val.prototxt index 83fe8c1a..b0a1cefa 100644 --- a/examples/feature_extraction/imagenet_val.prototxt +++ b/examples/feature_extraction/imagenet_val.prototxt @@ -1,24 +1,24 @@ name: "CaffeNet" -layers { +layer { name: "data" - type: IMAGE_DATA + type: "ImageData" top: "data" top: "label" + transform_param { + mirror: false + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } image_data_param { source: "examples/_temp/file_list.txt" batch_size: 50 new_height: 256 new_width: 256 } - transform_param { - crop_size: 227 - mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - mirror: false - } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" convolution_param { @@ -27,15 +27,15 @@ layers { stride: 4 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -44,9 +44,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -55,9 +55,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" convolution_param { @@ -67,15 +67,15 @@ layers { group: 2 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -84,9 +84,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -95,9 +95,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -106,15 +106,15 @@ layers { kernel_size: 3 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" convolution_param { @@ -124,15 +124,15 @@ layers { group: 
2 } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" convolution_param { @@ -142,15 +142,15 @@ layers { group: 2 } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -159,79 +159,79 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" inner_product_param { num_output: 4096 } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" inner_product_param { num_output: 4096 } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8" inner_product_param { num_output: 1000 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "fc8" top: "prob" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "prob" bottom: "label" top: "accuracy" } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc8" bottom: "label" top: "loss" diff --git a/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt b/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt index 5cd605bb..9dd2120a 100644 --- a/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt +++ b/examples/finetune_pascal_detection/pascal_finetune_trainval_test.prototxt @@ -1,9 +1,17 @@ name: "CaffeNet" -layers { +layer { name: "data" - type: WINDOW_DATA + type: "WindowData" top: "data" top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } window_data_param { source: "examples/finetune_pascal_detection/window_file_2007_trainval.txt" batch_size: 128 @@ -13,18 +21,20 @@ layers { context_pad: 16 crop_mode: "warp" } +} +layer { + name: "data" + type: "WindowData" + top: "data" + top: "label" + include { + phase: TEST + } transform_param { mirror: true crop_size: 227 mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" } - include: { phase: TRAIN } -} -layers { - name: "data" - type: WINDOW_DATA - top: "data" - top: "label" window_data_param { source: "examples/finetune_pascal_detection/window_file_2007_test.txt" batch_size: 128 @@ -34,22 +44,20 @@ layers { context_pad: 16 crop_mode: "warp" } - transform_param { - mirror: true - crop_size: 227 - mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" - } - include: { phase: TEST } } -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 96 kernel_size: 11 @@ -64,15 +72,15 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" 
bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -81,9 +89,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -92,15 +100,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 2 @@ -116,15 +128,15 @@ layers { } } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -133,9 +145,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -144,15 +156,19 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -167,21 +183,25 @@ layers { } } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 384 pad: 1 @@ -197,21 +217,25 @@ layers { } } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } convolution_param { num_output: 256 pad: 1 @@ -227,15 +251,15 @@ layers { } } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -244,15 +268,19 @@ layers { stride: 2 } } -layers { +layer { name: "fc6" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool5" top: "fc6" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -265,30 +293,34 @@ layers { } } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6" top: "fc6" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc6" top: "fc7" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 4096 weight_filler { @@ -301,30 +333,34 @@ layers { } } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7" top: "fc7" } -layers { +layer { name: "drop7" - type: DROPOUT + type: "Dropout" 
bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8_pascal" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc7" top: "fc8_pascal" - blobs_lr: 10 - blobs_lr: 20 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 10 + decay_mult: 1 + } + param { + lr_mult: 20 + decay_mult: 0 + } inner_product_param { num_output: 21 weight_filler { @@ -337,17 +373,19 @@ layers { } } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc8_pascal" bottom: "label" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc8_pascal" bottom: "label" top: "accuracy" - include { phase: TEST } + include { + phase: TEST + } } diff --git a/examples/hdf5_classification/train_val.prototxt b/examples/hdf5_classification/train_val.prototxt index b55b6644..b9ccc1a9 100644 --- a/examples/hdf5_classification/train_val.prototxt +++ b/examples/hdf5_classification/train_val.prototxt @@ -1,35 +1,43 @@ name: "LogisticRegressionNet" -layers { +layer { name: "data" - type: HDF5_DATA + type: "HDF5Data" top: "data" top: "label" + include { + phase: TRAIN + } hdf5_data_param { source: "examples/hdf5_classification/data/train.txt" batch_size: 10 } - include: { phase: TRAIN } } -layers { +layer { name: "data" - type: HDF5_DATA + type: "HDF5Data" top: "data" top: "label" + include { + phase: TEST + } hdf5_data_param { source: "examples/hdf5_classification/data/test.txt" batch_size: 10 } - include: { phase: TEST } } -layers { +layer { name: "fc1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "data" top: "fc1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 2 weight_filler { @@ -42,18 +50,20 @@ layers { } } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc1" bottom: "label" top: "loss" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc1" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } diff --git a/examples/hdf5_classification/train_val2.prototxt b/examples/hdf5_classification/train_val2.prototxt index b6a75650..f9ef731f 100644 --- a/examples/hdf5_classification/train_val2.prototxt +++ b/examples/hdf5_classification/train_val2.prototxt @@ -1,35 +1,43 @@ name: "LogisticRegressionNet" -layers { +layer { name: "data" - type: HDF5_DATA + type: "HDF5Data" top: "data" top: "label" + include { + phase: TRAIN + } hdf5_data_param { source: "examples/hdf5_classification/data/train.txt" batch_size: 10 } - include: { phase: TRAIN } } -layers { +layer { name: "data" - type: HDF5_DATA + type: "HDF5Data" top: "data" top: "label" + include { + phase: TEST + } hdf5_data_param { source: "examples/hdf5_classification/data/test.txt" batch_size: 10 } - include: { phase: TEST } } -layers { +layer { name: "fc1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "data" top: "fc1" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 40 weight_filler { @@ -42,21 +50,25 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "fc1" top: "fc1" } -layers { +layer { name: "fc2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "fc1" top: "fc2" - blobs_lr: 1 - blobs_lr: 2 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + 
decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } inner_product_param { num_output: 2 weight_filler { @@ -69,18 +81,20 @@ layers { } } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "fc2" bottom: "label" top: "loss" } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "fc2" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } diff --git a/examples/imagenet/bvlc_caffenet_full_conv.prototxt b/examples/imagenet/bvlc_caffenet_full_conv.prototxt index 395b0f01..7b22bfa1 100644 --- a/examples/imagenet/bvlc_caffenet_full_conv.prototxt +++ b/examples/imagenet/bvlc_caffenet_full_conv.prototxt @@ -5,9 +5,9 @@ input_dim: 1 input_dim: 3 input_dim: 451 input_dim: 451 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" convolution_param { @@ -16,15 +16,15 @@ layers { stride: 4 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "conv1" top: "conv1" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -33,9 +33,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm1" - type: LRN + type: "LRN" bottom: "pool1" top: "norm1" lrn_param { @@ -44,9 +44,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "norm1" top: "conv2" convolution_param { @@ -56,15 +56,15 @@ layers { group: 2 } } -layers { +layer { name: "relu2" - type: RELU + type: "ReLU" bottom: "conv2" top: "conv2" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -73,9 +73,9 @@ layers { stride: 2 } } -layers { +layer { name: "norm2" - type: LRN + type: "LRN" bottom: "pool2" top: "norm2" lrn_param { @@ -84,9 +84,9 @@ layers { beta: 0.75 } } -layers { +layer { name: "conv3" - type: CONVOLUTION + type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { @@ -95,15 +95,15 @@ layers { kernel_size: 3 } } -layers { +layer { name: "relu3" - type: RELU + type: "ReLU" bottom: "conv3" top: "conv3" } -layers { +layer { name: "conv4" - type: CONVOLUTION + type: "Convolution" bottom: "conv3" top: "conv4" convolution_param { @@ -113,15 +113,15 @@ layers { group: 2 } } -layers { +layer { name: "relu4" - type: RELU + type: "ReLU" bottom: "conv4" top: "conv4" } -layers { +layer { name: "conv5" - type: CONVOLUTION + type: "Convolution" bottom: "conv4" top: "conv5" convolution_param { @@ -131,15 +131,15 @@ layers { group: 2 } } -layers { +layer { name: "relu5" - type: RELU + type: "ReLU" bottom: "conv5" top: "conv5" } -layers { +layer { name: "pool5" - type: POOLING + type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { @@ -148,9 +148,9 @@ layers { stride: 2 } } -layers { +layer { name: "fc6-conv" - type: CONVOLUTION + type: "Convolution" bottom: "pool5" top: "fc6-conv" convolution_param { @@ -158,24 +158,24 @@ layers { kernel_size: 6 } } -layers { +layer { name: "relu6" - type: RELU + type: "ReLU" bottom: "fc6-conv" top: "fc6-conv" } -layers { +layer { name: "drop6" - type: DROPOUT + type: "Dropout" bottom: "fc6-conv" top: "fc6-conv" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc7-conv" - type: CONVOLUTION + type: "Convolution" bottom: "fc6-conv" top: "fc7-conv" convolution_param { @@ -183,24 +183,24 @@ layers { kernel_size: 1 } } -layers { +layer { name: "relu7" - type: RELU + type: "ReLU" bottom: "fc7-conv" top: "fc7-conv" } -layers { +layer { name: 
"drop7" - type: DROPOUT + type: "Dropout" bottom: "fc7-conv" top: "fc7-conv" dropout_param { dropout_ratio: 0.5 } } -layers { +layer { name: "fc8-conv" - type: CONVOLUTION + type: "Convolution" bottom: "fc7-conv" top: "fc8-conv" convolution_param { @@ -208,9 +208,9 @@ layers { kernel_size: 1 } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "fc8-conv" top: "prob" } diff --git a/examples/mnist/lenet.prototxt b/examples/mnist/lenet.prototxt index 491fad1b..cb42610f 100644 --- a/examples/mnist/lenet.prototxt +++ b/examples/mnist/lenet.prototxt @@ -4,13 +4,17 @@ input_dim: 64 input_dim: 1 input_dim: 28 input_dim: 28 -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 @@ -23,9 +27,9 @@ layers { } } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -34,13 +38,17 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 @@ -53,9 +61,9 @@ layers { } } } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -64,13 +72,17 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 500 weight_filler { @@ -81,19 +93,23 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "ip1" top: "ip1" } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -104,9 +120,9 @@ layers { } } } -layers { +layer { name: "prob" - type: SOFTMAX + type: "Softmax" bottom: "ip2" top: "prob" } diff --git a/examples/mnist/lenet_train_test.prototxt b/examples/mnist/lenet_train_test.prototxt index 2bd960b5..b18fc26c 100644 --- a/examples/mnist/lenet_train_test.prototxt +++ b/examples/mnist/lenet_train_test.prototxt @@ -1,42 +1,49 @@ name: "LeNet" -layers { +layer { name: "mnist" - type: DATA + type: "Data" top: "data" top: "label" - data_param { - source: "examples/mnist/mnist_train_lmdb" - backend: LMDB - batch_size: 64 + include { + phase: TRAIN } transform_param { scale: 0.00390625 } - include: { phase: TRAIN } + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 64 + backend: LMDB + } } -layers { +layer { name: "mnist" - type: DATA + type: "Data" top: "data" top: "label" - data_param { - source: "examples/mnist/mnist_test_lmdb" - backend: LMDB - batch_size: 100 + include { + phase: TEST } transform_param { scale: 0.00390625 } - include: { phase: TEST } + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } } - -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 @@ -49,9 +56,9 @@ layers { } } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" 
bottom: "conv1" top: "pool1" pooling_param { @@ -60,13 +67,17 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 @@ -79,9 +90,9 @@ layers { } } } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -90,13 +101,17 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 500 weight_filler { @@ -107,19 +122,23 @@ layers { } } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "ip1" top: "ip1" } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -130,17 +149,19 @@ layers { } } } -layers { +layer { name: "accuracy" - type: ACCURACY + type: "Accuracy" bottom: "ip2" bottom: "label" top: "accuracy" - include: { phase: TEST } + include { + phase: TEST + } } -layers { +layer { name: "loss" - type: SOFTMAX_LOSS + type: "SoftmaxWithLoss" bottom: "ip2" bottom: "label" top: "loss" diff --git a/examples/mnist/mnist_autoencoder.prototxt b/examples/mnist/mnist_autoencoder.prototxt index 0b33781a..563c7c91 100644 --- a/examples/mnist/mnist_autoencoder.prototxt +++ b/examples/mnist/mnist_autoencoder.prototxt @@ -1,67 +1,73 @@ name: "MNISTAutoencoder" -layers { - top: "data" +layer { name: "data" - type: DATA - data_param { - source: "examples/mnist/mnist_train_lmdb" - backend: LMDB - batch_size: 100 + type: "Data" + top: "data" + include { + phase: TRAIN } transform_param { scale: 0.0039215684 } - include: { phase: TRAIN } -} -layers { - top: "data" - name: "data" - type: DATA data_param { source: "examples/mnist/mnist_train_lmdb" - backend: LMDB batch_size: 100 + backend: LMDB + } +} +layer { + name: "data" + type: "Data" + top: "data" + include { + phase: TEST + stage: "test-on-train" } transform_param { scale: 0.0039215684 } - include: { - phase: TEST - stage: 'test-on-train' + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 100 + backend: LMDB } } -layers { - top: "data" +layer { name: "data" - type: DATA - data_param { - source: "examples/mnist/mnist_test_lmdb" - backend: LMDB - batch_size: 100 + type: "Data" + top: "data" + include { + phase: TEST + stage: "test-on-test" } transform_param { scale: 0.0039215684 } - include: { - phase: TEST - stage: 'test-on-test' + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB } } -layers { +layer { + name: "flatdata" + type: "Flatten" bottom: "data" top: "flatdata" - name: "flatdata" - type: FLATTEN } -layers { +layer { + name: "encode1" + type: "InnerProduct" bottom: "data" top: "encode1" - name: "encode1" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -75,21 +81,25 @@ layers { } } } -layers { +layer { + name: "encode1neuron" + type: "Sigmoid" bottom: "encode1" top: "encode1neuron" - name: "encode1neuron" - type: SIGMOID } -layers { +layer { + name: "encode2" + 
type: "InnerProduct" bottom: "encode1neuron" top: "encode2" - name: "encode2" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 500 weight_filler { @@ -103,21 +113,25 @@ layers { } } } -layers { +layer { + name: "encode2neuron" + type: "Sigmoid" bottom: "encode2" top: "encode2neuron" - name: "encode2neuron" - type: SIGMOID } -layers { +layer { + name: "encode3" + type: "InnerProduct" bottom: "encode2neuron" top: "encode3" - name: "encode3" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 250 weight_filler { @@ -131,21 +145,25 @@ layers { } } } -layers { +layer { + name: "encode3neuron" + type: "Sigmoid" bottom: "encode3" top: "encode3neuron" - name: "encode3neuron" - type: SIGMOID } -layers { +layer { + name: "encode4" + type: "InnerProduct" bottom: "encode3neuron" top: "encode4" - name: "encode4" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 30 weight_filler { @@ -159,15 +177,19 @@ layers { } } } -layers { +layer { + name: "decode4" + type: "InnerProduct" bottom: "encode4" top: "decode4" - name: "decode4" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 250 weight_filler { @@ -181,21 +203,25 @@ layers { } } } -layers { +layer { + name: "decode4neuron" + type: "Sigmoid" bottom: "decode4" top: "decode4neuron" - name: "decode4neuron" - type: SIGMOID } -layers { +layer { + name: "decode3" + type: "InnerProduct" bottom: "decode4neuron" top: "decode3" - name: "decode3" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 500 weight_filler { @@ -209,21 +235,25 @@ layers { } } } -layers { +layer { + name: "decode3neuron" + type: "Sigmoid" bottom: "decode3" top: "decode3neuron" - name: "decode3neuron" - type: SIGMOID } -layers { +layer { + name: "decode2" + type: "InnerProduct" bottom: "decode3neuron" top: "decode2" - name: "decode2" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 1000 weight_filler { @@ -237,21 +267,25 @@ layers { } } } -layers { +layer { + name: "decode2neuron" + type: "Sigmoid" bottom: "decode2" top: "decode2neuron" - name: "decode2neuron" - type: SIGMOID } -layers { +layer { + name: "decode1" + type: "InnerProduct" bottom: "decode2neuron" top: "decode1" - name: "decode1" - type: INNER_PRODUCT - blobs_lr: 1 - blobs_lr: 1 - weight_decay: 1 - weight_decay: 0 + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 1 + decay_mult: 0 + } inner_product_param { num_output: 784 weight_filler { @@ -265,25 +299,25 @@ layers { } } } -layers { +layer { + name: "loss" + type: "SigmoidCrossEntropyLoss" bottom: "decode1" bottom: "flatdata" top: "cross_entropy_loss" - name: "loss" - type: SIGMOID_CROSS_ENTROPY_LOSS loss_weight: 1 } -layers { +layer { + name: 
"decode1neuron" + type: "Sigmoid" bottom: "decode1" top: "decode1neuron" - name: "decode1neuron" - type: SIGMOID } -layers { +layer { + name: "loss" + type: "EuclideanLoss" bottom: "decode1neuron" bottom: "flatdata" top: "l2_error" - name: "loss" - type: EUCLIDEAN_LOSS loss_weight: 0 } diff --git a/examples/siamese/mnist_siamese.prototxt b/examples/siamese/mnist_siamese.prototxt index 8dd42e9c..0e903f85 100644 --- a/examples/siamese/mnist_siamese.prototxt +++ b/examples/siamese/mnist_siamese.prototxt @@ -4,23 +4,26 @@ input_dim: 10000 input_dim: 1 input_dim: 28 input_dim: 28 - -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 stride: 1 } } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -29,22 +32,26 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 stride: 1 } } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -53,42 +60,53 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 500 } } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "ip1" top: "ip1" } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 10 } } - -layers { +layer { name: "feat" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip2" top: "feat" - blobs_lr: 1 - blobs_lr: 2 + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } inner_product_param { num_output: 2 } diff --git a/examples/siamese/mnist_siamese_train_test.prototxt b/examples/siamese/mnist_siamese_train_test.prototxt index 92361c31..8ff864f5 100644 --- a/examples/siamese/mnist_siamese_train_test.prototxt +++ b/examples/siamese/mnist_siamese_train_test.prototxt @@ -1,50 +1,60 @@ name: "mnist_siamese_train_test" -layers { +layer { name: "pair_data" - type: DATA + type: "Data" top: "pair_data" top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } data_param { source: "examples/siamese/mnist_siamese_train_leveldb" - scale: 0.00390625 batch_size: 64 } - include: { phase: TRAIN } } -layers { +layer { name: "pair_data" - type: DATA + type: "Data" top: "pair_data" top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } data_param { source: "examples/siamese/mnist_siamese_test_leveldb" - scale: 0.00390625 batch_size: 100 } - include: { phase: TEST } } -layers { - name: "slice_pair" - type: SLICE - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 1 - slice_point: 1 - } +layer { + name: "slice_pair" + type: "Slice" + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + slice_point: 1 + } } - - - - -layers { +layer { name: "conv1" - type: CONVOLUTION + type: "Convolution" bottom: "data" top: "conv1" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "conv1_w" + lr_mult: 
1 + } + param { + name: "conv1_b" + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 @@ -56,12 +66,10 @@ layers { type: "constant" } } - param: "conv1_w" - param: "conv1_b" } -layers { +layer { name: "pool1" - type: POOLING + type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { @@ -70,13 +78,19 @@ layers { stride: 2 } } -layers { +layer { name: "conv2" - type: CONVOLUTION + type: "Convolution" bottom: "pool1" top: "conv2" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 @@ -88,12 +102,10 @@ layers { type: "constant" } } - param: "conv2_w" - param: "conv2_b" } -layers { +layer { name: "pool2" - type: POOLING + type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { @@ -102,13 +114,19 @@ layers { stride: 2 } } -layers { +layer { name: "ip1" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2" top: "ip1" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } inner_product_param { num_output: 500 weight_filler { @@ -118,22 +136,26 @@ layers { type: "constant" } } - param: "ip1_w" - param: "ip1_b" } -layers { +layer { name: "relu1" - type: RELU + type: "ReLU" bottom: "ip1" top: "ip1" } -layers { +layer { name: "ip2" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1" top: "ip2" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -143,17 +165,20 @@ layers { type: "constant" } } - param: "ip2_w" - param: "ip2_b" } - -layers { +layer { name: "feat" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip2" top: "feat" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } inner_product_param { num_output: 2 weight_filler { @@ -163,19 +188,20 @@ layers { type: "constant" } } - param: "feat_w" - param: "feat_b" } - - - -layers { +layer { name: "conv1_p" - type: CONVOLUTION + type: "Convolution" bottom: "data_p" top: "conv1_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } convolution_param { num_output: 20 kernel_size: 5 @@ -187,12 +213,10 @@ layers { type: "constant" } } - param: "conv1_w" - param: "conv1_b" } -layers { +layer { name: "pool1_p" - type: POOLING + type: "Pooling" bottom: "conv1_p" top: "pool1_p" pooling_param { @@ -201,13 +225,19 @@ layers { stride: 2 } } -layers { +layer { name: "conv2_p" - type: CONVOLUTION + type: "Convolution" bottom: "pool1_p" top: "conv2_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } convolution_param { num_output: 50 kernel_size: 5 @@ -219,12 +249,10 @@ layers { type: "constant" } } - param: "conv2_w" - param: "conv2_b" } -layers { +layer { name: "pool2_p" - type: POOLING + type: "Pooling" bottom: "conv2_p" top: "pool2_p" pooling_param { @@ -233,13 +261,19 @@ layers { stride: 2 } } -layers { +layer { name: "ip1_p" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "pool2_p" top: "ip1_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } inner_product_param { num_output: 500 weight_filler { @@ -249,22 +283,26 @@ layers { type: "constant" } } - param: "ip1_w" - param: "ip1_b" } -layers { +layer { name: "relu1_p" - type: RELU + type: "ReLU" bottom: "ip1_p" top: 
"ip1_p" } -layers { +layer { name: "ip2_p" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip1_p" top: "ip2_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } inner_product_param { num_output: 10 weight_filler { @@ -274,17 +312,20 @@ layers { type: "constant" } } - param: "ip2_w" - param: "ip2_b" } - -layers { +layer { name: "feat_p" - type: INNER_PRODUCT + type: "InnerProduct" bottom: "ip2_p" top: "feat_p" - blobs_lr: 1 - blobs_lr: 2 + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } inner_product_param { num_output: 2 weight_filler { @@ -294,20 +335,15 @@ layers { type: "constant" } } - param: "feat_w" - param: "feat_b" } - - - -layers { - name: "loss" - type: CONTRASTIVE_LOSS - contrastive_loss_param { - margin: 1.0 - } - bottom: "feat" - bottom: "feat_p" - bottom: "sim" - top: "loss" +layer { + name: "loss" + type: "ContrastiveLoss" + bottom: "feat" + bottom: "feat_p" + bottom: "sim" + top: "loss" + contrastive_loss_param { + margin: 1 + } } |