author     Sergio <sguada@gmail.com>  2014-10-03 17:14:20 -0700
committer  Sergio <sguada@gmail.com>  2014-10-15 18:52:07 -0700
commit     a1f83100858121fd094df625ed40af8c5afb42e5 (patch)
tree       203f952caaf4ff31d2cdff29fae21910cb468663 /examples
parent     bdd0a0074f3831cd8894f039e0ea1b5fcaddd90f (diff)
Adapt lenet_multistep_solver.prototxt to current solvers
Diffstat (limited to 'examples')
-rw-r--r--  examples/mnist/lenet_multistep_solver.prototxt (renamed from examples/lenet/lenet_multistep_solver.prototxt)  13
-rw-r--r--  examples/mnist/readme.md  3
2 files changed, 8 insertions, 8 deletions
diff --git a/examples/lenet/lenet_multistep_solver.prototxt b/examples/mnist/lenet_multistep_solver.prototxt
index fadd7c90..7ee9fb60 100644
--- a/examples/lenet/lenet_multistep_solver.prototxt
+++ b/examples/mnist/lenet_multistep_solver.prototxt
@@ -1,7 +1,5 @@
-# The training protocol buffer definition
-train_net: "lenet_train.prototxt"
-# The testing protocol buffer definition
-test_net: "lenet_test.prototxt"
+# The train/test net protocol buffer definition
+net: "examples/mnist/lenet_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
@@ -27,7 +25,6 @@ display: 100
max_iter: 10000
# snapshot intermediate results
snapshot: 5000
-snapshot_prefix: "lenet"
-# solver mode: 0 for CPU and 1 for GPU
-solver_mode: 1
-device_id: 1
+snapshot_prefix: "examples/mnist/lenet_multistep"
+# solver mode: CPU or GPU
+solver_mode: GPU
diff --git a/examples/mnist/readme.md b/examples/mnist/readme.md
index 44e0091f..1f8a8696 100644
--- a/examples/mnist/readme.md
+++ b/examples/mnist/readme.md
@@ -283,3 +283,6 @@ You just did! All the training was carried out on the GPU. In fact, if you would
and you will be using CPU for training. Isn't that easy?
MNIST is a small dataset, so training with GPU does not really introduce too much benefit due to communication overheads. On larger datasets with more complex models, such as ImageNet, the computation speed difference will be more significant.
+
+### How to reduce the learning rate at fixed steps?
+Look at lenet_multistep_solver.prototxt.
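
For reference, Caffe's multistep schedule is configured in the solver prototxt through lr_policy and one stepvalue entry per scheduled drop. A minimal sketch of the relevant SolverParameter fields (the base_lr, gamma, and step values below are illustrative, not copied from the renamed file):

# multiply the learning rate by gamma at each listed iteration
base_lr: 0.01
lr_policy: "multistep"
gamma: 0.9
stepvalue: 5000
stepvalue: 7000
stepvalue: 8000

With these values the rate starts at 0.01, drops to 0.009 at iteration 5000, to 0.0081 at 7000, and so on, since each stepvalue passed multiplies the current rate by gamma.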