author     Tim Meinhardt <meinhardt.tim@gmail.com>   2015-11-06 14:51:46 +0100
committer  Tim Meinhardt <meinhardt.tim@gmail.com>   2015-11-06 14:51:46 +0100
commit     987b3d8794e3fe27b4402d52fb3921555104b451 (patch)
tree       35f6fc80d5d6b85c3b657f18c610f53d27f9f362 /include
parent     0ec116e39c1433feaf9756cd2651c51d810fcbc6 (diff)
Fix ArgMaxLayer::Reshape for any num of bottom axes
Diffstat (limited to 'include')
-rw-r--r--   include/caffe/common_layers.hpp   14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp
index 72f39ee0..d42d15c4 100644
--- a/include/caffe/common_layers.hpp
+++ b/include/caffe/common_layers.hpp
@@ -53,8 +53,8 @@ class ArgMaxLayer : public Layer<Dtype> {
* -# @f$ (N \times C \times H \times W) @f$
* the inputs @f$ x @f$
* @param top output Blob vector (length 1)
- * -# @f$ (N \times 1 \times K \times 1) @f$ or, if out_max_val
- * @f$ (N \times 2 \times K \times 1) @f$ unless axis set than e.g.
+ * -# @f$ (N \times 1 \times K) @f$ or, if out_max_val
+ * @f$ (N \times 2 \times K) @f$ unless axis is set, then e.g.
* @f$ (N \times K \times H \times W) @f$ if axis == 1
* the computed outputs @f$
* y_n = \arg\max\limits_i x_{ni}
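The shape documentation above reflects the Reshape fix named in the commit subject; the Reshape code itself lives in src/caffe/layers/argmax_layer.cpp and is not part of this include/ diff. A minimal sketch of the idea, assuming the layer's usual members (has_axis_, axis_, top_k_, out_max_val_): pad the top shape to at least three axes so shape[1] and shape[2] stay in range even when the bottom blob has only two axes, e.g. the output of an InnerProduct layer.

template <typename Dtype>
void ArgMaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Use at least 3 axes so the (N x 1 x K) / (N x 2 x K) layout below
  // can be written even for 2-axis bottoms such as InnerProduct output.
  int num_top_axes = bottom[0]->num_axes();
  if (num_top_axes < 3) { num_top_axes = 3; }
  std::vector<int> shape(num_top_axes, 1);
  if (has_axis_) {
    // axis set: keep the bottom shape and replace the argmax axis with K.
    shape = bottom[0]->shape();
    shape[axis_] = top_k_;
  } else {
    shape[0] = bottom[0]->shape(0);  // N
    shape[2] = top_k_;               // top-K indices
    if (out_max_val_) {
      shape[1] = 2;                  // channel 0: indices, channel 1: values
    }
  }
  top[0]->Reshape(shape);
}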
@@ -81,13 +81,13 @@ class ArgMaxLayer : public Layer<Dtype> {
* each channel in the data (i.e. axis 1), it subtracts the mean and divides
* by the variance, where both statistics are computed across both spatial
* dimensions and across the different examples in the batch.
- *
+ *
* By default, during training time, the network is computing global mean/
* variance statistics via a running average, which is then used at test
* time to allow deterministic outputs for each input. You can manually
* toggle whether the network is accumulating or using the statistics via the
* use_global_stats option. IMPORTANT: for this feature to work, you MUST
- * set the learning rate to zero for all three parameter blobs, i.e.,
+ * set the learning rate to zero for all three parameter blobs, i.e.,
* param {lr_mult: 0} three times in the layer definition.
*
* Note that the original paper also included a per-channel learned bias and
@@ -96,10 +96,10 @@ class ArgMaxLayer : public Layer<Dtype> {
* followed by a Convolution layer with output the same size as the current.
* This produces a channel-specific value that can be added or multiplied by
* the BatchNorm layer's output.
- *
+ *
* [1] S. Ioffe and C. Szegedy, "Batch Normalization: Accelerating Deep Network
- * Training by Reducing Internal Covariate Shift." arXiv preprint
- * arXiv:1502.03167 (2015).
+ * Training by Reducing Internal Covariate Shift." arXiv preprint
+ * arXiv:1502.03167 (2015).
*
* TODO(dox): thorough documentation for Forward, Backward, and proto params.
*/
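The use_global_stats note above maps directly onto the layer definition. A minimal prototxt sketch of what the comment asks for, with use_global_stats toggled explicitly and lr_mult: 0 for all three parameter blobs; layer and blob names are illustrative:

layer {
  name: "conv1_bn"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  batch_norm_param { use_global_stats: true }
  # The three internal blobs hold the running mean, running variance and
  # the moving-average factor; none of them should be updated by the solver.
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
}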