From 8b2aa7093cba002a5f286d47658de72a961d1299 Mon Sep 17 00:00:00 2001
From: Carl Doersch
Date: Fri, 6 Nov 2015 14:41:30 -0800
Subject: Better normalization options for SoftmaxWithLoss layer.

---
 include/caffe/loss_layers.hpp | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'include')

diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp
index d08ad9b6..d6569c4a 100644
--- a/include/caffe/loss_layers.hpp
+++ b/include/caffe/loss_layers.hpp
@@ -747,6 +747,12 @@ class SoftmaxWithLossLayer : public LossLayer<Dtype> {
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
 
+  /// Read the normalization mode parameter and compute the normalizer based
+  /// on the blob size. If normalization_mode is VALID, the count of valid
+  /// outputs will be read from valid_count, unless it is -1 in which case
+  /// all outputs are assumed to be valid.
+  virtual Dtype get_normalizer(
+      LossParameter_NormalizationMode normalization_mode, int valid_count);
 
   /// The internal SoftmaxLayer used to map predictions to a distribution.
   shared_ptr<Layer<Dtype> > softmax_layer_;
@@ -760,9 +766,8 @@ class SoftmaxWithLossLayer : public LossLayer<Dtype> {
   bool has_ignore_label_;
   /// The label indicating that an instance should be ignored.
   int ignore_label_;
-  /// Whether to normalize the loss by the total number of values present
-  /// (otherwise just by the batch size).
-  bool normalize_;
+  /// How to normalize the output loss.
+  LossParameter_NormalizationMode normalization_;
 
   int softmax_axis_, outer_num_, inner_num_;
 };
-- 
cgit v1.2.3
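
Note: the diff above is limited to 'include', so the matching definition in the
layer's .cpp file is not part of this patch. As a non-authoritative sketch, the
declared get_normalizer method could be implemented along the following lines.
The enum value names (FULL, VALID, BATCH_SIZE, NONE) and the protobuf-generated
LossParameter_NormalizationMode_Name helper are assumptions here, since the
caffe.proto side of the change is also outside this diff.

template <typename Dtype>
Dtype SoftmaxWithLossLayer<Dtype>::get_normalizer(
    LossParameter_NormalizationMode normalization_mode, int valid_count) {
  Dtype normalizer;
  switch (normalization_mode) {
    case LossParameter_NormalizationMode_FULL:
      // Normalize by every output in the blob, valid or not.
      normalizer = Dtype(outer_num_ * inner_num_);
      break;
    case LossParameter_NormalizationMode_VALID:
      if (valid_count == -1) {
        // Per the doc comment, -1 means all outputs are assumed valid.
        normalizer = Dtype(outer_num_ * inner_num_);
      } else {
        normalizer = Dtype(valid_count);
      }
      break;
    case LossParameter_NormalizationMode_BATCH_SIZE:
      // outer_num_ counts instances along the batch axis.
      normalizer = Dtype(outer_num_);
      break;
    case LossParameter_NormalizationMode_NONE:
      normalizer = Dtype(1);
      break;
    default:
      LOG(FATAL) << "Unknown normalization mode: "
          << LossParameter_NormalizationMode_Name(normalization_mode);
  }
  // Clamp to 1 so the division stays finite even if, e.g., ignore_label_
  // masks out every output in the batch.
  return std::max(Dtype(1.0), normalizer);
}

Returning at least 1 is a deliberate guard: with the VALID mode and an
ignore label, valid_count can legitimately be zero, and dividing the loss
by zero would produce NaNs.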