path: root/compute/cker/include/cker/operation/reference/SoftMax.h
Diffstat (limited to 'compute/cker/include/cker/operation/reference/SoftMax.h')
-rw-r--r--  compute/cker/include/cker/operation/reference/SoftMax.h | 70
1 file changed, 0 insertions(+), 70 deletions(-)
diff --git a/compute/cker/include/cker/operation/reference/SoftMax.h b/compute/cker/include/cker/operation/reference/SoftMax.h
deleted file mode 100644
index 420cb319b..000000000
--- a/compute/cker/include/cker/operation/reference/SoftMax.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NNFW_CKER_REFERENCE_SOFTMAX_H__
-#define __NNFW_CKER_REFERENCE_SOFTMAX_H__
-
-#include "cker/Shape.h"
-#include "cker/Types.h"
-
-#include <cmath>
-
-namespace nnfw
-{
-namespace cker
-{
-namespace reference
-{
-
-inline void Softmax(const SoftmaxParams &params, const Shape &input_shape, const float *input_data,
- const Shape &output_shape, float *output_data)
-{
- const int trailing_dim = input_shape.DimensionsCount() - 1;
- const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
- const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
-
- for (int i = 0; i < outer_size; ++i)
- {
- // Find max element value which we'll use to ensure numerical stability
- // taking advantage of the following equality:
- // exp(x[i])/sum(exp(x[i])) == exp(x[i]+C)/sum(exp(x[i]+C))
- float max = std::numeric_limits<float>::lowest();
- for (int c = 0; c < depth; ++c)
- {
- max = std::max(max, input_data[i * depth + c]);
- }
-
- // Compute sum.
- float sum = 0.f;
- for (int c = 0; c < depth; ++c)
- {
- sum += std::exp((input_data[i * depth + c] - max) * params.beta);
- }
-
- // Compute result.
- for (int c = 0; c < depth; ++c)
- {
- output_data[i * depth + c] = std::exp((input_data[i * depth + c] - max) * params.beta) / sum;
- }
- }
-}
-
-} // namespace reference
-} // namespace cker
-} // namespace nnfw
-
-#endif // __NNFW_CKER_REFERENCE_SOFTMAX_H__
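
For reference, the deleted kernel uses the standard max-subtraction trick: because exp(x - m) / sum(exp(x - m)) == exp(x) / sum(exp(x)), subtracting the row maximum before exponentiation avoids overflow without changing the result. The standalone C++ sketch below illustrates the same computation over a single row; it is a minimal illustration only, does not depend on the cker headers, and the `beta` argument mirrors `params.beta` in the removed code.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>
#include <vector>

// Numerically stable softmax over one row, mirroring the deleted reference
// kernel: subtract the row maximum before exponentiation, then normalize.
std::vector<float> SoftmaxRow(const std::vector<float> &input, float beta)
{
  const float max = *std::max_element(input.begin(), input.end());

  float sum = 0.f;
  std::vector<float> output(input.size());
  for (size_t i = 0; i < input.size(); ++i)
  {
    output[i] = std::exp((input[i] - max) * beta);
    sum += output[i];
  }
  for (float &v : output)
  {
    v /= sum;
  }
  return output;
}

int main()
{
  // Example row; with beta == 1.0 the outputs sum to 1.
  for (float v : SoftmaxRow({1.f, 2.f, 3.f}, 1.0f))
  {
    std::printf("%f\n", v);
  }
  return 0;
}

The deleted function applies exactly this per-row computation to each of the outer_size rows of a flattened tensor, with depth elements per row.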