Diffstat (limited to 'runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc')
-rw-r--r--  runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc  119
1 file changed, 119 insertions, 0 deletions
diff --git a/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc b/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc
new file mode 100644
index 000000000..055f71590
--- /dev/null
+++ b/runtime/neurun/backend/cpu/kernel/FullyConnectedLayer.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FullyConnectedLayer.h"
+
+#include <cker/operation/FullyConnected.h>
+
+#include "OperationUtils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace kernel
+{
+
+FullyConnectedLayer::FullyConnectedLayer()
+ : _inputData(), _weightsData(), _biasData(), _outputData(), _inputDescr(), _weightsDescr(),
+ _biasDescr(), _outputDescr(), _activation(ir::Activation::NONE),
+ _inputType(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
+void FullyConnectedLayer::fullyConnectedFloat32()
+{
+ float output_activation_min, output_activation_max;
+ CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
+
+ nnfw::cker::FullyConnectedParams op_params;
+ op_params.float_activation_min = output_activation_min;
+ op_params.float_activation_max = output_activation_max;
+
+ nnfw::cker::FullyConnected(op_params, convertToExtendedCkerShape(_inputDescr), _inputData.f,
+ convertToExtendedCkerShape(_weightsDescr), _weightsData.f,
+ convertToExtendedCkerShape(_biasDescr), _biasData.f,
+ convertToExtendedCkerShape(_outputDescr), _outputData.f);
+}
+
+// Note: the original Android NN implementation guarded this quantized path with an
+// executionMutex to protect non-threadsafe resources such as gemmlowp::GemmContext;
+void FullyConnectedLayer::fullyConnectedQuant8()
+{
+ float real_multiplier = 0.0f;
+ int32_t output_multiplier = 0;
+ int32_t output_shift = 0;
+ int32_t output_activation_min = 0;
+ int32_t output_activation_max = 0;
+ GetQuantizedConvolutionMultiplier(_inputDescr, _weightsDescr, _biasDescr, _outputDescr,
+ &real_multiplier);
+ QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
+ CalculateActivationRangeUint8(_activation, _outputDescr, &output_activation_min,
+ &output_activation_max);
+
+ nnfw::cker::FullyConnectedParams op_params;
+ op_params.input_offset = -_inputDescr.offset;
+ op_params.weights_offset = -_weightsDescr.offset;
+ op_params.output_offset = _outputDescr.offset;
+ op_params.output_multiplier = output_multiplier;
+ op_params.output_shift = output_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+
+ nnfw::cker::FullyConnected(op_params, convertToExtendedCkerShape(_inputDescr), _inputData.u8,
+ convertToExtendedCkerShape(_weightsDescr), _weightsData.u8,
+ convertToExtendedCkerShape(_biasDescr), _biasData.i32,
+ convertToExtendedCkerShape(_outputDescr), _outputData.u8);
+}
+
+void FullyConnectedLayer::configure(uint8_t *inputData, const TensorDescriptor inputDescr,
+ uint8_t *weightsData, const TensorDescriptor weightsDescr,
+ uint8_t *biasData, const TensorDescriptor biasDescr,
+ ir::Activation activation, uint8_t *outputData,
+ const TensorDescriptor outputDescr)
+{
+ _inputData.u8 = inputData;
+ _inputDescr = inputDescr;
+ _inputType = inputDescr.type;
+ _weightsData.u8 = weightsData;
+ _weightsDescr = weightsDescr;
+ _biasData.u8 = biasData;
+ _biasDescr = biasDescr;
+ _activation = activation;
+ _outputData.u8 = outputData;
+ _outputDescr = outputDescr;
+}
+
+void FullyConnectedLayer::run()
+{
+ if (_inputType == OperandType::FLOAT32)
+ {
+ fullyConnectedFloat32();
+ }
+ else if (_inputType == OperandType::QUANT8_ASYMM)
+ {
+ fullyConnectedQuant8();
+ }
+}
+
+} // namespace kernel
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
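
For context, a minimal usage sketch of this kernel follows (not part of the patch). It assumes TensorDescriptor is default-constructible and exposes 'type' and 'dimensions' members as suggested by OperationUtils, and that OperandType is visible through FullyConnectedLayer.h; the shapes and values are purely illustrative. The caller fills one descriptor per operand, then calls configure() followed by run(), which dispatches on the input type.

// Hypothetical usage sketch; field names and shapes are assumptions, not part of this commit.
#include "FullyConnectedLayer.h"

#include <cstdint>
#include <vector>

void fullyConnectedExample()
{
  using namespace neurun::backend::cpu::kernel;

  // Assumed shapes: input [1, 4], weights [2, 4] (row-major), bias [2], output [1, 2].
  TensorDescriptor inputDescr, weightsDescr, biasDescr, outputDescr;
  inputDescr.type = weightsDescr.type = biasDescr.type = outputDescr.type = OperandType::FLOAT32;
  inputDescr.dimensions = {1, 4};   // 'dimensions' is an assumed member name
  weightsDescr.dimensions = {2, 4};
  biasDescr.dimensions = {2};
  outputDescr.dimensions = {1, 2};

  std::vector<float> input{1.f, 2.f, 3.f, 4.f};
  std::vector<float> weights{1.f, 0.f, 0.f, 0.f,   // picks input[0]
                             0.f, 1.f, 0.f, 0.f};  // picks input[1]
  std::vector<float> bias{0.5f, -0.5f};
  std::vector<float> output(2);

  FullyConnectedLayer layer;
  layer.configure(reinterpret_cast<uint8_t *>(input.data()), inputDescr,
                  reinterpret_cast<uint8_t *>(weights.data()), weightsDescr,
                  reinterpret_cast<uint8_t *>(bias.data()), biasDescr,
                  neurun::ir::Activation::NONE,
                  reinterpret_cast<uint8_t *>(output.data()), outputDescr);

  // _inputType is FLOAT32, so run() dispatches to fullyConnectedFloat32().
  layer.run();
  // Under these assumed shapes and values, output holds {1.5f, 1.5f}.
}

The same configure()/run() sequence applies to QUANT8_ASYMM operands, with uint8_t input/weights/output buffers, an int32_t bias buffer, and scale/offset quantization parameters in each descriptor; run() then dispatches to fullyConnectedQuant8().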