Diffstat (limited to 'runtime/neurun/backend/cpu/kernel/OperationUtils.cc')
 runtime/neurun/backend/cpu/kernel/OperationUtils.cc | 273 ++++++++++++++++++++
 1 file changed, 273 insertions(+), 0 deletions(-)
diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc
new file mode 100644
index 000000000..8aa15dcbd
--- /dev/null
+++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationUtils.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <limits>
+#include <stdexcept>
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace kernel
+{
+
+uint32_t getNumberOfDimensions(const TensorDescriptor &descr) { return descr.dimensions.size(); }
+
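+// e.g. a descriptor with dimensions {2, 3, 4} holds 2 * 3 * 4 = 24 elements.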
+uint32_t getNumberOfElements(const TensorDescriptor &descr)
+{
+ uint32_t count = 1;
+ for (size_t i = 0; i < descr.dimensions.size(); i++)
+ {
+ count *= descr.dimensions[i];
+ }
+ return count;
+}
+
+uint32_t getSizeOfDimension(const TensorDescriptor &descr, uint32_t dimensionIdx)
+{
+ if (dimensionIdx >= descr.dimensions.size())
+ {
+    // TODO: log the error
+ return 0;
+ }
+ return descr.dimensions[dimensionIdx];
+}
+
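+// Splits a real multiplier into a Q0.31 fixed-point value plus a power-of-two
+// exponent. Usage sketch with an illustrative value:
+//   int32_t mult = 0;
+//   int shift = 0;
+//   QuantizeMultiplier(0.75, &mult, &shift);
+//   // std::frexp(0.75) -> q = 0.75, shift = 0,
+//   // so mult = round(0.75 * 2^31) = 1610612736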
+void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
+{
+ if (double_multiplier == 0.)
+ {
+ *quantized_multiplier = 0;
+ *shift = 0;
+ return;
+ }
+ const double q = std::frexp(double_multiplier, shift);
+ auto q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
+
+ assert(q_fixed <= (1ll << 31));
+ if (q_fixed == (1ll << 31))
+ {
+ q_fixed /= 2;
+ ++*shift;
+ }
+ assert(q_fixed <= std::numeric_limits<int32_t>::max());
+ *quantized_multiplier = static_cast<int32_t>(q_fixed);
+}
+
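+// Worked example: input scale 0.5 and filter scale 0.25 give a product scale
+// of 0.125 (which the bias scale must match); with output scale 0.25 the
+// resulting real multiplier is 0.125 / 0.25 = 0.5, satisfying the asserts.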
+void GetQuantizedConvolutionMultiplier(const TensorDescriptor &inputDescr,
+ const TensorDescriptor &filterDescr,
+ const TensorDescriptor &biasDescr,
+ const TensorDescriptor &outputDescr, float *multiplier)
+{
+ const float input_product_scale = inputDescr.scale * filterDescr.scale;
+ const float bias_scale = biasDescr.scale;
+ const float output_scale = outputDescr.scale;
+ // The following conditions must be guaranteed by the training pipeline.
+ UNUSED_RELEASE(bias_scale);
+ assert(std::abs(input_product_scale - bias_scale) <=
+ 1e-6 * std::min(input_product_scale, bias_scale));
+ assert(input_product_scale >= 0);
+ assert(input_product_scale < output_scale);
+ *multiplier = input_product_scale / output_scale;
+}
+
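+// Same decomposition as QuantizeMultiplier, restricted to multipliers > 1 so
+// the exponent is a left shift. e.g. double_multiplier = 3.0 yields
+// q = 0.75, *left_shift = 2 and *quantized_multiplier = 1610612736.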
+void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
+ int *left_shift)
+{
+ assert(double_multiplier > 1.);
+ const double q = std::frexp(double_multiplier, left_shift);
+ int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
+ assert(q_fixed <= (1ll << 31));
+ if (q_fixed == (1ll << 31))
+ {
+ q_fixed /= 2;
+ ++*left_shift;
+ }
+ assert(*left_shift >= 0);
+ assert(q_fixed <= std::numeric_limits<int32_t>::max());
+ *quantized_multiplier = static_cast<int32_t>(q_fixed);
+}
+
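+// Maps a fused activation onto the [min, max] clamp applied to float outputs,
+// e.g. a convolution fused with RELU6 clamps its results to [0.0, 6.0].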
+void CalculateActivationRangeFloat(ir::Activation activation, float *activation_min,
+ float *activation_max)
+{
+ if (activation == ir::Activation::RELU)
+ {
+ *activation_min = 0.f;
+ *activation_max = std::numeric_limits<float>::max();
+ }
+ else if (activation == ir::Activation::RELU6)
+ {
+ *activation_min = 0.f;
+ *activation_max = 6.f;
+ }
+ else if (activation == ir::Activation::RELU1)
+ {
+ *activation_min = -1.f;
+ *activation_max = 1.f;
+ }
+ else if (activation == ir::Activation::SIGMOID)
+ {
+ *activation_min = 0.f;
+ *activation_max = 1.f;
+ }
+ else if (activation == ir::Activation::NONE)
+ {
+ *activation_min = std::numeric_limits<float>::lowest();
+ *activation_max = std::numeric_limits<float>::max();
+ }
+  else
+  {
+    // Fail loudly instead of leaving the clamp range uninitialized.
+    throw std::runtime_error("Unsupported fused activation function.");
+  }
+}
+
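+// Quantized counterpart of the float version above: the clamp bounds live in
+// the output tensor's quantized domain. Sketch with an assumed output
+// descriptor descr having scale = 0.5 and offset = 10:
+//   int32_t act_min = 0, act_max = 0;
+//   CalculateActivationRangeUint8(ir::Activation::RELU6, descr, &act_min, &act_max);
+//   // act_min = 10 + round(0.0 / 0.5) = 10
+//   // act_max = min(255, 10 + round(6.0 / 0.5)) = 22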
+void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescriptor &outputDescr,
+ int32_t *act_min, int32_t *act_max)
+{
+ const int32_t qmin = std::numeric_limits<uint8_t>::min();
+ const int32_t qmax = std::numeric_limits<uint8_t>::max();
+ const auto scale = outputDescr.scale;
+ const auto zero_point = outputDescr.offset;
+ auto quantize = [scale, zero_point](float f) {
+ return zero_point + static_cast<int32_t>(std::round(f / scale));
+ };
+ if (activation == ir::Activation::RELU)
+ {
+ *act_min = std::max(qmin, quantize(0.0));
+ *act_max = qmax;
+ }
+ else if (activation == ir::Activation::RELU6)
+ {
+ *act_min = std::max(qmin, quantize(0.0));
+ *act_max = std::min(qmax, quantize(6.0));
+ }
+ else if (activation == ir::Activation::RELU1)
+ {
+ *act_min = std::max(qmin, quantize(-1.0));
+ *act_max = std::min(qmax, quantize(1.0));
+ }
+ else if (activation == ir::Activation::SIGMOID)
+ {
+ *act_min = std::max(qmin, quantize(0.0));
+ *act_max = std::min(qmax, quantize(1.0));
+ }
+ else if (activation == ir::Activation::NONE)
+ {
+ *act_min = qmin;
+ *act_max = qmax;
+ }
+  else
+  {
+    // Fail loudly instead of leaving the clamp range uninitialized.
+    throw std::runtime_error("Unsupported fused activation function.");
+  }
+}
+
+bool HaveSameShapes(const TensorDescriptor *input1, const TensorDescriptor *input2)
+{
+ if (input1 == input2)
+ return true;
+  if (input1 == NULL || input2 == NULL)
+    return false;
+
+ if (getNumberOfDimensions(*input1) != getNumberOfDimensions(*input2))
+ return false;
+
+ for (uint32_t i = 0; i < getNumberOfDimensions(*input1); i++)
+ if (input1->dimensions[i] != input2->dimensions[i])
+ return false;
+
+ return true;
+}
+
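+// e.g. input_integer_bits = 4 and input_left_shift = 0 give
+// max_input_rescaled = 15 * 2^27 = 2013265920, which still fits in int32_t;
+// fixed-point kernels may treat inputs beyond this radius as saturated.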
+int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift)
+{
+ const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) *
+ (1ll << (31 - input_integer_bits)) / (1ll << input_left_shift);
+  // Tighten the bound with std::floor: if the exact value were used and the
+  // scaled difference landed exactly on the maximum, it could overflow, so we
+  // keep a value of strictly smaller magnitude.
+ return static_cast<int32_t>(std::floor(max_input_rescaled));
+}
+
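+// e.g. an NCHW operand of shape [1, 3, 224, 224] is described to the CPU
+// kernels as NHWC [1, 224, 224, 3] via the {0, 2, 3, 1} permutation below.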
+TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout)
+{
+ TensorDescriptor descriptor;
+
+ auto dims = o.shape().dims();
+ if (frontend_layout == ir::Layout::NCHW && o.shape().rank() == 4)
+ {
+ // NCHW -> NHWC
+ uint32_t permutation[4] = {0, 2, 3, 1};
+ for (int i = 0; i < o.shape().rank(); ++i)
+ {
+ dims.at(i) = o.shape().dim(permutation[i]);
+ }
+ }
+ descriptor.dimensions = std::vector<uint32_t>(dims.begin(), dims.end());
+ descriptor.type = static_cast<OperandType>(static_cast<int32_t>(o.typeInfo().type()));
+ descriptor.scale = o.typeInfo().scale();
+ descriptor.offset = o.typeInfo().offset();
+
+  // The CPU backend assumes that the rank of neurun's internal shapes is at most 4
+ assert(descriptor.dimensions.size() <= 4);
+
+ return descriptor;
+}
+
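+// Returns the byte size of a tensor's payload, e.g. a FLOAT32 tensor with
+// dimensions {2, 3, 4} occupies 4 * 24 = 96 bytes.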
+uint32_t sizeOfData(OperandType type, const std::vector<uint32_t> &dimensions)
+{
+ uint32_t size = 4;
+
+ switch (type)
+ {
+ case OperandType::FLOAT32:
+ case OperandType::INT32:
+ case OperandType::UINT32:
+ size = 4;
+ break;
+ case OperandType::BOOL8:
+ case OperandType::QUANT8_ASYMM:
+ case OperandType::QUANT8_SYMM:
+ size = 1;
+ break;
+ default:
+      throw std::runtime_error("Unsupported operand type.");
+ }
+
+ for (auto d : dimensions)
+ {
+ size *= d;
+ }
+
+ return size;
+}
+
+} // namespace kernel
+} // namespace cpu
+} // namespace backend
+} // namespace neurun