summaryrefslogtreecommitdiff
path: root/runtime/libs/tflite/port/1.13.1/src
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/libs/tflite/port/1.13.1/src')
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp103
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp405
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp400
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp3
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp196
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc32
6 files changed, 117 insertions, 1022 deletions
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp
deleted file mode 100644
index 61181465d..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/ext/kernels/Abs.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-
-#include <iostream>
-#include <cmath>
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace Abs
-{
-
-void *InitAbs(TfLiteContext *, const char *, size_t) { return nullptr; }
-
-void FreeAbs(TfLiteContext *, void *) {}
-
-TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 1);
- TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
-
- const TfLiteTensor *input = ::tflite::GetInput(context, node, 0);
- TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
-
- TF_LITE_ENSURE_EQ(context, input->type, output->type);
-
- return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
-}
-
-TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node)
-{
- const TfLiteTensor *input = ::tflite::GetInput(context, node, 0);
- TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
- size_t elements = ::tflite::NumElements(input);
- switch (input->type)
- {
- case kTfLiteFloat32:
- {
- auto *in = input->data.f;
- auto *in_end = in + elements;
- auto *out = output->data.f;
- for (; in < in_end; in++, out++)
- *out = std::abs(*in);
- return kTfLiteOk;
- }
- case kTfLiteInt32:
- {
- auto *in = input->data.i32;
- auto *in_end = in + elements;
- auto *out = output->data.i32;
- for (; in < in_end; in++, out++)
- *out = std::abs(*in);
- return kTfLiteOk;
- }
- case kTfLiteInt64:
- {
- auto *in = input->data.i64;
- auto *in_end = in + elements;
- auto *out = output->data.i64;
- for (; in < in_end; in++, out++)
- *out = std::abs(*in);
- return kTfLiteOk;
- }
- case kTfLiteUInt8:
- {
- auto *in = input->data.uint8;
- auto *in_end = in + elements;
- auto *out = output->data.uint8;
- for (; in < in_end; in++, out++)
- *out = *in;
- return kTfLiteOk;
- }
- default:
- {
- context->ReportError(context, "Input type %d is not supported", input->type);
- return kTfLiteError;
- }
- }
-}
-
-} // namespace Abs
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp
deleted file mode 100644
index 207de98f5..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/ext/kernels/TensorFlowMax.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-
-#include <iostream>
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace TensorFlowMax
-{
-
-struct TensorFlowMaxOp
-{
- TensorFlowMaxOp(TfLiteContext *context, TfLiteNode *node)
- {
- input = ::tflite::GetInput(context, node, 0);
- axis = ::tflite::GetInput(context, node, 1);
- output = ::tflite::GetOutput(context, node, 0);
- }
- const TfLiteTensor *input;
- const TfLiteTensor *axis;
- TfLiteTensor *output;
-};
-
-void *InitTensorFlowMax(TfLiteContext *context, const char *, size_t)
-{
- // Creates two temp tensors to store index and axis for internal
- // implementation only.
- auto *scratch_tensor_index = new int;
- context->AddTensors(context, 2, scratch_tensor_index);
- return scratch_tensor_index;
-}
-
-void FreeTensorFlowMax(TfLiteContext *, void *buffer)
-{
- delete static_cast<TensorFlowMaxOp *>(buffer);
-}
-
-// Resizes the temp tensor that stores resolved axis.
-TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowMaxOp *op_context,
- TfLiteTensor *resolved_axis)
-{
- TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
- axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis));
- return context->ResizeTensor(context, resolved_axis, axis_size);
-}
-
-// Resizes output array based on the input size and resolved axis.
-TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_context)
-{
- int64_t num_axis = ::tflite::NumElements(op_context->axis);
- TfLiteIntArray *input_dims = op_context->input->dims;
- int input_num_dims = ::tflite::NumDimensions(op_context->input);
- const int *axis = op_context->axis->data.i32;
-
- {
- // Calculates size of reducing axis.
- int64_t num_reduce_axis = num_axis;
- for (int64_t i = 0; i < num_axis; ++i)
- {
- int current = axis[i];
- if (current < 0)
- {
- current += input_num_dims;
- }
- TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
- for (int64_t j = 0; j < i; ++j)
- {
- int previous = axis[j];
- if (previous < 0)
- {
- previous += input_num_dims;
- }
- if (current == previous)
- {
- --num_reduce_axis;
- break;
- }
- }
- }
- // Determines output dimensions.
- int output_num_dims = ::tflite::NumDimensions(op_context->output);
- TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) ||
- (input_num_dims - num_reduce_axis == output_num_dims));
-
- if (input_num_dims == output_num_dims)
- {
- TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
- for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- int current = axis[axis_idx];
- output_dims->data[current] = 1;
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
- else
- {
- TfLiteIntArray *output_dims = TfLiteIntArrayCreate(output_num_dims);
- int num_skip_axis = 0;
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- bool is_axis = false;
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx)
- {
- ++num_skip_axis;
- is_axis = true;
- break;
- }
- }
- if (!is_axis)
- {
- output_dims->data[idx - num_skip_axis] = input_dims->data[idx];
- }
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
- }
-}
-
-// Initializes temp tensors to store index and resolved axis.
-TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node,
- TensorFlowMaxOp *op_context)
-{
- // Creates a temp index to iterate through input data.
- int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data);
- TfLiteIntArrayFree(node->temporaries);
- node->temporaries = TfLiteIntArrayCreate(2);
- node->temporaries->data[0] = *scratch_tensor_index;
- TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]];
- scratch_tensor->type = kTfLiteInt32;
- scratch_tensor->allocation_type = kTfLiteArenaRw;
- TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
- index_size->data[0] = ::tflite::NumDimensions(op_context->input);
- TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
-
- // Creates a temp tensor to store resolved axis given input data.
- node->temporaries->data[1] = *scratch_tensor_index + 1;
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- resolved_axis->type = kTfLiteInt32;
- return kTfLiteOk;
-}
-
-TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
-
- TensorFlowMaxOp op_context(context, node);
- TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
-
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Leaves work to Eval if axis is not constant; else resizes output.
- if (!::tflite::IsConstantTensor(op_context.axis))
- {
- ::tflite::SetTensorToDynamic(op_context.output);
- ::tflite::SetTensorToDynamic(resolved_axis);
- return kTfLiteOk;
- }
- resolved_axis->allocation_type = kTfLiteArenaRw;
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- return ResizeOutputTensor(context, &op_context);
-}
-
-// Gets offset of index if expanded on axis. When expanded, the flattened offset
-// will not change, if the output index changes on the given axis. For example,
-// if you have a 2D tensor and you are expanding to 3D on axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened
-// offset.
-inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- int out_idx = 0;
- for (int in_idx = 0; in_idx < num_dims; ++in_idx)
- {
- // if we need to expand this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (in_idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]);
- out_idx++;
- }
- else
- {
- offset = offset * static_cast<size_t>(dims[in_idx]);
- }
- }
- return offset;
-}
-
-// Gets offset of index if reducing on axis. When reducing, the flattened offset
-// will not change, if the input index changes on the given axis. For example,
-// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
-// offset.
-// TODO(kanlig): uses Dims to represent dimensions.
-inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- for (int idx = 0; idx < num_dims; ++idx)
- {
- // if we need to skip this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]);
- }
- }
- return offset;
-}
-
-// Gets next index to iterate through a multidimensional array.
-inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current)
-{
- int carry = 1;
- for (int idx = num_dims - 1; idx >= 0; --idx)
- {
- int current_val = current[idx] + carry;
- TF_LITE_ENSURE(context, (dims[idx] >= current_val));
- if (dims[idx] == current_val)
- {
- current[idx] = 0;
- }
- else
- {
- current[idx] = current_val;
- carry = 0;
- break;
- }
- }
- return (carry == 0);
-}
-
-template <typename T>
-inline TfLiteStatus
-CustomMax(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
- T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
- const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
-{
- // resolves axis.
- int num_resolved_axis = 0;
- for (int idx = 0; idx < num_axis_dimensions; ++idx)
- {
- int current = axis[idx];
- TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0));
- if (current < 0)
- {
- current += input_num_dims;
- }
- bool is_dup = false;
- for (int j = 0; j < num_resolved_axis; ++j)
- {
- if (resolved_axis[j] == current)
- {
- is_dup = true;
- break;
- }
- }
- if (!is_dup)
- {
- resolved_axis[num_resolved_axis++] = current;
- }
- }
-
- TF_LITE_ENSURE(context, (input_num_dims > 0));
- TF_LITE_ENSURE(context, (input_dims != nullptr));
- TF_LITE_ENSURE(context, (temp_index != nullptr));
-
- // resets output data.
- for (int idx = 0; idx < output_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, output_num_dims, output_dims, temp_index))
- {
- size_t output_offset =
- ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr);
- size_t input_offset = ExpandedInputOffset(input_num_dims, input_dims, temp_index,
- num_resolved_axis, resolved_axis);
- output_data[output_offset] = input_data[input_offset];
- }
-
- // resets temp index.
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
-
- // iterates through input_data.
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, input_num_dims, input_dims, temp_index))
- {
- size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr);
- size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index,
- num_resolved_axis, resolved_axis);
- if (output_data[output_offset] < input_data[input_offset])
- {
- output_data[output_offset] = input_data[input_offset];
- }
- }
-
- return kTfLiteOk;
-}
-
-TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
-{
-
- TensorFlowMaxOp op_context(context, node);
- int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis));
- TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Resize the output tensor if the output tensor is dynamic.
- if (::tflite::IsDynamicTensor(op_context.output))
- {
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
- }
-
- TfLiteStatus returnStatus = kTfLiteOk;
- switch (op_context.input->type)
- {
- case kTfLiteFloat32:
- returnStatus = CustomMax<float>(
- context, op_context.input->data.f, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt32:
- returnStatus = CustomMax<int>(context, op_context.input->data.i32,
- op_context.input->dims->data, op_context.input->dims->size,
- op_context.output->data.i32, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteUInt8:
- returnStatus = CustomMax<uint8_t>(
- context, op_context.input->data.uint8, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.uint8,
- op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt64:
- returnStatus = CustomMax<int64_t>(
- context, op_context.input->data.i64, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- default:
- returnStatus = kTfLiteError;
- }
-
- return returnStatus;
-}
-
-} // namespace TensorFlowMax
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp
deleted file mode 100644
index 40f266baa..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/ext/kernels/TensorFlowSum.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-
-#include <iostream>
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace TensorFlowSum
-{
-
-struct TensorFlowSumOp
-{
- TensorFlowSumOp(TfLiteContext *context, TfLiteNode *node)
- {
- input = ::tflite::GetInput(context, node, 0);
- axis = ::tflite::GetInput(context, node, 1);
- output = ::tflite::GetOutput(context, node, 0);
- }
- const TfLiteTensor *input;
- const TfLiteTensor *axis;
- TfLiteTensor *output;
-};
-
-void *InitTensorFlowSum(TfLiteContext *context, const char *, size_t)
-{
- // Creates two temp tensors to store index and axis for internal
- // implementation only.
- auto *scratch_tensor_index = new int;
- context->AddTensors(context, 2, scratch_tensor_index);
- return scratch_tensor_index;
-}
-
-void FreeTensorFlowSum(TfLiteContext *, void *buffer)
-{
- delete static_cast<TensorFlowSumOp *>(buffer);
-}
-
-// Resizes the temp tensor that stores resolved axis.
-TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowSumOp *op_context,
- TfLiteTensor *resolved_axis)
-{
- TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
- axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis));
- return context->ResizeTensor(context, resolved_axis, axis_size);
-}
-
-// Resizes output array based on the input size and resolved axis.
-TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_context)
-{
- int64_t num_axis = ::tflite::NumElements(op_context->axis);
- TfLiteIntArray *input_dims = op_context->input->dims;
- int input_num_dims = ::tflite::NumDimensions(op_context->input);
- const int *axis = op_context->axis->data.i32;
-
- {
- // Calculates size of reducing axis.
- int64_t num_reduce_axis = num_axis;
- for (int64_t i = 0; i < num_axis; ++i)
- {
- int current = axis[i];
- if (current < 0)
- {
- current += input_num_dims;
- }
- TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
- for (int64_t j = 0; j < i; ++j)
- {
- int previous = axis[j];
- if (previous < 0)
- {
- previous += input_num_dims;
- }
- if (current == previous)
- {
- --num_reduce_axis;
- break;
- }
- }
- }
- // Determines output dimensions.
- int output_num_dims = ::tflite::NumDimensions(op_context->output);
- TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) ||
- (input_num_dims - num_reduce_axis == output_num_dims));
-
- if (input_num_dims == output_num_dims)
- {
- TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
- for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- int current = axis[axis_idx];
- output_dims->data[current] = 1;
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
- else
- {
- TfLiteIntArray *output_dims = TfLiteIntArrayCreate(output_num_dims);
- int num_skip_axis = 0;
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- bool is_axis = false;
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx)
- {
- ++num_skip_axis;
- is_axis = true;
- break;
- }
- }
- if (!is_axis)
- {
- output_dims->data[idx - num_skip_axis] = input_dims->data[idx];
- }
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
- }
-}
-
-// Initializes temp tensors to store index and resolved axis.
-TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node,
- TensorFlowSumOp *op_context)
-{
- // Creates a temp index to iterate through input data.
- int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data);
- TfLiteIntArrayFree(node->temporaries);
- node->temporaries = TfLiteIntArrayCreate(2);
- node->temporaries->data[0] = *scratch_tensor_index;
- TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]];
- scratch_tensor->type = kTfLiteInt32;
- scratch_tensor->allocation_type = kTfLiteArenaRw;
- TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
- index_size->data[0] = ::tflite::NumDimensions(op_context->input);
- TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
-
- // Creates a temp tensor to store resolved axis given input data.
- node->temporaries->data[1] = *scratch_tensor_index + 1;
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- resolved_axis->type = kTfLiteInt32;
- return kTfLiteOk;
-}
-
-TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
-
- TensorFlowSumOp op_context(context, node);
- TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
-
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Leaves work to Eval if axis is not constant; else resizes output.
- if (!::tflite::IsConstantTensor(op_context.axis))
- {
- ::tflite::SetTensorToDynamic(op_context.output);
- ::tflite::SetTensorToDynamic(resolved_axis);
- return kTfLiteOk;
- }
- resolved_axis->allocation_type = kTfLiteArenaRw;
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- return ResizeOutputTensor(context, &op_context);
-}
-
-// Gets offset of index if expanded on axis. When expanded, the flattened offset
-// will not change, if the output index changes on the given axis. For example,
-// if you have a 2D tensor and you are expanding to 3D on axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened
-// offset.
-inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- int out_idx = 0;
- for (int in_idx = 0; in_idx < num_dims; ++in_idx)
- {
- // if we need to expand this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (in_idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]);
- out_idx++;
- }
- else
- {
- offset = offset * static_cast<size_t>(dims[in_idx]);
- }
- }
- return offset;
-}
-
-// Gets offset of index if reducing on axis. When reducing, the flattened offset
-// will not change, if the input index changes on the given axis. For example,
-// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
-// offset.
-// TODO(kanlig): uses Dims to represent dimensions.
-inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- for (int idx = 0; idx < num_dims; ++idx)
- {
- // if we need to skip this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]);
- }
- }
- return offset;
-}
-
-// Gets next index to iterate through a multidimensional array.
-inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current)
-{
- int carry = 1;
- for (int idx = num_dims - 1; idx >= 0; --idx)
- {
- int current_val = current[idx] + carry;
- TF_LITE_ENSURE(context, (dims[idx] >= current_val));
- if (dims[idx] == current_val)
- {
- current[idx] = 0;
- }
- else
- {
- current[idx] = current_val;
- carry = 0;
- break;
- }
- }
- return (carry == 0);
-}
-
-template <typename T>
-inline TfLiteStatus
-CustomSum(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
- T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
- const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
-{
- // resolves axis.
- int num_resolved_axis = 0;
- for (int idx = 0; idx < num_axis_dimensions; ++idx)
- {
- int current = axis[idx];
- TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0));
- if (current < 0)
- {
- current += input_num_dims;
- }
- bool is_dup = false;
- for (int j = 0; j < num_resolved_axis; ++j)
- {
- if (resolved_axis[j] == current)
- {
- is_dup = true;
- break;
- }
- }
- if (!is_dup)
- {
- resolved_axis[num_resolved_axis++] = current;
- }
- }
-
- TF_LITE_ENSURE(context, (input_num_dims > 0));
- TF_LITE_ENSURE(context, (input_dims != nullptr));
- TF_LITE_ENSURE(context, (temp_index != nullptr));
-
- // resets output data.
- for (int idx = 0; idx < output_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, output_num_dims, output_dims, temp_index))
- {
- size_t output_offset =
- ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr);
- output_data[output_offset] = 0;
- }
-
- // resets temp index.
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
-
- // iterates through input_data.
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, input_num_dims, input_dims, temp_index))
- {
- size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr);
- size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index,
- num_resolved_axis, resolved_axis);
- output_data[output_offset] += input_data[input_offset];
- }
-
- return kTfLiteOk;
-}
-
-TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node)
-{
-
- TensorFlowSumOp op_context(context, node);
- int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis));
- TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Resize the output tensor if the output tensor is dynamic.
- if (::tflite::IsDynamicTensor(op_context.output))
- {
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
- }
-
- TfLiteStatus returnStatus = kTfLiteOk;
- switch (op_context.input->type)
- {
- case kTfLiteFloat32:
- returnStatus = CustomSum<float>(
- context, op_context.input->data.f, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt32:
- returnStatus = CustomSum<int>(context, op_context.input->data.i32,
- op_context.input->dims->data, op_context.input->dims->size,
- op_context.output->data.i32, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteUInt8:
- returnStatus = CustomSum<uint8_t>(
- context, op_context.input->data.uint8, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.uint8,
- op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt64:
- returnStatus = CustomSum<int64_t>(
- context, op_context.input->data.i64, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- default:
- returnStatus = kTfLiteError;
- }
-
- return returnStatus;
-}
-
-} // namespace TensorFlowSum
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp
index b2088b277..89f81b612 100644
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp
+++ b/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp
@@ -296,10 +296,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_FILL, Register_FILL());
AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD());
- AddCustom("TensorFlowMax", nnfw::tflite::custom::Register_TensorFlowMax());
AddCustom("SquaredDifference", nnfw::tflite::custom::Register_SquaredDifference());
- AddCustom("TensorFlowSum", nnfw::tflite::custom::Register_TensorFlowSum());
- AddCustom("Abs", nnfw::tflite::custom::Register_Abs());
// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default.
diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
index 99272f0e5..2924c44e9 100644
--- a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
+++ b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
@@ -35,6 +35,8 @@ limitations under the License.
#include <sys/system_properties.h>
#endif
+#include <memory>
+
namespace nnfw {
namespace tflite {
@@ -159,6 +161,9 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
uint32_t* no_of_operands_added,
std::vector<int64_t>* nnapi_ids) {
uint32_t next_id = 0;
+ // Allocate temporary buffer to save casted boolean tensor
+ std::unordered_map<size_t, std::unique_ptr<uint8_t[]>> const_boolean_tensors;
+
for (size_t i = 0; i < subgraph->tensors_size(); i++) {
// Skip temporaries and RNN back-edges.
if ((*nnapi_ids)[i] == kOperandNotNeeded) continue;
@@ -196,9 +201,7 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
case kTfLiteBool:
// Workaround to pass bool type under NNAPI
// Use bool type using ANEURALNETWORKS_TENSOR_QUANT8_ASYMM with scale = 1.0f and zero_point = 0
- nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
- scale = 1.0f;
- zeroPoint = 0;
+ nn_type = ANEURALNETWORKS_TENSOR_BOOL8;
break;
default:
logError("Unsupported tensor type %d", tensor->type);
@@ -243,7 +246,19 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
// TODO(aselle): Based on Michael's suggestion, limiting this to read
// only memory
if (tensor->allocation_type == kTfLiteMmapRo) {
- if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
+ if (tensor->type == kTfLiteBool)
+ {
+ // ANEURALNETWORKS_TENSOR_BOOL8 element size is 8 bits; NOTE(review): the true→0x00 / false→0xff mapping below looks inverted vs. the BOOL8 spec (0 = false, nonzero = true) — confirm
+ size_t elements = tensor->bytes / sizeof(bool);
+ const_boolean_tensors[i] = std::make_unique<uint8_t[]>(elements);
+ for (size_t idx = 0; idx < elements; idx++)
+ {
+ const_boolean_tensors[i].get()[idx] = (tensor->data.b[idx] ? 0x00 : 0xff);
+ }
+ RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_setOperandValue(
+ nn_model, next_id, const_boolean_tensors[i].get(), tensor->bytes));
+ }
+ else if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
static_cast<const ::tflite::Allocation*>(tensor->allocation))) {
RETURN_ERROR_IF_NN_FAILED(
ANeuralNetworksModel_setOperandValueFromMemory(
@@ -703,19 +718,32 @@ TfLiteStatus AddOpsAndParams(
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_TOPK_V2;
break;
+ case tflite::BuiltinOperator_GREATER:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_GREATER;
+ break;
+ case tflite::BuiltinOperator_GREATER_EQUAL:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_GREATER_EQUAL;
+ break;
+ case tflite::BuiltinOperator_LESS:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LESS;
+ break;
+ case tflite::BuiltinOperator_LESS_EQUAL:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LESS_EQUAL;
+ break;
case tflite::BuiltinOperator_GATHER:
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_GATHER;
add_gather_params(node.builtin_data);
break;
case tflite::BuiltinOperator_SPLIT:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_SPLIT;
add_split_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_SPLIT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_NEG:
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_NEG;
@@ -733,21 +761,14 @@ TfLiteStatus AddOpsAndParams(
reinterpret_cast<uint32_t*>(node.outputs->data)));
continue;
case tflite::BuiltinOperator_PRELU:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_PRELU_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_PRELU;
+ break;
case tflite::BuiltinOperator_ARG_MAX:
check_arg_max_input(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_ARGMAX_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_ARGMAX;
+ break;
case tflite::BuiltinOperator_PACK:
add_pack_ex_params(node.builtin_data);
CHECK_NN(ANeuralNetworksModel_addOperationEx(
@@ -773,66 +794,40 @@ TfLiteStatus AddOpsAndParams(
nn_op_type = ANEURALNETWORKS_RSQRT;
break;
case tflite::BuiltinOperator_EQUAL:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_EQUAL_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_EQUAL;
+ break;
case tflite::BuiltinOperator_NOT_EQUAL:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_NOT_EQUAL_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_NOT_EQUAL;
+ break;
case tflite::BuiltinOperator_SUM:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_REDUCE_SUM;
add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_SUM_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_REDUCE_MAX:
- add_reducer_v12_params(node.builtin_data);
+ add_reducer_params(node.builtin_data);
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
break;
case tflite::BuiltinOperator_REDUCE_MIN:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_REDUCE_MIN;
add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_MIN_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_LOGICAL_AND:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_AND_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_AND;
+ break;
case tflite::BuiltinOperator_LOGICAL_OR:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_OR_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_OR;
+ break;
case tflite::BuiltinOperator_LOGICAL_NOT:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_NOT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_NOT;
+ break;
case tflite::BuiltinOperator_SQUARED_DIFFERENCE:
CHECK_NN(ANeuralNetworksModel_addOperationEx(
nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
@@ -851,6 +846,26 @@ TfLiteStatus AddOpsAndParams(
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_ABS;
break;
+ case tflite::BuiltinOperator_ONE_HOT:
+ add_one_hot_tensor_inputs_as_scalar();
+ add_one_hot_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_ONE_HOT_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue; // _EX operator should use `continue` to skip addOperation.
+ case tflite::BuiltinOperator_SIN:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_SIN;
+ break;
+ case tflite::BuiltinOperator_SHAPE:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_SHAPE_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue; // _EX operator should use `continue` to skip addOperation.
case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
case tflite::BuiltinOperator_LSH_PROJECTION:
case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN:
@@ -881,14 +896,14 @@ TfLiteStatus AddOpsAndParams(
//case tflite::BuiltinOperator_MINIMUM:
//case tflite::BuiltinOperator_ARG_MAX:
case tflite::BuiltinOperator_ARG_MIN:
- case tflite::BuiltinOperator_GREATER:
- case tflite::BuiltinOperator_GREATER_EQUAL:
- case tflite::BuiltinOperator_LESS:
- case tflite::BuiltinOperator_LESS_EQUAL:
+ //case tflite::BuiltinOperator_GREATER:
+ //case tflite::BuiltinOperator_GREATER_EQUAL:
+ //case tflite::BuiltinOperator_LESS:
+ //case tflite::BuiltinOperator_LESS_EQUAL:
//case tflite::BuiltinOperator_NEG:
case tflite::BuiltinOperator_SELECT:
// case tflite::BuiltinOperator_SLICE:
- case tflite::BuiltinOperator_SIN:
+ //case tflite::BuiltinOperator_SIN:
case tflite::BuiltinOperator_LOG:
//case tflite::BuiltinOperator_TRANSPOSE_CONV:
case tflite::BuiltinOperator_TILE:
@@ -902,12 +917,12 @@ TfLiteStatus AddOpsAndParams(
case tflite::BuiltinOperator_REDUCE_PROD:
//case tflite::BuiltinOperator_SQRT:
//case tflite::BuiltinOperator_RSQRT:
- case tflite::BuiltinOperator_SHAPE:
+ //case tflite::BuiltinOperator_SHAPE:
case tflite::BuiltinOperator_POW:
case tflite::BuiltinOperator_FAKE_QUANT:
//case tflite::BuiltinOperator_PACK:
//case tflite::BuiltinOperator_LOGICAL_OR:
- case tflite::BuiltinOperator_ONE_HOT:
+ //case tflite::BuiltinOperator_ONE_HOT:
//case tflite::BuiltinOperator_LOGICAL_AND:
//case tflite::BuiltinOperator_LOGICAL_NOT:
//case tflite::BuiltinOperator_UNPACK:
@@ -928,13 +943,7 @@ TfLiteStatus AddOpsAndParams(
break;
case tflite::BuiltinOperator_CUSTOM: {
std::string custom_name(registration.custom_name);
- if (custom_name.compare("TensorFlowMax") == 0) {
- add_reducer_v12_params(node.builtin_data);
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
- break;
- }
- else if (custom_name.compare("SquaredDifference") == 0) {
+ if (custom_name.compare("SquaredDifference") == 0) {
CHECK_NN(ANeuralNetworksModel_addOperationEx(
nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
static_cast<uint32_t>(augmented_inputs.size()),
@@ -943,21 +952,6 @@ TfLiteStatus AddOpsAndParams(
reinterpret_cast<uint32_t*>(node.outputs->data)));
continue;
}
- else if (custom_name.compare("TensorFlowSum") == 0) {
- add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_SUM_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- }
- else if (custom_name.compare("Abs") == 0) {
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_ABS;
- break;
- }
logError("Custom operations are not supported when using NNAPI.");
return kTfLiteError;
break;
@@ -1110,6 +1104,7 @@ TfLiteStatus NNAPIDelegate::Invoke(::tflite::Subgraph* subgraph) {
// TODO(aselle): This should be called setInputValue maybe to be cons.
TfLiteTensor* tensor = subgraph->tensor(input);
// Workaround to pass bool type under NNAPI
+ // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
if (tensor->type == kTfLiteBool)
{
CHECK_NN(ANeuralNetworksExecution_setInput(
@@ -1128,6 +1123,7 @@ TfLiteStatus NNAPIDelegate::Invoke(::tflite::Subgraph* subgraph) {
TfLiteTensor* tensor = subgraph->tensor(output);
// Workaround to pass bool type under NNAPI
+ // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
if (tensor->type == kTfLiteBool)
{
CHECK_NN(ANeuralNetworksExecution_setOutput(
diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
index 5b718029b..ee758105f 100644
--- a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
+++ b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
@@ -135,7 +135,7 @@
assert(count == 1);
};
- auto add_reducer_v12_params = [&add_scalar_bool8](void* data) {
+ auto add_reducer_params = [&add_scalar_bool8](void* data) {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(data);
if (builtin == nullptr)
{
@@ -147,14 +147,24 @@
}
};
- auto add_reducer_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteReducerParams*>(data);
- if (builtin == nullptr)
- {
- add_scalar_int32(0);
- }
- else
- {
- add_scalar_int32(builtin->keep_dims);
- }
+ auto add_one_hot_tensor_inputs_as_scalar = [subgraph, &node, &augmented_inputs,
+ &add_scalar_float32]() {
+ assert(augmented_inputs.size() == 4);
+ const auto on_value_idx = node.inputs->data[2];
+ const auto off_value_idx = node.inputs->data[3];
+ const auto on_value_tensor = subgraph->tensor(on_value_idx);
+ const auto off_value_tensor = subgraph->tensor(off_value_idx);
+ assert(on_value_tensor->type == kTfLiteFloat32);
+ assert(off_value_tensor->type == kTfLiteFloat32);
+ const auto on_value = *on_value_tensor->data.f;
+ const auto off_value = *off_value_tensor->data.f;
+ augmented_inputs.pop_back();
+ augmented_inputs.pop_back();
+ add_scalar_float32(on_value);
+ add_scalar_float32(off_value);
+ };
+
+ auto add_one_hot_params = [&add_scalar_int32](void* data) {
+ const auto* builtin = reinterpret_cast<TfLiteOneHotParams*>(data);
+ add_scalar_int32(builtin->axis);
};