summaryrefslogtreecommitdiff
path: root/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp')
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp196
1 files changed, 96 insertions, 100 deletions
diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
index 99272f0e5..2924c44e9 100644
--- a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
+++ b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
@@ -35,6 +35,8 @@ limitations under the License.
#include <sys/system_properties.h>
#endif
+#include <memory>
+
namespace nnfw {
namespace tflite {
@@ -159,6 +161,9 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
uint32_t* no_of_operands_added,
std::vector<int64_t>* nnapi_ids) {
uint32_t next_id = 0;
+ // Allocate temporary buffer to save casted boolean tensor
+ std::unordered_map<size_t, std::unique_ptr<uint8_t[]>> const_boolean_tensors;
+
for (size_t i = 0; i < subgraph->tensors_size(); i++) {
// Skip temporaries and RNN back-edges.
if ((*nnapi_ids)[i] == kOperandNotNeeded) continue;
@@ -196,9 +201,7 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
case kTfLiteBool:
// Workaround to pass bool type under NNAPI
// Use bool type using ANEURALNETWORKS_TENSOR_QUANT8_ASYMM with scale = 1.0f and zero_point = 0
- nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
- scale = 1.0f;
- zeroPoint = 0;
+ nn_type = ANEURALNETWORKS_TENSOR_BOOL8;
break;
default:
logError("Unsupported tensor type %d", tensor->type);
@@ -243,7 +246,19 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
// TODO(aselle): Based on Michael's suggestion, limiting this to read
// only memory
if (tensor->allocation_type == kTfLiteMmapRo) {
- if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
+ if (tensor->type == kTfLiteBool)
+ {
+ // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
+ size_t elements = tensor->bytes / sizeof(bool);
+ const_boolean_tensors[i] = std::make_unique<uint8_t[]>(elements);
+ for (size_t idx = 0; idx < elements; idx++)
+ {
+ const_boolean_tensors[i].get()[idx] = (tensor->data.b[idx] ? 0x00 : 0xff);
+ }
+ RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_setOperandValue(
+ nn_model, next_id, const_boolean_tensors[i].get(), tensor->bytes));
+ }
+ else if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
static_cast<const ::tflite::Allocation*>(tensor->allocation))) {
RETURN_ERROR_IF_NN_FAILED(
ANeuralNetworksModel_setOperandValueFromMemory(
@@ -703,19 +718,32 @@ TfLiteStatus AddOpsAndParams(
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_TOPK_V2;
break;
+ case tflite::BuiltinOperator_GREATER:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_GREATER;
+ break;
+ case tflite::BuiltinOperator_GREATER_EQUAL:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_GREATER_EQUAL;
+ break;
+ case tflite::BuiltinOperator_LESS:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LESS;
+ break;
+ case tflite::BuiltinOperator_LESS_EQUAL:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LESS_EQUAL;
+ break;
case tflite::BuiltinOperator_GATHER:
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_GATHER;
add_gather_params(node.builtin_data);
break;
case tflite::BuiltinOperator_SPLIT:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_SPLIT;
add_split_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_SPLIT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_NEG:
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_NEG;
@@ -733,21 +761,14 @@ TfLiteStatus AddOpsAndParams(
reinterpret_cast<uint32_t*>(node.outputs->data)));
continue;
case tflite::BuiltinOperator_PRELU:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_PRELU_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_PRELU;
+ break;
case tflite::BuiltinOperator_ARG_MAX:
check_arg_max_input(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_ARGMAX_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_ARGMAX;
+ break;
case tflite::BuiltinOperator_PACK:
add_pack_ex_params(node.builtin_data);
CHECK_NN(ANeuralNetworksModel_addOperationEx(
@@ -773,66 +794,40 @@ TfLiteStatus AddOpsAndParams(
nn_op_type = ANEURALNETWORKS_RSQRT;
break;
case tflite::BuiltinOperator_EQUAL:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_EQUAL_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_EQUAL;
+ break;
case tflite::BuiltinOperator_NOT_EQUAL:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_NOT_EQUAL_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_NOT_EQUAL;
+ break;
case tflite::BuiltinOperator_SUM:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_REDUCE_SUM;
add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_SUM_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_REDUCE_MAX:
- add_reducer_v12_params(node.builtin_data);
+ add_reducer_params(node.builtin_data);
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
break;
case tflite::BuiltinOperator_REDUCE_MIN:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_REDUCE_MIN;
add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_MIN_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_LOGICAL_AND:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_AND_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_AND;
+ break;
case tflite::BuiltinOperator_LOGICAL_OR:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_OR_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_OR;
+ break;
case tflite::BuiltinOperator_LOGICAL_NOT:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_NOT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_NOT;
+ break;
case tflite::BuiltinOperator_SQUARED_DIFFERENCE:
CHECK_NN(ANeuralNetworksModel_addOperationEx(
nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
@@ -851,6 +846,26 @@ TfLiteStatus AddOpsAndParams(
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_ABS;
break;
+ case tflite::BuiltinOperator_ONE_HOT:
+ add_one_hot_tensor_inputs_as_scalar();
+ add_one_hot_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_ONE_HOT_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue; // _EX operator should use `continue` to skip addOperation.
+ case tflite::BuiltinOperator_SIN:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_SIN;
+ break;
+ case tflite::BuiltinOperator_SHAPE:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_SHAPE_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue; // _EX operator should use `continue` to skip addOperation.
case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
case tflite::BuiltinOperator_LSH_PROJECTION:
case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN:
@@ -881,14 +896,14 @@ TfLiteStatus AddOpsAndParams(
//case tflite::BuiltinOperator_MINIMUM:
//case tflite::BuiltinOperator_ARG_MAX:
case tflite::BuiltinOperator_ARG_MIN:
- case tflite::BuiltinOperator_GREATER:
- case tflite::BuiltinOperator_GREATER_EQUAL:
- case tflite::BuiltinOperator_LESS:
- case tflite::BuiltinOperator_LESS_EQUAL:
+ //case tflite::BuiltinOperator_GREATER:
+ //case tflite::BuiltinOperator_GREATER_EQUAL:
+ //case tflite::BuiltinOperator_LESS:
+ //case tflite::BuiltinOperator_LESS_EQUAL:
//case tflite::BuiltinOperator_NEG:
case tflite::BuiltinOperator_SELECT:
// case tflite::BuiltinOperator_SLICE:
- case tflite::BuiltinOperator_SIN:
+ //case tflite::BuiltinOperator_SIN:
case tflite::BuiltinOperator_LOG:
//case tflite::BuiltinOperator_TRANSPOSE_CONV:
case tflite::BuiltinOperator_TILE:
@@ -902,12 +917,12 @@ TfLiteStatus AddOpsAndParams(
case tflite::BuiltinOperator_REDUCE_PROD:
//case tflite::BuiltinOperator_SQRT:
//case tflite::BuiltinOperator_RSQRT:
- case tflite::BuiltinOperator_SHAPE:
+ //case tflite::BuiltinOperator_SHAPE:
case tflite::BuiltinOperator_POW:
case tflite::BuiltinOperator_FAKE_QUANT:
//case tflite::BuiltinOperator_PACK:
//case tflite::BuiltinOperator_LOGICAL_OR:
- case tflite::BuiltinOperator_ONE_HOT:
+ //case tflite::BuiltinOperator_ONE_HOT:
//case tflite::BuiltinOperator_LOGICAL_AND:
//case tflite::BuiltinOperator_LOGICAL_NOT:
//case tflite::BuiltinOperator_UNPACK:
@@ -928,13 +943,7 @@ TfLiteStatus AddOpsAndParams(
break;
case tflite::BuiltinOperator_CUSTOM: {
std::string custom_name(registration.custom_name);
- if (custom_name.compare("TensorFlowMax") == 0) {
- add_reducer_v12_params(node.builtin_data);
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
- break;
- }
- else if (custom_name.compare("SquaredDifference") == 0) {
+ if (custom_name.compare("SquaredDifference") == 0) {
CHECK_NN(ANeuralNetworksModel_addOperationEx(
nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
static_cast<uint32_t>(augmented_inputs.size()),
@@ -943,21 +952,6 @@ TfLiteStatus AddOpsAndParams(
reinterpret_cast<uint32_t*>(node.outputs->data)));
continue;
}
- else if (custom_name.compare("TensorFlowSum") == 0) {
- add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_SUM_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- }
- else if (custom_name.compare("Abs") == 0) {
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_ABS;
- break;
- }
logError("Custom operations are not supported when using NNAPI.");
return kTfLiteError;
break;
@@ -1110,6 +1104,7 @@ TfLiteStatus NNAPIDelegate::Invoke(::tflite::Subgraph* subgraph) {
// TODO(aselle): This should be called setInputValue maybe to be cons.
TfLiteTensor* tensor = subgraph->tensor(input);
// Workaround to pass bool type under NNAPI
+ // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
if (tensor->type == kTfLiteBool)
{
CHECK_NN(ANeuralNetworksExecution_setInput(
@@ -1128,6 +1123,7 @@ TfLiteStatus NNAPIDelegate::Invoke(::tflite::Subgraph* subgraph) {
TfLiteTensor* tensor = subgraph->tensor(output);
// Workaround to pass bool type under NNAPI
+ // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
if (tensor->type == kTfLiteBool)
{
CHECK_NN(ANeuralNetworksExecution_setOutput(