Diffstat (limited to 'libs/support')
-rw-r--r--  libs/support/CMakeLists.txt                                            2
-rw-r--r--  libs/support/nnapi/CMakeLists.txt                                      6
-rw-r--r--  libs/support/nnapi/src/Utils.cpp                                      29
-rw-r--r--  libs/support/nnapi/src/feature/Utils.cpp                              43
-rw-r--r--  libs/support/tflite/CMakeLists.txt                                    12
-rw-r--r--  libs/support/tflite/src/Diff.cpp                                     538
-rw-r--r--  libs/support/tflite/src/FeatureView.cpp                               73
-rw-r--r--  libs/support/tflite/src/Quantization.cpp                              22
-rw-r--r--  libs/support/tflite/src/TensorShapeUtils.cpp                          51
-rw-r--r--  libs/support/tflite/src/TensorView.test.cpp                           53
-rw-r--r--  libs/support/tflite/src/interp/FlatBufferBuilder.cpp                  46
-rw-r--r--  libs/support/tflite/src/interp/FunctionBuilder.cpp                    40
-rw-r--r--  libs/support/tflite/src/kernels/RSQRT.cpp                             83
-rw-r--r--  libs/support/tflite/src/kernels/SquaredDifference.cpp                115
-rw-r--r--  libs/support/tflite/src/kernels/TensorFlowMax.cpp                    390
-rw-r--r--  libs/support/tflite/src/kernels/register.cpp                         169
-rw-r--r--  libs/support/tflite/src/nnapi_delegate.cpp                           720
-rw-r--r--  libs/support/tflite/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc  41
18 files changed, 0 insertions, 2433 deletions
diff --git a/libs/support/CMakeLists.txt b/libs/support/CMakeLists.txt
deleted file mode 100644
index c91677266..000000000
--- a/libs/support/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-add_subdirectory(tflite)
-add_subdirectory(nnapi)
diff --git a/libs/support/nnapi/CMakeLists.txt b/libs/support/nnapi/CMakeLists.txt
deleted file mode 100644
index 193bcbd4e..000000000
--- a/libs/support/nnapi/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-file(GLOB_RECURSE SOURCES "src/*.cpp")
-
-add_library(nnfw_support_nnapi ${SOURCES})
-set_property(TARGET nnfw_support_nnapi PROPERTY POSITION_INDEPENDENT_CODE ON)
-target_include_directories(nnfw_support_nnapi PUBLIC ${CMAKE_SOURCE_DIR}/include)
-target_link_libraries(nnfw_support_nnapi static_nnfw_util)
diff --git a/libs/support/nnapi/src/Utils.cpp b/libs/support/nnapi/src/Utils.cpp
deleted file mode 100644
index ae1076fd1..000000000
--- a/libs/support/nnapi/src/Utils.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-#include "support/nnapi/Utils.h"
-
-#include <cassert>
-
-namespace nnfw
-{
-namespace support
-{
-namespace nnapi
-{
-
-const char *to_string(const PaddingCode &code)
-{
- assert((ANEURALNETWORKS_PADDING_SAME == code) || (ANEURALNETWORKS_PADDING_VALID == code));
-
- switch (code)
- {
- case ANEURALNETWORKS_PADDING_SAME:
- return "ANEURALNETWORKS_PADDING_SAME";
- case ANEURALNETWORKS_PADDING_VALID:
- return "ANEURALNETWORKS_PADDING_VALID";
- }
-
- return nullptr;
-}
-
-} // namespace nnapi
-} // namespace support
-} // namespace nnfw
diff --git a/libs/support/nnapi/src/feature/Utils.cpp b/libs/support/nnapi/src/feature/Utils.cpp
deleted file mode 100644
index 62939ff4a..000000000
--- a/libs/support/nnapi/src/feature/Utils.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/nnapi/feature/Utils.h"
-
-namespace nnfw
-{
-namespace support
-{
-namespace nnapi
-{
-namespace feature
-{
-
-uint32_t indexOf(const nnfw::util::feature::Shape &shape, uint32_t ch, uint32_t row, uint32_t col)
-{
- uint32_t res = 0;
-
-  // NNAPI assumes NHWC ordering for feature maps
- res += row * shape.W * shape.C;
- res += col * shape.C;
- res += ch;
-
- return res;
-}
-
-} // namespace feature
-} // namespace nnapi
-} // namespace support
-} // namespace nnfw
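
For reference, a self-contained check of the NHWC offset arithmetic above (the Shape struct below is a hypothetical stand-in for nnfw::util::feature::Shape):

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for nnfw::util::feature::Shape (C/H/W fields only).
struct Shape
{
  int32_t C;
  int32_t H;
  int32_t W;
};

// Same arithmetic as indexOf above: NHWC with N == 1, so
// offset = row * W * C + col * C + ch.
uint32_t nhwc_offset(const Shape &shape, uint32_t ch, uint32_t row, uint32_t col)
{
  return row * shape.W * shape.C + col * shape.C + ch;
}

int main()
{
  const Shape shape{/*C=*/4, /*H=*/2, /*W=*/3};

  // Walking ch fastest, then col, then row must visit offsets 0, 1, 2, ...
  uint32_t expected = 0;
  for (uint32_t row = 0; row < 2; ++row)
    for (uint32_t col = 0; col < 3; ++col)
      for (uint32_t ch = 0; ch < 4; ++ch)
        assert(nhwc_offset(shape, ch, row, col) == expected++);

  return 0;
}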
diff --git a/libs/support/tflite/CMakeLists.txt b/libs/support/tflite/CMakeLists.txt
deleted file mode 100644
index 667b3bc11..000000000
--- a/libs/support/tflite/CMakeLists.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-file(GLOB_RECURSE SOURCES "src/*.cpp")
-file(GLOB_RECURSE TESTS "src/*.test.cpp")
-list(REMOVE_ITEM SOURCES ${TESTS})
-
-add_library(nnfw_support_tflite STATIC ${SOURCES})
-set_target_properties(nnfw_support_tflite PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(nnfw_support_tflite PUBLIC ${CMAKE_SOURCE_DIR}/include)
-target_link_libraries(nnfw_support_tflite tensorflow-lite ${LIB_PTHREAD} dl)
-target_link_libraries(nnfw_support_tflite static_nnfw_util)
-
-add_executable(nnfw_support_tflite_test_TensorView src/TensorView.test.cpp)
-target_link_libraries(nnfw_support_tflite_test_TensorView nnfw_support_tflite)
diff --git a/libs/support/tflite/src/Diff.cpp b/libs/support/tflite/src/Diff.cpp
deleted file mode 100644
index e875571cb..000000000
--- a/libs/support/tflite/src/Diff.cpp
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/Diff.h"
-#include "support/tflite/nnapi_delegate.h"
-
-#include "util/fp32.h"
-
-#include "util/tensor/IndexIterator.h"
-#include "util/tensor/IndexFormatter.h"
-#include "util/tensor/Zipper.h"
-#include "util/tensor/Comparator.h"
-
-#include "util/environment.h"
-
-#include <iostream>
-#include <cassert>
-
-class DiffSummary : public nnfw::util::tensor::Comparator::Observer
-{
-public:
- DiffSummary()
- : max_abs_diff_index(0), max_abs_diff_expected{0.0f}, max_abs_diff_obtained{0.0f},
- max_abs_diff_value{0.0f}, max_rel_diff_index(0), max_rel_diff_expected{0.0f},
- max_rel_diff_obtained{0.0f}, max_rel_diff_value{0.0f}
- {
- // DO NOTHING
- }
-
-public:
- void notify(const nnfw::util::tensor::Index &index, float expected, float obtained) override;
-
-public:
- nnfw::util::tensor::Index max_abs_diff_index;
- float max_abs_diff_expected;
- float max_abs_diff_obtained;
- float max_abs_diff_value;
-
- nnfw::util::tensor::Index max_rel_diff_index;
- float max_rel_diff_expected;
- float max_rel_diff_obtained;
- float max_rel_diff_value;
-};
-
-void DiffSummary::notify(const nnfw::util::tensor::Index &index, float expected, float obtained)
-{
- const auto abs_diff_value = std::fabs(expected - obtained);
-
- if (max_abs_diff_value < abs_diff_value)
- {
- max_abs_diff_index = index;
- max_abs_diff_value = abs_diff_value;
- max_abs_diff_expected = expected;
- max_abs_diff_obtained = obtained;
- }
-
- const auto rel_diff_value = nnfw::util::fp32::relative_diff(expected, obtained);
-
- if (max_rel_diff_value < rel_diff_value)
- {
- max_rel_diff_index = index;
- max_rel_diff_value = rel_diff_value;
- max_rel_diff_expected = expected;
- max_rel_diff_obtained = obtained;
- }
-}
-
-template <typename T>
-bool TfLiteInterpMatchApp::compareSingleTensorView(
- const nnfw::support::tflite::TensorView<T> &expected,
- const nnfw::support::tflite::TensorView<T> &obtained, int id) const
-{
- std::vector<nnfw::util::tensor::Diff<T>> diffs;
- assert(expected.shape() == obtained.shape());
-
- using nnfw::util::tensor::zip;
- using nnfw::util::tensor::Index;
-
- zip(expected.shape(), expected, obtained)
- << [&](const Index &index, T expected_value, T obtained_value) {
- if (expected_value != obtained_value)
- {
- diffs.emplace_back(index, expected_value, obtained_value);
- }
- };
-
- // TODO Unify summary generation code
- if (diffs.size() == 0)
- {
- std::cout << " Tensor #" << id << ": MATCHED" << std::endl;
- }
- else
- {
- std::cout << " Tensor #" << id << ": UNMATCHED" << std::endl;
- std::cout << " " << diffs.size() << " diffs are detected" << std::endl;
- }
-
- if (diffs.size() > 0 && _verbose != 0)
- {
- std::cout << " ---- Details ---" << std::endl;
- for (const auto &diff : diffs)
- {
- std::cout << " Diff at [" << nnfw::util::tensor::IndexFormatter(diff.index) << "]"
- << std::endl;
- std::cout << " expected: " << diff.expected << std::endl;
- std::cout << " obtained: " << diff.obtained << std::endl;
- }
- }
-
- return diffs.size() == 0;
-}
-
-template <>
-bool TfLiteInterpMatchApp::compareSingleTensorView<float>(
- const nnfw::support::tflite::TensorView<float> &expected,
- const nnfw::support::tflite::TensorView<float> &obtained, int id) const
-{
- DiffSummary summary;
-
- assert(expected.shape() == obtained.shape());
- auto diffs = _comparator.compare(expected.shape(), expected, obtained, &summary);
-
- // TODO Unify summary generation code
- if (diffs.size() == 0)
- {
- std::cout << " Tensor #" << id << ": MATCHED" << std::endl;
- }
- else
- {
- std::cout << " Tensor #" << id << ": UNMATCHED" << std::endl;
- std::cout << " " << diffs.size() << " diffs are detected" << std::endl;
- }
-
- // Print out max_diff
- if (summary.max_abs_diff_value > 0)
- {
- std::cout << " Max absolute diff at ["
- << nnfw::util::tensor::IndexFormatter(summary.max_abs_diff_index) << "]" << std::endl;
- std::cout << " expected: " << summary.max_abs_diff_expected << std::endl;
- std::cout << " obtained: " << summary.max_abs_diff_obtained << std::endl;
- std::cout << " absolute diff: " << summary.max_abs_diff_value << std::endl;
- }
-
- if (summary.max_rel_diff_value > 0)
- {
- const auto tolerance_level = summary.max_rel_diff_value / FLT_EPSILON;
-
- std::cout << " Max relative diff at ["
- << nnfw::util::tensor::IndexFormatter(summary.max_rel_diff_index) << "]" << std::endl;
- std::cout << " expected: " << summary.max_rel_diff_expected << std::endl;
- std::cout << " obtained: " << summary.max_rel_diff_obtained << std::endl;
- std::cout << " relative diff: " << summary.max_rel_diff_value << std::endl;
- std::cout << " (tolerance level = " << tolerance_level << ")" << std::endl;
- }
-
- if (diffs.size() > 0)
- {
- if (_verbose != 0)
- {
- std::cout << " ---- Details ---" << std::endl;
- for (const auto &diff : diffs)
- {
- const auto absolute_diff = std::fabs(diff.expected - diff.obtained);
- const auto relative_diff = nnfw::util::fp32::relative_diff(diff.expected, diff.obtained);
- const auto tolerance_level = relative_diff / FLT_EPSILON;
-
- std::cout << " Diff at [" << nnfw::util::tensor::IndexFormatter(diff.index) << "]"
- << std::endl;
- std::cout << " expected: " << diff.expected << std::endl;
- std::cout << " obtained: " << diff.obtained << std::endl;
- std::cout << " absolute diff: " << absolute_diff << std::endl;
- std::cout << " relative diff: " << relative_diff << std::endl;
- std::cout << " (tolerance level = " << tolerance_level << ")" << std::endl;
- }
- }
-
- return false;
- }
- return true;
-}
-
-#include <map>
-
-bool TfLiteInterpMatchApp::run(::tflite::Interpreter &interp, ::tflite::Interpreter &nnapi) const
-{
- assert(interp.outputs() == nnapi.outputs());
-
- bool all_matched = true;
-
- using Comparator = std::function<bool(int id, ::tflite::Interpreter &, ::tflite::Interpreter &)>;
-
- std::map<TfLiteType, Comparator> comparators;
-
- comparators[kTfLiteUInt8] = [this](int id, ::tflite::Interpreter &interp,
- ::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::support::tflite::TensorView<uint8_t>::make(interp, id);
- const auto obtained = nnfw::support::tflite::TensorView<uint8_t>::make(nnapi, id);
-
- return compareSingleTensorView(expected, obtained, id);
- };
-
- comparators[kTfLiteInt32] = [this](int id, ::tflite::Interpreter &interp,
- ::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::support::tflite::TensorView<int32_t>::make(interp, id);
- const auto obtained = nnfw::support::tflite::TensorView<int32_t>::make(nnapi, id);
-
- return compareSingleTensorView(expected, obtained, id);
- };
-
- comparators[kTfLiteFloat32] = [this](int id, ::tflite::Interpreter &interp,
- ::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::support::tflite::TensorView<float>::make(interp, id);
- const auto obtained = nnfw::support::tflite::TensorView<float>::make(nnapi, id);
-
- return compareSingleTensorView(expected, obtained, id);
- };
-
- for (const auto &id : interp.outputs())
- {
- assert(interp.tensor(id)->type == nnapi.tensor(id)->type);
-
- auto it = comparators.find(interp.tensor(id)->type);
-
- if (it == comparators.end())
- {
- throw std::runtime_error{"Not supported output type"};
- }
-
- const auto &comparator = it->second;
-
- if (!comparator(id, interp, nnapi))
- {
- all_matched = false;
- }
- }
-
- return all_matched;
-}
-
-#include "util/tensor/Object.h"
-
-using namespace std::placeholders;
-
-template <> uint8_t RandomGenerator::generate<uint8_t>(void)
-{
- // The value of type_range is 255.
- float type_range = static_cast<float>(std::numeric_limits<uint8_t>::max()) -
- static_cast<float>(std::numeric_limits<uint8_t>::min());
- // Most _dist values range from -5.0 to 5.0.
- float min_range = -5.0f;
- float max_range = 5.0f;
- return static_cast<uint8_t>((_dist(_rand) - min_range) * type_range / (max_range - min_range));
-}
-
-#include "support/tflite/TensorLogger.h"
-//
-// Random Test Runner
-//
-int RandomTestRunner::run(const nnfw::support::tflite::interp::Builder &builder)
-{
- auto tfl_interp = builder.build();
- auto nnapi = builder.build();
-
- tfl_interp->UseNNAPI(false);
-
- // Allocate Tensors
- tfl_interp->AllocateTensors();
- nnapi->AllocateTensors();
-
- assert(tfl_interp->inputs() == nnapi->inputs());
-
- using ::tflite::Interpreter;
- using Initializer = std::function<void(int id, Interpreter *, Interpreter *)>;
-
- std::map<TfLiteType, Initializer> initializers;
- std::map<TfLiteType, Initializer> reseters;
-
-  // Generate signed 32-bit integer (s32) input
- initializers[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
- assert(nnapi->tensor(id)->type == kTfLiteInt32);
-
- auto tfl_interp_view = nnfw::support::tflite::TensorView<int32_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<int32_t>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- int32_t value = 0;
-
- nnfw::util::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::util::tensor::Index &ind) {
- // TODO Generate random values
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- ++value;
- };
- };
-
-  // Reset signed 32-bit integer (s32) tensors to zero
- reseters[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
- assert(nnapi->tensor(id)->type == kTfLiteInt32);
-
- auto tfl_interp_view = nnfw::support::tflite::TensorView<int32_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<int32_t>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- int32_t value = 0;
-
- nnfw::util::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::util::tensor::Index &ind) {
- // TODO Generate random values
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- initializers[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
- assert(nnapi->tensor(id)->type == kTfLiteUInt8);
-
- auto tfl_interp_view = nnfw::support::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<uint8_t>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<uint8_t (RandomGenerator::*)(const ::nnfw::util::tensor::Shape &,
- const ::nnfw::util::tensor::Index &)>(
- &RandomGenerator::generate<uint8_t>);
- const nnfw::util::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
- assert(tfl_interp_view.shape() == data.shape());
-
- nnfw::util::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::util::tensor::Index &ind) {
- const auto value = data.at(ind);
-
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- reseters[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
- assert(nnapi->tensor(id)->type == kTfLiteUInt8);
-
- auto tfl_interp_view = nnfw::support::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<uint8_t>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<uint8_t (RandomGenerator::*)(const ::nnfw::util::tensor::Shape &,
- const ::nnfw::util::tensor::Index &)>(
- &RandomGenerator::generate<uint8_t>);
- const nnfw::util::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
- assert(tfl_interp_view.shape() == data.shape());
-
- uint8_t value = 0;
-
- nnfw::util::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::util::tensor::Index &ind) {
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- initializers[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
- assert(nnapi->tensor(id)->type == kTfLiteFloat32);
-
- auto tfl_interp_view = nnfw::support::tflite::TensorView<float>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<float>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<float (RandomGenerator::*)(const ::nnfw::util::tensor::Shape &,
- const ::nnfw::util::tensor::Index &)>(
- &RandomGenerator::generate<float>);
- const nnfw::util::tensor::Object<float> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
-
- assert(tfl_interp_view.shape() == data.shape());
-
- nnfw::util::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::util::tensor::Index &ind) {
- const auto value = data.at(ind);
-
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- reseters[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
- assert(nnapi->tensor(id)->type == kTfLiteFloat32);
-
- auto tfl_interp_view = nnfw::support::tflite::TensorView<float>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::support::tflite::TensorView<float>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<float (RandomGenerator::*)(const ::nnfw::util::tensor::Shape &,
- const ::nnfw::util::tensor::Index &)>(
- &RandomGenerator::generate<float>);
- const nnfw::util::tensor::Object<float> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
-
- assert(tfl_interp_view.shape() == data.shape());
-
- float value = 0;
-
- nnfw::util::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::util::tensor::Index &ind) {
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- // Fill IFM with random numbers
- for (const auto id : tfl_interp->inputs())
- {
- assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
-
- auto it = initializers.find(tfl_interp->tensor(id)->type);
-
- if (it == initializers.end())
- {
- throw std::runtime_error{"Not supported input type"};
- }
-
- it->second(id, tfl_interp.get(), nnapi.get());
- }
-
- // Fill OFM with 0
- for (const auto id : tfl_interp->outputs())
- {
- assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
-
- auto it = reseters.find(tfl_interp->tensor(id)->type);
-
- if (it == reseters.end())
- {
- throw std::runtime_error{"Not supported input type"};
- }
-
- it->second(id, tfl_interp.get(), nnapi.get());
- }
-
- std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
- tfl_interp->Invoke();
-
- std::cout << "[NNAPI TEST] Run T/F Lite Interpreter with NNAPI" << std::endl;
-
- char *env = getenv("UPSTREAM_DELEGATE");
-
- if (env && !std::string(env).compare("1"))
- {
- nnapi->UseNNAPI(true);
- nnapi->Invoke();
- }
- else
- {
- nnfw::NNAPIDelegate d;
-
- if (d.BuildGraph(nnapi.get()))
- {
- throw std::runtime_error{"Failed to BuildGraph"};
- }
-
- if (d.Invoke(nnapi.get()))
- {
- throw std::runtime_error{"Failed to BuildGraph"};
- }
- }
-
- // Compare OFM
- std::cout << "[NNAPI TEST] Compare the result" << std::endl;
-
- const auto tolerance = _param.tolerance;
-
- auto equals = [tolerance](float lhs, float rhs) {
- // NOTE Hybrid approach
- // TODO Allow users to set tolerance for absolute_epsilon_equal
- if (nnfw::util::fp32::absolute_epsilon_equal(lhs, rhs))
- {
- return true;
- }
-
- return nnfw::util::fp32::epsilon_equal(lhs, rhs, tolerance);
- };
-
- nnfw::util::tensor::Comparator comparator(equals);
- TfLiteInterpMatchApp app(comparator);
-
- app.verbose() = _param.verbose;
-
- bool res = app.run(*tfl_interp, *nnapi);
-
- if (!res)
- {
- return 255;
- }
-
- std::cout << "[NNAPI TEST] PASSED" << std::endl;
-
- if (_param.tensor_logging)
- nnfw::support::tflite::TensorLogger::instance().save(_param.log_path, *tfl_interp);
-
- return 0;
-}
-
-RandomTestRunner RandomTestRunner::make(int seed)
-{
- RandomTestParam param;
-
- param.verbose = 0;
- param.tolerance = 1;
-
- nnfw::util::env::IntAccessor("VERBOSE").access(param.verbose);
- nnfw::util::env::IntAccessor("TOLERANCE").access(param.tolerance);
-
- return RandomTestRunner{seed, param};
-}
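
For reference, the hybrid float comparison used above accepts values that pass either an absolute-epsilon check (robust near zero, where relative error blows up) or a tolerance-scaled relative check (robust for large magnitudes). A minimal self-contained sketch, assuming plausible bodies for the util/fp32.h predicates:

#include <cassert>
#include <cfloat>
#include <cmath>

// Assumed behavior of nnfw::util::fp32::absolute_epsilon_equal:
// values within a fixed absolute epsilon compare equal.
bool absolute_epsilon_equal(float lhs, float rhs, float eps = FLT_EPSILON)
{
  return std::fabs(lhs - rhs) <= eps;
}

// Assumed behavior of nnfw::util::fp32::epsilon_equal: values compare equal
// when their difference stays within `tolerance` steps of FLT_EPSILON,
// scaled by the larger magnitude.
bool epsilon_equal(float lhs, float rhs, int tolerance)
{
  const float scale = std::fmax(std::fabs(lhs), std::fabs(rhs));
  return std::fabs(lhs - rhs) <= scale * FLT_EPSILON * tolerance;
}

bool equals(float lhs, float rhs, int tolerance)
{
  // Hybrid approach, as in Diff.cpp: absolute check first, relative second.
  return absolute_epsilon_equal(lhs, rhs) || epsilon_equal(lhs, rhs, tolerance);
}

int main()
{
  assert(equals(0.0f, 1e-8f, 1));         // near zero: absolute check passes
  assert(equals(1000.0f, 1000.0001f, 2)); // large values: relative check passes
  assert(!equals(1.0f, 1.1f, 1));         // clearly different
  return 0;
}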
diff --git a/libs/support/tflite/src/FeatureView.cpp b/libs/support/tflite/src/FeatureView.cpp
deleted file mode 100644
index 4c7636780..000000000
--- a/libs/support/tflite/src/FeatureView.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/FeatureView.h"
-#include "support/tflite/TensorUtils.h"
-
-#include <cassert>
-
-namespace nnfw
-{
-namespace support
-{
-namespace tflite
-{
-
-nnfw::util::feature::Shape getFeatureShape(const TfLiteTensor *tensor)
-{
- nnfw::util::feature::Shape shape{tensor->dims->data[3], tensor->dims->data[1],
- tensor->dims->data[2]};
-
- return shape;
-}
-
-FeatureView<float>::FeatureView(::tflite::Interpreter &interp, const InputIndex &index)
-{
- const auto tensor_index = interp.inputs().at(index.asInt());
- auto tensor_ptr = interp.tensor(tensor_index);
-
- assert(isFloatTensor(tensor_ptr));
- assert(isFeatureTensor(tensor_ptr));
-
- _shape = getFeatureShape(tensor_ptr);
- _base = interp.typed_tensor<float>(tensor_index);
-}
-
-FeatureView<float>::FeatureView(::tflite::Interpreter &interp, const OutputIndex &index)
-{
- const auto tensor_index = interp.outputs().at(index.asInt());
- auto tensor_ptr = interp.tensor(tensor_index);
-
- assert(isFloatTensor(tensor_ptr));
- assert(isFeatureTensor(tensor_ptr));
-
- _shape = getFeatureShape(tensor_ptr);
- _base = interp.typed_tensor<float>(tensor_index);
-}
-
-float FeatureView<float>::at(uint32_t ch, uint32_t row, uint32_t col) const
-{
- return *(_base + getElementOffset(ch, row, col));
-}
-
-float &FeatureView<float>::at(uint32_t ch, uint32_t row, uint32_t col)
-{
- return *(_base + getElementOffset(ch, row, col));
-}
-
-} // namespace tflite
-} // namespace support
-} // namespace nnfw
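
TF Lite stores activations in NHWC order, so getFeatureShape above reads C, H, and W out of dims->data[3], [1], and [2]. A self-contained check of that mapping (the structs below are illustrative stand-ins for the TF Lite and nnfw types):

#include <cassert>
#include <cstdint>

// Illustrative stand-ins; the real types come from TF Lite and nnfw::util.
struct Dims { int32_t data[4]; };      // NHWC: {N, H, W, C}
struct FeatureShape { int32_t C, H, W; };

FeatureShape getFeatureShape(const Dims &dims)
{
  // Same index mapping as FeatureView.cpp: C = dims[3], H = dims[1], W = dims[2].
  return FeatureShape{dims.data[3], dims.data[1], dims.data[2]};
}

int main()
{
  const Dims dims{{1, 5, 7, 3}}; // batch 1, 5 rows, 7 cols, 3 channels
  const FeatureShape shape = getFeatureShape(dims);
  assert(shape.C == 3 && shape.H == 5 && shape.W == 7);
  return 0;
}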
diff --git a/libs/support/tflite/src/Quantization.cpp b/libs/support/tflite/src/Quantization.cpp
deleted file mode 100644
index b23204d41..000000000
--- a/libs/support/tflite/src/Quantization.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/Quantization.h"
-
-TfLiteQuantizationParams make_default_quantization(void)
-{
- return TfLiteQuantizationParams{0.0f, 0};
-}
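
For context, TF Lite's affine quantization maps a quantized value q to real = scale * (q - zero_point), and a {0.0f, 0} parameter pair like the default above conventionally marks a tensor as not quantized. A small illustration of the mapping:

#include <cassert>
#include <cstdint>

// TF Lite affine quantization: real = scale * (q - zero_point).
float dequantize(uint8_t q, float scale, int32_t zero_point)
{
  return scale * (static_cast<int32_t>(q) - zero_point);
}

int main()
{
  // Example params: scale 0.5, zero_point 128 maps q in [0, 255]
  // to reals in [-64.0, 63.5].
  assert(dequantize(128, 0.5f, 128) == 0.0f);
  assert(dequantize(130, 0.5f, 128) == 1.0f);
  assert(dequantize(0, 0.5f, 128) == -64.0f);
  return 0;
}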
diff --git a/libs/support/tflite/src/TensorShapeUtils.cpp b/libs/support/tflite/src/TensorShapeUtils.cpp
deleted file mode 100644
index 611ba920e..000000000
--- a/libs/support/tflite/src/TensorShapeUtils.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-#include "support/tflite/TensorShapeUtils.h"
-
-namespace nnfw
-{
-namespace support
-{
-namespace tflite
-{
-
-nnfw::util::tensor::Shape broadcast(const nnfw::util::tensor::Shape &lhs_shape,
- const nnfw::util::tensor::Shape &rhs_shape)
-{
- const uint32_t lhs_rank = lhs_shape.rank();
- const uint32_t rhs_rank = rhs_shape.rank();
- const uint32_t out_rank = std::max(lhs_rank, rhs_rank);
-
- // TODO Simplify implementation
- std::vector<int32_t> lhs_normalized_dims;
- std::vector<int32_t> rhs_normalized_dims;
-
- for (uint32_t n = 0; n < out_rank - lhs_rank; ++n)
- {
- lhs_normalized_dims.emplace_back(1);
- }
- for (uint32_t axis = 0; axis < lhs_rank; ++axis)
- {
- lhs_normalized_dims.emplace_back(lhs_shape.dim(axis));
- }
-
- for (uint32_t n = 0; n < out_rank - rhs_rank; ++n)
- {
- rhs_normalized_dims.emplace_back(1);
- }
- for (uint32_t axis = 0; axis < rhs_rank; ++axis)
- {
- rhs_normalized_dims.emplace_back(rhs_shape.dim(axis));
- }
-
- nnfw::util::tensor::Shape out_shape(out_rank);
-
- for (uint32_t axis = 0; axis < out_rank; ++axis)
- {
- out_shape.dim(axis) = std::max(lhs_normalized_dims.at(axis), rhs_normalized_dims.at(axis));
- }
-
- return out_shape;
-}
-
-} // namespace tflite
-} // namespace support
-} // namespace nnfw
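
For reference, the broadcast rule above left-pads the lower-rank shape with 1s and then takes the per-axis maximum; note it does not verify that mismatched dimensions are 1. A self-contained restatement over std::vector:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Same algorithm as broadcast() above, restated for illustration:
// left-pad to a common rank with 1s, then take the per-axis max.
std::vector<int32_t> broadcast(std::vector<int32_t> lhs, std::vector<int32_t> rhs)
{
  const size_t out_rank = std::max(lhs.size(), rhs.size());
  lhs.insert(lhs.begin(), out_rank - lhs.size(), 1);
  rhs.insert(rhs.begin(), out_rank - rhs.size(), 1);

  std::vector<int32_t> out(out_rank);
  for (size_t axis = 0; axis < out_rank; ++axis)
    out[axis] = std::max(lhs[axis], rhs[axis]);
  return out;
}

int main()
{
  // {3, 1, 5} broadcast with {4, 1} -> {3, 4, 5}
  assert(broadcast({3, 1, 5}, {4, 1}) == (std::vector<int32_t>{3, 4, 5}));
  return 0;
}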
diff --git a/libs/support/tflite/src/TensorView.test.cpp b/libs/support/tflite/src/TensorView.test.cpp
deleted file mode 100644
index 1d3a70500..000000000
--- a/libs/support/tflite/src/TensorView.test.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/TensorView.h"
-
-#include <cassert>
-
-void int_test(void)
-{
- int value[6] = {1, 2, 3, 4, 5, 6};
-
- const nnfw::util::tensor::Shape shape{2, 3};
- const nnfw::support::tflite::TensorView<int> view{shape, value};
-
- assert(view.at(nnfw::util::tensor::Index{0, 0}) == 1);
- assert(view.at(nnfw::util::tensor::Index{0, 1}) == 2);
- assert(view.at(nnfw::util::tensor::Index{0, 2}) == 3);
- assert(view.at(nnfw::util::tensor::Index{1, 0}) == 4);
- assert(view.at(nnfw::util::tensor::Index{1, 1}) == 5);
- assert(view.at(nnfw::util::tensor::Index{1, 2}) == 6);
-}
-
-int main(int argc, char **argv)
-{
- float value[6] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
-
- const nnfw::util::tensor::Shape shape{2, 3};
- const nnfw::support::tflite::TensorView<float> view{shape, value};
-
- assert(view.at(nnfw::util::tensor::Index{0, 0}) == 1.0f);
- assert(view.at(nnfw::util::tensor::Index{0, 1}) == 2.0f);
- assert(view.at(nnfw::util::tensor::Index{0, 2}) == 3.0f);
- assert(view.at(nnfw::util::tensor::Index{1, 0}) == 4.0f);
- assert(view.at(nnfw::util::tensor::Index{1, 1}) == 5.0f);
- assert(view.at(nnfw::util::tensor::Index{1, 2}) == 6.0f);
-
- int_test();
-
- return 0;
-}
diff --git a/libs/support/tflite/src/interp/FlatBufferBuilder.cpp b/libs/support/tflite/src/interp/FlatBufferBuilder.cpp
deleted file mode 100644
index 67df13f34..000000000
--- a/libs/support/tflite/src/interp/FlatBufferBuilder.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/interp/FlatBufferBuilder.h"
-
-#include "support/tflite/kernels/register.h"
-
-namespace nnfw
-{
-namespace support
-{
-namespace tflite
-{
-namespace interp
-{
-
-std::unique_ptr<::tflite::Interpreter> FlatBufferBuilder::build(void) const
-{
- std::unique_ptr<::tflite::Interpreter> interpreter;
-
- ::tflite::ops::builtin::BuiltinOpResolver resolver;
-
- ::tflite::InterpreterBuilder builder(_model, resolver);
-
- builder(&interpreter);
-
- return std::move(interpreter);
-}
-
-} // namespace interp
-} // namespace tflite
-} // namespace support
-} // namespace nnfw
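
A hedged usage sketch of FlatBufferBuilder (assuming the constructor takes the ::tflite::FlatBufferModel that build() reads as _model; the model path is hypothetical):

#include "support/tflite/interp/FlatBufferBuilder.h"
#include "tensorflow/contrib/lite/model.h"

int run_model()
{
  // "model.tflite" is a hypothetical path.
  auto model = ::tflite::FlatBufferModel::BuildFromFile("model.tflite");
  if (model == nullptr)
    return 1;

  nnfw::support::tflite::interp::FlatBufferBuilder builder(*model);

  auto interpreter = builder.build(); // wraps InterpreterBuilder as above
  interpreter->AllocateTensors();
  interpreter->Invoke();
  return 0;
}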
diff --git a/libs/support/tflite/src/interp/FunctionBuilder.cpp b/libs/support/tflite/src/interp/FunctionBuilder.cpp
deleted file mode 100644
index 65783bd37..000000000
--- a/libs/support/tflite/src/interp/FunctionBuilder.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/interp/FunctionBuilder.h"
-
-namespace nnfw
-{
-namespace support
-{
-namespace tflite
-{
-namespace interp
-{
-
-std::unique_ptr<::tflite::Interpreter> FunctionBuilder::build(void) const
-{
- auto res = std::unique_ptr<::tflite::Interpreter>{new ::tflite::Interpreter};
-
- _fn(*res);
-
- return std::move(res);
-}
-
-} // namespace interp
-} // namespace tflite
-} // namespace support
-} // namespace nnfw
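
FunctionBuilder defers interpreter setup to a caller-supplied function; a hedged usage sketch (assuming the constructor accepts a std::function<void(::tflite::Interpreter &)>, which is what the _fn(*res) call implies):

#include "support/tflite/interp/FunctionBuilder.h"

#include <memory>

std::unique_ptr<::tflite::Interpreter> build_empty()
{
  // The lambda receives the freshly constructed Interpreter and is free to
  // add tensors and nodes and to set inputs/outputs before build() returns it.
  nnfw::support::tflite::interp::FunctionBuilder builder{
      [](::tflite::Interpreter &interp) {
        interp.AddTensors(2);
        interp.SetInputs({0});
        interp.SetOutputs({1});
      }};

  return builder.build();
}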
diff --git a/libs/support/tflite/src/kernels/RSQRT.cpp b/libs/support/tflite/src/kernels/RSQRT.cpp
deleted file mode 100644
index 13efe0ed9..000000000
--- a/libs/support/tflite/src/kernels/RSQRT.cpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/kernels/RSQRT.h"
-#include "tensorflow/contrib/lite/kernels/kernel_util.h"
-
-#include <cmath>
-#include <iostream>
-
-namespace tflite
-{
-namespace ops
-{
-namespace custom
-{
-namespace nnfw
-{
-namespace RSQRT
-{
-
-void *InitRSQRT(TfLiteContext *context, const char *buffer, size_t length) { return nullptr; }
-
-void FreeRSQRT(TfLiteContext *context, void *buffer) {}
-
-TfLiteStatus PrepareRSQRT(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
- TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
- const TfLiteTensor *input = GetInput(context, node, 0);
- TfLiteTensor *output = GetOutput(context, node, 0);
- TF_LITE_ENSURE_EQ(context, input->type, output->type);
- // Quantized float is not supported yet.
- TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32);
- return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
-}
-
-inline TfLiteStatus Eval(TfLiteContext *context, TfLiteNode *node, float float_func(float))
-{
- const TfLiteTensor *input = GetInput(context, node, 0);
- TfLiteTensor *output = GetOutput(context, node, 0);
- switch (input->type)
- {
- case kTfLiteFloat32:
- {
- size_t elements = NumElements(input);
- const float *in = input->data.f;
- const float *in_end = in + elements;
- float *out = output->data.f;
- for (; in < in_end; in++, out++)
- *out = float_func(*in);
- return kTfLiteOk;
- }
- default:
- {
- context->ReportError(context, "Input type is %d, requires float32", input->type);
- return kTfLiteError;
- }
- }
-}
-
-TfLiteStatus EvalRSQRT(TfLiteContext *context, TfLiteNode *node)
-{
- return Eval(context, node, [](float f) { return 1.f / std::sqrt(f); });
-}
-
-} // namespace RSQRT
-} // namespace nnfw
-} // namespace custom
-} // namespace ops
-} // namespace tflite
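
RSQRT computes the element-wise reciprocal square root 1/sqrt(x). A self-contained check of the scalar arithmetic EvalRSQRT applies per element (note the kernel leaves zero and negative inputs unguarded):

#include <cassert>
#include <cmath>

// Element-wise reciprocal square root, as EvalRSQRT applies it per float.
float rsqrt(float f) { return 1.f / std::sqrt(f); }

int main()
{
  assert(rsqrt(4.0f) == 0.5f);
  assert(rsqrt(1.0f) == 1.0f);
  // rsqrt(0.0f) is +inf and rsqrt(-1.0f) is NaN; the kernel above does not
  // guard against either, matching the unchecked loop in Eval().
  assert(std::isinf(rsqrt(0.0f)));
  assert(std::isnan(rsqrt(-1.0f)));
  return 0;
}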
diff --git a/libs/support/tflite/src/kernels/SquaredDifference.cpp b/libs/support/tflite/src/kernels/SquaredDifference.cpp
deleted file mode 100644
index 25e10a8ed..000000000
--- a/libs/support/tflite/src/kernels/SquaredDifference.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/kernels/SquaredDifference.h"
-#include "tensorflow/contrib/lite/kernels/kernel_util.h"
-
-#include <iostream>
-
-namespace tflite
-{
-namespace ops
-{
-namespace custom
-{
-namespace nnfw
-{
-namespace SquaredDifference
-{
-
-void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length)
-{
- return nullptr;
-}
-
-void FreeSquaredDifference(TfLiteContext *context, void *buffer) {}
-
-TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
- const TfLiteTensor *input1 = GetInput(context, node, 0);
- const TfLiteTensor *input2 = GetInput(context, node, 1);
- TfLiteTensor *output = GetOutput(context, node, 0);
-
- TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
- TF_LITE_ENSURE_EQ(context, input1->type, output->type);
-
- return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input1->dims));
-}
-
-TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node)
-{
-
- const TfLiteTensor *input1 = GetInput(context, node, 0);
- const TfLiteTensor *input2 = GetInput(context, node, 1);
-
- TfLiteTensor *output = GetOutput(context, node, 0);
-
- size_t elements = NumElements(input1);
-
- switch (input1->type)
- {
- case kTfLiteFloat32:
- {
- const float *in1 = input1->data.f;
- const float *in2 = input2->data.f;
- const float *in_end1 = in1 + elements;
- float *out = output->data.f;
-
- for (; in1 < in_end1; in1++, in2++, out++)
- *out = ((*in1 - *in2) * (*in1 - *in2));
-
- return kTfLiteOk;
- }
- case kTfLiteInt32:
- {
- const int *in1 = input1->data.i32;
- const int *in2 = input2->data.i32;
- const int *in_end1 = in1 + elements;
- int *out = output->data.i32;
-
- for (; in1 < in_end1; in1++, in2++, out++)
- *out = ((*in1 - *in2) * (*in1 - *in2));
-
- return kTfLiteOk;
- }
- case kTfLiteInt64:
- {
- const int64_t *in1 = input1->data.i64;
-      const int64_t *in2 = input2->data.i64;
- const int64_t *in_end1 = in1 + elements;
- int64_t *out = output->data.i64;
-
- for (; in1 < in_end1; in1++, in2++, out++)
- *out = ((*in1 - *in2) * (*in1 - *in2));
-
- return kTfLiteOk;
- }
- default:
- {
- context->ReportError(context, "InputType is %d Unsupported", input1->type);
- return kTfLiteError;
- }
- }
-}
-
-} // namespace SquaredDifference
-} // namespace nnfw
-} // namespace custom
-} // namespace ops
-} // namespace tflite
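
The kernel above applies (a - b)^2 element-wise over float32, int32, and int64 buffers. The scalar core in isolation:

#include <cassert>

// Element-wise (a - b)^2, the scalar core of EvalSquaredDifference above.
template <typename T> T squared_difference(T a, T b) { return (a - b) * (a - b); }

int main()
{
  assert(squared_difference(5, 2) == 9);
  assert(squared_difference(2, 5) == 9);          // symmetric in its arguments
  assert(squared_difference(1.5f, 0.5f) == 1.0f); // exact in binary float
  return 0;
}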
diff --git a/libs/support/tflite/src/kernels/TensorFlowMax.cpp b/libs/support/tflite/src/kernels/TensorFlowMax.cpp
deleted file mode 100644
index abc6fda4e..000000000
--- a/libs/support/tflite/src/kernels/TensorFlowMax.cpp
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "support/tflite/kernels/TensorFlowMax.h"
-#include "tensorflow/contrib/lite/kernels/kernel_util.h"
-
-#include <iostream>
-
-namespace tflite
-{
-namespace ops
-{
-namespace custom
-{
-namespace nnfw
-{
-namespace TensorFlowMax
-{
-
-struct TensorFlowMaxOp
-{
- TensorFlowMaxOp(TfLiteContext *context, TfLiteNode *node)
- {
- input = tflite::GetInput(context, node, 0);
- axis = tflite::GetInput(context, node, 1);
- output = tflite::GetOutput(context, node, 0);
- }
- const TfLiteTensor *input;
- const TfLiteTensor *axis;
- TfLiteTensor *output;
-};
-
-void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length)
-{
- // Creates two temp tensors to store index and axis for internal
- // implementation only.
- auto *scratch_tensor_index = new int;
- context->AddTensors(context, 2, scratch_tensor_index);
- return scratch_tensor_index;
-}
-
-void FreeTensorFlowMax(TfLiteContext *context, void *buffer)
-{
- delete static_cast<TensorFlowMaxOp *>(buffer);
-}
-
-// Resizes the temp tensor that stores resolved axis.
-TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowMaxOp *op_context,
- TfLiteTensor *resolved_axis)
-{
- TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
- axis_size->data[0] = static_cast<int>(tflite::NumElements(op_context->axis));
- return context->ResizeTensor(context, resolved_axis, axis_size);
-}
-
-// Resizes output array based on the input size and resolved axis.
-TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_context)
-{
- size_t num_axis = tflite::NumElements(op_context->axis);
- const TfLiteIntArray *input_dims = op_context->input->dims;
- int input_num_dims = tflite::NumDimensions(op_context->input);
- const int *axis = op_context->axis->data.i32;
-
- {
- // Calculates size of reducing axis.
- int num_reduce_axis = num_axis;
- for (int i = 0; i < num_axis; ++i)
- {
- int current = axis[i];
- if (current < 0)
- {
- current += input_num_dims;
- }
- TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
- for (int j = 0; j < i; ++j)
- {
- int previous = axis[j];
- if (previous < 0)
- {
- previous += input_num_dims;
- }
- if (current == previous)
- {
- --num_reduce_axis;
- break;
- }
- }
- }
- // Determines output dimensions.
- TfLiteIntArray *output_dims = TfLiteIntArrayCreate(input_num_dims - num_reduce_axis);
- int num_skip_axis = 0;
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- bool is_axis = false;
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx)
- {
- ++num_skip_axis;
- is_axis = true;
- break;
- }
- }
- if (!is_axis)
- {
- output_dims->data[idx - num_skip_axis] = input_dims->data[idx];
- }
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
-}
-
-// Initializes temp tensors to store index and resolved axis.
-TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node,
- TensorFlowMaxOp *op_context)
-{
- // Creates a temp index to iterate through input data.
- int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data);
- TfLiteIntArrayFree(node->temporaries);
- node->temporaries = TfLiteIntArrayCreate(2);
- node->temporaries->data[0] = *scratch_tensor_index;
- TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]];
- scratch_tensor->type = kTfLiteInt32;
- scratch_tensor->allocation_type = kTfLiteArenaRw;
- TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
- index_size->data[0] = tflite::NumDimensions(op_context->input);
- TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
-
- // Creates a temp tensor to store resolved axis given input data.
- node->temporaries->data[1] = *scratch_tensor_index + 1;
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- resolved_axis->type = kTfLiteInt32;
- return kTfLiteOk;
-}
-
-TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
-
- TensorFlowMaxOp op_context(context, node);
- TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
-
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Leaves work to Eval if axis is not constant; else resizes output.
- if (!tflite::IsConstantTensor(op_context.axis))
- {
- tflite::SetTensorToDynamic(op_context.output);
- tflite::SetTensorToDynamic(resolved_axis);
- return kTfLiteOk;
- }
- resolved_axis->allocation_type = kTfLiteArenaRw;
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- return ResizeOutputTensor(context, &op_context);
-}
-
-// Gets offset of index if expanded on axis. When expanded, the flattened offset
-// will not change, if the output index changes on the given axis. For example,
-// if you have a 2D tensor and you are expanding to 3D on axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened
-// offset.
-inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- int out_idx = 0;
- for (int in_idx = 0; in_idx < num_dims; ++in_idx)
- {
- // if we need to expand this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (in_idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]);
- out_idx++;
- }
- else
- {
- offset = offset * static_cast<size_t>(dims[in_idx]);
- }
- }
- return offset;
-}
-
-// Gets offset of index if reducing on axis. When reducing, the flattened offset
-// will not change, if the input index changes on the given axis. For example,
-// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
-// offset.
-// TODO(kanlig): use Dims to represent dimensions.
-inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- for (int idx = 0; idx < num_dims; ++idx)
- {
- // if we need to skip this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]);
- }
- }
- return offset;
-}
-
-// Gets next index to iterate through a multidimensional array.
-inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current)
-{
- int carry = 1;
- for (int idx = num_dims - 1; idx >= 0; --idx)
- {
- int current_val = current[idx] + carry;
- TF_LITE_ENSURE(context, (dims[idx] >= current_val));
- if (dims[idx] == current_val)
- {
- current[idx] = 0;
- }
- else
- {
- current[idx] = current_val;
- carry = 0;
- break;
- }
- }
- return (carry == 0);
-}
-
-template <typename T>
-inline TfLiteStatus
-CustomMax(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
- T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
- const int num_axis_dimensions, bool keep_dims, int *temp_index, int *resolved_axis)
-{
- // resolves axis.
- int num_resolved_axis = 0;
- for (int idx = 0; idx < num_axis_dimensions; ++idx)
- {
- int current = axis[idx];
- TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0));
- if (current < 0)
- {
- current += input_num_dims;
- }
- bool is_dup = false;
- for (int j = 0; j < num_resolved_axis; ++j)
- {
- if (resolved_axis[j] == current)
- {
- is_dup = true;
- break;
- }
- }
- if (!is_dup)
- {
- resolved_axis[num_resolved_axis++] = current;
- }
- }
-
- TF_LITE_ENSURE(context, (input_num_dims > 0));
- TF_LITE_ENSURE(context, (input_dims != nullptr));
- TF_LITE_ENSURE(context, (temp_index != nullptr));
-
- // resets output data.
- for (int idx = 0; idx < output_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, output_num_dims, output_dims, temp_index))
- {
- size_t output_offset =
- ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr);
- size_t input_offset = ExpandedInputOffset(input_num_dims, input_dims, temp_index,
- num_resolved_axis, resolved_axis);
- output_data[output_offset] = input_data[input_offset];
- }
-
- // resets temp index.
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
-
- // iterates through input_data.
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, input_num_dims, input_dims, temp_index))
- {
- size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr);
- size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index,
- num_resolved_axis, resolved_axis);
- if (output_data[output_offset] < input_data[input_offset])
- {
- output_data[output_offset] = input_data[input_offset];
- }
- }
-
- return kTfLiteOk;
-}
-
-TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
-{
-
- TensorFlowMaxOp op_context(context, node);
- int num_axis = static_cast<int>(tflite::NumElements(op_context.axis));
- TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Resize the output tensor if the output tensor is dynamic.
- if (tflite::IsDynamicTensor(op_context.output))
- {
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
- }
-
- TfLiteStatus returnStatus = kTfLiteOk;
- switch (op_context.input->type)
- {
- case kTfLiteFloat32:
- returnStatus = CustomMax<float>(
- context, op_context.input->data.f, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt32:
- returnStatus = CustomMax<int>(context, op_context.input->data.i32,
- op_context.input->dims->data, op_context.input->dims->size,
- op_context.output->data.i32, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteUInt8:
- returnStatus = CustomMax<uint8_t>(
- context, op_context.input->data.uint8, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.uint8,
- op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt64:
- returnStatus = CustomMax<int64_t>(
- context, op_context.input->data.i64, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- default:
- returnStatus = kTfLiteError;
- }
-
- return returnStatus;
-}
-} // namespace TensorFlowMax
-} // namespace nnfw
-} // namespace custom
-} // namespace ops
-} // namespace tflite
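
For reference, a worked check of the offset helpers above: with dims {2, 3, 4} and axis 0 reduced, input indices (0, 1, 2) and (1, 1, 2) must both land on output offset 1 * 4 + 2 = 6. A self-contained restatement of ReducedOutputOffset (minus the TfLiteContext plumbing):

#include <cassert>
#include <cstddef>

// Restatement of ReducedOutputOffset above: axes listed in `axis`
// contribute nothing to the flattened row-major offset.
size_t reduced_offset(int num_dims, const int *dims, const int *index,
                      int num_axis, const int *axis)
{
  size_t offset = 0;
  for (int idx = 0; idx < num_dims; ++idx)
  {
    bool is_axis = false;
    for (int a = 0; a < num_axis; ++a)
      if (idx == axis[a])
        is_axis = true;
    if (!is_axis)
      offset = offset * dims[idx] + index[idx];
  }
  return offset;
}

int main()
{
  const int dims[3] = {2, 3, 4};
  const int axis[1] = {0}; // reduce over the first dimension

  const int a[3] = {0, 1, 2};
  const int b[3] = {1, 1, 2};

  // Indices differing only on the reduced axis share an output offset.
  assert(reduced_offset(3, dims, a, 1, axis) == 6);
  assert(reduced_offset(3, dims, b, 1, axis) == 6);

  // With no axes reduced, this is the ordinary row-major flatten.
  assert(reduced_offset(3, dims, b, 0, nullptr) == 1 * 12 + 1 * 4 + 2);
  return 0;
}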
diff --git a/libs/support/tflite/src/kernels/register.cpp b/libs/support/tflite/src/kernels/register.cpp
deleted file mode 100644
index 6700b4de4..000000000
--- a/libs/support/tflite/src/kernels/register.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE This code is derived from the following file (in TensorFlow)
-// 'externals/tensorflow/tensorflow/contrib/lite/kernels/register.cc'
-#include "support/tflite/kernels/register.h"
-#include "support/tflite/kernels/CustomOps.h"
-
-// TODO Use namespace nnfw
-namespace tflite
-{
-namespace ops
-{
-namespace builtin
-{
-
-TfLiteRegistration *Register_RELU();
-TfLiteRegistration *Register_RELU_N1_TO_1();
-TfLiteRegistration *Register_RELU6();
-TfLiteRegistration *Register_TANH();
-TfLiteRegistration *Register_LOGISTIC();
-TfLiteRegistration *Register_AVERAGE_POOL_2D();
-TfLiteRegistration *Register_MAX_POOL_2D();
-TfLiteRegistration *Register_L2_POOL_2D();
-TfLiteRegistration *Register_CONV_2D();
-TfLiteRegistration *Register_DEPTHWISE_CONV_2D();
-TfLiteRegistration *Register_SVDF();
-TfLiteRegistration *Register_RNN();
-TfLiteRegistration *Register_BIDIRECTIONAL_SEQUENCE_RNN();
-TfLiteRegistration *Register_UNIDIRECTIONAL_SEQUENCE_RNN();
-TfLiteRegistration *Register_EMBEDDING_LOOKUP();
-TfLiteRegistration *Register_EMBEDDING_LOOKUP_SPARSE();
-TfLiteRegistration *Register_FULLY_CONNECTED();
-TfLiteRegistration *Register_LSH_PROJECTION();
-TfLiteRegistration *Register_HASHTABLE_LOOKUP();
-TfLiteRegistration *Register_SOFTMAX();
-TfLiteRegistration *Register_CONCATENATION();
-TfLiteRegistration *Register_ADD();
-TfLiteRegistration *Register_SPACE_TO_BATCH_ND();
-TfLiteRegistration *Register_DIV();
-TfLiteRegistration *Register_SUB();
-TfLiteRegistration *Register_BATCH_TO_SPACE_ND();
-TfLiteRegistration *Register_MUL();
-TfLiteRegistration *Register_L2_NORMALIZATION();
-TfLiteRegistration *Register_LOCAL_RESPONSE_NORMALIZATION();
-TfLiteRegistration *Register_LSTM();
-TfLiteRegistration *Register_BIDIRECTIONAL_SEQUENCE_LSTM();
-TfLiteRegistration *Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
-TfLiteRegistration *Register_PAD();
-TfLiteRegistration *Register_PADV2();
-TfLiteRegistration *Register_RESHAPE();
-TfLiteRegistration *Register_RESIZE_BILINEAR();
-TfLiteRegistration *Register_SKIP_GRAM();
-TfLiteRegistration *Register_SPACE_TO_DEPTH();
-TfLiteRegistration *Register_GATHER();
-TfLiteRegistration *Register_TRANSPOSE();
-TfLiteRegistration *Register_MEAN();
-TfLiteRegistration *Register_SPLIT();
-TfLiteRegistration *Register_SQUEEZE();
-TfLiteRegistration *Register_STRIDED_SLICE();
-TfLiteRegistration *Register_EXP();
-TfLiteRegistration *Register_TOPK_V2();
-TfLiteRegistration *Register_LOG_SOFTMAX();
-TfLiteRegistration *Register_CAST();
-TfLiteRegistration *Register_DEQUANTIZE();
-TfLiteRegistration *Register_PRELU();
-TfLiteRegistration *Register_MAXIMUM();
-TfLiteRegistration *Register_MINIMUM();
-TfLiteRegistration *Register_ARG_MAX();
-TfLiteRegistration *Register_GREATER();
-TfLiteRegistration *Register_GREATER_EQUAL();
-TfLiteRegistration *Register_LESS();
-TfLiteRegistration *Register_LESS_EQUAL();
-TfLiteRegistration *Register_FLOOR();
-TfLiteRegistration *Register_NEG();
-TfLiteRegistration *Register_SELECT();
-TfLiteRegistration *Register_SLICE();
-TfLiteRegistration *Register_SIN();
-TfLiteRegistration *Register_TRANSPOSE_CONV();
-TfLiteRegistration *Register_SPARSE_TO_DENSE();
-
-BuiltinOpResolver::BuiltinOpResolver()
-{
- AddBuiltin(BuiltinOperator_RELU, Register_RELU());
- AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
- AddBuiltin(BuiltinOperator_RELU6, Register_RELU6());
- AddBuiltin(BuiltinOperator_TANH, Register_TANH());
- AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC());
- AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D());
- AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D());
- AddBuiltin(BuiltinOperator_L2_POOL_2D, Register_L2_POOL_2D());
- AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D());
- AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D());
- AddBuiltin(BuiltinOperator_SVDF, Register_SVDF());
- AddBuiltin(BuiltinOperator_RNN, Register_RNN());
- AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, Register_BIDIRECTIONAL_SEQUENCE_RNN());
- AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, Register_UNIDIRECTIONAL_SEQUENCE_RNN());
- AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP());
- AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, Register_EMBEDDING_LOOKUP_SPARSE());
- AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED());
- AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
- AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
- AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX());
- AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION());
- AddBuiltin(BuiltinOperator_ADD, Register_ADD());
- AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND());
- AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, Register_BATCH_TO_SPACE_ND());
- AddBuiltin(BuiltinOperator_MUL, Register_MUL());
- AddBuiltin(BuiltinOperator_L2_NORMALIZATION, Register_L2_NORMALIZATION());
- AddBuiltin(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, Register_LOCAL_RESPONSE_NORMALIZATION());
- AddBuiltin(BuiltinOperator_LSTM, Register_LSTM());
- AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, Register_BIDIRECTIONAL_SEQUENCE_LSTM());
- AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, Register_UNIDIRECTIONAL_SEQUENCE_LSTM());
- AddBuiltin(BuiltinOperator_PAD, Register_PAD());
- AddBuiltin(BuiltinOperator_PADV2, Register_PADV2());
- AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
- AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR());
- AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());
- AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH());
- AddBuiltin(BuiltinOperator_GATHER, Register_GATHER());
- AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE());
- AddBuiltin(BuiltinOperator_MEAN, Register_MEAN());
- AddBuiltin(BuiltinOperator_DIV, Register_DIV());
- AddBuiltin(BuiltinOperator_SUB, Register_SUB());
- AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT());
- AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE());
- AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE());
- AddBuiltin(BuiltinOperator_EXP, Register_EXP());
- AddBuiltin(BuiltinOperator_TOPK_V2, Register_TOPK_V2());
- AddBuiltin(BuiltinOperator_LOG_SOFTMAX, Register_LOG_SOFTMAX());
- AddBuiltin(BuiltinOperator_CAST, Register_CAST());
- AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE());
- AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
- AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM());
- AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM());
- AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX());
- AddBuiltin(BuiltinOperator_GREATER, Register_GREATER());
- AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL());
- AddBuiltin(BuiltinOperator_LESS, Register_LESS());
- AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL());
- AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR());
- AddBuiltin(BuiltinOperator_NEG, Register_NEG());
- AddBuiltin(BuiltinOperator_SELECT, Register_SELECT());
- AddBuiltin(BuiltinOperator_SLICE, Register_SLICE());
- AddBuiltin(BuiltinOperator_SIN, Register_SIN());
- AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV());
- AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
-
- AddCustom("TensorFlowMax", tflite::ops::custom::nnfw::Register_TensorFlowMax());
- AddCustom("RSQRT", tflite::ops::custom::nnfw::Register_RSQRT());
- AddCustom("SquaredDifference", tflite::ops::custom::nnfw::Register_SquaredDifference());
-}
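-
-// A minimal usage sketch (hypothetical driver code; assumes 'model' is a
-// loaded ::tflite::FlatBufferModel). This resolver is a drop-in replacement
-// for the stock builtin resolver:
-//
-//   ::tflite::ops::builtin::BuiltinOpResolver resolver;
-//   std::unique_ptr<::tflite::Interpreter> interpreter;
-//   ::tflite::InterpreterBuilder(*model, resolver)(&interpreter);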
-
-} // namespace builtin
-} // namespace ops
-} // namespace tflite
diff --git a/libs/support/tflite/src/nnapi_delegate.cpp b/libs/support/tflite/src/nnapi_delegate.cpp
deleted file mode 100644
index 1eada4bca..000000000
--- a/libs/support/tflite/src/nnapi_delegate.cpp
+++ /dev/null
@@ -1,720 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This code is derived from the following file (in TensorFlow)
-// 'externals/tensorflow/tensorflow/contrib/lite/nnapi_delegate.cc'
-#include "support/tflite/nnapi_delegate.h"
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <cstdarg>  // va_list (used by FATAL)
-#include <cstdio>   // vfprintf
-#include <cstdlib>  // exit, atoi
-#include "tensorflow/contrib/lite/builtin_op_data.h"
-#include "tensorflow/contrib/lite/error_reporter.h"
-#include "tensorflow/contrib/lite/model.h"
-#include "NeuralNetworksShim.h"
-#include "NeuralNetworksExShim.h"
-
-#ifdef __ANDROID__
-#include <sys/system_properties.h>
-#endif
-
-namespace nnfw
-{
-
-// TODO(aselle): FATAL leaves resources hanging.
-void FATAL(const char* format, ...) {
- va_list args;
- va_start(args, format);
- vfprintf(stderr, format, args);
- va_end(args);
- fflush(stderr);
- exit(1);
-}
-
-// TODO(aselle): Change the error model to use status codes.
-#define CHECK_TFLITE_SUCCESS(x) \
- if (x != kTfLiteOk) { \
- FATAL("Aborting since tflite returned failure."); \
- }
-
-#define CHECK_NN(x) \
- if (x != ANEURALNETWORKS_NO_ERROR) { \
-    FATAL("Aborting since NNAPI returned failure."); \
- }
-
-namespace {
-
-int32_t GetAndroidSdkVersion() {
-#ifdef __ANDROID__
- const char* sdkProp = "ro.build.version.sdk";
- char sdkVersion[PROP_VALUE_MAX];
- int length = __system_property_get(sdkProp, sdkVersion);
- if (length != 0) {
- for (int i = 0; i < length; ++i) {
- int digit = sdkVersion[i] - '0';
- if (digit < 0 || digit > 9) {
-        // Non-numeric SDK version; assume it's higher than expected.
- return 0xFFFF;
- }
- }
- return atoi(sdkVersion);
- }
- FATAL("No %s prop", sdkProp);
-#endif // __ANDROID__
- return 0;
-}
-
-static const int32_t kAndroidSdkVersion = GetAndroidSdkVersion();
-
-} // namespace
-
-NNAPIAllocation::NNAPIAllocation(const char* filename,
- ::tflite::ErrorReporter* error_reporter)
- : MMAPAllocation(filename, error_reporter) {
- if (mmapped_buffer_ != MAP_FAILED)
- CHECK_NN(ANeuralNetworksMemory_createFromFd(buffer_size_bytes_, PROT_READ,
- mmap_fd_, 0, &handle_));
-}
-
-NNAPIAllocation::~NNAPIAllocation() {
- if (handle_) {
- ANeuralNetworksMemory_free(handle_);
- }
-}
-
-NNAPIDelegate::~NNAPIDelegate() {
- if (nn_compiled_model_) {
- ANeuralNetworksCompilation_free(nn_compiled_model_);
- nn_compiled_model_ = nullptr;
- }
- if (nn_model_) {
- ANeuralNetworksModel_free(nn_model_);
- nn_model_ = nullptr;
- // TODO(aselle): Is this thread-safe and callable multiple times?
- }
- // ANeuralNetworksShutdown();
-}
-
-// Adds the tensors of the interpreter to the NN API model.
-// Returns the number of operands added.
-uint32_t addTensorOperands(tflite::Interpreter* interpreter,
- ANeuralNetworksModel* nn_model,
- const std::vector<uint32_t>& skip_list) {
- uint32_t next_id = 0;
- for (size_t i = 0; i < interpreter->tensors_size(); i++) {
-    // Skip temporary tensors.
- bool shouldSkip = false;
- for (auto skip_idx : skip_list) {
- if (i == skip_idx) {
- shouldSkip = true;
- break;
- }
- }
- if (shouldSkip) continue;
-
- int32_t nn_type = 0;
-    // NNAPI requires the scale of 32-bit float tensors to be zero; tflite doesn't care.
- float scale = 0.0f;
- int32_t zeroPoint = 0;
- TfLiteTensor* tensor = interpreter->tensor(i);
- switch (tensor->type) {
- case kTfLiteNoType:
- // Tensors added during initialization of Ops don't have a type yet and
- // should not be registered with the NNAPI.
- continue;
- case kTfLiteFloat32:
- nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
- break;
- case kTfLiteUInt8:
- nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
- scale = tensor->params.scale;
-        // FIXME The next line is a workaround because zero scale is currently
-        // passed down from TF Lite. Note that the latest NeuralNetworks.h (see
-        // https://android.googlesource.com/platform/frameworks/ml/+/master/nn/runtime/include/NeuralNetworks.h)
-        // requires scale to be greater than zero. Remove this workaround once
-        // the scale value is correctly passed.
- scale = (scale == 0.0f) ? 1.0f : scale;
- zeroPoint = tensor->params.zero_point;
- break;
- case kTfLiteInt32:
- nn_type = ANEURALNETWORKS_TENSOR_INT32;
- scale = tensor->params.scale;
- zeroPoint = tensor->params.zero_point;
- break;
- default:
- FATAL("Unsupported type.");
- }
-    // TODO(aselle): Note, many of these are intermediate results. Do I ever
-    //               need to specify these sizes? I am currently calling
-    //               setValue on all of them below, but I shouldn't need to
-    //               in the future.
-    // Answer(jeanluc): If all the operators can set the dimensions correctly,
-    //                  you won't need to.
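-    // For instance, a 1x224x224x3 kTfLiteFloat32 tensor (a hypothetical
-    // shape, for illustration only) maps to an operand of type
-    // {ANEURALNETWORKS_TENSOR_FLOAT32, 4, {1, 224, 224, 3}, 0.0f, 0}.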
- ANeuralNetworksOperandType operand_type{
- nn_type, static_cast<uint32_t>(tensor->dims->size),
- reinterpret_cast<uint32_t*>(tensor->dims->data), scale, zeroPoint};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type));
-    // TODO(aselle): Based on Michael's suggestion, limiting this to
-    // read-only memory.
- if (tensor->allocation_type == kTfLiteMmapRo) {
- if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
- static_cast<const ::tflite::Allocation*>(tensor->allocation))) {
- CHECK_NN(ANeuralNetworksModel_setOperandValueFromMemory(
- nn_model, next_id, alloc->memory(), alloc->offset(tensor->data.raw),
- tensor->bytes));
- } else {
- CHECK_NN(ANeuralNetworksModel_setOperandValue(
- nn_model, next_id, tensor->data.raw, tensor->bytes));
- }
- } else if (tensor->bytes == 0) {
-      // These zero-byte tensors are reserved placeholders for optional tensors.
- CHECK_NN(
- ANeuralNetworksModel_setOperandValue(nn_model, next_id, nullptr, 0));
- }
-
- ++next_id;
- }
- return next_id;
-}
-
-// Adds the operations and their parameters to the NN API model.
-// 'next_id' is the ID to assign to the next operand added to the model.
-void AddOpsAndParams(tflite::Interpreter* interpreter,
- ANeuralNetworksModel* nn_model, uint32_t next_id,
- std::vector<int>* model_state_inputs,
- std::vector<int>* model_state_outputs) {
- for (size_t i = 0; i < interpreter->nodes_size(); i++) {
- const auto* node_and_registration = interpreter->node_and_registration(i);
- const TfLiteNode& node = node_and_registration->first;
- const TfLiteRegistration& registration = node_and_registration->second;
- tflite::BuiltinOperator builtin =
- static_cast<tflite::BuiltinOperator>(registration.builtin_code);
-
- // Add the parameters.
- std::vector<uint32_t> augmented_inputs(
- node.inputs->data, node.inputs->data + node.inputs->size);
- std::vector<uint32_t> augmented_outputs(
- node.outputs->data, node.outputs->data + node.outputs->size);
-
- auto add_scalar_int32 = [&nn_model, &augmented_inputs,
- &next_id](int value) {
- ANeuralNetworksOperandType operand_type{.type = ANEURALNETWORKS_INT32};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
- CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
- sizeof(int32_t)))
- augmented_inputs.push_back(next_id++);
- };
-
- auto add_scalar_float32 = [&nn_model, &augmented_inputs,
- &next_id](float value) {
- ANeuralNetworksOperandType operand_type{.type = ANEURALNETWORKS_FLOAT32};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
- CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
- sizeof(float)))
- augmented_inputs.push_back(next_id++);
- };
-
- // Handle state tensors of RNN, LSTM, SVDF.
- // For each state_out tensor, a corresponding state_in operand needs to be
- // created for NNAPI.
- auto duplicate_state_tensor_float32 =
- [interpreter, &nn_model, &next_id, &augmented_inputs,
- &model_state_inputs, &model_state_outputs](int tensor_id) {
- const TfLiteTensor* tensor = interpreter->tensor(tensor_id);
- ANeuralNetworksOperandType operand_type{
- ANEURALNETWORKS_TENSOR_FLOAT32,
- static_cast<uint32_t>(tensor->dims->size),
- reinterpret_cast<uint32_t*>(tensor->dims->data),
- tensor->params.scale, tensor->params.zero_point};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type));
- augmented_inputs.push_back(next_id);
- model_state_inputs->push_back(next_id);
- model_state_outputs->push_back(tensor_id);
- next_id++;
- };
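-    // For example, for RNN the hidden-state output tensor gets a fresh
-    // state_in operand appended to the node inputs here; BuildGraph later
-    // exposes these pairs as extra model inputs/outputs so that Invoke can
-    // feed each state_out back into its state_in on the next call.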
-
-    auto add_add_params = [&add_scalar_int32]() { add_scalar_int32(0); };  // fused activation: none
-
- auto add_pooling_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLitePoolParams*>(data);
- add_scalar_int32(builtin->padding);
- add_scalar_int32(builtin->stride_width);
- add_scalar_int32(builtin->stride_height);
- add_scalar_int32(builtin->filter_width);
- add_scalar_int32(builtin->filter_height);
- add_scalar_int32(builtin->activation);
- };
-
- auto add_convolution_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteConvParams*>(data);
- add_scalar_int32(builtin->padding);
- add_scalar_int32(builtin->stride_width);
- add_scalar_int32(builtin->stride_height);
- add_scalar_int32(builtin->activation);
- };
-
- auto add_depthwise_conv_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteDepthwiseConvParams*>(data);
- add_scalar_int32(builtin->padding);
- add_scalar_int32(builtin->stride_width);
- add_scalar_int32(builtin->stride_height);
- add_scalar_int32(builtin->depth_multiplier);
- add_scalar_int32(builtin->activation);
- };
-
- auto add_fully_connected_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(data);
- add_scalar_int32(builtin->activation);
- };
-
- auto add_concatenation_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteConcatenationParams*>(data);
- add_scalar_int32(builtin->axis);
- if (builtin->activation != kTfLiteActNone) {
- FATAL("Concatenation does not support fused activation in NNAPI");
- }
- };
-
- auto add_softmax_params = [&add_scalar_float32](void* data) {
- auto builtin = reinterpret_cast<TfLiteSoftmaxParams*>(data);
- add_scalar_float32(builtin->beta);
- };
-
- auto add_space_to_depth_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteSpaceToDepthParams*>(data);
- add_scalar_int32(builtin->block_size);
- };
-
- auto add_lstm_params = [&add_scalar_int32,
- &add_scalar_float32](void* data) {
- auto builtin = reinterpret_cast<TfLiteLSTMParams*>(data);
- add_scalar_int32(builtin->activation);
- add_scalar_float32(builtin->cell_clip);
- add_scalar_float32(builtin->proj_clip);
- };
-
-    // LSTM in NNAPI requires the scratch tensor as an output operand.
- auto add_lstm_scratch_tensor_float32 = [interpreter, &node, &nn_model,
- &next_id, &augmented_outputs]() {
- int scratch_buffer_index = node.temporaries->data[0];
- const TfLiteTensor* tensor = interpreter->tensor(scratch_buffer_index);
- ANeuralNetworksOperandType operand_type{
- ANEURALNETWORKS_TENSOR_FLOAT32,
- static_cast<uint32_t>(tensor->dims->size),
- reinterpret_cast<uint32_t*>(tensor->dims->data), tensor->params.scale,
- tensor->params.zero_point};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type));
- augmented_outputs.insert(augmented_outputs.begin(), next_id++);
- };
-
- auto add_mean_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteMeanParams*>(data);
- add_scalar_int32(builtin->keep_dims);
- };
-
- auto add_svdf_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteSVDFParams*>(data);
- add_scalar_int32(builtin->rank);
- add_scalar_int32(builtin->activation);
- };
-
- auto add_rnn_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteRNNParams*>(data);
- add_scalar_int32(builtin->activation);
- };
-
- // Handle optional input tensors.
- auto add_optional_tensors = [&nn_model, &augmented_inputs,
- &next_id](int nn_type) {
- for (size_t idx = 0; idx < augmented_inputs.size(); idx++) {
- if (augmented_inputs[idx] == kOptionalTensor) {
- const std::vector<uint32_t> dim = {0, 0};
- ANeuralNetworksOperandType operand_type{nn_type, 2, dim.data(), 0, 0};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
- CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id,
- nullptr, 0))
- augmented_inputs[idx] = next_id++;
- }
- }
- };
-
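-    // The lambdas for ops that extend stock TensorFlow Lite live in the
-    // separate include below, keeping the diff against upstream small.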
-    int nnapi_version = 10;  // 10 == NNAPI 1.0; bumped to 11 below for ops that need NNAPI 1.1
-#include "nnapi_delegate_ex_AddOpsAndParams_lambda.inc"
-
- ANeuralNetworksOperationType nn_op_type;
-
- switch (builtin) {
- case tflite::BuiltinOperator_ADD:
- nn_op_type = ANEURALNETWORKS_ADD;
- add_add_params();
- break;
- case tflite::BuiltinOperator_MUL:
- nn_op_type = ANEURALNETWORKS_MUL;
- add_add_params();
- break;
- case tflite::BuiltinOperator_AVERAGE_POOL_2D:
- add_pooling_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_AVERAGE_POOL_2D;
- break;
- case tflite::BuiltinOperator_MAX_POOL_2D:
- add_pooling_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_MAX_POOL_2D;
- break;
- case tflite::BuiltinOperator_L2_POOL_2D:
- add_pooling_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_L2_POOL_2D;
- break;
- case tflite::BuiltinOperator_CONV_2D:
- add_convolution_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_CONV_2D;
- break;
- case tflite::BuiltinOperator_RELU:
- nn_op_type = ANEURALNETWORKS_RELU;
- break;
- case tflite::BuiltinOperator_RELU_N1_TO_1:
- nn_op_type = ANEURALNETWORKS_RELU1;
- break;
- case tflite::BuiltinOperator_RELU6:
- nn_op_type = ANEURALNETWORKS_RELU6;
- break;
- case tflite::BuiltinOperator_TANH:
- nn_op_type = ANEURALNETWORKS_TANH;
- break;
- case tflite::BuiltinOperator_FLOOR:
- nn_op_type = ANEURALNETWORKS_FLOOR;
- break;
- case tflite::BuiltinOperator_LOGISTIC:
- nn_op_type = ANEURALNETWORKS_LOGISTIC;
- break;
- case tflite::BuiltinOperator_DEPTHWISE_CONV_2D:
- add_depthwise_conv_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_DEPTHWISE_CONV_2D;
- break;
- case tflite::BuiltinOperator_CONCATENATION:
- add_concatenation_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_CONCATENATION;
- break;
- case tflite::BuiltinOperator_SOFTMAX:
- add_softmax_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_SOFTMAX;
- break;
- case tflite::BuiltinOperator_FULLY_CONNECTED:
- add_fully_connected_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_FULLY_CONNECTED;
- break;
- case tflite::BuiltinOperator_RESHAPE:
- nn_op_type = ANEURALNETWORKS_RESHAPE;
- // add_reshape_params(node.builtin_data);
- break;
- case tflite::BuiltinOperator_RESIZE_BILINEAR:
- add_resize_bilinear_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_RESIZE_BILINEAR;
- break;
- case tflite::BuiltinOperator_SPACE_TO_DEPTH:
- add_space_to_depth_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_SPACE_TO_DEPTH;
- break;
- case tflite::BuiltinOperator_LSTM: {
- duplicate_state_tensor_float32(
- node.outputs->data[/*kOutputStateTensor*/ 0]);
- duplicate_state_tensor_float32(
- node.outputs->data[/*kCellStateTensor*/ 1]);
- add_lstm_params(node.builtin_data);
- add_lstm_scratch_tensor_float32();
- add_optional_tensors(ANEURALNETWORKS_TENSOR_FLOAT32);
- nn_op_type = ANEURALNETWORKS_LSTM;
- break;
- }
- case tflite::BuiltinOperator_DEQUANTIZE:
- nn_op_type = ANEURALNETWORKS_DEQUANTIZE;
- break;
- case tflite::BuiltinOperator_SVDF: {
- duplicate_state_tensor_float32(node.outputs->data[/*kStateTensor*/ 0]);
- add_svdf_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_SVDF;
- break;
- }
- case tflite::BuiltinOperator_RNN: {
- duplicate_state_tensor_float32(
- node.outputs->data[/*kHiddenStateTensor*/ 0]);
- add_rnn_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_RNN;
- break;
- }
- case tflite::BuiltinOperator_EMBEDDING_LOOKUP:
- nn_op_type = ANEURALNETWORKS_EMBEDDING_LOOKUP;
- break;
- case tflite::BuiltinOperator_PAD:
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_PAD;
- break;
- case tflite::BuiltinOperator_MEAN:
- nnapi_version = 11; // require NNAPI 1.1
- add_mean_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_MEAN;
- break;
- case tflite::BuiltinOperator_DIV:
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_DIV;
- add_add_params();
- break;
- case tflite::BuiltinOperator_SUB:
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_SUB;
- add_add_params();
- break;
- case tflite::BuiltinOperator_STRIDED_SLICE:
- add_strided_slice_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_STRIDED_SLICE;
- break;
- case tflite::BuiltinOperator_CAST:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_CAST_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- case tflite::BuiltinOperator_TOPK_V2:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_TOPK_V2_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- case tflite::BuiltinOperator_GATHER:
- add_gather_ex_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_GATHER_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- case tflite::BuiltinOperator_SPLIT:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_SPLIT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- case tflite::BuiltinOperator_TRANSPOSE:
- nn_op_type = ANEURALNETWORKS_TRANSPOSE;
-        // The parameters are almost the same as for RESHAPE.
- break;
- case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
- case tflite::BuiltinOperator_LSH_PROJECTION:
- case tflite::BuiltinOperator_HASHTABLE_LOOKUP:
- case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN:
- case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN:
- case tflite::BuiltinOperator_EMBEDDING_LOOKUP_SPARSE:
- case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM:
- case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
- case tflite::BuiltinOperator_L2_NORMALIZATION:
- case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION:
- case tflite::BuiltinOperator_PADV2:
- case tflite::BuiltinOperator_CALL:
- case tflite::BuiltinOperator_SKIP_GRAM:
- case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
- case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
- case tflite::BuiltinOperator_SQUEEZE:
- case tflite::BuiltinOperator_EXP:
- case tflite::BuiltinOperator_LOG_SOFTMAX:
- case tflite::BuiltinOperator_DELEGATE:
- case tflite::BuiltinOperator_PRELU:
- case tflite::BuiltinOperator_MAXIMUM:
- case tflite::BuiltinOperator_MINIMUM:
- case tflite::BuiltinOperator_ARG_MAX:
- case tflite::BuiltinOperator_GREATER:
- case tflite::BuiltinOperator_GREATER_EQUAL:
- case tflite::BuiltinOperator_LESS:
- case tflite::BuiltinOperator_LESS_EQUAL:
- case tflite::BuiltinOperator_NEG:
- case tflite::BuiltinOperator_SELECT:
- case tflite::BuiltinOperator_SLICE:
- case tflite::BuiltinOperator_SIN:
- case tflite::BuiltinOperator_TRANSPOSE_CONV:
- case tflite::BuiltinOperator_SPARSE_TO_DENSE:
- FATAL("Op code %d is currently not delegated to NNAPI", builtin);
- nn_op_type = -1; // set to invalid
- break;
- case tflite::BuiltinOperator_CUSTOM:
- std::string custom_name(registration.custom_name);
- if (custom_name.compare("TensorFlowMax") == 0) {
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_TENSORFLOW_MAX_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- }
- else if (custom_name.compare("RSQRT") == 0) {
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_RSQRT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- }
- else if (custom_name.compare("SquaredDifference") == 0) {
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- }
-
- FATAL("Custom operations are not supported when using NNAPI.");
- nn_op_type = -1; // set to invalid
- break;
- }
-
- //if (nnapi_version == 11 && kAndroidSdkVersion < 28) {
- // FATAL("Op %d needs NNAPI1.1", builtin);
- //}
-
- // Add the operation.
- CHECK_NN(ANeuralNetworksModel_addOperation(
- nn_model, nn_op_type, static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(augmented_outputs.size()),
- reinterpret_cast<uint32_t*>(augmented_outputs.data())));
- }
-}
-
-TfLiteStatus NNAPIDelegate::BuildGraph(::tflite::Interpreter* interpreter) {
-  // TODO(aselle): This is not correct. Need to handle resize invalidation.
- if (nn_model_ && nn_compiled_model_) return kTfLiteOk;
-
- if (!nn_model_) {
- CHECK_NN(ANeuralNetworksModel_create(&nn_model_));
-
- // Find all the temporary tensors and put them in a skip_list.
- std::vector<uint32_t> skip_list;
- for (size_t i = 0; i < interpreter->nodes_size(); i++) {
- const auto* node_and_registration = interpreter->node_and_registration(i);
- const TfLiteNode& node = node_and_registration->first;
- if (node.temporaries != nullptr) {
- for (int j = 0; j < node.temporaries->size; j++) {
- skip_list.push_back(static_cast<uint32_t>(node.temporaries->data[j]));
- }
- }
- }
-
- uint32_t next_id = addTensorOperands(interpreter, nn_model_, skip_list);
- AddOpsAndParams(interpreter, nn_model_, next_id, &model_states_inputs_,
- &model_states_outputs_);
-
- std::vector<int> augmented_inputs = interpreter->inputs();
- std::vector<int> augmented_outputs = interpreter->outputs();
-
- // All state tensors input/output need to be treated as model input/output.
- augmented_inputs.insert(augmented_inputs.end(),
- model_states_inputs_.begin(),
- model_states_inputs_.end());
- augmented_outputs.insert(augmented_outputs.end(),
- model_states_outputs_.begin(),
- model_states_outputs_.end());
-
- CHECK_NN(ANeuralNetworksModel_identifyInputsAndOutputs(
- nn_model_, static_cast<uint32_t>(augmented_inputs.size()),
- reinterpret_cast<const uint32_t*>(augmented_inputs.data()),
- static_cast<uint32_t>(augmented_outputs.size()),
- reinterpret_cast<const uint32_t*>(augmented_outputs.data())));
- CHECK_NN(ANeuralNetworksModel_finish(nn_model_));
- }
- if (!nn_compiled_model_) {
- CHECK_NN(ANeuralNetworksCompilation_create(nn_model_, &nn_compiled_model_));
- CHECK_NN(ANeuralNetworksCompilation_finish(nn_compiled_model_));
- }
- return kTfLiteOk;
-}
-
-TfLiteStatus NNAPIDelegate::Invoke(::tflite::Interpreter* interpreter) {
- if (!nn_model_) {
- TF_LITE_ENSURE_STATUS(BuildGraph(interpreter));
- }
-
- ANeuralNetworksExecution* execution = nullptr;
- CHECK_NN(ANeuralNetworksExecution_create(nn_compiled_model_, &execution));
-
-  // Currently performs a deep copy of each input buffer.
- for (size_t i = 0; i < interpreter->inputs().size(); i++) {
- int input = interpreter->inputs()[i];
- // TODO(aselle): Is this what we want or do we want input instead?
-    // TODO(aselle): This should maybe be called setInputValue, to be consistent.
- TfLiteTensor* tensor = interpreter->tensor(input);
- CHECK_NN(ANeuralNetworksExecution_setInput(
- execution, i, nullptr, tensor->data.raw, tensor->bytes));
- }
-
-  // Tell the NN API where to place the final data.
- for (size_t i = 0; i < interpreter->outputs().size(); i++) {
- int output = interpreter->outputs()[i];
- TfLiteTensor* tensor = interpreter->tensor(output);
- CHECK_NN(ANeuralNetworksExecution_setOutput(
- execution, i, nullptr, tensor->data.raw, tensor->bytes));
- }
-
-  // The state_out of the previous invocation needs to be mapped to the
-  // state_in of the current invocation.
- for (size_t i = 0; i < model_states_outputs_.size(); i++) {
- int state_tensor_idx = model_states_outputs_[i];
- TfLiteTensor* tensor = interpreter->tensor(state_tensor_idx);
- // Here we are using a deep copy for state_in tensors so that we are not
-    // reading and writing into the same buffer during an invocation.
-    // TODO(miaowang): use a double shared buffer to minimize the copies.
- CHECK_NN(ANeuralNetworksExecution_setInput(
- execution, i + interpreter->inputs().size(), nullptr, tensor->data.raw,
- tensor->bytes));
- // Tell NNAPI where to output the state_out.
- CHECK_NN(ANeuralNetworksExecution_setOutput(
- execution, i + interpreter->outputs().size(), nullptr, tensor->data.raw,
- tensor->bytes));
- }
-
- // Currently use blocking compute.
- ANeuralNetworksEvent* event = nullptr;
- CHECK_NN(ANeuralNetworksExecution_startCompute(execution, &event));
- CHECK_NN(ANeuralNetworksEvent_wait(event));
- ANeuralNetworksEvent_free(event);
- ANeuralNetworksExecution_free(execution);
-
-#if 0
- printf("From the NN API:\n");
- TfLiteTensor* tensor = interpreter->tensor(interpreter->outputs()[0]);
- if (float* data =
- interpreter->typed_tensor<float>(interpreter->outputs()[0])) {
- size_t num = tensor->bytes / sizeof(float);
- for (float* p = data; p < data + num; p++) {
- printf(" %f", *p);
- }
- printf("\n");
- }
-#endif
-
- return kTfLiteOk;
-}
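-
-// A minimal driver sketch (hypothetical; error handling elided):
-//
-//   nnfw::NNAPIDelegate delegate;
-//   CHECK_TFLITE_SUCCESS(delegate.BuildGraph(interpreter.get()));
-//   // ... write the interpreter's input tensors ...
-//   CHECK_TFLITE_SUCCESS(delegate.Invoke(interpreter.get()));
-//   // ... read the interpreter's output tensors ...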
-
-} // namespace nnfw
-
-// clang-format on
diff --git a/libs/support/tflite/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc b/libs/support/tflite/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
deleted file mode 100644
index ea485fe45..000000000
--- a/libs/support/tflite/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
+++ /dev/null
@@ -1,41 +0,0 @@
-// This file is included from AddOpsAndParams defined in nnapi_delegate.cc
-// and contains lambdas that extend the original TensorFlow Lite implementation.
- auto add_resize_bilinear_params = [&add_scalar_int32, &interpreter, &augmented_inputs](void* data) {
- auto builtin = reinterpret_cast<TfLiteResizeBilinearParams*>(data);
- if (builtin->align_corners) {
- FATAL("Resize bilinear does not support align corners in NNAPI");
- }
-
- TfLiteTensor* tensor = interpreter->tensor(augmented_inputs.back());
- assert(tensor->type == kTfLiteInt32);
- assert(tensor->bytes == sizeof(int)*2);
- augmented_inputs.pop_back();
-
- int height = ((int*)(tensor->data.raw))[1];
- int width = ((int*)(tensor->data.raw))[0];
- add_scalar_int32(height);
- add_scalar_int32(width);
- };
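- // In effect, the constant two-element int32 size tensor is popped from the
- // node inputs and re-added as two scalar operands, the form NNAPI's
- // RESIZE_BILINEAR expects.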
-
- auto add_strided_slice_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteStridedSliceParams*>(data);
- add_scalar_int32(builtin->begin_mask);
- add_scalar_int32(builtin->end_mask);
-    // ellipsis_mask and new_axis_mask are not supported by the NN runtime
-    // (the tflite interpreter supports both).
- if (builtin->ellipsis_mask) {
-      FATAL("STRIDED_SLICE does not support ellipsis_mask in NNAPI");
- }
- if (builtin->new_axis_mask) {
-      FATAL("STRIDED_SLICE does not support new_axis_mask in NNAPI");
- }
- add_scalar_int32(builtin->shrink_axis_mask);
- };
-
- auto add_gather_ex_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteGatherParams*>(data);
- add_scalar_int32(builtin->axis);
- if (builtin->axis != 0) {
-      FATAL("GATHER does not support axis != 0 in NNAPI");
- }
- };