Diffstat (limited to 'runtimes/libs/tflite')
-rw-r--r--  runtimes/libs/tflite/CMakeLists.txt | 26
-rw-r--r--  runtimes/libs/tflite/include/tflite/Assert.h | 45
-rw-r--r--  runtimes/libs/tflite/include/tflite/Diff.h | 200
-rw-r--r--  runtimes/libs/tflite/include/tflite/FeatureView.h | 108
-rw-r--r--  runtimes/libs/tflite/include/tflite/InputIndex.h | 60
-rw-r--r--  runtimes/libs/tflite/include/tflite/InterpreterSession.h | 99
-rw-r--r--  runtimes/libs/tflite/include/tflite/NNAPISession.h | 101
-rw-r--r--  runtimes/libs/tflite/include/tflite/OutputIndex.h | 60
-rw-r--r--  runtimes/libs/tflite/include/tflite/Quantization.h | 44
-rw-r--r--  runtimes/libs/tflite/include/tflite/Session.h | 69
-rw-r--r--  runtimes/libs/tflite/include/tflite/TensorLogger.h | 168
-rw-r--r--  runtimes/libs/tflite/include/tflite/TensorShapeUtils.h | 64
-rw-r--r--  runtimes/libs/tflite/include/tflite/TensorUtils.h | 54
-rw-r--r--  runtimes/libs/tflite/include/tflite/TensorView.h | 120
-rw-r--r--  runtimes/libs/tflite/include/tflite/ext/kernels/Abs.h | 41
-rw-r--r--  runtimes/libs/tflite/include/tflite/ext/kernels/CustomOps.h | 62
-rw-r--r--  runtimes/libs/tflite/include/tflite/ext/kernels/SquaredDifference.h | 76
-rw-r--r--  runtimes/libs/tflite/include/tflite/ext/kernels/TensorFlowMax.h | 75
-rw-r--r--  runtimes/libs/tflite/include/tflite/ext/kernels/TensorFlowSum.h | 41
-rw-r--r--  runtimes/libs/tflite/include/tflite/ext/kernels/register.h | 42
-rw-r--r--  runtimes/libs/tflite/include/tflite/ext/nnapi_delegate.h | 96
-rw-r--r--  runtimes/libs/tflite/include/tflite/interp/Builder.h | 53
-rw-r--r--  runtimes/libs/tflite/include/tflite/interp/FlatBufferBuilder.h | 64
-rw-r--r--  runtimes/libs/tflite/include/tflite/interp/FunctionBuilder.h | 67
-rw-r--r--  runtimes/libs/tflite/src/Diff.cpp | 596
-rw-r--r--  runtimes/libs/tflite/src/FeatureView.cpp | 70
-rw-r--r--  runtimes/libs/tflite/src/Quantization.cpp | 22
-rw-r--r--  runtimes/libs/tflite/src/TensorShapeUtils.cpp | 29
-rw-r--r--  runtimes/libs/tflite/src/TensorView.test.cpp | 53
-rw-r--r--  runtimes/libs/tflite/src/ext/kernels/Abs.cpp | 103
-rw-r--r--  runtimes/libs/tflite/src/ext/kernels/SquaredDifference.cpp | 109
-rw-r--r--  runtimes/libs/tflite/src/ext/kernels/TensorFlowMax.cpp | 405
-rw-r--r--  runtimes/libs/tflite/src/ext/kernels/TensorFlowSum.cpp | 400
-rw-r--r--  runtimes/libs/tflite/src/ext/kernels/register.cpp | 247
-rw-r--r--  runtimes/libs/tflite/src/ext/nnapi_delegate.cpp | 1238
-rw-r--r--  runtimes/libs/tflite/src/ext/nnapi_delegate_ex_AddOpsAndParams_lambda.inc | 123
-rw-r--r--  runtimes/libs/tflite/src/interp/FlatBufferBuilder.cpp | 40
-rw-r--r--  runtimes/libs/tflite/src/interp/FunctionBuilder.cpp | 34
38 files changed, 5304 insertions(+), 0 deletions(-)
diff --git a/runtimes/libs/tflite/CMakeLists.txt b/runtimes/libs/tflite/CMakeLists.txt
new file mode 100644
index 000000000..8b85e7183
--- /dev/null
+++ b/runtimes/libs/tflite/CMakeLists.txt
@@ -0,0 +1,26 @@
+nnfw_find_package(TensorFlowLite REQUIRED)
+
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
+
+# NOTE For now NNFW supports two TFLITE versions - v1.12 for Ubuntu and v1.9 for Tizen.
+# So the TFLITE version must be specified explicitly to distinguish between them.
+if(NOT TFLITE_MAJOR_VER OR NOT TFLITE_MINOR_VER)
+ message(FATAL_ERROR "TFLITE_MAJOR_VER and TFLITE_MINOR_VER should be defined")
+endif(NOT TFLITE_MAJOR_VER OR NOT TFLITE_MINOR_VER)
+
+add_library(nnfw_lib_tflite STATIC ${SOURCES})
+set_target_properties(nnfw_lib_tflite PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(nnfw_lib_tflite PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+target_link_libraries(nnfw_lib_tflite PUBLIC tensorflow-lite)
+target_link_libraries(nnfw_lib_tflite PUBLIC nnfw_lib_misc nnfw_lib_rua_shim)
+target_link_libraries(nnfw_lib_tflite PRIVATE ${LIB_PTHREAD} dl)
+target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_common)
+target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_coverage)
+
+target_compile_definitions(nnfw_lib_tflite PUBLIC TFLITE_MAJOR_VER=${TFLITE_MAJOR_VER}
+ TFLITE_MINOR_VER=${TFLITE_MINOR_VER})
+
+add_executable(nnfw_lib_tflite_test_TensorView src/TensorView.test.cpp)
+target_link_libraries(nnfw_lib_tflite_test_TensorView nnfw_lib_tflite)
diff --git a/runtimes/libs/tflite/include/tflite/Assert.h b/runtimes/libs/tflite/include/tflite/Assert.h
new file mode 100644
index 000000000..148ac7e01
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/Assert.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Assert.h
+ * @brief This file contains assertion helper macros
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_ASSERT_H__
+#define __NNFW_TFLITE_ASSERT_H__
+
+#include "tensorflow/lite/context.h"
+
+#include <sstream>
+
+#define STR_DETAIL(value) #value
+#define STR(value) STR_DETAIL(value)
+
+#define TFLITE_ENSURE(exp) \
+ { \
+ const TfLiteStatus status = (exp); \
+ \
+ if (status != kTfLiteOk) \
+ { \
+ std::ostringstream ss; \
+ ss << #exp << " failed (" << __FILE__ << ":" << __LINE__ << ")"; \
+ throw std::runtime_error{ss.str()}; \
+ } \
+ }
+
+#endif // __NNFW_TFLITE_ASSERT_H__
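A minimal usage sketch for the TFLITE_ENSURE macro above (the interpreter argument is a hypothetical caller-provided object); any TfLite call returning TfLiteStatus can be wrapped, and a result other than kTfLiteOk throws std::runtime_error with the failing expression, file, and line:

#include "tflite/Assert.h"
#include "tensorflow/lite/interpreter.h"

void prepareOrThrow(::tflite::Interpreter &interp)
{
  // Throws std::runtime_error if AllocateTensors() does not return kTfLiteOk
  TFLITE_ENSURE(interp.AllocateTensors());
}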
diff --git a/runtimes/libs/tflite/include/tflite/Diff.h b/runtimes/libs/tflite/include/tflite/Diff.h
new file mode 100644
index 000000000..55b7526ab
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/Diff.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Diff.h
+ * @brief This file contains classes for testing correctness of the implementation
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_DIFF_H__
+#define __NNFW_TFLITE_DIFF_H__
+
+#include "tensorflow/lite/interpreter.h"
+
+#include "misc/tensor/Index.h"
+#include "misc/tensor/Diff.h"
+#include "misc/tensor/Shape.h"
+#include "misc/tensor/Comparator.h"
+
+#include "tflite/TensorView.h"
+
+#include <functional>
+#include <vector>
+
+/**
+ * @brief Class to define TfLite interpreter match application
+ */
+class TfLiteInterpMatchApp
+{
+public:
+ /**
+ * @brief Construct a new TfLiteInterpMatchApp object with Comparator
+   * @param[in] comparator Comparator object for tensor comparison
+ */
+ TfLiteInterpMatchApp(const nnfw::misc::tensor::Comparator &comparator)
+ : _verbose{false}, _comparator(comparator)
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+   * @brief Get a reference to the verbose value for debugging information
+   * @return Reference to the verbose value
+ */
+ int &verbose(void) { return _verbose; }
+
+private:
+ int _verbose;
+
+public:
+ /**
+   * @brief Run two interpreters and check whether their outputs match
+   * @param[in] pure Interpreter producing the expected results (pure TfLite)
+   * @param[in] nnapi Interpreter producing the obtained results (through NNAPI)
+   * @return @c true if the two interpreters' results are the same, otherwise @c false
+ */
+ bool run(::tflite::Interpreter &pure, ::tflite::Interpreter &nnapi) const;
+ /**
+ * @brief Compare two TensorView values and return the match result
+ * @param[in] expected TensorView object to read expected values
+ * @param[in] obtained TensorView object to read obtained values
+ * @param[in] id Tensor ID value used for debug message
+   * @return @c true if the two TensorView values are the same, otherwise @c false
+ */
+ template <typename T>
+ bool compareSingleTensorView(const nnfw::tflite::TensorView<T> &expected,
+ const nnfw::tflite::TensorView<T> &obtained, int id) const;
+
+private:
+ const nnfw::misc::tensor::Comparator &_comparator;
+};
+
+#include "tflite/interp/Builder.h"
+#include "tflite/Quantization.h"
+
+#include <random>
+
+/**
+ * @brief Class to generate random values
+ */
+class RandomGenerator
+{
+public:
+ /**
+ * @brief Construct a new RandomGenerator object
+ * @param[in] seed Random seed value
+ * @param[in] mean Mean value of normal random number generation
+ * @param[in] stddev Standard deviation of random number generation
+ * @param[in] quantization TfLiteQuantizationParams type to represent quantization value
+ * (not used yet)
+ */
+ RandomGenerator(int seed, float mean, float stddev,
+ const TfLiteQuantizationParams quantization = make_default_quantization())
+ : _rand{seed}, _dist{mean, stddev}, _quantization{quantization}
+ {
+ (void)_quantization;
+ }
+
+public:
+ /**
+ * @brief Generate random numbers for type T
+ * @param[in] s Shape value
+ * @param[in] i Index value
+ * @return Random generated value
+   * @note This is the same as T generate(void) since the two input parameters are not used
+ */
+ template <typename T>
+ T generate(const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)
+ {
+ return generate<T>();
+ }
+
+ /**
+ * @brief Generate random numbers for type T
+ * @return Random generated value
+ */
+ template <typename T> T generate(void) { return _dist(_rand); }
+
+private:
+ std::minstd_rand _rand;
+ std::normal_distribution<float> _dist;
+ // unused
+ const TfLiteQuantizationParams _quantization;
+};
+
+template <> uint8_t RandomGenerator::generate<uint8_t>(void);
+
+/**
+ * @brief Structure for NNAPI correctness test
+ */
+struct RandomTestParam
+{
+ int verbose; //!< Verbosity of debug information
+  int tolerance; //!< Tolerance of value difference
+ int tensor_logging = 0; //!< Save logging to a file if not 0
+ std::string log_path = ""; //!< Path of log file, meaningful only when tensor_logging is 1
+};
+
+/**
+ * @brief Class to define Random test runner
+ */
+class RandomTestRunner
+{
+public:
+ /**
+ * @brief Construct a new RandomTestRunner object
+ * @param[in] seed Random seed value
+ * @param[in] param RandomTestParam object for test runner
+ * @param[in] quantization TfLiteQuantizationParams type to represent quantization value
+ */
+ RandomTestRunner(int seed, const RandomTestParam &param,
+ const TfLiteQuantizationParams quantization = make_default_quantization())
+ : _randgen{seed, 0.0f, 2.0f, quantization}, _param{param}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Run the random test runner
+ * @param[in] builder Interpreter Builder used to run
+ * @return 0 if test succeeds, otherwise failure
+ */
+ int run(const nnfw::tflite::Builder &builder);
+
+public:
+ /**
+ * @brief Get RandomGenerator reference
+ * @return RandomGenerator reference
+ */
+ RandomGenerator &generator() { return _randgen; };
+
+private:
+ RandomGenerator _randgen;
+ const RandomTestParam _param;
+
+public:
+ /**
+ * @brief Create a RandomTestRunner object
+ * @param[in] seed Random seed value
+   * @return RandomTestRunner object
+ */
+ static RandomTestRunner make(int seed);
+};
+
+#endif // __NNFW_TFLITE_DIFF_H__
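As a rough usage sketch of the random correctness test (the seed value is arbitrary, and the FlatBufferBuilder declared further below is assumed to wrap an already-loaded model); run() returns 0 on success:

#include "tflite/Diff.h"
#include "tflite/interp/FlatBufferBuilder.h"

int runRandomTest(const ::tflite::FlatBufferModel &model)
{
  // Seed 0 is arbitrary; the runner feeds random inputs to both interpreters
  auto runner = RandomTestRunner::make(0);
  nnfw::tflite::FlatBufferBuilder builder{model};
  return runner.run(builder);
}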
diff --git a/runtimes/libs/tflite/include/tflite/FeatureView.h b/runtimes/libs/tflite/include/tflite/FeatureView.h
new file mode 100644
index 000000000..a8f069c40
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/FeatureView.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file FeatureView.h
+ * @brief This file contains FeatureView class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_FEATURE_VIEW_H__
+#define __NNFW_TFLITE_FEATURE_VIEW_H__
+
+#include "tensorflow/lite/interpreter.h"
+
+#include "tflite/InputIndex.h"
+#include "tflite/OutputIndex.h"
+
+#include "misc/feature/Shape.h"
+#include "misc/feature/Reader.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
+template <typename T> class FeatureView;
+
+/**
+ * @brief Class to support reading element of float type feature
+ */
+template <> class FeatureView<float> : public nnfw::misc::feature::Reader<float>
+{
+public:
+ /**
+ * @brief Construct a new FeatureView object
+ * @param[in] interp Interpreter to read from
+ * @param[in] index InputIndex index of input
+ */
+ FeatureView(::tflite::Interpreter &interp, const InputIndex &index);
+ /**
+ * @brief Construct a new FeatureView object
+ * @param[in] interp Interpreter to read from
+ * @param[in] index OutputIndex index of output
+ */
+ FeatureView(::tflite::Interpreter &interp, const OutputIndex &index);
+
+public:
+ /**
+ * @brief Get value of element using channel, row and column index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
+ float at(uint32_t ch, uint32_t row, uint32_t col) const;
+ /**
+ * @brief Get reference of element using channel, row and column index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Reference of element
+ */
+ float &at(uint32_t ch, uint32_t row, uint32_t col);
+
+ float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const = 0;
+
+private:
+ /**
+ * @brief Get offset of element from channel, row and column index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Offset of element
+ */
+ uint32_t getElementOffset(uint32_t ch, uint32_t row, uint32_t col) const
+ {
+ uint32_t res = 0;
+
+    // TensorFlow Lite assumes NHWC ordering for tensors
+ res += row * _shape.W * _shape.C;
+ res += col * _shape.C;
+ res += ch;
+
+ return res;
+ }
+
+private:
+ nnfw::misc::feature::Shape _shape;
+ float *_base;
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_FEATURE_VIEW_H__
diff --git a/runtimes/libs/tflite/include/tflite/InputIndex.h b/runtimes/libs/tflite/include/tflite/InputIndex.h
new file mode 100644
index 000000000..f535b2626
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/InputIndex.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file InputIndex.h
+ * @brief This file contains InputIndex class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_INPUT_INDEX_H__
+#define __NNFW_TFLITE_INPUT_INDEX_H__
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Class to express index of input
+ */
+class InputIndex
+{
+public:
+ /**
+ * @brief Construct a new InputIndex object with index value
+ * @param [in] index The value of index
+ */
+ InputIndex(int index) : _index(index)
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Get index value as int
+ * @return Index value as int
+ */
+ int asInt(void) const { return _index; }
+
+private:
+ int _index;
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_INPUT_INDEX_H__
diff --git a/runtimes/libs/tflite/include/tflite/InterpreterSession.h b/runtimes/libs/tflite/include/tflite/InterpreterSession.h
new file mode 100644
index 000000000..deaf05a7f
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/InterpreterSession.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file InterpreterSession.h
+ * @brief This file contains InterpreterSession class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_INTERPRETER_SESSION_H__
+#define __NNFW_TFLITE_INTERPRETER_SESSION_H__
+
+#include "Session.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Class to define TfLite interpreter session which is inherited from Session class
+ */
+class InterpreterSession final : public Session
+{
+public:
+ /**
+   * @brief Construct an InterpreterSession object with a TfLite interpreter
+ * @param[in] interp The TfLite interpreter pointer
+ */
+ InterpreterSession(::tflite::Interpreter *interp) : _interp{interp}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Get TfLite interpreter pointer
+ * @return The TfLite interpreter
+ */
+ ::tflite::Interpreter *interp(void) override { return _interp; }
+
+public:
+ /**
+ * @brief Prepare the TfLite interpreter session
+ * @return @c true if tensor preparation is successful, otherwise @c false
+ */
+ bool prepare(void) override
+ {
+ _interp->UseNNAPI(false);
+
+ if (kTfLiteOk != _interp->AllocateTensors())
+ {
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * @brief Run the Invoke function of TfLite interpreter
+ * @return @c true if Invoke() is successful, otherwise @c false
+ */
+ bool run(void) override
+ {
+ // Return true if Invoke returns kTfLiteOk
+ return kTfLiteOk == _interp->Invoke();
+ }
+
+ /**
+ * @brief Tear down TfLite interpreter session
+ * @return @c true always
+ */
+ bool teardown(void) override
+ {
+ // Do NOTHING currently
+ return true;
+ }
+
+private:
+ ::tflite::Interpreter *const _interp;
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_INTERPRETER_SESSION_H__
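A sketch of the intended call sequence, assuming an already-constructed interpreter; the NNAPISession below follows the same prepare/run/teardown protocol but dispatches Invoke through the NNAPI delegate:

#include "tflite/InterpreterSession.h"

bool runOnce(::tflite::Interpreter *interp)
{
  nnfw::tflite::InterpreterSession sess{interp};

  // prepare() allocates tensors; input buffers would be filled between prepare() and run()
  return sess.prepare() && sess.run() && sess.teardown();
}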
diff --git a/runtimes/libs/tflite/include/tflite/NNAPISession.h b/runtimes/libs/tflite/include/tflite/NNAPISession.h
new file mode 100644
index 000000000..b2a999d10
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/NNAPISession.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file NNAPISession.h
+ * @brief This file contains NNAPISession class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_NNAPI_SESSION_H__
+#define __NNFW_TFLITE_NNAPI_SESSION_H__
+
+#include "Session.h"
+#include "tflite/ext/nnapi_delegate.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Class to define NNAPI interpreter session which is inherited from Session class
+ */
+class NNAPISession final : public Session
+{
+public:
+ /**
+   * @brief Construct an NNAPISession object with a TfLite interpreter
+ * @param[in] interp The TfLite interpreter pointer
+ * @note Invoke BuildGraph() of NNAPI delegate from Interpreter
+ */
+ NNAPISession(::tflite::Interpreter *interp) : _interp{interp}
+ {
+ // Construct Graph from Interpreter
+ _delegate.BuildGraph(_interp);
+ }
+
+public:
+ /**
+ * @brief Get TfLite interpreter pointer
+ * @return The TfLite interpreter
+ */
+ ::tflite::Interpreter *interp(void) override { return _interp; }
+
+public:
+ /**
+ * @brief Prepare the TfLite interpreter session
+ * @return @c true if tensor preparation is successful, otherwise @c false
+ */
+ bool prepare(void) override
+ {
+ // Explicitly turn off T/F lite internal NNAPI delegation in order to use locally defined
+ // NNAPI delegation.
+ _interp->UseNNAPI(false);
+
+ if (kTfLiteOk != _interp->AllocateTensors())
+ {
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * @brief Run the Invoke function of NNAPI delegate
+ * @return @c true if Invoke() is successful, otherwise @c false
+ */
+ bool run(void) override { return kTfLiteOk == _delegate.Invoke(_interp); }
+
+ /**
+ * @brief Tear down TfLite interpreter session
+ * @return @c true always
+ */
+ bool teardown(void) override
+ {
+ // DO NOTHING
+ return true;
+ }
+
+private:
+ ::tflite::Interpreter *const _interp;
+ nnfw::tflite::NNAPIDelegate _delegate;
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_NNAPI_SESSION_H__
diff --git a/runtimes/libs/tflite/include/tflite/OutputIndex.h b/runtimes/libs/tflite/include/tflite/OutputIndex.h
new file mode 100644
index 000000000..dd1ca8d44
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/OutputIndex.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file OutputIndex.h
+ * @brief This file contains OutputIndex class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_OUTPUT_INDEX_H__
+#define __NNFW_TFLITE_OUTPUT_INDEX_H__
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Class to define OutputIndex
+ */
+class OutputIndex
+{
+public:
+ /**
+   * @brief Construct an OutputIndex object with index value
+ * @param[in] index The value of index
+ */
+ OutputIndex(int index) : _index(index)
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+ * @brief Get index value as int
+   * @return Index value as int
+ */
+ int asInt(void) const { return _index; }
+
+private:
+ int _index;
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_OUTPUT_INDEX_H__
diff --git a/runtimes/libs/tflite/include/tflite/Quantization.h b/runtimes/libs/tflite/include/tflite/Quantization.h
new file mode 100644
index 000000000..8272bcdc0
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/Quantization.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Quantization.h
+ * @brief This file contains the BitwiseIntToFloat union and quantization-related helpers
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_QUANTIZATION_H__
+#define __NNFW_TFLITE_QUANTIZATION_H__
+
+/**
+ * @brief Union to provide bitwise conversion of integer and float
+ */
+union BitwiseIntToFloat {
+ int i;
+ float f;
+};
+
+static const float FLOAT_NEAREST_TO_1 = BitwiseIntToFloat{0x3f7fffff}.f;
+
+#include "tensorflow/lite/context.h"
+
+/**
+ * @brief Get TfLiteQuantizationParams object with default values
+ * @return TfLiteQuantizationParams object
+ */
+TfLiteQuantizationParams make_default_quantization(void);
+
+#endif // __NNFW_TFLITE_QUANTIZATION_H__
diff --git a/runtimes/libs/tflite/include/tflite/Session.h b/runtimes/libs/tflite/include/tflite/Session.h
new file mode 100644
index 000000000..b653acf61
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/Session.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Session.h
+ * @brief This file contains Session class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_SESSION_H__
+#define __NNFW_TFLITE_SESSION_H__
+
+#include <tensorflow/lite/interpreter.h>
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Structure to provide interface methods of interpreter session
+ */
+struct Session
+{
+ /**
+ * @brief Destruct Session object using default destructor
+ */
+ virtual ~Session() = default;
+
+ /**
+ * @brief Get the Interpreter object pointer
+ * @return The Interpreter object pointer
+ */
+ virtual ::tflite::Interpreter *interp(void) = 0;
+
+ /**
+ * @brief Prepare the session
+ * @return @c true if prepare method succeeded, otherwise @c false
+ */
+ virtual bool prepare(void) = 0;
+ /**
+ * @brief Run the session
+ * @return @c true if run method succeeded, otherwise @c false
+ */
+ virtual bool run(void) = 0;
+ /**
+ * @brief Teardown(release) the session
+ * @return @c true if teardown method succeeded, otherwise @c false
+ */
+ virtual bool teardown(void) = 0;
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_SESSION_H__
diff --git a/runtimes/libs/tflite/include/tflite/TensorLogger.h b/runtimes/libs/tflite/include/tflite/TensorLogger.h
new file mode 100644
index 000000000..7b3363bd5
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/TensorLogger.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file TensorLogger.h
+ * @brief This file contains TensorLogger class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_TENSOR_LOGGER_H__
+#define __NNFW_TFLITE_TENSOR_LOGGER_H__
+
+#include "misc/tensor/IndexIterator.h"
+#include "tflite/TensorView.h"
+
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/context.h>
+#include <fstream>
+#include <iomanip>
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Class to write input and output value / shape into a file in python form
+ * @note This is a utility to write input and output value / shape into a file in python form.\n
+ *       Any python app can load these values by running the code below:\n
+ * exec(open(filename).read())\n
+ *       The generated python code looks like the following:\n
+ * tensor_shape_gen = []\n
+ * tensor_value_gen = []\n\n
+ * tensor_shape_gen.append("{2, 1, 2}")\n
+ * tensor_value_gen.append([1, 2, 3, 4])\n\n
+ * tensor_shape_gen.append("{2}")\n
+ * tensor_value_gen.append([1, 2])\n\n
+ * tensor_shape_gen.append("{2, 1, 2}")\n
+ * tensor_value_gen.append([1, 4, 3, 8])\n
+ */
+class TensorLogger
+{
+private:
+ std::ofstream _outfile;
+
+public:
+ /**
+ * @brief Get TensorLogger instance
+ * @return The TensorLogger instance
+ */
+ static TensorLogger &instance()
+ {
+ static TensorLogger instance;
+ return instance;
+ }
+
+ /**
+ * @brief Save the tensor details to file from interpreter
+ * @param[in] path The file path to save
+ * @param[in] interp The TfLite interpreter
+ */
+ void save(const std::string &path, ::tflite::Interpreter &interp)
+ {
+ open(path);
+
+ int log_index = 0;
+ for (const auto id : interp.inputs())
+ {
+ _outfile << "# input tensors" << std::endl;
+ printTensor(interp, id, log_index++);
+ }
+ for (const auto id : interp.outputs())
+ {
+ _outfile << "# output tensors" << std::endl;
+ printTensor(interp, id, log_index++);
+ }
+ close();
+ }
+
+private:
+ void open(const std::string &path)
+ {
+ if (!_outfile.is_open())
+ _outfile.open(path, std::ios_base::out);
+
+ _outfile << "# ------ file: " << path << " ------" << std::endl
+ << "tensor_shape_gen = []" << std::endl
+ << "tensor_value_gen = []" << std::endl
+ << std::endl;
+ }
+
+ void printTensor(::tflite::Interpreter &interp, const int id, const int log_index)
+ {
+ const TfLiteTensor *tensor = interp.tensor(id);
+
+ _outfile << "# tensor name: " << tensor->name << std::endl;
+ _outfile << "# tflite::interpreter.tensor(" << id << ") -> "
+ "tensor_value_gen["
+ << log_index << "]" << std::endl;
+
+ if (tensor->type == kTfLiteInt32)
+ {
+ printTensorShape(tensor);
+ printTensorValue<int32_t>(tensor, tensor->data.i32);
+ }
+    else if (tensor->type == kTfLiteUInt8)
+ {
+ printTensorShape(tensor);
+ printTensorValue<uint8_t>(tensor, tensor->data.uint8);
+ }
+ else if (tensor->type == kTfLiteFloat32)
+ {
+ printTensorShape(tensor);
+ printTensorValue<float>(tensor, tensor->data.f);
+ }
+ }
+
+ void printTensorShape(const TfLiteTensor *tensor)
+ {
+ _outfile << "tensor_shape_gen.append('{";
+
+ int r = 0;
+ for (; r < tensor->dims->size - 1; r++)
+ {
+ _outfile << tensor->dims->data[r] << ", ";
+ }
+ _outfile << tensor->dims->data[r];
+
+ _outfile << "}')" << std::endl;
+ }
+
+ template <typename T> void printTensorValue(const TfLiteTensor *tensor, T *tensor_data_ptr)
+ {
+ _outfile << "tensor_value_gen.append([";
+
+ _outfile << std::fixed << std::setprecision(10);
+
+ const T *end = reinterpret_cast<const T *>(tensor->data.raw_const + tensor->bytes);
+ for (T *ptr = tensor_data_ptr; ptr < end; ptr++)
+ _outfile << *ptr << ", ";
+
+ _outfile << "])" << std::endl << std::endl;
+ }
+
+ void close()
+ {
+ _outfile << "# --------- tensor shape and value defined above ---------" << std::endl;
+ _outfile.close();
+ }
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_TENSOR_LOGGER_H__
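A sketch of dumping all input and output tensors after an inference, assuming the interpreter has already been invoked; the log path is arbitrary:

#include "tflite/TensorLogger.h"

void dumpTensors(::tflite::Interpreter &interp)
{
  // Writes tensor_shape_gen / tensor_value_gen assignments loadable from Python
  nnfw::tflite::TensorLogger::instance().save("tensor_log.py", interp);
}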
diff --git a/runtimes/libs/tflite/include/tflite/TensorShapeUtils.h b/runtimes/libs/tflite/include/tflite/TensorShapeUtils.h
new file mode 100644
index 000000000..ba8687413
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/TensorShapeUtils.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file TensorShapeUtils.h
+ * @brief This file contains utility functions for tensor shapes
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
+#define __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
+
+#include "misc/tensor/Shape.h"
+
+#include <vector>
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Converts tensor::Shape into a vector
+ * @param[in] shape The tensor shape to be converted
+ * @return vector value of given shape object
+ */
+static inline std::vector<int32_t> as_dims(const nnfw::misc::tensor::Shape &shape)
+{
+ std::vector<int32_t> dims;
+
+ for (uint32_t axis = 0; axis < shape.rank(); ++axis)
+ {
+ dims.emplace_back(shape.dim(axis));
+ }
+
+ return dims;
+}
+
+/**
+ * @brief Broadcasts between two given shapes
+ * @param[in] lhs_shape The left hand side shape
+ * @param[in] rhs_shape The right hand side shape
+ * @return The broadcasted shape
+ */
+nnfw::misc::tensor::Shape broadcast(const nnfw::misc::tensor::Shape &lhs_shape,
+ const nnfw::misc::tensor::Shape &rhs_shape);
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
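A sketch of combining the two helpers above, assuming misc::tensor::Shape can be brace-initialized with dimension values (the shapes are illustrative):

#include "tflite/TensorShapeUtils.h"

std::vector<int32_t> broadcastDims()
{
  nnfw::misc::tensor::Shape lhs{2, 1, 3};
  nnfw::misc::tensor::Shape rhs{1, 4, 3};

  // Under NumPy-style broadcasting rules the result would be {2, 4, 3}
  auto out = nnfw::tflite::broadcast(lhs, rhs);
  return nnfw::tflite::as_dims(out);
}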
diff --git a/runtimes/libs/tflite/include/tflite/TensorUtils.h b/runtimes/libs/tflite/include/tflite/TensorUtils.h
new file mode 100644
index 000000000..08af1468b
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/TensorUtils.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file TensorUtils.h
+ * @brief This file contains tensor utility functions
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_TENSOR_UTILS_H__
+#define __NNFW_TFLITE_TENSOR_UTILS_H__
+
+#include <tensorflow/lite/context.h>
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Check whether the tensor type is kTfLiteFloat32
+ * @param[in] tensor The tensor object to be compared
+ * @return @c true if tensor type is kTfLiteFloat32, otherwise @c false
+ */
+inline bool isFloatTensor(const TfLiteTensor *tensor) { return tensor->type == kTfLiteFloat32; }
+
+/**
+ * @brief Check whether the tensor is a 4-D tensor whose first dimension length is 1
+ * @param[in] tensor The tensor object to be compared
+ * @return @c true if tensor is 4-D tensor and the first dimension length is 1, otherwise @c false
+ */
+inline bool isFeatureTensor(const TfLiteTensor *tensor)
+{
+ return (tensor->dims->size == 4) && (tensor->dims->data[0] == 1);
+}
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_TENSOR_UTILS_H__
diff --git a/runtimes/libs/tflite/include/tflite/TensorView.h b/runtimes/libs/tflite/include/tflite/TensorView.h
new file mode 100644
index 000000000..ce791a73f
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/TensorView.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file TensorView.h
+ * @brief This file contains TensorView class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_TENSOR_VIEW_H__
+#define __NNFW_TFLITE_TENSOR_VIEW_H__
+
+#include "tensorflow/lite/interpreter.h"
+
+#include "misc/tensor/Shape.h"
+#include "misc/tensor/Index.h"
+#include "misc/tensor/Reader.h"
+#include "misc/tensor/NonIncreasingStride.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Class to define TensorView which is inherited from nnfw::misc::tensor::Reader<T> class
+ */
+template <typename T> class TensorView final : public nnfw::misc::tensor::Reader<T>
+{
+public:
+ /**
+   * @brief Construct a TensorView object with shape and base address information
+ * @param[in] shape The shape of a tensor
+ * @param[in] base The base address of a tensor
+ */
+ TensorView(const nnfw::misc::tensor::Shape &shape, T *base) : _shape{shape}, _base{base}
+ {
+ // Set 'stride'
+ _stride.init(_shape);
+ }
+
+public:
+ /**
+ * @brief Get shape of tensor
+ * @return Reference of shape
+ */
+ const nnfw::misc::tensor::Shape &shape(void) const { return _shape; }
+
+public:
+ /**
+   * @brief Get the value at the given tensor index
+ * @param[in] index The tensor index
+ * @return The value at the index
+ */
+ T at(const nnfw::misc::tensor::Index &index) const override
+ {
+ const auto offset = _stride.offset(index);
+ return *(_base + offset);
+ }
+
+public:
+ /**
+   * @brief Get a reference to the value at the given tensor index
+ * @param[in] index The tensor index
+ * @return The reference value at the index
+ */
+ T &at(const nnfw::misc::tensor::Index &index)
+ {
+ const auto offset = _stride.offset(index);
+ return *(_base + offset);
+ }
+
+private:
+ nnfw::misc::tensor::Shape _shape; /**< The tensor shape */
+
+public:
+ T *_base; /**< The base address of tensor */
+ nnfw::misc::tensor::NonIncreasingStride _stride; /**< The NonIncreasingStride object */
+
+public:
+ // TODO Introduce Operand ID class
+ /**
+ * @brief Create TensorView object using given parameters
+ * @param[in] interp The TfLite interpreter
+ * @param[in] tensor_index The tensor index
+ * @return The new TensorView<T> object
+ */
+ static TensorView<T> make(::tflite::Interpreter &interp, int tensor_index)
+ {
+ auto tensor_ptr = interp.tensor(tensor_index);
+
+ // Set 'shape'
+ nnfw::misc::tensor::Shape shape(tensor_ptr->dims->size);
+
+ for (uint32_t axis = 0; axis < shape.rank(); ++axis)
+ {
+ shape.dim(axis) = tensor_ptr->dims->data[axis];
+ }
+
+ return TensorView<T>(shape, interp.typed_tensor<T>(tensor_index));
+ }
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_TENSOR_VIEW_H__
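A sketch of reading one element of an output tensor through TensorView, assuming output 0 is a float tensor and that nnfw::misc::tensor::Index can be constructed with a rank and indexed per axis:

#include "tflite/TensorView.h"

float firstOutputValue(::tflite::Interpreter &interp)
{
  auto view = nnfw::tflite::TensorView<float>::make(interp, interp.outputs().at(0));

  // Build an all-zero index matching the tensor rank
  nnfw::misc::tensor::Index index(view.shape().rank());
  for (uint32_t axis = 0; axis < view.shape().rank(); ++axis)
  {
    index.at(axis) = 0;
  }

  return view.at(index);
}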
diff --git a/runtimes/libs/tflite/include/tflite/ext/kernels/Abs.h b/runtimes/libs/tflite/include/tflite/ext/kernels/Abs.h
new file mode 100644
index 000000000..697ba33e9
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/ext/kernels/Abs.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_TFLITE_EXT_KERNELS_ABS_H__
+#define __NNFW_TFLITE_EXT_KERNELS_ABS_H__
+
+#include "tensorflow/lite/context.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+namespace Abs
+{
+
+void *InitAbs(TfLiteContext *context, const char *buffer, size_t length);
+void FreeAbs(TfLiteContext *context, void *buffer);
+TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node);
+TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node);
+
+} // namespace Abs
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_EXT_KERNELS_ABS_H__
diff --git a/runtimes/libs/tflite/include/tflite/ext/kernels/CustomOps.h b/runtimes/libs/tflite/include/tflite/ext/kernels/CustomOps.h
new file mode 100644
index 000000000..3370db778
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/ext/kernels/CustomOps.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file CustomOps.h
+ * @brief This file contains registration of custom operators
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
+#define __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
+
+#include "tensorflow/lite/context.h"
+#include "tflite/ext/kernels/TensorFlowMax.h"
+#include "tflite/ext/kernels/SquaredDifference.h"
+#include "tflite/ext/kernels/TensorFlowSum.h"
+#include "tflite/ext/kernels/Abs.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+
+#define REGISTER_FUNCTION(Name) \
+ TfLiteRegistration *Register_##Name(void) \
+ { \
+ static TfLiteRegistration r = {}; \
+ r.init = Name::Init##Name; \
+ r.free = Name::Free##Name; \
+ r.prepare = Name::Prepare##Name; \
+ r.invoke = Name::Eval##Name; \
+ r.custom_name = #Name; \
+ return &r; \
+ }
+
+REGISTER_FUNCTION(TensorFlowMax)
+REGISTER_FUNCTION(SquaredDifference)
+REGISTER_FUNCTION(TensorFlowSum)
+REGISTER_FUNCTION(Abs)
+
+#undef REGISTER_FUNCTION
+
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
diff --git a/runtimes/libs/tflite/include/tflite/ext/kernels/SquaredDifference.h b/runtimes/libs/tflite/include/tflite/ext/kernels/SquaredDifference.h
new file mode 100644
index 000000000..5512ead78
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/ext/kernels/SquaredDifference.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file SquaredDifference.h
+ * @brief This file contains SquaredDifference namespace and SquaredDifference function
+ * definitions
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
+#define __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
+
+#include "tensorflow/lite/context.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+namespace SquaredDifference
+{
+
+/**
+ * @brief Initialize SquaredDifference operand using the contents of buffer
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @param[in] length The buffer length
+ * @return The void pointer for user data
+ */
+void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length);
+
+/**
+ * @brief Release any memory it might have allocated via 'InitSquaredDifference'
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @return N/A
+ */
+void FreeSquaredDifference(TfLiteContext *context, void *buffer);
+
+/**
+ * @brief Prepare the SquaredDifference operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
+TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node);
+
+/**
+ * @brief Evaluate the SquaredDifference operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
+TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node);
+
+} // namespace SquaredDifference
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
diff --git a/runtimes/libs/tflite/include/tflite/ext/kernels/TensorFlowMax.h b/runtimes/libs/tflite/include/tflite/ext/kernels/TensorFlowMax.h
new file mode 100644
index 000000000..d573308ed
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/ext/kernels/TensorFlowMax.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file TensorFlowMax.h
+ * @brief This file contains TensorFlowMax namespace and TensorFlowMax function definitions
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
+#define __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
+
+#include "tensorflow/lite/context.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+namespace TensorFlowMax
+{
+
+/**
+ * @brief Initialize TensorFlowMax operand using the contents of buffer
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @param[in] length The buffer length
+ * @return The void pointer for user data
+ */
+void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length);
+
+/**
+ * @brief Release any memory it might have allocated via 'InitTensorFlowMax'
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @return N/A
+ */
+void FreeTensorFlowMax(TfLiteContext *context, void *buffer);
+
+/**
+ * @brief Prepare the TensorFlowMax operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
+TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
+
+/**
+ * @brief Evaluate the TensorFlowMax operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
+TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
+
+} // namespace TensorFlowMax
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
diff --git a/runtimes/libs/tflite/include/tflite/ext/kernels/TensorFlowSum.h b/runtimes/libs/tflite/include/tflite/ext/kernels/TensorFlowSum.h
new file mode 100644
index 000000000..29455aac5
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/ext/kernels/TensorFlowSum.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
+#define __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
+
+#include "tensorflow/lite/context.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+namespace TensorFlowSum
+{
+
+void *InitTensorFlowSum(TfLiteContext *context, const char *buffer, size_t length);
+void FreeTensorFlowSum(TfLiteContext *context, void *buffer);
+TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
+TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
+
+} // namespace TensorFlowSum
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
diff --git a/runtimes/libs/tflite/include/tflite/ext/kernels/register.h b/runtimes/libs/tflite/include/tflite/ext/kernels/register.h
new file mode 100644
index 000000000..7d2bd786d
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/ext/kernels/register.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This header is derived from the following file (in TensorFlow)
+// 'externals/tensorflow/tensorflow/lite/kernels/register.h'
+#ifndef __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
+#define __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
+
+#include <unordered_map>
+#include "tensorflow/lite/context.h"
+#include "tensorflow/lite/model.h"
+
+namespace nnfw {
+namespace tflite {
+
+class BuiltinOpResolver : public ::tflite::MutableOpResolver {
+ public:
+ BuiltinOpResolver();
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
+
+// clang-format on
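A sketch of building an interpreter with this extended resolver so the custom kernels declared above (Abs, SquaredDifference, TensorFlowMax, TensorFlowSum) are resolvable; model loading is assumed to have happened elsewhere:

#include "tflite/ext/kernels/register.h"
#include "tensorflow/lite/model.h"
#include <memory>

std::unique_ptr<::tflite::Interpreter> buildInterpreter(const ::tflite::FlatBufferModel &model)
{
  nnfw::tflite::BuiltinOpResolver resolver; // registers builtin + custom ops

  std::unique_ptr<::tflite::Interpreter> interpreter;
  ::tflite::InterpreterBuilder(model, resolver)(&interpreter);

  return interpreter;
}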
diff --git a/runtimes/libs/tflite/include/tflite/ext/nnapi_delegate.h b/runtimes/libs/tflite/include/tflite/ext/nnapi_delegate.h
new file mode 100644
index 000000000..21017b29f
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/ext/nnapi_delegate.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This header is derived from the following file (in TensorFlow v1.12)
+// 'externals/tensorflow/tensorflow/lite/nnapi_delegate.h'
+#ifndef __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
+#define __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
+
+#include "tensorflow/lite/allocation.h"
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 9
+#include "tensorflow/lite/context.h"
+#include "tensorflow/lite/error_reporter.h"
+#else
+#include "tensorflow/lite/c/c_api_internal.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#endif
+#include "tensorflow/lite/interpreter.h"
+
+struct ANeuralNetworksModel;
+struct ANeuralNetworksMemory;
+struct ANeuralNetworksCompilation;
+
+namespace nnfw {
+namespace tflite {
+
+class NNAPIAllocation : public ::tflite::MMAPAllocation {
+ public:
+ NNAPIAllocation(const char* filename, ::tflite::ErrorReporter* error_reporter);
+ ~NNAPIAllocation();
+
+ size_t offset(const void* ptr) const {
+ auto signed_offset = reinterpret_cast<const uint8_t*>(ptr) -
+ reinterpret_cast<const uint8_t*>(mmapped_buffer_);
+
+ return static_cast<size_t>(signed_offset);
+ }
+
+ ANeuralNetworksMemory* memory() const { return handle_; }
+ bool valid() const override { return handle_ != nullptr; }
+
+ private:
+ mutable ANeuralNetworksMemory* handle_ = nullptr;
+};
+
+class NNAPIDelegate {
+ public:
+ ~NNAPIDelegate();
+
+ // Convert a tflite graph to NNAPI
+ TfLiteStatus BuildGraph(::tflite::Interpreter* interpreter);
+
+ // Run
+ TfLiteStatus Invoke(::tflite::Interpreter* interpreter);
+
+ // Whether the current platform supports NNAPI delegation.
+ static bool IsSupported();
+
+ private:
+ // The NN API model handle
+ ANeuralNetworksModel* nn_model_ = nullptr;
+ // The NN API compilation handle
+ ANeuralNetworksCompilation* nn_compiled_model_ = nullptr;
+ // Model status
+ TfLiteStatus model_status_ = kTfLiteOk;
+
+ // List of state tensors for LSTM, RNN, SVDF.
+ // NN API does not allow ops to maintain states across multiple
+ // invocations. We need to manually create state input tensors from
+ // corresponding state output tensors of TFLite operations, and map them
+ // correctly.
+ std::vector<int> model_states_inputs_; // holds NNAPI operand ids
+ std::vector<int> model_states_outputs_; // holds TFLite tensor ids
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
+
+// clang-format on
diff --git a/runtimes/libs/tflite/include/tflite/interp/Builder.h b/runtimes/libs/tflite/include/tflite/interp/Builder.h
new file mode 100644
index 000000000..0f54e1779
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/interp/Builder.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Builder.h
+ * @brief This file contains Builder structure
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_INTERP_BUILDER_H__
+#define __NNFW_TFLITE_INTERP_BUILDER_H__
+
+#include <tensorflow/lite/interpreter.h>
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Abstract interface for building a TfLite interpreter
+ */
+struct Builder
+{
+ /**
+ * @brief Destroy the Builder object
+ */
+ virtual ~Builder() = default;
+
+ /**
+   * @brief Build a TfLite interpreter
+ * @return The TfLite interpreter object
+ */
+ virtual std::unique_ptr<::tflite::Interpreter> build(void) const = 0;
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_INTERP_BUILDER_H__
diff --git a/runtimes/libs/tflite/include/tflite/interp/FlatBufferBuilder.h b/runtimes/libs/tflite/include/tflite/interp/FlatBufferBuilder.h
new file mode 100644
index 000000000..2d96af50b
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/interp/FlatBufferBuilder.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file FlatBufferBuilder.h
+ * @brief This file contains FlatBufferBuilder class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
+#define __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
+
+#include <tensorflow/lite/model.h>
+
+#include "tflite/interp/Builder.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Builder that creates a TfLite interpreter from a FlatBuffer model
+ */
+class FlatBufferBuilder final : public Builder
+{
+public:
+ /**
+   * @brief Construct a FlatBufferBuilder object from a TfLite FlatBufferModel
+ * @param[in] model The TfLite Flatbuffer model
+ */
+ FlatBufferBuilder(const ::tflite::FlatBufferModel &model) : _model{model}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+   * @brief Build a TfLite interpreter from the FlatBuffer model
+   * @return The TfLite interpreter object
+ */
+ std::unique_ptr<::tflite::Interpreter> build(void) const override;
+
+private:
+ const ::tflite::FlatBufferModel &_model;
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
diff --git a/runtimes/libs/tflite/include/tflite/interp/FunctionBuilder.h b/runtimes/libs/tflite/include/tflite/interp/FunctionBuilder.h
new file mode 100644
index 000000000..7bfb8db2d
--- /dev/null
+++ b/runtimes/libs/tflite/include/tflite/interp/FunctionBuilder.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file FunctionBuilder.h
+ * @brief This file contains FunctionBuilder class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+#ifndef __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
+#define __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
+
+#include <tensorflow/lite/model.h>
+
+#include "tflite/interp/Builder.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Builder that creates a TfLite interpreter via a user-provided setup function
+ */
+class FunctionBuilder final : public Builder
+{
+public:
+ using SetupFunc = std::function<void(::tflite::Interpreter &)>;
+
+public:
+ /**
+ * @brief Construct a FunctionBuilder object with SetupFunction
+ * @param[in] fn The SetupFunc object
+ */
+ FunctionBuilder(const SetupFunc &fn) : _fn{fn}
+ {
+ // DO NOTHING
+ }
+
+public:
+ /**
+   * @brief Build a TfLite interpreter by applying the setup function
+   * @return The TfLite interpreter object
+ */
+ std::unique_ptr<::tflite::Interpreter> build(void) const override;
+
+private:
+ SetupFunc _fn;
+};
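+
+// A minimal usage sketch (illustrative only): build an interpreter whose graph
+// is populated by a user-provided callback.
+//
+//   nnfw::tflite::FunctionBuilder builder{[](::tflite::Interpreter &interp) {
+//     /* set up tensors and operators of 'interp' here */
+//   }};
+//   auto interp = builder.build();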
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
diff --git a/runtimes/libs/tflite/src/Diff.cpp b/runtimes/libs/tflite/src/Diff.cpp
new file mode 100644
index 000000000..414aef207
--- /dev/null
+++ b/runtimes/libs/tflite/src/Diff.cpp
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/Diff.h"
+#include "tflite/ext/nnapi_delegate.h"
+
+#include "misc/fp32.h"
+
+#include "misc/tensor/IndexIterator.h"
+#include "misc/tensor/IndexFormatter.h"
+#include "misc/tensor/Zipper.h"
+#include "misc/tensor/Comparator.h"
+
+#include "misc/EnvVar.h"
+
+#include <iostream>
+#include <cassert>
+
+class DiffSummary : public nnfw::misc::tensor::Comparator::Observer
+{
+public:
+ DiffSummary()
+ : max_abs_diff_index(0), max_abs_diff_expected{0.0f}, max_abs_diff_obtained{0.0f},
+ max_abs_diff_value{0.0f}, max_rel_diff_index(0), max_rel_diff_expected{0.0f},
+ max_rel_diff_obtained{0.0f}, max_rel_diff_value{0.0f}
+ {
+ // DO NOTHING
+ }
+
+public:
+ void notify(const nnfw::misc::tensor::Index &index, float expected, float obtained) override;
+
+public:
+ nnfw::misc::tensor::Index max_abs_diff_index;
+ float max_abs_diff_expected;
+ float max_abs_diff_obtained;
+ float max_abs_diff_value;
+
+ nnfw::misc::tensor::Index max_rel_diff_index;
+ float max_rel_diff_expected;
+ float max_rel_diff_obtained;
+ float max_rel_diff_value;
+};
+
+void DiffSummary::notify(const nnfw::misc::tensor::Index &index, float expected, float obtained)
+{
+ const auto abs_diff_value = std::fabs(expected - obtained);
+
+ if (max_abs_diff_value < abs_diff_value)
+ {
+ max_abs_diff_index = index;
+ max_abs_diff_value = abs_diff_value;
+ max_abs_diff_expected = expected;
+ max_abs_diff_obtained = obtained;
+ }
+
+ const auto rel_diff_value = nnfw::misc::fp32::relative_diff(expected, obtained);
+
+ if (max_rel_diff_value < rel_diff_value)
+ {
+ max_rel_diff_index = index;
+ max_rel_diff_value = rel_diff_value;
+ max_rel_diff_expected = expected;
+ max_rel_diff_obtained = obtained;
+ }
+}
+
+template <typename T>
+bool TfLiteInterpMatchApp::compareSingleTensorView(const nnfw::tflite::TensorView<T> &expected,
+ const nnfw::tflite::TensorView<T> &obtained,
+ int id) const
+{
+ std::vector<nnfw::misc::tensor::Diff<T>> diffs;
+ assert(expected.shape() == obtained.shape());
+
+ using nnfw::misc::tensor::zip;
+ using nnfw::misc::tensor::Index;
+
+ zip(expected.shape(), expected, obtained)
+ << [&](const Index &index, T expected_value, T obtained_value) {
+ if (expected_value != obtained_value)
+ {
+ diffs.emplace_back(index, expected_value, obtained_value);
+ }
+ };
+
+ // TODO Unify summary generation code
+ if (diffs.size() == 0)
+ {
+ std::cout << " Tensor #" << id << ": MATCHED" << std::endl;
+ }
+ else
+ {
+ std::cout << " Tensor #" << id << ": UNMATCHED" << std::endl;
+ std::cout << " " << diffs.size() << " diffs are detected" << std::endl;
+ }
+
+ if (diffs.size() > 0 && _verbose != 0)
+ {
+ std::cout << " ---- Details ---" << std::endl;
+ for (const auto &diff : diffs)
+ {
+ std::cout << " Diff at [" << nnfw::misc::tensor::IndexFormatter(diff.index) << "]"
+ << std::endl;
+ std::cout << " expected: " << diff.expected << std::endl;
+ std::cout << " obtained: " << diff.obtained << std::endl;
+ }
+ }
+
+ return diffs.size() == 0;
+}
+
+template <>
+bool TfLiteInterpMatchApp::compareSingleTensorView<float>(
+ const nnfw::tflite::TensorView<float> &expected,
+ const nnfw::tflite::TensorView<float> &obtained, int id) const
+{
+ DiffSummary summary;
+
+ assert(expected.shape() == obtained.shape());
+ auto diffs = _comparator.compare(expected.shape(), expected, obtained, &summary);
+
+ // TODO Unify summary generation code
+ if (diffs.size() == 0)
+ {
+ std::cout << " Tensor #" << id << ": MATCHED" << std::endl;
+ }
+ else
+ {
+ std::cout << " Tensor #" << id << ": UNMATCHED" << std::endl;
+ std::cout << " " << diffs.size() << " diffs are detected" << std::endl;
+ }
+
+ // Print out max_diff
+ if (summary.max_abs_diff_value > 0)
+ {
+ std::cout << " Max absolute diff at ["
+ << nnfw::misc::tensor::IndexFormatter(summary.max_abs_diff_index) << "]" << std::endl;
+ std::cout << " expected: " << summary.max_abs_diff_expected << std::endl;
+ std::cout << " obtained: " << summary.max_abs_diff_obtained << std::endl;
+ std::cout << " absolute diff: " << summary.max_abs_diff_value << std::endl;
+ }
+
+ if (summary.max_rel_diff_value > 0)
+ {
+ const auto tolerance_level = summary.max_rel_diff_value / FLT_EPSILON;
+
+ std::cout << " Max relative diff at ["
+ << nnfw::misc::tensor::IndexFormatter(summary.max_rel_diff_index) << "]" << std::endl;
+ std::cout << " expected: " << summary.max_rel_diff_expected << std::endl;
+ std::cout << " obtained: " << summary.max_rel_diff_obtained << std::endl;
+ std::cout << " relative diff: " << summary.max_rel_diff_value << std::endl;
+ std::cout << " (tolerance level = " << tolerance_level << ")" << std::endl;
+ }
+
+ if (diffs.size() > 0)
+ {
+ if (_verbose != 0)
+ {
+ std::cout << " ---- Details ---" << std::endl;
+ for (const auto &diff : diffs)
+ {
+ const auto absolute_diff = std::fabs(diff.expected - diff.obtained);
+ const auto relative_diff = nnfw::misc::fp32::relative_diff(diff.expected, diff.obtained);
+ const auto tolerance_level = relative_diff / FLT_EPSILON;
+
+ std::cout << " Diff at [" << nnfw::misc::tensor::IndexFormatter(diff.index) << "]"
+ << std::endl;
+ std::cout << " expected: " << diff.expected << std::endl;
+ std::cout << " obtained: " << diff.obtained << std::endl;
+ std::cout << " absolute diff: " << absolute_diff << std::endl;
+ std::cout << " relative diff: " << relative_diff << std::endl;
+ std::cout << " (tolerance level = " << tolerance_level << ")" << std::endl;
+ }
+ }
+
+ return false;
+ }
+ return true;
+}
+
+#include <map>
+
+bool TfLiteInterpMatchApp::run(::tflite::Interpreter &interp, ::tflite::Interpreter &nnapi) const
+{
+ assert(interp.outputs() == nnapi.outputs());
+
+ bool all_matched = true;
+
+ using Comparator = std::function<bool(int id, ::tflite::Interpreter &, ::tflite::Interpreter &)>;
+
+ std::map<TfLiteType, Comparator> comparators;
+
+ comparators[kTfLiteUInt8] = [this](int id, ::tflite::Interpreter &interp,
+ ::tflite::Interpreter &nnapi) {
+ const auto expected = nnfw::tflite::TensorView<uint8_t>::make(interp, id);
+ const auto obtained = nnfw::tflite::TensorView<uint8_t>::make(nnapi, id);
+
+ return compareSingleTensorView(expected, obtained, id);
+ };
+
+ comparators[kTfLiteInt32] = [this](int id, ::tflite::Interpreter &interp,
+ ::tflite::Interpreter &nnapi) {
+ const auto expected = nnfw::tflite::TensorView<int32_t>::make(interp, id);
+ const auto obtained = nnfw::tflite::TensorView<int32_t>::make(nnapi, id);
+
+ return compareSingleTensorView(expected, obtained, id);
+ };
+
+ comparators[kTfLiteFloat32] = [this](int id, ::tflite::Interpreter &interp,
+ ::tflite::Interpreter &nnapi) {
+ const auto expected = nnfw::tflite::TensorView<float>::make(interp, id);
+ const auto obtained = nnfw::tflite::TensorView<float>::make(nnapi, id);
+
+ return compareSingleTensorView(expected, obtained, id);
+ };
+
+ comparators[kTfLiteBool] = [this](int id, ::tflite::Interpreter &interp,
+ ::tflite::Interpreter &nnapi) {
+ const auto expected = nnfw::tflite::TensorView<bool>::make(interp, id);
+ const auto obtained = nnfw::tflite::TensorView<bool>::make(nnapi, id);
+
+ return compareSingleTensorView(expected, obtained, id);
+ };
+
+ for (const auto &id : interp.outputs())
+ {
+ assert(interp.tensor(id)->type == nnapi.tensor(id)->type);
+
+ auto it = comparators.find(interp.tensor(id)->type);
+
+ if (it == comparators.end())
+ {
+ throw std::runtime_error{"Not supported output type"};
+ }
+
+ const auto &comparator = it->second;
+
+ if (!comparator(id, interp, nnapi))
+ {
+ all_matched = false;
+ }
+ }
+
+ return all_matched;
+}
+
+#include "misc/tensor/Object.h"
+
+using namespace std::placeholders;
+
+template <> uint8_t RandomGenerator::generate<uint8_t>(void)
+{
+ // The value of type_range is 255.
+ float type_range = static_cast<float>(std::numeric_limits<uint8_t>::max()) -
+ static_cast<float>(std::numeric_limits<uint8_t>::min());
+ // Most _dist values range from -5.0 to 5.0.
+ float min_range = -5.0f;
+ float max_range = 5.0f;
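+  // The affine mapping below sends a sample of -5.0 to 0, 0.0 to roughly 127,
+  // and 5.0 to 255, so typical samples span the whole uint8_t range.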
+ return static_cast<uint8_t>((_dist(_rand) - min_range) * type_range / (max_range - min_range));
+}
+
+#include "tflite/TensorLogger.h"
+//
+// Random Test Runner
+//
+int RandomTestRunner::run(const nnfw::tflite::Builder &builder)
+{
+ auto tfl_interp = builder.build();
+ auto nnapi = builder.build();
+
+ tfl_interp->UseNNAPI(false);
+
+ // Allocate Tensors
+ tfl_interp->AllocateTensors();
+ nnapi->AllocateTensors();
+
+ assert(tfl_interp->inputs() == nnapi->inputs());
+
+ using ::tflite::Interpreter;
+ using Initializer = std::function<void(int id, Interpreter *, Interpreter *)>;
+
+ std::map<TfLiteType, Initializer> initializers;
+ std::map<TfLiteType, Initializer> reseters;
+
+  // Generate signed 32-bit integer (s32) input
+ initializers[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
+ assert(nnapi->tensor(id)->type == kTfLiteInt32);
+
+ auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ int32_t value = 0;
+
+ nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) {
+ // TODO Generate random values
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ ++value;
+ };
+ };
+
+  // Reset signed 32-bit integer (s32) tensor contents to zero
+ reseters[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
+ assert(nnapi->tensor(id)->type == kTfLiteInt32);
+
+ auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ int32_t value = 0;
+
+ nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) {
+ // TODO Generate random values
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ };
+ };
+
+ initializers[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
+ assert(nnapi->tensor(id)->type == kTfLiteUInt8);
+
+ auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ auto fp = static_cast<uint8_t (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
+ const ::nnfw::misc::tensor::Index &)>(
+ &RandomGenerator::generate<uint8_t>);
+ const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
+ std::bind(fp, _randgen, _1, _2));
+ assert(tfl_interp_view.shape() == data.shape());
+
+ nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) {
+ const auto value = data.at(ind);
+
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ };
+ };
+
+ reseters[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
+ assert(nnapi->tensor(id)->type == kTfLiteUInt8);
+
+ auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ auto fp = static_cast<uint8_t (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
+ const ::nnfw::misc::tensor::Index &)>(
+ &RandomGenerator::generate<uint8_t>);
+ const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
+ std::bind(fp, _randgen, _1, _2));
+ assert(tfl_interp_view.shape() == data.shape());
+
+ uint8_t value = 0;
+
+ nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) {
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ };
+ };
+
+ initializers[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
+ assert(nnapi->tensor(id)->type == kTfLiteFloat32);
+
+ auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ auto fp = static_cast<float (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
+ const ::nnfw::misc::tensor::Index &)>(
+ &RandomGenerator::generate<float>);
+ const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
+ std::bind(fp, _randgen, _1, _2));
+
+ assert(tfl_interp_view.shape() == data.shape());
+
+ nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) {
+ const auto value = data.at(ind);
+
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ };
+ };
+
+ reseters[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
+ assert(nnapi->tensor(id)->type == kTfLiteFloat32);
+
+ auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ auto fp = static_cast<float (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
+ const ::nnfw::misc::tensor::Index &)>(
+ &RandomGenerator::generate<float>);
+ const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
+ std::bind(fp, _randgen, _1, _2));
+
+ assert(tfl_interp_view.shape() == data.shape());
+
+ float value = 0;
+
+ nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) {
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ };
+ };
+
+ initializers[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteBool);
+ assert(nnapi->tensor(id)->type == kTfLiteBool);
+
+ auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ auto fp = static_cast<bool (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
+ const ::nnfw::misc::tensor::Index &)>(
+ &RandomGenerator::generate<bool>);
+ const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
+ std::bind(fp, _randgen, _1, _2));
+
+ assert(tfl_interp_view.shape() == data.shape());
+
+ nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) {
+ const auto value = data.at(ind);
+
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ };
+ };
+
+ reseters[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+ assert(tfl_interp->tensor(id)->type == kTfLiteBool);
+ assert(nnapi->tensor(id)->type == kTfLiteBool);
+
+ auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
+ auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
+
+ assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+ auto fp = static_cast<bool (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
+ const ::nnfw::misc::tensor::Index &)>(
+ &RandomGenerator::generate<bool>);
+ const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
+ std::bind(fp, _randgen, _1, _2));
+
+ assert(tfl_interp_view.shape() == data.shape());
+
+ bool value = false;
+
+ nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) {
+ tfl_interp_view.at(ind) = value;
+ nnapi_view.at(ind) = value;
+ };
+ };
+
+ // Fill IFM with random numbers
+ for (const auto id : tfl_interp->inputs())
+ {
+ assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
+
+ auto it = initializers.find(tfl_interp->tensor(id)->type);
+
+ if (it == initializers.end())
+ {
+ throw std::runtime_error{"Not supported input type"};
+ }
+
+ it->second(id, tfl_interp.get(), nnapi.get());
+ }
+
+ // Fill OFM with 0
+ for (const auto id : tfl_interp->outputs())
+ {
+ assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
+
+ auto it = reseters.find(tfl_interp->tensor(id)->type);
+
+ if (it == reseters.end())
+ {
+ throw std::runtime_error{"Not supported input type"};
+ }
+
+ it->second(id, tfl_interp.get(), nnapi.get());
+ }
+
+ std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
+ tfl_interp->Invoke();
+
+ std::cout << "[NNAPI TEST] Run T/F Lite Interpreter with NNAPI" << std::endl;
+
+ char *env = getenv("UPSTREAM_DELEGATE");
+
+ if (env && !std::string(env).compare("1"))
+ {
+ nnapi->UseNNAPI(true);
+ nnapi->Invoke();
+ }
+ else
+ {
+ nnfw::tflite::NNAPIDelegate d;
+
+ if (d.BuildGraph(nnapi.get()))
+ {
+ throw std::runtime_error{"Failed to BuildGraph"};
+ }
+
+ if (d.Invoke(nnapi.get()))
+ {
+ throw std::runtime_error{"Failed to BuildGraph"};
+ }
+ }
+
+ // Compare OFM
+ std::cout << "[NNAPI TEST] Compare the result" << std::endl;
+
+ const auto tolerance = _param.tolerance;
+
+ auto equals = [tolerance](float lhs, float rhs) {
+ // NOTE Hybrid approach
+ // TODO Allow users to set tolerance for absolute_epsilon_equal
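+    // A pair of values is accepted when it is absolutely close (which covers
+    // results near zero, where relative error is unstable); otherwise the
+    // relative comparison with the configured tolerance decides.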
+ if (nnfw::misc::fp32::absolute_epsilon_equal(lhs, rhs))
+ {
+ return true;
+ }
+
+ return nnfw::misc::fp32::epsilon_equal(lhs, rhs, tolerance);
+ };
+
+ nnfw::misc::tensor::Comparator comparator(equals);
+ TfLiteInterpMatchApp app(comparator);
+
+ app.verbose() = _param.verbose;
+
+ bool res = app.run(*tfl_interp, *nnapi);
+
+ if (!res)
+ {
+ return 255;
+ }
+
+ std::cout << "[NNAPI TEST] PASSED" << std::endl;
+
+ if (_param.tensor_logging)
+ nnfw::tflite::TensorLogger::instance().save(_param.log_path, *tfl_interp);
+
+ return 0;
+}
+
+RandomTestRunner RandomTestRunner::make(int seed)
+{
+ RandomTestParam param;
+
+ param.verbose = nnfw::misc::EnvVar("VERBOSE").asInt(0);
+ param.tolerance = nnfw::misc::EnvVar("TOLERANCE").asInt(1);
+
+ return RandomTestRunner{seed, param};
+}
diff --git a/runtimes/libs/tflite/src/FeatureView.cpp b/runtimes/libs/tflite/src/FeatureView.cpp
new file mode 100644
index 000000000..fdf5a4b00
--- /dev/null
+++ b/runtimes/libs/tflite/src/FeatureView.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/FeatureView.h"
+#include "tflite/TensorUtils.h"
+
+#include <cassert>
+
+namespace nnfw
+{
+namespace tflite
+{
+
+nnfw::misc::feature::Shape getFeatureShape(const TfLiteTensor *tensor)
+{
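+  // TFLite feature maps are laid out as NHWC, i.e. dims are {N, H, W, C};
+  // the feature shape below is ordered {C, H, W}.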
+ nnfw::misc::feature::Shape shape{tensor->dims->data[3], tensor->dims->data[1],
+ tensor->dims->data[2]};
+
+ return shape;
+}
+
+FeatureView<float>::FeatureView(::tflite::Interpreter &interp, const InputIndex &index)
+{
+ const auto tensor_index = interp.inputs().at(index.asInt());
+ auto tensor_ptr = interp.tensor(tensor_index);
+
+ assert(isFloatTensor(tensor_ptr));
+ assert(isFeatureTensor(tensor_ptr));
+
+ _shape = getFeatureShape(tensor_ptr);
+ _base = interp.typed_tensor<float>(tensor_index);
+}
+
+FeatureView<float>::FeatureView(::tflite::Interpreter &interp, const OutputIndex &index)
+{
+ const auto tensor_index = interp.outputs().at(index.asInt());
+ auto tensor_ptr = interp.tensor(tensor_index);
+
+ assert(isFloatTensor(tensor_ptr));
+ assert(isFeatureTensor(tensor_ptr));
+
+ _shape = getFeatureShape(tensor_ptr);
+ _base = interp.typed_tensor<float>(tensor_index);
+}
+
+float FeatureView<float>::at(uint32_t ch, uint32_t row, uint32_t col) const
+{
+ return *(_base + getElementOffset(ch, row, col));
+}
+
+float &FeatureView<float>::at(uint32_t ch, uint32_t row, uint32_t col)
+{
+ return *(_base + getElementOffset(ch, row, col));
+}
+
+} // namespace tflite
+} // namespace nnfw
diff --git a/runtimes/libs/tflite/src/Quantization.cpp b/runtimes/libs/tflite/src/Quantization.cpp
new file mode 100644
index 000000000..9c162c342
--- /dev/null
+++ b/runtimes/libs/tflite/src/Quantization.cpp
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/Quantization.h"
+
+TfLiteQuantizationParams make_default_quantization(void)
+{
+ return TfLiteQuantizationParams{0.0f, 0};
+}
diff --git a/runtimes/libs/tflite/src/TensorShapeUtils.cpp b/runtimes/libs/tflite/src/TensorShapeUtils.cpp
new file mode 100644
index 000000000..29628cd26
--- /dev/null
+++ b/runtimes/libs/tflite/src/TensorShapeUtils.cpp
@@ -0,0 +1,29 @@
+#include "tflite/TensorShapeUtils.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
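+// Computes the broadcast shape of the two operands: the shapes are aligned at
+// the trailing dimension, missing leading dimensions count as 1, and each
+// output dimension is the maximum of the two aligned dimensions (dimension
+// compatibility is assumed rather than checked).
+// For example, broadcasting {3, 1, 5} with {4, 5} yields {3, 4, 5}.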
+nnfw::misc::tensor::Shape broadcast(const nnfw::misc::tensor::Shape &lhs_shape,
+ const nnfw::misc::tensor::Shape &rhs_shape)
+{
+ const uint32_t lhs_rank = lhs_shape.rank();
+ const uint32_t rhs_rank = rhs_shape.rank();
+ const uint32_t out_rank = std::max(lhs_rank, rhs_rank);
+ const uint32_t lhs_rank_diff = out_rank - lhs_rank;
+ const uint32_t rhs_rank_diff = out_rank - rhs_rank;
+
+ nnfw::misc::tensor::Shape out_shape(out_rank);
+
+ for (uint32_t axis = 0; axis < out_rank; ++axis)
+ {
+ out_shape.dim(axis) = std::max(axis < lhs_rank_diff ? 1 : lhs_shape.dim(axis - lhs_rank_diff),
+ axis < rhs_rank_diff ? 1 : rhs_shape.dim(axis - rhs_rank_diff));
+ }
+
+ return out_shape;
+}
+
+} // namespace tflite
+} // namespace nnfw
diff --git a/runtimes/libs/tflite/src/TensorView.test.cpp b/runtimes/libs/tflite/src/TensorView.test.cpp
new file mode 100644
index 000000000..c710b3c33
--- /dev/null
+++ b/runtimes/libs/tflite/src/TensorView.test.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/TensorView.h"
+
+#include <cassert>
+
+void int_test(void)
+{
+ int value[6] = {1, 2, 3, 4, 5, 6};
+
+ const nnfw::misc::tensor::Shape shape{2, 3};
+ const nnfw::tflite::TensorView<int> view{shape, value};
+
+ assert(view.at(nnfw::misc::tensor::Index{0, 0}) == 1);
+ assert(view.at(nnfw::misc::tensor::Index{0, 1}) == 2);
+ assert(view.at(nnfw::misc::tensor::Index{0, 2}) == 3);
+ assert(view.at(nnfw::misc::tensor::Index{1, 0}) == 4);
+ assert(view.at(nnfw::misc::tensor::Index{1, 1}) == 5);
+ assert(view.at(nnfw::misc::tensor::Index{1, 2}) == 6);
+}
+
+int main(int argc, char **argv)
+{
+ float value[6] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
+
+ const nnfw::misc::tensor::Shape shape{2, 3};
+ const nnfw::tflite::TensorView<float> view{shape, value};
+
+ assert(view.at(nnfw::misc::tensor::Index{0, 0}) == 1.0f);
+ assert(view.at(nnfw::misc::tensor::Index{0, 1}) == 2.0f);
+ assert(view.at(nnfw::misc::tensor::Index{0, 2}) == 3.0f);
+ assert(view.at(nnfw::misc::tensor::Index{1, 0}) == 4.0f);
+ assert(view.at(nnfw::misc::tensor::Index{1, 1}) == 5.0f);
+ assert(view.at(nnfw::misc::tensor::Index{1, 2}) == 6.0f);
+
+ int_test();
+
+ return 0;
+}
diff --git a/runtimes/libs/tflite/src/ext/kernels/Abs.cpp b/runtimes/libs/tflite/src/ext/kernels/Abs.cpp
new file mode 100644
index 000000000..61181465d
--- /dev/null
+++ b/runtimes/libs/tflite/src/ext/kernels/Abs.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/ext/kernels/Abs.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+
+#include <iostream>
+#include <cmath>
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+namespace Abs
+{
+
+void *InitAbs(TfLiteContext *, const char *, size_t) { return nullptr; }
+
+void FreeAbs(TfLiteContext *, void *) {}
+
+TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node)
+{
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 1);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
+
+ const TfLiteTensor *input = ::tflite::GetInput(context, node, 0);
+ TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
+
+ TF_LITE_ENSURE_EQ(context, input->type, output->type);
+
+ return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
+}
+
+TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node)
+{
+ const TfLiteTensor *input = ::tflite::GetInput(context, node, 0);
+ TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
+ size_t elements = ::tflite::NumElements(input);
+ switch (input->type)
+ {
+ case kTfLiteFloat32:
+ {
+ auto *in = input->data.f;
+ auto *in_end = in + elements;
+ auto *out = output->data.f;
+ for (; in < in_end; in++, out++)
+ *out = std::abs(*in);
+ return kTfLiteOk;
+ }
+ case kTfLiteInt32:
+ {
+ auto *in = input->data.i32;
+ auto *in_end = in + elements;
+ auto *out = output->data.i32;
+ for (; in < in_end; in++, out++)
+ *out = std::abs(*in);
+ return kTfLiteOk;
+ }
+ case kTfLiteInt64:
+ {
+ auto *in = input->data.i64;
+ auto *in_end = in + elements;
+ auto *out = output->data.i64;
+ for (; in < in_end; in++, out++)
+ *out = std::abs(*in);
+ return kTfLiteOk;
+ }
+ case kTfLiteUInt8:
+ {
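+      // abs() is the identity for unsigned values, so the input is copied as-is.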
+ auto *in = input->data.uint8;
+ auto *in_end = in + elements;
+ auto *out = output->data.uint8;
+ for (; in < in_end; in++, out++)
+ *out = *in;
+ return kTfLiteOk;
+ }
+ default:
+ {
+ context->ReportError(context, "Input type %d is not supported", input->type);
+ return kTfLiteError;
+ }
+ }
+}
+
+} // namespace Abs
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
diff --git a/runtimes/libs/tflite/src/ext/kernels/SquaredDifference.cpp b/runtimes/libs/tflite/src/ext/kernels/SquaredDifference.cpp
new file mode 100644
index 000000000..615878513
--- /dev/null
+++ b/runtimes/libs/tflite/src/ext/kernels/SquaredDifference.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/ext/kernels/SquaredDifference.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+
+#include <iostream>
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+namespace SquaredDifference
+{
+
+void *InitSquaredDifference(TfLiteContext *, const char *, size_t) { return nullptr; }
+
+void FreeSquaredDifference(TfLiteContext *, void *) {}
+
+TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node)
+{
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
+
+ const TfLiteTensor *input1 = ::tflite::GetInput(context, node, 0);
+ const TfLiteTensor *input2 = ::tflite::GetInput(context, node, 1);
+ TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
+
+ TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
+ TF_LITE_ENSURE_EQ(context, input1->type, output->type);
+
+ return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input1->dims));
+}
+
+TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node)
+{
+
+ const TfLiteTensor *input1 = ::tflite::GetInput(context, node, 0);
+ const TfLiteTensor *input2 = ::tflite::GetInput(context, node, 1);
+
+ TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
+
+ size_t elements = ::tflite::NumElements(input1);
+
+ switch (input1->type)
+ {
+ case kTfLiteFloat32:
+ {
+ const float *in1 = input1->data.f;
+ const float *in2 = input2->data.f;
+ const float *in_end1 = in1 + elements;
+ float *out = output->data.f;
+
+ for (; in1 < in_end1; in1++, in2++, out++)
+ *out = ((*in1 - *in2) * (*in1 - *in2));
+
+ return kTfLiteOk;
+ }
+ case kTfLiteInt32:
+ {
+ const int *in1 = input1->data.i32;
+ const int *in2 = input2->data.i32;
+ const int *in_end1 = in1 + elements;
+ int *out = output->data.i32;
+
+ for (; in1 < in_end1; in1++, in2++, out++)
+ *out = ((*in1 - *in2) * (*in1 - *in2));
+
+ return kTfLiteOk;
+ }
+ case kTfLiteInt64:
+ {
+ const int64_t *in1 = input1->data.i64;
+      const int64_t *in2 = input2->data.i64;
+ const int64_t *in_end1 = in1 + elements;
+ int64_t *out = output->data.i64;
+
+ for (; in1 < in_end1; in1++, in2++, out++)
+ *out = ((*in1 - *in2) * (*in1 - *in2));
+
+ return kTfLiteOk;
+ }
+ default:
+ {
+ context->ReportError(context, "InputType is %d Unsupported", input1->type);
+ return kTfLiteError;
+ }
+ }
+}
+
+} // namespace SquaredDifference
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
diff --git a/runtimes/libs/tflite/src/ext/kernels/TensorFlowMax.cpp b/runtimes/libs/tflite/src/ext/kernels/TensorFlowMax.cpp
new file mode 100644
index 000000000..207de98f5
--- /dev/null
+++ b/runtimes/libs/tflite/src/ext/kernels/TensorFlowMax.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/ext/kernels/TensorFlowMax.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+
+#include <iostream>
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+namespace TensorFlowMax
+{
+
+struct TensorFlowMaxOp
+{
+ TensorFlowMaxOp(TfLiteContext *context, TfLiteNode *node)
+ {
+ input = ::tflite::GetInput(context, node, 0);
+ axis = ::tflite::GetInput(context, node, 1);
+ output = ::tflite::GetOutput(context, node, 0);
+ }
+ const TfLiteTensor *input;
+ const TfLiteTensor *axis;
+ TfLiteTensor *output;
+};
+
+void *InitTensorFlowMax(TfLiteContext *context, const char *, size_t)
+{
+ // Creates two temp tensors to store index and axis for internal
+ // implementation only.
+ auto *scratch_tensor_index = new int;
+ context->AddTensors(context, 2, scratch_tensor_index);
+ return scratch_tensor_index;
+}
+
+void FreeTensorFlowMax(TfLiteContext *, void *buffer)
+{
+ delete static_cast<TensorFlowMaxOp *>(buffer);
+}
+
+// Resizes the temp tensor that stores resolved axis.
+TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowMaxOp *op_context,
+ TfLiteTensor *resolved_axis)
+{
+ TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
+ axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis));
+ return context->ResizeTensor(context, resolved_axis, axis_size);
+}
+
+// Resizes output array based on the input size and resolved axis.
+TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_context)
+{
+ int64_t num_axis = ::tflite::NumElements(op_context->axis);
+ TfLiteIntArray *input_dims = op_context->input->dims;
+ int input_num_dims = ::tflite::NumDimensions(op_context->input);
+ const int *axis = op_context->axis->data.i32;
+
+ {
+ // Calculates size of reducing axis.
+ int64_t num_reduce_axis = num_axis;
+ for (int64_t i = 0; i < num_axis; ++i)
+ {
+ int current = axis[i];
+ if (current < 0)
+ {
+ current += input_num_dims;
+ }
+ TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
+ for (int64_t j = 0; j < i; ++j)
+ {
+ int previous = axis[j];
+ if (previous < 0)
+ {
+ previous += input_num_dims;
+ }
+ if (current == previous)
+ {
+ --num_reduce_axis;
+ break;
+ }
+ }
+ }
+ // Determines output dimensions.
+ int output_num_dims = ::tflite::NumDimensions(op_context->output);
+ TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) ||
+ (input_num_dims - num_reduce_axis == output_num_dims));
+
+ if (input_num_dims == output_num_dims)
+ {
+ TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
+ for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ {
+ int current = axis[axis_idx];
+ output_dims->data[current] = 1;
+ }
+ return context->ResizeTensor(context, op_context->output, output_dims);
+ }
+ else
+ {
+ TfLiteIntArray *output_dims = TfLiteIntArrayCreate(output_num_dims);
+ int num_skip_axis = 0;
+ for (int idx = 0; idx < input_num_dims; ++idx)
+ {
+ bool is_axis = false;
+ for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ {
+ if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx)
+ {
+ ++num_skip_axis;
+ is_axis = true;
+ break;
+ }
+ }
+ if (!is_axis)
+ {
+ output_dims->data[idx - num_skip_axis] = input_dims->data[idx];
+ }
+ }
+ return context->ResizeTensor(context, op_context->output, output_dims);
+ }
+ }
+}
+
+// Initializes temp tensors to store index and resolved axis.
+TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node,
+ TensorFlowMaxOp *op_context)
+{
+ // Creates a temp index to iterate through input data.
+ int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data);
+ TfLiteIntArrayFree(node->temporaries);
+ node->temporaries = TfLiteIntArrayCreate(2);
+ node->temporaries->data[0] = *scratch_tensor_index;
+ TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]];
+ scratch_tensor->type = kTfLiteInt32;
+ scratch_tensor->allocation_type = kTfLiteArenaRw;
+ TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
+ index_size->data[0] = ::tflite::NumDimensions(op_context->input);
+ TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
+
+ // Creates a temp tensor to store resolved axis given input data.
+ node->temporaries->data[1] = *scratch_tensor_index + 1;
+ TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
+ resolved_axis->type = kTfLiteInt32;
+ return kTfLiteOk;
+}
+
+TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
+{
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
+
+ TensorFlowMaxOp op_context(context, node);
+ TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
+
+ TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
+ // Leaves work to Eval if axis is not constant; else resizes output.
+ if (!::tflite::IsConstantTensor(op_context.axis))
+ {
+ ::tflite::SetTensorToDynamic(op_context.output);
+ ::tflite::SetTensorToDynamic(resolved_axis);
+ return kTfLiteOk;
+ }
+ resolved_axis->allocation_type = kTfLiteArenaRw;
+ TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
+ return ResizeOutputTensor(context, &op_context);
+}
+
+// Gets offset of index if expanded on axis. When expanded, the flattened offset
+// will not change, if the output index changes on the given axis. For example,
+// if you have a 2D tensor and you are expanding to 3D on axis 0,
+// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened
+// offset.
+inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index,
+ const int num_axis, const int *axis)
+{
+ size_t offset = 0;
+ int out_idx = 0;
+ for (int in_idx = 0; in_idx < num_dims; ++in_idx)
+ {
+ // if we need to expand this axis
+ bool is_axis = false;
+ if (axis != nullptr)
+ {
+ for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ {
+ if (in_idx == axis[axis_idx])
+ {
+ is_axis = true;
+ break;
+ }
+ }
+ }
+ if (!is_axis)
+ {
+ offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]);
+ out_idx++;
+ }
+ else
+ {
+ offset = offset * static_cast<size_t>(dims[in_idx]);
+ }
+ }
+ return offset;
+}
+
+// Gets offset of index if reducing on axis. When reducing, the flattened offset
+// will not change, if the input index changes on the given axis. For example,
+// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
+// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
+// offset.
+// TODO(kanlig): uses Dims to represent dimensions.
+inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index,
+ const int num_axis, const int *axis)
+{
+ size_t offset = 0;
+ for (int idx = 0; idx < num_dims; ++idx)
+ {
+ // if we need to skip this axis
+ bool is_axis = false;
+ if (axis != nullptr)
+ {
+ for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ {
+ if (idx == axis[axis_idx])
+ {
+ is_axis = true;
+ break;
+ }
+ }
+ }
+ if (!is_axis)
+ {
+ offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]);
+ }
+ }
+ return offset;
+}
+
+// Gets next index to iterate through a multidimensional array.
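+// The index advances like an odometer with the last dimension fastest, e.g.
+// for dims {2, 3}: (0,0) -> (0,1) -> (0,2) -> (1,0) -> ... -> (1,2); false is
+// returned once the index wraps back around to all zeros.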
+inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current)
+{
+ int carry = 1;
+ for (int idx = num_dims - 1; idx >= 0; --idx)
+ {
+ int current_val = current[idx] + carry;
+ TF_LITE_ENSURE(context, (dims[idx] >= current_val));
+ if (dims[idx] == current_val)
+ {
+ current[idx] = 0;
+ }
+ else
+ {
+ current[idx] = current_val;
+ carry = 0;
+ break;
+ }
+ }
+ return (carry == 0);
+}
+
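+// Reduces input_data over the resolved axes by taking the element-wise
+// maximum: each output position is first seeded with one corresponding input
+// element, then every input element is folded into its reduced output
+// position whenever it is larger.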
+template <typename T>
+inline TfLiteStatus
+CustomMax(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
+ T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
+ const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
+{
+ // resolves axis.
+ int num_resolved_axis = 0;
+ for (int idx = 0; idx < num_axis_dimensions; ++idx)
+ {
+ int current = axis[idx];
+ TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0));
+ if (current < 0)
+ {
+ current += input_num_dims;
+ }
+ bool is_dup = false;
+ for (int j = 0; j < num_resolved_axis; ++j)
+ {
+ if (resolved_axis[j] == current)
+ {
+ is_dup = true;
+ break;
+ }
+ }
+ if (!is_dup)
+ {
+ resolved_axis[num_resolved_axis++] = current;
+ }
+ }
+
+ TF_LITE_ENSURE(context, (input_num_dims > 0));
+ TF_LITE_ENSURE(context, (input_dims != nullptr));
+ TF_LITE_ENSURE(context, (temp_index != nullptr));
+
+ // resets output data.
+ for (int idx = 0; idx < output_num_dims; ++idx)
+ {
+ temp_index[idx] = 0;
+ }
+ for (bool has_next = true; has_next;
+ has_next = NextIndex(context, output_num_dims, output_dims, temp_index))
+ {
+ size_t output_offset =
+ ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr);
+ size_t input_offset = ExpandedInputOffset(input_num_dims, input_dims, temp_index,
+ num_resolved_axis, resolved_axis);
+ output_data[output_offset] = input_data[input_offset];
+ }
+
+ // resets temp index.
+ for (int idx = 0; idx < input_num_dims; ++idx)
+ {
+ temp_index[idx] = 0;
+ }
+
+ // iterates through input_data.
+ for (bool has_next = true; has_next;
+ has_next = NextIndex(context, input_num_dims, input_dims, temp_index))
+ {
+ size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr);
+ size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index,
+ num_resolved_axis, resolved_axis);
+ if (output_data[output_offset] < input_data[input_offset])
+ {
+ output_data[output_offset] = input_data[input_offset];
+ }
+ }
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
+{
+
+ TensorFlowMaxOp op_context(context, node);
+ int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis));
+ TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
+ TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
+ // Resize the output tensor if the output tensor is dynamic.
+ if (::tflite::IsDynamicTensor(op_context.output))
+ {
+ TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
+ TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+ }
+
+ TfLiteStatus returnStatus = kTfLiteOk;
+ switch (op_context.input->type)
+ {
+ case kTfLiteFloat32:
+ returnStatus = CustomMax<float>(
+ context, op_context.input->data.f, op_context.input->dims->data,
+ op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data,
+ op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
+ temp_index->data.i32, resolved_axis->data.i32);
+ break;
+ case kTfLiteInt32:
+ returnStatus = CustomMax<int>(context, op_context.input->data.i32,
+ op_context.input->dims->data, op_context.input->dims->size,
+ op_context.output->data.i32, op_context.output->dims->data,
+ op_context.output->dims->size, op_context.axis->data.i32,
+ num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
+ break;
+ case kTfLiteUInt8:
+ returnStatus = CustomMax<uint8_t>(
+ context, op_context.input->data.uint8, op_context.input->dims->data,
+ op_context.input->dims->size, op_context.output->data.uint8,
+ op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32,
+ num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
+ break;
+ case kTfLiteInt64:
+ returnStatus = CustomMax<int64_t>(
+ context, op_context.input->data.i64, op_context.input->dims->data,
+ op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data,
+ op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
+ temp_index->data.i32, resolved_axis->data.i32);
+ break;
+ default:
+ returnStatus = kTfLiteError;
+ }
+
+ return returnStatus;
+}
+
+} // namespace TensorFlowMax
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
diff --git a/runtimes/libs/tflite/src/ext/kernels/TensorFlowSum.cpp b/runtimes/libs/tflite/src/ext/kernels/TensorFlowSum.cpp
new file mode 100644
index 000000000..40f266baa
--- /dev/null
+++ b/runtimes/libs/tflite/src/ext/kernels/TensorFlowSum.cpp
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/ext/kernels/TensorFlowSum.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+
+#include <iostream>
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace custom
+{
+namespace TensorFlowSum
+{
+
+struct TensorFlowSumOp
+{
+ TensorFlowSumOp(TfLiteContext *context, TfLiteNode *node)
+ {
+ input = ::tflite::GetInput(context, node, 0);
+ axis = ::tflite::GetInput(context, node, 1);
+ output = ::tflite::GetOutput(context, node, 0);
+ }
+ const TfLiteTensor *input;
+ const TfLiteTensor *axis;
+ TfLiteTensor *output;
+};
+
+void *InitTensorFlowSum(TfLiteContext *context, const char *, size_t)
+{
+ // Creates two temp tensors to store index and axis for internal
+ // implementation only.
+ auto *scratch_tensor_index = new int;
+ context->AddTensors(context, 2, scratch_tensor_index);
+ return scratch_tensor_index;
+}
+
+void FreeTensorFlowSum(TfLiteContext *, void *buffer)
+{
+ delete static_cast<TensorFlowSumOp *>(buffer);
+}
+
+// Resizes the temp tensor that stores resolved axis.
+TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowSumOp *op_context,
+ TfLiteTensor *resolved_axis)
+{
+ TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
+ axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis));
+ return context->ResizeTensor(context, resolved_axis, axis_size);
+}
+
+// Resizes output array based on the input size and resolved axis.
+TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_context)
+{
+ int64_t num_axis = ::tflite::NumElements(op_context->axis);
+ TfLiteIntArray *input_dims = op_context->input->dims;
+ int input_num_dims = ::tflite::NumDimensions(op_context->input);
+ const int *axis = op_context->axis->data.i32;
+
+ {
+ // Calculates size of reducing axis.
+ int64_t num_reduce_axis = num_axis;
+ for (int64_t i = 0; i < num_axis; ++i)
+ {
+ int current = axis[i];
+ if (current < 0)
+ {
+ current += input_num_dims;
+ }
+ TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
+ for (int64_t j = 0; j < i; ++j)
+ {
+ int previous = axis[j];
+ if (previous < 0)
+ {
+ previous += input_num_dims;
+ }
+ if (current == previous)
+ {
+ --num_reduce_axis;
+ break;
+ }
+ }
+ }
+ // Determines output dimensions.
+ int output_num_dims = ::tflite::NumDimensions(op_context->output);
+ TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) ||
+ (input_num_dims - num_reduce_axis == output_num_dims));
+
+ if (input_num_dims == output_num_dims)
+ {
+ TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
+ for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ {
+ int current = axis[axis_idx];
+ output_dims->data[current] = 1;
+ }
+ return context->ResizeTensor(context, op_context->output, output_dims);
+ }
+ else
+ {
+ TfLiteIntArray *output_dims = TfLiteIntArrayCreate(output_num_dims);
+ int num_skip_axis = 0;
+ for (int idx = 0; idx < input_num_dims; ++idx)
+ {
+ bool is_axis = false;
+ for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ {
+ if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx)
+ {
+ ++num_skip_axis;
+ is_axis = true;
+ break;
+ }
+ }
+ if (!is_axis)
+ {
+ output_dims->data[idx - num_skip_axis] = input_dims->data[idx];
+ }
+ }
+ return context->ResizeTensor(context, op_context->output, output_dims);
+ }
+ }
+}
+
+// Initializes temp tensors to store index and resolved axis.
+TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node,
+ TensorFlowSumOp *op_context)
+{
+ // Creates a temp index to iterate through input data.
+ int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data);
+ TfLiteIntArrayFree(node->temporaries);
+ node->temporaries = TfLiteIntArrayCreate(2);
+ node->temporaries->data[0] = *scratch_tensor_index;
+ TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]];
+ scratch_tensor->type = kTfLiteInt32;
+ scratch_tensor->allocation_type = kTfLiteArenaRw;
+ TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
+ index_size->data[0] = ::tflite::NumDimensions(op_context->input);
+ TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
+
+ // Creates a temp tensor to store resolved axis given input data.
+ node->temporaries->data[1] = *scratch_tensor_index + 1;
+ TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
+ resolved_axis->type = kTfLiteInt32;
+ return kTfLiteOk;
+}
+
+TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node)
+{
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
+
+ TensorFlowSumOp op_context(context, node);
+ TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
+
+ TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
+ // Leaves work to Eval if axis is not constant; else resizes output.
+ if (!::tflite::IsConstantTensor(op_context.axis))
+ {
+ ::tflite::SetTensorToDynamic(op_context.output);
+ ::tflite::SetTensorToDynamic(resolved_axis);
+ return kTfLiteOk;
+ }
+ resolved_axis->allocation_type = kTfLiteArenaRw;
+ TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
+ return ResizeOutputTensor(context, &op_context);
+}
+
+// Gets offset of index if expanded on axis. When expanded, the flattened offset
+// will not change, if the output index changes on the given axis. For example,
+// if you have a 2D tensor and you are expanding to 3D on axis 0,
+// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened
+// offset.
+inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index,
+ const int num_axis, const int *axis)
+{
+ size_t offset = 0;
+ int out_idx = 0;
+ for (int in_idx = 0; in_idx < num_dims; ++in_idx)
+ {
+ // if we need to expand this axis
+ bool is_axis = false;
+ if (axis != nullptr)
+ {
+ for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ {
+ if (in_idx == axis[axis_idx])
+ {
+ is_axis = true;
+ break;
+ }
+ }
+ }
+ if (!is_axis)
+ {
+ offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]);
+ out_idx++;
+ }
+ else
+ {
+ offset = offset * static_cast<size_t>(dims[in_idx]);
+ }
+ }
+ return offset;
+}
+
+// Gets offset of index if reducing on axis. When reducing, the flattened offset
+// will not change, if the input index changes on the given axis. For example,
+// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
+// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
+// offset.
+// TODO(kanlig): uses Dims to represent dimensions.
+inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index,
+ const int num_axis, const int *axis)
+{
+ size_t offset = 0;
+ for (int idx = 0; idx < num_dims; ++idx)
+ {
+ // if we need to skip this axis
+ bool is_axis = false;
+ if (axis != nullptr)
+ {
+ for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ {
+ if (idx == axis[axis_idx])
+ {
+ is_axis = true;
+ break;
+ }
+ }
+ }
+ if (!is_axis)
+ {
+ offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]);
+ }
+ }
+ return offset;
+}
+
+// Gets next index to iterate through a multidimensional array.
+inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current)
+{
+ int carry = 1;
+ for (int idx = num_dims - 1; idx >= 0; --idx)
+ {
+ int current_val = current[idx] + carry;
+ TF_LITE_ENSURE(context, (dims[idx] >= current_val));
+ if (dims[idx] == current_val)
+ {
+ current[idx] = 0;
+ }
+ else
+ {
+ current[idx] = current_val;
+ carry = 0;
+ break;
+ }
+ }
+ return (carry == 0);
+}
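+// Example for NextIndex (editorial note): for dims = {2, 3}, starting from
+// {0, 0} the callers visit {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2} in odometer
+// order, and NextIndex returns false once {1, 2} wraps back to {0, 0}.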
+
+template <typename T>
+inline TfLiteStatus
+CustomSum(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
+ T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
+ const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
+{
+ // resolves axis.
+ int num_resolved_axis = 0;
+ for (int idx = 0; idx < num_axis_dimensions; ++idx)
+ {
+ int current = axis[idx];
+ TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0));
+ if (current < 0)
+ {
+ current += input_num_dims;
+ }
+ bool is_dup = false;
+ for (int j = 0; j < num_resolved_axis; ++j)
+ {
+ if (resolved_axis[j] == current)
+ {
+ is_dup = true;
+ break;
+ }
+ }
+ if (!is_dup)
+ {
+ resolved_axis[num_resolved_axis++] = current;
+ }
+ }
+
+ TF_LITE_ENSURE(context, (input_num_dims > 0));
+ TF_LITE_ENSURE(context, (input_dims != nullptr));
+ TF_LITE_ENSURE(context, (temp_index != nullptr));
+
+ // resets output data.
+ for (int idx = 0; idx < output_num_dims; ++idx)
+ {
+ temp_index[idx] = 0;
+ }
+ for (bool has_next = true; has_next;
+ has_next = NextIndex(context, output_num_dims, output_dims, temp_index))
+ {
+ size_t output_offset =
+ ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr);
+ output_data[output_offset] = 0;
+ }
+
+ // resets temp index.
+ for (int idx = 0; idx < input_num_dims; ++idx)
+ {
+ temp_index[idx] = 0;
+ }
+
+ // iterates through input_data.
+ for (bool has_next = true; has_next;
+ has_next = NextIndex(context, input_num_dims, input_dims, temp_index))
+ {
+ size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr);
+ size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index,
+ num_resolved_axis, resolved_axis);
+ output_data[output_offset] += input_data[input_offset];
+ }
+
+ return kTfLiteOk;
+}
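+// Worked example for CustomSum (editorial note): summing a 2x3 input
+// {{1,2,3},{4,5,6}} over axis 0 resolves the axis to {0}, zeroes the 3-element
+// output, and accumulates each input element into the ReducedOutputOffset of
+// its index, yielding {5, 7, 9}.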
+
+TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node)
+{
+
+ TensorFlowSumOp op_context(context, node);
+ int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis));
+ TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
+ TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
+ // Resize the output tensor if the output tensor is dynamic.
+ if (::tflite::IsDynamicTensor(op_context.output))
+ {
+ TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
+ TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+ }
+
+ TfLiteStatus returnStatus = kTfLiteOk;
+ switch (op_context.input->type)
+ {
+ case kTfLiteFloat32:
+ returnStatus = CustomSum<float>(
+ context, op_context.input->data.f, op_context.input->dims->data,
+ op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data,
+ op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
+ temp_index->data.i32, resolved_axis->data.i32);
+ break;
+ case kTfLiteInt32:
+ returnStatus = CustomSum<int>(context, op_context.input->data.i32,
+ op_context.input->dims->data, op_context.input->dims->size,
+ op_context.output->data.i32, op_context.output->dims->data,
+ op_context.output->dims->size, op_context.axis->data.i32,
+ num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
+ break;
+ case kTfLiteUInt8:
+ returnStatus = CustomSum<uint8_t>(
+ context, op_context.input->data.uint8, op_context.input->dims->data,
+ op_context.input->dims->size, op_context.output->data.uint8,
+ op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32,
+ num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
+ break;
+ case kTfLiteInt64:
+ returnStatus = CustomSum<int64_t>(
+ context, op_context.input->data.i64, op_context.input->dims->data,
+ op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data,
+ op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
+ temp_index->data.i32, resolved_axis->data.i32);
+ break;
+ default:
+ returnStatus = kTfLiteError;
+ }
+
+ return returnStatus;
+}
+
+} // namespace TensorFlowSum
+} // namespace custom
+} // namespace tflite
+} // namespace nnfw
diff --git a/runtimes/libs/tflite/src/ext/kernels/register.cpp b/runtimes/libs/tflite/src/ext/kernels/register.cpp
new file mode 100644
index 000000000..a99899a80
--- /dev/null
+++ b/runtimes/libs/tflite/src/ext/kernels/register.cpp
@@ -0,0 +1,247 @@
+/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This code is derived from the following file (in TensorFlow)
+// 'externals/tensorflow/tensorflow/lite/kernels/register.cc'
+#include "tflite/ext/kernels/register.h"
+#include "tflite/ext/kernels/CustomOps.h"
+
+namespace tflite {
+namespace ops {
+namespace custom {
+
+TfLiteRegistration* Register_DETECTION_POSTPROCESS();
+
+} // namespace custom
+} // namespace ops
+} // namespace tflite
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+
+TfLiteRegistration *Register_RELU();
+TfLiteRegistration *Register_RELU_N1_TO_1();
+TfLiteRegistration *Register_RELU6();
+TfLiteRegistration *Register_TANH();
+TfLiteRegistration *Register_LOGISTIC();
+TfLiteRegistration *Register_AVERAGE_POOL_2D();
+TfLiteRegistration *Register_MAX_POOL_2D();
+TfLiteRegistration *Register_L2_POOL_2D();
+TfLiteRegistration *Register_CONV_2D();
+TfLiteRegistration *Register_DEPTHWISE_CONV_2D();
+TfLiteRegistration *Register_SVDF();
+TfLiteRegistration *Register_RNN();
+TfLiteRegistration *Register_BIDIRECTIONAL_SEQUENCE_RNN();
+TfLiteRegistration *Register_UNIDIRECTIONAL_SEQUENCE_RNN();
+TfLiteRegistration *Register_EMBEDDING_LOOKUP();
+TfLiteRegistration *Register_EMBEDDING_LOOKUP_SPARSE();
+TfLiteRegistration *Register_FULLY_CONNECTED();
+TfLiteRegistration *Register_LSH_PROJECTION();
+TfLiteRegistration *Register_HASHTABLE_LOOKUP();
+TfLiteRegistration *Register_SOFTMAX();
+TfLiteRegistration *Register_CONCATENATION();
+TfLiteRegistration *Register_ADD();
+TfLiteRegistration *Register_SPACE_TO_BATCH_ND();
+TfLiteRegistration *Register_DIV();
+TfLiteRegistration *Register_SUB();
+TfLiteRegistration *Register_BATCH_TO_SPACE_ND();
+TfLiteRegistration *Register_MUL();
+TfLiteRegistration *Register_L2_NORMALIZATION();
+TfLiteRegistration *Register_LOCAL_RESPONSE_NORMALIZATION();
+TfLiteRegistration *Register_LSTM();
+TfLiteRegistration *Register_BIDIRECTIONAL_SEQUENCE_LSTM();
+TfLiteRegistration *Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
+TfLiteRegistration *Register_PAD();
+TfLiteRegistration *Register_PADV2();
+TfLiteRegistration *Register_RESHAPE();
+TfLiteRegistration *Register_RESIZE_BILINEAR();
+TfLiteRegistration *Register_SKIP_GRAM();
+TfLiteRegistration *Register_SPACE_TO_DEPTH();
+TfLiteRegistration *Register_GATHER();
+TfLiteRegistration *Register_TRANSPOSE();
+TfLiteRegistration *Register_MEAN();
+TfLiteRegistration *Register_SPLIT();
+TfLiteRegistration *Register_SQUEEZE();
+TfLiteRegistration *Register_STRIDED_SLICE();
+TfLiteRegistration *Register_EXP();
+TfLiteRegistration *Register_TOPK_V2();
+TfLiteRegistration *Register_LOG_SOFTMAX();
+TfLiteRegistration *Register_CAST();
+TfLiteRegistration *Register_DEQUANTIZE();
+TfLiteRegistration *Register_PRELU();
+TfLiteRegistration *Register_MAXIMUM();
+TfLiteRegistration *Register_MINIMUM();
+TfLiteRegistration *Register_ARG_MAX();
+TfLiteRegistration *Register_GREATER();
+TfLiteRegistration *Register_GREATER_EQUAL();
+TfLiteRegistration *Register_LESS();
+TfLiteRegistration *Register_LESS_EQUAL();
+TfLiteRegistration *Register_FLOOR();
+TfLiteRegistration *Register_NEG();
+TfLiteRegistration *Register_SELECT();
+TfLiteRegistration *Register_SLICE();
+TfLiteRegistration *Register_SIN();
+TfLiteRegistration *Register_TRANSPOSE_CONV();
+TfLiteRegistration *Register_SPARSE_TO_DENSE();
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+TfLiteRegistration *Register_SUM();
+TfLiteRegistration *Register_REDUCE_MAX();
+TfLiteRegistration *Register_REDUCE_MIN();
+TfLiteRegistration *Register_EQUAL();
+TfLiteRegistration *Register_NOT_EQUAL();
+TfLiteRegistration *Register_SQRT();
+TfLiteRegistration *Register_RSQRT();
+TfLiteRegistration *Register_SHAPE();
+TfLiteRegistration *Register_POW();
+TfLiteRegistration *Register_FAKE_QUANT();
+TfLiteRegistration *Register_PACK();
+TfLiteRegistration *Register_ONE_HOT();
+TfLiteRegistration *Register_LOGICAL_OR();
+TfLiteRegistration *Register_LOGICAL_AND();
+TfLiteRegistration *Register_LOGICAL_NOT();
+TfLiteRegistration *Register_UNPACK();
+TfLiteRegistration *Register_FLOOR_DIV();
+TfLiteRegistration *Register_SQUARE();
+TfLiteRegistration *Register_ZEROS_LIKE();
+TfLiteRegistration* Register_FLOOR_MOD();
+TfLiteRegistration* Register_RANGE();
+TfLiteRegistration* Register_LEAKY_RELU();
+TfLiteRegistration* Register_SQUARED_DIFFERENCE();
+TfLiteRegistration* Register_FILL();
+TfLiteRegistration* Register_MIRROR_PAD();
+#endif // TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
+
+namespace nnfw {
+namespace tflite {
+
+BuiltinOpResolver::BuiltinOpResolver()
+{
+ // Using namespace directive to minimize diff with upstream tensorflow
+ using namespace ::tflite::ops::custom;
+ using namespace ::tflite::ops::builtin;
+ using namespace ::tflite;
+
+ AddBuiltin(BuiltinOperator_RELU, Register_RELU());
+ AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
+ AddBuiltin(BuiltinOperator_RELU6, Register_RELU6());
+ AddBuiltin(BuiltinOperator_TANH, Register_TANH());
+ AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC());
+ AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D());
+ AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D());
+ AddBuiltin(BuiltinOperator_L2_POOL_2D, Register_L2_POOL_2D());
+ AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D());
+ AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D());
+ AddBuiltin(BuiltinOperator_SVDF, Register_SVDF());
+ AddBuiltin(BuiltinOperator_RNN, Register_RNN());
+ AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, Register_BIDIRECTIONAL_SEQUENCE_RNN());
+ AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, Register_UNIDIRECTIONAL_SEQUENCE_RNN());
+ AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP());
+ AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, Register_EMBEDDING_LOOKUP_SPARSE());
+ AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED());
+ AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
+ AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
+ AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX());
+ AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION());
+ AddBuiltin(BuiltinOperator_ADD, Register_ADD());
+ AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND());
+ AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, Register_BATCH_TO_SPACE_ND());
+ AddBuiltin(BuiltinOperator_MUL, Register_MUL());
+ AddBuiltin(BuiltinOperator_L2_NORMALIZATION, Register_L2_NORMALIZATION());
+ AddBuiltin(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, Register_LOCAL_RESPONSE_NORMALIZATION());
+ AddBuiltin(BuiltinOperator_LSTM, Register_LSTM());
+ AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, Register_BIDIRECTIONAL_SEQUENCE_LSTM());
+ AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, Register_UNIDIRECTIONAL_SEQUENCE_LSTM());
+ AddBuiltin(BuiltinOperator_PAD, Register_PAD());
+ AddBuiltin(BuiltinOperator_PADV2, Register_PADV2());
+ AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
+ AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR());
+ AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());
+ AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH());
+ AddBuiltin(BuiltinOperator_GATHER, Register_GATHER());
+ AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE());
+ AddBuiltin(BuiltinOperator_MEAN, Register_MEAN());
+ AddBuiltin(BuiltinOperator_DIV, Register_DIV());
+ AddBuiltin(BuiltinOperator_SUB, Register_SUB());
+ AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT());
+ AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE());
+ AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE());
+ AddBuiltin(BuiltinOperator_EXP, Register_EXP());
+ AddBuiltin(BuiltinOperator_TOPK_V2, Register_TOPK_V2());
+ AddBuiltin(BuiltinOperator_LOG_SOFTMAX, Register_LOG_SOFTMAX());
+ AddBuiltin(BuiltinOperator_CAST, Register_CAST());
+ AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE());
+ AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
+ AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM());
+ AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM());
+ AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX());
+ AddBuiltin(BuiltinOperator_GREATER, Register_GREATER());
+ AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL());
+ AddBuiltin(BuiltinOperator_LESS, Register_LESS());
+ AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL());
+ AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR());
+ AddBuiltin(BuiltinOperator_NEG, Register_NEG());
+ AddBuiltin(BuiltinOperator_SELECT, Register_SELECT());
+ AddBuiltin(BuiltinOperator_SLICE, Register_SLICE());
+ AddBuiltin(BuiltinOperator_SIN, Register_SIN());
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+ AddBuiltin(BuiltinOperator_SUM, Register_SUM());
+ AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX());
+ AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN());
+ AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV());
+ AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
+ AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL());
+ AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL());
+ AddBuiltin(BuiltinOperator_SQRT, Register_SQRT());
+ AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT());
+ AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
+ AddBuiltin(BuiltinOperator_POW, Register_POW());
+ AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
+ AddBuiltin(BuiltinOperator_PACK, Register_PACK());
+ AddBuiltin(BuiltinOperator_ONE_HOT, Register_ONE_HOT());
+ AddBuiltin(BuiltinOperator_LOGICAL_OR, Register_LOGICAL_OR());
+ AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND());
+ AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT());
+ AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK());
+ AddBuiltin(BuiltinOperator_FLOOR_DIV, Register_FLOOR_DIV());
+ AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE());
+ AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE());
+ AddBuiltin(BuiltinOperator_FLOOR_MOD, Register_FLOOR_MOD());
+ AddBuiltin(BuiltinOperator_RANGE, Register_RANGE());
+ AddBuiltin(BuiltinOperator_LEAKY_RELU, Register_LEAKY_RELU());
+ AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, Register_SQUARED_DIFFERENCE());
+ AddBuiltin(BuiltinOperator_FILL, Register_FILL());
+ AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD());
+#endif // TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+
+ AddCustom("TensorFlowMax", nnfw::tflite::custom::Register_TensorFlowMax());
+ AddCustom("SquaredDifference", nnfw::tflite::custom::Register_SquaredDifference());
+ AddCustom("TensorFlowSum", nnfw::tflite::custom::Register_TensorFlowSum());
+ AddCustom("Abs", nnfw::tflite::custom::Register_Abs());
+ AddCustom("TFLite_Detection_PostProcess",
+ ::tflite::ops::custom::Register_DETECTION_POSTPROCESS());
+
+}
+
+} // namespace tflite
+} // namespace nnfw
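A minimal usage sketch (editorial addition; the helper name and model path are assumed, not part of this change): the resolver defined above can be handed to the standard TFLite InterpreterBuilder so that models referencing the custom ops ("TensorFlowMax", "TensorFlowSum", "SquaredDifference", "Abs") resolve at build time.

// Editorial sketch: build an interpreter with the nnfw resolver (assumed model path).
#include <memory>
#include "tensorflow/lite/model.h"
#include "tflite/ext/kernels/register.h"

std::unique_ptr<::tflite::Interpreter> BuildInterpreter()
{
  auto model = ::tflite::FlatBufferModel::BuildFromFile("model.tflite");
  if (!model) return nullptr;
  nnfw::tflite::BuiltinOpResolver resolver;
  std::unique_ptr<::tflite::Interpreter> interpreter;
  ::tflite::InterpreterBuilder(*model, resolver)(&interpreter);
  interpreter->AllocateTensors();
  return interpreter;
}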
diff --git a/runtimes/libs/tflite/src/ext/nnapi_delegate.cpp b/runtimes/libs/tflite/src/ext/nnapi_delegate.cpp
new file mode 100644
index 000000000..55bdb0cd5
--- /dev/null
+++ b/runtimes/libs/tflite/src/ext/nnapi_delegate.cpp
@@ -0,0 +1,1238 @@
+/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This code is derived from the following file (in TensorFlow v1.12)
+// 'externals/tensorflow/tensorflow/lite/nnapi_delegate.cc'
+#include "tflite/ext/nnapi_delegate.h"
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 9
+#include "tensorflow/lite/builtin_op_data.h"
+#include "tensorflow/lite/error_reporter.h"
+#else
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#endif
+#include "tensorflow/lite/model.h"
+#include <rua/Shim.h>
+#include "NeuralNetworksExShim.h"
+
+#ifdef __ANDROID__
+#include <android/log.h>
+#include <sys/system_properties.h>
+#endif
+
+namespace nnfw {
+namespace tflite {
+
+void logError(const char* format, ...) {
+ // stderr is convenient for native tests, but is not captured for apps
+ va_list args_for_stderr;
+ va_start(args_for_stderr, format);
+ vfprintf(stderr, format, args_for_stderr);
+ va_end(args_for_stderr);
+ fprintf(stderr, "\n");
+ fflush(stderr);
+#ifdef __ANDROID__
+ // produce logcat output for general consumption
+ va_list args_for_log;
+ va_start(args_for_log, format);
+ __android_log_vprint(ANDROID_LOG_ERROR, "tflite", format, args_for_log);
+ va_end(args_for_log);
+#endif
+}
+
+#define FATAL(...) \
+ logError(__VA_ARGS__); \
+ exit(1);
+
+// TODO(aselle): Change the error model to use status codes.
+#define CHECK_TFLITE_SUCCESS(x) \
+ if (x != kTfLiteOk) { \
+ FATAL("Aborting since tflite returned failure nnapi_delegate.cc:%d.", \
+ __LINE__); \
+ }
+
+#define CHECK_NN(x) \
+ if (x != ANEURALNETWORKS_NO_ERROR) { \
+ FATAL("Aborting since NNAPI returned failure nnapi_delegate.cc:%d", \
+ __LINE__); \
+ }
+
+#define RETURN_ERROR_IF_TFLITE_FAILED(x) \
+ if (x != kTfLiteOk) { \
+ logError( \
+ "Returning error since TFLite returned failure nnapi_delegate.cc:%d.", \
+ __LINE__); \
+ return kTfLiteError; \
+ }
+
+#define RETURN_ERROR_IF_NN_FAILED(x) \
+ if (x != ANEURALNETWORKS_NO_ERROR) { \
+ logError( \
+ "Returning error since NNAPI returned failure nnapi_delegate.cc:%d.", \
+ __LINE__); \
+ return kTfLiteError; \
+ }
+
+// Tracking of NNAPI operand ids
+static const int64_t kOperandIdNotSet = -1;
+static const int64_t kOperandNotNeeded = -2;
+
+namespace {
+
+int32_t GetAndroidSdkVersion() {
+#ifdef __ANDROID__
+ const char* sdkProp = "ro.build.version.sdk";
+ char sdkVersion[PROP_VALUE_MAX];
+ int length = __system_property_get(sdkProp, sdkVersion);
+ if (length != 0) {
+ for (int i = 0; i < length; ++i) {
+ int digit = sdkVersion[i] - '0';
+ if (digit < 0 || digit > 9) {
+ // Non-numeric SDK version, assume it's higher than expected.
+ return 0xFFFF;
+ }
+ }
+ return std::strtol(sdkVersion, NULL, 0);
+ }
+ FATAL("No %s prop", sdkProp);
+#endif // __ANDROID__
+ return 0;
+}
+
+int32_t GetAndroidSdkVersionCached() {
+ static int32_t androidSdkVersion = GetAndroidSdkVersion();
+ return androidSdkVersion;
+}
+
+static const uint32_t dimension_for_scalar[1] = {1};
+
+} // namespace
+
+NNAPIAllocation::NNAPIAllocation(const char* filename,
+ ::tflite::ErrorReporter* error_reporter)
+ : MMAPAllocation(filename, error_reporter) {
+ if (mmapped_buffer_ != MAP_FAILED)
+ CHECK_NN(ANeuralNetworksMemory_createFromFd(buffer_size_bytes_, PROT_READ,
+ mmap_fd_, 0, &handle_));
+}
+
+NNAPIAllocation::~NNAPIAllocation() {
+ if (handle_) {
+ ANeuralNetworksMemory_free(handle_);
+ }
+}
+
+NNAPIDelegate::~NNAPIDelegate() {
+ if (nn_compiled_model_) {
+ ANeuralNetworksCompilation_free(nn_compiled_model_);
+ nn_compiled_model_ = nullptr;
+ }
+ if (nn_model_) {
+ ANeuralNetworksModel_free(nn_model_);
+ nn_model_ = nullptr;
+ // TODO(aselle): Is this thread-safe and callable multiple times?
+ }
+ // ANeuralNetworksShutdown();
+}
+
+// Adds the tensors of the interpreter to the NN API model.
+TfLiteStatus addTensorOperands(::tflite::Interpreter* interpreter,
+ ANeuralNetworksModel* nn_model,
+ uint32_t* no_of_operands_added,
+ std::vector<int64_t>* nnapi_ids) {
+ uint32_t next_id = 0;
+ for (size_t i = 0; i < interpreter->tensors_size(); i++) {
+ // Skip temporaries and RNN back-edges.
+ if ((*nnapi_ids)[i] == kOperandNotNeeded) continue;
+
+ (*nnapi_ids)[i] = int64_t(next_id);
+
+ int32_t nn_type = 0;
+ // NNAPI requires 32-bit float scale to be zero; tflite doesn't care
+ float scale = 0.0f;
+ int32_t zeroPoint = 0;
+ TfLiteTensor* tensor = interpreter->tensor(i);
+ switch (tensor->type) {
+ case kTfLiteNoType:
+ // Tensors added during initialization of Ops don't have a type yet and
+ // should not be registered with the NNAPI.
+ continue;
+ case kTfLiteFloat32:
+ nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
+ break;
+ case kTfLiteUInt8:
+ nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
+ scale = tensor->params.scale;
+ // FIXME The next line is a workaround because currently zero scale is
+ // passed down from TF
+ // Lite. Note that the latest NeuralNetworks.h (see
+ // https://android.googlesource.com/platform/frameworks/ml/+/master/nn/runtime/include/NeuralNetworks.h)
+ // requires scale to be greater than zero. Remove this workaround
+ // when the scale
+ // value is correctly passed.
+ scale = (scale == 0.0f) ? 1.0f : scale;
+ zeroPoint = tensor->params.zero_point;
+ break;
+ case kTfLiteInt32:
+ nn_type = ANEURALNETWORKS_TENSOR_INT32;
+ scale = tensor->params.scale;
+ zeroPoint = tensor->params.zero_point;
+ break;
+ case kTfLiteBool:
+ // Workaround to pass bool type under NNAPI
+ // Pass bool as ANEURALNETWORKS_TENSOR_QUANT8_ASYMM with scale = 1.0f and zero_point = 0
+ nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
+ scale = 1.0f;
+ zeroPoint = 0;
+ break;
+ default:
+ logError("Unsupported tensor type %d", tensor->type);
+ return kTfLiteError;
+ }
+ if (tensor->dims->size == 0) {
+ // WORKAROUND Some models have dimension zero
+ switch (tensor->type) {
+ case kTfLiteFloat32:
+ nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
+ break;
+ case kTfLiteInt32:
+ nn_type = ANEURALNETWORKS_TENSOR_INT32;
+ break;
+ default:
+ logError("NNAPI doesn't support tensors with rank 0 (index %d name %s)",
+ i, tensor->name);
+ return kTfLiteError;
+ }
+ }
+ if (tensor->dims->size > 4) {
+ logError("NNAPI doesn't support tensors with rank > 4 (index %d name %s)",
+ i, tensor->name);
+ return kTfLiteError;
+ }
+ // TODO(aselle): Note, many of these are intermediate results. Do I need
+ // to ever specify these sizes? I am currently below doing setValue
+ // on all of them, but I shouldn't in the future.
+ // Answer(jeanluc): If all the operators can set the dimension correctly,
+ // you won't need to.
+ ANeuralNetworksOperandType operand_type{
+ nn_type, static_cast<uint32_t>(tensor->dims->size),
+ reinterpret_cast<uint32_t*>(tensor->dims->data), scale, zeroPoint};
+ if (tensor->dims->size == 0) {
+ // WORKAROUND Some models have dimension zero
+ // Consider a scalar as a vector of size 1
+ operand_type.dimensions = dimension_for_scalar;
+ operand_type.dimensionCount = 1;
+ }
+ RETURN_ERROR_IF_NN_FAILED(
+ ANeuralNetworksModel_addOperand(nn_model, &operand_type));
+ // TODO(aselle): Based on Michael's suggestion, limiting this to read
+ // only memory
+ if (tensor->allocation_type == kTfLiteMmapRo) {
+ if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
+ static_cast<const ::tflite::Allocation*>(tensor->allocation))) {
+ RETURN_ERROR_IF_NN_FAILED(
+ ANeuralNetworksModel_setOperandValueFromMemory(
+ nn_model, next_id, alloc->memory(),
+ alloc->offset(tensor->data.raw), tensor->bytes));
+ } else {
+ RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_setOperandValue(
+ nn_model, next_id, tensor->data.raw, tensor->bytes));
+ }
+ } else if (tensor->bytes == 0) {
+ // These size-0 tensors are reserved optional tensors.
+ RETURN_ERROR_IF_NN_FAILED(
+ ANeuralNetworksModel_setOperandValue(nn_model, next_id, nullptr, 0));
+ }
+
+ ++next_id;
+ }
+ *no_of_operands_added = next_id;
+ return kTfLiteOk;
+}
+
+void MapAndAddTensorIds(const int* from_ids_buf, size_t from_ids_count,
+ std::vector<uint32_t>* into,
+ const std::vector<int64_t>& map) {
+ for (size_t i = 0; i < from_ids_count; i++) {
+ int from_id = from_ids_buf[i];
+ if (from_id == kOptionalTensor) {
+ into->push_back(from_id);
+ } else {
+ into->push_back(map[from_id]);
+ }
+ }
+}
+
+// Adds the operations and their parameters to the NN API model.
+// 'next-id' is the operand ID of the next operand of the model.
+TfLiteStatus AddOpsAndParams(
+ ::tflite::Interpreter* interpreter, ANeuralNetworksModel* nn_model,
+ uint32_t next_id, std::vector<int>* model_state_inputs,
+ std::vector<int>* model_state_outputs,
+ const std::vector<int64_t>& tensor_id_to_nnapi_id) {
+ for (size_t i = 0; i < interpreter->nodes_size(); i++) {
+ const auto* node_and_registration = interpreter->node_and_registration(i);
+ const TfLiteNode& node = node_and_registration->first;
+ const TfLiteRegistration& registration = node_and_registration->second;
+ ::tflite::BuiltinOperator builtin =
+ static_cast<::tflite::BuiltinOperator>(registration.builtin_code);
+
+ // Add the parameters.
+ std::vector<uint32_t> augmented_inputs, augmented_outputs;
+ MapAndAddTensorIds(node.inputs->data, node.inputs->size, &augmented_inputs,
+ tensor_id_to_nnapi_id);
+ MapAndAddTensorIds(node.outputs->data, node.outputs->size,
+ &augmented_outputs, tensor_id_to_nnapi_id);
+
+ auto add_scalar_int32 = [&nn_model, &augmented_inputs,
+ &next_id](int value) {
+ ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_INT32;
+ CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
+ CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
+ sizeof(int32_t)))
+ augmented_inputs.push_back(next_id++);
+ };
+
+ auto add_scalar_float32 = [&nn_model, &augmented_inputs,
+ &next_id](float value) {
+ ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_FLOAT32;
+ CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
+ CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
+ sizeof(float)))
+ augmented_inputs.push_back(next_id++);
+ };
+
+ auto add_vector_int32 = [&](const int* values, uint32_t num_values) {
+ ANeuralNetworksOperandType operand_type{};
+ operand_type.type = ANEURALNETWORKS_TENSOR_INT32;
+ operand_type.dimensionCount = 1;
+ operand_type.dimensions = &num_values;
+ CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
+ CHECK_NN(ANeuralNetworksModel_setOperandValue(
+ nn_model, next_id, values, sizeof(int32_t) * num_values));
+ augmented_inputs.push_back(next_id++);
+ };
+
+ // Handle state tensors of RNN, LSTM, SVDF.
+ // For each state_out tensor, a corresponding state_in operand needs to be
+ // created for NNAPI.
+ auto duplicate_state_tensor_float32 =
+ [interpreter, &nn_model, &next_id, &augmented_inputs,
+ &model_state_inputs, &model_state_outputs](int tensor_id) {
+ const TfLiteTensor* tensor = interpreter->tensor(tensor_id);
+ ANeuralNetworksOperandType operand_type{
+ ANEURALNETWORKS_TENSOR_FLOAT32,
+ static_cast<uint32_t>(tensor->dims->size),
+ reinterpret_cast<uint32_t*>(tensor->dims->data),
+ tensor->params.scale, tensor->params.zero_point};
+ CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type));
+ augmented_inputs.push_back(next_id);
+ model_state_inputs->push_back(next_id);
+ model_state_outputs->push_back(tensor_id);
+ next_id++;
+ };
+ auto check_and_add_activation = [&add_scalar_int32](int activation) {
+ if (activation > kTfLiteActRelu6) {
+ logError("NNAPI only supports RELU, RELU1 and RELU6 activations");
+ return kTfLiteError;
+ }
+ add_scalar_int32(activation);
+ return kTfLiteOk;
+ };
+
+ auto add_add_params = [&add_scalar_int32](void* data) {
+ auto* builtin = reinterpret_cast<TfLiteAddParams*>(data);
+ if (builtin->activation > kTfLiteActRelu6) {
+ logError("NNAPI only supports RELU, RELU1 and RELU6 activations");
+ return kTfLiteError;
+ }
+ add_scalar_int32(builtin->activation);
+ return kTfLiteOk;
+ };
+
+ auto add_pooling_params = [&add_scalar_int32,
+ &check_and_add_activation](void* data) {
+ auto builtin = reinterpret_cast<TfLitePoolParams*>(data);
+ add_scalar_int32(builtin->padding);
+ add_scalar_int32(builtin->stride_width);
+ add_scalar_int32(builtin->stride_height);
+ add_scalar_int32(builtin->filter_width);
+ add_scalar_int32(builtin->filter_height);
+ return check_and_add_activation(builtin->activation);
+ };
+
+ auto add_convolution_params = [&add_scalar_int32,
+ &check_and_add_activation](void* data) {
+ auto builtin = reinterpret_cast<TfLiteConvParams*>(data);
+ add_scalar_int32(builtin->padding);
+ add_scalar_int32(builtin->stride_width);
+ add_scalar_int32(builtin->stride_height);
+ return check_and_add_activation(builtin->activation);
+ };
+
+ auto add_depthwise_conv_params = [&add_scalar_int32,
+ &check_and_add_activation](void* data) {
+ auto builtin = reinterpret_cast<TfLiteDepthwiseConvParams*>(data);
+ add_scalar_int32(builtin->padding);
+ add_scalar_int32(builtin->stride_width);
+ add_scalar_int32(builtin->stride_height);
+ add_scalar_int32(builtin->depth_multiplier);
+ return check_and_add_activation(builtin->activation);
+ };
+
+ auto add_fully_connected_params = [&check_and_add_activation](void* data) {
+ auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(data);
+ return check_and_add_activation(builtin->activation);
+ };
+
+ auto add_concatenation_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteConcatenationParams*>(data);
+ add_scalar_int32(builtin->axis);
+ if (builtin->activation != kTfLiteActNone) {
+ logError("Concatenation does not support fused activation in NNAPI");
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+ };
+
+ auto add_softmax_params = [&add_scalar_float32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteSoftmaxParams*>(data);
+ add_scalar_float32(builtin->beta);
+ };
+
+ auto add_space_to_depth_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteSpaceToDepthParams*>(data);
+ add_scalar_int32(builtin->block_size);
+ };
+
+ auto add_lstm_params = [&add_scalar_int32,
+ &add_scalar_float32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteLSTMParams*>(data);
+ add_scalar_int32(builtin->activation);
+ add_scalar_float32(builtin->cell_clip);
+ add_scalar_float32(builtin->proj_clip);
+ };
+
+ // LSTM in NNAPI requires scratch tensor as an output operand.
+ auto add_lstm_scratch_tensor_float32 = [interpreter, &node, &nn_model,
+ &next_id, &augmented_outputs]() {
+ if (node.temporaries->size == 0) return;
+ int scratch_buffer_index = node.temporaries->data[0];
+ const TfLiteTensor* tensor = interpreter->tensor(scratch_buffer_index);
+ ANeuralNetworksOperandType operand_type{
+ ANEURALNETWORKS_TENSOR_FLOAT32,
+ static_cast<uint32_t>(tensor->dims->size),
+ reinterpret_cast<uint32_t*>(tensor->dims->data), tensor->params.scale,
+ tensor->params.zero_point};
+ CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type));
+ augmented_outputs.insert(augmented_outputs.begin(), next_id++);
+ };
+
+ auto add_mean_params = [&add_scalar_int32](void* data) {
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 9
+ auto builtin = reinterpret_cast<TfLiteMeanParams*>(data);
+#else
+ auto builtin = reinterpret_cast<TfLiteReducerParams*>(data);
+#endif
+ add_scalar_int32(builtin->keep_dims);
+ };
+
+ auto add_svdf_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteSVDFParams*>(data);
+ add_scalar_int32(builtin->rank);
+ add_scalar_int32(builtin->activation);
+ };
+
+ auto add_rnn_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteRNNParams*>(data);
+ add_scalar_int32(builtin->activation);
+ };
+
+ auto add_squeeze_params = [&](void* data) {
+ const auto* builtin = reinterpret_cast<TfLiteSqueezeParams*>(data);
+ // Note that we add the squeeze dimensions even if the dimensions were
+ // unspecified (empty), as NNAPI requires the operand.
+ add_vector_int32(builtin->squeeze_dims,
+ static_cast<uint32_t>(builtin->num_squeeze_dims));
+ };
+
+ // Handle optional input tensors.
+ auto add_optional_tensors = [&nn_model, &augmented_inputs,
+ &next_id](int nn_type) {
+ for (size_t idx = 0; idx < augmented_inputs.size(); idx++) {
+ if (augmented_inputs[idx] == static_cast<uint32_t>(kOptionalTensor)) {
+ const std::vector<uint32_t> dim = {0, 0};
+ ANeuralNetworksOperandType operand_type{nn_type, 2, dim.data(), 0, 0};
+ CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
+ CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id,
+ nullptr, 0))
+ augmented_inputs[idx] = next_id++;
+ }
+ }
+ };
+
+ int nnapi_version = 10;
+#include "nnapi_delegate_ex_AddOpsAndParams_lambda.inc"
+
+ ANeuralNetworksOperationType nn_op_type = -1;
+
+ // Using namespace directive to minimize diff with upstream tensorflow
+ namespace tflite = ::tflite;
+
+ switch (builtin) {
+ case tflite::BuiltinOperator_ADD:
+ nn_op_type = ANEURALNETWORKS_ADD;
+ RETURN_ERROR_IF_TFLITE_FAILED(add_add_params(node.builtin_data));
+ break;
+ case tflite::BuiltinOperator_MUL:
+ nn_op_type = ANEURALNETWORKS_MUL;
+ RETURN_ERROR_IF_TFLITE_FAILED(add_add_params(node.builtin_data));
+ break;
+ case tflite::BuiltinOperator_AVERAGE_POOL_2D:
+ RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
+ nn_op_type = ANEURALNETWORKS_AVERAGE_POOL_2D;
+ break;
+ case tflite::BuiltinOperator_MAX_POOL_2D:
+ RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
+ nn_op_type = ANEURALNETWORKS_MAX_POOL_2D;
+ break;
+ case tflite::BuiltinOperator_L2_POOL_2D:
+ RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
+ nn_op_type = ANEURALNETWORKS_L2_POOL_2D;
+ break;
+ case tflite::BuiltinOperator_CONV_2D: {
+ auto builtin = reinterpret_cast<TfLiteConvParams*>(node.builtin_data);
+ if (builtin->dilation_width_factor != 1 ||
+ builtin->dilation_height_factor != 1 || node.inputs->size != 3) {
+ logError("NNAPI does not support dilated Conv2D.");
+ return kTfLiteError;
+ }
+ }
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ add_convolution_params(node.builtin_data));
+ nn_op_type = ANEURALNETWORKS_CONV_2D;
+ break;
+ case tflite::BuiltinOperator_RELU:
+ nn_op_type = ANEURALNETWORKS_RELU;
+ break;
+ case tflite::BuiltinOperator_RELU_N1_TO_1:
+ nn_op_type = ANEURALNETWORKS_RELU1;
+ break;
+ case tflite::BuiltinOperator_RELU6:
+ nn_op_type = ANEURALNETWORKS_RELU6;
+ break;
+ case tflite::BuiltinOperator_TANH:
+ nn_op_type = ANEURALNETWORKS_TANH;
+ break;
+ case tflite::BuiltinOperator_FLOOR:
+ nn_op_type = ANEURALNETWORKS_FLOOR;
+ break;
+ case tflite::BuiltinOperator_LOGISTIC:
+ nn_op_type = ANEURALNETWORKS_LOGISTIC;
+ break;
+ case tflite::BuiltinOperator_DEPTHWISE_CONV_2D:
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ add_depthwise_conv_params(node.builtin_data));
+ nn_op_type = ANEURALNETWORKS_DEPTHWISE_CONV_2D;
+ break;
+ case tflite::BuiltinOperator_CONCATENATION:
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ add_concatenation_params(node.builtin_data));
+ nn_op_type = ANEURALNETWORKS_CONCATENATION;
+ break;
+ case tflite::BuiltinOperator_SOFTMAX:
+ add_softmax_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_SOFTMAX;
+ break;
+ case tflite::BuiltinOperator_FULLY_CONNECTED:
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ add_fully_connected_params(node.builtin_data));
+ nn_op_type = ANEURALNETWORKS_FULLY_CONNECTED;
+ break;
+ case tflite::BuiltinOperator_RESHAPE:
+ if (node.inputs->size != 2) {
+ logError("NNAPI only supports 2-input RESHAPE");
+ return kTfLiteError;
+ }
+ nn_op_type = ANEURALNETWORKS_RESHAPE;
+ // add_reshape_params(node.builtin_data);
+ break;
+ case tflite::BuiltinOperator_RESIZE_BILINEAR:
+ add_resize_bilinear_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_RESIZE_BILINEAR;
+ break;
+ case tflite::BuiltinOperator_SPACE_TO_DEPTH:
+ add_space_to_depth_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_SPACE_TO_DEPTH;
+ break;
+ case tflite::BuiltinOperator_LSTM: {
+ if (node.inputs->size + /* no of params */ 3 != 21) {
+ logError("NNAPI only supports 21-input LSTMs");
+ return kTfLiteError;
+ }
+ duplicate_state_tensor_float32(
+ node.outputs->data[/*kOutputStateTensor*/ 0]);
+ duplicate_state_tensor_float32(
+ node.outputs->data[/*kCellStateTensor*/ 1]);
+ add_lstm_params(node.builtin_data);
+ add_lstm_scratch_tensor_float32();
+ add_optional_tensors(ANEURALNETWORKS_TENSOR_FLOAT32);
+ nn_op_type = ANEURALNETWORKS_LSTM;
+ break;
+ }
+ case tflite::BuiltinOperator_DEQUANTIZE:
+ nn_op_type = ANEURALNETWORKS_DEQUANTIZE;
+ break;
+ case tflite::BuiltinOperator_SVDF: {
+ duplicate_state_tensor_float32(node.outputs->data[/*kStateTensor*/ 0]);
+ add_svdf_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_SVDF;
+ break;
+ }
+ case tflite::BuiltinOperator_RNN: {
+ duplicate_state_tensor_float32(
+ node.outputs->data[/*kHiddenStateTensor*/ 0]);
+ add_rnn_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_RNN;
+ break;
+ }
+ case tflite::BuiltinOperator_EMBEDDING_LOOKUP:
+ nn_op_type = ANEURALNETWORKS_EMBEDDING_LOOKUP;
+ break;
+ case tflite::BuiltinOperator_PAD:
+ nnapi_version = 11; // require NNAPI 1.1
+ nn_op_type = ANEURALNETWORKS_PAD;
+ break;
+ case tflite::BuiltinOperator_MEAN:
+ nnapi_version = 11; // require NNAPI 1.1
+ add_mean_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_MEAN;
+ break;
+ case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION:
+ nn_op_type = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION;
+ add_lrn_params(node.builtin_data);
+ break;
+ case tflite::BuiltinOperator_DIV:
+ nnapi_version = 11; // require NNAPI 1.1
+ nn_op_type = ANEURALNETWORKS_DIV;
+ RETURN_ERROR_IF_TFLITE_FAILED(check_and_add_activation(
+ reinterpret_cast<TfLiteDivParams*>(node.builtin_data)->activation));
+ break;
+ case tflite::BuiltinOperator_SUB:
+ nnapi_version = 11; // require NNAPI 1.1
+ nn_op_type = ANEURALNETWORKS_SUB;
+ RETURN_ERROR_IF_TFLITE_FAILED(check_and_add_activation(
+ reinterpret_cast<TfLiteSubParams*>(node.builtin_data)->activation));
+ break;
+ case tflite::BuiltinOperator_SQUEEZE:
+ nnapi_version = 11; // requires NNAPI 1.1
+ add_squeeze_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_SQUEEZE;
+ break;
+ case tflite::BuiltinOperator_TRANSPOSE:
+ // The permutation input tensor value dictates the output dimensions.
+ // TODO(b/110888333): Support dynamically-sized tensors in delegates.
+ if ((node.inputs->size > 1) &&
+ (interpreter->tensor(node.inputs->data[1])->allocation_type !=
+ kTfLiteMmapRo)) {
+ logError("NNAPI does not yet support dynamic tensors.");
+ return kTfLiteError;
+ }
+ nnapi_version = 11; // require NNAPI 1.1
+ nn_op_type = ANEURALNETWORKS_TRANSPOSE;
+ break;
+ case tflite::BuiltinOperator_L2_NORMALIZATION:
+ nn_op_type = ANEURALNETWORKS_L2_NORMALIZATION;
+ if (reinterpret_cast<TfLiteL2NormParams*>(node.builtin_data)
+ ->activation != kTfLiteActNone) {
+ logError(
+ "NNAPI does not support L2Normalization with fused activations");
+ return kTfLiteError;
+ }
+ if ((node.inputs->size > 0) &&
+ (interpreter->tensor(node.inputs->data[0])->dims->size != 4)) {
+ logError("NNAPI only supports input rank 4 for L2Normalization");
+ return kTfLiteError;
+ }
+ break;
+ case tflite::BuiltinOperator_HASHTABLE_LOOKUP:
+ if (interpreter->tensor(node.outputs->data[0])->type !=
+ kTfLiteFloat32) {
+ logError("NNAPI only support HASHTABLE_LOOKUP with float32 output",
+ builtin);
+ return kTfLiteError;
+ }
+ nn_op_type = ANEURALNETWORKS_HASHTABLE_LOOKUP;
+ break;
+ case tflite::BuiltinOperator_STRIDED_SLICE:
+ add_strided_slice_params(node.builtin_data);
+ nn_op_type = ANEURALNETWORKS_STRIDED_SLICE;
+ break;
+ case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
+ nnapi_version = 11; // require NNAPI 1.1
+ nn_op_type = ANEURALNETWORKS_SPACE_TO_BATCH_ND;
+ break;
+ case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
+ nnapi_version = 11; // require NNAPI 1.1
+ nn_op_type = ANEURALNETWORKS_BATCH_TO_SPACE_ND;
+ check_batch_to_space_params();
+ break;
+ case tflite::BuiltinOperator_CAST:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_CAST_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_TOPK_V2:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_TOPK_V2_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_GATHER:
+ add_gather_ex_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_GATHER_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_SPLIT:
+ add_split_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_SPLIT_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_NEG:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_NEG_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_EXP:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_EXP_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_TRANSPOSE_CONV:
+ add_transpose_conv_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_TRANSPOSE_CONV_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_PRELU:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_PRELU_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_ARG_MAX:
+ check_arg_max_input(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_ARGMAX_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+ case tflite::BuiltinOperator_PACK:
+ add_pack_ex_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_PACK_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_UNPACK:
+ add_unpack_ex_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_UNPACK_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_SQRT:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_SQRT_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_RSQRT:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_RSQRT_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_EQUAL:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_EQUAL_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_NOT_EQUAL:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_NOT_EQUAL_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_SUM:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_REDUCE_SUM_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_REDUCE_MAX:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_REDUCE_MAX_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_REDUCE_MIN:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_REDUCE_MIN_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_LOGICAL_AND:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_LOGICAL_AND_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_LOGICAL_OR:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_LOGICAL_OR_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_LOGICAL_NOT:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_LOGICAL_NOT_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ case tflite::BuiltinOperator_SQUARED_DIFFERENCE:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+
+#endif
+ case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
+ case tflite::BuiltinOperator_LSH_PROJECTION:
+ case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN:
+ case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN:
+ case tflite::BuiltinOperator_EMBEDDING_LOOKUP_SPARSE:
+ case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM:
+ case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
+ //case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION:
+ case tflite::BuiltinOperator_PADV2:
+ //case tflite::BuiltinOperator_RESIZE_BILINEAR:
+ case tflite::BuiltinOperator_CALL:
+ case tflite::BuiltinOperator_SKIP_GRAM:
+ //case tflite::BuiltinOperator_RELU_N1_TO_1:
+ //case tflite::BuiltinOperator_GATHER:
+ //case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
+ //case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
+ //case tflite::BuiltinOperator_TOPK_V2:
+ //case tflite::BuiltinOperator_SPLIT:
+ //case tflite::BuiltinOperator_STRIDED_SLICE:
+ //case tflite::BuiltinOperator_EXP:
+ case tflite::BuiltinOperator_LOG_SOFTMAX:
+ //case tflite::BuiltinOperator_DEQUANTIZE:
+ case tflite::BuiltinOperator_DELEGATE:
+ //case tflite::BuiltinOperator_CAST:
+ //case tflite::BuiltinOperator_PRELU:
+ case tflite::BuiltinOperator_MAXIMUM:
+ case tflite::BuiltinOperator_MINIMUM:
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+ case tflite::BuiltinOperator_ARG_MIN:
+#endif
+ case tflite::BuiltinOperator_GREATER:
+ case tflite::BuiltinOperator_GREATER_EQUAL:
+ case tflite::BuiltinOperator_LESS:
+ case tflite::BuiltinOperator_LESS_EQUAL:
+ //case tflite::BuiltinOperator_NEG:
+ case tflite::BuiltinOperator_SELECT:
+ case tflite::BuiltinOperator_SLICE:
+ case tflite::BuiltinOperator_SIN:
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+ case tflite::BuiltinOperator_LOG:
+#endif
+ //case tflite::BuiltinOperator_TRANSPOSE_CONV:
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+ case tflite::BuiltinOperator_TILE:
+ case tflite::BuiltinOperator_EXPAND_DIMS:
+ case tflite::BuiltinOperator_SPARSE_TO_DENSE:
+ //case tflite::BuiltinOperator_EQUAL:
+ //case tflite::BuiltinOperator_NOT_EQUAL:
+ //case tflite::BuiltinOperator_SUM:
+ //case tflite::BuiltinOperator_REDUCE_MAX:
+ //case tflite::BuiltinOperator_REDUCE_MIN:
+ case tflite::BuiltinOperator_REDUCE_PROD:
+ //case tflite::BuiltinOperator_SQRT:
+ //case tflite::BuiltinOperator_RSQRT:
+ case tflite::BuiltinOperator_SHAPE:
+ case tflite::BuiltinOperator_POW:
+ case tflite::BuiltinOperator_FAKE_QUANT:
+ //case tflite::BuiltinOperator_PACK:
+ //case tflite::BuiltinOperator_LOGICAL_OR:
+ case tflite::BuiltinOperator_ONE_HOT:
+ //case tflite::BuiltinOperator_LOGICAL_AND:
+ //case tflite::BuiltinOperator_LOGICAL_NOT:
+ //case tflite::BuiltinOperator_UNPACK:
+ case tflite::BuiltinOperator_FLOOR_DIV:
+ case tflite::BuiltinOperator_REDUCE_ANY:
+ case tflite::BuiltinOperator_SQUARE:
+ case tflite::BuiltinOperator_ZEROS_LIKE:
+ case tflite::BuiltinOperator_FILL:
+#endif
+ logError("Op code %d is currently not delegated to NNAPI", builtin);
+ return kTfLiteError;
+ break;
+ case tflite::BuiltinOperator_CUSTOM: {
+ std::string custom_name(registration.custom_name);
+ if (custom_name.compare("TensorFlowMax") == 0) {
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_REDUCE_MAX_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ }
+ else if (custom_name.compare("SquaredDifference") == 0) {
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ }
+ else if (custom_name.compare("TensorFlowSum") == 0) {
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_REDUCE_SUM_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ }
+ else if (custom_name.compare("Abs") == 0) {
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_ABS_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+ continue;
+ }
+ logError("Custom operations are not supported when using NNAPI.");
+ return kTfLiteError;
+ break;
+ }
+ default:
+ logError("Op code %d is currently not delegated to NNAPI", builtin);
+ return kTfLiteError;
+ break;
+ }
+
+ if (nnapi_version == 11 && GetAndroidSdkVersionCached() < 28) {
+ //logError("Op %d needs NNAPI1.1", builtin);
+ //return kTfLiteError;
+ }
+
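+ // Note (editorial): cases that end in `continue` above have already registered
+ // their node via ANeuralNetworksModel_addOperationEx, so they never reach the
+ // generic ANeuralNetworksModel_addOperation call below.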
+ // Add the operation.
+ RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_addOperation(
+ nn_model, nn_op_type, static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(),
+ static_cast<uint32_t>(augmented_outputs.size()),
+ reinterpret_cast<uint32_t*>(augmented_outputs.data())));
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus NNAPIDelegate::BuildGraph(::tflite::Interpreter* interpreter) {
+ if (nn_model_ && nn_compiled_model_) return model_status_;
+
+ // TODO(aselle): This is not correct. Need to handle resize invalidation.
+ if (!nn_model_) {
+ CHECK_NN(ANeuralNetworksModel_create(&nn_model_));
+
+ // Find which tensors should be added to NNAPI. TFLite has temporaries
+ // and RNN back-edges, which are not valid for NNAPI. We look through all
+ // inputs and outputs and mark the mapping in tensor_id_to_nnapi_id with
+ // kOperandIdNotSet. addTensorOperands will replace those with the
+ // corresponding NNAPI operand ids and skip kOperandNotNeeded entries.
+ std::vector<int64_t> tensor_id_to_nnapi_id(interpreter->tensors_size(),
+ kOperandNotNeeded);
+ auto set_ids_to_not_set = [&tensor_id_to_nnapi_id](const int* buf,
+ int count) {
+ for (int j = 0; j < count; j++) {
+ auto tensor_id = buf[j];
+ if (tensor_id != kOptionalTensor) {
+ tensor_id_to_nnapi_id[tensor_id] = kOperandIdNotSet;
+ }
+ }
+ };
+ for (size_t i = 0; i < interpreter->nodes_size(); i++) {
+ const auto* node_and_registration = interpreter->node_and_registration(i);
+ const TfLiteNode& node = node_and_registration->first;
+ set_ids_to_not_set(node.inputs->data, node.inputs->size);
+ set_ids_to_not_set(node.outputs->data, node.outputs->size);
+ }
+ set_ids_to_not_set(interpreter->inputs().data(),
+ interpreter->inputs().size());
+ set_ids_to_not_set(interpreter->outputs().data(),
+ interpreter->outputs().size());
+
+ uint32_t next_id = 0;
+ RETURN_ERROR_IF_TFLITE_FAILED(addTensorOperands(
+ interpreter, nn_model_, &next_id, &tensor_id_to_nnapi_id));
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ AddOpsAndParams(interpreter, nn_model_, next_id, &model_states_inputs_,
+ &model_states_outputs_, tensor_id_to_nnapi_id));
+
+ std::vector<uint32_t> augmented_inputs;
+ MapAndAddTensorIds(interpreter->inputs().data(),
+ interpreter->inputs().size(), &augmented_inputs,
+ tensor_id_to_nnapi_id);
+ augmented_inputs.insert(augmented_inputs.end(),
+ model_states_inputs_.begin(),
+ model_states_inputs_.end());
+ std::vector<uint32_t> augmented_outputs;
+ MapAndAddTensorIds(interpreter->outputs().data(),
+ interpreter->outputs().size(), &augmented_outputs,
+ tensor_id_to_nnapi_id);
+ MapAndAddTensorIds(model_states_outputs_.data(),
+ model_states_outputs_.size(), &augmented_outputs,
+ tensor_id_to_nnapi_id);
+
+ CHECK_NN(ANeuralNetworksModel_identifyInputsAndOutputs(
+ nn_model_, static_cast<uint32_t>(augmented_inputs.size()),
+ reinterpret_cast<const uint32_t*>(augmented_inputs.data()),
+ static_cast<uint32_t>(augmented_outputs.size()),
+ reinterpret_cast<const uint32_t*>(augmented_outputs.data())));
+
+ // TODO Support ANeuralNetworksModel_relaxComputationFloat32toFloat16
+ //if (GetAndroidSdkVersionCached() >= 28) {
+ // CHECK_NN(ANeuralNetworksModel_relaxComputationFloat32toFloat16(
+ // nn_model_, interpreter->GetAllowFp16PrecisionForFp32()));
+ //}
+ CHECK_NN(ANeuralNetworksModel_finish(nn_model_));
+ }
+ if (!nn_compiled_model_) {
+ CHECK_NN(ANeuralNetworksCompilation_create(nn_model_, &nn_compiled_model_));
+ CHECK_NN(ANeuralNetworksCompilation_finish(nn_compiled_model_));
+ }
+ return kTfLiteOk;
+}
+
+#include <unordered_map>
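+// NOTE <unordered_map> is used below to stage boolean tensors during Invoke.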
+
+TfLiteStatus NNAPIDelegate::Invoke(::tflite::Interpreter* interpreter) {
+ if (!nn_model_) {
+ model_status_ = BuildGraph(interpreter);
+ if (model_status_ != kTfLiteOk) {
+ logError("Failed to build graph for NNAPI");
+ }
+ }
+ if (model_status_ != kTfLiteOk) {
+ return model_status_;
+ }
+
+ ANeuralNetworksExecution* execution = nullptr;
+ CHECK_NN(ANeuralNetworksExecution_create(nn_compiled_model_, &execution));
+
+  // Allocate temporary buffers to hold boolean tensors converted to a byte
+  // representation, since the NNAPI execution below is fed raw byte buffers.
+ std::unordered_map<size_t, uint8_t*> input_boolean_tensors;
+ std::unordered_map<size_t, uint8_t*> output_boolean_tensors;
+ for (size_t i = 0; i < interpreter->inputs().size(); i++)
+ {
+ int input = interpreter->inputs()[i];
+ TfLiteTensor* tensor = interpreter->tensor(input);
+ if (tensor->type == kTfLiteBool)
+ {
+ size_t elements = tensor->bytes / sizeof(bool);
+ uint8_t* temp_tensor = new uint8_t[tensor->bytes / sizeof(bool)];
+ input_boolean_tensors[i] = temp_tensor;
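+      // Convert each bool element to the byte encoding used below
+      // (true -> 0x00, false -> 0xff).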
+ for (size_t idx = 0; idx < elements; idx++)
+ {
+ temp_tensor[idx] = (tensor->data.b[idx] ? 0x00 : 0xff);
+ }
+ }
+ }
+ for (size_t i = 0; i < interpreter->outputs().size(); i++)
+ {
+ int output = interpreter->outputs()[i];
+ TfLiteTensor* tensor = interpreter->tensor(output);
+ if (tensor->type == kTfLiteBool)
+ {
+ uint8_t* temp_tensor = new uint8_t[tensor->bytes / sizeof(bool)];
+ output_boolean_tensors[i] = temp_tensor;
+ }
+ }
+
+  // Currently we perform a deep copy of each input buffer.
+ for (size_t i = 0; i < interpreter->inputs().size(); i++) {
+ int input = interpreter->inputs()[i];
+ // TODO(aselle): Is this what we want or do we want input instead?
+    // TODO(aselle): This should be called setInputValue maybe, to be consistent.
+ TfLiteTensor* tensor = interpreter->tensor(input);
+ if (tensor->type == kTfLiteBool)
+ {
+ CHECK_NN(ANeuralNetworksExecution_setInput(
+ execution, i, nullptr, input_boolean_tensors[i], tensor->bytes * sizeof(uint8_t) / sizeof(bool)));
+ }
+ else
+ {
+ CHECK_NN(ANeuralNetworksExecution_setInput(
+ execution, i, nullptr, tensor->data.raw, tensor->bytes));
+ }
+ }
+
+ // Tell nn api where to place final data.
+ for (size_t i = 0; i < interpreter->outputs().size(); i++) {
+ int output = interpreter->outputs()[i];
+ TfLiteTensor* tensor = interpreter->tensor(output);
+
+ if (tensor->type == kTfLiteBool)
+ {
+ CHECK_NN(ANeuralNetworksExecution_setOutput(
+ execution, i, nullptr, output_boolean_tensors[i], tensor->bytes * sizeof(uint8_t) / sizeof(bool)));
+ }
+ else
+ {
+ CHECK_NN(ANeuralNetworksExecution_setOutput(
+ execution, i, nullptr, tensor->data.raw, tensor->bytes));
+ }
+ }
+
+  // The state_out of the previous invocation needs to be mapped to the
+  // state_in of the current invocation.
+ for (size_t i = 0; i < model_states_outputs_.size(); i++) {
+ int state_tensor_idx = model_states_outputs_[i];
+ TfLiteTensor* tensor = interpreter->tensor(state_tensor_idx);
+ // Here we are using a deep copy for state_in tensors so that we are not
+    // reading and writing into the same buffer during an invocation.
+ // TODO(miaowang): using double shared buffer to minimize the copies.
+ CHECK_NN(ANeuralNetworksExecution_setInput(
+ execution, i + interpreter->inputs().size(), nullptr, tensor->data.raw,
+ tensor->bytes));
+ // Tell NNAPI where to output the state_out.
+ CHECK_NN(ANeuralNetworksExecution_setOutput(
+ execution, i + interpreter->outputs().size(), nullptr, tensor->data.raw,
+ tensor->bytes));
+ }
+
+ // Currently use blocking compute.
+ ANeuralNetworksEvent* event = nullptr;
+ CHECK_NN(ANeuralNetworksExecution_startCompute(execution, &event));
+ CHECK_NN(ANeuralNetworksEvent_wait(event));
+ ANeuralNetworksEvent_free(event);
+ ANeuralNetworksExecution_free(execution);
+
+  // Release the temporary buffers used for casted boolean input tensors.
+ for (size_t i = 0; i < interpreter->inputs().size(); i++) {
+ int input = interpreter->inputs()[i];
+ TfLiteTensor* tensor = interpreter->tensor(input);
+
+ if (tensor->type == kTfLiteBool)
+ {
+ uint8_t* temp_tensor = input_boolean_tensors[i];
+ input_boolean_tensors[i] = nullptr;
+      delete[] temp_tensor;
+ }
+ }
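+  // Copy converted boolean outputs back into the TFLite tensors and release
+  // the temporary buffers.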
+ for (size_t i = 0; i < interpreter->outputs().size(); i++) {
+ int output = interpreter->outputs()[i];
+ TfLiteTensor* tensor = interpreter->tensor(output);
+
+ if (tensor->type == kTfLiteBool)
+ {
+ uint8_t* temp_tensor = output_boolean_tensors[i];
+ size_t elements = tensor->bytes / sizeof(bool);
+ for (size_t idx = 0; idx < elements; idx++)
+ {
+ tensor->data.b[idx] = ((temp_tensor[idx] == 0x00) ? false : true);
+ }
+ output_boolean_tensors[i] = nullptr;
+      delete[] temp_tensor;
+ }
+ }
+
+#if 0
+ printf("From the NN API:\n");
+ TfLiteTensor* tensor = interpreter->tensor(interpreter->outputs()[0]);
+ if (float* data =
+ interpreter->typed_tensor<float>(interpreter->outputs()[0])) {
+ size_t num = tensor->bytes / sizeof(float);
+ for (float* p = data; p < data + num; p++) {
+ printf(" %f", *p);
+ }
+ printf("\n");
+ }
+#endif
+
+ return kTfLiteOk;
+}
+
+bool NNAPIDelegate::IsSupported() { return nnfw::NNAPIExists(); }
+
+} // namespace tflite
+} // namespace nnfw
+
+// clang-format on
diff --git a/runtimes/libs/tflite/src/ext/nnapi_delegate_ex_AddOpsAndParams_lambda.inc b/runtimes/libs/tflite/src/ext/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
new file mode 100644
index 000000000..a3e99ee7a
--- /dev/null
+++ b/runtimes/libs/tflite/src/ext/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
@@ -0,0 +1,123 @@
+// This file is included from AddOpsAndParams defined in nnapi_delegate.cpp
+// and contains lambdas that extend the original TensorFlow Lite implementation.
+ auto add_resize_bilinear_params = [&add_scalar_int32, &interpreter, &augmented_inputs](void* data) {
+ auto builtin = reinterpret_cast<TfLiteResizeBilinearParams*>(data);
+ if (builtin->align_corners) {
+ FATAL("Resize bilinear does not support align corners in NNAPI");
+ }
+
+ TfLiteTensor* tensor = interpreter->tensor(augmented_inputs.back());
+ assert(tensor->type == kTfLiteInt32);
+ assert(tensor->bytes == sizeof(int)*2);
+ augmented_inputs.pop_back();
+
+  // The TFLite size tensor holds {new_height, new_width}.
+  int new_width = ((int*)(tensor->data.raw))[1];
+  int new_height = ((int*)(tensor->data.raw))[0];
+  add_scalar_int32(new_width);
+  add_scalar_int32(new_height);
+ };
+
+ auto add_transpose_conv_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(data);
+ add_scalar_int32(builtin->padding);
+ add_scalar_int32(builtin->stride_width);
+ add_scalar_int32(builtin->stride_height);
+ };
+
+ auto add_lrn_params = [&add_scalar_int32,
+ &add_scalar_float32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteLocalResponseNormParams*>(data);
+ add_scalar_int32(builtin->radius);
+ add_scalar_float32(builtin->bias);
+ add_scalar_float32(builtin->alpha);
+ add_scalar_float32(builtin->beta);
+ };
+
+ auto add_strided_slice_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteStridedSliceParams*>(data);
+ add_scalar_int32(builtin->begin_mask);
+ add_scalar_int32(builtin->end_mask);
+    // ellipsis_mask and new_axis_mask are not supported by the NN runtime
+    // (cf. the TFLite interpreter supports both).
+ if (builtin->ellipsis_mask) {
+ FATAL("STRIDE_SLICE does not support ellipsis_mask in NNAPI");
+ }
+ if (builtin->new_axis_mask) {
+ FATAL("STRIDE_SLICE does not support new_axis_mask in NNAPI");
+ }
+ add_scalar_int32(builtin->shrink_axis_mask);
+ };
+
+ auto add_gather_ex_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteGatherParams*>(data);
+ add_scalar_int32(builtin->axis);
+ if (builtin->axis != 0) {
+ FATAL("GATHER does not support axis>0 in NNAPI");
+ }
+ };
+
+#if TFLITE_MAJOR_VER == 1 && TFLITE_MINOR_VER == 13
+ auto add_pack_ex_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLitePackParams*>(data);
+ add_scalar_int32(builtin->values_count);
+ add_scalar_int32(builtin->axis);
+ };
+
+ auto add_unpack_ex_params = [&add_scalar_int32](void* data) {
+ auto builtin = reinterpret_cast<TfLiteUnpackParams*>(data);
+ add_scalar_int32(builtin->num);
+ add_scalar_int32(builtin->axis);
+ };
+#endif
+
+ auto check_batch_to_space_params = [interpreter, &node, &augmented_inputs]() {
+
+  // If there are 3 inputs, check whether crops holds the default values
+  // {0, 0, 0, 0}; anything else is unsupported by NNAPI.
+
+ if(augmented_inputs.size() == 3)
+ {
+ const uint32_t crops_buffer_index = node.inputs->data[2];
+ const TfLiteTensor* crops = interpreter->tensor(crops_buffer_index);
+ const int *crops_value = crops->data.i32;
+
+    // Check whether crops holds the default values {0, 0, 0, 0}
+ if(crops_value[0] != 0 || crops_value[1] != 0 || crops_value[2] != 0 || crops_value[3] != 0)
+ {
+ FATAL("BATCH_TO_SPACE_ND does not support Explicit crops in NNAPI");
+ }
+ else
+ {
+      // Drop the crops input and pass only the other two inputs
+ augmented_inputs.pop_back();
+ }
+ }
+ };
+
+ auto add_split_params = [&add_scalar_int32, &augmented_inputs](void* data) {
+    // TFLite SPLIT takes (axis, input) while NNAPI expects (input, axis),
+    // so swap the first two operands.
+ auto input_tensor = augmented_inputs[1];
+ auto axis = augmented_inputs[0];
+ augmented_inputs[0] = input_tensor;
+ augmented_inputs[1] = axis;
+
+ auto builtin = reinterpret_cast<TfLiteSplitParams*>(data);
+ add_scalar_int32(builtin->num_splits);
+ };
+
+ auto check_arg_max_input = [&interpreter, &augmented_inputs](void *data) {
+ auto params = reinterpret_cast<TfLiteArgMaxParams*>(data);
+ if (params->output_type != kTfLiteInt32)
+ {
+ FATAL("Cannot handle output type in NNAPI");
+ }
+
+ TfLiteTensor* axis_tensor = interpreter->tensor(augmented_inputs.back());
+ assert(axis_tensor->type == kTfLiteInt32);
+
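+    // The axis tensor must contain exactly one element (a single axis).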
+ int64_t count = 1;
+ for (int i = 0; i < axis_tensor->dims->size; ++i) {
+ count *= axis_tensor->dims->data[i];
+ }
+ assert(count == 1);
+ };
diff --git a/runtimes/libs/tflite/src/interp/FlatBufferBuilder.cpp b/runtimes/libs/tflite/src/interp/FlatBufferBuilder.cpp
new file mode 100644
index 000000000..f54e67202
--- /dev/null
+++ b/runtimes/libs/tflite/src/interp/FlatBufferBuilder.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/interp/FlatBufferBuilder.h"
+
+#include "tflite/ext/kernels/register.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
+std::unique_ptr<::tflite::Interpreter> FlatBufferBuilder::build(void) const
+{
+ std::unique_ptr<::tflite::Interpreter> interpreter;
+
+ nnfw::tflite::BuiltinOpResolver resolver;
+
+ ::tflite::InterpreterBuilder builder(_model, resolver);
+
+ builder(&interpreter);
+
+ return interpreter;
+}
+
+} // namespace tflite
+} // namespace nnfw
diff --git a/runtimes/libs/tflite/src/interp/FunctionBuilder.cpp b/runtimes/libs/tflite/src/interp/FunctionBuilder.cpp
new file mode 100644
index 000000000..599a4f393
--- /dev/null
+++ b/runtimes/libs/tflite/src/interp/FunctionBuilder.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/interp/FunctionBuilder.h"
+
+namespace nnfw
+{
+namespace tflite
+{
+
+std::unique_ptr<::tflite::Interpreter> FunctionBuilder::build(void) const
+{
+ auto res = std::unique_ptr<::tflite::Interpreter>{new ::tflite::Interpreter};
+
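+  // Populate the freshly created interpreter via the stored setup function.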
+ _fn(*res);
+
+ return res;
+}
+
+} // namespace tflite
+} // namespace nnfw