summaryrefslogtreecommitdiff
path: root/runtime/libs
diff options
context:
space:
mode:
authorChunseok Lee <chunseok.lee@samsung.com>2020-04-23 14:45:49 +0900
committerChunseok Lee <chunseok.lee@samsung.com>2020-04-23 14:45:49 +0900
commite2ef8438a24f7c56a0744eb579a6e293ee2fbf8e (patch)
tree44a1a7951d168dd4370e13593ed03f4bc6d920c5 /runtime/libs
parent302e6564a7a76109e1178207e44e45a58631c477 (diff)
downloadnnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.gz
nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.bz2
nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.zip
Imported Upstream version 1.4.0upstream/1.4.0submit/tizen/20200423.054851
Diffstat (limited to 'runtime/libs')
-rw-r--r--runtime/libs/benchmark/include/benchmark/CsvWriter.h1
-rw-r--r--runtime/libs/benchmark/include/benchmark/Phase.h14
-rw-r--r--runtime/libs/benchmark/include/benchmark/Result.h2
-rw-r--r--runtime/libs/benchmark/include/benchmark/Util.h9
-rw-r--r--runtime/libs/benchmark/src/CsvWriter.cpp7
-rw-r--r--runtime/libs/benchmark/src/MemoryPoller.cpp2
-rw-r--r--runtime/libs/cpp14/CMakeLists.txt2
-rw-r--r--runtime/libs/cpp14/include/cpp14/memory.h66
-rw-r--r--runtime/libs/jsoncpp/.FORMATDENY0
-rw-r--r--runtime/libs/misc/CMakeLists.txt6
-rw-r--r--runtime/libs/misc/examples/tensor_index_iterator.cpp2
-rw-r--r--runtime/libs/misc/include/misc/EventRecorder.h1
-rw-r--r--runtime/libs/misc/include/misc/benchmark.h4
-rw-r--r--runtime/libs/misc/include/misc/string_helpers.h2
-rw-r--r--runtime/libs/misc/src/tensor/Comparator.cpp16
-rw-r--r--runtime/libs/profiling/include/profiling/profiling.h2
-rw-r--r--runtime/libs/profiling/include/profiling/time.h30
-rw-r--r--runtime/libs/profiling/src/profiling/time.cpp30
-rw-r--r--runtime/libs/rua/core/include/rua/Service.h1
-rw-r--r--runtime/libs/rua/dyn/include/rua/DynamicBinder.h2
-rw-r--r--runtime/libs/rua/dyn/src/DynamicBinder.cpp1
-rw-r--r--runtime/libs/rua/shim/include/rua/Shim.h1
-rw-r--r--runtime/libs/tflite/CMakeLists.txt10
-rw-r--r--runtime/libs/tflite/include/tflite/Diff.h1
-rw-r--r--runtime/libs/tflite/port/1.13.1/CMakeLists.txt2
-rw-r--r--runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/Abs.h41
-rw-r--r--runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h6
-rw-r--r--runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowMax.h75
-rw-r--r--runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowSum.h41
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp103
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp405
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp400
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp3
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp196
-rw-r--r--runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc32
-rw-r--r--runtime/libs/tflite/src/Diff.cpp17
-rw-r--r--runtime/libs/tflite/src/TensorShapeUtils.cpp16
37 files changed, 248 insertions, 1301 deletions
diff --git a/runtime/libs/benchmark/include/benchmark/CsvWriter.h b/runtime/libs/benchmark/include/benchmark/CsvWriter.h
index 3e141216b..5c259d7ed 100644
--- a/runtime/libs/benchmark/include/benchmark/CsvWriter.h
+++ b/runtime/libs/benchmark/include/benchmark/CsvWriter.h
@@ -54,6 +54,7 @@ private:
std::ofstream _ofs;
uint32_t _header_size;
uint32_t _col_idx;
+ uint32_t _row_idx;
};
} // namespace benchmark
diff --git a/runtime/libs/benchmark/include/benchmark/Phase.h b/runtime/libs/benchmark/include/benchmark/Phase.h
index bea9a87b2..ed8c8869e 100644
--- a/runtime/libs/benchmark/include/benchmark/Phase.h
+++ b/runtime/libs/benchmark/include/benchmark/Phase.h
@@ -48,4 +48,18 @@ inline std::string getPhaseString(Phase phase)
} // namespace benchmark
+namespace std
+{
+
+template <> struct hash<benchmark::Phase>
+{
+ size_t operator()(benchmark::Phase value) const noexcept
+ {
+ using type = typename std::underlying_type<benchmark::Phase>::type;
+ return hash<type>()(static_cast<type>(value));
+ }
+};
+
+} // namespace std
+
#endif // __NNFW_BENCHMARK_PHASE_H__
diff --git a/runtime/libs/benchmark/include/benchmark/Result.h b/runtime/libs/benchmark/include/benchmark/Result.h
index 570fa2114..2d86d95ec 100644
--- a/runtime/libs/benchmark/include/benchmark/Result.h
+++ b/runtime/libs/benchmark/include/benchmark/Result.h
@@ -39,7 +39,7 @@ uint32_t maxMemory(const std::unordered_map<benchmark::Phase, uint32_t> &map)
return answer.second;
}
-} // namespace anonymous
+} // namespace
namespace benchmark
{
diff --git a/runtime/libs/benchmark/include/benchmark/Util.h b/runtime/libs/benchmark/include/benchmark/Util.h
index b10360fa0..2e1f985b1 100644
--- a/runtime/libs/benchmark/include/benchmark/Util.h
+++ b/runtime/libs/benchmark/include/benchmark/Util.h
@@ -90,11 +90,10 @@ inline void writeResult(const Result &result, const std::string &exec, const std
bool done = writer.done();
- std::cout << "Writing to " << csv_filename << " is ";
- if (done)
- std::cout << "done" << std::endl;
- else
- std::cout << "failed" << std::endl;
+ if (!done)
+ {
+ std::cerr << "Writing to " << csv_filename << " is failed" << std::endl;
+ }
}
} // namespace benchmark
diff --git a/runtime/libs/benchmark/src/CsvWriter.cpp b/runtime/libs/benchmark/src/CsvWriter.cpp
index 9f2c5b09d..5f47c6511 100644
--- a/runtime/libs/benchmark/src/CsvWriter.cpp
+++ b/runtime/libs/benchmark/src/CsvWriter.cpp
@@ -24,7 +24,7 @@ const std::vector<std::string> csv_header{
#include "benchmark/CsvHeader.lst"
};
-} // namespace anonymous
+} // namespace
namespace benchmark
{
@@ -35,7 +35,7 @@ CsvWriter::CsvWriter(const std::string &csv_filename) : CsvWriter(csv_filename,
}
CsvWriter::CsvWriter(const std::string &csv_filename, const std::vector<std::string> &header)
- : _ofs(csv_filename), _header_size(header.size()), _col_idx(0)
+ : _ofs(csv_filename), _header_size(header.size()), _col_idx(0), _row_idx(0)
{
assert(csv_filename.empty() == false);
assert(header.size() != 0);
@@ -61,6 +61,7 @@ void CsvWriter::postWrite()
if (++_col_idx == _header_size)
{
_ofs << newline;
+ _row_idx += 1;
_col_idx = 0;
}
else
@@ -93,7 +94,7 @@ void CsvWriter::write(char val)
postWrite();
}
-bool CsvWriter::done() { return _col_idx == 0; }
+bool CsvWriter::done() { return (_col_idx == 0) && (_row_idx == 2); }
CsvWriter &operator<<(CsvWriter &csvw, const std::string &val)
{
diff --git a/runtime/libs/benchmark/src/MemoryPoller.cpp b/runtime/libs/benchmark/src/MemoryPoller.cpp
index 436d536e4..95fc34bb2 100644
--- a/runtime/libs/benchmark/src/MemoryPoller.cpp
+++ b/runtime/libs/benchmark/src/MemoryPoller.cpp
@@ -82,7 +82,7 @@ std::vector<std::string> getValueFromFileStatus(const std::string &file, const s
return val;
}
-} // namespace anonymous
+} // namespace
namespace benchmark
{
diff --git a/runtime/libs/cpp14/CMakeLists.txt b/runtime/libs/cpp14/CMakeLists.txt
deleted file mode 100644
index bba9e132d..000000000
--- a/runtime/libs/cpp14/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-add_library(nnfw_lib_cpp14 INTERFACE)
-target_include_directories(nnfw_lib_cpp14 INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
diff --git a/runtime/libs/cpp14/include/cpp14/memory.h b/runtime/libs/cpp14/include/cpp14/memory.h
deleted file mode 100644
index 7070e1c99..000000000
--- a/runtime/libs/cpp14/include/cpp14/memory.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file memory.h
- * @ingroup COM_AI_RUNTIME
- * @brief This file contains @c make_unique which is not supported by C++11
- * @details Implementation is based on http://isocpp.org/files/papers/N3656.txt
- */
-#ifndef __NNFW_CPP14_MEMORY_H__
-#define __NNFW_CPP14_MEMORY_H__
-
-#include <memory>
-
-namespace nnfw
-{
-namespace cpp14
-{
-
-template <typename T> struct _Unique_if
-{
- typedef std::unique_ptr<T> _Single_object;
-};
-
-template <typename T> struct _Unique_if<T[]>
-{
- typedef std::unique_ptr<T[]> _Unknown_bound;
-};
-
-template <typename T, size_t N> struct _Unique_if<T[N]>
-{
- typedef void _Known_bound;
-};
-
-template <typename T, typename... Args>
-typename _Unique_if<T>::_Single_object make_unique(Args &&... args)
-{
- return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
-template <typename T> typename _Unique_if<T>::_Unknown_bound make_unique(size_t n)
-{
- typedef typename std::remove_extent<T>::type U;
- return std::unique_ptr<T>(new U[n]());
-}
-
-template <typename T, typename... Args>
-typename _Unique_if<T>::_Known_bound make_unique(Args &&...) = delete;
-
-} // namespace cpp14
-} // namespace nnfw
-
-#endif // __NNFW_CPP14_MEMORY_H__
diff --git a/runtime/libs/jsoncpp/.FORMATDENY b/runtime/libs/jsoncpp/.FORMATDENY
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/runtime/libs/jsoncpp/.FORMATDENY
diff --git a/runtime/libs/misc/CMakeLists.txt b/runtime/libs/misc/CMakeLists.txt
index 557d403ec..5efa300f8 100644
--- a/runtime/libs/misc/CMakeLists.txt
+++ b/runtime/libs/misc/CMakeLists.txt
@@ -7,5 +7,11 @@ set_target_properties(nnfw_lib_misc PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_link_libraries(nnfw_lib_misc PRIVATE nnfw_common)
target_link_libraries(nnfw_lib_misc PRIVATE nnfw_coverage)
+install(TARGETS nnfw_lib_misc ARCHIVE DESTINATION lib)
+install(DIRECTORY "include/misc"
+ DESTINATION "include/onert" # FIXME This is only for onert developers
+ FILES_MATCHING PATTERN "*.h"
+ )
+
add_executable(nnfw_tensor_index_iterator "examples/tensor_index_iterator.cpp")
target_link_libraries(nnfw_tensor_index_iterator nnfw_lib_misc)
diff --git a/runtime/libs/misc/examples/tensor_index_iterator.cpp b/runtime/libs/misc/examples/tensor_index_iterator.cpp
index d94da9f49..590b433df 100644
--- a/runtime/libs/misc/examples/tensor_index_iterator.cpp
+++ b/runtime/libs/misc/examples/tensor_index_iterator.cpp
@@ -31,8 +31,8 @@ void test_iterate(void)
array.fill(0);
- using nnfw::misc::tensor::iterate;
using nnfw::misc::tensor::Index;
+ using nnfw::misc::tensor::iterate;
iterate(shape) << [&](const Index &index) {
assert(index.rank() == shape.rank());
diff --git a/runtime/libs/misc/include/misc/EventRecorder.h b/runtime/libs/misc/include/misc/EventRecorder.h
index 1e621fdf8..35d4074bb 100644
--- a/runtime/libs/misc/include/misc/EventRecorder.h
+++ b/runtime/libs/misc/include/misc/EventRecorder.h
@@ -57,6 +57,7 @@ public:
void emit(const CounterEvent &evt);
public:
+ bool empty() { return _ss.str().empty(); }
void writeToFile(std::ostream &os);
private:
diff --git a/runtime/libs/misc/include/misc/benchmark.h b/runtime/libs/misc/include/misc/benchmark.h
index fe5b97585..aa487aca9 100644
--- a/runtime/libs/misc/include/misc/benchmark.h
+++ b/runtime/libs/misc/include/misc/benchmark.h
@@ -69,9 +69,9 @@ private:
template <typename T, typename Callable>
Accumulator<T> &operator<<(Accumulator<T> &&acc, Callable cb)
{
- auto begin = std::chrono::steady_clock::now();
+ auto begin = std::chrono::high_resolution_clock::now();
cb();
- auto end = std::chrono::steady_clock::now();
+ auto end = std::chrono::high_resolution_clock::now();
acc() += std::chrono::duration_cast<T>(end - begin);
diff --git a/runtime/libs/misc/include/misc/string_helpers.h b/runtime/libs/misc/include/misc/string_helpers.h
index e42a12754..6aac3a83b 100644
--- a/runtime/libs/misc/include/misc/string_helpers.h
+++ b/runtime/libs/misc/include/misc/string_helpers.h
@@ -36,7 +36,7 @@ template <typename Arg, typename... Args> void _str(std::ostream &os, Arg &&arg,
_str(os, std::forward<Args>(args)...);
}
-} // namespace {anonymous}
+} // namespace
namespace nnfw
{
diff --git a/runtime/libs/misc/src/tensor/Comparator.cpp b/runtime/libs/misc/src/tensor/Comparator.cpp
index e765e77b2..80a18c11a 100644
--- a/runtime/libs/misc/src/tensor/Comparator.cpp
+++ b/runtime/libs/misc/src/tensor/Comparator.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "misc/tensor/Comparator.h"
#include "misc/tensor/Zipper.h"
diff --git a/runtime/libs/profiling/include/profiling/profiling.h b/runtime/libs/profiling/include/profiling/profiling.h
index ee0df1338..79ad060c5 100644
--- a/runtime/libs/profiling/include/profiling/profiling.h
+++ b/runtime/libs/profiling/include/profiling/profiling.h
@@ -25,7 +25,7 @@ namespace profiling
{
class Profiler; // forward declaration
}
-}
+} // namespace tflite
namespace profiling
{
diff --git a/runtime/libs/profiling/include/profiling/time.h b/runtime/libs/profiling/include/profiling/time.h
index 200563aa6..03d18ddc8 100644
--- a/runtime/libs/profiling/include/profiling/time.h
+++ b/runtime/libs/profiling/include/profiling/time.h
@@ -1,17 +1,19 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
// NOTE To minimize diff with upstream tensorflow, disable clang-format
// clang-format off
diff --git a/runtime/libs/profiling/src/profiling/time.cpp b/runtime/libs/profiling/src/profiling/time.cpp
index 761023e6d..4e045556e 100644
--- a/runtime/libs/profiling/src/profiling/time.cpp
+++ b/runtime/libs/profiling/src/profiling/time.cpp
@@ -1,17 +1,19 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
// NOTE To minimize diff with upstream tensorflow, disable clang-format
// clang-format off
diff --git a/runtime/libs/rua/core/include/rua/Service.h b/runtime/libs/rua/core/include/rua/Service.h
index a79524a8a..2129b7ac2 100644
--- a/runtime/libs/rua/core/include/rua/Service.h
+++ b/runtime/libs/rua/core/include/rua/Service.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/runtime/libs/rua/dyn/include/rua/DynamicBinder.h b/runtime/libs/rua/dyn/include/rua/DynamicBinder.h
index 8ce0c42f8..1e2d30665 100644
--- a/runtime/libs/rua/dyn/include/rua/DynamicBinder.h
+++ b/runtime/libs/rua/dyn/include/rua/DynamicBinder.h
@@ -30,6 +30,6 @@ struct DynamicBinder
static const rua::RuntimeService *get(void);
};
-} // namespace
+} // namespace rua
#endif // __NNFW_RUA_DYNAMIC_BINDER_H__
diff --git a/runtime/libs/rua/dyn/src/DynamicBinder.cpp b/runtime/libs/rua/dyn/src/DynamicBinder.cpp
index 68dae6262..fa3f0bb1e 100644
--- a/runtime/libs/rua/dyn/src/DynamicBinder.cpp
+++ b/runtime/libs/rua/dyn/src/DynamicBinder.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/runtime/libs/rua/shim/include/rua/Shim.h b/runtime/libs/rua/shim/include/rua/Shim.h
index 07a4bb2fd..755803e3b 100644
--- a/runtime/libs/rua/shim/include/rua/Shim.h
+++ b/runtime/libs/rua/shim/include/rua/Shim.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/runtime/libs/tflite/CMakeLists.txt b/runtime/libs/tflite/CMakeLists.txt
index b5a16bcd9..04ced8e0f 100644
--- a/runtime/libs/tflite/CMakeLists.txt
+++ b/runtime/libs/tflite/CMakeLists.txt
@@ -1,3 +1,9 @@
+nnfw_find_package(TensorFlowLite QUIET)
+if(NOT TensorFlowLite_FOUND)
+ message(STATUS "Check tensorflow lite library extension build: need tensorflow lite library")
+ return()
+endif(NOT TensorFlowLite_FOUND)
+
add_subdirectory(port)
file(GLOB_RECURSE SOURCES "src/*.cpp")
@@ -13,5 +19,9 @@ target_link_libraries(nnfw_lib_tflite PRIVATE ${LIB_PTHREAD} dl)
target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_common)
target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_coverage)
+if(NOT ENABLE_TEST)
+ return()
+endif(NOT ENABLE_TEST)
+
add_executable(nnfw_lib_tflite_test_TensorView src/TensorView.test.cpp)
target_link_libraries(nnfw_lib_tflite_test_TensorView nnfw_lib_tflite)
diff --git a/runtime/libs/tflite/include/tflite/Diff.h b/runtime/libs/tflite/include/tflite/Diff.h
index eca2fd502..38011b65d 100644
--- a/runtime/libs/tflite/include/tflite/Diff.h
+++ b/runtime/libs/tflite/include/tflite/Diff.h
@@ -139,6 +139,7 @@ private:
template <> uint8_t RandomGenerator::generate<uint8_t>(void);
template <> bool RandomGenerator::generate<bool>(void);
+template <> int32_t RandomGenerator::generate<int32_t>(void);
/**
* @brief Structure for NNAPI correctness test
diff --git a/runtime/libs/tflite/port/1.13.1/CMakeLists.txt b/runtime/libs/tflite/port/1.13.1/CMakeLists.txt
index 311e11cae..e3cf97569 100644
--- a/runtime/libs/tflite/port/1.13.1/CMakeLists.txt
+++ b/runtime/libs/tflite/port/1.13.1/CMakeLists.txt
@@ -2,8 +2,6 @@ if(NOT SUPPORT_TFLITE_VERSION VERSION_EQUAL 1.13.1)
return()
endif(NOT SUPPORT_TFLITE_VERSION VERSION_EQUAL 1.13.1)
-nnfw_find_package(TensorFlowLite REQUIRED)
-
file(GLOB_RECURSE SOURCES "src/*.cpp")
add_library(tensorflow-lite-ex STATIC ${SOURCES})
diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/Abs.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/Abs.h
deleted file mode 100644
index 697ba33e9..000000000
--- a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/Abs.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NNFW_TFLITE_EXT_KERNELS_ABS_H__
-#define __NNFW_TFLITE_EXT_KERNELS_ABS_H__
-
-#include "tensorflow/lite/context.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace Abs
-{
-
-void *InitAbs(TfLiteContext *context, const char *buffer, size_t length);
-void FreeAbs(TfLiteContext *context, void *buffer);
-TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node);
-TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node);
-
-} // namespace Abs
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_EXT_KERNELS_ABS_H__
diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h
index 3370db778..c073ad58e 100644
--- a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h
+++ b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h
@@ -24,10 +24,7 @@
#define __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
#include "tensorflow/lite/context.h"
-#include "tflite/ext/kernels/TensorFlowMax.h"
#include "tflite/ext/kernels/SquaredDifference.h"
-#include "tflite/ext/kernels/TensorFlowSum.h"
-#include "tflite/ext/kernels/Abs.h"
namespace nnfw
{
@@ -48,10 +45,7 @@ namespace custom
return &r; \
}
-REGISTER_FUNCTION(TensorFlowMax)
REGISTER_FUNCTION(SquaredDifference)
-REGISTER_FUNCTION(TensorFlowSum)
-REGISTER_FUNCTION(Abs)
#undef REGISTER_FUNCTION
diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowMax.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowMax.h
deleted file mode 100644
index d573308ed..000000000
--- a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowMax.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file TensorFlowMax.h
- * @brief This file contains TensorFlowMax namespace and TensorFlowMax function definitions
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
-#define __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
-
-#include "tensorflow/lite/context.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace TensorFlowMax
-{
-
-/**
- * @brief Initialize TensorFlowMax operand using the contents of buffer
- * @param[in] context The TfLite context
- * @param[in] buffer The buffer with contents
- * @param[in] length The buffer length
- * @return The void pointer for user data
- */
-void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length);
-
-/**
- * @brief Release any memory it might have allocated via 'InitTensorFlowMax'
- * @param[in] context The TfLite context
- * @param[in] buffer The buffer with contents
- * @return N/A
- */
-void FreeTensorFlowMax(TfLiteContext *context, void *buffer);
-
-/**
- * @brief Prepare the TensorFlowMax operand for execution
- * @param[in] context The TfLite context
- * @param[in] node The operand node
- * @return The TfLite status
- */
-TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
-
-/**
- * @brief Evaluation the TensorFlowMax operand for execution
- * @param[in] context The TfLite context
- * @param[in] node The operand node
- * @return The TfLite status
- */
-TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
-
-} // namespace TensorFlowMax
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowSum.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowSum.h
deleted file mode 100644
index 29455aac5..000000000
--- a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowSum.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
-#define __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
-
-#include "tensorflow/lite/context.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace TensorFlowSum
-{
-
-void *InitTensorFlowSum(TfLiteContext *context, const char *buffer, size_t length);
-void FreeTensorFlowSum(TfLiteContext *context, void *buffer);
-TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
-TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
-
-} // namespace TensorFlowSum
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp
deleted file mode 100644
index 61181465d..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/ext/kernels/Abs.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-
-#include <iostream>
-#include <cmath>
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace Abs
-{
-
-void *InitAbs(TfLiteContext *, const char *, size_t) { return nullptr; }
-
-void FreeAbs(TfLiteContext *, void *) {}
-
-TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 1);
- TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
-
- const TfLiteTensor *input = ::tflite::GetInput(context, node, 0);
- TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
-
- TF_LITE_ENSURE_EQ(context, input->type, output->type);
-
- return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
-}
-
-TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node)
-{
- const TfLiteTensor *input = ::tflite::GetInput(context, node, 0);
- TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
- size_t elements = ::tflite::NumElements(input);
- switch (input->type)
- {
- case kTfLiteFloat32:
- {
- auto *in = input->data.f;
- auto *in_end = in + elements;
- auto *out = output->data.f;
- for (; in < in_end; in++, out++)
- *out = std::abs(*in);
- return kTfLiteOk;
- }
- case kTfLiteInt32:
- {
- auto *in = input->data.i32;
- auto *in_end = in + elements;
- auto *out = output->data.i32;
- for (; in < in_end; in++, out++)
- *out = std::abs(*in);
- return kTfLiteOk;
- }
- case kTfLiteInt64:
- {
- auto *in = input->data.i64;
- auto *in_end = in + elements;
- auto *out = output->data.i64;
- for (; in < in_end; in++, out++)
- *out = std::abs(*in);
- return kTfLiteOk;
- }
- case kTfLiteUInt8:
- {
- auto *in = input->data.uint8;
- auto *in_end = in + elements;
- auto *out = output->data.uint8;
- for (; in < in_end; in++, out++)
- *out = *in;
- return kTfLiteOk;
- }
- default:
- {
- context->ReportError(context, "Input type %d is not supported", input->type);
- return kTfLiteError;
- }
- }
-}
-
-} // namespace Abs
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp
deleted file mode 100644
index 207de98f5..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/ext/kernels/TensorFlowMax.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-
-#include <iostream>
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace TensorFlowMax
-{
-
-struct TensorFlowMaxOp
-{
- TensorFlowMaxOp(TfLiteContext *context, TfLiteNode *node)
- {
- input = ::tflite::GetInput(context, node, 0);
- axis = ::tflite::GetInput(context, node, 1);
- output = ::tflite::GetOutput(context, node, 0);
- }
- const TfLiteTensor *input;
- const TfLiteTensor *axis;
- TfLiteTensor *output;
-};
-
-void *InitTensorFlowMax(TfLiteContext *context, const char *, size_t)
-{
- // Creates two temp tensors to store index and axis for internal
- // implementation only.
- auto *scratch_tensor_index = new int;
- context->AddTensors(context, 2, scratch_tensor_index);
- return scratch_tensor_index;
-}
-
-void FreeTensorFlowMax(TfLiteContext *, void *buffer)
-{
- delete static_cast<TensorFlowMaxOp *>(buffer);
-}
-
-// Resizes the temp tensor that stores resolved axis.
-TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowMaxOp *op_context,
- TfLiteTensor *resolved_axis)
-{
- TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
- axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis));
- return context->ResizeTensor(context, resolved_axis, axis_size);
-}
-
-// Resizes output array based on the input size and resolved axis.
-TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_context)
-{
- int64_t num_axis = ::tflite::NumElements(op_context->axis);
- TfLiteIntArray *input_dims = op_context->input->dims;
- int input_num_dims = ::tflite::NumDimensions(op_context->input);
- const int *axis = op_context->axis->data.i32;
-
- {
- // Calculates size of reducing axis.
- int64_t num_reduce_axis = num_axis;
- for (int64_t i = 0; i < num_axis; ++i)
- {
- int current = axis[i];
- if (current < 0)
- {
- current += input_num_dims;
- }
- TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
- for (int64_t j = 0; j < i; ++j)
- {
- int previous = axis[j];
- if (previous < 0)
- {
- previous += input_num_dims;
- }
- if (current == previous)
- {
- --num_reduce_axis;
- break;
- }
- }
- }
- // Determines output dimensions.
- int output_num_dims = ::tflite::NumDimensions(op_context->output);
- TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) ||
- (input_num_dims - num_reduce_axis == output_num_dims));
-
- if (input_num_dims == output_num_dims)
- {
- TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
- for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- int current = axis[axis_idx];
- output_dims->data[current] = 1;
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
- else
- {
- TfLiteIntArray *output_dims = TfLiteIntArrayCreate(output_num_dims);
- int num_skip_axis = 0;
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- bool is_axis = false;
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx)
- {
- ++num_skip_axis;
- is_axis = true;
- break;
- }
- }
- if (!is_axis)
- {
- output_dims->data[idx - num_skip_axis] = input_dims->data[idx];
- }
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
- }
-}
-
-// Initializes temp tensors to store index and resolved axis.
-TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node,
- TensorFlowMaxOp *op_context)
-{
- // Creates a temp index to iterate through input data.
- int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data);
- TfLiteIntArrayFree(node->temporaries);
- node->temporaries = TfLiteIntArrayCreate(2);
- node->temporaries->data[0] = *scratch_tensor_index;
- TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]];
- scratch_tensor->type = kTfLiteInt32;
- scratch_tensor->allocation_type = kTfLiteArenaRw;
- TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
- index_size->data[0] = ::tflite::NumDimensions(op_context->input);
- TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
-
- // Creates a temp tensor to store resolved axis given input data.
- node->temporaries->data[1] = *scratch_tensor_index + 1;
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- resolved_axis->type = kTfLiteInt32;
- return kTfLiteOk;
-}
-
-TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
-
- TensorFlowMaxOp op_context(context, node);
- TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
-
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Leaves work to Eval if axis is not constant; else resizes output.
- if (!::tflite::IsConstantTensor(op_context.axis))
- {
- ::tflite::SetTensorToDynamic(op_context.output);
- ::tflite::SetTensorToDynamic(resolved_axis);
- return kTfLiteOk;
- }
- resolved_axis->allocation_type = kTfLiteArenaRw;
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- return ResizeOutputTensor(context, &op_context);
-}
-
-// Gets offset of index if expanded on axis. When expanded, the flattened offset
-// will not change, if the output index changes on the given axis. For example,
-// if you have a 2D tensor and you are expanding to 3D on axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened
-// offset.
-inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- int out_idx = 0;
- for (int in_idx = 0; in_idx < num_dims; ++in_idx)
- {
- // if we need to expand this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (in_idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]);
- out_idx++;
- }
- else
- {
- offset = offset * static_cast<size_t>(dims[in_idx]);
- }
- }
- return offset;
-}
-
-// Gets offset of index if reducing on axis. When reducing, the flattened offset
-// will not change, if the input index changes on the given axis. For example,
-// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
-// offset.
-// TODO(kanlig): uses Dims to represent dimensions.
-inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- for (int idx = 0; idx < num_dims; ++idx)
- {
- // if we need to skip this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]);
- }
- }
- return offset;
-}
-
-// Gets next index to iterate through a multidimensional array.
-inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current)
-{
- int carry = 1;
- for (int idx = num_dims - 1; idx >= 0; --idx)
- {
- int current_val = current[idx] + carry;
- TF_LITE_ENSURE(context, (dims[idx] >= current_val));
- if (dims[idx] == current_val)
- {
- current[idx] = 0;
- }
- else
- {
- current[idx] = current_val;
- carry = 0;
- break;
- }
- }
- return (carry == 0);
-}
-
-template <typename T>
-inline TfLiteStatus
-CustomMax(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
- T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
- const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
-{
- // resolves axis.
- int num_resolved_axis = 0;
- for (int idx = 0; idx < num_axis_dimensions; ++idx)
- {
- int current = axis[idx];
- TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0));
- if (current < 0)
- {
- current += input_num_dims;
- }
- bool is_dup = false;
- for (int j = 0; j < num_resolved_axis; ++j)
- {
- if (resolved_axis[j] == current)
- {
- is_dup = true;
- break;
- }
- }
- if (!is_dup)
- {
- resolved_axis[num_resolved_axis++] = current;
- }
- }
-
- TF_LITE_ENSURE(context, (input_num_dims > 0));
- TF_LITE_ENSURE(context, (input_dims != nullptr));
- TF_LITE_ENSURE(context, (temp_index != nullptr));
-
- // resets output data.
- for (int idx = 0; idx < output_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, output_num_dims, output_dims, temp_index))
- {
- size_t output_offset =
- ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr);
- size_t input_offset = ExpandedInputOffset(input_num_dims, input_dims, temp_index,
- num_resolved_axis, resolved_axis);
- output_data[output_offset] = input_data[input_offset];
- }
-
- // resets temp index.
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
-
- // iterates through input_data.
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, input_num_dims, input_dims, temp_index))
- {
- size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr);
- size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index,
- num_resolved_axis, resolved_axis);
- if (output_data[output_offset] < input_data[input_offset])
- {
- output_data[output_offset] = input_data[input_offset];
- }
- }
-
- return kTfLiteOk;
-}
-
-TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
-{
-
- TensorFlowMaxOp op_context(context, node);
- int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis));
- TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Resize the output tensor if the output tensor is dynamic.
- if (::tflite::IsDynamicTensor(op_context.output))
- {
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
- }
-
- TfLiteStatus returnStatus = kTfLiteOk;
- switch (op_context.input->type)
- {
- case kTfLiteFloat32:
- returnStatus = CustomMax<float>(
- context, op_context.input->data.f, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt32:
- returnStatus = CustomMax<int>(context, op_context.input->data.i32,
- op_context.input->dims->data, op_context.input->dims->size,
- op_context.output->data.i32, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteUInt8:
- returnStatus = CustomMax<uint8_t>(
- context, op_context.input->data.uint8, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.uint8,
- op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt64:
- returnStatus = CustomMax<int64_t>(
- context, op_context.input->data.i64, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- default:
- returnStatus = kTfLiteError;
- }
-
- return returnStatus;
-}
-
-} // namespace TensorFlowMax
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp
deleted file mode 100644
index 40f266baa..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/ext/kernels/TensorFlowSum.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-
-#include <iostream>
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace TensorFlowSum
-{
-
-struct TensorFlowSumOp
-{
- TensorFlowSumOp(TfLiteContext *context, TfLiteNode *node)
- {
- input = ::tflite::GetInput(context, node, 0);
- axis = ::tflite::GetInput(context, node, 1);
- output = ::tflite::GetOutput(context, node, 0);
- }
- const TfLiteTensor *input;
- const TfLiteTensor *axis;
- TfLiteTensor *output;
-};
-
-void *InitTensorFlowSum(TfLiteContext *context, const char *, size_t)
-{
- // Creates two temp tensors to store index and axis for internal
- // implementation only.
- auto *scratch_tensor_index = new int;
- context->AddTensors(context, 2, scratch_tensor_index);
- return scratch_tensor_index;
-}
-
-void FreeTensorFlowSum(TfLiteContext *, void *buffer)
-{
- delete static_cast<TensorFlowSumOp *>(buffer);
-}
-
-// Resizes the temp tensor that stores resolved axis.
-TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowSumOp *op_context,
- TfLiteTensor *resolved_axis)
-{
- TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
- axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis));
- return context->ResizeTensor(context, resolved_axis, axis_size);
-}
-
-// Resizes output array based on the input size and resolved axis.
-TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_context)
-{
- int64_t num_axis = ::tflite::NumElements(op_context->axis);
- TfLiteIntArray *input_dims = op_context->input->dims;
- int input_num_dims = ::tflite::NumDimensions(op_context->input);
- const int *axis = op_context->axis->data.i32;
-
- {
- // Calculates size of reducing axis.
- int64_t num_reduce_axis = num_axis;
- for (int64_t i = 0; i < num_axis; ++i)
- {
- int current = axis[i];
- if (current < 0)
- {
- current += input_num_dims;
- }
- TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
- for (int64_t j = 0; j < i; ++j)
- {
- int previous = axis[j];
- if (previous < 0)
- {
- previous += input_num_dims;
- }
- if (current == previous)
- {
- --num_reduce_axis;
- break;
- }
- }
- }
- // Determines output dimensions.
- int output_num_dims = ::tflite::NumDimensions(op_context->output);
- TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) ||
- (input_num_dims - num_reduce_axis == output_num_dims));
-
- if (input_num_dims == output_num_dims)
- {
- TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
- for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- int current = axis[axis_idx];
- output_dims->data[current] = 1;
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
- else
- {
- TfLiteIntArray *output_dims = TfLiteIntArrayCreate(output_num_dims);
- int num_skip_axis = 0;
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- bool is_axis = false;
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx)
- {
- ++num_skip_axis;
- is_axis = true;
- break;
- }
- }
- if (!is_axis)
- {
- output_dims->data[idx - num_skip_axis] = input_dims->data[idx];
- }
- }
- return context->ResizeTensor(context, op_context->output, output_dims);
- }
- }
-}
-
-// Initializes temp tensors to store index and resolved axis.
-TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node,
- TensorFlowSumOp *op_context)
-{
- // Creates a temp index to iterate through input data.
- int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data);
- TfLiteIntArrayFree(node->temporaries);
- node->temporaries = TfLiteIntArrayCreate(2);
- node->temporaries->data[0] = *scratch_tensor_index;
- TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]];
- scratch_tensor->type = kTfLiteInt32;
- scratch_tensor->allocation_type = kTfLiteArenaRw;
- TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
- index_size->data[0] = ::tflite::NumDimensions(op_context->input);
- TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
-
- // Creates a temp tensor to store resolved axis given input data.
- node->temporaries->data[1] = *scratch_tensor_index + 1;
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- resolved_axis->type = kTfLiteInt32;
- return kTfLiteOk;
-}
-
-TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
-
- TensorFlowSumOp op_context(context, node);
- TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
-
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Leaves work to Eval if axis is not constant; else resizes output.
- if (!::tflite::IsConstantTensor(op_context.axis))
- {
- ::tflite::SetTensorToDynamic(op_context.output);
- ::tflite::SetTensorToDynamic(resolved_axis);
- return kTfLiteOk;
- }
- resolved_axis->allocation_type = kTfLiteArenaRw;
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- return ResizeOutputTensor(context, &op_context);
-}
-
-// Gets offset of index if expanded on axis. When expanded, the flattened offset
-// will not change, if the output index changes on the given axis. For example,
-// if you have a 2D tensor and you are expanding to 3D on axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened
-// offset.
-inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- int out_idx = 0;
- for (int in_idx = 0; in_idx < num_dims; ++in_idx)
- {
- // if we need to expand this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (in_idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]);
- out_idx++;
- }
- else
- {
- offset = offset * static_cast<size_t>(dims[in_idx]);
- }
- }
- return offset;
-}
-
-// Gets offset of index if reducing on axis. When reducing, the flattened offset
-// will not change, if the input index changes on the given axis. For example,
-// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
-// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
-// offset.
-// TODO(kanlig): uses Dims to represent dimensions.
-inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index,
- const int num_axis, const int *axis)
-{
- size_t offset = 0;
- for (int idx = 0; idx < num_dims; ++idx)
- {
- // if we need to skip this axis
- bool is_axis = false;
- if (axis != nullptr)
- {
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
- {
- if (idx == axis[axis_idx])
- {
- is_axis = true;
- break;
- }
- }
- }
- if (!is_axis)
- {
- offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]);
- }
- }
- return offset;
-}
-
-// Gets next index to iterate through a multidimensional array.
-inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current)
-{
- int carry = 1;
- for (int idx = num_dims - 1; idx >= 0; --idx)
- {
- int current_val = current[idx] + carry;
- TF_LITE_ENSURE(context, (dims[idx] >= current_val));
- if (dims[idx] == current_val)
- {
- current[idx] = 0;
- }
- else
- {
- current[idx] = current_val;
- carry = 0;
- break;
- }
- }
- return (carry == 0);
-}
-
-template <typename T>
-inline TfLiteStatus
-CustomSum(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
- T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
- const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
-{
- // resolves axis.
- int num_resolved_axis = 0;
- for (int idx = 0; idx < num_axis_dimensions; ++idx)
- {
- int current = axis[idx];
- TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0));
- if (current < 0)
- {
- current += input_num_dims;
- }
- bool is_dup = false;
- for (int j = 0; j < num_resolved_axis; ++j)
- {
- if (resolved_axis[j] == current)
- {
- is_dup = true;
- break;
- }
- }
- if (!is_dup)
- {
- resolved_axis[num_resolved_axis++] = current;
- }
- }
-
- TF_LITE_ENSURE(context, (input_num_dims > 0));
- TF_LITE_ENSURE(context, (input_dims != nullptr));
- TF_LITE_ENSURE(context, (temp_index != nullptr));
-
- // resets output data.
- for (int idx = 0; idx < output_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, output_num_dims, output_dims, temp_index))
- {
- size_t output_offset =
- ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr);
- output_data[output_offset] = 0;
- }
-
- // resets temp index.
- for (int idx = 0; idx < input_num_dims; ++idx)
- {
- temp_index[idx] = 0;
- }
-
- // iterates through input_data.
- for (bool has_next = true; has_next;
- has_next = NextIndex(context, input_num_dims, input_dims, temp_index))
- {
- size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr);
- size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index,
- num_resolved_axis, resolved_axis);
- output_data[output_offset] += input_data[input_offset];
- }
-
- return kTfLiteOk;
-}
-
-TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node)
-{
-
- TensorFlowSumOp op_context(context, node);
- int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis));
- TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
- TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
- // Resize the output tensor if the output tensor is dynamic.
- if (::tflite::IsDynamicTensor(op_context.output))
- {
- TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
- TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
- }
-
- TfLiteStatus returnStatus = kTfLiteOk;
- switch (op_context.input->type)
- {
- case kTfLiteFloat32:
- returnStatus = CustomSum<float>(
- context, op_context.input->data.f, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt32:
- returnStatus = CustomSum<int>(context, op_context.input->data.i32,
- op_context.input->dims->data, op_context.input->dims->size,
- op_context.output->data.i32, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteUInt8:
- returnStatus = CustomSum<uint8_t>(
- context, op_context.input->data.uint8, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.uint8,
- op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32,
- num_axis, false, temp_index->data.i32, resolved_axis->data.i32);
- break;
- case kTfLiteInt64:
- returnStatus = CustomSum<int64_t>(
- context, op_context.input->data.i64, op_context.input->dims->data,
- op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data,
- op_context.output->dims->size, op_context.axis->data.i32, num_axis, false,
- temp_index->data.i32, resolved_axis->data.i32);
- break;
- default:
- returnStatus = kTfLiteError;
- }
-
- return returnStatus;
-}
-
-} // namespace TensorFlowSum
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp
index b2088b277..89f81b612 100644
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp
+++ b/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp
@@ -296,10 +296,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_FILL, Register_FILL());
AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD());
- AddCustom("TensorFlowMax", nnfw::tflite::custom::Register_TensorFlowMax());
AddCustom("SquaredDifference", nnfw::tflite::custom::Register_SquaredDifference());
- AddCustom("TensorFlowSum", nnfw::tflite::custom::Register_TensorFlowSum());
- AddCustom("Abs", nnfw::tflite::custom::Register_Abs());
// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default.
diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
index 99272f0e5..2924c44e9 100644
--- a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
+++ b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
@@ -35,6 +35,8 @@ limitations under the License.
#include <sys/system_properties.h>
#endif
+#include <memory>
+
namespace nnfw {
namespace tflite {
@@ -159,6 +161,9 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
uint32_t* no_of_operands_added,
std::vector<int64_t>* nnapi_ids) {
uint32_t next_id = 0;
+ // Allocate temporary buffer to save casted boolean tensor
+ std::unordered_map<size_t, std::unique_ptr<uint8_t[]>> const_boolean_tensors;
+
for (size_t i = 0; i < subgraph->tensors_size(); i++) {
// Skip temporaries and RNN back-edges.
if ((*nnapi_ids)[i] == kOperandNotNeeded) continue;
@@ -196,9 +201,7 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
case kTfLiteBool:
// Workaround to pass bool type under NNAPI
// Use bool type using ANEURALNETWORKS_TENSOR_QUANT8_ASYMM with scale = 1.0f and zero_point = 0
- nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
- scale = 1.0f;
- zeroPoint = 0;
+ nn_type = ANEURALNETWORKS_TENSOR_BOOL8;
break;
default:
logError("Unsupported tensor type %d", tensor->type);
@@ -243,7 +246,19 @@ TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
// TODO(aselle): Based on Michael's suggestion, limiting this to read
// only memory
if (tensor->allocation_type == kTfLiteMmapRo) {
- if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
+ if (tensor->type == kTfLiteBool)
+ {
+ // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
+ size_t elements = tensor->bytes / sizeof(bool);
+ const_boolean_tensors[i] = std::make_unique<uint8_t[]>(elements);
+ for (size_t idx = 0; idx < elements; idx++)
+ {
+ const_boolean_tensors[i].get()[idx] = (tensor->data.b[idx] ? 0x00 : 0xff);
+ }
+ RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_setOperandValue(
+ nn_model, next_id, const_boolean_tensors[i].get(), tensor->bytes));
+ }
+ else if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
static_cast<const ::tflite::Allocation*>(tensor->allocation))) {
RETURN_ERROR_IF_NN_FAILED(
ANeuralNetworksModel_setOperandValueFromMemory(
@@ -703,19 +718,32 @@ TfLiteStatus AddOpsAndParams(
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_TOPK_V2;
break;
+ case tflite::BuiltinOperator_GREATER:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_GREATER;
+ break;
+ case tflite::BuiltinOperator_GREATER_EQUAL:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_GREATER_EQUAL;
+ break;
+ case tflite::BuiltinOperator_LESS:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LESS;
+ break;
+ case tflite::BuiltinOperator_LESS_EQUAL:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LESS_EQUAL;
+ break;
case tflite::BuiltinOperator_GATHER:
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_GATHER;
add_gather_params(node.builtin_data);
break;
case tflite::BuiltinOperator_SPLIT:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_SPLIT;
add_split_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_SPLIT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_NEG:
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_NEG;
@@ -733,21 +761,14 @@ TfLiteStatus AddOpsAndParams(
reinterpret_cast<uint32_t*>(node.outputs->data)));
continue;
case tflite::BuiltinOperator_PRELU:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_PRELU_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_PRELU;
+ break;
case tflite::BuiltinOperator_ARG_MAX:
check_arg_max_input(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_ARGMAX_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_ARGMAX;
+ break;
case tflite::BuiltinOperator_PACK:
add_pack_ex_params(node.builtin_data);
CHECK_NN(ANeuralNetworksModel_addOperationEx(
@@ -773,66 +794,40 @@ TfLiteStatus AddOpsAndParams(
nn_op_type = ANEURALNETWORKS_RSQRT;
break;
case tflite::BuiltinOperator_EQUAL:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_EQUAL_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_EQUAL;
+ break;
case tflite::BuiltinOperator_NOT_EQUAL:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_NOT_EQUAL_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_NOT_EQUAL;
+ break;
case tflite::BuiltinOperator_SUM:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_REDUCE_SUM;
add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_SUM_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_REDUCE_MAX:
- add_reducer_v12_params(node.builtin_data);
+ add_reducer_params(node.builtin_data);
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
break;
case tflite::BuiltinOperator_REDUCE_MIN:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_REDUCE_MIN;
add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_MIN_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ break;
case tflite::BuiltinOperator_LOGICAL_AND:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_AND_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_AND;
+ break;
case tflite::BuiltinOperator_LOGICAL_OR:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_OR_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_OR;
+ break;
case tflite::BuiltinOperator_LOGICAL_NOT:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_LOGICAL_NOT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_LOGICAL_NOT;
+ break;
case tflite::BuiltinOperator_SQUARED_DIFFERENCE:
CHECK_NN(ANeuralNetworksModel_addOperationEx(
nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
@@ -851,6 +846,26 @@ TfLiteStatus AddOpsAndParams(
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_ABS;
break;
+ case tflite::BuiltinOperator_ONE_HOT:
+ add_one_hot_tensor_inputs_as_scalar();
+ add_one_hot_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_ONE_HOT_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+        continue; // _EX operator should use `continue` to skip addOperation.
+ case tflite::BuiltinOperator_SIN:
+ nnapi_version = 12; // require NNAPI 1.2
+ nn_op_type = ANEURALNETWORKS_SIN;
+ break;
+ case tflite::BuiltinOperator_SHAPE:
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_SHAPE_EX,
+ static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t*>(node.outputs->data)));
+        continue; // _EX operator should use `continue` to skip addOperation.
case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
case tflite::BuiltinOperator_LSH_PROJECTION:
case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN:
@@ -881,14 +896,14 @@ TfLiteStatus AddOpsAndParams(
//case tflite::BuiltinOperator_MINIMUM:
//case tflite::BuiltinOperator_ARG_MAX:
case tflite::BuiltinOperator_ARG_MIN:
- case tflite::BuiltinOperator_GREATER:
- case tflite::BuiltinOperator_GREATER_EQUAL:
- case tflite::BuiltinOperator_LESS:
- case tflite::BuiltinOperator_LESS_EQUAL:
+ //case tflite::BuiltinOperator_GREATER:
+ //case tflite::BuiltinOperator_GREATER_EQUAL:
+ //case tflite::BuiltinOperator_LESS:
+ //case tflite::BuiltinOperator_LESS_EQUAL:
//case tflite::BuiltinOperator_NEG:
case tflite::BuiltinOperator_SELECT:
// case tflite::BuiltinOperator_SLICE:
- case tflite::BuiltinOperator_SIN:
+ //case tflite::BuiltinOperator_SIN:
case tflite::BuiltinOperator_LOG:
//case tflite::BuiltinOperator_TRANSPOSE_CONV:
case tflite::BuiltinOperator_TILE:
@@ -902,12 +917,12 @@ TfLiteStatus AddOpsAndParams(
case tflite::BuiltinOperator_REDUCE_PROD:
//case tflite::BuiltinOperator_SQRT:
//case tflite::BuiltinOperator_RSQRT:
- case tflite::BuiltinOperator_SHAPE:
+ //case tflite::BuiltinOperator_SHAPE:
case tflite::BuiltinOperator_POW:
case tflite::BuiltinOperator_FAKE_QUANT:
//case tflite::BuiltinOperator_PACK:
//case tflite::BuiltinOperator_LOGICAL_OR:
- case tflite::BuiltinOperator_ONE_HOT:
+ //case tflite::BuiltinOperator_ONE_HOT:
//case tflite::BuiltinOperator_LOGICAL_AND:
//case tflite::BuiltinOperator_LOGICAL_NOT:
//case tflite::BuiltinOperator_UNPACK:
@@ -928,13 +943,7 @@ TfLiteStatus AddOpsAndParams(
break;
case tflite::BuiltinOperator_CUSTOM: {
std::string custom_name(registration.custom_name);
- if (custom_name.compare("TensorFlowMax") == 0) {
- add_reducer_v12_params(node.builtin_data);
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
- break;
- }
- else if (custom_name.compare("SquaredDifference") == 0) {
+ if (custom_name.compare("SquaredDifference") == 0) {
CHECK_NN(ANeuralNetworksModel_addOperationEx(
nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
static_cast<uint32_t>(augmented_inputs.size()),
@@ -943,21 +952,6 @@ TfLiteStatus AddOpsAndParams(
reinterpret_cast<uint32_t*>(node.outputs->data)));
continue;
}
- else if (custom_name.compare("TensorFlowSum") == 0) {
- add_reducer_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_REDUCE_SUM_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- }
- else if (custom_name.compare("Abs") == 0) {
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_ABS;
- break;
- }
logError("Custom operations are not supported when using NNAPI.");
return kTfLiteError;
break;
@@ -1110,6 +1104,7 @@ TfLiteStatus NNAPIDelegate::Invoke(::tflite::Subgraph* subgraph) {
// TODO(aselle): This should be called setInputValue maybe to be cons.
TfLiteTensor* tensor = subgraph->tensor(input);
// Workaround to pass bool type under NNAPI
+ // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
if (tensor->type == kTfLiteBool)
{
CHECK_NN(ANeuralNetworksExecution_setInput(
@@ -1128,6 +1123,7 @@ TfLiteStatus NNAPIDelegate::Invoke(::tflite::Subgraph* subgraph) {
TfLiteTensor* tensor = subgraph->tensor(output);
// Workaround to pass bool type under NNAPI
+ // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
if (tensor->type == kTfLiteBool)
{
CHECK_NN(ANeuralNetworksExecution_setOutput(
diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
index 5b718029b..ee758105f 100644
--- a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
+++ b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
@@ -135,7 +135,7 @@
assert(count == 1);
};
- auto add_reducer_v12_params = [&add_scalar_bool8](void* data) {
+ auto add_reducer_params = [&add_scalar_bool8](void* data) {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(data);
if (builtin == nullptr)
{
@@ -147,14 +147,24 @@
}
};
- auto add_reducer_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteReducerParams*>(data);
- if (builtin == nullptr)
- {
- add_scalar_int32(0);
- }
- else
- {
- add_scalar_int32(builtin->keep_dims);
- }
+ auto add_one_hot_tensor_inputs_as_scalar = [subgraph, &node, &augmented_inputs,
+ &add_scalar_float32]() {
+ assert(augmented_inputs.size() == 4);
+ const auto on_value_idx = node.inputs->data[2];
+ const auto off_value_idx = node.inputs->data[3];
+ const auto on_value_tensor = subgraph->tensor(on_value_idx);
+ const auto off_value_tensor = subgraph->tensor(off_value_idx);
+ assert(on_value_tensor->type == kTfLiteFloat32);
+ assert(off_value_tensor->type == kTfLiteFloat32);
+ const auto on_value = *on_value_tensor->data.f;
+ const auto off_value = *off_value_tensor->data.f;
+ augmented_inputs.pop_back();
+ augmented_inputs.pop_back();
+ add_scalar_float32(on_value);
+ add_scalar_float32(off_value);
+ };
+
+ auto add_one_hot_params = [&add_scalar_int32](void* data) {
+ const auto* builtin = reinterpret_cast<TfLiteOneHotParams*>(data);
+ add_scalar_int32(builtin->axis);
};
diff --git a/runtime/libs/tflite/src/Diff.cpp b/runtime/libs/tflite/src/Diff.cpp
index 879de0735..9e66bbb5d 100644
--- a/runtime/libs/tflite/src/Diff.cpp
+++ b/runtime/libs/tflite/src/Diff.cpp
@@ -86,8 +86,8 @@ bool TfLiteInterpMatchApp::compareSingleTensorView(const nnfw::tflite::TensorVie
std::vector<nnfw::misc::tensor::Diff<T>> diffs;
assert(expected.shape() == obtained.shape());
- using nnfw::misc::tensor::zip;
using nnfw::misc::tensor::Index;
+ using nnfw::misc::tensor::zip;
zip(expected.shape(), expected, obtained)
<< [&](const Index &index, T expected_value, T obtained_value) {
@@ -296,6 +296,18 @@ template <> bool RandomGenerator::generate<bool>(void)
return dist(_rand);
}
+template <> int32_t RandomGenerator::generate<int32_t>(void)
+{
+  // Instead of INT_MAX, 4096 is chosen because an int32_t input does not mean
+  // that the model can accept any value an int32_t can hold.
+ // For example, one_hot operation gets indices as int32_t tensor.
+ // However, we usually expect it would hold a value in [0..depth).
+ // In our given model, depth was 10137.
+ const int int32_random_max = 4096;
+ std::uniform_int_distribution<> dist(0, int32_random_max);
+ return dist(_rand);
+}
+
#include "tflite/TensorLogger.h"
//
// Random Test Runner
@@ -615,7 +627,8 @@ RandomTestRunner RandomTestRunner::make(uint32_t seed)
param.verbose = nnfw::misc::EnvVar("VERBOSE").asInt(0);
param.tolerance = nnfw::misc::EnvVar("TOLERANCE").asInt(1);
- ;
+ param.tensor_logging = nnfw::misc::EnvVar("TENSOR_LOGGING").asBool(false);
+ param.log_path = nnfw::misc::EnvVar("TENSOR_LOGGING").asString("tensor_log.txt");
return RandomTestRunner{seed, param};
}
diff --git a/runtime/libs/tflite/src/TensorShapeUtils.cpp b/runtime/libs/tflite/src/TensorShapeUtils.cpp
index 29628cd26..689b6151b 100644
--- a/runtime/libs/tflite/src/TensorShapeUtils.cpp
+++ b/runtime/libs/tflite/src/TensorShapeUtils.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "tflite/TensorShapeUtils.h"
namespace nnfw