Diffstat (limited to 'runtime/libs')
-rw-r--r--  runtime/libs/benchmark/include/benchmark/CsvWriter.h  1
-rw-r--r--  runtime/libs/benchmark/include/benchmark/MemoryInfo.h  40
-rw-r--r--  runtime/libs/benchmark/include/benchmark/MemoryPoller.h  4
-rw-r--r--  runtime/libs/benchmark/include/benchmark/Phase.h  1
-rw-r--r--  runtime/libs/benchmark/include/benchmark/Phases.h  5
-rw-r--r--  runtime/libs/benchmark/include/benchmark/Result.h  4
-rw-r--r--  runtime/libs/benchmark/src/CsvWriter.cpp  2
-rw-r--r--  runtime/libs/benchmark/src/MemoryInfo.cpp  169
-rw-r--r--  runtime/libs/benchmark/src/MemoryPoller.cpp  167
-rw-r--r--  runtime/libs/benchmark/src/Phases.cpp  18
-rw-r--r--  runtime/libs/benchmark/src/Result.cpp  32
-rw-r--r--  runtime/libs/misc/CMakeLists.txt  19
-rw-r--r--  runtime/libs/misc/include/misc/EnvConfigSource.h  41
-rw-r--r--  runtime/libs/misc/include/misc/GeneralConfigSource.h  44
-rw-r--r--  runtime/libs/misc/include/misc/IConfigSource.h  46
-rw-r--r--  runtime/libs/misc/include/misc/RandomGenerator.h  1
-rw-r--r--  runtime/libs/misc/include/misc/feature/Index.h  2
-rw-r--r--  runtime/libs/misc/include/misc/feature/Shape.h  2
-rw-r--r--  runtime/libs/misc/include/misc/kernel/Shape.h  2
-rw-r--r--  runtime/libs/misc/include/misc/polymorphic_downcast.h  2
-rw-r--r--  runtime/libs/misc/include/misc/string_helpers.h  2
-rw-r--r--  runtime/libs/misc/include/misc/tensor/Object.h  5
-rw-r--r--  runtime/libs/misc/include/misc/tensor/Zipper.h  4
-rw-r--r--  runtime/libs/misc/src/EnvConfigSource.cpp (renamed from runtime/libs/tflite/src/interp/FunctionBuilder.cpp)  28
-rw-r--r--  runtime/libs/misc/src/GeneralConfigSource.cpp (renamed from runtime/libs/tflite/src/Quantization.cpp)  28
-rw-r--r--  runtime/libs/misc/src/RandomGenerator.cpp  28
-rw-r--r--  runtime/libs/misc/src/string_helpers.test.cpp  81
-rw-r--r--  runtime/libs/misc/src/tensor/Comparator.cpp  24
-rw-r--r--  runtime/libs/misc/src/tensor/IndexEnumerator.test.cpp  59
-rw-r--r--  runtime/libs/misc/src/tensor/IndexIterator.test.cpp (renamed from runtime/libs/misc/examples/tensor_index_iterator.cpp)  37
-rw-r--r--  runtime/libs/ndarray/CMakeLists.txt  15
-rw-r--r--  runtime/libs/ndarray/include/ndarray/Array.h  28
-rw-r--r--  runtime/libs/ndarray/include/ndarray/ContiguousSpan.h  2
-rw-r--r--  runtime/libs/ndarray/src/Array.test.cpp  452
-rw-r--r--  runtime/libs/ndarray/src/ContiguousSpan.test.cpp  196
-rw-r--r--  runtime/libs/ndarray/src/detail/cxx14.h  67
-rw-r--r--  runtime/libs/ndarray/test/CMakeLists.txt  17
-rw-r--r--  runtime/libs/ndarray/test/ndarray_test.cpp  92
-rw-r--r--  runtime/libs/nnapi/CMakeLists.txt  5
-rw-r--r--  runtime/libs/nnapi/include/NeuralNetworksExShim.h (renamed from runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h)  0
-rw-r--r--  runtime/libs/nnapi/include/NeuralNetworksLoadHelpers.h (renamed from runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h)  0
-rw-r--r--  runtime/libs/nnapi/include/NeuralNetworksShim.h (renamed from runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h)  424
-rw-r--r--  runtime/libs/nnapi/include/NeuralNetworksTypes.h (renamed from runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h)  86
-rw-r--r--  runtime/libs/nnapi/v1.1/CMakeLists.txt  4
-rw-r--r--  runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h  64
-rw-r--r--  runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h  141
-rw-r--r--  runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h  709
-rw-r--r--  runtime/libs/nnapi/v1.2/CMakeLists.txt  4
-rw-r--r--  runtime/libs/profiling/CMakeLists.txt  1
-rw-r--r--  runtime/libs/profiling/src/profiling/time.cpp  8
-rw-r--r--  runtime/libs/rua/anchor/CMakeLists.txt  1
-rw-r--r--  runtime/libs/rua/dyn/CMakeLists.txt  1
-rw-r--r--  runtime/libs/rua/dyn/src/DynamicBinder.cpp  12
-rw-r--r--  runtime/libs/tflite/CMakeLists.txt  7
-rw-r--r--  runtime/libs/tflite/include/tflite/Diff.h  12
-rw-r--r--  runtime/libs/tflite/include/tflite/FeatureView.h  108
-rw-r--r--  runtime/libs/tflite/include/tflite/InterpreterSession.h  12
-rw-r--r--  runtime/libs/tflite/include/tflite/NNAPISession.h  102
-rw-r--r--  runtime/libs/tflite/include/tflite/OutputIndex.h  60
-rw-r--r--  runtime/libs/tflite/include/tflite/Quantization.h  44
-rw-r--r--  runtime/libs/tflite/include/tflite/RandomInputInitializer.h (renamed from runtime/libs/tflite/src/interp/FlatBufferBuilder.cpp)  30
-rw-r--r--  runtime/libs/tflite/include/tflite/RandomTestRunner.h  103
-rw-r--r--  runtime/libs/tflite/include/tflite/Session.h  4
-rw-r--r--  runtime/libs/tflite/include/tflite/TensorLogger.h  168
-rw-r--r--  runtime/libs/tflite/include/tflite/TensorShapeUtils.h  64
-rw-r--r--  runtime/libs/tflite/include/tflite/TensorUtils.h  54
-rw-r--r--  runtime/libs/tflite/include/tflite/TensorView.h  14
-rw-r--r--  runtime/libs/tflite/include/tflite/interp/Builder.h  53
-rw-r--r--  runtime/libs/tflite/include/tflite/interp/FlatBufferBuilder.h  64
-rw-r--r--  runtime/libs/tflite/include/tflite/interp/FunctionBuilder.h  67
-rw-r--r--  runtime/libs/tflite/port/1.13.1/CMakeLists.txt  14
-rw-r--r--  runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h  56
-rw-r--r--  runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/SquaredDifference.h  76
-rw-r--r--  runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/register.h  46
-rw-r--r--  runtime/libs/tflite/port/1.13.1/include/tflite/ext/nnapi_delegate.h  92
-rw-r--r--  runtime/libs/tflite/port/1.13.1/src/kernels/SquaredDifference.cpp  109
-rw-r--r--  runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp  314
-rw-r--r--  runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp  1262
-rw-r--r--  runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc  153
-rw-r--r--  runtime/libs/tflite/port/CMakeLists.txt  7
-rw-r--r--  runtime/libs/tflite/src/Diff.cpp  82
-rw-r--r--  runtime/libs/tflite/src/FeatureView.cpp  70
-rw-r--r--  runtime/libs/tflite/src/RandomInputInitializer.cpp  71
-rw-r--r--  runtime/libs/tflite/src/RandomTestRunner.cpp  363
-rw-r--r--  runtime/libs/tflite/src/TensorShapeUtils.cpp  45
85 files changed, 1983 insertions(+), 4970 deletions(-)
diff --git a/runtime/libs/benchmark/include/benchmark/CsvWriter.h b/runtime/libs/benchmark/include/benchmark/CsvWriter.h
index 5c259d7ed..d926bad8f 100644
--- a/runtime/libs/benchmark/include/benchmark/CsvWriter.h
+++ b/runtime/libs/benchmark/include/benchmark/CsvWriter.h
@@ -17,6 +17,7 @@
#ifndef __NNFW_BENCHMARK_CSV_WRITER_H__
#define __NNFW_BENCHMARK_CSV_WRITER_H__
+#include <cstdint>
#include <vector>
#include <string>
#include <fstream>
diff --git a/runtime/libs/benchmark/include/benchmark/MemoryInfo.h b/runtime/libs/benchmark/include/benchmark/MemoryInfo.h
new file mode 100644
index 000000000..6e8e12ba4
--- /dev/null
+++ b/runtime/libs/benchmark/include/benchmark/MemoryInfo.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_BENCHMARK_MEMORY_INFO_H__
+#define __NNFW_BENCHMARK_MEMORY_INFO_H__
+
+#include <cstdint>
+#include <string>
+
+namespace benchmark
+{
+
+bool prepareVmRSS();
+bool prepareVmHWM();
+bool prepareGpuMemory();
+bool preparePssSum();
+
+uint32_t getVmRSS();
+uint32_t getVmHWM();
+uint32_t getGpuMemory(const std::string &process_name);
+uint32_t getPssSum();
+
+std::string getProcessName();
+
+} // namespace benchmark
+
+#endif // __NNFW_BENCHMARK_MEMORY_INFO_H__
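The header above exposes memory sampling as free functions, so runners can take one-off measurements without a MemoryPoller. A minimal usage sketch (hypothetical, assuming the program links against libbenchmark; all values follow the kB convention of /proc):

#include "benchmark/MemoryInfo.h"

#include <iostream>
#include <vector>

int main()
{
  if (!benchmark::prepareVmRSS())
    return 1; // /proc/self/status is unavailable on this platform

  const uint32_t before_kb = benchmark::getVmRSS();
  std::vector<char> workload(64 * 1024 * 1024); // zero-filled, so pages are committed
  const uint32_t after_kb = benchmark::getVmRSS();

  std::cout << "RSS grew by about " << (after_kb - before_kb) << " kB" << std::endl;
  return 0;
}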
diff --git a/runtime/libs/benchmark/include/benchmark/MemoryPoller.h b/runtime/libs/benchmark/include/benchmark/MemoryPoller.h
index 48caa3b3a..47db3fd77 100644
--- a/runtime/libs/benchmark/include/benchmark/MemoryPoller.h
+++ b/runtime/libs/benchmark/include/benchmark/MemoryPoller.h
@@ -57,10 +57,6 @@ public:
private:
void process();
bool prepareMemoryPolling();
- uint32_t getVmRSS();
- uint32_t getVmHWM();
- uint32_t getGpuMemory();
- uint32_t getPssSum();
private:
std::chrono::milliseconds _duration;
diff --git a/runtime/libs/benchmark/include/benchmark/Phase.h b/runtime/libs/benchmark/include/benchmark/Phase.h
index 5eceb04c5..9b91a4391 100644
--- a/runtime/libs/benchmark/include/benchmark/Phase.h
+++ b/runtime/libs/benchmark/include/benchmark/Phase.h
@@ -19,6 +19,7 @@
#include "Types.h"
+#include <cstdint>
#include <string>
#include <vector>
diff --git a/runtime/libs/benchmark/include/benchmark/Phases.h b/runtime/libs/benchmark/include/benchmark/Phases.h
index 936a89742..7d642782a 100644
--- a/runtime/libs/benchmark/include/benchmark/Phases.h
+++ b/runtime/libs/benchmark/include/benchmark/Phases.h
@@ -50,6 +50,9 @@ public:
const MemoryPoller &mem_poll() const { return *_mem_poll; }
const Phase &at(const std::string &tag) const { return _phases.at(tag); }
+ uint32_t mem_before_init() const { return _mem_before_init; }
+ uint32_t mem_after_run() const { return _mem_after_run; }
+
private:
void run(const std::string &tag, const PhaseFunc &exec, const PhaseFunc *post, uint32_t loop_num,
bool option_disable);
@@ -58,6 +61,8 @@ private:
const PhaseOption _option;
std::unordered_map<std::string, Phase> _phases;
std::unique_ptr<MemoryPoller> _mem_poll;
+ uint32_t _mem_before_init;
+ uint32_t _mem_after_run;
};
} // namespace benchmark
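The two new accessors expose the VmHWM samples that Phases takes before initialization and after the last run. A hypothetical sketch of reading them, assuming a public run(tag, exec) overload that forwards to the private helper above and that PhaseFunc is callable as (const Phase &, uint32_t):

#include "benchmark/Phases.h"

void measure(benchmark::Phases &phases)
{
  phases.run("EXECUTE", [](const benchmark::Phase &, uint32_t) {
    // ... run one inference here ...
  });

  // Peak memory attributable to the workload itself, in kB.
  uint32_t used_peak_kb = phases.mem_after_run() - phases.mem_before_init();
  (void)used_peak_kb;
}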
diff --git a/runtime/libs/benchmark/include/benchmark/Result.h b/runtime/libs/benchmark/include/benchmark/Result.h
index 69084b300..4046d7c07 100644
--- a/runtime/libs/benchmark/include/benchmark/Result.h
+++ b/runtime/libs/benchmark/include/benchmark/Result.h
@@ -25,7 +25,7 @@
namespace benchmark
{
-// Data class between runner(nnpackage_run and tflite_run) and libbenchmark
+// Data class between runners (onert_run and tflite_run) and libbenchmark
class Result
{
public:
@@ -34,6 +34,8 @@ public:
double time[PhaseEnum::END_OF_PHASE][FigureType::END_OF_FIG_TYPE];
uint32_t memory[PhaseEnum::END_OF_PHASE][MemoryType::END_OF_MEM_TYPE];
bool print_memory = false;
+ uint32_t init_memory = 0;
+ uint32_t peak_memory = 0;
};
// TODO Support not only stdout but also ostream
diff --git a/runtime/libs/benchmark/src/CsvWriter.cpp b/runtime/libs/benchmark/src/CsvWriter.cpp
index 5f47c6511..6233129e7 100644
--- a/runtime/libs/benchmark/src/CsvWriter.cpp
+++ b/runtime/libs/benchmark/src/CsvWriter.cpp
@@ -35,7 +35,7 @@ CsvWriter::CsvWriter(const std::string &csv_filename) : CsvWriter(csv_filename,
}
CsvWriter::CsvWriter(const std::string &csv_filename, const std::vector<std::string> &header)
- : _ofs(csv_filename), _header_size(header.size()), _col_idx(0), _row_idx(0)
+ : _ofs(csv_filename), _header_size(header.size()), _col_idx(0), _row_idx(0)
{
assert(csv_filename.empty() == false);
assert(header.size() != 0);
diff --git a/runtime/libs/benchmark/src/MemoryInfo.cpp b/runtime/libs/benchmark/src/MemoryInfo.cpp
new file mode 100644
index 000000000..20d262961
--- /dev/null
+++ b/runtime/libs/benchmark/src/MemoryInfo.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "benchmark/MemoryInfo.h"
+
+#include <vector>
+#include <algorithm>
+#include <fstream>
+#include <sstream>
+#include <cassert>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+namespace
+{
+
+const std::string proc_status_path("/proc/self/status");
+const std::string gpu_memory_path("/sys/kernel/debug/mali0/gpu_memory");
+const std::string proc_smaps_path("/proc/self/smaps");
+
+bool isStrNumber(const std::string &s)
+{
+ return !s.empty() &&
+ std::find_if(s.begin(), s.end(), [](char c) { return !std::isdigit(c); }) == s.end();
+}
+
+std::vector<std::string> splitLine(std::string line, std::string delimiters = " \n\t")
+{
+ std::vector<std::string> words;
+ size_t prev = 0, pos;
+
+ while ((pos = line.find_first_of(delimiters, prev)) != std::string::npos)
+ {
+ if (pos > prev)
+ words.emplace_back(line.substr(prev, pos - prev));
+ prev = pos + 1;
+ }
+
+ if (prev < line.length())
+ words.emplace_back(line.substr(prev, std::string::npos));
+
+ return words;
+}
+
+std::vector<std::string> getValueFromFileStatus(const std::string &file, const std::string &key)
+{
+ std::ifstream ifs(file);
+ assert(ifs.is_open());
+
+ std::string line;
+ std::vector<std::string> val;
+
+ bool found = false;
+ while (std::getline(ifs, line))
+ {
+ if (line.find(key) != std::string::npos)
+ {
+ found = true;
+ break;
+ }
+ }
+ ifs.close();
+
+ if (!found)
+ {
+ // NOTE A process that uses GPU resources may not exist yet at the model-load phase.
+ // In that case, just return an empty vector.
+ return val;
+ }
+
+ val = splitLine(line);
+ return val;
+}
+
+// Because of smaps' structure, returns sum value as uint32_t
+uint32_t getSumValueFromFileSmaps(const std::string &file, const std::string &key)
+{
+ std::ifstream ifs(file);
+ assert(ifs.is_open());
+
+ std::string line;
+ uint32_t sum = 0;
+ while (std::getline(ifs, line))
+ {
+ if (line.find(key) != std::string::npos)
+ {
+ // an example by splitLine()
+ // `Pss: 0 kB`
+ // val[0]: "Pss:", val[1]: "0" val[2]: "kB"
+ auto val = splitLine(line);
+ assert(val.size() != 0);
+ // SwapPss may also appear, so check that "Pss" is at the beginning of the key
+ if (val[0].find("Pss") != 0)
+ {
+ continue;
+ }
+ sum += std::stoul(val[1]);
+ }
+ }
+
+ return sum;
+}
+
+} // namespace
+
+namespace benchmark
+{
+
+bool prepareVmRSS() { return std::ifstream(proc_status_path).is_open(); }
+
+bool prepareVmHWM() { return std::ifstream(proc_status_path).is_open(); }
+
+bool prepareGpuMemory() { return std::ifstream(gpu_memory_path).is_open(); }
+
+bool preparePssSum() { return std::ifstream(proc_smaps_path).is_open(); }
+
+uint32_t getVmRSS()
+{
+ auto val = getValueFromFileStatus(proc_status_path, "VmRSS");
+ if (val.size() == 0)
+ return 0;
+ assert(isStrNumber(val[1]));
+ return std::stoul(val[1]);
+}
+
+uint32_t getVmHWM()
+{
+ auto val = getValueFromFileStatus(proc_status_path, "VmHWM");
+ if (val.size() == 0)
+ return 0;
+ // key: value
+ assert(isStrNumber(val[1]));
+ return std::stoul(val[1]);
+}
+
+uint32_t getGpuMemory(const std::string &process_name)
+{
+ assert(!process_name.empty());
+ auto val = getValueFromFileStatus(gpu_memory_path, process_name);
+ if (val.size() == 0)
+ return 0;
+ // process_name -> pid -> gpu_mem -> max_gpu_mem
+ assert(isStrNumber(val[2]));
+ return std::stoul(val[2]);
+}
+
+uint32_t getPssSum() { return getSumValueFromFileSmaps(proc_smaps_path, "Pss"); }
+
+std::string getProcessName()
+{
+ auto val = getValueFromFileStatus(proc_status_path, "Name");
+ assert(val.size() >= 2);
+ return val[1];
+}
+
+} // namespace benchmark
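To make the parsing concrete: a /proc/self/status entry such as "VmRSS:  123456 kB" tokenizes into three words, and the numeric field is always in kB. A standalone re-implementation of the same tokenization, for illustration only:

#include <iostream>
#include <string>
#include <vector>

int main()
{
  // Same delimiter set as splitLine() above: space, newline, tab.
  const std::string line = "VmRSS:\t  123456 kB";
  std::vector<std::string> words;
  size_t prev = 0, pos;
  while ((pos = line.find_first_of(" \n\t", prev)) != std::string::npos)
  {
    if (pos > prev)
      words.emplace_back(line.substr(prev, pos - prev));
    prev = pos + 1;
  }
  if (prev < line.length())
    words.emplace_back(line.substr(prev));

  // words[0] == "VmRSS:", words[1] == "123456", words[2] == "kB"
  std::cout << words[0] << " " << std::stoul(words[1]) << " kB" << std::endl;
  return 0;
}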
diff --git a/runtime/libs/benchmark/src/MemoryPoller.cpp b/runtime/libs/benchmark/src/MemoryPoller.cpp
index 61fdecd46..62339306e 100644
--- a/runtime/libs/benchmark/src/MemoryPoller.cpp
+++ b/runtime/libs/benchmark/src/MemoryPoller.cpp
@@ -16,111 +16,18 @@
#include "benchmark/MemoryPoller.h"
#include "benchmark/Types.h"
+#include "benchmark/MemoryInfo.h"
#include <vector>
-#include <fstream>
-#include <sstream>
#include <stdexcept>
#include <cassert>
#include <iostream>
-namespace
-{
-
-const std::string proc_status_path("/proc/self/status");
-const std::string gpu_memory_path("/sys/kernel/debug/mali0/gpu_memory");
-const std::string proc_smaps_path("/proc/self/smaps");
-
-bool isStrNumber(const std::string &s)
-{
- return !s.empty() &&
- std::find_if(s.begin(), s.end(), [](char c) { return !std::isdigit(c); }) == s.end();
-}
-
-std::vector<std::string> splitLine(std::string line, std::string delimiters = " \n\t")
-{
- std::vector<std::string> words;
- size_t prev = 0, pos;
-
- while ((pos = line.find_first_of(delimiters, prev)) != std::string::npos)
- {
- if (pos > prev)
- words.emplace_back(line.substr(prev, pos - prev));
- prev = pos + 1;
- }
-
- if (prev < line.length())
- words.emplace_back(line.substr(prev, std::string::npos));
-
- return words;
-}
-
-std::vector<std::string> getValueFromFileStatus(const std::string &file, const std::string &key)
-{
- std::ifstream ifs(file);
- assert(ifs.is_open());
-
- std::string line;
- std::vector<std::string> val;
-
- bool found = false;
- while (std::getline(ifs, line))
- {
- if (line.find(key) != std::string::npos)
- {
- found = true;
- break;
- }
- }
- ifs.close();
-
- if (!found)
- {
- // NOTE. the process which uses gpu resources cannot be there yet at the model-load phase.
- // At that time, just return empty.
- return val;
- }
-
- val = splitLine(line);
- return val;
-}
-
-// Because of smaps' structure, returns sum value as uint32_t
-uint32_t getSumValueFromFileSmaps(const std::string &file, const std::string &key)
-{
- std::ifstream ifs(file);
- assert(ifs.is_open());
-
- std::string line;
- uint32_t sum = 0;
- while (std::getline(ifs, line))
- {
- if (line.find(key) != std::string::npos)
- {
- // an example by splitLine()
- // `Pss: 0 kB`
- // val[0]: "Pss:", val[1]: "0" val[2]: "kB"
- auto val = splitLine(line);
- assert(val.size() != 0);
- // SwapPss could show so that check where Pss is at the beginning
- if (val[0].find("Pss") != 0)
- {
- continue;
- }
- sum += std::stoul(val[1]);
- }
- }
-
- return sum;
-}
-
-} // namespace
-
namespace benchmark
{
MemoryPoller::MemoryPoller(std::chrono::milliseconds duration, bool gpu_poll)
- : _duration(duration), _run(false), _term(false), _gpu_poll(gpu_poll)
+ : _duration(duration), _run(false), _term(false), _gpu_poll(gpu_poll)
{
if (prepareMemoryPolling() == false)
throw std::runtime_error("failed to prepare memory polling");
@@ -168,7 +75,7 @@ bool MemoryPoller::end(PhaseEnum phase)
mem = getVmRSS();
if (_gpu_poll)
{
- mem += getGpuMemory();
+ mem += getGpuMemory(_process_name);
}
if (mem > _rss_map[phase])
_rss_map[phase] = mem;
@@ -176,7 +83,7 @@ bool MemoryPoller::end(PhaseEnum phase)
mem = getVmHWM();
if (_gpu_poll)
{
- mem += getGpuMemory();
+ mem += getGpuMemory(_process_name);
}
_hwm_map[phase] = mem;
@@ -208,13 +115,13 @@ void MemoryPoller::process()
uint32_t cur_hwm = getVmHWM();
if (_gpu_poll)
{
- auto gpu_mem = getGpuMemory();
+ auto gpu_mem = getGpuMemory(_process_name);
cur_rss += gpu_mem;
cur_hwm += gpu_mem;
}
uint32_t cur_pss = getPssSum();
- for (auto &phase : _phases)
+ for (const auto &phase : _phases)
{
auto &rss = _rss_map.at(phase);
if (rss < cur_rss)
@@ -236,77 +143,33 @@ void MemoryPoller::process()
bool MemoryPoller::prepareMemoryPolling()
{
// VmRSS
+ if (!prepareVmRSS())
{
- std::ifstream ifs(proc_status_path);
- if (!ifs.is_open())
- {
- std::cerr << "failed to open " << proc_status_path << std::endl;
- return false;
- }
- ifs.close();
+ std::cerr << "failed to prepare parsing vmrss" << std::endl;
+ return false;
}
// (Additionally) GpuMemory
if (_gpu_poll)
{
- std::ifstream ifs(gpu_memory_path);
- if (!ifs.is_open())
+ if (!prepareGpuMemory())
{
- std::cerr << "failed to open " << gpu_memory_path << std::endl;
+ std::cerr << "failed to prepare parsing gpu memory" << std::endl;
return false;
}
- ifs.close();
// Needs process name
- auto val = getValueFromFileStatus(proc_status_path, "Name");
- assert(val.size() != 0);
- _process_name = val[1];
+ _process_name = getProcessName();
}
// PSS
+ if (!preparePssSum())
{
- std::ifstream ifs(proc_smaps_path);
- if (!ifs.is_open())
- {
- std::cerr << "failed to open " << proc_smaps_path << std::endl;
- return false;
- }
- ifs.close();
+ std::cerr << "failed to prepare parsing pss sum" << std::endl;
+ return false;
}
return true;
}
-uint32_t MemoryPoller::getVmRSS()
-{
- auto val = getValueFromFileStatus(proc_status_path, "VmRSS");
- if (val.size() == 0)
- return 0;
- assert(isStrNumber(val[1]));
- return std::stoul(val[1]);
-}
-
-uint32_t MemoryPoller::getVmHWM()
-{
- auto val = getValueFromFileStatus(proc_status_path, "VmHWM");
- if (val.size() == 0)
- return 0;
- // key: value
- assert(isStrNumber(val[1]));
- return std::stoul(val[1]);
-}
-
-uint32_t MemoryPoller::getGpuMemory()
-{
- assert(!_process_name.empty());
- auto val = getValueFromFileStatus(gpu_memory_path, _process_name);
- if (val.size() == 0)
- return 0;
- // process_name -> pid -> gpu_mem -> max_gpu_mem
- assert(isStrNumber(val[2]));
- return std::stoul(val[2]);
-}
-
-uint32_t MemoryPoller::getPssSum() { return getSumValueFromFileSmaps(proc_smaps_path, "Pss"); }
-
} // namespace benchmark
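With the parsing helpers moved into MemoryInfo.cpp, MemoryPoller is reduced to thread management and per-phase bookkeeping. A sketch of driving it directly, assuming the start()/end() pair implied by the end() hunk above (Phases normally does this internally):

#include "benchmark/MemoryPoller.h"
#include "benchmark/Types.h"

#include <chrono>

void poll_execute_phase()
{
  // Sample every 5 ms; GPU polling disabled.
  benchmark::MemoryPoller poller(std::chrono::milliseconds(5), /*gpu_poll=*/false);

  poller.start(benchmark::PhaseEnum::EXECUTE);
  // ... run the workload being measured ...
  poller.end(benchmark::PhaseEnum::EXECUTE);
}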
diff --git a/runtime/libs/benchmark/src/Phases.cpp b/runtime/libs/benchmark/src/Phases.cpp
index 9ab67cfd9..d8d9c3cb7 100644
--- a/runtime/libs/benchmark/src/Phases.cpp
+++ b/runtime/libs/benchmark/src/Phases.cpp
@@ -17,20 +17,21 @@
#include "benchmark/Phases.h"
#include "benchmark/Types.h"
+#include "benchmark/MemoryInfo.h"
#include <cassert>
#include <chrono>
#include <iostream>
-#include <sys/time.h>
+#include <time.h>
namespace
{
uint64_t nowMicros()
{
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return static_cast<uint64_t>(tv.tv_sec) * 1e6 + tv.tv_usec;
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return static_cast<uint64_t>(ts.tv_nsec) / 1e3 + static_cast<uint64_t>(ts.tv_sec) * 1e6;
}
void SleepForMicros(uint64_t micros)
@@ -41,13 +42,16 @@ void SleepForMicros(uint64_t micros)
sleep_time.tv_nsec = micros * 1e3;
nanosleep(&sleep_time, nullptr);
}
-}
+} // namespace
namespace benchmark
{
-Phases::Phases(const PhaseOption &option) : _option(option)
+Phases::Phases(const PhaseOption &option) : _option(option), _mem_before_init(0), _mem_after_run(0)
{
+ assert(prepareVmRSS());
+ _mem_before_init = getVmHWM();
+
if (_option.memory)
{
_mem_poll = std::make_unique<MemoryPoller>(std::chrono::milliseconds(option.memory_interval),
@@ -93,6 +97,8 @@ void Phases::run(const std::string &tag, const PhaseFunc &exec, const PhaseFunc
}
}
+ _mem_after_run = getVmHWM();
+
if (p == PhaseEnum::END_OF_PHASE)
{
return;
diff --git a/runtime/libs/benchmark/src/Result.cpp b/runtime/libs/benchmark/src/Result.cpp
index df573da92..8c1e2d2ea 100644
--- a/runtime/libs/benchmark/src/Result.cpp
+++ b/runtime/libs/benchmark/src/Result.cpp
@@ -57,7 +57,7 @@ double minTimeMs(const benchmark::Phase &phase)
double geomeanTimeMs(const benchmark::Phase &phase)
{
double log_sum = 0.0;
- for (auto t_us : phase.time)
+ for (auto &&t_us : phase.time)
{
log_sum += std::log(t_us / 1e3);
}
@@ -77,9 +77,9 @@ uint32_t averageMemoryKb(const benchmark::Phase &phase, int type)
return average<uint32_t, uint32_t>(phase.memory[type]);
}
-uint32_t peakMemory(const uint32_t memory[benchmark::PhaseEnum::END_OF_PHASE]
- [benchmark::MemoryType::END_OF_MEM_TYPE],
- int type)
+uint32_t peakMemory(
+ const uint32_t memory[benchmark::PhaseEnum::END_OF_PHASE][benchmark::MemoryType::END_OF_MEM_TYPE],
+ int type)
{
using namespace benchmark;
// tricky. handle WARMUP as EXECUTE
@@ -88,7 +88,7 @@ uint32_t peakMemory(const uint32_t memory[benchmark::PhaseEnum::END_OF_PHASE]
}
void printResultTime(
- const double time[benchmark::PhaseEnum::END_OF_PHASE][benchmark::FigureType::END_OF_FIG_TYPE])
+ const double time[benchmark::PhaseEnum::END_OF_PHASE][benchmark::FigureType::END_OF_FIG_TYPE])
{
using namespace benchmark;
@@ -119,8 +119,8 @@ void printResultTime(
std::cout << "===================================" << std::endl;
}
-void printResultMemory(const uint32_t memory[benchmark::PhaseEnum::END_OF_PHASE]
- [benchmark::MemoryType::END_OF_MEM_TYPE])
+void printResultMemory(
+ const uint32_t memory[benchmark::PhaseEnum::END_OF_PHASE][benchmark::MemoryType::END_OF_MEM_TYPE])
{
using namespace benchmark;
@@ -141,6 +141,15 @@ void printResultMemory(const uint32_t memory[benchmark::PhaseEnum::END_OF_PHASE]
}
}
+void printUsedPeakMemory(uint32_t init_memory, uint32_t peak_memory)
+{
+ uint32_t used_peak_memory = peak_memory - init_memory;
+ std::cout << "Used Peak Memory : " << used_peak_memory << " kb" << std::endl;
+ std::cout << "- HWM after run : " << peak_memory << " kb" << std::endl;
+ std::cout << "- HWM before init: " << init_memory << " kb" << std::endl;
+ std::cout << "===================================" << std::endl;
+}
+
} // namespace
namespace benchmark
@@ -148,16 +157,16 @@ namespace benchmark
Result::Result(const Phases &phases)
{
- const auto option = phases.option();
+ const auto &option = phases.option();
{
for (int i = PhaseEnum::MODEL_LOAD; i <= PhaseEnum::PREPARE; ++i)
{
- auto phase = phases.at(gPhaseStrings[i]);
+ const auto &phase = phases.at(gPhaseStrings[i]);
time[i][FigureType::MEAN] = averageTimeMs(phase);
}
int i = PhaseEnum::EXECUTE;
- auto exec_phase = phases.at(gPhaseStrings[i]);
+ const auto &exec_phase = phases.at(gPhaseStrings[i]);
time[i][FigureType::MEAN] = averageTimeMs(exec_phase);
time[i][FigureType::MAX] = maxTimeMs(exec_phase);
time[i][FigureType::MIN] = minTimeMs(exec_phase);
@@ -175,6 +184,8 @@ Result::Result(const Phases &phases)
}
}
}
+ init_memory = phases.mem_before_init();
+ peak_memory = phases.mem_after_run();
}
void printResult(const Result &result)
@@ -185,6 +196,7 @@ void printResult(const Result &result)
return;
printResultMemory(result.memory);
+ printUsedPeakMemory(result.init_memory, result.peak_memory);
}
// TODO There are necessary for a kind of output data file so that it doesn't have to be csv file
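As a worked example of the new summary block: with a VmHWM of 12000 kB sampled before init and 45000 kB sampled after the run, printUsedPeakMemory() reports the difference as the memory attributable to the workload:

Used Peak Memory : 33000 kb
- HWM after run : 45000 kb
- HWM before init: 12000 kb
===================================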
diff --git a/runtime/libs/misc/CMakeLists.txt b/runtime/libs/misc/CMakeLists.txt
index 557d403ec..3e02adbc3 100644
--- a/runtime/libs/misc/CMakeLists.txt
+++ b/runtime/libs/misc/CMakeLists.txt
@@ -1,11 +1,22 @@
# Library `nnfw_lib_misc`
-file(GLOB_RECURSE NNFW_UTILITY_SRCS "src/*.cpp")
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
-add_library(nnfw_lib_misc STATIC ${NNFW_UTILITY_SRCS})
+add_library(nnfw_lib_misc STATIC ${SOURCES})
target_include_directories(nnfw_lib_misc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
set_target_properties(nnfw_lib_misc PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_link_libraries(nnfw_lib_misc PRIVATE nnfw_common)
target_link_libraries(nnfw_lib_misc PRIVATE nnfw_coverage)
-add_executable(nnfw_tensor_index_iterator "examples/tensor_index_iterator.cpp")
-target_link_libraries(nnfw_tensor_index_iterator nnfw_lib_misc)
+if(NOT ENABLE_TEST)
+ return()
+endif(NOT ENABLE_TEST)
+
+add_executable(nnfw_lib_misc_test ${TESTS})
+target_link_libraries(nnfw_lib_misc_test PRIVATE nnfw_lib_misc)
+target_link_libraries(nnfw_lib_misc_test PRIVATE nnfw_coverage)
+target_link_libraries(nnfw_lib_misc_test PUBLIC gtest gtest_main ${LIB_PTHREAD})
+
+add_test(nnfw_lib_misc_test nnfw_lib_misc_test)
+install(TARGETS nnfw_lib_misc_test DESTINATION unittest)
diff --git a/runtime/libs/misc/include/misc/EnvConfigSource.h b/runtime/libs/misc/include/misc/EnvConfigSource.h
new file mode 100644
index 000000000..63c8ae9c0
--- /dev/null
+++ b/runtime/libs/misc/include/misc/EnvConfigSource.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_MISC_ENV_CONFIG_SOURCE_H__
+#define __NNFW_MISC_ENV_CONFIG_SOURCE_H__
+
+#include "GeneralConfigSource.h"
+
+#include <unordered_map>
+
+namespace nnfw
+{
+namespace misc
+{
+
+class EnvConfigSource final : public GeneralConfigSource
+{
+public:
+ std::string get(const std::string &key) const override;
+
+private:
+ std::unordered_map<std::string, std::string> _default_attributes;
+};
+
+} // namespace misc
+} // namespace nnfw
+
+#endif // __NNFW_MISC_ENV_CONFIG_SOURCE_H__
diff --git a/runtime/libs/misc/include/misc/GeneralConfigSource.h b/runtime/libs/misc/include/misc/GeneralConfigSource.h
new file mode 100644
index 000000000..a3de66e81
--- /dev/null
+++ b/runtime/libs/misc/include/misc/GeneralConfigSource.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_MISC_GENERAL_CONFIG_SOURCE_H__
+#define __NNFW_MISC_GENERAL_CONFIG_SOURCE_H__
+
+#include "IConfigSource.h"
+
+#include <unordered_map>
+
+namespace nnfw
+{
+namespace misc
+{
+
+class GeneralConfigSource : public IConfigSource
+{
+public:
+ GeneralConfigSource() = default;
+
+ std::string get(const std::string &key) const override;
+ void set(const std::string &key, const std::string &val);
+
+private:
+ std::unordered_map<std::string, std::string> _map;
+};
+
+} // namespace misc
+} // namespace nnfw
+
+#endif // __NNFW_MISC_GENERAL_CONFIG_SOURCE_H__
diff --git a/runtime/libs/misc/include/misc/IConfigSource.h b/runtime/libs/misc/include/misc/IConfigSource.h
new file mode 100644
index 000000000..fe2c48ecf
--- /dev/null
+++ b/runtime/libs/misc/include/misc/IConfigSource.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_MISC_I_CONFIG_SOURCE_H__
+#define __NNFW_MISC_I_CONFIG_SOURCE_H__
+
+#include <string>
+
+namespace nnfw
+{
+namespace misc
+{
+
+struct IConfigSource
+{
+ /**
+ * @brief Destroy the IConfigSource object
+ */
+ virtual ~IConfigSource() = default;
+
+ /**
+ * @brief get the value for the matching key
+ *
+ * @param key string key to search
+ * @return string value associated with the key
+ */
+ virtual std::string get(const std::string &key) const = 0;
+};
+
+} // namespace misc
+} // namespace nnfw
+
+#endif // __NNFW_MISC_I_CONFIG_SOURCE_H__
diff --git a/runtime/libs/misc/include/misc/RandomGenerator.h b/runtime/libs/misc/include/misc/RandomGenerator.h
index 8d26b8c74..8da4f7f20 100644
--- a/runtime/libs/misc/include/misc/RandomGenerator.h
+++ b/runtime/libs/misc/include/misc/RandomGenerator.h
@@ -76,6 +76,7 @@ private:
std::normal_distribution<float> _dist;
};
+template <> int8_t RandomGenerator::generate<int8_t>(void);
template <> uint8_t RandomGenerator::generate<uint8_t>(void);
template <> bool RandomGenerator::generate<bool>(void);
template <> int32_t RandomGenerator::generate<int32_t>(void);
diff --git a/runtime/libs/misc/include/misc/feature/Index.h b/runtime/libs/misc/include/misc/feature/Index.h
index a361d8dd2..09d65a59a 100644
--- a/runtime/libs/misc/include/misc/feature/Index.h
+++ b/runtime/libs/misc/include/misc/feature/Index.h
@@ -62,7 +62,7 @@ public:
* @param[in] col The width index
*/
Index(int32_t batch, int32_t ch, int32_t row, int32_t col)
- : _batch{batch}, _ch{ch}, _row{row}, _col{col}
+ : _batch{batch}, _ch{ch}, _row{row}, _col{col}
{
// DO NOTHING
}
diff --git a/runtime/libs/misc/include/misc/feature/Shape.h b/runtime/libs/misc/include/misc/feature/Shape.h
index 09881f58b..2c31b457c 100644
--- a/runtime/libs/misc/include/misc/feature/Shape.h
+++ b/runtime/libs/misc/include/misc/feature/Shape.h
@@ -64,7 +64,7 @@ struct Shape
* @param[in] width The width value
*/
Shape(int32_t batch, int32_t depth, int32_t height, int32_t width)
- : N{batch}, C{depth}, H{height}, W{width}
+ : N{batch}, C{depth}, H{height}, W{width}
{
// DO NOTHING
}
diff --git a/runtime/libs/misc/include/misc/kernel/Shape.h b/runtime/libs/misc/include/misc/kernel/Shape.h
index 27d6a8bf0..176db0a11 100644
--- a/runtime/libs/misc/include/misc/kernel/Shape.h
+++ b/runtime/libs/misc/include/misc/kernel/Shape.h
@@ -55,7 +55,7 @@ struct Shape
* @param[in] width The width index
*/
Shape(int32_t count, int32_t depth, int32_t height, int32_t width)
- : N{count}, C{depth}, H{height}, W{width}
+ : N{count}, C{depth}, H{height}, W{width}
{
// DO NOTHING
}
diff --git a/runtime/libs/misc/include/misc/polymorphic_downcast.h b/runtime/libs/misc/include/misc/polymorphic_downcast.h
index 412b864e6..ee885eb70 100644
--- a/runtime/libs/misc/include/misc/polymorphic_downcast.h
+++ b/runtime/libs/misc/include/misc/polymorphic_downcast.h
@@ -27,9 +27,7 @@ namespace misc
template <typename DstType, typename SrcType> inline DstType polymorphic_downcast(SrcType *x)
{
-#ifndef __ANDROID__
assert(dynamic_cast<DstType>(x) == x);
-#endif
return static_cast<DstType>(x);
}
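Dropping the __ANDROID__ guard means debug builds now verify the cast with dynamic_cast on every platform; release builds (NDEBUG) still compile down to a plain static_cast. A usage sketch with hypothetical types:

#include "misc/polymorphic_downcast.h"

struct Base
{
  virtual ~Base() = default;
};

struct Derived : Base
{
  int payload = 42;
};

int read_payload(Base *b)
{
  // Asserts (debug builds only) that b really points at a Derived.
  auto *d = nnfw::misc::polymorphic_downcast<Derived *>(b);
  return d->payload;
}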
diff --git a/runtime/libs/misc/include/misc/string_helpers.h b/runtime/libs/misc/include/misc/string_helpers.h
index 46fecca71..c9d72034f 100644
--- a/runtime/libs/misc/include/misc/string_helpers.h
+++ b/runtime/libs/misc/include/misc/string_helpers.h
@@ -50,7 +50,7 @@ inline std::vector<std::string> split(const std::string &s, char delim)
std::vector<std::string> elems;
while (std::getline(ss, item, delim))
{
- elems.push_back(std::move(item));
+ elems.push_back(item);
}
return elems;
}
diff --git a/runtime/libs/misc/include/misc/tensor/Object.h b/runtime/libs/misc/include/misc/tensor/Object.h
index cba4f1baf..15ad6da4f 100644
--- a/runtime/libs/misc/include/misc/tensor/Object.h
+++ b/runtime/libs/misc/include/misc/tensor/Object.h
@@ -74,9 +74,8 @@ public:
_values.resize(_shape.dim(0) * _stride.at(0));
// Set 'value'
- iterate(_shape) << [this, &fn](const Index &index) {
- _values.at(_stride.offset(index)) = fn(_shape, index);
- };
+ iterate(_shape) <<
+ [this, &fn](const Index &index) { _values.at(_stride.offset(index)) = fn(_shape, index); };
}
}
diff --git a/runtime/libs/misc/include/misc/tensor/Zipper.h b/runtime/libs/misc/include/misc/tensor/Zipper.h
index 8f0ec4ab6..b1ca3d003 100644
--- a/runtime/libs/misc/include/misc/tensor/Zipper.h
+++ b/runtime/libs/misc/include/misc/tensor/Zipper.h
@@ -48,7 +48,7 @@ public:
* @param[in] rhs @c Reader object of a tensor
*/
Zipper(const Shape &shape, const Reader<T> &lhs, const Reader<T> &rhs)
- : _shape{shape}, _lhs{lhs}, _rhs{rhs}
+ : _shape{shape}, _lhs{lhs}, _rhs{rhs}
{
// DO NOTHING
}
@@ -63,7 +63,7 @@ public:
template <typename Callable> void zip(Callable cb) const
{
iterate(_shape) <<
- [this, &cb](const Index &index) { cb(index, _lhs.at(index), _rhs.at(index)); };
+ [this, &cb](const Index &index) { cb(index, _lhs.at(index), _rhs.at(index)); };
}
private:
diff --git a/runtime/libs/tflite/src/interp/FunctionBuilder.cpp b/runtime/libs/misc/src/EnvConfigSource.cpp
index 599a4f393..3abc9d196 100644
--- a/runtime/libs/tflite/src/interp/FunctionBuilder.cpp
+++ b/runtime/libs/misc/src/EnvConfigSource.cpp
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,21 +14,27 @@
* limitations under the License.
*/
-#include "tflite/interp/FunctionBuilder.h"
+#include "misc/EnvConfigSource.h"
+
+#include <cstdlib>
namespace nnfw
{
-namespace tflite
+namespace misc
{
-std::unique_ptr<::tflite::Interpreter> FunctionBuilder::build(void) const
+std::string EnvConfigSource::get(const std::string &key) const
{
- auto res = std::unique_ptr<::tflite::Interpreter>{new ::tflite::Interpreter};
-
- _fn(*res);
-
- return res;
+ const char *value = std::getenv(key.c_str());
+ if (value != nullptr)
+ {
+ return value;
+ }
+ else
+ {
+ return GeneralConfigSource::get(key);
+ }
}
-} // namespace tflite
+} // namespace misc
} // namespace nnfw
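The lookup order is environment first, stored map second. A hypothetical usage sketch (the key name "BACKENDS" is illustrative only; setenv/unsetenv are POSIX):

#include "misc/EnvConfigSource.h"

#include <cstdlib>
#include <iostream>

int main()
{
  nnfw::misc::EnvConfigSource config;
  config.set("BACKENDS", "cpu"); // set() is inherited from GeneralConfigSource

  setenv("BACKENDS", "acl_neon", /*overwrite=*/1);
  std::cout << config.get("BACKENDS") << std::endl; // "acl_neon": env wins

  unsetenv("BACKENDS");
  std::cout << config.get("BACKENDS") << std::endl; // "cpu": falls back to the map
  return 0;
}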
diff --git a/runtime/libs/tflite/src/Quantization.cpp b/runtime/libs/misc/src/GeneralConfigSource.cpp
index 9c162c342..298c1663e 100644
--- a/runtime/libs/tflite/src/Quantization.cpp
+++ b/runtime/libs/misc/src/GeneralConfigSource.cpp
@@ -1,11 +1,11 @@
/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,9 +14,27 @@
* limitations under the License.
*/
-#include "tflite/Quantization.h"
+#include "misc/GeneralConfigSource.h"
-TfLiteQuantizationParams make_default_quantization(void)
+namespace nnfw
{
- return TfLiteQuantizationParams{0.0f, 0};
+namespace misc
+{
+
+std::string GeneralConfigSource::get(const std::string &key) const
+{
+ auto itr = _map.find(key);
+ if (itr == _map.end())
+ {
+ return "";
+ }
+ else
+ {
+ return itr->second;
+ }
}
+
+void GeneralConfigSource::set(const std::string &key, const std::string &val) { _map[key] = val; }
+
+} // namespace misc
+} // namespace nnfw
diff --git a/runtime/libs/misc/src/RandomGenerator.cpp b/runtime/libs/misc/src/RandomGenerator.cpp
index e7fbc10ca..af072326b 100644
--- a/runtime/libs/misc/src/RandomGenerator.cpp
+++ b/runtime/libs/misc/src/RandomGenerator.cpp
@@ -21,6 +21,34 @@ namespace nnfw
namespace misc
{
+template <> int8_t RandomGenerator::generate<int8_t>(void)
+{
+ // The value of type_range is 255.
+ float type_range = static_cast<float>(std::numeric_limits<int8_t>::max()) -
+ static_cast<float>(std::numeric_limits<int8_t>::min());
+ // Most _dist values range from -5.0 to 5.0.
+ float min_range = -5.0f;
+ float max_range = 5.0f;
+ // NOTE shifted_relative_val has a Gaussian distribution whose original mean was 0 and
+ // standard deviation was 2. Its values are then shifted so that the mean is 127.5 and the
+ // range is about [0, 255].
+ float shifted_relative_val = (_dist(_rand) - min_range) * type_range / (max_range - min_range);
+
+ // If shifted_relative_val is out of range, it is clamped to the end points of the range.
+ if (shifted_relative_val < -128.0f)
+ {
+ return -128;
+ }
+ else if (shifted_relative_val > type_range)
+ {
+ return 127;
+ }
+
+ // Convert shifted_relative_val from float to int8
+ return static_cast<int8_t>(shifted_relative_val);
+}
+
template <> uint8_t RandomGenerator::generate<uint8_t>(void)
{
// The value of type_range is 255.
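The int8_t specialization applies the same linear mapping as the uint8_t one: a draw g from N(0, 2) is transformed by (g - min_range) * type_range / (max_range - min_range). A minimal sketch of that arithmetic with the constants from generate<int8_t>():

float map_to_byte_range(float g)
{
  // min_range = -5, max_range = 5, type_range = 255.
  // g = -5 -> 0, g = 0 -> 127.5, g = 5 -> 255; out-of-range results are
  // clamped to the int8_t end points by the caller.
  return (g + 5.0f) * 255.0f / 10.0f;
}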
diff --git a/runtime/libs/misc/src/string_helpers.test.cpp b/runtime/libs/misc/src/string_helpers.test.cpp
new file mode 100644
index 000000000..1111425d0
--- /dev/null
+++ b/runtime/libs/misc/src/string_helpers.test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "misc/string_helpers.h"
+
+#include <gtest/gtest.h>
+
+TEST(StringHelpersTest, split)
+{
+ const std::string example = "abc;def;ghi";
+
+ auto str_vector = nnfw::misc::split(example, ';');
+
+ ASSERT_EQ(str_vector.size(), 3);
+ EXPECT_STREQ(str_vector[0].c_str(), "abc");
+ EXPECT_STREQ(str_vector[1].c_str(), "def");
+ EXPECT_STREQ(str_vector[2].c_str(), "ghi");
+}
+
+TEST(StringHelpersTest, neg_split_empty)
+{
+ const std::string example = "";
+
+ auto str_vector = nnfw::misc::split(example, ';');
+
+ ASSERT_EQ(str_vector.size(), 0);
+}
+
+TEST(StringHelpersTest, neg_nonsplit)
+{
+ const std::string example = "abc;def;ghi";
+
+ auto str_vector = nnfw::misc::split(example, ':');
+
+ ASSERT_EQ(str_vector.size(), 1);
+ EXPECT_STREQ(str_vector[0].c_str(), example.c_str());
+}
+
+TEST(StringHelpersTest, append)
+{
+ auto append_str = nnfw::misc::str("abc", "-", 1);
+
+ EXPECT_STREQ(append_str.c_str(), "abc-1");
+}
+
+TEST(StringHelpersTest, neg_append_nullstr)
+{
+ const char *null_str = nullptr;
+ auto append_str = nnfw::misc::str(null_str, null_str);
+
+ ASSERT_EQ(append_str.size(), 0);
+}
+
+TEST(StringHelpersTest, join)
+{
+ const std::vector<std::string> example = {"abc", "def", "ghi"};
+
+ auto join_str = nnfw::misc::join(example.begin(), example.end(), ";");
+ EXPECT_STREQ(join_str.c_str(), "abc;def;ghi");
+}
+
+TEST(StringHelpersTest, neg_join_empty)
+{
+ const std::vector<std::string> example = {};
+
+ auto join_str = nnfw::misc::join(example.begin(), example.end(), ";");
+ ASSERT_EQ(join_str.size(), 0);
+}
diff --git a/runtime/libs/misc/src/tensor/Comparator.cpp b/runtime/libs/misc/src/tensor/Comparator.cpp
index 80a18c11a..5fcf38cc8 100644
--- a/runtime/libs/misc/src/tensor/Comparator.cpp
+++ b/runtime/libs/misc/src/tensor/Comparator.cpp
@@ -33,18 +33,18 @@ std::vector<Diff<float>> Comparator::compare(const Shape &shape, const Reader<fl
std::vector<Diff<float>> res;
zip(shape, expected, obtained) <<
- [&](const Index &index, float expected_value, float obtained_value) {
- if (!_compare_fn(expected_value, obtained_value))
- {
- res.emplace_back(index, expected_value, obtained_value);
- }
-
- // Update max_diff_index, if necessary
- if (observer != nullptr)
- {
- observer->notify(index, expected_value, obtained_value);
- }
- };
+ [&](const Index &index, float expected_value, float obtained_value) {
+ if (!_compare_fn(expected_value, obtained_value))
+ {
+ res.emplace_back(index, expected_value, obtained_value);
+ }
+
+ // Update max_diff_index, if necessary
+ if (observer != nullptr)
+ {
+ observer->notify(index, expected_value, obtained_value);
+ }
+ };
return res;
}
diff --git a/runtime/libs/misc/src/tensor/IndexEnumerator.test.cpp b/runtime/libs/misc/src/tensor/IndexEnumerator.test.cpp
new file mode 100644
index 000000000..4cff6067f
--- /dev/null
+++ b/runtime/libs/misc/src/tensor/IndexEnumerator.test.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "misc/tensor/IndexEnumerator.h"
+
+#include <vector>
+#include <algorithm>
+
+#include <gtest/gtest.h>
+
+using nnfw::misc::tensor::Shape;
+using nnfw::misc::tensor::Index;
+using nnfw::misc::tensor::IndexEnumerator;
+
+TEST(MiscIndexEnumeratorTest, iterate_full_range)
+{
+ const uint32_t H = 3;
+ const uint32_t W = 4;
+
+ const Shape shape{H, W};
+
+ std::vector<uint32_t> count;
+
+ count.resize(H * W, 0);
+
+ for (IndexEnumerator e{shape}; e.valid(); e.advance())
+ {
+ const auto &ind = e.curr();
+
+ ASSERT_EQ(2, ind.rank());
+ count.at(ind.at(0) * W + ind.at(1)) += 1;
+ }
+
+ ASSERT_TRUE(std::all_of(count.begin(), count.end(), [](uint32_t n) { return n == 1; }));
+}
+
+TEST(MiscIndexEnumeratorTest, neg_zero_rank_shape)
+{
+ // Test the abnormal case of an empty shape
+ // It is expected not to throw any exception and to do nothing
+ const Shape shape{};
+ IndexEnumerator e{shape};
+ ASSERT_NO_THROW(e.valid());
+ ASSERT_NO_THROW(e.advance());
+ SUCCEED();
+}
diff --git a/runtime/libs/misc/examples/tensor_index_iterator.cpp b/runtime/libs/misc/src/tensor/IndexIterator.test.cpp
index 590b433df..875786bdd 100644
--- a/runtime/libs/misc/examples/tensor_index_iterator.cpp
+++ b/runtime/libs/misc/src/tensor/IndexIterator.test.cpp
@@ -16,24 +16,21 @@
#include "misc/tensor/IndexIterator.h"
-#include <array>
+#include <gtest/gtest.h>
-#include <iostream>
#include <algorithm>
+#include <array>
-#include <cassert>
+using namespace nnfw::misc::tensor;
-void test_iterate(void)
+TEST(MiscIndexIteratorTest, iterate)
{
- const nnfw::misc::tensor::Shape shape{3, 4, 7};
+ const Shape shape{3, 4, 7};
std::array<int, 3 * 4 * 7> array;
array.fill(0);
- using nnfw::misc::tensor::Index;
- using nnfw::misc::tensor::iterate;
-
iterate(shape) << [&](const Index &index) {
assert(index.rank() == shape.rank());
@@ -50,25 +47,15 @@ void test_iterate(void)
array[offset] += 1;
};
- assert(std::all_of(array.begin(), array.end(), [](int num) { return num == 1; }));
+ ASSERT_TRUE(std::all_of(array.begin(), array.end(), [](int num) { return num == 1; }));
}
-int main(int argc, char **argv)
+TEST(MiscIndexIteratorTest, neg_zero_rank_shape)
{
- test_iterate();
-
- nnfw::misc::tensor::Shape shape{3, 4, 3, 4};
-
- std::cout << "Iterate over tensor{3, 4, 3, 4}" << std::endl;
-
- nnfw::misc::tensor::iterate(shape) << [](const nnfw::misc::tensor::Index &index) {
- std::cout << "rank: " << index.rank() << std::endl;
-
- for (uint32_t d = 0; d < index.rank(); ++d)
- {
- std::cout << " offset(" << d << ") = " << index.at(d) << std::endl;
- }
- };
+ // Test the abnormal case of an empty shape
+ // It is expected not to throw any exception and to do nothing
+ const Shape shape{};
- return 0;
+ ASSERT_NO_THROW(iterate(shape) << ([](const Index &index) {}));
+ SUCCEED();
}
diff --git a/runtime/libs/ndarray/CMakeLists.txt b/runtime/libs/ndarray/CMakeLists.txt
index b040f5115..8d0ba0487 100644
--- a/runtime/libs/ndarray/CMakeLists.txt
+++ b/runtime/libs/ndarray/CMakeLists.txt
@@ -3,8 +3,6 @@ add_library(ndarray STATIC src/Array.cpp src/ContiguousSpan.cpp)
set_target_properties(ndarray PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(ndarray PUBLIC include)
-#can't make this private because of c++ templates
-target_include_directories(ndarray PUBLIC src)
option(NDARRAY_INLINE_TEMPLATES "Set to ON to disable extern declarations for common types")
@@ -15,5 +13,16 @@ endif()
target_link_libraries(ndarray PRIVATE nnfw_common)
target_link_libraries(ndarray PRIVATE nnfw_coverage)
-add_subdirectory(test)
+if(NOT ENABLE_TEST)
+ return()
+endif(NOT ENABLE_TEST)
+
+add_executable(ndarray_test src/Array.test.cpp src/ContiguousSpan.test.cpp)
+target_link_libraries(ndarray_test PRIVATE ndarray)
+target_link_libraries(ndarray_test PRIVATE nnfw_coverage)
+target_link_libraries(ndarray_test PUBLIC gtest gtest_main ${LIB_PTHREAD})
+
+add_test(ndarray_test ndarray_test)
+install(TARGETS ndarray_test DESTINATION unittest)
+
add_subdirectory(example)
diff --git a/runtime/libs/ndarray/include/ndarray/Array.h b/runtime/libs/ndarray/include/ndarray/Array.h
index 3890cc26b..568fe1c77 100644
--- a/runtime/libs/ndarray/include/ndarray/Array.h
+++ b/runtime/libs/ndarray/include/ndarray/Array.h
@@ -22,37 +22,21 @@
#include "ContiguousSpan.h"
#include "Shape.h"
-#if __cplusplus < 201402L
-#include "detail/cxx14.h" //integer_sequence and make_index_dequence definitions
-#else
-#include <utility>
-#endif
-
#include <algorithm>
-#include <cassert>
-#include <type_traits>
#include <array>
-#include <tuple>
+#include <cassert>
#include <cstddef>
+#include <tuple>
+#include <type_traits>
+#include <utility>
namespace ndarray
{
-// there is no index_sequence before c++14
-#if __cplusplus < 201402L
-
-template <size_t... Nums> using index_sequence = cxx14::index_sequence<Nums...>;
-
-template <size_t Num> using make_index_sequence = cxx14::make_index_sequence<Num>;
-
-#else
-
template <size_t... Nums> using index_sequence = std::index_sequence<Nums...>;
template <size_t _Num> using make_index_sequence = std::make_index_sequence<_Num>;
-#endif //__cplusplus < 201402L
-
struct Strides
{
explicit Strides(Shape s) : _strides{} { fillStrides(s); }
@@ -157,8 +141,8 @@ private:
size_t offset(index_sequence<Nums...> seq, Ts... x) const noexcept
{
static_assert(
- sizeof...(Ts) == sizeof...(Nums),
- "Sanity check failed. Generated index sequence size is not equal to argument count");
+ sizeof...(Ts) == sizeof...(Nums),
+ "Sanity check failed. Generated index sequence size is not equal to argument count");
return _strides.offset(seq, x...);
}
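With C++14 guaranteed, the bundled cxx14 shim is gone and std::make_index_sequence is used directly. The pattern behind Strides::offset() pairs each runtime index with its compile-time position; a self-contained sketch of that expansion (my own illustration, not the library code):

#include <cstddef>
#include <utility>

template <typename... Ts, size_t... Nums>
size_t flat_offset(const size_t (&strides)[sizeof...(Ts)],
                   std::index_sequence<Nums...>, Ts... idx)
{
  size_t off = 0;
  // Expands to off += strides[0] * idx0; off += strides[1] * idx1; ...
  size_t dummy[] = {(off += strides[Nums] * static_cast<size_t>(idx))...};
  (void)dummy;
  return off;
}

// For a 3x4 row-major array the strides are {4, 1}:
//   size_t strides[] = {4, 1};
//   flat_offset(strides, std::make_index_sequence<2>{}, 1, 2) == 6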
diff --git a/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h b/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h
index 8caa6a686..b322b77db 100644
--- a/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h
+++ b/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h
@@ -37,7 +37,7 @@ public:
template <typename It>
explicit ContiguousSpan(It first, It last) noexcept
- : _data(&*first), _len(std::distance(first, last))
+ : _data(&*first), _len(std::distance(first, last))
{
}
diff --git a/runtime/libs/ndarray/src/Array.test.cpp b/runtime/libs/ndarray/src/Array.test.cpp
new file mode 100644
index 000000000..15e67600d
--- /dev/null
+++ b/runtime/libs/ndarray/src/Array.test.cpp
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ndarray/Array.h"
+
+#include <gtest/gtest.h>
+
+using namespace ndarray;
+
+TEST(NDArrayArrayTests, basic_data_test)
+{
+ float raw_data[] = {1, 2, 3, 4};
+ int32_t raw_data_int[] = {1, 2, 3, 4};
+ uint32_t raw_data_uint[] = {1, 2, 3, 4};
+ int8_t raw_data_int8[] = {1, 2, 3, 4};
+
+ Array<float> data22{raw_data, {2, 2}};
+ Array<int32_t> data22_int{raw_data_int, {2, 2}};
+ Array<uint32_t> data22_uint{raw_data_uint, {2, 2}};
+ Array<int8_t> data22_int8{raw_data_int8, {2, 2}};
+
+ ASSERT_FLOAT_EQ(data22.at(0, 0), 1);
+ ASSERT_FLOAT_EQ(data22.at(0, 1), 2);
+ ASSERT_FLOAT_EQ(data22.at(1, 0), 3);
+ ASSERT_FLOAT_EQ(data22.at(1, 1), 4);
+ ASSERT_EQ(data22.shape().rank(), 2);
+ ASSERT_EQ(data22.shape().dim(0), 2);
+ ASSERT_EQ(data22.shape().dim(1), 2);
+
+ Array<float> data14{raw_data, {1, 4}};
+ ASSERT_FLOAT_EQ(data14.at(0, 0), 1);
+ ASSERT_FLOAT_EQ(data14.at(0, 1), 2);
+ ASSERT_FLOAT_EQ(data14.at(0, 2), 3);
+ ASSERT_FLOAT_EQ(data14.at(0, 3), 4);
+ ASSERT_EQ(data14.shape().rank(), 2);
+ ASSERT_EQ(data14.shape().dim(0), 1);
+ ASSERT_EQ(data14.shape().dim(1), 4);
+
+ // <float, false>
+ {
+ ContiguousSpan<float> cs = data22.flat();
+ ASSERT_EQ(cs.size(), 4);
+ ASSERT_FLOAT_EQ(cs.at(3), 4);
+
+ ContiguousSpan<float> cs2 = std::move(cs);
+ ASSERT_EQ(cs2.size(), 4);
+ ASSERT_FLOAT_EQ(cs2.at(3), 4);
+
+ float sum = 0;
+ for (auto it = cs2.begin(); it < cs2.end(); it++)
+ {
+ sum += *it;
+ }
+ ASSERT_EQ(sum, 10);
+
+ std::vector<float> array_data{1, 2, 3, 4};
+ auto cs3 = std::make_unique<ContiguousSpan<float>>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs3->size(), 4);
+ ASSERT_FLOAT_EQ(cs3->at(3), 4);
+
+ auto cs4 = std::move(cs3);
+ ASSERT_EQ(cs3, nullptr);
+ ASSERT_EQ(cs4->size(), 4);
+ ASSERT_FLOAT_EQ(cs4->at(3), 4);
+ }
+
+ // <float, true>
+ {
+ ContiguousSpan<float, true> cs = data22.flat();
+ ASSERT_EQ(cs.size(), 4);
+ ASSERT_FLOAT_EQ(cs.at(3), 4);
+
+ ContiguousSpan<float, true> cs2 = std::move(cs);
+ ASSERT_EQ(cs2.size(), 4);
+ ASSERT_FLOAT_EQ(cs2.at(3), 4);
+
+ float sum = 0;
+ for (auto it = cs2.begin(); it < cs2.end(); it++)
+ {
+ sum += *it;
+ }
+ ASSERT_FLOAT_EQ(sum, 10);
+
+ std::vector<float> array_data{1, 2, 3, 4};
+ auto cs3 = std::make_unique<ContiguousSpan<float, true>>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs3->size(), 4);
+ ASSERT_FLOAT_EQ(cs3->at(3), 4);
+
+ auto cs4 = std::move(cs3);
+ ASSERT_EQ(cs3, nullptr);
+ ASSERT_EQ(cs4->size(), 4);
+ ASSERT_FLOAT_EQ(cs4->at(3), 4);
+ }
+
+ // <int32_t, false>
+ {
+ ContiguousSpan<int32_t> cs = data22_int.flat();
+ ASSERT_EQ(cs.size(), 4);
+ ASSERT_EQ(cs.at(3), 4);
+
+ ContiguousSpan<int32_t> cs2 = std::move(cs);
+ ASSERT_EQ(cs2.size(), 4);
+ ASSERT_EQ(cs2.at(3), 4);
+
+ int32_t sum = 0;
+ for (auto it = cs2.begin(); it < cs2.end(); it++)
+ {
+ sum += *it;
+ }
+ ASSERT_EQ(sum, 10);
+
+ std::vector<int32_t> array_data{1, 2, 3, 4};
+ auto cs3 = std::make_unique<ContiguousSpan<int32_t>>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs3->size(), 4);
+ ASSERT_EQ(cs3->at(3), 4);
+
+ auto cs4 = std::move(cs3);
+ ASSERT_EQ(cs3, nullptr);
+ ASSERT_EQ(cs4->size(), 4);
+ ASSERT_EQ(cs4->at(3), 4);
+ }
+
+ // <int32_t, true>
+ {
+ ContiguousSpan<int32_t, true> cs = data22_int.flat();
+ ASSERT_EQ(cs.size(), 4);
+ ASSERT_EQ(cs.at(3), 4);
+
+ ContiguousSpan<int32_t, true> cs2 = std::move(cs);
+ ASSERT_EQ(cs2.size(), 4);
+ ASSERT_EQ(cs2.at(3), 4);
+
+ int32_t sum = 0;
+ for (auto it = cs2.begin(); it < cs2.end(); it++)
+ {
+ sum += *it;
+ }
+ ASSERT_EQ(sum, 10);
+
+ std::vector<int32_t> array_data{1, 2, 3, 4};
+ auto cs3 =
+ std::make_unique<ContiguousSpan<int32_t, true>>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs3->size(), 4);
+ ASSERT_EQ(cs3->at(3), 4);
+
+ auto cs4 = std::move(cs3);
+ ASSERT_EQ(cs3, nullptr);
+ ASSERT_EQ(cs4->size(), 4);
+ ASSERT_EQ(cs4->at(3), 4);
+ }
+
+ // <uint32_t, false>
+ {
+ ContiguousSpan<uint32_t> cs = data22_uint.flat();
+ ASSERT_EQ(cs.size(), 4);
+ ASSERT_EQ(cs.at(3), 4);
+
+ ContiguousSpan<uint32_t> cs2 = std::move(cs);
+ ASSERT_EQ(cs2.size(), 4);
+ ASSERT_EQ(cs2.at(3), 4);
+
+ uint32_t sum = 0;
+ for (auto it = cs2.begin(); it < cs2.end(); it++)
+ {
+ sum += *it;
+ }
+ ASSERT_EQ(sum, 10);
+
+ std::vector<uint32_t> array_data{1, 2, 3, 4};
+ auto cs3 = std::make_unique<ContiguousSpan<uint32_t>>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs3->size(), 4);
+ ASSERT_EQ(cs3->at(3), 4);
+
+ auto cs4 = std::move(cs3);
+ ASSERT_EQ(cs3, nullptr);
+ ASSERT_EQ(cs4->size(), 4);
+ }
+
+ // <uint32_t, true>
+ {
+ ContiguousSpan<uint32_t, true> cs = data22_uint.flat();
+ ASSERT_EQ(cs.size(), 4);
+ ASSERT_EQ(cs.at(3), 4);
+
+ ContiguousSpan<uint32_t, true> cs2 = std::move(cs);
+ ASSERT_EQ(cs2.size(), 4);
+ ASSERT_EQ(cs2.at(3), 4);
+
+ uint32_t sum = 0;
+ for (auto it = cs2.begin(); it < cs2.end(); it++)
+ {
+ sum += *it;
+ }
+ ASSERT_EQ(sum, 10);
+
+ std::vector<uint32_t> array_data{1, 2, 3, 4};
+ auto cs3 =
+ std::make_unique<ContiguousSpan<uint32_t, true>>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs3->size(), 4);
+ ASSERT_EQ(cs3->at(3), 4);
+
+ auto cs4 = std::move(cs3);
+ ASSERT_EQ(cs3, nullptr);
+ ASSERT_EQ(cs4->size(), 4);
+ ASSERT_EQ(cs4->at(3), 4);
+ }
+
+ // <int8_t, false>
+ {
+ ContiguousSpan<int8_t> cs = data22_int8.flat();
+ ASSERT_EQ(cs.size(), 4);
+ ASSERT_FLOAT_EQ(cs.at(3), 4);
+
+ ContiguousSpan<int8_t> cs2 = std::move(cs);
+ ASSERT_EQ(cs2.size(), 4);
+ ASSERT_FLOAT_EQ(cs2.at(3), 4);
+
+ int8_t sum = 0;
+ for (auto it = cs2.begin(); it < cs2.end(); it++)
+ {
+ sum += *it;
+ }
+ ASSERT_EQ(sum, 10);
+
+ std::vector<int8_t> array_data{1, 2, 3, 4};
+ auto cs3 = std::make_unique<ContiguousSpan<int8_t>>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs3->size(), 4);
+ ASSERT_EQ(cs3->at(3), 4);
+
+ auto cs4 = std::move(cs3);
+ ASSERT_EQ(cs3, nullptr);
+ ASSERT_EQ(cs4->size(), 4);
+ ASSERT_EQ(cs4->at(3), 4);
+
+ auto cs5 = ContiguousSpan<int8_t>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs5.size(), 4);
+ ASSERT_EQ(cs5.at(3), 4);
+ }
+
+ // <int8_t, true>
+ {
+ ContiguousSpan<int8_t, true> cs = data22_int8.flat();
+ ASSERT_EQ(cs.size(), 4);
+    ASSERT_EQ(cs.at(3), 4);
+
+    ContiguousSpan<int8_t, true> cs2 = std::move(cs);
+    ASSERT_EQ(cs2.size(), 4);
+    ASSERT_EQ(cs2.at(3), 4);
+
+ int8_t sum = 0;
+ for (auto it = cs2.begin(); it < cs2.end(); it++)
+ {
+ sum += *it;
+ }
+ ASSERT_EQ(sum, 10);
+
+ std::vector<int8_t> array_data{1, 2, 3, 4};
+ auto cs3 = std::make_unique<ContiguousSpan<int8_t, true>>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs3->size(), 4);
+ ASSERT_EQ(cs3->at(3), 4);
+
+ auto cs4 = std::move(cs3);
+ ASSERT_EQ(cs3, nullptr);
+ ASSERT_EQ(cs4->size(), 4);
+ ASSERT_EQ(cs4->at(3), 4);
+
+ auto cs5 = ContiguousSpan<int8_t, true>(array_data.begin(), array_data.end());
+ ASSERT_EQ(cs5.size(), 4);
+ ASSERT_EQ(cs5.at(3), 4);
+ }
+
+ Array<float> lv = std::move(data14);
+ ASSERT_FLOAT_EQ(lv.at(0, 0), 1);
+ ASSERT_FLOAT_EQ(lv.at(0, 1), 2);
+ ASSERT_FLOAT_EQ(lv.at(0, 2), 3);
+ ASSERT_FLOAT_EQ(lv.at(0, 3), 4);
+}
+
+TEST(NDArrayArrayTests, slice_write_test)
+{
+ // float
+ {
+ float raw_data[4] = {0};
+
+ Array<float> data22{raw_data, {2, 2}};
+
+ data22.slice(1) = {1, 2};
+
+ ASSERT_FLOAT_EQ(data22.at(0, 0), 0);
+ ASSERT_FLOAT_EQ(data22.at(0, 1), 0);
+ ASSERT_FLOAT_EQ(data22.at(1, 0), 1);
+ ASSERT_FLOAT_EQ(data22.at(1, 1), 2);
+ }
+
+ // int32_t
+ {
+ int32_t raw_data[4] = {0};
+ Array<int32_t> data22{raw_data, {2, 2}};
+
+ data22.slice(1) = {1, 2};
+
+ ASSERT_EQ(data22.at(0, 0), 0);
+ ASSERT_EQ(data22.at(0, 1), 0);
+ ASSERT_EQ(data22.at(1, 0), 1);
+ ASSERT_EQ(data22.at(1, 1), 2);
+ }
+
+ // uint32_t
+ {
+ uint32_t raw_data[4] = {0};
+ Array<uint32_t> data22{raw_data, {2, 2}};
+
+ data22.slice(1) = {1, 2};
+
+ ASSERT_EQ(data22.at(0, 0), 0);
+ ASSERT_EQ(data22.at(0, 1), 0);
+ ASSERT_EQ(data22.at(1, 0), 1);
+ ASSERT_EQ(data22.at(1, 1), 2);
+ }
+
+ // int8_t
+ {
+ int8_t raw_data[4] = {0};
+ Array<int8_t> data22{raw_data, {2, 2}};
+
+ data22.slice(1) = {1, 2};
+
+ ASSERT_EQ(data22.at(0, 0), 0);
+ ASSERT_EQ(data22.at(0, 1), 0);
+ ASSERT_EQ(data22.at(1, 0), 1);
+ ASSERT_EQ(data22.at(1, 1), 2);
+ }
+}
+
+TEST(NDArrayArrayTests, slice_read_test)
+{
+ // float
+ {
+ float raw_data[4] = {1, 2, 3, 4};
+
+ Array<float> data22{raw_data, {2, 2}};
+
+ auto slice = data22.slice(1);
+
+ ASSERT_FLOAT_EQ(slice[0], 3);
+ ASSERT_FLOAT_EQ(slice[1], 4);
+ }
+
+ // int32_t
+ {
+ int32_t raw_data[4] = {1, 2, 3, 4};
+
+ Array<int32_t> data22{raw_data, {2, 2}};
+
+ auto slice = data22.slice(1);
+
+ ASSERT_EQ(slice[0], 3);
+ ASSERT_EQ(slice[1], 4);
+ }
+
+ // uint32_t
+ {
+ uint32_t raw_data[4] = {1, 2, 3, 4};
+
+ Array<uint32_t> data22{raw_data, {2, 2}};
+
+ auto slice = data22.slice(1);
+
+ ASSERT_EQ(slice[0], 3);
+ ASSERT_EQ(slice[1], 4);
+ }
+
+ // int8_t
+ {
+ int8_t raw_data[4] = {1, 2, 3, 4};
+
+ Array<int8_t> data22{raw_data, {2, 2}};
+
+ auto slice = data22.slice(1);
+
+ ASSERT_EQ(slice[0], 3);
+ ASSERT_EQ(slice[1], 4);
+ }
+}
+
+TEST(NDArrayArrayTests, multidim_test)
+{
+ // float
+ {
+ float raw_data[5] = {0, 1, 2, 3, 4};
+
+ Array<float> data22{raw_data, {1, 1, 1, 1, 5}};
+
+ ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 0), 0);
+ ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 1), 1);
+ ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 2), 2);
+ ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 3), 3);
+ ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 4), 4);
+ }
+
+ // int32_t
+ {
+ int32_t raw_data[5] = {0, 1, 2, 3, 4};
+
+ Array<int32_t> data22{raw_data, {1, 1, 1, 1, 5}};
+
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 0), 0);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 1), 1);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 2), 2);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 3), 3);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 4), 4);
+ }
+
+ // uint32_t
+ {
+ uint32_t raw_data[5] = {0, 1, 2, 3, 4};
+
+ Array<uint32_t> data22{raw_data, {1, 1, 1, 1, 5}};
+
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 0), 0);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 1), 1);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 2), 2);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 3), 3);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 4), 4);
+ }
+
+ // int8_t
+ {
+ int8_t raw_data[5] = {0, 1, 2, 3, 4};
+
+ Array<int8_t> data22{raw_data, {1, 1, 1, 1, 5}};
+
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 0), 0);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 1), 1);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 2), 2);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 3), 3);
+ ASSERT_EQ(data22.at(0, 0, 0, 0, 4), 4);
+ }
+}
diff --git a/runtime/libs/ndarray/src/ContiguousSpan.test.cpp b/runtime/libs/ndarray/src/ContiguousSpan.test.cpp
new file mode 100644
index 000000000..26efcc645
--- /dev/null
+++ b/runtime/libs/ndarray/src/ContiguousSpan.test.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ndarray/ContiguousSpan.h"
+
+#include <gtest/gtest.h>
+
+using namespace ndarray;
+
+TEST(NDArrayContiguousSpanTests, slice_assign_test)
+{
+ // float
+ {
+ std::vector<float> v1{1, 2, 3, 4, 5};
+ std::vector<float> v2(5);
+
+ ContiguousSpan<float> span1(v1.begin(), v1.end());
+ ContiguousSpan<float> span2(v2.begin(), v2.end());
+
+ span2.assign(span1);
+
+ ASSERT_EQ(v1, v2);
+ ASSERT_EQ(span1.size(), 5);
+ ASSERT_EQ(span2.size(), 5);
+
+ ASSERT_EQ(span2.at(2), 3);
+ ASSERT_EQ(span2.at(4), 5);
+
+ ASSERT_EQ(*(span1.data() + 2), *(span2.data() + 2));
+
+ ContiguousSpan<float> span3(span2.offset(1));
+ ASSERT_EQ(span3.size(), 4);
+ ASSERT_EQ(span3.at(0), 2);
+ ASSERT_EQ(span3.at(1), 3);
+ ASSERT_EQ(span3[2], 4);
+ ASSERT_EQ(span3[3], 5);
+
+ // const
+ ContiguousSpan<float, true> span4(v1.begin(), v1.end());
+ ASSERT_EQ(span4.size(), 5);
+ ASSERT_EQ(span4.at(0), 1);
+ ASSERT_EQ(span4.at(1), 2);
+ ASSERT_EQ(span4.at(2), 3);
+ ASSERT_EQ(span4[3], 4);
+ ASSERT_EQ(span4[4], 5);
+
+ ContiguousSpan<float, true> span5(span4.offset(1));
+ ASSERT_EQ(span5.size(), 4);
+ ASSERT_EQ(span5.at(0), 2);
+ ASSERT_EQ(span5.at(1), 3);
+ ASSERT_EQ(span5[2], 4);
+ ASSERT_EQ(span5[3], 5);
+ }
+
+ // int32_t
+ {
+ std::vector<int32_t> v1{1, 2, 3, 4, 5};
+ std::vector<int32_t> v2(5);
+
+ ContiguousSpan<int32_t> span1(v1.begin(), v1.end());
+ ContiguousSpan<int32_t> span2(v2.begin(), v2.end());
+
+ span2.assign(span1);
+
+ ASSERT_EQ(v1, v2);
+ ASSERT_EQ(span1.size(), 5);
+ ASSERT_EQ(span2.size(), 5);
+
+ ASSERT_EQ(span2.at(2), 3);
+ ASSERT_EQ(span2.at(4), 5);
+
+ ASSERT_EQ(*(span1.data() + 2), *(span2.data() + 2));
+
+ ContiguousSpan<int32_t> span3(span2.offset(1));
+ ASSERT_EQ(span3.size(), 4);
+ ASSERT_EQ(span3.at(0), 2);
+ ASSERT_EQ(span3.at(1), 3);
+ ASSERT_EQ(span3[2], 4);
+ ASSERT_EQ(span3[3], 5);
+
+ // const
+ ContiguousSpan<int32_t, true> span4(v1.begin(), v1.end());
+ ASSERT_EQ(span4.size(), 5);
+ ASSERT_EQ(span4.at(0), 1);
+ ASSERT_EQ(span4.at(1), 2);
+ ASSERT_EQ(span4.at(2), 3);
+ ASSERT_EQ(span4[3], 4);
+ ASSERT_EQ(span4[4], 5);
+
+ ContiguousSpan<int32_t, true> span5(span4.offset(1));
+ ASSERT_EQ(span5.size(), 4);
+ ASSERT_EQ(span5.at(0), 2);
+ ASSERT_EQ(span5.at(1), 3);
+ ASSERT_EQ(span5[2], 4);
+ ASSERT_EQ(span5[3], 5);
+ }
+
+ // uint32_t
+ {
+ std::vector<uint32_t> v1{1, 2, 3, 4, 5};
+ std::vector<uint32_t> v2(5);
+
+ ContiguousSpan<uint32_t> span1(v1.begin(), v1.end());
+ ContiguousSpan<uint32_t> span2(v2.begin(), v2.end());
+
+ span2.assign(span1);
+
+ ASSERT_EQ(v1, v2);
+ ASSERT_EQ(span1.size(), 5);
+ ASSERT_EQ(span2.size(), 5);
+
+ ASSERT_EQ(span2.at(2), 3);
+ ASSERT_EQ(span2.at(4), 5);
+
+ ASSERT_EQ(*(span1.data() + 2), *(span2.data() + 2));
+
+ ContiguousSpan<uint32_t> span3(span2.offset(1));
+ ASSERT_EQ(span3.size(), 4);
+ ASSERT_EQ(span3.at(0), 2);
+ ASSERT_EQ(span3.at(1), 3);
+ ASSERT_EQ(span3[2], 4);
+ ASSERT_EQ(span3[3], 5);
+
+ // const
+ ContiguousSpan<uint32_t, true> span4(v1.begin(), v1.end());
+ ASSERT_EQ(span4.size(), 5);
+ ASSERT_EQ(span4.at(0), 1);
+ ASSERT_EQ(span4.at(1), 2);
+ ASSERT_EQ(span4.at(2), 3);
+ ASSERT_EQ(span4[3], 4);
+ ASSERT_EQ(span4[4], 5);
+
+ ContiguousSpan<uint32_t, true> span5(span4.offset(1));
+ ASSERT_EQ(span5.size(), 4);
+ ASSERT_EQ(span5.at(0), 2);
+ ASSERT_EQ(span5.at(1), 3);
+ ASSERT_EQ(span5[2], 4);
+ ASSERT_EQ(span5[3], 5);
+ }
+
+ // int8_t
+ {
+ std::vector<int8_t> v1{1, 2, 3, 4, 5};
+ std::vector<int8_t> v2(5);
+
+ ContiguousSpan<int8_t> span1(v1.begin(), v1.end());
+ ContiguousSpan<int8_t> span2(v2.begin(), v2.end());
+
+ span2.assign(span1);
+
+ ASSERT_EQ(v1, v2);
+ ASSERT_EQ(span1.size(), 5);
+ ASSERT_EQ(span2.size(), 5);
+
+ ASSERT_EQ(span2.at(2), 3);
+ ASSERT_EQ(span2.at(4), 5);
+
+ ASSERT_EQ(*(span1.data() + 2), *(span2.data() + 2));
+
+ ContiguousSpan<int8_t> span3(span2.offset(1));
+ ASSERT_EQ(span3.size(), 4);
+ ASSERT_EQ(span3.at(0), 2);
+ ASSERT_EQ(span3.at(1), 3);
+ ASSERT_EQ(span3[2], 4);
+ ASSERT_EQ(span3[3], 5);
+
+ // const
+ ContiguousSpan<int8_t, true> span4(v1.begin(), v1.end());
+ ASSERT_EQ(span4.size(), 5);
+ ASSERT_EQ(span4.at(0), 1);
+ ASSERT_EQ(span4.at(1), 2);
+ ASSERT_EQ(span4.at(2), 3);
+ ASSERT_EQ(span4[3], 4);
+ ASSERT_EQ(span4[4], 5);
+
+ ContiguousSpan<int8_t, true> span5(span4.offset(1));
+ ASSERT_EQ(span5.size(), 4);
+ ASSERT_EQ(span5.at(0), 2);
+ ASSERT_EQ(span5.at(1), 3);
+ ASSERT_EQ(span5[2], 4);
+ ASSERT_EQ(span5[3], 5);
+ }
+}
diff --git a/runtime/libs/ndarray/src/detail/cxx14.h b/runtime/libs/ndarray/src/detail/cxx14.h
deleted file mode 100644
index 81135b3f2..000000000
--- a/runtime/libs/ndarray/src/detail/cxx14.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NDARRAY_CXX14_H_
-#define _NDARRAY_CXX14_H_
-
-namespace ndarray
-{
-
-namespace cxx14
-{
-
-template <size_t... Nums> struct index_sequence
-{
- using value_type = size_t;
-
- static constexpr std::size_t size() noexcept { return sizeof...(Nums); }
-};
-
-namespace detail
-{
-
-template <size_t v, typename Seq> struct _append;
-
-template <size_t v, size_t... Nums> struct _append<v, index_sequence<Nums...>>
-{
- using result = index_sequence<Nums..., v>;
-};
-
-template <size_t Len> struct make_index_sequence
-{
- using result =
- typename detail::_append<Len - 1, typename make_index_sequence<Len - 1>::result>::result;
-};
-
-template <> struct make_index_sequence<1>
-{
- using result = index_sequence<0>;
-};
-
-template <> struct make_index_sequence<0>
-{
- using result = index_sequence<>;
-};
-
-} // namespace detail
-
-template <size_t Num> using make_index_sequence = typename detail::make_index_sequence<Num>::result;
-
-} // namespace cxx14
-
-} // namespace ndarray
-
-#endif //_NDARRAY_CXX14_H_
diff --git a/runtime/libs/ndarray/test/CMakeLists.txt b/runtime/libs/ndarray/test/CMakeLists.txt
deleted file mode 100644
index 16f8779ee..000000000
--- a/runtime/libs/ndarray/test/CMakeLists.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-if(NOT BUILD_NDARRAY_TEST)
- return()
-endif()
-
-add_executable(ndarray_test ndarray_test.cpp)
-
-target_link_libraries(ndarray_test PRIVATE ndarray)
-
-nnfw_find_package(GTest)
-if(NOT GTest_FOUND)
- message(STATUS "GTest not avaialble. Skipping NDArray test build")
- return()
-endif(NOT GTest_FOUND)
-
-target_link_libraries(ndarray_test PUBLIC gtest gtest_main ${LIB_PTHREAD})
-
-add_test(ndarray_test ndarray_test)
diff --git a/runtime/libs/ndarray/test/ndarray_test.cpp b/runtime/libs/ndarray/test/ndarray_test.cpp
deleted file mode 100644
index 0aa948c72..000000000
--- a/runtime/libs/ndarray/test/ndarray_test.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "gtest/gtest.h"
-
-#include "ndarray/Array.h"
-
-using namespace ndarray;
-
-TEST(NDArray_tests, basic_data_test)
-{
-
- float raw_data[] = {1, 2, 3, 4};
-
- Array<float> data22{raw_data, {2, 2}};
-
- ASSERT_FLOAT_EQ(data22.at(0, 0), 1);
- ASSERT_FLOAT_EQ(data22.at(0, 1), 2);
- ASSERT_FLOAT_EQ(data22.at(1, 0), 3);
- ASSERT_FLOAT_EQ(data22.at(1, 1), 4);
-
- Array<float> data14{raw_data, {1, 4}};
- ASSERT_FLOAT_EQ(data22.at(0, 0), 1);
- ASSERT_FLOAT_EQ(data22.at(0, 1), 2);
- ASSERT_FLOAT_EQ(data22.at(0, 2), 3);
- ASSERT_FLOAT_EQ(data22.at(0, 3), 4);
-}
-
-TEST(NDArray_tests, slice_write_test)
-{
- float raw_data[4] = {0};
-
- Array<float> data22{raw_data, {2, 2}};
-
- data22.slice(1) = {1, 2};
-
- ASSERT_FLOAT_EQ(data22.at(0, 0), 0);
- ASSERT_FLOAT_EQ(data22.at(0, 1), 0);
- ASSERT_FLOAT_EQ(data22.at(1, 0), 1);
- ASSERT_FLOAT_EQ(data22.at(1, 1), 2);
-}
-
-TEST(NDArray_tests, slice_read_test)
-{
- float raw_data[4] = {1, 2, 3, 4};
-
- Array<float> data22{raw_data, {2, 2}};
-
- auto slice = data22.slice(1);
-
- ASSERT_FLOAT_EQ(slice[0], 3);
- ASSERT_FLOAT_EQ(slice[1], 4);
-}
-
-TEST(NDArray_tests, multidim_test)
-{
- float raw_data[5] = {0, 1, 2, 3, 4};
-
- Array<float> data22{raw_data, {1, 1, 1, 1, 5}};
-
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 0), 0);
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 1), 1);
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 2), 2);
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 3), 3);
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 4), 4);
-}
-
-TEST(NDArray_tests, slice_assign_test)
-{
- std::vector<float> v1{1, 2, 3, 4, 5};
- std::vector<float> v2(5);
-
- ContiguousSpan<float> span1(v1.begin(), v1.end());
- ContiguousSpan<float> span2(v2.begin(), v2.end());
-
- span2.assign(span1);
-
- ASSERT_EQ(v1, v2);
-}
diff --git a/runtime/libs/nnapi/CMakeLists.txt b/runtime/libs/nnapi/CMakeLists.txt
index a5d9490d1..73f82b909 100644
--- a/runtime/libs/nnapi/CMakeLists.txt
+++ b/runtime/libs/nnapi/CMakeLists.txt
@@ -1,3 +1,4 @@
-add_subdirectories()
+add_library(nnfw_lib_nnapi INTERFACE)
-add_library(nnfw_lib_nnapi ALIAS nnfw_lib_nnapi_1_2)
+target_include_directories(nnfw_lib_nnapi INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
+target_link_libraries(nnfw_lib_nnapi INTERFACE nnfw-nnapi-header)
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h b/runtime/libs/nnapi/include/NeuralNetworksExShim.h
index 855613241..855613241 100644
--- a/runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h
+++ b/runtime/libs/nnapi/include/NeuralNetworksExShim.h
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h b/runtime/libs/nnapi/include/NeuralNetworksLoadHelpers.h
index 1c482b54c..1c482b54c 100644
--- a/runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h
+++ b/runtime/libs/nnapi/include/NeuralNetworksLoadHelpers.h
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h b/runtime/libs/nnapi/include/NeuralNetworksShim.h
index 80082383f..2e8ccdb76 100644
--- a/runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h
+++ b/runtime/libs/nnapi/include/NeuralNetworksShim.h
@@ -16,7 +16,7 @@
*/
// NOTE This header is derived from part of the following file
-// https://github.com/tensorflow/tensorflow/blob/a59ad83d06abd38b5e142c41043db8886a92fca8/tensorflow/lite/nnapi/NeuralNetworksShim.h
+// https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/lite/nnapi/NeuralNetworksShim.h
#ifndef __NEURAL_NETWORKS_SHIM_H__
#define __NEURAL_NETWORKS_SHIM_H__
@@ -225,8 +225,8 @@ inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int
* @return ANEURALNETWORKS_NO_ERROR if successful.
*/
inline int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
- ANeuralNetworksModel *model, int32_t index,
- const ANeuralNetworksSymmPerChannelQuantParams *channelQuant)
+ ANeuralNetworksModel *model, int32_t index,
+ const ANeuralNetworksSymmPerChannelQuantParams *channelQuant)
{
LOAD_FUNCTION(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
EXECUTE_FUNCTION_RETURN(model, index, channelQuant);
@@ -1131,6 +1131,424 @@ inline int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution *
EXECUTE_FUNCTION_RETURN(execution, durationCode, duration);
}
+/**
+ * Queries whether an extension is supported by the driver implementation of
+ * the specified device.
+ *
+ * @param device The representation of the specified device.
+ * @param extensionName The extension name.
+ * @param isExtensionSupported The boolean value indicating whether the
+ * extension is supported.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *
+ * Available since API level 29.
+ */
+inline int ANeuralNetworksDevice_getExtensionSupport(const ANeuralNetworksDevice *device,
+ const char *extensionName,
+ bool *isExtensionSupported)
+{
+ LOAD_FUNCTION(ANeuralNetworksDevice_getExtensionSupport);
+ EXECUTE_FUNCTION_RETURN(device, extensionName, isExtensionSupported);
+}
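+
+// Illustrative sketch (not from the upstream header): callers typically gate
+// extension use on this query. The device handle and the extension name
+// "com.example.my_ext" below are hypothetical.
+//
+//   bool supported = false;
+//   if (ANeuralNetworksDevice_getExtensionSupport(device, "com.example.my_ext",
+//                                                 &supported) == ANEURALNETWORKS_NO_ERROR &&
+//       supported)
+//   {
+//     // Safe to request extension operand/operation types on this device.
+//   }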
+
+/**
+ * Creates an operand type from an extension name and an extension operand code.
+ *
+ * See {@link ANeuralNetworksModel} for information on multithreaded usage.
+ *
+ * Available since API level 29.
+ *
+ * @param model The model to contain the operand.
+ * @param extensionName The extension name.
+ * @param operandCodeWithinExtension The extension operand code.
+ * @param type The operand type.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksModel_getExtensionOperandType(ANeuralNetworksModel *model,
+ const char *extensionName,
+ uint16_t operandCodeWithinExtension,
+ int32_t *type)
+{
+ LOAD_FUNCTION(ANeuralNetworksModel_getExtensionOperandType);
+ EXECUTE_FUNCTION_RETURN(model, extensionName, operandCodeWithinExtension, type);
+}
+
+/**
+ * Creates an operation type from an extension name and an extension operation
+ * code.
+ *
+ * See {@link ANeuralNetworksModel} for information on multithreaded usage.
+ *
+ * Available since API level 29.
+ *
+ * @param model The model to contain the operation.
+ * @param extensionName The extension name.
+ * @param operationCodeWithinExtension The extension operation code.
+ * @param type The operation type.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksModel_getExtensionOperationType(ANeuralNetworksModel *model,
+ const char *extensionName,
+ uint16_t operationCodeWithinExtension,
+ ANeuralNetworksOperationType *type)
+{
+ LOAD_FUNCTION(ANeuralNetworksModel_getExtensionOperationType);
+ EXECUTE_FUNCTION_RETURN(model, extensionName, operationCodeWithinExtension, type);
+}
+
+/**
+ * Sets extension operand parameters.
+ *
+ * Available since API level 29.
+ *
+ * @param model The model to be modified.
+ * @param index The index of the model operand we're setting.
+ * @param data A pointer to the extension operand data.
+ * The data does not have to outlive the call to this function.
+ * @param length The size in bytes of the data value.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksModel_setOperandExtensionData(ANeuralNetworksModel *model, int32_t index,
+ const void *data, size_t length)
+{
+ LOAD_FUNCTION(ANeuralNetworksModel_setOperandExtensionData);
+ EXECUTE_FUNCTION_RETURN(model, index, data, length);
+}
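+
+// Illustrative sketch (not from the upstream header): a typical extension flow
+// resolves the extension operand type by name, adds the operand, then attaches
+// the extension data. The names "com.example.my_ext" and kMyOperandCode, and
+// the payload variables, are hypothetical.
+//
+//   int32_t ext_type = 0;
+//   ANeuralNetworksModel_getExtensionOperandType(model, "com.example.my_ext",
+//                                                kMyOperandCode, &ext_type);
+//   ANeuralNetworksOperandType operand_type{};
+//   operand_type.type = ext_type;
+//   ANeuralNetworksModel_addOperand(model, &operand_type);
+//   ANeuralNetworksModel_setOperandExtensionData(model, operand_index, payload,
+//                                                payload_length);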
+#if __ANDROID_API__ >= 30
+/**
+ * Create a {@link ANeuralNetworksMemoryDesc} with no properties.
+ *
+ * This only creates the memory descriptor. Its properties should be set with
+ * calls to
+ * {@link ANeuralNetworksMemoryDesc_addInputRole},
+ * {@link ANeuralNetworksMemoryDesc_addOutputRole}, and
+ * {@link ANeuralNetworksMemoryDesc_setDimensions}.
+ *
+ * {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties
+ * have been set.
+ *
+ * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory
+ * descriptor is no longer needed.
+ *
+ * Available since API level 30.
+ *
+ * @param desc The {@link ANeuralNetworksMemoryDesc} to be created.
+ * Set to NULL if unsuccessful.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc **desc)
+{
+ LOAD_FUNCTION(ANeuralNetworksMemoryDesc_create);
+ EXECUTE_FUNCTION_RETURN(desc);
+}
+
+/**
+ * Destroy a memory descriptor.
+ *
+ * The memory descriptor need not have been finished by a call to
+ * {@link ANeuralNetworksMemoryDesc_finish}.
+ *
+ * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
+ *
+ * Available since API level 30.
+ *
+ * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable
+ * and results in no operation.
+ */
+inline void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc *desc)
+{
+ LOAD_FUNCTION(ANeuralNetworksMemoryDesc_free);
+ EXECUTE_FUNCTION(desc);
+}
+
+/**
+ * Specify that a memory object will be playing the role of an output to an
+ * execution created from a particular compilation.
+ *
+ * The compilation and the output index fully specify an output operand. This
+ * function may be invoked multiple times on the same memory descriptor with
+ * different output operands, and the same output operand may be specified on
+ * multiple memory descriptors. However, specifying the same output operand on
+ * the same memory descriptor object more than once will return an error.
+ *
+ * The dimensions of the corresponding model operands of all the roles specified
+ * by
+ * {@link ANeuralNetworksMemoryDesc_addInputRole} and
+ * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each
+ * other. Two dimensions are incompatible if both ranks are fully specified but
+ * have different values, or if there is at least one axis that is fully
+ * specified in both but has different values.
+ *
+ * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
+ * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory
+ * descriptor before invoking {@link ANeuralNetworksMemoryDesc_finish}.
+ *
+ * Attempting to modify a memory descriptor once
+ * {@link ANeuralNetworksMemoryDesc_finish} has been called will return an
+ * error.
+ *
+ * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
+ *
+ * Available since API level 30.
+ *
+ * @param desc The memory descriptor to be modified.
+ * @param compilation The compilation object. It must already have been finished
+ * by calling {@link ANeuralNetworksCompilation_finish}, and must outlive the
+ * memory descriptor.
+ * @param index The index of the output argument we are referencing from the
+ * compilation. It is an index into the outputs list passed to
+ * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ * the index associated with {@link
+ * ANeuralNetworksModel_addOperand}.
+ * @param frequency A floating-point value within the range (0.0, 1.0].
+ * Describes how likely the memory is to be used in the specified role. This is
+ * provided as a hint to optimize the case when different roles
+ * prefer different memory locations or data layouts.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc *desc,
+ const ANeuralNetworksCompilation *compilation,
+                                                   uint32_t index, float frequency)
+{
+ LOAD_FUNCTION(ANeuralNetworksMemoryDesc_addOutputRole);
+ EXECUTE_FUNCTION_RETURN(desc, compilation, index, frequency);
+}
+
+/**
+ * Specify that a memory object will be playing the role of an input to an
+ * execution created from a particular compilation.
+ *
+ * The compilation and the input index fully specify an input operand. This
+ * function may be invoked multiple times on the same memory descriptor with
+ * different input operands, and the same input operand may be specified on
+ * multiple memory descriptors. However, specifying the same input operand on
+ * the same memory descriptor more than once will return an error.
+ *
+ * The dimensions of the corresponding model operands of all the roles specified
+ * by
+ * {@link ANeuralNetworksMemoryDesc_addInputRole} and
+ * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each
+ * other. Two dimensions are incompatible if both ranks are fully specified but
+ * have different values, or if there is at least one axis that is fully
+ * specified in both but has different values.
+ *
+ * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
+ * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory
+ * descriptor before invoking {@link ANeuralNetworksMemoryDesc_finish}.
+ *
+ * Attempting to modify a memory descriptor once
+ * {@link ANeuralNetworksMemoryDesc_finish} has been called will return an
+ * error.
+ *
+ * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
+ *
+ * Available since API level 30.
+ *
+ * @param desc The memory descriptor to be modified.
+ * @param compilation The compilation object. It must already have been finished
+ * by calling {@link ANeuralNetworksCompilation_finish}, and must outlive the
+ * memory descriptor.
+ * @param index The index of the input argument we are referencing from the
+ * compilation. It is an index into the inputs list passed to
+ * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
+ * the index associated with {@link
+ * ANeuralNetworksModel_addOperand}.
+ * @param frequency A floating-point value within the range (0.0, 1.0].
+ * Describes how likely the memory is to be used in the specified role. This is
+ * provided as a hint to optimize the case when different roles
+ * prefer different memory locations or data layouts.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc *desc,
+ const ANeuralNetworksCompilation *compilation,
+ uint32_t index, float frequency)
+{
+ LOAD_FUNCTION(ANeuralNetworksMemoryDesc_addInputRole);
+ EXECUTE_FUNCTION_RETURN(desc, compilation, index, frequency);
+}
+
+/**
+ * Set the dimensional information of the memory descriptor.
+ *
+ * The specified dimensions must be compatible with the dimensions of the
+ * corresponding model operands of all the roles specified by
+ * {@link ANeuralNetworksMemoryDesc_addInputRole} and
+ * {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are
+ * incompatible if both ranks are fully specified but have different values, or
+ * if there is at least one axis that is fully specified in both but has
+ * different values.
+ *
+ * Attempting to modify a memory descriptor once
+ * {@link ANeuralNetworksMemoryDesc_finish} has been called will return an
+ * error.
+ *
+ * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
+ *
+ * Available since API level 30.
+ *
+ * @param desc The memory descriptor to be modified.
+ * @param rank The number of dimensions. Must be 0 for scalars.
+ * @param dimensions An array of dimensions. An entry with the value 0 indicates
+ * that the corresponding axis has an unknown size.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc *desc, uint32_t rank,
+ const uint32_t *dimensions)
+{
+ LOAD_FUNCTION(ANeuralNetworksMemoryDesc_setDimensions);
+ EXECUTE_FUNCTION_RETURN(desc, rank, dimensions);
+}
+
+/**
+ * Indicate that we have finished modifying a memory descriptor. Required before
+ * calling
+ * {@link ANeuralNetworksMemory_createFromDesc}.
+ *
+ * This function must only be called once for a given memory descriptor.
+ *
+ * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
+ *
+ * Available since API level 30.
+ *
+ * @param desc The memory descriptor to be finished.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc *desc)
+{
+ LOAD_FUNCTION(ANeuralNetworksMemoryDesc_finish);
+ EXECUTE_FUNCTION_RETURN(desc);
+}
+
+/**
+ * Creates a memory object from a memory descriptor.
+ *
+ * The memory object is created with an uninitialized buffer. A memory object
+ * with an uninitialized buffer may only be used according to the roles
+ * specified by
+ * {@link ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory
+ * in
+ * {@link ANeuralNetworksMemory_copy}. The buffer of a memory object is
+ * initialized after the memory object is used as an output in a successful
+ * execution, or used as the destination memory in a successful {@link
+ * ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may
+ * be used according to all roles specified in
+ * {@link ANeuralNetworksMemoryDesc}, or as the source or destination memory in
+ * {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will return
+ * to the uninitialized state if the memory object is used as an output in a
+ * failed execution, or used as the destination memory in a failed {@link
+ * ANeuralNetworksMemory_copy}.
+ *
+ * The dimensions of the memory descriptor are deduced from the dimensions of
+ * the corresponding model operands of all the roles specified by
+ * {@link ANeuralNetworksMemoryDesc_addInputRole} and
+ * {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions
+ * set by the call to {@link ANeuralNetworksMemoryDesc_setDimensions}, if any.
+ * The memory descriptor may have unspecified dimensions or rank. In such a
+ * case, the same memory object may be used with different shapes of outputs in
+ * different executions. When the memory is used as an input, the input shape
+ * must be the same as the output shape from the last execution using this
+ * memory object as an output, or the last
+ * {@link ANeuralNetworksMemory_copy} using this memory object as the destination
+ * memory. Creating a memory object with unspecified dimensions or rank may fail
+ * for certain sets of roles.
+ *
+ * Using the memory in roles or shapes that are not compatible with the rules
+ * specified above will return an error.
+ *
+ * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
+ * {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object,
+ * both offset and length must be set to zero and the entire memory region will
+ * be associated with the specified input or output operand.
+ *
+ * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the
+ * memory created from this function will return an error.
+ *
+ * {@link ANeuralNetworksMemory_free} must be called once the memory is no
+ * longer needed.
+ *
+ * Attempting to create memory from an unfinished memory descriptor will return
+ * an error.
+ *
+ * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the
+ * {@link ANeuralNetworksMemory} object.
+ *
+ * Available since API level 30.
+ *
+ * @param desc The memory descriptor.
+ * @param memory The memory object to be created.
+ * Set to NULL if unsuccessful.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if
+ * the memory is created with unspecified dimensions or rank and it is not
+ * supported for this set of roles.
+ */
+inline int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc *desc,
+ ANeuralNetworksMemory **memory)
+{
+ LOAD_FUNCTION(ANeuralNetworksMemory_createFromDesc);
+ EXECUTE_FUNCTION_RETURN(desc, memory);
+}
+
+/**
+ * Copies data from one memory object to another.
+ *
+ * If at most one of the src and dst is created from
+ * {@link ANeuralNetworksMemory_createFromDesc}, the src and dst must have the
+ * same logical size:
+ * - If the memory is created from {@link ANeuralNetworksMemory_createFromFd},
+ * or if it is created from {@link
+ * ANeuralNetworksMemory_createFromAHardwareBuffer} with format of
+ * AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory.
+ * - If the memory is created from
+ * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a format other
+ * than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there
+ * is no padding and the data is tightly packed. This function may fail if the
+ * AHardwareBuffer cannot be accessed.
+ * - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc},
+ * the logical size equals the size indicated by the {@link OperandCode}
+ * multiplied by the number of elements. This function will fail if the number
+ * of elements is unknown.
+ *
+ * If both src and dst are created from {@link
+ * ANeuralNetworksMemory_createFromDesc}, they must have compatible dimensions.
+ * Two dimensions are incompatible if both ranks are fully specified but have
+ * different values, or if there is at least one axis that is fully specified in
+ * both but has different values. The dst may have unspecified dimensions or
+ * rank. In such a case, the dimensions of dst will get updated according to the
+ * dimensions of the src.
+ *
+ * In both cases, if the src is created from
+ * {@link ANeuralNetworksMemory_createFromDesc}, it must have been used as an
+ * output in a successful execution, or used as the destination memory in a
+ * successful
+ * {@link ANeuralNetworksMemory_copy}.
+ *
+ * The src and dst may have different data layout, in which case the data
+ * copying is performed logically with data layout transformation.
+ *
+ * Available since API level 30.
+ *
+ * @param src The source memory object.
+ * @param dst The destination memory object.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ */
+inline int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory *src,
+ const ANeuralNetworksMemory *dst)
+{
+ LOAD_FUNCTION(ANeuralNetworksMemory_copy);
+ EXECUTE_FUNCTION_RETURN(src, dst);
+}
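+
+// Illustrative sketch (not from the upstream header): the usual
+// descriptor-to-memory flow chains the functions declared above. It assumes
+// `compilation` is a finished ANeuralNetworksCompilation with an output at
+// index 0, and that `dst_memory` is a second, compatible memory object.
+//
+//   ANeuralNetworksMemoryDesc *desc = nullptr;
+//   ANeuralNetworksMemoryDesc_create(&desc);
+//   ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation, 0, 1.0f);
+//   ANeuralNetworksMemoryDesc_finish(desc);
+//   ANeuralNetworksMemory *memory = nullptr;
+//   ANeuralNetworksMemory_createFromDesc(desc, &memory);
+//   ANeuralNetworksMemoryDesc_free(desc); // the desc need not outlive `memory`
+//   // ... use `memory` as an execution output, then optionally:
+//   ANeuralNetworksMemory_copy(memory, dst_memory);
+//   ANeuralNetworksMemory_free(memory);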
+#endif // __ANDROID_API__ >= 30
/**/
#endif // __NEURAL_NETWORKS_SHIM_H__
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h b/runtime/libs/nnapi/include/NeuralNetworksTypes.h
index d74402749..35c7a5802 100644
--- a/runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h
+++ b/runtime/libs/nnapi/include/NeuralNetworksTypes.h
@@ -16,7 +16,7 @@
*/
// NOTE This header is derived from part of the following file
-// https://github.com/tensorflow/tensorflow/blob/a59ad83d06abd38b5e142c41043db8886a92fca8/tensorflow/lite/nnapi/NeuralNetworksTypes.h
+// https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/lite/nnapi/NeuralNetworksTypes.h
#ifndef __NEURAL_NETWORKS_TYPES_H__
#define __NEURAL_NETWORKS_TYPES_H__
@@ -56,12 +56,12 @@ typedef int (*ANeuralNetworksModel_setOperandValue_fn)(ANeuralNetworksModel *mod
const void *buffer, size_t length);
typedef int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams_fn)(
- ANeuralNetworksModel *model, int32_t index,
- const ANeuralNetworksSymmPerChannelQuantParams *channelQuant);
+ ANeuralNetworksModel *model, int32_t index,
+ const ANeuralNetworksSymmPerChannelQuantParams *channelQuant);
typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
- ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksMemory *memory, size_t offset,
- size_t length);
+ ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length);
typedef int (*ANeuralNetworksModel_addOperation_fn)(ANeuralNetworksModel *model,
ANeuralNetworksOperationType type,
@@ -88,8 +88,8 @@ typedef int (*ANeuralNetworksExecution_setInput_fn)(ANeuralNetworksExecution *ex
const void *buffer, size_t length);
typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
- ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
- const ANeuralNetworksMemory *memory, size_t offset, size_t length);
+ ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset, size_t length);
typedef int (*ANeuralNetworksExecution_setOutput_fn)(ANeuralNetworksExecution *execution,
int32_t index,
@@ -97,8 +97,8 @@ typedef int (*ANeuralNetworksExecution_setOutput_fn)(ANeuralNetworksExecution *e
void *buffer, size_t length);
typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
- ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
- const ANeuralNetworksMemory *memory, size_t offset, size_t length);
+ ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset, size_t length);
typedef int (*ANeuralNetworksExecution_startCompute_fn)(ANeuralNetworksExecution *execution,
ANeuralNetworksEvent **event);
@@ -125,23 +125,39 @@ typedef int (*ANeuralNetworksDevice_getFeatureLevel_fn)(const ANeuralNetworksDev
int64_t *featureLevel);
typedef int (*ANeuralNetworksModel_getSupportedOperationsForDevices_fn)(
- const ANeuralNetworksModel *model, const ANeuralNetworksDevice *const *devices,
- uint32_t numDevices, bool *supportedOps);
+ const ANeuralNetworksModel *model, const ANeuralNetworksDevice *const *devices,
+ uint32_t numDevices, bool *supportedOps);
typedef int (*ANeuralNetworksCompilation_createForDevices_fn)(
- ANeuralNetworksModel *model, const ANeuralNetworksDevice *const *devices, uint32_t numDevices,
- ANeuralNetworksCompilation **compilation);
+ ANeuralNetworksModel *model, const ANeuralNetworksDevice *const *devices, uint32_t numDevices,
+ ANeuralNetworksCompilation **compilation);
typedef int (*ANeuralNetworksCompilation_setCaching_fn)(ANeuralNetworksCompilation *compilation,
const char *cacheDir, const uint8_t *token);
+#if __ANDROID_API__ >= 30
+typedef int (*ANeuralNetworksCompilation_setTimeout_fn)(ANeuralNetworksCompilation *compilation,
+ uint64_t duration);
+
+typedef int (*ANeuralNetworksCompilation_setPriority_fn)(ANeuralNetworksCompilation *compilation,
+ int priority);
+#endif // __ANDROID_API__ >= 30
+
typedef int (*ANeuralNetworksExecution_compute_fn)(ANeuralNetworksExecution *execution);
+#if __ANDROID_API__ >= 30
+typedef int (*ANeuralNetworksExecution_setTimeout_fn)(ANeuralNetworksExecution *execution,
+ uint64_t duration);
+
+typedef int (*ANeuralNetworksExecution_setLoopTimeout_fn)(ANeuralNetworksExecution *execution,
+ uint64_t duration);
+#endif // __ANDROID_API__ >= 30
+
typedef int (*ANeuralNetworksExecution_getOutputOperandRank_fn)(ANeuralNetworksExecution *execution,
int32_t index, uint32_t *rank);
typedef int (*ANeuralNetworksExecution_getOutputOperandDimensions_fn)(
- ANeuralNetworksExecution *execution, int32_t index, uint32_t *dimensions);
+ ANeuralNetworksExecution *execution, int32_t index, uint32_t *dimensions);
typedef int (*ANeuralNetworksBurst_create_fn)(ANeuralNetworksCompilation *compilation,
ANeuralNetworksBurst **burst);
@@ -160,4 +176,46 @@ typedef int (*ANeuralNetworksExecution_setMeasureTiming_fn)(ANeuralNetworksExecu
typedef int (*ANeuralNetworksExecution_getDuration_fn)(const ANeuralNetworksExecution *execution,
int32_t durationCode, uint64_t *duration);
+typedef int (*ANeuralNetworksDevice_getExtensionSupport_fn)(const ANeuralNetworksDevice *device,
+ const char *extensionName,
+ bool *isExtensionSupported);
+
+typedef int (*ANeuralNetworksModel_getExtensionOperandType_fn)(ANeuralNetworksModel *model,
+ const char *extensionName,
+ uint16_t operandCodeWithinExtension,
+ int32_t *type);
+
+typedef int (*ANeuralNetworksModel_getExtensionOperationType_fn)(
+ ANeuralNetworksModel *model, const char *extensionName, uint16_t operationCodeWithinExtension,
+ ANeuralNetworksOperationType *type);
+
+typedef int (*ANeuralNetworksModel_setOperandExtensionData_fn)(ANeuralNetworksModel *model,
+ int32_t index, const void *data,
+ size_t length);
+
+#if __ANDROID_API__ >= 30
+typedef int (*ANeuralNetworksMemoryDesc_create_fn)(ANeuralNetworksMemoryDesc **desc);
+
+typedef void (*ANeuralNetworksMemoryDesc_free_fn)(ANeuralNetworksMemoryDesc *desc);
+
+typedef int (*ANeuralNetworksMemoryDesc_addInputRole_fn)(
+  ANeuralNetworksMemoryDesc *desc, const ANeuralNetworksCompilation *compilation, uint32_t index,
+ float frequency);
+
+typedef int (*ANeuralNetworksMemoryDesc_addOutputRole_fn)(
+ ANeuralNetworksMemoryDesc *desc, const ANeuralNetworksCompilation *compilation, uint32_t index,
+ float frequency);
+
+typedef int (*ANeuralNetworksMemoryDesc_setDimensions_fn)(ANeuralNetworksMemoryDesc *desc,
+ uint32_t rank,
+ const uint32_t *dimensions);
+
+typedef int (*ANeuralNetworksMemoryDesc_finish_fn)(ANeuralNetworksMemoryDesc *desc);
+
+typedef int (*ANeuralNetworksMemory_createFromDesc_fn)(const ANeuralNetworksMemoryDesc *desc,
+ ANeuralNetworksMemory **memory);
+
+typedef int (*ANeuralNetworksMemory_copy_fn)(const ANeuralNetworksMemory *src,
+ const ANeuralNetworksMemory *dst);
+#endif // __ANDROID_API__ >= 30
#endif // __NEURAL_NETWORKS_TYPES_H__
diff --git a/runtime/libs/nnapi/v1.1/CMakeLists.txt b/runtime/libs/nnapi/v1.1/CMakeLists.txt
deleted file mode 100644
index dc018c60f..000000000
--- a/runtime/libs/nnapi/v1.1/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-add_library(nnfw_lib_nnapi_1_1 INTERFACE)
-
-target_include_directories(nnfw_lib_nnapi_1_1 INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(nnfw_lib_nnapi_1_1 INTERFACE nnfw-nnapi-header)
diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h
deleted file mode 100644
index f684dab90..000000000
--- a/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-/**
- * @file NeuralNetworksExShim.h
- * @brief This file contains an actual implementation of
- * ANeuralNetworksModel_addOperationEx function
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef NN_API_EX_SHIM_H
-#define NN_API_EX_SHIM_H
-
-#include "NeuralNetworksEx.h"
-#include "NeuralNetworksLoadHelpers.h"
-
-typedef int (*ANeuralNetworksModel_addOperationEx_fn)(ANeuralNetworksModel *model,
- ANeuralNetworksOperationTypeEx type,
- uint32_t inputCount, const uint32_t *inputs,
- uint32_t outputCount,
- const uint32_t *outputs);
-
-/**
- * @brief Add an extended operation to a model.
- *
- * @param[in] model The model to be modified.
- * @param[in] type The type of extended operation.
- * @param[in] inputCount The number of entries in the inputs array.
- * @param[in] inputs An array of indexes identifying each operand.
- * @param[in] outputCount The number of entries in the outputs array.
- * @param[in] outputs An array of indexes identifying each operand.
- *
- * @note The operands specified by inputs and outputs must have been
- * previously added by calls to {@link ANeuralNetworksModel_addOperand}.\n
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish}
- * has been called will return an error.\n
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-
-inline int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
- ANeuralNetworksOperationTypeEx type,
- uint32_t inputCount, const uint32_t *inputs,
- uint32_t outputCount, const uint32_t *outputs)
-{
- LOAD_FUNCTION(ANeuralNetworksModel_addOperationEx);
- EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, outputs);
-}
-
-#endif // NN_API_EX_SHIM_H
diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h
deleted file mode 100644
index 201465f9c..000000000
--- a/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This header is derived from part of the following file (in TensorFlow v1.12)
-// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h'
-
-/**
- * @file NeuralNetworksLoadHelpers.h
- * @ingroup COM_AI_RUNTIME
- * @brief This file contains functions to load NN API runtime library
- */
-
-#ifndef __NEURAL_NETWORKS_LOAD_HELPER_H__
-#define __NEURAL_NETWORKS_LOAD_HELPER_H__
-
-#include <dlfcn.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-/**
- * @brief Print log data
- * @param[in] format Format string of @c printf
- * @param[in] args Argument after format string. (Same with @c printf)
- */
-#define NNAPI_LOG(format, ...) printf(format "\n", __VA_ARGS__);
-
-/**
- * @brief Create a function pointer named @c fn after loading NN API library
- * @param[in] name Name of a function
- */
-#define LOAD_FUNCTION(name) \
- static name##_fn fn = reinterpret_cast<name##_fn>(nnfw::loadFunction(#name));
-
-/**
- * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION
- * @param[in] args List of arguments for the function @c fn
- */
-#define EXECUTE_FUNCTION(...) \
- if (fn != nullptr) { \
- fn(__VA_ARGS__); \
- }
-
-/**
- * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION
- * @param[in] args List of arguments for the function @c fn
- * @return the return value of @c fn
- */
-#define EXECUTE_FUNCTION_RETURN(...) return fn != nullptr ? fn(__VA_ARGS__) : 0;
-
-namespace nnfw
-{
-
-/**
- * @brief Load NN API library
- * @param[in] name path of NN API library
- * @return a symbol table handle of NN API library
- */
-inline void* loadLibrary(const char* name) {
- // TODO: change RTLD_LOCAL? Assumes there can be multiple instances of nn
- // api RT
- void* handle = nullptr;
-#if 1 //#ifdef __ANDROID__
- handle = dlopen(name, RTLD_LAZY | RTLD_LOCAL);
- if (handle == nullptr) {
- NNAPI_LOG("nnapi error: unable to open library %s", name);
- NNAPI_LOG(" %s", dlerror());
- }
-#endif
- return handle;
-}
-
-/**
- * @brief Load libneuralnetworks.so and return handle of library
- * @return a symbol table handle of NN API library
- */
-inline void* getLibraryHandle() {
- static void* handle = loadLibrary("libneuralnetworks.so");
- return handle;
-}
-
-/**
- * @brief Return function ptr in libneuralnetworks.so
- * @param[in] name Name of function
- * @return function pointer
- */
-inline void* loadFunction(const char* name) {
- void* fn = nullptr;
- if (getLibraryHandle() != nullptr) {
- fn = dlsym(getLibraryHandle(), name);
- }
- if (fn == nullptr) {
- NNAPI_LOG("nnapi error: unable to open function %s", name);
- NNAPI_LOG(" %s", dlerror());
- abort();
- }
- else {
-#ifdef _GNU_SOURCE
- Dl_info info;
- if (dladdr(fn, &info))
- {
- NNAPI_LOG("nnapi function '%s' is loaded from '%s' ", name, info.dli_fname);
- }
- else
- {
- NNAPI_LOG("nnapi function '%s' is failed to load", name);
- }
-
-#endif // _GNU_SOURCE
- }
- return fn;
-}
-
-/**
- * @brief Check if libneuralnetworks.so can be loaded
- * @return @c true if loading is successful, otherwise @c false.
- */
-inline bool NNAPIExists() {
- static bool nnapi_is_available = getLibraryHandle();
- return nnapi_is_available;
-}
-
-} // namespace nnfw
-
-#endif // __NEURAL_NETWORKS_LOAD_HELPER_H__
diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h
deleted file mode 100644
index 60b16f766..000000000
--- a/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h
+++ /dev/null
@@ -1,709 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This header is derived from part of the following file (in TensorFlow v1.12)
-// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h'
-#ifndef __NEURAL_NETWORKS_SHIM__
-#define __NEURAL_NETWORKS_SHIM__
-
-#include "NeuralNetworks.h"
-#include "NeuralNetworksLoadHelpers.h"
-
-// nn api function types
-
-typedef int (*ANeuralNetworksMemory_createFromFd_fn)(
- size_t size, int protect, int fd, size_t offset,
- ANeuralNetworksMemory** memory);
-
-typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory* memory);
-
-typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel** model);
-
-typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel* model);
-
-typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel* model);
-
-typedef int (*ANeuralNetworksCompilation_create_fn)(
- ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation);
-
-typedef void (*ANeuralNetworksCompilation_free_fn)(
- ANeuralNetworksCompilation* compilation);
-
-typedef int (*ANeuralNetworksCompilation_setPreference_fn)(
- ANeuralNetworksCompilation* compilation, int32_t preference);
-
-typedef int (*ANeuralNetworksCompilation_finish_fn)(
- ANeuralNetworksCompilation* compilation);
-
-typedef int (*ANeuralNetworksModel_addOperand_fn)(
- ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type);
-
-typedef int (*ANeuralNetworksModel_setOperandValue_fn)(
- ANeuralNetworksModel* model, int32_t index, const void* buffer,
- size_t length);
-
-typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
- ANeuralNetworksModel* model, int32_t index,
- const ANeuralNetworksMemory* memory, size_t offset, size_t length);
-
-typedef int (*ANeuralNetworksModel_addOperation_fn)(
- ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
- uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
- const uint32_t* outputs);
-
-typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(
- ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
- uint32_t outputCount, const uint32_t* outputs);
-
-typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(
- ANeuralNetworksModel* model, bool allow);
-
-typedef int (*ANeuralNetworksExecution_create_fn)(
- ANeuralNetworksCompilation* compilation,
- ANeuralNetworksExecution** execution);
-
-typedef void (*ANeuralNetworksExecution_free_fn)(
- ANeuralNetworksExecution* execution);
-
-typedef int (*ANeuralNetworksExecution_setInput_fn)(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const void* buffer, size_t length);
-
-typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
- size_t offset, size_t length);
-
-typedef int (*ANeuralNetworksExecution_setOutput_fn)(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, void* buffer, size_t length);
-
-typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
- size_t offset, size_t length);
-
-typedef int (*ANeuralNetworksExecution_startCompute_fn)(
- ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event);
-
-typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent* event);
-
-typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent* event);
-
-/**
- * Creates a shared memory object from a file descriptor.
- *
- * The shared memory is backed by a file descriptor via mmap.
- * See {@link ANeuralNetworksMemory} for a description on how to use
- * this shared memory.
- *
- * @param size The requested size in bytes.
- * Must not be larger than the file size.
- * @param prot The desired memory protection for the mapping.
- * It is either PROT_NONE or the bitwise OR of one or
- * more of the following flags: PROT_READ, PROT_WRITE.
- * @param fd The requested file descriptor.
- * The file descriptor has to be mmap-able. The file
- * descriptor will be duplicated.
- * @param offset The offset to the beginning of the file of the area to map.
- * The offset has to be aligned to a page size.
- * @param memory The memory object to be created.
- * Set to NULL if unsuccessful.
- *
- * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
- */
-inline int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd,
- size_t offset,
- ANeuralNetworksMemory** memory) {
- LOAD_FUNCTION(ANeuralNetworksMemory_createFromFd);
- EXECUTE_FUNCTION_RETURN(size, protect, fd, offset, memory);
-}
-
-/**
- * Delete a memory object.
- *
- * Destroys the object used by the run time to keep track of the memory.
- * This will free the underlying actual memory if no other code has open
- * handles to this memory.
- *
- * @param memory The memory object to be freed.
- */
-inline void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) {
- LOAD_FUNCTION(ANeuralNetworksMemory_free);
- EXECUTE_FUNCTION(memory);
-}
-
-/**
- * Create an empty {@link ANeuralNetworksModel}.
- *
- * <p>This only creates the object. Computation is performed once
- * {@link ANeuralNetworksExecution_startCompute} is invoked.
- *
- * The model should be constructed with calls to
- * {@link ANeuralNetworksModel_addOperation} and
- * {@link ANeuralNetworksModel_addOperand}
- *
- * <p>{@link ANeuralNetworksModel_finish} should be called once the model
- * has been fully constructed.</p>
- *
- * <p>{@link ANeuralNetworksModel_free} should be called once the model
- * is no longer needed.</p>
- *
- * @param model The {@link ANeuralNetworksModel} to be created.
- * Set to NULL if unsuccessful.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
- LOAD_FUNCTION(ANeuralNetworksModel_create);
- EXECUTE_FUNCTION_RETURN(model);
-}
-
-/**
- * Destroy a model.
- *
- * The model need not have been finished by a call to
- * {@link ANeuralNetworksModel_finish}.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be destroyed. Passing NULL is acceptable and
- * results in no operation.
- */
-inline void ANeuralNetworksModel_free(ANeuralNetworksModel* model) {
- LOAD_FUNCTION(ANeuralNetworksModel_free);
- EXECUTE_FUNCTION(model);
-}
-
-/**
- * Indicate that we have finished modifying a model. Required before
- * calling {@link ANeuralNetworksCompilation_create}.
- *
- * An application must ensure that no other thread uses
- * the model at the same time.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be finished.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) {
- LOAD_FUNCTION(ANeuralNetworksModel_finish);
- EXECUTE_FUNCTION_RETURN(model);
-}
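Taken together, the functions above imply the following model lifecycle (a sketch; the build steps in the middle are defined next):

ANeuralNetworksModel* model = nullptr;
if (ANeuralNetworksModel_create(&model) == ANEURALNETWORKS_NO_ERROR) {
  // ... addOperand / setOperandValue / addOperation / identifyInputsAndOutputs ...
  ANeuralNetworksModel_finish(model); // freezes the model; no further edits
  // ... compile and execute (see below) ...
  ANeuralNetworksModel_free(model);
}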
-
-/**
- * Add an operand to a model.
- *
- * The order in which the operands are added is important. The first one added
- * to a model will have the index value 0, the second 1, etc. These indexes are
- * used as operand identifiers in {@link ANeuralNetworksModel_addOperation},
- * {@link ANeuralNetworksExecution_setInput},
- * {@link ANeuralNetworksExecution_setInputFromMemory},
- * {@link ANeuralNetworksExecution_setOutput},
- * {@link ANeuralNetworksExecution_setOutputFromMemory} and
- * {@link ANeuralNetworksModel_setOperandValue}.
- *
- * To build a model that can accommodate inputs of various sizes, as you may
- * want to do for a CNN, set the size of the dimensions that will vary at run
- * time to 0. If you do so, provide the full dimensions when calling
- * {@link ANeuralNetworksExecution_setInput} or {@link
- * ANeuralNetworksExecution_setInputFromMemory}.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be modified.
- * @param type The {@link ANeuralNetworksOperandType} that describes the shape
- * of the operand.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_addOperand(
- ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type) {
- LOAD_FUNCTION(ANeuralNetworksModel_addOperand);
- EXECUTE_FUNCTION_RETURN(model, type);
-}
-
-/**
- * Sets an operand to a constant value.
- *
- * For scalar values, the content of buffer is copied into the model.
- *
- * For tensor values, a pointer to the buffer is stored within the model.
- * The application is responsible for not changing the content of this region
- * until all executions using this model have completed. As the data may
- * be copied during processing, modifying the data after this call yields
- * undefined results.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be modified.
- * @param index The index of the model operand we're setting.
- * @param buffer A pointer to the data to use.
- * @param length The size in bytes of the data value.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model,
- int32_t index,
- const void* buffer,
- size_t length) {
- LOAD_FUNCTION(ANeuralNetworksModel_setOperandValue);
- EXECUTE_FUNCTION_RETURN(model, index, buffer, length);
-}
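A sketch of the index ordering described above, building two float inputs and a fused-activation scalar for an ADD operation (the operand shapes are illustrative):

uint32_t dims[1] = {2};
ANeuralNetworksOperandType float_tensor = {ANEURALNETWORKS_TENSOR_FLOAT32, 1,
                                           dims, 0.0f, 0};
ANeuralNetworksOperandType int_scalar = {ANEURALNETWORKS_INT32, 0, nullptr,
                                         0.0f, 0};
ANeuralNetworksModel_addOperand(model, &float_tensor); // operand 0
ANeuralNetworksModel_addOperand(model, &float_tensor); // operand 1
ANeuralNetworksModel_addOperand(model, &int_scalar);   // operand 2
int32_t act = ANEURALNETWORKS_FUSED_NONE;
// Scalar constant: the value is copied into the model.
ANeuralNetworksModel_setOperandValue(model, 2, &act, sizeof(act));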
-
-/**
- * Sets an operand to a value stored in a memory object.
- *
- * The content of the memory is not copied. A reference to that memory is stored
- * inside the model. The application is responsible for not changing the content
- * of the memory region until all executions using this model have completed.
- * As the data may be copied during processing, modifying the data after this
- * call yields undefined results.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be modified.
- * @param index The index of the model operand we're setting.
- * @param memory The memory containing the data.
- * @param offset This specifies the location of the data within the memory.
- * The offset is in bytes from the start of memory.
- * @param length The size in bytes of the data value.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_setOperandValueFromMemory(
- ANeuralNetworksModel* model, int32_t index,
- const ANeuralNetworksMemory* memory, size_t offset, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksModel_setOperandValueFromMemory);
- EXECUTE_FUNCTION_RETURN(model, index, memory, offset, length);
-}
-
-/**
- * Add an operation to a model.
- *
- * @param model The model to be modified.
- * @param type The type of the operation.
- * @param inputCount The number of entries in the inputs array.
- * @param inputs An array of indexes identifying the input operands.
- * @param outputCount The number of entries in the outputs array.
- * @param outputs An array of indexes identifying the output operands.
- *
- * The operands specified by inputs and outputs must have been
- * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
- ANeuralNetworksOperationType type,
- uint32_t inputCount,
- const uint32_t* inputs,
- uint32_t outputCount,
- const uint32_t* outputs) {
- LOAD_FUNCTION(ANeuralNetworksModel_addOperation);
- EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount,
- outputs);
-}
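Continuing the sketch: an ADD operation consuming operands 0-2 and producing a new output operand.

ANeuralNetworksModel_addOperand(model, &float_tensor); // operand 3 (output)
uint32_t add_inputs[3] = {0, 1, 2};
uint32_t add_outputs[1] = {3};
ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, add_inputs, 1,
                                  add_outputs);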
-
-/**
- * Specifies which operands will be the model's inputs and outputs.
- *
- * An operand cannot be used for both input and output. Doing so will
- * return an error.
- *
- * @param model The model to be modified.
- * @param inputCount The number of entries in the inputs array.
- * @param inputs An array of indexes identifying the input operands.
- * @param outputCount The number of entries in the outputs array.
- * @param outputs An array of indexes identifying the output operands.
- *
- * The operands specified by inputs and outputs must have been
- * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_identifyInputsAndOutputs(
- ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
- uint32_t outputCount, const uint32_t* outputs) {
- LOAD_FUNCTION(ANeuralNetworksModel_identifyInputsAndOutputs);
- EXECUTE_FUNCTION_RETURN(model, inputCount, inputs, outputCount, outputs);
-}
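And the final build step of the sketch, declaring the model boundary before freezing it:

uint32_t model_inputs[2] = {0, 1};
uint32_t model_outputs[1] = {3};
ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, model_inputs, 1,
                                              model_outputs);
ANeuralNetworksModel_finish(model);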
-
-/**
- * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
- * calculated with range and/or precision as low as that of the IEEE 754 16-bit
- * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
- * must be calculated using at least the range and precision of the IEEE 754
- * 32-bit floating-point format.
- *
- * @param model The model to be modified.
- * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
- * calculated with range and/or precision as low as that of the
- * IEEE 754 16-bit floating point format. 'false' indicates
- * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
- * at least the range and precision of the IEEE 754 32-bit floating
- * point format.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * Available since API level 28.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_relaxComputationFloat32toFloat16(
- ANeuralNetworksModel* model, bool allow) {
- LOAD_FUNCTION(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
- EXECUTE_FUNCTION_RETURN(model, allow);
-}
-
-/**
- * Create a {@link ANeuralNetworksCompilation} to compile the given model.
- * This only creates the object. Compilation is only performed once
- * {@link ANeuralNetworksCompilation_finish} is invoked.
- *
- * <p>The provided model must outlive the compilation.</p>
- *
- * The model must already have been finished by a call to
- * {@link ANeuralNetworksModel_finish}.
- *
- * See {@link ANeuralNetworksCompilation} for information on multithreaded
- * usage.
- *
- * @param model The {@link ANeuralNetworksModel} to be compiled.
- * @param compilation The newly created object or NULL if unsuccessful.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
- * if the model is invalid.
- */
-inline int ANeuralNetworksCompilation_create(
- ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation) {
- LOAD_FUNCTION(ANeuralNetworksCompilation_create);
- EXECUTE_FUNCTION_RETURN(model, compilation);
-}
-
-/**
- * Destroy a compilation.
- *
- * <p>If called on a compilation for which
- * {@link ANeuralNetworksCompilation_start} has been called, the
- * function will return immediately but will mark the compilation to be deleted
- * once the compilation completes. A subsequent call to
- * {@link ANeuralNetworksCompilation_wait} will return ERROR_DELETED.
- *
- * See {@link ANeuralNetworksCompilation} for information on multithreaded
- * usage.
- *
- * @param compilation The compilation to be destroyed. Passing NULL is
- * acceptable and results in no operation.
- */
-inline void ANeuralNetworksCompilation_free(
- ANeuralNetworksCompilation* compilation) {
- LOAD_FUNCTION(ANeuralNetworksCompilation_free);
- EXECUTE_FUNCTION(compilation);
-}
-
-/**
- * Sets the execution preference.
- *
- * <p>Provides guidance to the runtime when trade-offs are possible.</p>
- *
- * See {@link ANeuralNetworksCompilation} for information on multithreaded
- * usage.
- *
- * @param compilation The compilation to be modified.
- * @param preference Either {@link PREFER_LOW_POWER},
- * {@link PREFER_SINGLE_FAST_ANSWER}, or
- * {@link PREFER_SUSTAINED_SPEED}.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksCompilation_setPreference(
- ANeuralNetworksCompilation* compilation, int32_t preference) {
- LOAD_FUNCTION(ANeuralNetworksCompilation_setPreference);
- EXECUTE_FUNCTION_RETURN(compilation, preference);
-}
-
-/**
- * Indicate that we have finished modifying a compilation. Required before
- * calling {@link ANeuralNetworksExecution_create}.
- *
- * An application must ensure that no other thread uses the compilation
- * at the same time.
- *
- * See {@link ANeuralNetworksCompilation} for information on multithreaded
- * usage.
- *
- * @param compilation The compilation to be finished.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksCompilation_finish(
- ANeuralNetworksCompilation* compilation) {
- LOAD_FUNCTION(ANeuralNetworksCompilation_finish);
- EXECUTE_FUNCTION_RETURN(compilation);
-}
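The compilation functions above chain as follows (a sketch continuing the running example):

ANeuralNetworksCompilation* compilation = nullptr;
if (ANeuralNetworksCompilation_create(model, &compilation) ==
    ANEURALNETWORKS_NO_ERROR) {
  ANeuralNetworksCompilation_setPreference(
      compilation, ANEURALNETWORKS_PREFER_SUSTAINED_SPEED);
  if (ANeuralNetworksCompilation_finish(compilation) != ANEURALNETWORKS_NO_ERROR) {
    // compilation failed; free the objects and bail out
  }
}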
-/**
- * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
- * This only creates the object. Computation is only performed once
- * {@link ANeuralNetworksExecution_startCompute} is invoked.
- *
- * <p>The provided compilation must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
- * @param execution The newly created object or NULL if unsuccessful.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
- * if the compilation is invalid.
- */
-inline int ANeuralNetworksExecution_create(
- ANeuralNetworksCompilation* compilation,
- ANeuralNetworksExecution** execution) {
- LOAD_FUNCTION(ANeuralNetworksExecution_create);
- EXECUTE_FUNCTION_RETURN(compilation, execution);
-}
-
-/**
- * Destroy an execution.
- *
- * <p>If called on an execution for which
- * {@link ANeuralNetworksExecution_startCompute} has been called, the
- * function will return immediately but will mark the execution to be deleted
- * once the computation completes. A subsequent call to
- * {@link ANeuralNetworksEvent_wait} will return ANEURALNETWORKS_ERROR_DELETED.
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be destroyed. Passing NULL is acceptable
- * and results in no operation.
- */
-inline void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) {
- LOAD_FUNCTION(ANeuralNetworksExecution_free);
- EXECUTE_FUNCTION(execution);
-}
-
-/**
- * Associate a user buffer with an input of the model of the
- * {@link ANeuralNetworksExecution}.
- *
- * <p>The provided buffer must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be modified.
- * @param index The index of the input argument we are setting. It is
- * an index into the lists passed to
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
- * the index associated with {@link
- * ANeuralNetworksModel_addOperand}.
- * @param type The type of the operand. This should be used to specify the
- * dimensions that were set to 0 when the operand was added to the
- * model. All other properties of the type must be the same as
- * specified in the model. If the type is the same as specified
- * when the model was built, NULL can be passed.
- * @param buffer The buffer containing the data.
- * @param length The length in bytes of the buffer.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
- * the index is not recognized or the buffer is too small for the input.
- */
-inline int ANeuralNetworksExecution_setInput(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const void* buffer, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksExecution_setInput);
- EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
-}
-
-/**
- * Associate part of a memory object with an input of the model of the
- * {@link ANeuralNetworksExecution}.
- *
- * <p>The provided memory must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be modified.
- * @param index The index of the input argument we are setting. It is
- * an index into the lists passed to
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
- * the index associated with {@link
- * ANeuralNetworksModel_addOperand}.
- * @param type The type of the operand. This can be used to specify the
- * dimensions that were set to 0 when the operand was added to the
- * model. All other values must be the same as specified in the
- * model. If the type is the same as specified when the model
- * was built, NULL can be passed.
- * @param memory The memory containing the data.
- * @param offset This specifies the location of the data within the memory.
- * The offset is in bytes from the start of memory.
- * @param length The size in bytes of the data value.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
- * the index is not recognized or the buffer is too small for the input.
- */
-inline int ANeuralNetworksExecution_setInputFromMemory(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
- size_t offset, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksExecution_setInputFromMemory);
- EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
-}
-
-/**
- * Associate a user buffer with an output of the model of the
- * {@link ANeuralNetworksExecution}.
- *
- * <p>The provided buffer must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be modified.
- * @param index The index of the output argument we are setting. It is
- * an index into the lists passed to
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
- * the index associated with {@link
- * ANeuralNetworksModel_addOperand}.
- * @param type The type of the operand. This can be used to specify the
- * dimensions that were set to 0 when the operand was added to the
- * model. All other values must be the same as specified in the
- * model. If the type is the same as specified when the model
- * was built, NULL can be passed.
- * @param buffer The buffer where the data is to be written.
- * @param length The length in bytes of the buffer.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
- * the index is not recognized or the buffer is too small for the output.
- */
-inline int ANeuralNetworksExecution_setOutput(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, void* buffer, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksExecution_setOutput);
- EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
-}
-
-/**
- * Associate part of a memory object with an output of the model of the
- * {@link ANeuralNetworksExecution}.
- *
- * <p>The provided memory must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be modified.
- * @param index The index of the output argument we are setting. It is
- * an index into the lists passed to
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
- * the index associated with {@link
- * ANeuralNetworksModel_addOperand}.
- * @param type The type of the operand. This can be used to specify the
- * dimensions that were set to 0 when the operand was added to the
- * model. All other values must be the same as specified in the
- * model. If the type is the same as specified when the model
- * was built, NULL can be passed.
- * @param memory The memory where the data is to be stored.
- * @param offset This specifies the location of the data within the memory.
- * The offset is in bytes from the start of memory.
- * @param length The length in bytes of the data value.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
- * the index is not recognized or the buffer is too small for the output.
- */
-inline int ANeuralNetworksExecution_setOutputFromMemory(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
- size_t offset, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksExecution_setOutputFromMemory);
- EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
-}
-
-/**
- * Schedule evaluation of the execution.
- *
- * <p>Once the model has been applied and the outputs are ready to be
- * consumed, the execution will be signaled. Use
- * {@link ANeuralNetworksEvent_wait} to wait for that signal.</p>
- *
- * Multiple executions can be scheduled and evaluated concurrently, and
- * compilations can be performed concurrently with executions. The runtime makes
- * no guarantee on the ordering of the completion of compilations and
- * executions. If it's important to the application, the application should
- * enforce the ordering by using {@link ANeuralNetworksEvent_wait}.
- *
- * {@link ANeuralNetworksEvent_wait} must be called to reclaim the resources
- * used by the execution.
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be scheduled and executed.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksExecution_startCompute(
- ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event) {
- LOAD_FUNCTION(ANeuralNetworksExecution_startCompute);
- EXECUTE_FUNCTION_RETURN(execution, event);
-}
-
-/**
- * Waits until the execution completes.
- *
- * More than one thread can wait on an event. When the execution completes,
- * all threads will be released.
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
- */
-inline int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) {
- LOAD_FUNCTION(ANeuralNetworksEvent_wait);
- EXECUTE_FUNCTION_RETURN(event);
-}
-
-/**
- * Destroys the event.
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- */
-inline void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) {
- LOAD_FUNCTION(ANeuralNetworksEvent_free);
- EXECUTE_FUNCTION(event);
-}
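A sketch of the full execution flow defined above, completing the running example (NULL is passed for the operand types since the shapes match the model):

float in0[2] = {1.f, 2.f}, in1[2] = {3.f, 4.f}, out[2] = {};
ANeuralNetworksExecution* execution = nullptr;
ANeuralNetworksEvent* event = nullptr;
ANeuralNetworksExecution_create(compilation, &execution);
ANeuralNetworksExecution_setInput(execution, 0, nullptr, in0, sizeof(in0));
ANeuralNetworksExecution_setInput(execution, 1, nullptr, in1, sizeof(in1));
ANeuralNetworksExecution_setOutput(execution, 0, nullptr, out, sizeof(out));
if (ANeuralNetworksExecution_startCompute(execution, &event) ==
    ANEURALNETWORKS_NO_ERROR) {
  ANeuralNetworksEvent_wait(event); // blocks until `out` is ready
  ANeuralNetworksEvent_free(event);
}
ANeuralNetworksExecution_free(execution);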
-
-#endif // __NEURAL_NETWORKS_SHIM__
diff --git a/runtime/libs/nnapi/v1.2/CMakeLists.txt b/runtime/libs/nnapi/v1.2/CMakeLists.txt
deleted file mode 100644
index 21ec3015f..000000000
--- a/runtime/libs/nnapi/v1.2/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-add_library(nnfw_lib_nnapi_1_2 INTERFACE)
-
-target_include_directories(nnfw_lib_nnapi_1_2 INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(nnfw_lib_nnapi_1_2 INTERFACE nnfw-nnapi-header)
diff --git a/runtime/libs/profiling/CMakeLists.txt b/runtime/libs/profiling/CMakeLists.txt
index e0398ce93..b115cc1c6 100644
--- a/runtime/libs/profiling/CMakeLists.txt
+++ b/runtime/libs/profiling/CMakeLists.txt
@@ -4,4 +4,3 @@ add_library(nnfw_lib_profiling STATIC ${SOURCES})
set_property(TARGET nnfw_lib_profiling PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(nnfw_lib_profiling PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
target_link_libraries(nnfw_lib_profiling PRIVATE nnfw_common)
-target_link_libraries(nnfw_lib_profiling PRIVATE nnfw_coverage)
diff --git a/runtime/libs/profiling/src/profiling/time.cpp b/runtime/libs/profiling/src/profiling/time.cpp
index 4e045556e..5f6f6657e 100644
--- a/runtime/libs/profiling/src/profiling/time.cpp
+++ b/runtime/libs/profiling/src/profiling/time.cpp
@@ -25,7 +25,7 @@
#if defined(_MSC_VER)
#include <chrono> // NOLINT(build/c++11)
#else
-#include <sys/time.h>
+#include <time.h>
#endif
namespace tflite {
@@ -43,9 +43,9 @@ uint64_t NowMicros() {
#else
uint64_t NowMicros() {
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return static_cast<uint64_t>(ts.tv_nsec) / 1e3 + static_cast<uint64_t>(ts.tv_sec) * 1e6;
}
#endif // defined(_MSC_VER)
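The change above swaps the wall clock (gettimeofday) for CLOCK_MONOTONIC, so profiled intervals can no longer jump when the system time is adjusted. A C++11 equivalent of the new behavior, as a sketch (not part of this patch):

#include <chrono>
#include <cstdint>

uint64_t NowMicrosSteady() {
  using namespace std::chrono;
  // steady_clock is monotonic, mirroring CLOCK_MONOTONIC.
  return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}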
diff --git a/runtime/libs/rua/anchor/CMakeLists.txt b/runtime/libs/rua/anchor/CMakeLists.txt
index 6e65641f4..fb41c47ea 100644
--- a/runtime/libs/rua/anchor/CMakeLists.txt
+++ b/runtime/libs/rua/anchor/CMakeLists.txt
@@ -6,4 +6,3 @@ target_include_directories(nnfw_lib_rua_anchor PUBLIC include)
target_link_libraries(nnfw_lib_rua_anchor PUBLIC nnfw_lib_rua_core)
target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_lib_rua_dyn)
target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_common)
-target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_coverage)
diff --git a/runtime/libs/rua/dyn/CMakeLists.txt b/runtime/libs/rua/dyn/CMakeLists.txt
index 3f9ac8928..01d8a7c02 100644
--- a/runtime/libs/rua/dyn/CMakeLists.txt
+++ b/runtime/libs/rua/dyn/CMakeLists.txt
@@ -5,4 +5,3 @@ set_target_properties(nnfw_lib_rua_dyn PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(nnfw_lib_rua_dyn PUBLIC include)
target_link_libraries(nnfw_lib_rua_dyn PUBLIC nnfw_lib_rua_core)
target_link_libraries(nnfw_lib_rua_dyn PRIVATE nnfw_common)
-target_link_libraries(nnfw_lib_rua_dyn PRIVATE nnfw_coverage)
diff --git a/runtime/libs/rua/dyn/src/DynamicBinder.cpp b/runtime/libs/rua/dyn/src/DynamicBinder.cpp
index fa3f0bb1e..f49892de1 100644
--- a/runtime/libs/rua/dyn/src/DynamicBinder.cpp
+++ b/runtime/libs/rua/dyn/src/DynamicBinder.cpp
@@ -97,8 +97,8 @@ typedef int (*ANeuralNetworksModel_setOperandValue_fn)(ANeuralNetworksModel *mod
const void *buffer, size_t length);
typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
- ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksMemory *memory, size_t offset,
- size_t length);
+ ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksMemory *memory, size_t offset,
+ size_t length);
typedef int (*ANeuralNetworksModel_addOperation_fn)(ANeuralNetworksModel *model,
ANeuralNetworksOperationType type,
@@ -242,8 +242,8 @@ typedef int (*ANeuralNetworksExecution_setInput_fn)(ANeuralNetworksExecution *ex
const void *buffer, size_t length);
typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
- ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
- const ANeuralNetworksMemory *memory, size_t offset, size_t length);
+ ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset, size_t length);
typedef int (*ANeuralNetworksExecution_setOutput_fn)(ANeuralNetworksExecution *execution,
int32_t index,
@@ -251,8 +251,8 @@ typedef int (*ANeuralNetworksExecution_setOutput_fn)(ANeuralNetworksExecution *e
void *buffer, size_t length);
typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
- ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
- const ANeuralNetworksMemory *memory, size_t offset, size_t length);
+ ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type,
+ const ANeuralNetworksMemory *memory, size_t offset, size_t length);
typedef int (*ANeuralNetworksExecution_startCompute_fn)(ANeuralNetworksExecution *execution,
ANeuralNetworksEvent **event);
diff --git a/runtime/libs/tflite/CMakeLists.txt b/runtime/libs/tflite/CMakeLists.txt
index 93a3c9789..3c5779099 100644
--- a/runtime/libs/tflite/CMakeLists.txt
+++ b/runtime/libs/tflite/CMakeLists.txt
@@ -1,11 +1,9 @@
-nnfw_find_package(TensorFlowLite EXACT 1.13.1 QUIET)
+nnfw_find_package(TensorFlowLite EXACT 2.8.0 QUIET)
if(NOT TensorFlowLite_FOUND)
message(STATUS "Check tensorflow lite library extension build: need tensorflow lite library")
return()
endif(NOT TensorFlowLite_FOUND)
-add_subdirectory(port)
-
file(GLOB_RECURSE SOURCES "src/*.cpp")
file(GLOB_RECURSE TESTS "src/*.test.cpp")
list(REMOVE_ITEM SOURCES ${TESTS})
@@ -13,11 +11,10 @@ list(REMOVE_ITEM SOURCES ${TESTS})
add_library(nnfw_lib_tflite STATIC ${SOURCES})
set_target_properties(nnfw_lib_tflite PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(nnfw_lib_tflite PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(nnfw_lib_tflite PUBLIC tensorflow-lite-ex)
+target_link_libraries(nnfw_lib_tflite PUBLIC tensorflow-lite-2.8.0)
target_link_libraries(nnfw_lib_tflite PUBLIC nnfw_lib_misc)
target_link_libraries(nnfw_lib_tflite PRIVATE ${LIB_PTHREAD} dl)
target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_common)
-target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_coverage)
if(NOT ENABLE_TEST)
return()
diff --git a/runtime/libs/tflite/include/tflite/Diff.h b/runtime/libs/tflite/include/tflite/Diff.h
index fdc1a310b..2d30d4135 100644
--- a/runtime/libs/tflite/include/tflite/Diff.h
+++ b/runtime/libs/tflite/include/tflite/Diff.h
@@ -23,7 +23,7 @@
#ifndef __NNFW_TFLITE_DIFF_H__
#define __NNFW_TFLITE_DIFF_H__
-#include "tensorflow/lite/interpreter.h"
+#include "tflite/TensorView.h"
#include "misc/RandomGenerator.h"
#include "misc/tensor/Index.h"
@@ -31,7 +31,7 @@
#include "misc/tensor/Shape.h"
#include "misc/tensor/Comparator.h"
-#include "tflite/TensorView.h"
+#include <tensorflow/lite/c/c_api.h>
#include <functional>
#include <vector>
@@ -47,7 +47,7 @@ public:
 * @param[in] comparator Comparator object for tensor comparison
*/
TfLiteInterpMatchApp(const nnfw::misc::tensor::Comparator &comparator)
- : _verbose{false}, _comparator(comparator)
+ : _verbose{false}, _comparator(comparator)
{
// DO NOTHING
}
@@ -65,11 +65,11 @@ private:
public:
/**
 * @brief Run two interpreters and return the output matching
- * @param[in] pure Interpreter object of expected(with TfLite)
- * @param[in] nnapi Interpreter object of obtained(through NNAPI)
+ * @param[in] expected Interpreter object of expected
+ * @param[in] obtained Interpreter object of obtained
* @return @c true if two Interpreter results are same, otherwise @c false
*/
- bool run(::tflite::Interpreter &pure, ::tflite::Interpreter &nnapi) const;
+ bool run(TfLiteInterpreter &expected, TfLiteInterpreter &obtained) const;
/**
* @brief Compare two TensorView values and return the match result
* @param[in] expected TensorView object to read expected values
diff --git a/runtime/libs/tflite/include/tflite/FeatureView.h b/runtime/libs/tflite/include/tflite/FeatureView.h
deleted file mode 100644
index a8f069c40..000000000
--- a/runtime/libs/tflite/include/tflite/FeatureView.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file FeatureView.h
- * @brief This file contains FeatureView class
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_FEATURE_VIEW_H__
-#define __NNFW_TFLITE_FEATURE_VIEW_H__
-
-#include "tensorflow/lite/interpreter.h"
-
-#include "tflite/InputIndex.h"
-#include "tflite/OutputIndex.h"
-
-#include "misc/feature/Shape.h"
-#include "misc/feature/Reader.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-
-template <typename T> class FeatureView;
-
-/**
- * @brief Class to support reading element of float type feature
- */
-template <> class FeatureView<float> : public nnfw::misc::feature::Reader<float>
-{
-public:
- /**
- * @brief Construct a new FeatureView object
- * @param[in] interp Interpreter to read from
- * @param[in] index InputIndex index of input
- */
- FeatureView(::tflite::Interpreter &interp, const InputIndex &index);
- /**
- * @brief Construct a new FeatureView object
- * @param[in] interp Interpreter to read from
- * @param[in] index OutputIndex index of output
- */
- FeatureView(::tflite::Interpreter &interp, const OutputIndex &index);
-
-public:
- /**
- * @brief Get value of element using channel, row and column index
- * @param[in] ch Channel index
- * @param[in] row Row index
- * @param[in] col Column index
- * @return Value of element
- */
- float at(uint32_t ch, uint32_t row, uint32_t col) const;
- /**
- * @brief Get reference of element using channel, row and column index
- * @param[in] ch Channel index
- * @param[in] row Row index
- * @param[in] col Column index
- * @return Reference of element
- */
- float &at(uint32_t ch, uint32_t row, uint32_t col);
-
- float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const = 0;
-
-private:
- /**
- * @brief Get offset of element from channel, row and column index
- * @param[in] ch Channel index
- * @param[in] row Row index
- * @param[in] col Column index
- * @return Offset of element
- */
- uint32_t getElementOffset(uint32_t ch, uint32_t row, uint32_t col) const
- {
- uint32_t res = 0;
-
- // TensorFlow Lite assumes NHWC ordering for tensors
- res += row * _shape.W * _shape.C;
- res += col * _shape.C;
- res += ch;
-
- return res;
- }
-
-private:
- nnfw::misc::feature::Shape _shape;
- float *_base;
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_FEATURE_VIEW_H__
diff --git a/runtime/libs/tflite/include/tflite/InterpreterSession.h b/runtime/libs/tflite/include/tflite/InterpreterSession.h
index deaf05a7f..8fc19494a 100644
--- a/runtime/libs/tflite/include/tflite/InterpreterSession.h
+++ b/runtime/libs/tflite/include/tflite/InterpreterSession.h
@@ -40,7 +40,7 @@ public:
* @brief Construct a InterpreterSession object with interpreter of TfLite
* @param[in] interp The TfLite interpreter pointer
*/
- InterpreterSession(::tflite::Interpreter *interp) : _interp{interp}
+ InterpreterSession(TfLiteInterpreter *interp) : _interp{interp}
{
// DO NOTHING
}
@@ -50,7 +50,7 @@ public:
* @brief Get TfLite interpreter pointer
* @return The TfLite interpreter
*/
- ::tflite::Interpreter *interp(void) override { return _interp; }
+ TfLiteInterpreter *interp(void) override { return _interp; }
public:
/**
@@ -59,9 +59,7 @@ public:
*/
bool prepare(void) override
{
- _interp->UseNNAPI(false);
-
- if (kTfLiteOk != _interp->AllocateTensors())
+ if (kTfLiteOk != TfLiteInterpreterAllocateTensors(_interp))
{
return false;
}
@@ -76,7 +74,7 @@ public:
bool run(void) override
{
// Return true if Invoke returns kTfLiteOk
- return kTfLiteOk == _interp->Invoke();
+ return kTfLiteOk == TfLiteInterpreterInvoke(_interp);
}
/**
@@ -90,7 +88,7 @@ public:
}
private:
- ::tflite::Interpreter *const _interp;
+ TfLiteInterpreter *const _interp;
};
} // namespace tflite
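With the migration to the TfLite C API, a session is presumably driven along these lines (a sketch; the model path is illustrative):

#include <tensorflow/lite/c/c_api.h>
#include "tflite/InterpreterSession.h"

TfLiteModel* model = TfLiteModelCreateFromFile("model.tflite"); // illustrative
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreter* interp = TfLiteInterpreterCreate(model, options);

nnfw::tflite::InterpreterSession session{interp};
if (session.prepare() && session.run()) {
  // read results via TfLiteInterpreterGetOutputTensor(interp, i)
}
session.teardown();

TfLiteInterpreterDelete(interp);
TfLiteInterpreterOptionsDelete(options);
TfLiteModelDelete(model);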
diff --git a/runtime/libs/tflite/include/tflite/NNAPISession.h b/runtime/libs/tflite/include/tflite/NNAPISession.h
deleted file mode 100644
index f430e86d3..000000000
--- a/runtime/libs/tflite/include/tflite/NNAPISession.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file NNAPISession.h
- * @brief This file contains NNAPISession class
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_NNAPI_SESSION_H__
-#define __NNFW_TFLITE_NNAPI_SESSION_H__
-
-#include "Session.h"
-#include "tflite/ext/nnapi_delegate.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Class to define NNAPI interpreter session which is inherited from Session class
- */
-class NNAPISession final : public Session
-{
-public:
- /**
- * @brief Construct a NNAPISession object with interpreter of TfLite
- * @param[in] interp The TfLite interpreter pointer
- * @note Invoke BuildGraph() of NNAPI delegate from Interpreter
- */
- NNAPISession(::tflite::Interpreter *interp) : _interp{interp}
- {
- // Construct Graph from Interpreter
- // primary_subgraph: Experimental interface. Returns the 1st subgraph
- _delegate.BuildGraph(&interp->primary_subgraph());
- }
-
-public:
- /**
- * @brief Get TfLite interpreter pointer
- * @return The TfLite interpreter
- */
- ::tflite::Interpreter *interp(void) override { return _interp; }
-
-public:
- /**
- * @brief Prepare the TfLite interpreter session
- * @return @c true if tensor preparation is successful, otherwise @c false
- */
- bool prepare(void) override
- {
- // Explicitly turn off T/F lite internal NNAPI delegation in order to use locally defined
- // NNAPI delegation.
- _interp->UseNNAPI(false);
-
- if (kTfLiteOk != _interp->AllocateTensors())
- {
- return false;
- }
-
- return true;
- }
-
- /**
- * @brief Run the Invoke function of NNAPI delegate
- * @return @c true if Invoke() is successful, otherwise @c false
- */
- bool run(void) override { return kTfLiteOk == _delegate.Invoke(&_interp->primary_subgraph()); }
-
- /**
- * @brief Tear down TfLite interpreter session
- * @return @c true always
- */
- bool teardown(void) override
- {
- // DO NOTHING
- return true;
- }
-
-private:
- ::tflite::Interpreter *const _interp;
- nnfw::tflite::NNAPIDelegate _delegate;
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_NNAPI_SESSION_H__
diff --git a/runtime/libs/tflite/include/tflite/OutputIndex.h b/runtime/libs/tflite/include/tflite/OutputIndex.h
deleted file mode 100644
index dd1ca8d44..000000000
--- a/runtime/libs/tflite/include/tflite/OutputIndex.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file OutputIndex.h
- * @brief This file contains OutputIndex class
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_OUTPUT_INDEX_H__
-#define __NNFW_TFLITE_OUTPUT_INDEX_H__
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Class to define OutputIndex
- */
-class OutputIndex
-{
-public:
- /**
- * @brief Construct a OutputIndex object with index value
- * @param[in] index The value of index
- */
- OutputIndex(int index) : _index(index)
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Get index value as int
- * @return Index value as int
- */
- int asInt(void) const { return _index; }
-
-private:
- int _index;
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_OUTPUT_INDEX_H__
diff --git a/runtime/libs/tflite/include/tflite/Quantization.h b/runtime/libs/tflite/include/tflite/Quantization.h
deleted file mode 100644
index 8272bcdc0..000000000
--- a/runtime/libs/tflite/include/tflite/Quantization.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Quantization.h
- * @brief This file contains BitwiseIntToFloat union and quantization related
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_QUANTIZATION_H__
-#define __NNFW_TFLITE_QUANTIZATION_H__
-
-/**
- * @brief Union to provide bitwise conversion of integer and float
- */
-union BitwiseIntToFloat {
- int i;
- float f;
-};
-
-static const float FLOAT_NEAREST_TO_1 = BitwiseIntToFloat{0x3f7fffff}.f;
-
-#include "tensorflow/lite/context.h"
-
-/**
- * @brief Get TfLiteQuantizationParams object with default values
- * @return TfLiteQuantizationParams object
- */
-TfLiteQuantizationParams make_default_quantization(void);
-
-#endif // __NNFW_TFLITE_QUANTIZATION_H__
diff --git a/runtime/libs/tflite/src/interp/FlatBufferBuilder.cpp b/runtime/libs/tflite/include/tflite/RandomInputInitializer.h
index f54e67202..7dac3a827 100644
--- a/runtime/libs/tflite/src/interp/FlatBufferBuilder.cpp
+++ b/runtime/libs/tflite/include/tflite/RandomInputInitializer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,27 +14,33 @@
* limitations under the License.
*/
-#include "tflite/interp/FlatBufferBuilder.h"
+#ifndef __NNFW_TFLITE_RANDOM_INPUT_INITIALIZER_H__
+#define __NNFW_TFLITE_RANDOM_INPUT_INITIALIZER_H__
-#include "tflite/ext/kernels/register.h"
+#include <misc/RandomGenerator.h>
+
+#include <tensorflow/lite/c/c_api.h>
namespace nnfw
{
namespace tflite
{
-std::unique_ptr<::tflite::Interpreter> FlatBufferBuilder::build(void) const
+class RandomInputInitializer
{
- std::unique_ptr<::tflite::Interpreter> interpreter;
-
- nnfw::tflite::BuiltinOpResolver resolver;
+public:
+ RandomInputInitializer(misc::RandomGenerator &randgen) : _randgen{randgen}
+ {
+ // DO NOTHING
+ }
- ::tflite::InterpreterBuilder builder(_model, resolver);
+ void run(TfLiteInterpreter &interp);
- builder(&interpreter);
-
- return interpreter;
-}
+private:
+ nnfw::misc::RandomGenerator &_randgen;
+};
} // namespace tflite
} // namespace nnfw
+
+#endif // __NNFW_TFLITE_RANDOM_INPUT_INITIALIZER_H__
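Expected usage of the new initializer, as a sketch (the RandomGenerator arguments follow the seed/mean/stddev pattern used elsewhere in this patch, e.g. the removed RandomTestRunner's `{seed, 0.0f, 2.0f}`; `interp` is a prepared TfLiteInterpreter):

nnfw::misc::RandomGenerator randgen{0 /*seed*/, 0.0f /*mean*/, 2.0f /*stddev*/};
nnfw::tflite::RandomInputInitializer initializer{randgen};
initializer.run(*interp); // fills each input tensor of `interp` with random data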
diff --git a/runtime/libs/tflite/include/tflite/RandomTestRunner.h b/runtime/libs/tflite/include/tflite/RandomTestRunner.h
deleted file mode 100644
index c0b304c74..000000000
--- a/runtime/libs/tflite/include/tflite/RandomTestRunner.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file RandomTestRunner.h
- * @brief This file contains class for random input testing
- */
-
-#ifndef __NNFW_TFLITE_RANDOM_TEST_RUNNER_H__
-#define __NNFW_TFLITE_RANDOM_TEST_RUNNER_H__
-
-#include "tflite/interp/Builder.h"
-
-#include <misc/RandomGenerator.h>
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Structure for NNAPI correctness test
- */
-struct RandomTestParam
-{
- int verbose; //!< Verbosity of debug information
- int tolerance; //!< Tolerance of value difference
- int tensor_logging = 0; //!< Save logging to a file if not 0
- std::string log_path = ""; //!< Path of log file, meaningful only when tensor_logging is 1
-};
-
-/**
- * @brief Class to define Random test runner
- */
-class RandomTestRunner
-{
-public:
- /**
- * @brief Construct a new RandomTestRunner object
- * @param[in] seed Random seed value
- * @param[in] param RandomTestParam object for test runner
- */
- RandomTestRunner(uint32_t seed, const RandomTestParam &param)
- : _randgen{seed, 0.0f, 2.0f}, _param{param}
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Run the random test runner
- * @param[in] running_count Number of times to run the tflite interpreter with NNAPI
- * @return 0 if test succeeds, otherwise failure
- */
- int run(size_t running_count);
-
-public:
- /**
- * @brief Get RandomGenerator reference
- * @return RandomGenerator reference
- */
- nnfw::misc::RandomGenerator &generator() { return _randgen; };
-
-public:
- /**
- * @brief Compile the random test runner
- * @param[in] builder Interpreter Builder used to run
- */
- void compile(const nnfw::tflite::Builder &builder);
-
-private:
- nnfw::misc::RandomGenerator _randgen;
- const RandomTestParam _param;
- std::unique_ptr<::tflite::Interpreter> _tfl_interp;
- std::unique_ptr<::tflite::Interpreter> _nnapi;
-
-public:
- /**
- * @brief Create a RandomTestRunner object
- * @param[in] seed Random seed value
- * @return RandomTestRunner object
- */
- static RandomTestRunner make(uint32_t seed);
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_RANDOM_TEST_RUNNER_H__
diff --git a/runtime/libs/tflite/include/tflite/Session.h b/runtime/libs/tflite/include/tflite/Session.h
index b653acf61..0aa2ce7fb 100644
--- a/runtime/libs/tflite/include/tflite/Session.h
+++ b/runtime/libs/tflite/include/tflite/Session.h
@@ -23,7 +23,7 @@
#ifndef __NNFW_TFLITE_SESSION_H__
#define __NNFW_TFLITE_SESSION_H__
-#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/c/c_api.h>
namespace nnfw
{
@@ -44,7 +44,7 @@ struct Session
* @brief Get the Interpreter object pointer
* @return The Interpreter object pointer
*/
- virtual ::tflite::Interpreter *interp(void) = 0;
+ virtual TfLiteInterpreter *interp(void) = 0;
/**
* @brief Prepare the session
diff --git a/runtime/libs/tflite/include/tflite/TensorLogger.h b/runtime/libs/tflite/include/tflite/TensorLogger.h
deleted file mode 100644
index a824c3411..000000000
--- a/runtime/libs/tflite/include/tflite/TensorLogger.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file TensorLogger.h
- * @brief This file contains TensorLogger class
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_TENSOR_LOGGER_H__
-#define __NNFW_TFLITE_TENSOR_LOGGER_H__
-
-#include "misc/tensor/IndexIterator.h"
-#include "tflite/TensorView.h"
-
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/context.h>
-#include <fstream>
-#include <iomanip>
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Class to write input and output value / shape into a file in python form
- * @note This is a utility to write input and output value / shape into a file in python form.\n
- * any python app can load this value by running the python code below:\n
- * exec(open(filename).read())\n
- * generated python code looks like the following: \n
- * tensor_shape_gen = []\n
- * tensor_value_gen = []\n\n
- * tensor_shape_gen.append("{2, 1, 2}")\n
- * tensor_value_gen.append([1, 2, 3, 4])\n\n
- * tensor_shape_gen.append("{2}")\n
- * tensor_value_gen.append([1, 2])\n\n
- * tensor_shape_gen.append("{2, 1, 2}")\n
- * tensor_value_gen.append([1, 4, 3, 8])\n
- */
-class TensorLogger
-{
-private:
- std::ofstream _outfile;
-
-public:
- /**
- * @brief Get TensorLogger instance
- * @return The TensorLogger instance
- */
- static TensorLogger &get()
- {
- static TensorLogger instance;
- return instance;
- }
-
- /**
- * @brief Save the tensor details to file from interpreter
- * @param[in] path The file path to save
- * @param[in] interp The TfLite interpreter
- */
- void save(const std::string &path, ::tflite::Interpreter &interp)
- {
- open(path);
-
- int log_index = 0;
- for (const auto id : interp.inputs())
- {
- _outfile << "# input tensors" << std::endl;
- printTensor(interp, id, log_index++);
- }
- for (const auto id : interp.outputs())
- {
- _outfile << "# output tensors" << std::endl;
- printTensor(interp, id, log_index++);
- }
- close();
- }
-
-private:
- void open(const std::string &path)
- {
- if (!_outfile.is_open())
- _outfile.open(path, std::ios_base::out);
-
- _outfile << "# ------ file: " << path << " ------" << std::endl
- << "tensor_shape_gen = []" << std::endl
- << "tensor_value_gen = []" << std::endl
- << std::endl;
- }
-
- void printTensor(::tflite::Interpreter &interp, const int id, const int log_index)
- {
- const TfLiteTensor *tensor = interp.tensor(id);
-
- _outfile << "# tensor name: " << tensor->name << std::endl;
- _outfile << "# tflite::interpreter.tensor(" << id << ") -> "
- "tensor_value_gen["
- << log_index << "]" << std::endl;
-
- if (tensor->type == kTfLiteInt32)
- {
- printTensorShape(tensor);
- printTensorValue<int32_t>(tensor, tensor->data.i32);
- }
- else if (interp.tensor(id)->type == kTfLiteUInt8)
- {
- printTensorShape(tensor);
- printTensorValue<uint8_t>(tensor, tensor->data.uint8);
- }
- else if (tensor->type == kTfLiteFloat32)
- {
- printTensorShape(tensor);
- printTensorValue<float>(tensor, tensor->data.f);
- }
- }
-
- void printTensorShape(const TfLiteTensor *tensor)
- {
- _outfile << "tensor_shape_gen.append('{";
-
- int r = 0;
- for (; r < tensor->dims->size - 1; r++)
- {
- _outfile << tensor->dims->data[r] << ", ";
- }
- _outfile << tensor->dims->data[r];
-
- _outfile << "}')" << std::endl;
- }
-
- template <typename T> void printTensorValue(const TfLiteTensor *tensor, T *tensor_data_ptr)
- {
- _outfile << "tensor_value_gen.append([";
-
- _outfile << std::fixed << std::setprecision(10);
-
- const T *end = reinterpret_cast<const T *>(tensor->data.raw_const + tensor->bytes);
- for (T *ptr = tensor_data_ptr; ptr < end; ptr++)
- _outfile << *ptr << ", ";
-
- _outfile << "])" << std::endl << std::endl;
- }
-
- void close()
- {
- _outfile << "# --------- tensor shape and value defined above ---------" << std::endl;
- _outfile.close();
- }
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_TENSOR_LOGGER_H__
diff --git a/runtime/libs/tflite/include/tflite/TensorShapeUtils.h b/runtime/libs/tflite/include/tflite/TensorShapeUtils.h
deleted file mode 100644
index ba8687413..000000000
--- a/runtime/libs/tflite/include/tflite/TensorShapeUtils.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file TensorShapeUtils.h
- * @brief This file contains utilities function of tensor shape
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
-#define __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
-
-#include "misc/tensor/Shape.h"
-
-#include <vector>
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Converts tensor::Shape into a vector
- * @param[in] shape The tensor shape to be converted
- * @return vector value of given shape object
- */
-static inline std::vector<int32_t> as_dims(const nnfw::misc::tensor::Shape &shape)
-{
- std::vector<int32_t> dims;
-
- for (uint32_t axis = 0; axis < shape.rank(); ++axis)
- {
- dims.emplace_back(shape.dim(axis));
- }
-
- return dims;
-}
-
-/**
- * @brief Broadcasts between two given shapes
- * @param[in] lhs_shape The left hand side shape
- * @param[in] rhs_shape The right hand side shape
- * @return The broadcasted shape
- */
-nnfw::misc::tensor::Shape broadcast(const nnfw::misc::tensor::Shape &lhs_shape,
- const nnfw::misc::tensor::Shape &rhs_shape);
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__
diff --git a/runtime/libs/tflite/include/tflite/TensorUtils.h b/runtime/libs/tflite/include/tflite/TensorUtils.h
deleted file mode 100644
index 08af1468b..000000000
--- a/runtime/libs/tflite/include/tflite/TensorUtils.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file TensorUtils.h
- * @brief This file contains utilities function
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_TENSOR_UTILS_H__
-#define __NNFW_TFLITE_TENSOR_UTILS_H__
-
-#include <tensorflow/lite/context.h>
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Get @c true if tensor type is kTfLiteFloat32, otherwise @c false
- * @param[in] tensor The tensor object to be compared
- * @return @c true if tensor type is kTfLiteFloat32, otherwise @c false
- */
-inline bool isFloatTensor(const TfLiteTensor *tensor) { return tensor->type == kTfLiteFloat32; }
-
-/**
- * @brief Get @c true if tensor is 4-D tensor and the first dimension length is 1,
- * otherwise @c false
- * @param[in] tensor The tensor object to be compared
- * @return @c true if tensor is 4-D tensor and the first dimension length is 1, otherwise @c false
- */
-inline bool isFeatureTensor(const TfLiteTensor *tensor)
-{
- return (tensor->dims->size == 4) && (tensor->dims->data[0] == 1);
-}
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_TENSOR_UTILS_H__
diff --git a/runtime/libs/tflite/include/tflite/TensorView.h b/runtime/libs/tflite/include/tflite/TensorView.h
index ce791a73f..956fce43f 100644
--- a/runtime/libs/tflite/include/tflite/TensorView.h
+++ b/runtime/libs/tflite/include/tflite/TensorView.h
@@ -23,13 +23,13 @@
#ifndef __NNFW_TFLITE_TENSOR_VIEW_H__
#define __NNFW_TFLITE_TENSOR_VIEW_H__
-#include "tensorflow/lite/interpreter.h"
-
#include "misc/tensor/Shape.h"
#include "misc/tensor/Index.h"
#include "misc/tensor/Reader.h"
#include "misc/tensor/NonIncreasingStride.h"
+#include <tensorflow/lite/c/c_api.h>
+
namespace nnfw
{
namespace tflite
@@ -98,19 +98,17 @@ public:
* @param[in] tensor_index The tensor index
* @return The new TensorView<T> object
*/
- static TensorView<T> make(::tflite::Interpreter &interp, int tensor_index)
+ static TensorView<T> make(const TfLiteTensor *tensor)
{
- auto tensor_ptr = interp.tensor(tensor_index);
-
// Set 'shape'
- nnfw::misc::tensor::Shape shape(tensor_ptr->dims->size);
+ nnfw::misc::tensor::Shape shape(TfLiteTensorNumDims(tensor));
for (uint32_t axis = 0; axis < shape.rank(); ++axis)
{
- shape.dim(axis) = tensor_ptr->dims->data[axis];
+ shape.dim(axis) = TfLiteTensorDim(tensor, axis);
}
- return TensorView<T>(shape, interp.typed_tensor<T>(tensor_index));
+ return TensorView<T>(shape, reinterpret_cast<T *>(TfLiteTensorData(tensor)));
}
};
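With the new signature, callers fetch the tensor from the C API themselves; a sketch, assuming TensorView keeps its Index-based accessor and `interp` is a prepared TfLiteInterpreter:

const TfLiteTensor* out_tensor = TfLiteInterpreterGetOutputTensor(interp, 0);
auto view = nnfw::tflite::TensorView<float>::make(out_tensor);
float v = view.at(nnfw::misc::tensor::Index{0, 0}); // index values illustrative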
diff --git a/runtime/libs/tflite/include/tflite/interp/Builder.h b/runtime/libs/tflite/include/tflite/interp/Builder.h
deleted file mode 100644
index 0f54e1779..000000000
--- a/runtime/libs/tflite/include/tflite/interp/Builder.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file Builder.h
- * @brief This file contains Builder structure
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_INTERP_BUILDER_H__
-#define __NNFW_TFLITE_INTERP_BUILDER_H__
-
-#include <tensorflow/lite/interpreter.h>
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Abstract interface for building a TfLite interpreter
- */
-struct Builder
-{
- /**
- * @brief Destroy the Builder object
- */
- virtual ~Builder() = default;
-
- /**
- * @brief Build a TfLite interpreter
- * @return The TfLite interpreter object
- */
- virtual std::unique_ptr<::tflite::Interpreter> build(void) const = 0;
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_INTERP_BUILDER_H__
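
For context, a sketch of how this interface was consumed before its removal (the function name is illustrative):

#include "tflite/interp/Builder.h"

std::unique_ptr<::tflite::Interpreter> makeInterpreter(const nnfw::tflite::Builder &builder)
{
  // Any Builder implementation could be swapped in here.
  auto interpreter = builder.build();
  interpreter->AllocateTensors();
  return interpreter;
}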
diff --git a/runtime/libs/tflite/include/tflite/interp/FlatBufferBuilder.h b/runtime/libs/tflite/include/tflite/interp/FlatBufferBuilder.h
deleted file mode 100644
index 2d96af50b..000000000
--- a/runtime/libs/tflite/include/tflite/interp/FlatBufferBuilder.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file FlatBufferBuilder.h
- * @brief This file contains FlatBufferBuilder class
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
-#define __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
-
-#include <tensorflow/lite/model.h>
-
-#include "tflite/interp/Builder.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Builder implementation that creates an interpreter from a TfLite FlatBuffer model
- */
-class FlatBufferBuilder final : public Builder
-{
-public:
- /**
- * @brief Construct a FlatBufferBuilder object with FlatBufferModel of TfLite
- * @param[in] model The TfLite Flatbuffer model
- */
- FlatBufferBuilder(const ::tflite::FlatBufferModel &model) : _model{model}
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Build a TfLite interpreter from the FlatBuffer model
- * @return A unique pointer to the TfLite interpreter
- */
- std::unique_ptr<::tflite::Interpreter> build(void) const override;
-
-private:
- const ::tflite::FlatBufferModel &_model;
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
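
A sketch of the typical pre-removal flow (path handling and function name are illustrative):

#include <tensorflow/lite/model.h>
#include "tflite/interp/FlatBufferBuilder.h"

std::unique_ptr<::tflite::Interpreter> load(const char *path)
{
  auto model = ::tflite::FlatBufferModel::BuildFromFile(path);
  if (!model)
    return nullptr;
  // The builder only borrows the model; the model must stay alive while the
  // resulting interpreter is in use.
  nnfw::tflite::FlatBufferBuilder builder(*model);
  return builder.build();
}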
diff --git a/runtime/libs/tflite/include/tflite/interp/FunctionBuilder.h b/runtime/libs/tflite/include/tflite/interp/FunctionBuilder.h
deleted file mode 100644
index 7bfb8db2d..000000000
--- a/runtime/libs/tflite/include/tflite/interp/FunctionBuilder.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file FunctionBuilder.h
- * @brief This file contains FunctionBuilder class
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
-#define __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
-
-#include <tensorflow/lite/model.h>
-
-#include "tflite/interp/Builder.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-
-/**
- * @brief Builder implementation that creates an interpreter via a user-provided setup function
- */
-class FunctionBuilder final : public Builder
-{
-public:
- using SetupFunc = std::function<void(::tflite::Interpreter &)>;
-
-public:
- /**
- * @brief Construct a FunctionBuilder object with SetupFunction
- * @param[in] fn The SetupFunc object
- */
- FunctionBuilder(const SetupFunc &fn) : _fn{fn}
- {
- // DO NOTHING
- }
-
-public:
- /**
- * @brief Build a TfLite interpreter and apply the setup function to it
- * @return A unique pointer to the TfLite interpreter
- */
- std::unique_ptr<::tflite::Interpreter> build(void) const override;
-
-private:
- SetupFunc _fn;
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__
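
A sketch of the pre-removal usage, where the interpreter is assembled imperatively (the setup body is illustrative):

#include "tflite/interp/FunctionBuilder.h"

std::unique_ptr<::tflite::Interpreter> makeEmptyNet()
{
  nnfw::tflite::FunctionBuilder builder([](::tflite::Interpreter &interp) {
    interp.AddTensors(2);
    interp.SetInputs({0});
    interp.SetOutputs({1});
    // ... register tensor metadata and operators here ...
  });
  return builder.build();
}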
diff --git a/runtime/libs/tflite/port/1.13.1/CMakeLists.txt b/runtime/libs/tflite/port/1.13.1/CMakeLists.txt
deleted file mode 100644
index e3cf97569..000000000
--- a/runtime/libs/tflite/port/1.13.1/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-if(NOT SUPPORT_TFLITE_VERSION VERSION_EQUAL 1.13.1)
- return()
-endif(NOT SUPPORT_TFLITE_VERSION VERSION_EQUAL 1.13.1)
-
-file(GLOB_RECURSE SOURCES "src/*.cpp")
-
-add_library(tensorflow-lite-ex STATIC ${SOURCES})
-set_target_properties(tensorflow-lite-ex PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(tensorflow-lite-ex PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(tensorflow-lite-ex PUBLIC tensorflow-lite)
-target_link_libraries(tensorflow-lite-ex PUBLIC nnfw_lib_misc nnfw_lib_rua_shim)
-target_link_libraries(tensorflow-lite-ex PRIVATE ${LIB_PTHREAD} dl)
-target_link_libraries(tensorflow-lite-ex PRIVATE nnfw_common)
-target_link_libraries(tensorflow-lite-ex PRIVATE nnfw_coverage)
diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h
deleted file mode 100644
index c073ad58e..000000000
--- a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file CustomOps.h
- * @brief This file contains registration of custom operators
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
-#define __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
-
-#include "tensorflow/lite/context.h"
-#include "tflite/ext/kernels/SquaredDifference.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-
-#define REGISTER_FUNCTION(Name) \
- TfLiteRegistration *Register_##Name(void) \
- { \
- static TfLiteRegistration r = {}; \
- r.init = Name::Init##Name; \
- r.free = Name::Free##Name; \
- r.prepare = Name::Prepare##Name; \
- r.invoke = Name::Eval##Name; \
- r.custom_name = #Name; \
- return &r; \
- }
-
-REGISTER_FUNCTION(SquaredDifference)
-
-#undef REGISTER_FUNCTION
-
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
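
Written out by hand, REGISTER_FUNCTION(SquaredDifference) expands to the following registration function:

TfLiteRegistration *Register_SquaredDifference(void)
{
  static TfLiteRegistration r = {};
  r.init = SquaredDifference::InitSquaredDifference;
  r.free = SquaredDifference::FreeSquaredDifference;
  r.prepare = SquaredDifference::PrepareSquaredDifference;
  r.invoke = SquaredDifference::EvalSquaredDifference;
  r.custom_name = "SquaredDifference";
  return &r;
}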
diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/SquaredDifference.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/SquaredDifference.h
deleted file mode 100644
index 5512ead78..000000000
--- a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/SquaredDifference.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file SquaredDifference.h
- * @brief This file contains SquaredDifference namespace and SquaredDifference function
- * definitions
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
-#define __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
-
-#include "tensorflow/lite/context.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace SquaredDifference
-{
-
-/**
- * @brief Initialize the SquaredDifference op using the contents of buffer
- * @param[in] context The TfLite context
- * @param[in] buffer The buffer with contents
- * @param[in] length The buffer length
- * @return The void pointer for user data
- */
-void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length);
-
-/**
- * @brief Release any memory allocated by 'InitSquaredDifference'
- * @param[in] context The TfLite context
- * @param[in] buffer The buffer with contents
- * @return N/A
- */
-void FreeSquaredDifference(TfLiteContext *context, void *buffer);
-
-/**
- * @brief Prepare the SquaredDifference op for execution
- * @param[in] context The TfLite context
- * @param[in] node The operand node
- * @return The TfLite status
- */
-TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node);
-
-/**
- * @brief Evaluate the SquaredDifference op
- * @param[in] context The TfLite context
- * @param[in] node The operand node
- * @return The TfLite status
- */
-TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node);
-
-} // namespace SquaredDifference
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/register.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/register.h
deleted file mode 100644
index 6e32b35fb..000000000
--- a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/register.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This header is derived from the following file (in TensorFlow v1.13.1)
-// 'externals/tensorflow/tensorflow/lite/kernels/register.h'
-#ifndef __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
-#define __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
-
-#include <unordered_map>
-#include "tensorflow/lite/context.h"
-#include "tensorflow/lite/model.h"
-
-namespace nnfw {
-namespace tflite {
-
-class BuiltinOpResolver : public ::tflite::MutableOpResolver {
- public:
- BuiltinOpResolver();
-
- const TfLiteRegistration* FindOp(::tflite::BuiltinOperator op,
- int version) const override;
- const TfLiteRegistration* FindOp(const char* op, int version) const override;
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
-
-// clang-format on
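
For context, a sketch of how this resolver was plugged into interpreter construction (the function name is illustrative):

#include "tflite/ext/kernels/register.h"

std::unique_ptr<::tflite::Interpreter> build(const ::tflite::FlatBufferModel &model)
{
  // The extended resolver provides the TfLite builtins plus the custom ops
  // registered in this port (e.g. SquaredDifference).
  nnfw::tflite::BuiltinOpResolver resolver;
  std::unique_ptr<::tflite::Interpreter> interpreter;
  ::tflite::InterpreterBuilder(model, resolver)(&interpreter);
  return interpreter;
}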
diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/nnapi_delegate.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/nnapi_delegate.h
deleted file mode 100644
index 231baa25c..000000000
--- a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/nnapi_delegate.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This header is derived from the following file (in TensorFlow v1.13.1)
-// 'externals/tensorflow/tensorflow/lite/nnapi_delegate.h'
-#ifndef __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
-#define __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
-
-#include "tensorflow/lite/allocation.h"
-#include "tensorflow/lite/c/c_api_internal.h"
-#include "tensorflow/lite/core/api/error_reporter.h"
-#include "tensorflow/lite/core/subgraph.h"
-#include "tensorflow/lite/interpreter.h"
-
-struct ANeuralNetworksModel;
-struct ANeuralNetworksMemory;
-struct ANeuralNetworksCompilation;
-
-namespace nnfw {
-namespace tflite {
-
-class NNAPIAllocation : public ::tflite::MMAPAllocation {
- public:
- NNAPIAllocation(const char* filename, ::tflite::ErrorReporter* error_reporter);
- ~NNAPIAllocation();
-
- size_t offset(const void* ptr) const {
- auto signed_offset = reinterpret_cast<const uint8_t*>(ptr) -
- reinterpret_cast<const uint8_t*>(mmapped_buffer_);
-
- return static_cast<size_t>(signed_offset);
- }
-
- ANeuralNetworksMemory* memory() const { return handle_; }
- bool valid() const override { return handle_ != nullptr; }
-
- private:
- mutable ANeuralNetworksMemory* handle_ = nullptr;
-};
-
-class NNAPIDelegate {
- public:
- ~NNAPIDelegate();
-
- // Convert a tflite graph to NNAPI
- TfLiteStatus BuildGraph(::tflite::Subgraph* subgraph);
-
- // Run
- TfLiteStatus Invoke(::tflite::Subgraph* subgraph);
-
- // Whether the current platform supports NNAPI delegation.
- static bool IsSupported();
-
- private:
- // The NN API model handle
- ANeuralNetworksModel* nn_model_ = nullptr;
- // The NN API compilation handle
- ANeuralNetworksCompilation* nn_compiled_model_ = nullptr;
- // Model status
- TfLiteStatus model_status_ = kTfLiteOk;
-
- // List of state tensors for LSTM, RNN, SVDF.
- // NN API does not allow ops to maintain states across multiple
- // invocations. We need to manually create state input tensors from
- // corresponding state output tensors of TFLite operations, and map them
- // correctly.
- std::vector<int> model_states_inputs_; // holds NNAPI operand ids
- std::vector<int> model_states_outputs_; // holds TFLite tensor ids
-};
-
-} // namespace tflite
-} // namespace nnfw
-
-#endif // __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
-
-// clang-format on
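
A sketch of the removed delegate's driving sequence (the subgraph handle is illustrative):

TfLiteStatus runViaNNAPI(::tflite::Subgraph *subgraph)
{
  if (!nnfw::tflite::NNAPIDelegate::IsSupported())
    return kTfLiteError;

  nnfw::tflite::NNAPIDelegate delegate;
  // BuildGraph converts the TfLite subgraph into an NNAPI model once;
  // Invoke then runs it per inference.
  if (delegate.BuildGraph(subgraph) != kTfLiteOk)
    return kTfLiteError;
  return delegate.Invoke(subgraph);
}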
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/SquaredDifference.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/SquaredDifference.cpp
deleted file mode 100644
index 615878513..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/SquaredDifference.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/ext/kernels/SquaredDifference.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-
-#include <iostream>
-
-namespace nnfw
-{
-namespace tflite
-{
-namespace custom
-{
-namespace SquaredDifference
-{
-
-void *InitSquaredDifference(TfLiteContext *, const char *, size_t) { return nullptr; }
-
-void FreeSquaredDifference(TfLiteContext *, void *) {}
-
-TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node)
-{
- TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
-
- const TfLiteTensor *input1 = ::tflite::GetInput(context, node, 0);
- const TfLiteTensor *input2 = ::tflite::GetInput(context, node, 1);
- TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
-
- TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
- TF_LITE_ENSURE_EQ(context, input1->type, output->type);
-
- return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input1->dims));
-}
-
-TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node)
-{
-
- const TfLiteTensor *input1 = ::tflite::GetInput(context, node, 0);
- const TfLiteTensor *input2 = ::tflite::GetInput(context, node, 1);
-
- TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
-
- size_t elements = ::tflite::NumElements(input1);
-
- switch (input1->type)
- {
- case kTfLiteFloat32:
- {
- const float *in1 = input1->data.f;
- const float *in2 = input2->data.f;
- const float *in_end1 = in1 + elements;
- float *out = output->data.f;
-
- for (; in1 < in_end1; in1++, in2++, out++)
- *out = ((*in1 - *in2) * (*in1 - *in2));
-
- return kTfLiteOk;
- }
- case kTfLiteInt32:
- {
- const int *in1 = input1->data.i32;
- const int *in2 = input2->data.i32;
- const int *in_end1 = in1 + elements;
- int *out = output->data.i32;
-
- for (; in1 < in_end1; in1++, in2++, out++)
- *out = ((*in1 - *in2) * (*in1 - *in2));
-
- return kTfLiteOk;
- }
- case kTfLiteInt64:
- {
- const int64_t *in1 = input1->data.i64;
- const int64_t *in2 = input2->data.i64;
- const int64_t *in_end1 = in1 + elements;
- int64_t *out = output->data.i64;
-
- for (; in1 < in_end1; in1++, in2++, out++)
- *out = ((*in1 - *in2) * (*in1 - *in2));
-
- return kTfLiteOk;
- }
- default:
- {
- context->ReportError(context, "InputType is %d Unsupported", input1->type);
- return kTfLiteError;
- }
- }
-}
-
-} // namespace SquaredDifference
-} // namespace custom
-} // namespace tflite
-} // namespace nnfw
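
The three typed loops above are instances of a single pattern; a condensed restatement for reference:

template <typename T>
void squaredDifference(const T *in1, const T *in2, T *out, size_t elements)
{
  // Elementwise (a - b)^2 over both input buffers.
  for (size_t i = 0; i < elements; ++i)
  {
    const T diff = in1[i] - in2[i];
    out[i] = diff * diff;
  }
}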
diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp
deleted file mode 100644
index 89f81b612..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp
+++ /dev/null
@@ -1,314 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This code is derived from the following file (in TensorFlow v1.13.1)
-// 'externals/tensorflow/tensorflow/lite/kernels/register.cc'
-#include "tflite/ext/kernels/register.h"
-#include "tensorflow/lite/util.h"
-#include "tflite/ext/kernels/CustomOps.h"
-
-namespace tflite {
-namespace ops {
-
-namespace custom {
-
-// Need additional external library for AUDIO_SPECTROGRAM
-//TfLiteRegistration* Register_AUDIO_SPECTROGRAM();
-TfLiteRegistration* Register_LAYER_NORM_LSTM();
-TfLiteRegistration* Register_MFCC();
-TfLiteRegistration* Register_DETECTION_POSTPROCESS();
-TfLiteRegistration* Register_RELU_1();
-
-} // namespace custom
-}
-}
-
-namespace tflite {
-namespace ops {
-namespace builtin {
-
-TfLiteRegistration* Register_ABS();
-TfLiteRegistration* Register_RELU();
-TfLiteRegistration* Register_RELU_N1_TO_1();
-TfLiteRegistration* Register_RELU6();
-TfLiteRegistration* Register_TANH();
-TfLiteRegistration* Register_LOGISTIC();
-TfLiteRegistration* Register_AVERAGE_POOL_2D();
-TfLiteRegistration* Register_MAX_POOL_2D();
-TfLiteRegistration* Register_L2_POOL_2D();
-TfLiteRegistration* Register_CONV_2D();
-TfLiteRegistration* Register_DEPTHWISE_CONV_2D();
-TfLiteRegistration* Register_SVDF();
-TfLiteRegistration* Register_RNN();
-TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_RNN();
-TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_RNN();
-TfLiteRegistration* Register_EMBEDDING_LOOKUP();
-TfLiteRegistration* Register_EMBEDDING_LOOKUP_SPARSE();
-TfLiteRegistration* Register_FULLY_CONNECTED();
-TfLiteRegistration* Register_LSH_PROJECTION();
-TfLiteRegistration* Register_HASHTABLE_LOOKUP();
-TfLiteRegistration* Register_SOFTMAX();
-TfLiteRegistration* Register_CONCATENATION();
-TfLiteRegistration* Register_ADD();
-TfLiteRegistration* Register_SPACE_TO_BATCH_ND();
-TfLiteRegistration* Register_DIV();
-TfLiteRegistration* Register_SUB();
-TfLiteRegistration* Register_BATCH_TO_SPACE_ND();
-TfLiteRegistration* Register_MUL();
-TfLiteRegistration* Register_L2_NORMALIZATION();
-TfLiteRegistration* Register_LOCAL_RESPONSE_NORMALIZATION();
-TfLiteRegistration* Register_LSTM();
-TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_LSTM();
-TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
-TfLiteRegistration* Register_PAD();
-TfLiteRegistration* Register_PADV2();
-TfLiteRegistration* Register_RESHAPE();
-TfLiteRegistration* Register_RESIZE_BILINEAR();
-TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR();
-TfLiteRegistration* Register_SKIP_GRAM();
-TfLiteRegistration* Register_SPACE_TO_DEPTH();
-TfLiteRegistration* Register_GATHER();
-TfLiteRegistration* Register_TRANSPOSE();
-TfLiteRegistration* Register_MEAN();
-TfLiteRegistration* Register_SPLIT();
-TfLiteRegistration* Register_SPLIT_V();
-TfLiteRegistration* Register_SQUEEZE();
-TfLiteRegistration* Register_STRIDED_SLICE();
-TfLiteRegistration* Register_EXP();
-TfLiteRegistration* Register_TOPK_V2();
-TfLiteRegistration* Register_LOG();
-TfLiteRegistration* Register_LOG_SOFTMAX();
-TfLiteRegistration* Register_CAST();
-TfLiteRegistration* Register_DEQUANTIZE();
-TfLiteRegistration* Register_PRELU();
-TfLiteRegistration* Register_MAXIMUM();
-TfLiteRegistration* Register_MINIMUM();
-TfLiteRegistration* Register_ARG_MAX();
-TfLiteRegistration* Register_ARG_MIN();
-TfLiteRegistration* Register_GREATER();
-TfLiteRegistration* Register_GREATER_EQUAL();
-TfLiteRegistration* Register_LESS();
-TfLiteRegistration* Register_LESS_EQUAL();
-TfLiteRegistration* Register_FLOOR();
-TfLiteRegistration* Register_TILE();
-TfLiteRegistration* Register_NEG();
-TfLiteRegistration* Register_SUM();
-TfLiteRegistration* Register_REDUCE_PROD();
-TfLiteRegistration* Register_REDUCE_MAX();
-TfLiteRegistration* Register_REDUCE_MIN();
-TfLiteRegistration* Register_REDUCE_ANY();
-TfLiteRegistration* Register_SELECT();
-TfLiteRegistration* Register_SLICE();
-TfLiteRegistration* Register_SIN();
-TfLiteRegistration* Register_TRANSPOSE_CONV();
-TfLiteRegistration* Register_EXPAND_DIMS();
-TfLiteRegistration* Register_SPARSE_TO_DENSE();
-TfLiteRegistration* Register_EQUAL();
-TfLiteRegistration* Register_NOT_EQUAL();
-TfLiteRegistration* Register_SQRT();
-TfLiteRegistration* Register_RSQRT();
-TfLiteRegistration* Register_SHAPE();
-TfLiteRegistration* Register_POW();
-TfLiteRegistration* Register_FAKE_QUANT();
-TfLiteRegistration* Register_PACK();
-TfLiteRegistration* Register_ONE_HOT();
-TfLiteRegistration* Register_LOGICAL_OR();
-TfLiteRegistration* Register_LOGICAL_AND();
-TfLiteRegistration* Register_LOGICAL_NOT();
-TfLiteRegistration* Register_UNPACK();
-TfLiteRegistration* Register_FLOOR_DIV();
-TfLiteRegistration* Register_SQUARE();
-TfLiteRegistration* Register_ZEROS_LIKE();
-TfLiteRegistration* Register_FLOOR_MOD();
-TfLiteRegistration* Register_RANGE();
-TfLiteRegistration* Register_LEAKY_RELU();
-TfLiteRegistration* Register_SQUARED_DIFFERENCE();
-TfLiteRegistration* Register_FILL();
-TfLiteRegistration* Register_MIRROR_PAD();
-
-} // namespace builtin
-} // namespace ops
-} // namespace tflite
-
-namespace nnfw {
-namespace tflite {
-
-// Using namespace directive to minimize diff with upstream tensorflow
-using namespace ::tflite::ops::custom;
-using namespace ::tflite::ops::builtin;
-using namespace ::tflite;
-
-// Fix to use strict build option
-TfLiteStatus UnsupportedTensorFlowOp(TfLiteContext* context, TfLiteNode* /*node*/) {
- context->ReportError(
- context,
- "Regular TensorFlow ops are not supported by this interpreter. Make sure "
- "you invoke the Flex delegate before inference.");
- return kTfLiteError;
-}
-
-const TfLiteRegistration* BuiltinOpResolver::FindOp(tflite::BuiltinOperator op,
- int version) const {
- return MutableOpResolver::FindOp(op, version);
-}
-
-const TfLiteRegistration* BuiltinOpResolver::FindOp(const char* op,
- int version) const {
- // Return the NULL Op for all ops whose name start with "Flex", allowing
- // the interpreter to delegate their execution.
- if (IsFlexOp(op)) {
- static TfLiteRegistration null_op{
- nullptr, nullptr, &UnsupportedTensorFlowOp,
- nullptr, nullptr, BuiltinOperator_CUSTOM,
- "Flex", 1};
- return &null_op;
- }
- return MutableOpResolver::FindOp(op, version);
-}
-
-BuiltinOpResolver::BuiltinOpResolver() {
- AddBuiltin(BuiltinOperator_ABS, Register_ABS());
- AddBuiltin(BuiltinOperator_RELU, Register_RELU());
- AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
- AddBuiltin(BuiltinOperator_RELU6, Register_RELU6());
- AddBuiltin(BuiltinOperator_TANH, Register_TANH());
- AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC());
- AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D());
- AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D());
- AddBuiltin(BuiltinOperator_L2_POOL_2D, Register_L2_POOL_2D());
- AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D());
- AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D(),
- /* min_version */ 1,
- /* max_version */ 2);
- AddBuiltin(BuiltinOperator_SVDF, Register_SVDF());
- AddBuiltin(BuiltinOperator_RNN, Register_RNN());
- AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
- Register_BIDIRECTIONAL_SEQUENCE_RNN());
- AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
- Register_UNIDIRECTIONAL_SEQUENCE_RNN());
- AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP());
- AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
- Register_EMBEDDING_LOOKUP_SPARSE());
- AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(),
- /* min_version */ 1,
- /* max_version */ 2);
- AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
- AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
- AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX());
- AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION());
- AddBuiltin(BuiltinOperator_ADD, Register_ADD());
- AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND());
- AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, Register_BATCH_TO_SPACE_ND());
- AddBuiltin(BuiltinOperator_MUL, Register_MUL());
- AddBuiltin(BuiltinOperator_L2_NORMALIZATION, Register_L2_NORMALIZATION());
- AddBuiltin(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
- Register_LOCAL_RESPONSE_NORMALIZATION());
- AddBuiltin(BuiltinOperator_LSTM, Register_LSTM(), /* min_version */ 1,
- /* max_version */ 2);
- AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
- Register_BIDIRECTIONAL_SEQUENCE_LSTM());
- AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
- Register_UNIDIRECTIONAL_SEQUENCE_LSTM());
- AddBuiltin(BuiltinOperator_PAD, Register_PAD());
- AddBuiltin(BuiltinOperator_PADV2, Register_PADV2());
- AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
- AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR());
- AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- Register_RESIZE_NEAREST_NEIGHBOR());
- AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());
- AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH());
- AddBuiltin(BuiltinOperator_GATHER, Register_GATHER());
- AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE());
- AddBuiltin(BuiltinOperator_MEAN, Register_MEAN());
- AddBuiltin(BuiltinOperator_DIV, Register_DIV());
- AddBuiltin(BuiltinOperator_SUB, Register_SUB());
- AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT());
- AddBuiltin(BuiltinOperator_SPLIT_V, Register_SPLIT_V());
- AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE());
- AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE());
- AddBuiltin(BuiltinOperator_EXP, Register_EXP());
- AddBuiltin(BuiltinOperator_TOPK_V2, Register_TOPK_V2());
- AddBuiltin(BuiltinOperator_LOG, Register_LOG());
- AddBuiltin(BuiltinOperator_LOG_SOFTMAX, Register_LOG_SOFTMAX());
- AddBuiltin(BuiltinOperator_CAST, Register_CAST());
- AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE(),
- /* min_version */ 1,
- /* max_version */ 2);
- AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
- AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM());
- AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM());
- AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX());
- AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN());
- AddBuiltin(BuiltinOperator_GREATER, Register_GREATER());
- AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL());
- AddBuiltin(BuiltinOperator_LESS, Register_LESS());
- AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL());
- AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR());
- AddBuiltin(BuiltinOperator_NEG, Register_NEG());
- AddBuiltin(BuiltinOperator_SELECT, Register_SELECT());
- AddBuiltin(BuiltinOperator_SLICE, Register_SLICE());
- AddBuiltin(BuiltinOperator_SIN, Register_SIN());
- AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV());
- AddBuiltin(BuiltinOperator_TILE, Register_TILE());
- AddBuiltin(BuiltinOperator_SUM, Register_SUM());
- AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD());
- AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX());
- AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN());
- AddBuiltin(BuiltinOperator_REDUCE_ANY, Register_REDUCE_ANY());
- AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
- AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
- AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL());
- AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL());
- AddBuiltin(BuiltinOperator_SQRT, Register_SQRT());
- AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT());
- AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
- AddBuiltin(BuiltinOperator_POW, Register_POW());
- AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
- AddBuiltin(BuiltinOperator_PACK, Register_PACK());
- AddBuiltin(BuiltinOperator_ONE_HOT, Register_ONE_HOT());
- AddBuiltin(BuiltinOperator_LOGICAL_OR, Register_LOGICAL_OR());
- AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND());
- AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT());
- AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK());
- AddBuiltin(BuiltinOperator_FLOOR_DIV, Register_FLOOR_DIV());
- AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE());
- AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE());
- AddBuiltin(BuiltinOperator_FLOOR_MOD, Register_FLOOR_MOD());
- AddBuiltin(BuiltinOperator_RANGE, Register_RANGE());
- AddBuiltin(BuiltinOperator_LEAKY_RELU, Register_LEAKY_RELU());
- AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, Register_SQUARED_DIFFERENCE());
- AddBuiltin(BuiltinOperator_FILL, Register_FILL());
- AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD());
-
- AddCustom("SquaredDifference", nnfw::tflite::custom::Register_SquaredDifference());
-
- // TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
- // custom ops aren't always included by default.
- AddCustom("Mfcc", tflite::ops::custom::Register_MFCC());
- // Need additional external library for audio spectrogram
- //AddCustom("AudioSpectrogram",
- // tflite::ops::custom::Register_AUDIO_SPECTROGRAM());
- AddCustom("LayerNormLstm", tflite::ops::custom::Register_LAYER_NORM_LSTM());
- AddCustom("Relu1", tflite::ops::custom::Register_RELU_1());
- AddCustom("TFLite_Detection_PostProcess",
- tflite::ops::custom::Register_DETECTION_POSTPROCESS());
-}
-
-} // namespace tflite
-} // namespace nnfw
diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
deleted file mode 100644
index 9675570ad..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
+++ /dev/null
@@ -1,1262 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This code is derived from the following file (in TensorFlow v1.13.1)
-// 'externals/tensorflow/tensorflow/lite/nnapi_delegate.cc'
-#include "tflite/ext/nnapi_delegate.h"
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/core/api/error_reporter.h"
-#include "tensorflow/lite/model.h"
-#include <rua/Shim.h>
-#include "NeuralNetworksExShim.h"
-
-#ifdef __ANDROID__
-#include <android/log.h>
-#include <sys/system_properties.h>
-#endif
-
-#include <memory>
-
-namespace nnfw {
-namespace tflite {
-
-void logError(const char* format, ...) {
- // stderr is convenient for native tests, but is not captured for apps
- va_list args_for_stderr;
- va_start(args_for_stderr, format);
- vfprintf(stderr, format, args_for_stderr);
- va_end(args_for_stderr);
- fprintf(stderr, "\n");
- fflush(stderr);
-#ifdef __ANDROID__
- // produce logcat output for general consumption
- va_list args_for_log;
- va_start(args_for_log, format);
- __android_log_vprint(ANDROID_LOG_ERROR, "tflite", format, args_for_log);
- va_end(args_for_log);
-#endif
-}
-
-#define FATAL(...) \
- logError(__VA_ARGS__); \
- exit(1);
-
-// TODO(aselle): Change the error model to use status codes.
-#define CHECK_TFLITE_SUCCESS(x) \
- if (x != kTfLiteOk) { \
- FATAL("Aborting since tflite returned failure nnapi_delegate.cc:%d.", \
- __LINE__); \
- }
-
-#define CHECK_NN(x) \
- if (x != ANEURALNETWORKS_NO_ERROR) { \
- FATAL("Aborting since NNAPI returned failure nnapi_delegate.cc:%d", \
- __LINE__); \
- }
-
-#define RETURN_ERROR_IF_TFLITE_FAILED(x) \
- if (x != kTfLiteOk) { \
- logError( \
- "Returning error since TFLite returned failure nnapi_delegate.cc:%d.", \
- __LINE__); \
- return kTfLiteError; \
- }
-
-#define RETURN_ERROR_IF_NN_FAILED(x) \
- if (x != ANEURALNETWORKS_NO_ERROR) { \
- logError( \
- "Returning error since NNAPI returned failure nnapi_delegate.cc:%d.", \
- __LINE__); \
- return kTfLiteError; \
- }
-
-// Tracking of NNAPI operand ids
-static const int64_t kOperandIdNotSet = -1;
-static const int64_t kOperandNotNeeded = -2;
-
-namespace {
-
-int32_t GetAndroidSdkVersion() {
-#ifdef __ANDROID__
- const char* sdkProp = "ro.build.version.sdk";
- char sdkVersion[PROP_VALUE_MAX];
- int length = __system_property_get(sdkProp, sdkVersion);
- if (length != 0) {
- for (int i = 0; i < length; ++i) {
- int digit = sdkVersion[i] - '0';
- if (digit < 0 || digit > 9) {
- // Non-numeric SDK version, assume it's higher than expected
- return 0xFFFF;
- }
- }
- // NOTE use std::strtol instead of atoi: security issue
- return std::strtol(sdkVersion, NULL, 0);
- }
- FATAL("No %s prop", sdkProp);
-#endif // __ANDROID__
- return 0;
-}
-
-int32_t GetAndroidSdkVersionCached() {
- static int32_t androidSdkVersion = GetAndroidSdkVersion();
- return androidSdkVersion;
-}
-
-// WORKAROUND Some model have dimension zero
-// Consider scalar as vector size 1
-static const uint32_t dimension_for_scalar[1] = {1};
-
-} // namespace
-
-NNAPIAllocation::NNAPIAllocation(const char* filename,
- ::tflite::ErrorReporter* error_reporter)
- : MMAPAllocation(filename, error_reporter) {
- if (mmapped_buffer_ != MAP_FAILED)
- CHECK_NN(ANeuralNetworksMemory_createFromFd(buffer_size_bytes_, PROT_READ,
- mmap_fd_, 0, &handle_));
-}
-
-NNAPIAllocation::~NNAPIAllocation() {
- if (handle_) {
- ANeuralNetworksMemory_free(handle_);
- }
-}
-
-NNAPIDelegate::~NNAPIDelegate() {
- if (nn_compiled_model_) {
- ANeuralNetworksCompilation_free(nn_compiled_model_);
- nn_compiled_model_ = nullptr;
- }
- if (nn_model_) {
- ANeuralNetworksModel_free(nn_model_);
- nn_model_ = nullptr;
- // TODO(aselle): Is this thread-safe and callable multiple times?
- }
- // ANeuralNetworksShutdown();
-}
-
-// Adds the tensors of the subgraph to the NN API model.
-TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
- ANeuralNetworksModel* nn_model,
- uint32_t* no_of_operands_added,
- std::vector<int64_t>* nnapi_ids) {
- uint32_t next_id = 0;
- // Allocate temporary buffer to save casted boolean tensor
- std::unordered_map<size_t, std::unique_ptr<uint8_t[]>> const_boolean_tensors;
-
- for (size_t i = 0; i < subgraph->tensors_size(); i++) {
- // Skip temporaries and RNN back-edges.
- if ((*nnapi_ids)[i] == kOperandNotNeeded) continue;
-
- (*nnapi_ids)[i] = int64_t(next_id);
-
- int32_t nn_type = 0;
- // NNAPI requires 32-bit float scale to be zero, tflite doesn't care
- float scale = 0.0f;
- int32_t zeroPoint = 0;
- TfLiteTensor* tensor = subgraph->tensor(i);
- switch (tensor->type) {
- case kTfLiteNoType:
- // Tensors added during initialization of Ops don't have a type yet and
- // should not be registered with the NNAPI.
- continue;
- case kTfLiteFloat32:
- nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
- break;
- case kTfLiteUInt8:
- // NNAPI uses ANEURALNETWORKS_TENSOR_QUANT8_ASYMM to represent uint8 type
- // ex. ANEURALNETWORKS_CAST
- nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
- scale = tensor->params.scale;
- // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM type requires scale > 0,
- // zeroPoint >= 0 and zeroPoint <= 255
- scale = (scale == 0.0f) ? 1.0f : scale;
- zeroPoint = tensor->params.zero_point;
- break;
- case kTfLiteInt32:
- nn_type = ANEURALNETWORKS_TENSOR_INT32;
- scale = tensor->params.scale;
- zeroPoint = tensor->params.zero_point;
- break;
- case kTfLiteBool:
- // Workaround to pass bool type under NNAPI
- // Use ANEURALNETWORKS_TENSOR_BOOL8; constant bool values are widened to 8 bits below
- nn_type = ANEURALNETWORKS_TENSOR_BOOL8;
- break;
- default:
- logError("Unsupported tensor type %d", tensor->type);
- return kTfLiteError;
- }
- if (tensor->dims->size == 0) {
- // WORKAROUND Some model have dimension zero
- switch (tensor->type) {
- case kTfLiteFloat32:
- nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
- break;
- case kTfLiteInt32:
- nn_type = ANEURALNETWORKS_TENSOR_INT32;
- break;
- default:
- logError("NNAPI doesn't support tensors with rank 0 (index %d name %s)",
- i, tensor->name);
- return kTfLiteError;
- }
- }
- if (tensor->dims->size > 4) {
- logError("NNAPI doesn't support tensors with rank > 4 (index %d name %s)",
- i, tensor->name);
- return kTfLiteError;
- }
- // TODO(aselle): Note, many of these are intermediate results. Do I need
- // to ever specify these sizes. I am currently below doing setValue
- // on all of them, but I shouldn't in the future.
- // Answer(jeanluc): If all the operators can set the dimension correctly,
- // you won't need to.
- ANeuralNetworksOperandType operand_type{
- nn_type, static_cast<uint32_t>(tensor->dims->size),
- reinterpret_cast<uint32_t*>(tensor->dims->data), scale, zeroPoint};
- if (tensor->dims->size == 0) {
- // WORKAROUND Some model have dimension zero
- // Consider scalar as vector size 1
- operand_type.dimensions = dimension_for_scalar;
- operand_type.dimensionCount = 1;
- }
- RETURN_ERROR_IF_NN_FAILED(
- ANeuralNetworksModel_addOperand(nn_model, &operand_type));
- // TODO(aselle): Based on Michael's suggestion, limiting this to read
- // only memory
- if (tensor->allocation_type == kTfLiteMmapRo) {
- if (tensor->type == kTfLiteBool)
- {
- // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
- size_t elements = tensor->bytes / sizeof(bool);
- const_boolean_tensors[i] = std::make_unique<uint8_t[]>(elements);
- for (size_t idx = 0; idx < elements; idx++)
- {
- const_boolean_tensors[i].get()[idx] = (tensor->data.b[idx] ? 0xff : 0x00);
- }
- RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_setOperandValue(
- nn_model, next_id, const_boolean_tensors[i].get(), tensor->bytes));
- }
- else if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
- static_cast<const ::tflite::Allocation*>(tensor->allocation))) {
- RETURN_ERROR_IF_NN_FAILED(
- ANeuralNetworksModel_setOperandValueFromMemory(
- nn_model, next_id, alloc->memory(),
- alloc->offset(tensor->data.raw), tensor->bytes));
- } else {
- RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_setOperandValue(
- nn_model, next_id, tensor->data.raw, tensor->bytes));
- }
- } else if (tensor->bytes == 0) {
- // These size 0 tensors are optional tensors reserved.
- RETURN_ERROR_IF_NN_FAILED(
- ANeuralNetworksModel_setOperandValue(nn_model, next_id, nullptr, 0));
- }
-
- ++next_id;
- }
- *no_of_operands_added = next_id;
- return kTfLiteOk;
-}
-
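// NOTE (summary) addTensorOperands maps TfLite tensor types onto NNAPI operand
// types as follows:
//   kTfLiteFloat32 -> ANEURALNETWORKS_TENSOR_FLOAT32
//   kTfLiteUInt8   -> ANEURALNETWORKS_TENSOR_QUANT8_ASYMM (zero scale coerced to 1.0f)
//   kTfLiteInt32   -> ANEURALNETWORKS_TENSOR_INT32
//   kTfLiteBool    -> ANEURALNETWORKS_TENSOR_BOOL8 (bool values widened to 8 bits)
//   rank 0         -> registered as a vector of size 1 (see dimension_for_scalar)
//   rank > 4       -> rejected with an error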
-void MapAndAddTensorIds(const int* from_ids_buf, size_t from_ids_count,
- std::vector<uint32_t>* into,
- const std::vector<int64_t>& map) {
- for (size_t i = 0; i < from_ids_count; i++) {
- int from_id = from_ids_buf[i];
- if (from_id == kOptionalTensor) {
- into->push_back(from_id);
- } else {
- into->push_back(map[from_id]);
- }
- }
-}
-
-// Adds the operations and their parameters to the NN API model.
-// 'next-id' is the operand ID of the next operand of the model.
-TfLiteStatus AddOpsAndParams(
- ::tflite::Subgraph* subgraph, ANeuralNetworksModel* nn_model,
- uint32_t next_id, std::vector<int>* model_state_inputs,
- std::vector<int>* model_state_outputs,
- const std::vector<int64_t>& tensor_id_to_nnapi_id) {
- for (size_t i = 0; i < subgraph->nodes_size(); i++) {
- const auto* node_and_registration = subgraph->node_and_registration(i);
- const TfLiteNode& node = node_and_registration->first;
- const TfLiteRegistration& registration = node_and_registration->second;
- ::tflite::BuiltinOperator builtin =
- static_cast<::tflite::BuiltinOperator>(registration.builtin_code);
-
- // Add the parameters.
- std::vector<uint32_t> augmented_inputs, augmented_outputs;
- MapAndAddTensorIds(node.inputs->data, node.inputs->size, &augmented_inputs,
- tensor_id_to_nnapi_id);
- MapAndAddTensorIds(node.outputs->data, node.outputs->size,
- &augmented_outputs, tensor_id_to_nnapi_id);
-
- auto add_scalar_int32 = [&nn_model, &augmented_inputs,
- &next_id](int value) {
- // Fix to use strict build option
- ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_INT32;
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
- CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
- sizeof(int32_t)))
- augmented_inputs.push_back(next_id++);
- };
-
- auto add_scalar_float32 = [&nn_model, &augmented_inputs,
- &next_id](float value) {
- // Fix to use strict build option
- ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_FLOAT32;
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
- CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
- sizeof(float)))
- augmented_inputs.push_back(next_id++);
- };
-
- auto add_vector_int32 = [&](const int* values, uint32_t num_values) {
- // Fix to use strict build option
- ANeuralNetworksOperandType operand_type{};
- operand_type.type = ANEURALNETWORKS_TENSOR_INT32;
- operand_type.dimensionCount = 1;
- operand_type.dimensions = &num_values;
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
- CHECK_NN(ANeuralNetworksModel_setOperandValue(
- nn_model, next_id, values, sizeof(int32_t) * num_values));
- augmented_inputs.push_back(next_id++);
- };
-
- // Handle state tensors of RNN, LSTM, SVDF.
- // For each state_out tensor, a corresponding state_in operand needs to be
- // created for NNAPI.
- auto duplicate_state_tensor_float32 =
- [subgraph, &nn_model, &next_id, &augmented_inputs, &model_state_inputs,
- &model_state_outputs](int tensor_id) {
- const TfLiteTensor* tensor = subgraph->tensor(tensor_id);
- ANeuralNetworksOperandType operand_type{
- ANEURALNETWORKS_TENSOR_FLOAT32,
- static_cast<uint32_t>(tensor->dims->size),
- reinterpret_cast<uint32_t*>(tensor->dims->data),
- tensor->params.scale, tensor->params.zero_point};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type));
- augmented_inputs.push_back(next_id);
- model_state_inputs->push_back(next_id);
- model_state_outputs->push_back(tensor_id);
- next_id++;
- };
- auto check_and_add_activation = [&add_scalar_int32](int activation) {
- if (activation > kTfLiteActRelu6) {
- logError("NNAPI only supports RELU, RELU1 and RELU6 activations");
- return kTfLiteError;
- }
- add_scalar_int32(activation);
- return kTfLiteOk;
- };
-
- auto add_add_params = [&add_scalar_int32](void* data) {
- auto* builtin = reinterpret_cast<TfLiteAddParams*>(data);
- if (builtin->activation > kTfLiteActRelu6) {
- logError("NNAPI only supports RELU, RELU1 and RELU6 activations");
- return kTfLiteError;
- }
- add_scalar_int32(builtin->activation);
- return kTfLiteOk;
- };
-
- auto add_pooling_params = [&add_scalar_int32,
- &check_and_add_activation](void* data) {
- auto builtin = reinterpret_cast<TfLitePoolParams*>(data);
- add_scalar_int32(builtin->padding);
- add_scalar_int32(builtin->stride_width);
- add_scalar_int32(builtin->stride_height);
- add_scalar_int32(builtin->filter_width);
- add_scalar_int32(builtin->filter_height);
- return check_and_add_activation(builtin->activation);
- };
-
- auto add_convolution_params = [&add_scalar_int32,
- &check_and_add_activation](void* data) {
- auto builtin = reinterpret_cast<TfLiteConvParams*>(data);
- add_scalar_int32(builtin->padding);
- add_scalar_int32(builtin->stride_width);
- add_scalar_int32(builtin->stride_height);
- return check_and_add_activation(builtin->activation);
- };
-
- auto add_depthwise_conv_params = [&add_scalar_int32,
- &check_and_add_activation](void* data) {
- auto builtin = reinterpret_cast<TfLiteDepthwiseConvParams*>(data);
- add_scalar_int32(builtin->padding);
- add_scalar_int32(builtin->stride_width);
- add_scalar_int32(builtin->stride_height);
- add_scalar_int32(builtin->depth_multiplier);
- return check_and_add_activation(builtin->activation);
- };
-
- auto add_fully_connected_params = [&check_and_add_activation](void* data) {
- auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(data);
- return check_and_add_activation(builtin->activation);
- };
-
- auto add_concatenation_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteConcatenationParams*>(data);
- add_scalar_int32(builtin->axis);
- if (builtin->activation != kTfLiteActNone) {
- logError("Concatenation does not support fused activation in NNAPI");
- return kTfLiteError;
- }
- return kTfLiteOk;
- };
-
- auto add_softmax_params = [&add_scalar_float32](void* data) {
- auto builtin = reinterpret_cast<TfLiteSoftmaxParams*>(data);
- add_scalar_float32(builtin->beta);
- };
-
- auto add_space_to_depth_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteSpaceToDepthParams*>(data);
- add_scalar_int32(builtin->block_size);
- };
-
- auto add_lstm_params = [&add_scalar_int32,
- &add_scalar_float32](void* data) {
- auto builtin = reinterpret_cast<TfLiteLSTMParams*>(data);
- add_scalar_int32(builtin->activation);
- add_scalar_float32(builtin->cell_clip);
- add_scalar_float32(builtin->proj_clip);
- };
-
- // LSTM in NNAPI requires scratch tensor as an output operand.
- auto add_lstm_scratch_tensor_float32 = [subgraph, &node, &nn_model,
- &next_id, &augmented_outputs]() {
- if (node.temporaries->size == 0) return;
- int scratch_buffer_index = node.temporaries->data[0];
- const TfLiteTensor* tensor = subgraph->tensor(scratch_buffer_index);
- ANeuralNetworksOperandType operand_type{
- ANEURALNETWORKS_TENSOR_FLOAT32,
- static_cast<uint32_t>(tensor->dims->size),
- reinterpret_cast<uint32_t*>(tensor->dims->data), tensor->params.scale,
- tensor->params.zero_point};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type));
- augmented_outputs.insert(augmented_outputs.begin(), next_id++);
- };
-
- auto add_mean_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteReducerParams*>(data);
- add_scalar_int32(builtin->keep_dims);
- };
-
- auto add_svdf_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteSVDFParams*>(data);
- add_scalar_int32(builtin->rank);
- add_scalar_int32(builtin->activation);
- };
-
- auto add_rnn_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteRNNParams*>(data);
- add_scalar_int32(builtin->activation);
- };
-
- auto add_squeeze_params = [&](void* data) {
- const auto* builtin = reinterpret_cast<TfLiteSqueezeParams*>(data);
- // Note that we add the squeeze dimensions even if the dimensions were
- // unspecified (empty), as NNAPI requires the operand.
- add_vector_int32(builtin->squeeze_dims,
- static_cast<uint32_t>(builtin->num_squeeze_dims));
- };
-
- // Handle optional input tensors.
- auto add_optional_tensors = [&nn_model, &augmented_inputs,
- &next_id](int nn_type) {
- for (size_t idx = 0; idx < augmented_inputs.size(); idx++) {
- // Fix to use strict build option
- if (augmented_inputs[idx] == static_cast<uint32_t>(kOptionalTensor)) {
- const std::vector<uint32_t> dim = {0, 0};
- ANeuralNetworksOperandType operand_type{nn_type, 2, dim.data(), 0, 0};
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
- CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id,
- nullptr, 0))
- augmented_inputs[idx] = next_id++;
- }
- }
- };
-
- int nnapi_version = 10;
-#include "nnapi_delegate_ex_AddOpsAndParams_lambda.inc"
-
- // Fix to use strict build option
- ANeuralNetworksOperationType nn_op_type = -1;
-
- // Using namespace directive to minimize diff with upstream tensorflow
- namespace tflite = ::tflite;
-
- switch (builtin) {
- case tflite::BuiltinOperator_ADD:
- nn_op_type = ANEURALNETWORKS_ADD;
- RETURN_ERROR_IF_TFLITE_FAILED(add_add_params(node.builtin_data));
- break;
- case tflite::BuiltinOperator_MUL:
- nn_op_type = ANEURALNETWORKS_MUL;
- RETURN_ERROR_IF_TFLITE_FAILED(add_add_params(node.builtin_data));
- break;
- case tflite::BuiltinOperator_AVERAGE_POOL_2D:
- RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
- nn_op_type = ANEURALNETWORKS_AVERAGE_POOL_2D;
- break;
- case tflite::BuiltinOperator_MAX_POOL_2D:
- RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
- nn_op_type = ANEURALNETWORKS_MAX_POOL_2D;
- break;
- case tflite::BuiltinOperator_L2_POOL_2D:
- RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
- nn_op_type = ANEURALNETWORKS_L2_POOL_2D;
- break;
- case tflite::BuiltinOperator_CONV_2D: {
- auto builtin = reinterpret_cast<TfLiteConvParams*>(node.builtin_data);
- if (builtin->dilation_width_factor != 1 ||
- builtin->dilation_height_factor != 1 || node.inputs->size != 3) {
- logError("NNAPI does not support dilated Conv2D.");
- return kTfLiteError;
- }
- }
- RETURN_ERROR_IF_TFLITE_FAILED(
- add_convolution_params(node.builtin_data));
- nn_op_type = ANEURALNETWORKS_CONV_2D;
- break;
- case tflite::BuiltinOperator_RELU:
- nn_op_type = ANEURALNETWORKS_RELU;
- break;
- case tflite::BuiltinOperator_RELU_N1_TO_1:
- nn_op_type = ANEURALNETWORKS_RELU1;
- break;
- case tflite::BuiltinOperator_RELU6:
- nn_op_type = ANEURALNETWORKS_RELU6;
- break;
- case tflite::BuiltinOperator_TANH:
- nn_op_type = ANEURALNETWORKS_TANH;
- break;
- case tflite::BuiltinOperator_FLOOR:
- nn_op_type = ANEURALNETWORKS_FLOOR;
- break;
- case tflite::BuiltinOperator_LOGISTIC:
- nn_op_type = ANEURALNETWORKS_LOGISTIC;
- break;
- case tflite::BuiltinOperator_DEPTHWISE_CONV_2D:
- RETURN_ERROR_IF_TFLITE_FAILED(
- add_depthwise_conv_params(node.builtin_data));
- nn_op_type = ANEURALNETWORKS_DEPTHWISE_CONV_2D;
- break;
- case tflite::BuiltinOperator_CONCATENATION:
- RETURN_ERROR_IF_TFLITE_FAILED(
- add_concatenation_params(node.builtin_data));
- nn_op_type = ANEURALNETWORKS_CONCATENATION;
- break;
- case tflite::BuiltinOperator_SOFTMAX:
- add_softmax_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_SOFTMAX;
- break;
- case tflite::BuiltinOperator_FULLY_CONNECTED:
- RETURN_ERROR_IF_TFLITE_FAILED(
- add_fully_connected_params(node.builtin_data));
- nn_op_type = ANEURALNETWORKS_FULLY_CONNECTED;
- break;
- case tflite::BuiltinOperator_RESHAPE:
- if (node.inputs->size != 2) {
- logError("NNAPI only supports 2-input RESHAPE");
- return kTfLiteError;
- }
- nn_op_type = ANEURALNETWORKS_RESHAPE;
- // add_reshape_params(node.builtin_data);
- break;
- case tflite::BuiltinOperator_RESIZE_BILINEAR:
- add_resize_bilinear_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_RESIZE_BILINEAR;
- break;
- case tflite::BuiltinOperator_SPACE_TO_DEPTH:
- add_space_to_depth_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_SPACE_TO_DEPTH;
- break;
- case tflite::BuiltinOperator_LSTM: {
- if (node.inputs->size + /* no of params */ 3 != 21) {
- logError("NNAPI only supports 21-input LSTMs");
- return kTfLiteError;
- }
- duplicate_state_tensor_float32(
- node.outputs->data[/*kOutputStateTensor*/ 0]);
- duplicate_state_tensor_float32(
- node.outputs->data[/*kCellStateTensor*/ 1]);
- add_lstm_params(node.builtin_data);
- add_lstm_scratch_tensor_float32();
- add_optional_tensors(ANEURALNETWORKS_TENSOR_FLOAT32);
- nn_op_type = ANEURALNETWORKS_LSTM;
- break;
- }
- case tflite::BuiltinOperator_DEQUANTIZE:
- nn_op_type = ANEURALNETWORKS_DEQUANTIZE;
- break;
- case tflite::BuiltinOperator_SVDF: {
- duplicate_state_tensor_float32(node.outputs->data[/*kStateTensor*/ 0]);
- add_svdf_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_SVDF;
- break;
- }
- case tflite::BuiltinOperator_RNN: {
- duplicate_state_tensor_float32(
- node.outputs->data[/*kHiddenStateTensor*/ 0]);
- add_rnn_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_RNN;
- break;
- }
- case tflite::BuiltinOperator_EMBEDDING_LOOKUP:
- nn_op_type = ANEURALNETWORKS_EMBEDDING_LOOKUP;
- break;
- case tflite::BuiltinOperator_PAD:
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_PAD;
- break;
- case tflite::BuiltinOperator_MEAN:
- nnapi_version = 11; // require NNAPI 1.1
- add_mean_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_MEAN;
- break;
- case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION:
- nn_op_type = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION;
- add_lrn_params(node.builtin_data);
- break;
- case tflite::BuiltinOperator_DIV:
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_DIV;
- RETURN_ERROR_IF_TFLITE_FAILED(check_and_add_activation(
- reinterpret_cast<TfLiteDivParams*>(node.builtin_data)->activation));
- break;
- case tflite::BuiltinOperator_SUB:
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_SUB;
- RETURN_ERROR_IF_TFLITE_FAILED(check_and_add_activation(
- reinterpret_cast<TfLiteSubParams*>(node.builtin_data)->activation));
- break;
- case tflite::BuiltinOperator_SQUEEZE:
- nnapi_version = 11; // requires NNAPI 1.1
- add_squeeze_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_SQUEEZE;
- break;
- case tflite::BuiltinOperator_TRANSPOSE:
- // The permutation input tensor value dictates the output dimensions.
- // TODO(b/110888333): Support dynamically-sized tensors in delegates.
- if ((node.inputs->size > 1) &&
- (subgraph->tensor(node.inputs->data[1])->allocation_type !=
- kTfLiteMmapRo)) {
- logError("NNAPI does not yet support dynamic tensors.");
- return kTfLiteError;
- }
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_TRANSPOSE;
- break;
- case tflite::BuiltinOperator_L2_NORMALIZATION:
- nn_op_type = ANEURALNETWORKS_L2_NORMALIZATION;
- if (reinterpret_cast<TfLiteL2NormParams*>(node.builtin_data)
- ->activation != kTfLiteActNone) {
- logError(
- "NNAPI does not support L2Normalization with fused activations");
- return kTfLiteError;
- }
- if ((node.inputs->size > 0) &&
- (subgraph->tensor(node.inputs->data[0])->dims->size != 4)) {
- logError("NNAPI only supports input rank 4 for L2Normalization");
- return kTfLiteError;
- }
- break;
- case tflite::BuiltinOperator_HASHTABLE_LOOKUP:
- if (subgraph->tensor(node.outputs->data[0])->type != kTfLiteFloat32) {
- logError("NNAPI only support HASHTABLE_LOOKUP with float32 output",
- builtin);
- return kTfLiteError;
- }
- nn_op_type = ANEURALNETWORKS_HASHTABLE_LOOKUP;
- break;
- case tflite::BuiltinOperator_SLICE:
- nn_op_type = ANEURALNETWORKS_SLICE;
- break;
- case tflite::BuiltinOperator_STRIDED_SLICE:
- add_strided_slice_params(node.builtin_data);
- nn_op_type = ANEURALNETWORKS_STRIDED_SLICE;
- break;
- case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_SPACE_TO_BATCH_ND;
- break;
- case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
- nnapi_version = 11; // require NNAPI 1.1
- nn_op_type = ANEURALNETWORKS_BATCH_TO_SPACE_ND;
- check_batch_to_space_params();
- break;
- case tflite::BuiltinOperator_CAST:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_CAST;
- break;
- case tflite::BuiltinOperator_TOPK_V2:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_TOPK_V2;
- break;
- case tflite::BuiltinOperator_GREATER:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_GREATER;
- break;
- case tflite::BuiltinOperator_GREATER_EQUAL:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_GREATER_EQUAL;
- break;
- case tflite::BuiltinOperator_LESS:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_LESS;
- break;
- case tflite::BuiltinOperator_LESS_EQUAL:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_LESS_EQUAL;
- break;
- case tflite::BuiltinOperator_GATHER:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_GATHER;
- add_gather_params(node.builtin_data);
- break;
- case tflite::BuiltinOperator_SPLIT:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_SPLIT;
- add_split_params(node.builtin_data);
- break;
- case tflite::BuiltinOperator_NEG:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_NEG;
- break;
- case tflite::BuiltinOperator_EXP:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_EXP;
- break;
- case tflite::BuiltinOperator_TRANSPOSE_CONV:
- add_transpose_conv_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_TRANSPOSE_CONV_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- case tflite::BuiltinOperator_PRELU:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_PRELU;
- break;
- case tflite::BuiltinOperator_ARG_MAX:
- check_arg_max_input(node.builtin_data);
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_ARGMAX;
- break;
- case tflite::BuiltinOperator_PACK:
- add_pack_ex_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_PACK_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- case tflite::BuiltinOperator_UNPACK:
- add_unpack_ex_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_UNPACK_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- case tflite::BuiltinOperator_SQRT:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_SQRT;
- break;
- case tflite::BuiltinOperator_RSQRT:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_RSQRT;
- break;
- case tflite::BuiltinOperator_EQUAL:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_EQUAL;
- break;
- case tflite::BuiltinOperator_NOT_EQUAL:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_NOT_EQUAL;
- break;
- case tflite::BuiltinOperator_SUM:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_REDUCE_SUM;
- add_reducer_params(node.builtin_data);
- break;
- case tflite::BuiltinOperator_REDUCE_ANY:
- add_reducer_params(node.builtin_data);
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_REDUCE_ANY;
- break;
- case tflite::BuiltinOperator_REDUCE_MAX:
- add_reducer_params(node.builtin_data);
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
- break;
- case tflite::BuiltinOperator_REDUCE_MIN:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_REDUCE_MIN;
- add_reducer_params(node.builtin_data);
- break;
- case tflite::BuiltinOperator_LOG:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_LOG;
- break;
- case tflite::BuiltinOperator_LOGICAL_AND:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_LOGICAL_AND;
- break;
- case tflite::BuiltinOperator_LOGICAL_OR:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_LOGICAL_OR;
- break;
- case tflite::BuiltinOperator_LOGICAL_NOT:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_LOGICAL_NOT;
- break;
- case tflite::BuiltinOperator_SQUARED_DIFFERENCE:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- case tflite::BuiltinOperator_MAXIMUM:
- nn_op_type = ANEURALNETWORKS_MAXIMUM;
- break;
- case tflite::BuiltinOperator_MINIMUM:
- nn_op_type = ANEURALNETWORKS_MINIMUM;
- break;
- case tflite::BuiltinOperator_ABS:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_ABS;
- break;
- case tflite::BuiltinOperator_ONE_HOT:
- add_one_hot_params(node.builtin_data);
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_ONE_HOT_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
-        continue; // _EX operator should use `continue` to skip addOperation.
- case tflite::BuiltinOperator_SIN:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_SIN;
- break;
- case tflite::BuiltinOperator_SHAPE:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_SHAPE_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
-        continue; // _EX operator should use `continue` to skip addOperation.
- case tflite::BuiltinOperator_REDUCE_PROD:
- add_reducer_params(node.builtin_data);
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_REDUCE_PROD;
- break;
- case tflite::BuiltinOperator_EXPAND_DIMS:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_EXPAND_DIMS;
- break;
- case tflite::BuiltinOperator_POW:
- if (!(subgraph->tensor(node.inputs->data[0])->type == kTfLiteFloat32 &&
- subgraph->tensor(node.inputs->data[1])->type == kTfLiteFloat32)) {
- logError("NNAPI delegate for Pow supports only float32.", builtin);
- return kTfLiteError;
- }
- nn_op_type = ANEURALNETWORKS_POW;
- break;
- case tflite::BuiltinOperator_SELECT:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_SELECT;
- break;
- case tflite::BuiltinOperator_ZEROS_LIKE:
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_ZEROS_LIKE_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
-        continue; // _EX operator should use `continue` to skip addOperation.
- case tflite::BuiltinOperator_TILE:
- nnapi_version = 12; // require NNAPI 1.2
- nn_op_type = ANEURALNETWORKS_TILE;
- break;
- case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
- case tflite::BuiltinOperator_LSH_PROJECTION:
- case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN:
- case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN:
- case tflite::BuiltinOperator_EMBEDDING_LOOKUP_SPARSE:
- case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM:
- case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
- //case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION:
- case tflite::BuiltinOperator_PADV2:
- //case tflite::BuiltinOperator_RESIZE_BILINEAR:
- case tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
- case tflite::BuiltinOperator_CALL:
- case tflite::BuiltinOperator_SKIP_GRAM:
- //case tflite::BuiltinOperator_RELU_N1_TO_1:
- //case tflite::BuiltinOperator_GATHER:
- //case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
- //case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
- //case tflite::BuiltinOperator_TOPK_V2:
- //case tflite::BuiltinOperator_SPLIT:
- //case tflite::BuiltinOperator_STRIDED_SLICE:
- //case tflite::BuiltinOperator_EXP:
- case tflite::BuiltinOperator_LOG_SOFTMAX:
- //case tflite::BuiltinOperator_DEQUANTIZE:
- case tflite::BuiltinOperator_DELEGATE:
- //case tflite::BuiltinOperator_CAST:
- //case tflite::BuiltinOperator_PRELU:
- //case tflite::BuiltinOperator_MAXIMUM:
- //case tflite::BuiltinOperator_MINIMUM:
- //case tflite::BuiltinOperator_ARG_MAX:
- case tflite::BuiltinOperator_ARG_MIN:
- //case tflite::BuiltinOperator_GREATER:
- //case tflite::BuiltinOperator_GREATER_EQUAL:
- //case tflite::BuiltinOperator_LESS:
- //case tflite::BuiltinOperator_LESS_EQUAL:
- //case tflite::BuiltinOperator_NEG:
- //case tflite::BuiltinOperator_SELECT:
- // case tflite::BuiltinOperator_SLICE:
- //case tflite::BuiltinOperator_SIN:
- //case tflite::BuiltinOperator_LOG:
- //case tflite::BuiltinOperator_TRANSPOSE_CONV:
- //case tflite::BuiltinOperator_TILE:
- //case tflite::BuiltinOperator_EXPAND_DIMS:
- case tflite::BuiltinOperator_SPARSE_TO_DENSE:
- //case tflite::BuiltinOperator_EQUAL:
- //case tflite::BuiltinOperator_NOT_EQUAL:
- //case tflite::BuiltinOperator_SUM:
- //case tflite::BuiltinOperator_REDUCE_MAX:
- //case tflite::BuiltinOperator_REDUCE_MIN:
- //case tflite::BuiltinOperator_REDUCE_PROD:
- //case tflite::BuiltinOperator_SQRT:
- //case tflite::BuiltinOperator_RSQRT:
- //case tflite::BuiltinOperator_SHAPE:
- //case tflite::BuiltinOperator_POW:
- case tflite::BuiltinOperator_FAKE_QUANT:
- //case tflite::BuiltinOperator_PACK:
- //case tflite::BuiltinOperator_LOGICAL_OR:
- //case tflite::BuiltinOperator_ONE_HOT:
- //case tflite::BuiltinOperator_LOGICAL_AND:
- //case tflite::BuiltinOperator_LOGICAL_NOT:
- //case tflite::BuiltinOperator_UNPACK:
- case tflite::BuiltinOperator_FLOOR_DIV:
- //case tflite::BuiltinOperator_REDUCE_ANY:
- case tflite::BuiltinOperator_SQUARE:
- //case tflite::BuiltinOperator_ZEROS_LIKE:
- case tflite::BuiltinOperator_FILL:
- case tflite::BuiltinOperator_FLOOR_MOD:
- case tflite::BuiltinOperator_RANGE:
- case tflite::BuiltinOperator_LEAKY_RELU:
- //case tflite::BuiltinOperator_SQUARED_DIFFERENCE:
- case tflite::BuiltinOperator_MIRROR_PAD:
- //case tflite::BuiltinOperator_ABS:
- case tflite::BuiltinOperator_SPLIT_V:
- logError("Op code %d is currently not delegated to NNAPI", builtin);
- return kTfLiteError;
- break;
- case tflite::BuiltinOperator_CUSTOM: {
- std::string custom_name(registration.custom_name);
- if (custom_name.compare("SquaredDifference") == 0) {
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- }
- else if (custom_name.compare("MatrixBandPart") == 0) {
- CHECK_NN(ANeuralNetworksModel_addOperationEx(
- nn_model, ANEURALNETWORKS_MATRIX_BAND_PART_EX,
- static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(node.outputs->size),
- reinterpret_cast<uint32_t*>(node.outputs->data)));
- continue;
- }
- logError("Custom operations are not supported when using NNAPI.");
- return kTfLiteError;
- break;
- }
- default:
- // Fix to use strict build option
- logError("Op code %d is currently not delegated to NNAPI", builtin);
- return kTfLiteError;
- break;
- }
-
- if (nnapi_version == 11 && GetAndroidSdkVersionCached() < 28) {
- //logError("Op %d needs NNAPI1.1", builtin);
- //return kTfLiteError;
- }
-
- // Add the operation.
- RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_addOperation(
- nn_model, nn_op_type, static_cast<uint32_t>(augmented_inputs.size()),
- augmented_inputs.data(),
- static_cast<uint32_t>(augmented_outputs.size()),
- reinterpret_cast<uint32_t*>(augmented_outputs.data())));
- }
- return kTfLiteOk;
-}
-
-TfLiteStatus NNAPIDelegate::BuildGraph(::tflite::Subgraph* subgraph) {
- if (nn_model_ && nn_compiled_model_) return model_status_;
-
- // TODO(aselle): This is not correct. need to handle resize invalidation.
- if (!nn_model_) {
- CHECK_NN(ANeuralNetworksModel_create(&nn_model_));
-
- // Find which tensors should be added to NNAPI. TFLite has temporaries
-    // and RNN back-edges which are not valid for NNAPI. We look through all
- // inputs and outputs and mark the mapping in tensor_id_to_nnapi_id with
- // kOperandIdNotSet. addTensorOperands will replace those with the
- // corresponding NNAPI operand ids and skip kOperandNotNeeded entries.
- std::vector<int64_t> tensor_id_to_nnapi_id(subgraph->tensors_size(),
- kOperandNotNeeded);
- // Fix to use strict build option
- auto set_ids_to_not_set = [&tensor_id_to_nnapi_id](const int* buf,
- int count) {
- for (int j = 0; j < count; j++) {
- auto tensor_id = buf[j];
- if (tensor_id != kOptionalTensor) {
- tensor_id_to_nnapi_id[tensor_id] = kOperandIdNotSet;
- }
- }
- };
- for (size_t i = 0; i < subgraph->nodes_size(); i++) {
- const auto* node_and_registration = subgraph->node_and_registration(i);
- const TfLiteNode& node = node_and_registration->first;
- set_ids_to_not_set(node.inputs->data, node.inputs->size);
- set_ids_to_not_set(node.outputs->data, node.outputs->size);
- }
- set_ids_to_not_set(subgraph->inputs().data(), subgraph->inputs().size());
- set_ids_to_not_set(subgraph->outputs().data(), subgraph->outputs().size());
-
- uint32_t next_id = 0;
- RETURN_ERROR_IF_TFLITE_FAILED(addTensorOperands(
- subgraph, nn_model_, &next_id, &tensor_id_to_nnapi_id));
- RETURN_ERROR_IF_TFLITE_FAILED(
- AddOpsAndParams(subgraph, nn_model_, next_id, &model_states_inputs_,
- &model_states_outputs_, tensor_id_to_nnapi_id));
-
- std::vector<uint32_t> augmented_inputs;
- MapAndAddTensorIds(subgraph->inputs().data(), subgraph->inputs().size(),
- &augmented_inputs, tensor_id_to_nnapi_id);
- augmented_inputs.insert(augmented_inputs.end(),
- model_states_inputs_.begin(),
- model_states_inputs_.end());
- std::vector<uint32_t> augmented_outputs;
- MapAndAddTensorIds(subgraph->outputs().data(), subgraph->outputs().size(),
- &augmented_outputs, tensor_id_to_nnapi_id);
- MapAndAddTensorIds(model_states_outputs_.data(),
- model_states_outputs_.size(), &augmented_outputs,
- tensor_id_to_nnapi_id);
-
- CHECK_NN(ANeuralNetworksModel_identifyInputsAndOutputs(
- nn_model_, static_cast<uint32_t>(augmented_inputs.size()),
- reinterpret_cast<const uint32_t*>(augmented_inputs.data()),
- static_cast<uint32_t>(augmented_outputs.size()),
- reinterpret_cast<const uint32_t*>(augmented_outputs.data())));
-
- // TODO Support ANeuralNetworksModel_relaxComputationFloat32toFloat16
- /*if (GetAndroidSdkVersionCached() >= 28) {
- CHECK_NN(ANeuralNetworksModel_relaxComputationFloat32toFloat16(
- nn_model_, subgraph->GetAllowFp16PrecisionForFp32()));
- }*/
- CHECK_NN(ANeuralNetworksModel_finish(nn_model_));
- }
- if (!nn_compiled_model_) {
- CHECK_NN(ANeuralNetworksCompilation_create(nn_model_, &nn_compiled_model_));
- CHECK_NN(ANeuralNetworksCompilation_finish(nn_compiled_model_));
- }
- return kTfLiteOk;
-}
-
-// Use unordered_map for temporary buffer
-#include <unordered_map>
-
-TfLiteStatus NNAPIDelegate::Invoke(::tflite::Subgraph* subgraph) {
- if (!nn_model_) {
- model_status_ = BuildGraph(subgraph);
- if (model_status_ != kTfLiteOk) {
- logError("Failed to build graph for NNAPI");
- }
- }
- if (model_status_ != kTfLiteOk) {
- return model_status_;
- }
-
- ANeuralNetworksExecution* execution = nullptr;
- CHECK_NN(ANeuralNetworksExecution_create(nn_compiled_model_, &execution));
-
-  // Allocate temporary buffers to hold boolean tensors cast to 8-bit for NNAPI
- std::unordered_map<size_t, uint8_t*> input_boolean_tensors;
- std::unordered_map<size_t, uint8_t*> output_boolean_tensors;
- for (size_t i = 0; i < subgraph->inputs().size(); i++)
- {
- int input = subgraph->inputs()[i];
- TfLiteTensor* tensor = subgraph->tensor(input);
- if (tensor->type == kTfLiteBool)
- {
- size_t elements = tensor->bytes / sizeof(bool);
-      uint8_t* temp_tensor = new uint8_t[elements];
- input_boolean_tensors[i] = temp_tensor;
- for (size_t idx = 0; idx < elements; idx++)
- {
-        temp_tensor[idx] = (tensor->data.b[idx] ? 0xff : 0x00); // BOOL8: non-zero is true
- }
- }
- }
- for (size_t i = 0; i < subgraph->outputs().size(); i++)
- {
- int output = subgraph->outputs()[i];
- TfLiteTensor* tensor = subgraph->tensor(output);
- if (tensor->type == kTfLiteBool)
- {
- uint8_t* temp_tensor = new uint8_t[tensor->bytes / sizeof(bool)];
- output_boolean_tensors[i] = temp_tensor;
- }
- }
-
- // Currently perform deep copy of input buffer
- for (size_t i = 0; i < subgraph->inputs().size(); i++) {
- int input = subgraph->inputs()[i];
- // TODO(aselle): Is this what we want or do we want input instead?
-    // TODO(aselle): This should be called setInputValue maybe to be consistent.
- TfLiteTensor* tensor = subgraph->tensor(input);
- // Workaround to pass bool type under NNAPI
- // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
- if (tensor->type == kTfLiteBool)
- {
- CHECK_NN(ANeuralNetworksExecution_setInput(
- execution, i, nullptr, input_boolean_tensors[i], tensor->bytes * sizeof(uint8_t) / sizeof(bool)));
- }
- else
- {
- CHECK_NN(ANeuralNetworksExecution_setInput(
- execution, i, nullptr, tensor->data.raw, tensor->bytes));
- }
- }
-
- // Tell nn api where to place final data.
- for (size_t i = 0; i < subgraph->outputs().size(); i++) {
- int output = subgraph->outputs()[i];
- TfLiteTensor* tensor = subgraph->tensor(output);
-
- // Workaround to pass bool type under NNAPI
- // ANEURALNETWORKS_TENSOR_BOOL8 tensor element size is 8 bits
- if (tensor->type == kTfLiteBool)
- {
- CHECK_NN(ANeuralNetworksExecution_setOutput(
- execution, i, nullptr, output_boolean_tensors[i], tensor->bytes * sizeof(uint8_t) / sizeof(bool)));
- }
- else
- {
- CHECK_NN(ANeuralNetworksExecution_setOutput(
- execution, i, nullptr, tensor->data.raw, tensor->bytes));
- }
- }
-
-  // The state_out of the previous invocation needs to be mapped to the state_in
-  // of the current invocation.
- for (size_t i = 0; i < model_states_outputs_.size(); i++) {
- int state_tensor_idx = model_states_outputs_[i];
- TfLiteTensor* tensor = subgraph->tensor(state_tensor_idx);
- // Here we are using a deep copy for state_in tensors so that we are not
-  // reading and writing into the same buffer during an invocation.
- // TODO(miaowang): using double shared buffer to minimize the copies.
- CHECK_NN(ANeuralNetworksExecution_setInput(
- execution, i + subgraph->inputs().size(), nullptr, tensor->data.raw,
- tensor->bytes));
- // Tell NNAPI where to output the state_out.
- CHECK_NN(ANeuralNetworksExecution_setOutput(
- execution, i + subgraph->outputs().size(), nullptr, tensor->data.raw,
- tensor->bytes));
- }
-
- // Currently use blocking compute.
- ANeuralNetworksEvent* event = nullptr;
- CHECK_NN(ANeuralNetworksExecution_startCompute(execution, &event));
- CHECK_NN(ANeuralNetworksEvent_wait(event));
- ANeuralNetworksEvent_free(event);
- ANeuralNetworksExecution_free(execution);
-
-  // Free the temporary buffers used to stage boolean inputs.
- for (size_t i = 0; i < subgraph->inputs().size(); i++) {
- int input = subgraph->inputs()[i];
- TfLiteTensor* tensor = subgraph->tensor(input);
-
- if (tensor->type == kTfLiteBool)
- {
- uint8_t* temp_tensor = input_boolean_tensors[i];
- input_boolean_tensors[i] = nullptr;
-      delete[] temp_tensor;
- }
- }
- for (size_t i = 0; i < subgraph->outputs().size(); i++) {
- int output = subgraph->outputs()[i];
- TfLiteTensor* tensor = subgraph->tensor(output);
-
- if (tensor->type == kTfLiteBool)
- {
- uint8_t* temp_tensor = output_boolean_tensors[i];
- size_t elements = tensor->bytes / sizeof(bool);
- for (size_t idx = 0; idx < elements; idx++)
- {
-        tensor->data.b[idx] = (temp_tensor[idx] != 0x00);
-      }
-      output_boolean_tensors[i] = nullptr;
-      delete[] temp_tensor;
- }
- }
-
-#if 0
- printf("From the NN API:\n");
- TfLiteTensor* tensor = subgraph->tensor(subgraph->outputs()[0]);
- if (float* data =
- subgraph->typed_tensor<float>(subgraph->outputs()[0])) {
- size_t num = tensor->bytes / sizeof(float);
- for (float* p = data; p < data + num; p++) {
- printf(" %f", *p);
- }
- printf("\n");
- }
-#endif
-
- return kTfLiteOk;
-}
-
-bool NNAPIDelegate::IsSupported() { return nnfw::NNAPIExists(); }
-
-} // namespace tflite
-} // namespace nnfw
-
-// clang-format on
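For reference, the boolean staging in NNAPIDelegate::Invoke above boils down to a simple round trip: TfLite stores one bool per byte, while NNAPI's ANEURALNETWORKS_TENSOR_BOOL8 treats zero as false and any non-zero byte as true. A minimal standalone sketch (to_bool8/from_bool8 are illustrative names, not part of the runtime):

    #include <cstdint>
    #include <vector>

    // Stage a TfLite bool tensor into the byte layout NNAPI expects.
    std::vector<uint8_t> to_bool8(const bool* data, size_t elements)
    {
      std::vector<uint8_t> staged(elements);
      for (size_t i = 0; i < elements; ++i)
        staged[i] = data[i] ? 0xff : 0x00; // zero is false, non-zero is true
      return staged;
    }

    // Copy an NNAPI BOOL8 output back into a TfLite bool tensor.
    void from_bool8(const uint8_t* staged, bool* data, size_t elements)
    {
      for (size_t i = 0; i < elements; ++i)
        data[i] = (staged[i] != 0x00);
    }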
diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
deleted file mode 100644
index 39355b106..000000000
--- a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
+++ /dev/null
@@ -1,153 +0,0 @@
-// This file is included from AddOpsAndParams defined in nnapi_delegate.cc
-// and contains lambdas that extend the original TensorFlow Lite implementation.
- auto add_scalar_bool8 = [&nn_model, &augmented_inputs,
- &next_id](bool value) {
- // Fix to use strict build option
- int8_t casted_value = (value ? 1 : 0);
-    ANeuralNetworksOperandType operand_type{};
-    operand_type.type = ANEURALNETWORKS_BOOL;
- CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
- CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &casted_value,
- sizeof(int8_t)))
- augmented_inputs.push_back(next_id++);
- };
-
- auto add_resize_bilinear_params = [&add_scalar_int32, &subgraph, &augmented_inputs](void* data) {
- auto builtin = reinterpret_cast<TfLiteResizeBilinearParams*>(data);
- if (builtin->align_corners) {
- FATAL("Resize bilinear does not support align corners in NNAPI");
- }
-
- TfLiteTensor* tensor = subgraph->tensor(augmented_inputs.back());
- assert(tensor->type == kTfLiteInt32);
- assert(tensor->bytes == sizeof(int)*2);
- augmented_inputs.pop_back();
-
- int height = ((int*)(tensor->data.raw))[1];
- int width = ((int*)(tensor->data.raw))[0];
- add_scalar_int32(height);
- add_scalar_int32(width);
- };
-
- auto add_transpose_conv_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(data);
- add_scalar_int32(builtin->padding);
- add_scalar_int32(builtin->stride_width);
- add_scalar_int32(builtin->stride_height);
- };
-
- auto add_lrn_params = [&add_scalar_int32,
- &add_scalar_float32](void* data) {
- auto builtin = reinterpret_cast<TfLiteLocalResponseNormParams*>(data);
- add_scalar_int32(builtin->radius);
- add_scalar_float32(builtin->bias);
- add_scalar_float32(builtin->alpha);
- add_scalar_float32(builtin->beta);
- };
-
- auto add_strided_slice_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteStridedSliceParams*>(data);
- add_scalar_int32(builtin->begin_mask);
- add_scalar_int32(builtin->end_mask);
-    // ellipsis_mask and new_axis_mask are not supported by the NN runtime
- // cf) tflite interpreter supports both operations
- if (builtin->ellipsis_mask) {
- FATAL("STRIDE_SLICE does not support ellipsis_mask in NNAPI");
- }
- if (builtin->new_axis_mask) {
- FATAL("STRIDE_SLICE does not support new_axis_mask in NNAPI");
- }
- add_scalar_int32(builtin->shrink_axis_mask);
- };
-
- auto add_gather_params = [&add_scalar_int32, &augmented_inputs](void* data) {
- auto builtin = reinterpret_cast<TfLiteGatherParams*>(data);
- if (builtin->axis != 0) {
- FATAL("GATHER does not support axis>0 in NNAPI");
- }
-
- auto indices_index = augmented_inputs.back();
- augmented_inputs.pop_back();
- add_scalar_int32(builtin->axis);
- augmented_inputs.push_back(indices_index);
- };
-
- auto add_pack_ex_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLitePackParams*>(data);
- add_scalar_int32(builtin->values_count);
- add_scalar_int32(builtin->axis);
- };
-
- auto add_unpack_ex_params = [&add_scalar_int32](void* data) {
- auto builtin = reinterpret_cast<TfLiteUnpackParams*>(data);
- add_scalar_int32(builtin->num);
- add_scalar_int32(builtin->axis);
- };
-
- auto check_batch_to_space_params = [subgraph, &node, &augmented_inputs]() {
-
-    // If there are 3 inputs, check whether crops holds the default values
-    // {0, 0, 0, 0}; anything else is unsupported by NNAPI.
-
- if(augmented_inputs.size() == 3)
- {
- const uint32_t crops_buffer_index = node.inputs->data[2];
- const TfLiteTensor* crops = subgraph->tensor(crops_buffer_index);
- const int *crops_value = crops->data.i32;
-
-      // Check whether crops holds the default values {0, 0, 0, 0}
- if(crops_value[0] != 0 || crops_value[1] != 0 || crops_value[2] != 0 || crops_value[3] != 0)
- {
- FATAL("BATCH_TO_SPACE_ND does not support Explicit crops in NNAPI");
- }
- else
- {
-        // Drop the crops input and pass only the other two inputs
- augmented_inputs.pop_back();
- }
- }
- };
-
- auto add_split_params = [&add_scalar_int32, &augmented_inputs](void* data) {
- // swap 1st and 2nd operand order
- auto input_tensor = augmented_inputs[1];
- auto axis = augmented_inputs[0];
- augmented_inputs[0] = input_tensor;
- augmented_inputs[1] = axis;
-
- auto builtin = reinterpret_cast<TfLiteSplitParams*>(data);
- add_scalar_int32(builtin->num_splits);
- };
-
- auto check_arg_max_input = [&subgraph, &augmented_inputs](void *data) {
- auto params = reinterpret_cast<TfLiteArgMaxParams*>(data);
- if (params->output_type != kTfLiteInt32)
- {
- FATAL("Cannot handle output type in NNAPI");
- }
-
- TfLiteTensor* axis_tensor = subgraph->tensor(augmented_inputs.back());
- assert(axis_tensor->type == kTfLiteInt32);
-
- int64_t count = 1;
- for (int i = 0; i < axis_tensor->dims->size; ++i) {
- count *= axis_tensor->dims->data[i];
- }
- assert(count == 1);
- };
-
- auto add_reducer_params = [&add_scalar_bool8](void* data) {
- auto builtin = reinterpret_cast<TfLiteReducerParams*>(data);
- if (builtin == nullptr)
- {
- add_scalar_bool8(0);
- }
- else
- {
- add_scalar_bool8(builtin->keep_dims);
- }
- };
-
- auto add_one_hot_params = [&add_scalar_int32](void* data) {
- const auto* builtin = reinterpret_cast<TfLiteOneHotParams*>(data);
- add_scalar_int32(builtin->axis);
- };
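The lambdas in this file lean on an add_scalar_int32 helper defined earlier in the deleted nnapi_delegate.cc (not shown in this hunk). Judging from add_scalar_bool8 at the top of the file, its shape is roughly the following sketch, an assumption rather than the exact deleted code:

    // Append a constant INT32 operand to the model and record its id as the
    // next operation input (the value is copied by setOperandValue).
    auto add_scalar_int32 = [&nn_model, &augmented_inputs, &next_id](int32_t value) {
      ANeuralNetworksOperandType operand_type{};
      operand_type.type = ANEURALNETWORKS_INT32;
      CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
      CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value, sizeof(int32_t)))
      augmented_inputs.push_back(next_id++);
    };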
diff --git a/runtime/libs/tflite/port/CMakeLists.txt b/runtime/libs/tflite/port/CMakeLists.txt
deleted file mode 100644
index 82c83f722..000000000
--- a/runtime/libs/tflite/port/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# We may need to support multiple tensorflow versions
-# Example)
-# For ubuntu: tensorflow lite v1.13.1
-# For tizen: tensorflow lite v1.9
-set(SUPPORT_TFLITE_VERSION "1.13.1" CACHE STRING "Supporting TensorFlow lite version")
-
-add_subdirectories()
diff --git a/runtime/libs/tflite/src/Diff.cpp b/runtime/libs/tflite/src/Diff.cpp
index 39f994352..8165798e0 100644
--- a/runtime/libs/tflite/src/Diff.cpp
+++ b/runtime/libs/tflite/src/Diff.cpp
@@ -22,6 +22,8 @@
#include "misc/tensor/Zipper.h"
#include "misc/tensor/Comparator.h"
+#include <tensorflow/lite/c/c_api.h>
+
#include <iostream>
#include <cassert>
@@ -29,9 +31,9 @@ class DiffSummary : public nnfw::misc::tensor::Comparator::Observer
{
public:
DiffSummary()
- : max_abs_diff_index(0), max_abs_diff_expected{0.0f}, max_abs_diff_obtained{0.0f},
- max_abs_diff_value{0.0f}, max_rel_diff_index(0), max_rel_diff_expected{0.0f},
- max_rel_diff_obtained{0.0f}, max_rel_diff_value{0.0f}
+ : max_abs_diff_index(0), max_abs_diff_expected{0.0f}, max_abs_diff_obtained{0.0f},
+ max_abs_diff_value{0.0f}, max_rel_diff_index(0), max_rel_diff_expected{0.0f},
+ max_rel_diff_obtained{0.0f}, max_rel_diff_value{0.0f}
{
// DO NOTHING
}
@@ -86,12 +88,12 @@ bool TfLiteInterpMatchApp::compareSingleTensorView(const nnfw::tflite::TensorVie
using nnfw::misc::tensor::zip;
zip(expected.shape(), expected, obtained)
- << [&](const Index &index, T expected_value, T obtained_value) {
- if (expected_value != obtained_value)
- {
- diffs.emplace_back(index, expected_value, obtained_value);
- }
- };
+ << [&](const Index &index, T expected_value, T obtained_value) {
+ if (expected_value != obtained_value)
+ {
+ diffs.emplace_back(index, expected_value, obtained_value);
+ }
+ };
// TODO Unify summary generation code
if (diffs.size() == 0)
@@ -121,8 +123,8 @@ bool TfLiteInterpMatchApp::compareSingleTensorView(const nnfw::tflite::TensorVie
template <>
bool TfLiteInterpMatchApp::compareSingleTensorView<float>(
- const nnfw::tflite::TensorView<float> &expected,
- const nnfw::tflite::TensorView<float> &obtained, int id) const
+ const nnfw::tflite::TensorView<float> &expected, const nnfw::tflite::TensorView<float> &obtained,
+ int id) const
{
DiffSummary summary;
@@ -190,53 +192,57 @@ bool TfLiteInterpMatchApp::compareSingleTensorView<float>(
#include <map>
-bool TfLiteInterpMatchApp::run(::tflite::Interpreter &interp, ::tflite::Interpreter &nnapi) const
+bool TfLiteInterpMatchApp::run(TfLiteInterpreter &expected, TfLiteInterpreter &obtained) const
{
- assert(interp.outputs() == nnapi.outputs());
+ auto output_count = TfLiteInterpreterGetOutputTensorCount(&expected);
+ assert(output_count == TfLiteInterpreterGetOutputTensorCount(&obtained));
bool all_matched = true;
- using Comparator = std::function<bool(int id, ::tflite::Interpreter &, ::tflite::Interpreter &)>;
+ using Comparator = std::function<bool(int32_t, const TfLiteTensor *, const TfLiteTensor *)>;
std::map<TfLiteType, Comparator> comparators;
- comparators[kTfLiteUInt8] = [this](int id, ::tflite::Interpreter &interp,
- ::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::tflite::TensorView<uint8_t>::make(interp, id);
- const auto obtained = nnfw::tflite::TensorView<uint8_t>::make(nnapi, id);
+ comparators[kTfLiteUInt8] = [this](int32_t id, const TfLiteTensor *expected_tensor,
+ const TfLiteTensor *obtained_tensor) {
+ const auto expected_view = nnfw::tflite::TensorView<uint8_t>::make(expected_tensor);
+ const auto obtained_view = nnfw::tflite::TensorView<uint8_t>::make(obtained_tensor);
- return compareSingleTensorView(expected, obtained, id);
+ return compareSingleTensorView(expected_view, obtained_view, id);
};
- comparators[kTfLiteInt32] = [this](int id, ::tflite::Interpreter &interp,
- ::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::tflite::TensorView<int32_t>::make(interp, id);
- const auto obtained = nnfw::tflite::TensorView<int32_t>::make(nnapi, id);
+ comparators[kTfLiteInt32] = [this](int32_t id, const TfLiteTensor *expected_tensor,
+ const TfLiteTensor *obtained_tensor) {
+ const auto expected_view = nnfw::tflite::TensorView<int32_t>::make(expected_tensor);
+ const auto obtained_view = nnfw::tflite::TensorView<int32_t>::make(obtained_tensor);
- return compareSingleTensorView(expected, obtained, id);
+ return compareSingleTensorView(expected_view, obtained_view, id);
};
- comparators[kTfLiteFloat32] = [this](int id, ::tflite::Interpreter &interp,
- ::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::tflite::TensorView<float>::make(interp, id);
- const auto obtained = nnfw::tflite::TensorView<float>::make(nnapi, id);
+ comparators[kTfLiteFloat32] = [this](int32_t id, const TfLiteTensor *expected_tensor,
+ const TfLiteTensor *obtained_tensor) {
+ const auto expected_view = nnfw::tflite::TensorView<float>::make(expected_tensor);
+ const auto obtained_view = nnfw::tflite::TensorView<float>::make(obtained_tensor);
- return compareSingleTensorView(expected, obtained, id);
+ return compareSingleTensorView(expected_view, obtained_view, id);
};
- comparators[kTfLiteBool] = [this](int id, ::tflite::Interpreter &interp,
- ::tflite::Interpreter &nnapi) {
- const auto expected = nnfw::tflite::TensorView<bool>::make(interp, id);
- const auto obtained = nnfw::tflite::TensorView<bool>::make(nnapi, id);
+ comparators[kTfLiteBool] = [this](int32_t id, const TfLiteTensor *expected_tensor,
+ const TfLiteTensor *obtained_tensor) {
+ const auto expected_view = nnfw::tflite::TensorView<bool>::make(expected_tensor);
+ const auto obtained_view = nnfw::tflite::TensorView<bool>::make(obtained_tensor);
- return compareSingleTensorView(expected, obtained, id);
+ return compareSingleTensorView(expected_view, obtained_view, id);
};
- for (const auto &id : interp.outputs())
+ for (int32_t idx = 0; idx < output_count; idx++)
{
- assert(interp.tensor(id)->type == nnapi.tensor(id)->type);
+ auto const expected_tensor = TfLiteInterpreterGetOutputTensor(&expected, idx);
+ auto const obtained_tensor = TfLiteInterpreterGetOutputTensor(&obtained, idx);
+ auto const tensor_type = TfLiteTensorType(expected_tensor);
+ assert(tensor_type == TfLiteTensorType(obtained_tensor));
- auto it = comparators.find(interp.tensor(id)->type);
+ auto it = comparators.find(tensor_type);
if (it == comparators.end())
{
@@ -245,7 +251,7 @@ bool TfLiteInterpMatchApp::run(::tflite::Interpreter &interp, ::tflite::Interpre
const auto &comparator = it->second;
- if (!comparator(id, interp, nnapi))
+ if (!comparator(idx, expected_tensor, obtained_tensor))
{
all_matched = false;
}
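With this change TfLiteInterpMatchApp is driven by the TensorFlow Lite C API instead of the C++ Interpreter. A hedged usage sketch (the interpreter names and the trivial comparator are illustrative):

    // Compare the outputs of two already-invoked TfLiteInterpreter instances.
    nnfw::misc::tensor::Comparator comparator(
      [](float lhs, float rhs) { return lhs == rhs; });
    TfLiteInterpMatchApp app(comparator);
    bool all_matched = app.run(*expected_interp, *obtained_interp);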
diff --git a/runtime/libs/tflite/src/FeatureView.cpp b/runtime/libs/tflite/src/FeatureView.cpp
deleted file mode 100644
index fdf5a4b00..000000000
--- a/runtime/libs/tflite/src/FeatureView.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/FeatureView.h"
-#include "tflite/TensorUtils.h"
-
-#include <cassert>
-
-namespace nnfw
-{
-namespace tflite
-{
-
-nnfw::misc::feature::Shape getFeatureShape(const TfLiteTensor *tensor)
-{
- nnfw::misc::feature::Shape shape{tensor->dims->data[3], tensor->dims->data[1],
- tensor->dims->data[2]};
-
- return shape;
-}
-
-FeatureView<float>::FeatureView(::tflite::Interpreter &interp, const InputIndex &index)
-{
- const auto tensor_index = interp.inputs().at(index.asInt());
- auto tensor_ptr = interp.tensor(tensor_index);
-
- assert(isFloatTensor(tensor_ptr));
- assert(isFeatureTensor(tensor_ptr));
-
- _shape = getFeatureShape(tensor_ptr);
- _base = interp.typed_tensor<float>(tensor_index);
-}
-
-FeatureView<float>::FeatureView(::tflite::Interpreter &interp, const OutputIndex &index)
-{
- const auto tensor_index = interp.outputs().at(index.asInt());
- auto tensor_ptr = interp.tensor(tensor_index);
-
- assert(isFloatTensor(tensor_ptr));
- assert(isFeatureTensor(tensor_ptr));
-
- _shape = getFeatureShape(tensor_ptr);
- _base = interp.typed_tensor<float>(tensor_index);
-}
-
-float FeatureView<float>::at(uint32_t ch, uint32_t row, uint32_t col) const
-{
- return *(_base + getElementOffset(ch, row, col));
-}
-
-float &FeatureView<float>::at(uint32_t ch, uint32_t row, uint32_t col)
-{
- return *(_base + getElementOffset(ch, row, col));
-}
-
-} // namespace tflite
-} // namespace nnfw
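The deleted FeatureView addressed a rank-4 NHWC float tensor by (channel, row, column); getFeatureShape above reads dims as C = dims[3], H = dims[1], W = dims[2] accordingly. getElementOffset lives in the header rather than in this file, so the following is an assumed sketch of the usual NHWC math it performs (with batch size 1):

    // offset(ch, row, col) in a 1 x H x W x C buffer
    uint32_t getElementOffset(const nnfw::misc::feature::Shape& shape,
                              uint32_t ch, uint32_t row, uint32_t col)
    {
      return row * shape.W * shape.C + col * shape.C + ch;
    }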
diff --git a/runtime/libs/tflite/src/RandomInputInitializer.cpp b/runtime/libs/tflite/src/RandomInputInitializer.cpp
new file mode 100644
index 000000000..9ed90f38e
--- /dev/null
+++ b/runtime/libs/tflite/src/RandomInputInitializer.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/RandomInputInitializer.h"
+#include "tflite/TensorView.h"
+
+#include <misc/tensor/IndexIterator.h>
+
+namespace nnfw
+{
+namespace tflite
+{
+namespace
+{
+
+template <typename T>
+void setValue(nnfw::misc::RandomGenerator &randgen, const TfLiteTensor *tensor)
+{
+ auto tensor_view = nnfw::tflite::TensorView<T>::make(tensor);
+
+ nnfw::misc::tensor::iterate(tensor_view.shape())
+ << [&](const nnfw::misc::tensor::Index &ind) { tensor_view.at(ind) = randgen.generate<T>(); };
+}
+
+} // namespace
+
+void RandomInputInitializer::run(TfLiteInterpreter &interp)
+{
+ const auto input_count = TfLiteInterpreterGetInputTensorCount(&interp);
+ for (int32_t idx = 0; idx < input_count; idx++)
+ {
+ auto tensor = TfLiteInterpreterGetInputTensor(&interp, idx);
+ auto const tensor_type = TfLiteTensorType(tensor);
+ switch (tensor_type)
+ {
+ case kTfLiteFloat32:
+ setValue<float>(_randgen, tensor);
+ break;
+ case kTfLiteInt32:
+ setValue<int32_t>(_randgen, tensor);
+ break;
+ case kTfLiteUInt8:
+ setValue<uint8_t>(_randgen, tensor);
+ break;
+ case kTfLiteBool:
+ setValue<bool>(_randgen, tensor);
+ break;
+ case kTfLiteInt8:
+ setValue<int8_t>(_randgen, tensor);
+ break;
+ default:
+ throw std::runtime_error{"Not supported input type"};
+ }
+ }
+}
+
+} // namespace tflite
+} // namespace nnfw
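A hypothetical usage of the new initializer (the constructor is assumed to store the RandomGenerator behind the _randgen member; interp stands for an already-built TfLiteInterpreter with allocated tensors):

    uint32_t seed = 1;
    nnfw::misc::RandomGenerator randgen{seed, 0.0f /* mean */, 2.0f /* stddev */};
    nnfw::tflite::RandomInputInitializer initializer{randgen};
    initializer.run(*interp); // fills every input tensor with type-appropriate random values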
diff --git a/runtime/libs/tflite/src/RandomTestRunner.cpp b/runtime/libs/tflite/src/RandomTestRunner.cpp
deleted file mode 100644
index f7fccbf3b..000000000
--- a/runtime/libs/tflite/src/RandomTestRunner.cpp
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/RandomTestRunner.h"
-#include "tflite/Diff.h"
-#include "tflite/TensorLogger.h"
-#include "tflite/ext/nnapi_delegate.h"
-
-#include <misc/tensor/IndexIterator.h>
-#include <misc/tensor/Object.h>
-#include <misc/EnvVar.h>
-#include <misc/fp32.h>
-
-#include <cassert>
-#include <map>
-#include <functional>
-#include <iostream>
-
-namespace nnfw
-{
-namespace tflite
-{
-
-using namespace std::placeholders;
-
-void RandomTestRunner::compile(const nnfw::tflite::Builder &builder)
-{
- _tfl_interp = builder.build();
- _nnapi = builder.build();
-
- _tfl_interp->UseNNAPI(false);
-
- // Allocate Tensors
- _tfl_interp->AllocateTensors();
- _nnapi->AllocateTensors();
-
- assert(_tfl_interp->inputs() == _nnapi->inputs());
-
- using ::tflite::Interpreter;
- using Initializer = std::function<void(int id, Interpreter *, Interpreter *)>;
-
- std::map<TfLiteType, Initializer> initializers;
- std::map<TfLiteType, Initializer> reseters;
-
-  // Generate signed 32-bit integer (s32) input
- initializers[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(_tfl_interp->tensor(id)->type == kTfLiteInt32);
- assert(_nnapi->tensor(id)->type == kTfLiteInt32);
-
- auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- int32_t value = 0;
-
- nnfw::misc::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::misc::tensor::Index &ind) {
- // TODO Generate random values
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- ++value;
- };
- };
-
-  // Reset signed 32-bit integer (s32) tensors to zero
- reseters[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(_tfl_interp->tensor(id)->type == kTfLiteInt32);
- assert(_nnapi->tensor(id)->type == kTfLiteInt32);
-
- auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- int32_t value = 0;
-
- nnfw::misc::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::misc::tensor::Index &ind) {
- // TODO Generate random values
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- initializers[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(_tfl_interp->tensor(id)->type == kTfLiteUInt8);
- assert(_nnapi->tensor(id)->type == kTfLiteUInt8);
-
- auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<uint8_t (nnfw::misc::RandomGenerator::*)(
- const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
- &nnfw::misc::RandomGenerator::generate<uint8_t>);
- const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
- assert(tfl_interp_view.shape() == data.shape());
-
- nnfw::misc::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::misc::tensor::Index &ind) {
- const auto value = data.at(ind);
-
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- reseters[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(_tfl_interp->tensor(id)->type == kTfLiteUInt8);
- assert(_nnapi->tensor(id)->type == kTfLiteUInt8);
-
- auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<uint8_t (nnfw::misc::RandomGenerator::*)(
- const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
- &nnfw::misc::RandomGenerator::generate<uint8_t>);
- const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
- assert(tfl_interp_view.shape() == data.shape());
-
- uint8_t value = 0;
-
- nnfw::misc::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::misc::tensor::Index &ind) {
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- initializers[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(_tfl_interp->tensor(id)->type == kTfLiteFloat32);
- assert(_nnapi->tensor(id)->type == kTfLiteFloat32);
-
- auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<float (nnfw::misc::RandomGenerator::*)(
- const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
- &nnfw::misc::RandomGenerator::generate<float>);
- const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
-
- assert(tfl_interp_view.shape() == data.shape());
-
- nnfw::misc::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::misc::tensor::Index &ind) {
- const auto value = data.at(ind);
-
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- reseters[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(_tfl_interp->tensor(id)->type == kTfLiteFloat32);
- assert(_nnapi->tensor(id)->type == kTfLiteFloat32);
-
- auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<float (nnfw::misc::RandomGenerator::*)(
- const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
- &nnfw::misc::RandomGenerator::generate<float>);
- const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
-
- assert(tfl_interp_view.shape() == data.shape());
-
- float value = 0;
-
- nnfw::misc::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::misc::tensor::Index &ind) {
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- initializers[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(_tfl_interp->tensor(id)->type == kTfLiteBool);
- assert(_nnapi->tensor(id)->type == kTfLiteBool);
-
- auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<bool (nnfw::misc::RandomGenerator::*)(
- const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
- &nnfw::misc::RandomGenerator::generate<bool>);
- const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
-
- assert(tfl_interp_view.shape() == data.shape());
-
- nnfw::misc::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::misc::tensor::Index &ind) {
- const auto value = data.at(ind);
-
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- reseters[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
- assert(_tfl_interp->tensor(id)->type == kTfLiteBool);
- assert(_nnapi->tensor(id)->type == kTfLiteBool);
-
- auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
- auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
-
- assert(tfl_interp_view.shape() == nnapi_view.shape());
-
- auto fp = static_cast<bool (nnfw::misc::RandomGenerator::*)(
- const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
- &nnfw::misc::RandomGenerator::generate<bool>);
- const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
- std::bind(fp, _randgen, _1, _2));
-
- assert(tfl_interp_view.shape() == data.shape());
-
- bool value = false;
-
- nnfw::misc::tensor::iterate(tfl_interp_view.shape())
- << [&](const nnfw::misc::tensor::Index &ind) {
- tfl_interp_view.at(ind) = value;
- nnapi_view.at(ind) = value;
- };
- };
-
- // Fill IFM with random numbers
- for (const auto id : _tfl_interp->inputs())
- {
- assert(_tfl_interp->tensor(id)->type == _nnapi->tensor(id)->type);
-
- auto it = initializers.find(_tfl_interp->tensor(id)->type);
-
- if (it == initializers.end())
- {
- throw std::runtime_error{"Not supported input type"};
- }
-
- it->second(id, _tfl_interp.get(), _nnapi.get());
- }
-
- // Fill OFM with 0
- for (const auto id : _tfl_interp->outputs())
- {
- assert(_tfl_interp->tensor(id)->type == _nnapi->tensor(id)->type);
-
- auto it = reseters.find(_tfl_interp->tensor(id)->type);
-
- if (it == reseters.end())
- {
- throw std::runtime_error{"Not supported input type"};
- }
-
- it->second(id, _tfl_interp.get(), _nnapi.get());
- }
-}
-
-int RandomTestRunner::run(size_t running_count)
-{
- std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
- _tfl_interp->Invoke();
-
- nnfw::tflite::NNAPIDelegate d;
-
- for (size_t i = 1; i <= running_count; ++i)
- {
- std::cout << "[NNAPI TEST #" << i << "] Run T/F Lite Interpreter with NNAPI" << std::endl;
-
- char *env = getenv("UPSTREAM_DELEGATE");
-
- if (env && !std::string(env).compare("1"))
- {
- _nnapi->UseNNAPI(true);
- _nnapi->Invoke();
- }
- else
- {
- // WARNING
-      // primary_subgraph: Experimental interface. Returns the 1st subgraph
- // Invoke() will call BuildGraph() internally
- if (d.Invoke(&_nnapi.get()->primary_subgraph()))
- {
- throw std::runtime_error{"Failed to BuildGraph"};
- }
- }
-
- // Compare OFM
- std::cout << "[NNAPI TEST #" << i << "] Compare the result" << std::endl;
-
- const auto tolerance = _param.tolerance;
-
- auto equals = [tolerance](float lhs, float rhs) {
- // NOTE Hybrid approach
- // TODO Allow users to set tolerance for absolute_epsilon_equal
- if (nnfw::misc::fp32::absolute_epsilon_equal(lhs, rhs))
- {
- return true;
- }
-
- return nnfw::misc::fp32::epsilon_equal(lhs, rhs, tolerance);
- };
-
- nnfw::misc::tensor::Comparator comparator(equals);
- TfLiteInterpMatchApp app(comparator);
-
- app.verbose() = _param.verbose;
-
- bool res = app.run(*_tfl_interp, *_nnapi);
-
- if (!res)
- {
- return 255;
- }
-
- std::cout << "[NNAPI TEST #" << i << "] PASSED" << std::endl << std::endl;
-
- if (_param.tensor_logging)
- nnfw::tflite::TensorLogger::get().save(_param.log_path, *_tfl_interp);
- }
-
- return 0;
-}
-
-RandomTestRunner RandomTestRunner::make(uint32_t seed)
-{
- RandomTestParam param;
-
- param.verbose = nnfw::misc::EnvVar("VERBOSE").asInt(0);
- param.tolerance = nnfw::misc::EnvVar("TOLERANCE").asInt(1);
- param.tensor_logging = nnfw::misc::EnvVar("TENSOR_LOGGING").asBool(false);
- param.log_path = nnfw::misc::EnvVar("TENSOR_LOGGING").asString("tensor_log.txt");
-
- return RandomTestRunner{seed, param};
-}
-
-} // namespace tflite
-} // namespace nnfw
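The equals lambda above implements a hybrid float comparison: accept the pair if an absolute-epsilon check passes (useful near zero), otherwise fall back to a relative check scaled by the TOLERANCE environment value. A sketch of those semantics, not the actual nnfw::misc::fp32 helpers:

    #include <algorithm>
    #include <cmath>
    #include <limits>

    bool hybrid_equal(float lhs, float rhs, uint32_t tolerance)
    {
      const float eps = std::numeric_limits<float>::epsilon();
      if (std::fabs(lhs - rhs) <= eps)
        return true; // absolute check catches values near zero
      // relative check: within `tolerance` float-epsilons of the larger magnitude
      const float scale = std::max(std::fabs(lhs), std::fabs(rhs));
      return std::fabs(lhs - rhs) <= scale * tolerance * eps;
    }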
diff --git a/runtime/libs/tflite/src/TensorShapeUtils.cpp b/runtime/libs/tflite/src/TensorShapeUtils.cpp
deleted file mode 100644
index 689b6151b..000000000
--- a/runtime/libs/tflite/src/TensorShapeUtils.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite/TensorShapeUtils.h"
-
-namespace nnfw
-{
-namespace tflite
-{
-
-nnfw::misc::tensor::Shape broadcast(const nnfw::misc::tensor::Shape &lhs_shape,
- const nnfw::misc::tensor::Shape &rhs_shape)
-{
- const uint32_t lhs_rank = lhs_shape.rank();
- const uint32_t rhs_rank = rhs_shape.rank();
- const uint32_t out_rank = std::max(lhs_rank, rhs_rank);
- const uint32_t lhs_rank_diff = out_rank - lhs_rank;
- const uint32_t rhs_rank_diff = out_rank - rhs_rank;
-
- nnfw::misc::tensor::Shape out_shape(out_rank);
-
- for (uint32_t axis = 0; axis < out_rank; ++axis)
- {
- out_shape.dim(axis) = std::max(axis < lhs_rank_diff ? 1 : lhs_shape.dim(axis - lhs_rank_diff),
- axis < rhs_rank_diff ? 1 : rhs_shape.dim(axis - rhs_rank_diff));
- }
-
- return out_shape;
-}
-
-} // namespace tflite
-} // namespace nnfw
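For reference, the deleted broadcast() computed numpy-style shape broadcasting: the shorter shape is right-aligned against the longer one, missing dimensions count as 1, and each output dimension takes the larger of the pair. Note that it took the max without validating that a mismatched pair contains a 1. A worked example:

    // broadcast({3, 1, 5}, {4, 5}) -> {3, 4, 5}
    //   lhs: 3 1 5
    //   rhs:   4 5   (right-aligned; missing dim treated as 1)
    //   out: 3 4 5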