author    Chunseok Lee <chunseok.lee@samsung.com>  2020-10-28 12:16:55 +0900
committer Chunseok Lee <chunseok.lee@samsung.com>  2020-10-28 12:16:55 +0900
commit    c55f8a6db48cda9d3a78048338b7f18c4cca62b8 (patch)
tree      761ee8e171e5203f5c598ad93b2e7e0bc2e31aa2 /runtime/libs
parent    74476a2d0296bdad70a2f7f90bc7419a8b05bffd (diff)
Diffstat (limited to 'runtime/libs')
-rw-r--r--  runtime/libs/benchmark/include/benchmark/MemoryInfo.h (renamed from runtime/libs/ndarray/src/ContiguousSpan.cpp) |  33
-rw-r--r--  runtime/libs/benchmark/include/benchmark/MemoryPoller.h |   4
-rw-r--r--  runtime/libs/benchmark/include/benchmark/Phases.h |   5
-rw-r--r--  runtime/libs/benchmark/include/benchmark/Result.h |   2
-rw-r--r--  runtime/libs/benchmark/src/MemoryInfo.cpp | 169
-rw-r--r--  runtime/libs/benchmark/src/MemoryPoller.cpp | 163
-rw-r--r--  runtime/libs/benchmark/src/Phases.cpp |   8
-rw-r--r--  runtime/libs/benchmark/src/Result.cpp |  12
-rw-r--r--  runtime/libs/misc/include/misc/polymorphic_downcast.h |   2
-rw-r--r--  runtime/libs/ndarray/CMakeLists.txt |  19
-rw-r--r--  runtime/libs/ndarray/example/CMakeLists.txt |   4
-rw-r--r--  runtime/libs/ndarray/example/example_array.cpp |  76
-rw-r--r--  runtime/libs/ndarray/example/example_no_array.cpp |  85
-rw-r--r--  runtime/libs/ndarray/include/ndarray/Array.h | 195
-rw-r--r--  runtime/libs/ndarray/include/ndarray/Common.h |  22
-rw-r--r--  runtime/libs/ndarray/include/ndarray/ContiguousSpan.h | 108
-rw-r--r--  runtime/libs/ndarray/include/ndarray/Shape.h |  66
-rw-r--r--  runtime/libs/ndarray/src/Array.cpp |  27
-rw-r--r--  runtime/libs/ndarray/src/detail/cxx14.h |  67
-rw-r--r--  runtime/libs/ndarray/test/CMakeLists.txt |  17
-rw-r--r--  runtime/libs/ndarray/test/ndarray_test.cpp |  92
-rw-r--r--  runtime/libs/nnapi/CMakeLists.txt |   5
-rw-r--r--  runtime/libs/nnapi/include/NeuralNetworksExShim.h (renamed from runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h) |   0
-rw-r--r--  runtime/libs/nnapi/include/NeuralNetworksLoadHelpers.h (renamed from runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h) |   0
-rw-r--r--  runtime/libs/nnapi/include/NeuralNetworksShim.h (renamed from runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h) |   0
-rw-r--r--  runtime/libs/nnapi/include/NeuralNetworksTypes.h (renamed from runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h) |   0
-rw-r--r--  runtime/libs/nnapi/v1.1/CMakeLists.txt |   4
-rw-r--r--  runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h |  64
-rw-r--r--  runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h | 141
-rw-r--r--  runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h | 709
-rw-r--r--  runtime/libs/nnapi/v1.2/CMakeLists.txt |   4
31 files changed, 232 insertions, 1871 deletions
diff --git a/runtime/libs/ndarray/src/ContiguousSpan.cpp b/runtime/libs/benchmark/include/benchmark/MemoryInfo.h
index e06cfc2a1..6e8e12ba4 100644
--- a/runtime/libs/ndarray/src/ContiguousSpan.cpp
+++ b/runtime/libs/benchmark/include/benchmark/MemoryInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,18 +14,27 @@
* limitations under the License.
*/
-#include "ndarray/ContiguousSpan.h"
+#ifndef __NNFW_BENCHMARK_MEMORY_INFO_H__
+#define __NNFW_BENCHMARK_MEMORY_INFO_H__
-namespace ndarray
+#include <cstdint>
+#include <string>
+
+namespace benchmark
{
-template class ContiguousSpan<float, true>;
-template class ContiguousSpan<float, false>;
-template class ContiguousSpan<int32_t, true>;
-template class ContiguousSpan<int32_t, false>;
-template class ContiguousSpan<uint32_t, true>;
-template class ContiguousSpan<uint32_t, false>;
-template class ContiguousSpan<uint8_t, true>;
-template class ContiguousSpan<uint8_t, false>;
+bool prepareVmRSS();
+bool prepareVmHWM();
+bool prepareGpuMemory();
+bool preparePssSum();
+
+uint32_t getVmRSS();
+uint32_t getVmHWM();
+uint32_t getGpuMemory(const std::string &process_name);
+uint32_t getPssSum();
+
+std::string getProcessName();
+
+} // namespace benchmark
-} // namespace ndarray
+#endif // __NNFW_BENCHMARK_MEMORY_INFO_H__
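
The header above replaces MemoryPoller's private accessors with free functions, so memory figures can be sampled without a poller instance. A minimal usage sketch of the new API (the main() wrapper is illustrative, not part of this commit):

#include "benchmark/MemoryInfo.h"

#include <iostream>

int main()
{
  // Guard each get* call with its prepare* check; the backing /proc and
  // debugfs files may be absent on some targets.
  if (benchmark::prepareVmRSS())
    std::cout << "VmRSS: " << benchmark::getVmRSS() << " kB" << std::endl;
  if (benchmark::prepareGpuMemory())
    std::cout << "GPU: " << benchmark::getGpuMemory(benchmark::getProcessName()) << " kB" << std::endl;
  return 0;
}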
diff --git a/runtime/libs/benchmark/include/benchmark/MemoryPoller.h b/runtime/libs/benchmark/include/benchmark/MemoryPoller.h
index 48caa3b3a..47db3fd77 100644
--- a/runtime/libs/benchmark/include/benchmark/MemoryPoller.h
+++ b/runtime/libs/benchmark/include/benchmark/MemoryPoller.h
@@ -57,10 +57,6 @@ public:
private:
void process();
bool prepareMemoryPolling();
- uint32_t getVmRSS();
- uint32_t getVmHWM();
- uint32_t getGpuMemory();
- uint32_t getPssSum();
private:
std::chrono::milliseconds _duration;
diff --git a/runtime/libs/benchmark/include/benchmark/Phases.h b/runtime/libs/benchmark/include/benchmark/Phases.h
index 936a89742..7d642782a 100644
--- a/runtime/libs/benchmark/include/benchmark/Phases.h
+++ b/runtime/libs/benchmark/include/benchmark/Phases.h
@@ -50,6 +50,9 @@ public:
const MemoryPoller &mem_poll() const { return *_mem_poll; }
const Phase &at(const std::string &tag) const { return _phases.at(tag); }
+ uint32_t mem_before_init() const { return _mem_before_init; }
+ uint32_t mem_after_run() const { return _mem_after_run; }
+
private:
void run(const std::string &tag, const PhaseFunc &exec, const PhaseFunc *post, uint32_t loop_num,
bool option_disable);
@@ -58,6 +61,8 @@ private:
const PhaseOption _option;
std::unordered_map<std::string, Phase> _phases;
std::unique_ptr<MemoryPoller> _mem_poll;
+ uint32_t _mem_before_init;
+ uint32_t _mem_after_run;
};
} // namespace benchmark
diff --git a/runtime/libs/benchmark/include/benchmark/Result.h b/runtime/libs/benchmark/include/benchmark/Result.h
index 69084b300..7604aa904 100644
--- a/runtime/libs/benchmark/include/benchmark/Result.h
+++ b/runtime/libs/benchmark/include/benchmark/Result.h
@@ -34,6 +34,8 @@ public:
double time[PhaseEnum::END_OF_PHASE][FigureType::END_OF_FIG_TYPE];
uint32_t memory[PhaseEnum::END_OF_PHASE][MemoryType::END_OF_MEM_TYPE];
bool print_memory = false;
+ uint32_t init_memory = 0;
+ uint32_t peak_memory = 0;
};
// TODO Support not only stdout but also ostream
diff --git a/runtime/libs/benchmark/src/MemoryInfo.cpp b/runtime/libs/benchmark/src/MemoryInfo.cpp
new file mode 100644
index 000000000..20d262961
--- /dev/null
+++ b/runtime/libs/benchmark/src/MemoryInfo.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "benchmark/MemoryInfo.h"
+
+#include <vector>
+#include <algorithm>
+#include <fstream>
+#include <sstream>
+#include <cassert>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+namespace
+{
+
+const std::string proc_status_path("/proc/self/status");
+const std::string gpu_memory_path("/sys/kernel/debug/mali0/gpu_memory");
+const std::string proc_smaps_path("/proc/self/smaps");
+
+bool isStrNumber(const std::string &s)
+{
+ return !s.empty() &&
+ std::find_if(s.begin(), s.end(), [](char c) { return !std::isdigit(c); }) == s.end();
+}
+
+std::vector<std::string> splitLine(std::string line, std::string delimiters = " \n\t")
+{
+ std::vector<std::string> words;
+ size_t prev = 0, pos;
+
+ while ((pos = line.find_first_of(delimiters, prev)) != std::string::npos)
+ {
+ if (pos > prev)
+ words.emplace_back(line.substr(prev, pos - prev));
+ prev = pos + 1;
+ }
+
+ if (prev < line.length())
+ words.emplace_back(line.substr(prev, std::string::npos));
+
+ return words;
+}
+
+std::vector<std::string> getValueFromFileStatus(const std::string &file, const std::string &key)
+{
+ std::ifstream ifs(file);
+ assert(ifs.is_open());
+
+ std::string line;
+ std::vector<std::string> val;
+
+ bool found = false;
+ while (std::getline(ifs, line))
+ {
+ if (line.find(key) != std::string::npos)
+ {
+ found = true;
+ break;
+ }
+ }
+ ifs.close();
+
+ if (!found)
+ {
+ // NOTE A process that uses GPU resources may not exist yet at the model-load phase.
+ // In that case, just return an empty vector.
+ return val;
+ }
+
+ val = splitLine(line);
+ return val;
+}
+
+// smaps holds one entry per mapping, so return the summed value as uint32_t
+uint32_t getSumValueFromFileSmaps(const std::string &file, const std::string &key)
+{
+ std::ifstream ifs(file);
+ assert(ifs.is_open());
+
+ std::string line;
+ uint32_t sum = 0;
+ while (std::getline(ifs, line))
+ {
+ if (line.find(key) != std::string::npos)
+ {
+ // an example by splitLine()
+ // `Pss: 0 kB`
+ // val[0]: "Pss:", val[1]: "0" val[2]: "kB"
+ auto val = splitLine(line);
+ assert(val.size() != 0);
+ // SwapPss may also appear, so check that the line begins with Pss
+ if (val[0].find("Pss") != 0)
+ {
+ continue;
+ }
+ sum += std::stoul(val[1]);
+ }
+ }
+
+ return sum;
+}
+
+} // namespace
+
+namespace benchmark
+{
+
+bool prepareVmRSS() { return std::ifstream(proc_status_path).is_open(); }
+
+bool prepareVmHWM() { return std::ifstream(proc_status_path).is_open(); }
+
+bool prepareGpuMemory() { return std::ifstream(gpu_memory_path).is_open(); }
+
+bool preparePssSum() { return std::ifstream(proc_smaps_path).is_open(); }
+
+uint32_t getVmRSS()
+{
+ auto val = getValueFromFileStatus(proc_status_path, "VmRSS");
+ if (val.size() == 0)
+ return 0;
+ assert(isStrNumber(val[1]));
+ return std::stoul(val[1]);
+}
+
+uint32_t getVmHWM()
+{
+ auto val = getValueFromFileStatus(proc_status_path, "VmHWM");
+ if (val.size() == 0)
+ return 0;
+ // key: value
+ assert(isStrNumber(val[1]));
+ return std::stoul(val[1]);
+}
+
+uint32_t getGpuMemory(const std::string &process_name)
+{
+ assert(!process_name.empty());
+ auto val = getValueFromFileStatus(gpu_memory_path, process_name);
+ if (val.size() == 0)
+ return 0;
+ // process_name -> pid -> gpu_mem -> max_gpu_mem
+ assert(isStrNumber(val[2]));
+ return std::stoul(val[2]);
+}
+
+uint32_t getPssSum() { return getSumValueFromFileSmaps(proc_smaps_path, "Pss"); }
+
+std::string getProcessName()
+{
+ auto val = getValueFromFileStatus(proc_status_path, "Name");
+ assert(val.size() >= 2);
+ return val[1];
+}
+
+} // namespace benchmark
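
For reference, splitLine() tokenizes on blanks and tabs, so a /proc/self/status line such as "VmRSS:   123456 kB" becomes {"VmRSS:", "123456", "kB"}, and the get* functions convert the token at index 1 with std::stoul. A standalone illustration of that tokenization (splitLine itself sits in an anonymous namespace, so this re-creates the behavior rather than calling it):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main()
{
  std::istringstream iss("VmRSS:\t  123456 kB");
  std::vector<std::string> words;
  for (std::string w; iss >> w;)
    words.push_back(w); // same result as splitLine() for whitespace delimiters
  std::cout << words[0] << " -> " << words[1] << std::endl; // prints "VmRSS: -> 123456"
  return 0;
}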
diff --git a/runtime/libs/benchmark/src/MemoryPoller.cpp b/runtime/libs/benchmark/src/MemoryPoller.cpp
index 61fdecd46..050b5b163 100644
--- a/runtime/libs/benchmark/src/MemoryPoller.cpp
+++ b/runtime/libs/benchmark/src/MemoryPoller.cpp
@@ -16,106 +16,13 @@
#include "benchmark/MemoryPoller.h"
#include "benchmark/Types.h"
+#include "benchmark/MemoryInfo.h"
#include <vector>
-#include <fstream>
-#include <sstream>
#include <stdexcept>
#include <cassert>
#include <iostream>
-namespace
-{
-
-const std::string proc_status_path("/proc/self/status");
-const std::string gpu_memory_path("/sys/kernel/debug/mali0/gpu_memory");
-const std::string proc_smaps_path("/proc/self/smaps");
-
-bool isStrNumber(const std::string &s)
-{
- return !s.empty() &&
- std::find_if(s.begin(), s.end(), [](char c) { return !std::isdigit(c); }) == s.end();
-}
-
-std::vector<std::string> splitLine(std::string line, std::string delimiters = " \n\t")
-{
- std::vector<std::string> words;
- size_t prev = 0, pos;
-
- while ((pos = line.find_first_of(delimiters, prev)) != std::string::npos)
- {
- if (pos > prev)
- words.emplace_back(line.substr(prev, pos - prev));
- prev = pos + 1;
- }
-
- if (prev < line.length())
- words.emplace_back(line.substr(prev, std::string::npos));
-
- return words;
-}
-
-std::vector<std::string> getValueFromFileStatus(const std::string &file, const std::string &key)
-{
- std::ifstream ifs(file);
- assert(ifs.is_open());
-
- std::string line;
- std::vector<std::string> val;
-
- bool found = false;
- while (std::getline(ifs, line))
- {
- if (line.find(key) != std::string::npos)
- {
- found = true;
- break;
- }
- }
- ifs.close();
-
- if (!found)
- {
- // NOTE. the process which uses gpu resources cannot be there yet at the model-load phase.
- // At that time, just return empty.
- return val;
- }
-
- val = splitLine(line);
- return val;
-}
-
-// Because of smaps' structure, returns sum value as uint32_t
-uint32_t getSumValueFromFileSmaps(const std::string &file, const std::string &key)
-{
- std::ifstream ifs(file);
- assert(ifs.is_open());
-
- std::string line;
- uint32_t sum = 0;
- while (std::getline(ifs, line))
- {
- if (line.find(key) != std::string::npos)
- {
- // an example by splitLine()
- // `Pss: 0 kB`
- // val[0]: "Pss:", val[1]: "0" val[2]: "kB"
- auto val = splitLine(line);
- assert(val.size() != 0);
- // SwapPss could show so that check where Pss is at the beginning
- if (val[0].find("Pss") != 0)
- {
- continue;
- }
- sum += std::stoul(val[1]);
- }
- }
-
- return sum;
-}
-
-} // namespace
-
namespace benchmark
{
@@ -168,7 +75,7 @@ bool MemoryPoller::end(PhaseEnum phase)
mem = getVmRSS();
if (_gpu_poll)
{
- mem += getGpuMemory();
+ mem += getGpuMemory(_process_name);
}
if (mem > _rss_map[phase])
_rss_map[phase] = mem;
@@ -176,7 +83,7 @@ bool MemoryPoller::end(PhaseEnum phase)
mem = getVmHWM();
if (_gpu_poll)
{
- mem += getGpuMemory();
+ mem += getGpuMemory(_process_name);
}
_hwm_map[phase] = mem;
@@ -208,7 +115,7 @@ void MemoryPoller::process()
uint32_t cur_hwm = getVmHWM();
if (_gpu_poll)
{
- auto gpu_mem = getGpuMemory();
+ auto gpu_mem = getGpuMemory(_process_name);
cur_rss += gpu_mem;
cur_hwm += gpu_mem;
}
@@ -236,77 +143,33 @@ void MemoryPoller::process()
bool MemoryPoller::prepareMemoryPolling()
{
// VmRSS
+ if (!prepareVmRSS())
{
- std::ifstream ifs(proc_status_path);
- if (!ifs.is_open())
- {
- std::cerr << "failed to open " << proc_status_path << std::endl;
- return false;
- }
- ifs.close();
+ std::cerr << "failed to prepare parsing vmrss" << std::endl;
+ return false;
}
// (Additionally) GpuMemory
if (_gpu_poll)
{
- std::ifstream ifs(gpu_memory_path);
- if (!ifs.is_open())
+ if (!prepareGpuMemory())
{
- std::cerr << "failed to open " << gpu_memory_path << std::endl;
+ std::cerr << "failed to prepare parsing gpu memory" << std::endl;
return false;
}
- ifs.close();
// Needs process name
- auto val = getValueFromFileStatus(proc_status_path, "Name");
- assert(val.size() != 0);
- _process_name = val[1];
+ _process_name = getProcessName();
}
// PSS
+ if (!preparePssSum())
{
- std::ifstream ifs(proc_smaps_path);
- if (!ifs.is_open())
- {
- std::cerr << "failed to open " << proc_smaps_path << std::endl;
- return false;
- }
- ifs.close();
+ std::cerr << "failed to prepare parsing pss sum" << std::endl;
+ return false;
}
return true;
}
-uint32_t MemoryPoller::getVmRSS()
-{
- auto val = getValueFromFileStatus(proc_status_path, "VmRSS");
- if (val.size() == 0)
- return 0;
- assert(isStrNumber(val[1]));
- return std::stoul(val[1]);
-}
-
-uint32_t MemoryPoller::getVmHWM()
-{
- auto val = getValueFromFileStatus(proc_status_path, "VmHWM");
- if (val.size() == 0)
- return 0;
- // key: value
- assert(isStrNumber(val[1]));
- return std::stoul(val[1]);
-}
-
-uint32_t MemoryPoller::getGpuMemory()
-{
- assert(!_process_name.empty());
- auto val = getValueFromFileStatus(gpu_memory_path, _process_name);
- if (val.size() == 0)
- return 0;
- // process_name -> pid -> gpu_mem -> max_gpu_mem
- assert(isStrNumber(val[2]));
- return std::stoul(val[2]);
-}
-
-uint32_t MemoryPoller::getPssSum() { return getSumValueFromFileSmaps(proc_smaps_path, "Pss"); }
-
} // namespace benchmark
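
With the parsing helpers moved out, MemoryPoller only orchestrates sampling: it reads VmRSS/VmHWM through MemoryInfo and, when GPU polling is enabled, adds the per-process figure from the Mali debugfs node using the cached process name. A sketch of the accounting end() now performs (written as a free function for illustration; the real code is a MemoryPoller member):

#include "benchmark/MemoryInfo.h"

#include <cstdint>
#include <string>

uint32_t sampleRss(bool gpu_poll, const std::string &process_name)
{
  uint32_t mem = benchmark::getVmRSS();
  if (gpu_poll)
    mem += benchmark::getGpuMemory(process_name); // GPU pages are not counted in VmRSS
  return mem;
}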
diff --git a/runtime/libs/benchmark/src/Phases.cpp b/runtime/libs/benchmark/src/Phases.cpp
index 9ab67cfd9..897b943d3 100644
--- a/runtime/libs/benchmark/src/Phases.cpp
+++ b/runtime/libs/benchmark/src/Phases.cpp
@@ -17,6 +17,7 @@
#include "benchmark/Phases.h"
#include "benchmark/Types.h"
+#include "benchmark/MemoryInfo.h"
#include <cassert>
#include <chrono>
@@ -46,8 +47,11 @@ void SleepForMicros(uint64_t micros)
namespace benchmark
{
-Phases::Phases(const PhaseOption &option) : _option(option)
+Phases::Phases(const PhaseOption &option) : _option(option), _mem_before_init(0), _mem_after_run(0)
{
+ assert(prepareVmRSS());
+ _mem_before_init = getVmHWM();
+
if (_option.memory)
{
_mem_poll = std::make_unique<MemoryPoller>(std::chrono::milliseconds(option.memory_interval),
@@ -93,6 +97,8 @@ void Phases::run(const std::string &tag, const PhaseFunc &exec, const PhaseFunc
}
}
+ _mem_after_run = getVmHWM();
+
if (p == PhaseEnum::END_OF_PHASE)
{
return;
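
Phases now brackets the whole benchmark with two VmHWM samples: one in the constructor, before any model is loaded, and one after the run loop. Their difference approximates the peak memory the workload itself added. A self-contained sketch of the same bracketing (the vector allocation is a stand-in for model load and inference):

#include "benchmark/MemoryInfo.h"

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
  assert(benchmark::prepareVmRSS()); // same precondition the Phases constructor asserts
  uint32_t before = benchmark::getVmHWM();
  std::vector<char> workload(64 * 1024 * 1024, 1); // stand-in for init + run
  uint32_t after = benchmark::getVmHWM();
  return after >= before ? 0 : 1; // (after - before) is the added peak, in kB
}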
diff --git a/runtime/libs/benchmark/src/Result.cpp b/runtime/libs/benchmark/src/Result.cpp
index df573da92..e6cafb91c 100644
--- a/runtime/libs/benchmark/src/Result.cpp
+++ b/runtime/libs/benchmark/src/Result.cpp
@@ -141,6 +141,15 @@ void printResultMemory(const uint32_t memory[benchmark::PhaseEnum::END_OF_PHASE]
}
}
+void printUsedPeakMemory(uint32_t init_memory, uint32_t peak_memory)
+{
+ uint32_t used_peak_memory = peak_memory - init_memory;
+ std::cout << "Used Peak Memory : " << used_peak_memory << " kB" << std::endl;
+ std::cout << "- HWM after run : " << peak_memory << " kB" << std::endl;
+ std::cout << "- HWM before init: " << init_memory << " kB" << std::endl;
+ std::cout << "===================================" << std::endl;
+}
+
} // namespace
namespace benchmark
@@ -175,6 +184,8 @@ Result::Result(const Phases &phases)
}
}
}
+ init_memory = phases.mem_before_init();
+ peak_memory = phases.mem_after_run();
}
void printResult(const Result &result)
@@ -185,6 +196,7 @@ void printResult(const Result &result)
return;
printResultMemory(result.memory);
+ printUsedPeakMemory(result.init_memory, result.peak_memory);
}
// TODO There are necessary for a kind of output data file so that it doesn't have to be csv file
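
printUsedPeakMemory() reports the difference of the two samples recorded by Phases. A worked example with illustrative numbers:

#include <cstdint>
#include <iostream>

int main()
{
  uint32_t init_memory = 12000; // HWM before init, in kB (made-up figure)
  uint32_t peak_memory = 60000; // HWM after run, in kB (made-up figure)
  std::cout << "Used Peak Memory : " << (peak_memory - init_memory) << " kB" << std::endl; // 48000 kB
  return 0;
}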
diff --git a/runtime/libs/misc/include/misc/polymorphic_downcast.h b/runtime/libs/misc/include/misc/polymorphic_downcast.h
index 412b864e6..ee885eb70 100644
--- a/runtime/libs/misc/include/misc/polymorphic_downcast.h
+++ b/runtime/libs/misc/include/misc/polymorphic_downcast.h
@@ -27,9 +27,7 @@ namespace misc
template <typename DstType, typename SrcType> inline DstType polymorphic_downcast(SrcType *x)
{
-#ifndef __ANDROID__
assert(dynamic_cast<DstType>(x) == x);
-#endif
return static_cast<DstType>(x);
}
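
Dropping the #ifndef __ANDROID__ guard means the dynamic_cast sanity check now runs in debug builds on every platform; in NDEBUG builds the assert compiles away and only the static_cast remains. A standalone copy of the idiom for illustration (the real template is declared inside namespace misc, as the hunk header shows):

#include <cassert>

struct Base { virtual ~Base() = default; };
struct Derived : Base { int value = 42; };

template <typename DstType, typename SrcType> inline DstType polymorphic_downcast(SrcType *x)
{
  assert(dynamic_cast<DstType>(x) == x); // debug-only check that the downcast is valid
  return static_cast<DstType>(x);
}

int main()
{
  Derived d;
  Base *b = &d;
  return polymorphic_downcast<Derived *>(b)->value == 42 ? 0 : 1;
}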
diff --git a/runtime/libs/ndarray/CMakeLists.txt b/runtime/libs/ndarray/CMakeLists.txt
deleted file mode 100644
index b040f5115..000000000
--- a/runtime/libs/ndarray/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-add_library(ndarray STATIC src/Array.cpp src/ContiguousSpan.cpp)
-
-set_target_properties(ndarray PROPERTIES POSITION_INDEPENDENT_CODE ON)
-
-target_include_directories(ndarray PUBLIC include)
-#can't make this private because of c++ templates
-target_include_directories(ndarray PUBLIC src)
-
-option(NDARRAY_INLINE_TEMPLATES "Set to ON to disable extern declarations for common types")
-
-if(${NDARRAY_INLINE_TEMPLATES})
- target_compile_definitions(ndarray PUBLIC -DNDARRAY_INLINE_TEMPLATES=1)
-endif()
-
-target_link_libraries(ndarray PRIVATE nnfw_common)
-target_link_libraries(ndarray PRIVATE nnfw_coverage)
-
-add_subdirectory(test)
-add_subdirectory(example)
diff --git a/runtime/libs/ndarray/example/CMakeLists.txt b/runtime/libs/ndarray/example/CMakeLists.txt
deleted file mode 100644
index c4b575dad..000000000
--- a/runtime/libs/ndarray/example/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-add_executable(example_no_array example_no_array.cpp)
-
-add_executable(example_array example_array.cpp)
-target_link_libraries(example_array PRIVATE ndarray)
diff --git a/runtime/libs/ndarray/example/example_array.cpp b/runtime/libs/ndarray/example/example_array.cpp
deleted file mode 100644
index 85d274681..000000000
--- a/runtime/libs/ndarray/example/example_array.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ndarray/Array.h"
-
-#include <iostream>
-#include <iterator>
-
-using namespace ndarray;
-
-void gather_array(const Array<float> &input, Array<float> &output, const Array<int> &indices)
-{
- assert(indices.shape().rank() == 3);
- assert(input.shape().rank() == 3);
- assert(indices.shape().dim(1) == input.shape().rank());
-
- for (size_t i = 0; i < indices.shape().dim(0); ++i)
- {
- for (size_t j = 0; j < indices.shape().dim(1); ++j)
- {
- auto index = indices.slice(i, j);
- output.slice(i, j).assign(input.slice(index[0], index[1]));
- }
- }
-}
-
-int main()
-{
- // fill tensor of shape[3,3,4] with sequential numbers from [0..36)
- Shape in_shape{3, 3, 4};
- std::vector<float> input_data(in_shape.element_count());
- for (size_t i = 0; i < in_shape.element_count(); ++i)
- input_data[i] = i;
-
- Array<float> input(input_data.data(), in_shape);
-
- // select column-vectors on main diagonal
- Shape indices_shape{1, 3, 2};
- std::vector<int> indices_data(indices_shape.element_count());
- Array<int> indices(indices_data.data(), indices_shape);
-
- indices.slice(0, 0) = {0, 0};
- indices.slice(0, 1) = {1, 1};
- indices.slice(0, 2) = {2, 2};
-
- Shape output_shape{1, 3, 4};
- std::vector<float> output_data(output_shape.element_count());
-
- Array<float> output(output_data.data(), output_shape);
-
- gather_array(input, output, indices);
-
- for (size_t i = 0; i < indices_shape.dim(0); ++i)
- {
- for (size_t j = 0; j < indices_shape.dim(1); ++j)
- {
- auto output_piece = output.slice(i, j);
- std::ostream_iterator<int> cout_it(std::cout, ", ");
- std::copy(output_piece.begin(), output_piece.end(), cout_it);
- std::cout << std::endl;
- }
- }
-}
diff --git a/runtime/libs/ndarray/example/example_no_array.cpp b/runtime/libs/ndarray/example/example_no_array.cpp
deleted file mode 100644
index 3a4d05dca..000000000
--- a/runtime/libs/ndarray/example/example_no_array.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <array>
-#include <vector>
-#include <algorithm>
-#include <cassert>
-#include <iostream>
-
-void gather_no_array(const float *in_data, const std::array<size_t, 3> &dims, float *out_data,
- const std::array<size_t, 3> &out_dims, //[nselections,
- const int *indices, const std::array<size_t, 3> &indices_dims)
-{
- assert(indices_dims[1] == dims.size());
-
- for (int i = 0; i < indices_dims[0]; ++i)
- {
- for (int j = 0; j < indices_dims[1]; ++j)
- {
- const int *index_ptr = indices + i * indices_dims[2] * indices_dims[1] + j * indices_dims[2];
-
- size_t in_offset = index_ptr[0] * dims[2] * dims[1] + index_ptr[1] * dims[2];
-
- const float *in_ptr = in_data + in_offset;
-
- size_t out_offset = i * out_dims[2] * out_dims[1] + j * out_dims[2];
-
- float *out_ptr = out_data + out_offset;
-
- for (int k = 0; k < dims[2]; ++k)
- {
- out_ptr[k] = in_ptr[k];
- }
- }
- }
-}
-
-int main()
-{
- std::array<size_t, 3> in_dims{3, 3, 4};
- std::vector<float> input(3 * 3 * 4);
- for (size_t i = 0; i < 3 * 3 * 4; ++i)
- input[i] = i;
-
- std::array<size_t, 3> indices_shape{1, 3, 2};
- std::vector<int> indices(1 * 3 * 2);
-
- indices[0] = 0;
- indices[1] = 0;
- indices[2] = 1;
- indices[3] = 1;
- indices[4] = 2;
- indices[5] = 2;
-
- std::array<size_t, 3> output_dims{1, 3, 4};
- std::vector<float> output(1 * 3 * 4);
-
- gather_no_array(input.data(), in_dims, output.data(), output_dims, indices.data(), indices_shape);
-
- for (size_t i = 0; i < output_dims[0]; ++i)
- {
- for (size_t j = 0; j < output_dims[1]; ++j)
- {
- auto out_ptr = output.data() + i * output_dims[1] * output_dims[2] + j * output_dims[2];
- for (size_t k = 0; k < output_dims[2]; ++k)
- {
- std::cout << out_ptr[k] << ", ";
- }
- std::cout << std::endl;
- }
- }
-}
diff --git a/runtime/libs/ndarray/include/ndarray/Array.h b/runtime/libs/ndarray/include/ndarray/Array.h
deleted file mode 100644
index 3890cc26b..000000000
--- a/runtime/libs/ndarray/include/ndarray/Array.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NDARRAY_ARRAY_H_
-#define _NDARRAY_ARRAY_H_
-
-#include "Common.h"
-
-#include "ContiguousSpan.h"
-#include "Shape.h"
-
-#if __cplusplus < 201402L
-#include "detail/cxx14.h" //integer_sequence and make_index_dequence definitions
-#else
-#include <utility>
-#endif
-
-#include <algorithm>
-#include <cassert>
-#include <type_traits>
-#include <array>
-#include <tuple>
-#include <cstddef>
-
-namespace ndarray
-{
-
-// there is no index_sequence before c++14
-#if __cplusplus < 201402L
-
-template <size_t... Nums> using index_sequence = cxx14::index_sequence<Nums...>;
-
-template <size_t Num> using make_index_sequence = cxx14::make_index_sequence<Num>;
-
-#else
-
-template <size_t... Nums> using index_sequence = std::index_sequence<Nums...>;
-
-template <size_t _Num> using make_index_sequence = std::make_index_sequence<_Num>;
-
-#endif //__cplusplus < 201402L
-
-struct Strides
-{
- explicit Strides(Shape s) : _strides{} { fillStrides(s); }
-
- int operator[](size_t idx) const noexcept { return _strides[idx]; }
-
- // since we don't have c++14 fold expression
- template <typename Seq, typename... Ts> struct _calc_offset;
-
- template <size_t Num, size_t... Nums, typename T, typename... Ts>
- struct _calc_offset<index_sequence<Num, Nums...>, T, Ts...>
- {
- static constexpr size_t get(const std::array<int, 8> &strides, int x, Ts... xs)
- {
- return _calc_offset<index_sequence<Nums...>, Ts...>::get(strides, xs...) +
- x * std::get<Num>(strides);
- }
- };
-
- template <size_t Num, typename T> struct _calc_offset<index_sequence<Num>, T>
- {
- static constexpr size_t get(const std::array<int, 8> &strides, int x)
- {
- return x * std::get<Num>(strides);
- }
- };
-
- template <typename Seq, typename... Ts> constexpr size_t offset(Seq, Ts... x) const noexcept
- {
- // return ( 0 + ... + (std::get<Nums>(_strides) * x)); in c++14
- return _calc_offset<Seq, Ts...>::get(_strides, x...);
- }
-
-private:
- void fillStrides(const Shape &s) noexcept
- {
- int rank = s.rank();
- _strides[rank - 1] = 1;
- for (int d = rank - 2; d >= 0; --d)
- {
- _strides[d] = _strides[d + 1] * s.dim(d + 1);
- }
- }
-
- std::array<int, NDARRAY_MAX_DIMENSION_COUNT> _strides;
-};
-
-template <typename T> class Array
-{
-public:
- Array(T *data, Shape shape) noexcept : _data(data), _shape(shape), _strides(shape) {}
-
- Array(const Array &) = delete;
-
- Array(Array &&a) noexcept : _data(a._data), _shape(a._shape), _strides(a._strides)
- {
- a._data = nullptr;
- }
-
- template <typename... Ts> T &at(Ts... x) const noexcept { return _at(static_cast<size_t>(x)...); }
-
- /**
- * @brief returns last dimension as ContigniousSpan
- * @param x indices of slice to take. See tests for usage details
- * @return slice at given position
- */
- template <typename... Ts> ContiguousSpan<T, std::is_const<T>::value> slice(Ts... x) noexcept
- {
- assert(sizeof...(Ts) == _shape.rank() - 1);
- return {&at(x..., 0ul), _shape.dim(_shape.rank() - 1)};
- }
-
- /**
- * @brief returns last dimension as ContigniousSpan
- * @param x indices of slice to take. See tests for usage details
- * @return slice at given position
- */
- template <typename... Ts> ContiguousSpan<T, true> slice(Ts... x) const noexcept
- {
- assert(sizeof...(Ts) == _shape.rank() - 1);
- return {&at(x..., 0ul), _shape.dim(_shape.rank() - 1)};
- }
-
- ContiguousSpan<T, std::is_const<T>::value> flat() noexcept
- {
- return {_data, _shape.element_count()};
- }
-
- ContiguousSpan<T, true> flat() const noexcept { return {_data, _shape.element_count()}; }
-
- const Shape &shape() const noexcept { return _shape; }
-
-private:
- template <typename... Ts> T &_at(Ts... x) const noexcept
- {
- assert(sizeof...(x) == _shape.rank());
- using Indices = make_index_sequence<sizeof...(Ts)>;
- return _data[offset(Indices{}, x...)];
- }
-
- template <typename... Ts, size_t... Nums>
- size_t offset(index_sequence<Nums...> seq, Ts... x) const noexcept
- {
- static_assert(
- sizeof...(Ts) == sizeof...(Nums),
- "Sanity check failed. Generated index sequence size is not equal to argument count");
-
- return _strides.offset(seq, x...);
- }
-
- T *_data;
- Shape _shape;
- Strides _strides;
-};
-
-template <typename To, typename From> Array<To> array_cast(Array<From> &&from, Shape newShape)
-{
- assert(from.shape().element_count() / (sizeof(To) / sizeof(From)) == newShape.element_count());
- return Array<To>(reinterpret_cast<To *>(from.flat().data()), newShape);
-}
-
-template <typename To, typename From>
-Array<const To> array_cast(const Array<From> &from, Shape newShape)
-{
- assert(from.shape().element_count() / (sizeof(To) / sizeof(From)) == newShape.element_count());
- return Array<To>(reinterpret_cast<const To *>(from.flat().data()), newShape);
-}
-
-#ifndef NDARRAY_INLINE_TEMPLATES
-
-extern template class Array<float>;
-extern template class Array<int32_t>;
-extern template class Array<uint32_t>;
-extern template class Array<uint8_t>;
-
-#endif // NDARRAY_INLINE_TEMPLATES
-
-} // namespace ndarray
-
-#endif //_NDARRAY_ARRAY_H_
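
For context on what is being removed: Array's element addressing is plain row-major arithmetic. Strides stores, for each axis, the product of the trailing dimensions, and offset() folds index*stride over all axes. A compact illustration of the same computation for a {3, 3, 4} shape:

#include <array>
#include <cstddef>
#include <iostream>

int main()
{
  std::array<size_t, 3> dims{3, 3, 4};
  std::array<size_t, 3> strides{};
  strides[2] = 1;
  for (int d = 1; d >= 0; --d)
    strides[d] = strides[d + 1] * dims[d + 1]; // {12, 4, 1}, as fillStrides() computes
  // offset(1, 2, 3) = 1*12 + 2*4 + 3*1
  std::cout << 1 * strides[0] + 2 * strides[1] + 3 * strides[2] << std::endl; // prints 23
  return 0;
}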
diff --git a/runtime/libs/ndarray/include/ndarray/Common.h b/runtime/libs/ndarray/include/ndarray/Common.h
deleted file mode 100644
index aa0cc6fe2..000000000
--- a/runtime/libs/ndarray/include/ndarray/Common.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NDARRAY_COMMON_H_
-#define _NDARRAY_COMMON_H_
-
-#define NDARRAY_MAX_DIMENSION_COUNT 8
-
-#endif //_NDARRAY_COMMON_H_
diff --git a/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h b/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h
deleted file mode 100644
index 8caa6a686..000000000
--- a/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NDARRAY_CONTIGNIOUS_SPAN_H_
-#define _NDARRAY_CONTIGNIOUS_SPAN_H_
-
-#include <type_traits>
-#include <vector>
-#include <cstdint>
-#include <cstddef>
-#include <cassert>
-
-namespace ndarray
-{
-
-template <typename T, bool isConst = false> class ContiguousSpan
-{
-public:
- using pointer_type = typename std::conditional<isConst, const T *, T *>::type;
- using reference_type = typename std::conditional<isConst, const T &, T &>::type;
- using iterator_type = pointer_type;
-
- ContiguousSpan(pointer_type data, size_t len) noexcept : _data(data), _len(len) {}
-
- template <typename It>
- explicit ContiguousSpan(It first, It last) noexcept
- : _data(&*first), _len(std::distance(first, last))
- {
- }
-
- ContiguousSpan(const ContiguousSpan &) = delete;
-
- ContiguousSpan(ContiguousSpan &&s) noexcept : _data(s._data), _len(s._len) { s._data = nullptr; }
-
- operator ContiguousSpan<T, true>() { return ContiguousSpan<T, true>{_data, _len}; }
-
- reference_type operator[](size_t idx) const noexcept { return _data[idx]; }
-
- reference_type at(size_t idx) const noexcept { return _data[idx]; }
-
- ContiguousSpan<T, isConst> offset(size_t offset)
- {
- assert(offset <= _len);
- return {_data + offset, _len - offset};
- }
-
- template <typename From, bool _ = isConst>
- typename std::enable_if<!_, void>::type assign(const From &f) noexcept
- {
- assignFrom(std::begin(f), std::end(f));
- }
-
- template <typename U, bool _ = isConst>
- typename std::enable_if<!_, ContiguousSpan &>::type
- operator=(std::initializer_list<U> list) noexcept
- {
- assignFrom(std::begin(list), std::end(list));
- return *this;
- }
-
- template <typename It, bool _ = isConst>
- typename std::enable_if<!_, void>::type assignFrom(It first, It last) noexcept
- {
- std::copy(first, last, begin());
- }
-
- size_t size() const { return _len; }
-
- iterator_type begin() const { return iterator_type{_data}; }
-
- iterator_type end() const { return iterator_type{_data + _len}; }
-
- pointer_type data() { return _data; }
-
-private:
- pointer_type _data;
- size_t _len;
-};
-
-#ifndef NDARRAY_INLINE_TEMPLATES
-
-extern template class ContiguousSpan<float, true>;
-extern template class ContiguousSpan<float, false>;
-extern template class ContiguousSpan<int32_t, true>;
-extern template class ContiguousSpan<int32_t, false>;
-extern template class ContiguousSpan<uint32_t, true>;
-extern template class ContiguousSpan<uint32_t, false>;
-extern template class ContiguousSpan<uint8_t, true>;
-extern template class ContiguousSpan<uint8_t, false>;
-
-#endif // NDARRAY_INLINE_TEMPLATES
-
-} // namespace ndarray
-
-#endif //_NDARRAY_CONTIGNIOUS_SPAN_H_
diff --git a/runtime/libs/ndarray/include/ndarray/Shape.h b/runtime/libs/ndarray/include/ndarray/Shape.h
deleted file mode 100644
index fa58613b8..000000000
--- a/runtime/libs/ndarray/include/ndarray/Shape.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NDARRAY_SHAPE_H_
-#define _NDARRAY_SHAPE_H_
-
-#include "Common.h"
-
-#include <array>
-#include <cassert>
-#include <cstddef>
-
-namespace ndarray
-{
-
-class Shape
-{
-public:
- //_dims{} here and later since array does not have std::initializer_list ctor
- // and aggregate initialization is not allowed here
- explicit Shape(size_t rank) noexcept : _dims{}, _rank(rank)
- {
- std::fill(_dims.begin(), _dims.end(), 0);
- }
-
- Shape(std::initializer_list<size_t> list) noexcept : _dims{}, _rank(list.size())
- {
- std::copy(list.begin(), list.end(), _dims.begin());
- }
-
- size_t dim(int i) const noexcept { return _dims.at(i); }
-
- size_t &dim(int i) noexcept { return _dims.at(i); }
-
- size_t element_count() const noexcept
- {
- uint32_t res = 1;
- for (size_t i = 0; i < rank(); ++i)
- res *= dim(i);
- assert(res <= 0xffffffff);
- return res;
- }
-
- size_t rank() const noexcept { return _rank; }
-
-private:
- std::array<size_t, NDARRAY_MAX_DIMENSION_COUNT> _dims;
- size_t _rank;
-};
-
-} // namespace ndarray
-
-#endif //_NDARRAY_SHAPE_H_
diff --git a/runtime/libs/ndarray/src/Array.cpp b/runtime/libs/ndarray/src/Array.cpp
deleted file mode 100644
index f9c9de9d3..000000000
--- a/runtime/libs/ndarray/src/Array.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ndarray/Array.h"
-
-namespace ndarray
-{
-
-template class Array<float>;
-template class Array<int32_t>;
-template class Array<uint32_t>;
-template class Array<uint8_t>;
-
-} // namespace ndarray
diff --git a/runtime/libs/ndarray/src/detail/cxx14.h b/runtime/libs/ndarray/src/detail/cxx14.h
deleted file mode 100644
index 81135b3f2..000000000
--- a/runtime/libs/ndarray/src/detail/cxx14.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NDARRAY_CXX14_H_
-#define _NDARRAY_CXX14_H_
-
-namespace ndarray
-{
-
-namespace cxx14
-{
-
-template <size_t... Nums> struct index_sequence
-{
- using value_type = size_t;
-
- static constexpr std::size_t size() noexcept { return sizeof...(Nums); }
-};
-
-namespace detail
-{
-
-template <size_t v, typename Seq> struct _append;
-
-template <size_t v, size_t... Nums> struct _append<v, index_sequence<Nums...>>
-{
- using result = index_sequence<Nums..., v>;
-};
-
-template <size_t Len> struct make_index_sequence
-{
- using result =
- typename detail::_append<Len - 1, typename make_index_sequence<Len - 1>::result>::result;
-};
-
-template <> struct make_index_sequence<1>
-{
- using result = index_sequence<0>;
-};
-
-template <> struct make_index_sequence<0>
-{
- using result = index_sequence<>;
-};
-
-} // namespace detail
-
-template <size_t Num> using make_index_sequence = typename detail::make_index_sequence<Num>::result;
-
-} // namespace cxx14
-
-} // namespace ndarray
-
-#endif //_NDARRAY_CXX14_H_
diff --git a/runtime/libs/ndarray/test/CMakeLists.txt b/runtime/libs/ndarray/test/CMakeLists.txt
deleted file mode 100644
index 16f8779ee..000000000
--- a/runtime/libs/ndarray/test/CMakeLists.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-if(NOT BUILD_NDARRAY_TEST)
- return()
-endif()
-
-add_executable(ndarray_test ndarray_test.cpp)
-
-target_link_libraries(ndarray_test PRIVATE ndarray)
-
-nnfw_find_package(GTest)
-if(NOT GTest_FOUND)
- message(STATUS "GTest not avaialble. Skipping NDArray test build")
- return()
-endif(NOT GTest_FOUND)
-
-target_link_libraries(ndarray_test PUBLIC gtest gtest_main ${LIB_PTHREAD})
-
-add_test(ndarray_test ndarray_test)
diff --git a/runtime/libs/ndarray/test/ndarray_test.cpp b/runtime/libs/ndarray/test/ndarray_test.cpp
deleted file mode 100644
index 0aa948c72..000000000
--- a/runtime/libs/ndarray/test/ndarray_test.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "gtest/gtest.h"
-
-#include "ndarray/Array.h"
-
-using namespace ndarray;
-
-TEST(NDArray_tests, basic_data_test)
-{
-
- float raw_data[] = {1, 2, 3, 4};
-
- Array<float> data22{raw_data, {2, 2}};
-
- ASSERT_FLOAT_EQ(data22.at(0, 0), 1);
- ASSERT_FLOAT_EQ(data22.at(0, 1), 2);
- ASSERT_FLOAT_EQ(data22.at(1, 0), 3);
- ASSERT_FLOAT_EQ(data22.at(1, 1), 4);
-
- Array<float> data14{raw_data, {1, 4}};
- ASSERT_FLOAT_EQ(data22.at(0, 0), 1);
- ASSERT_FLOAT_EQ(data22.at(0, 1), 2);
- ASSERT_FLOAT_EQ(data22.at(0, 2), 3);
- ASSERT_FLOAT_EQ(data22.at(0, 3), 4);
-}
-
-TEST(NDArray_tests, slice_write_test)
-{
- float raw_data[4] = {0};
-
- Array<float> data22{raw_data, {2, 2}};
-
- data22.slice(1) = {1, 2};
-
- ASSERT_FLOAT_EQ(data22.at(0, 0), 0);
- ASSERT_FLOAT_EQ(data22.at(0, 1), 0);
- ASSERT_FLOAT_EQ(data22.at(1, 0), 1);
- ASSERT_FLOAT_EQ(data22.at(1, 1), 2);
-}
-
-TEST(NDArray_tests, slice_read_test)
-{
- float raw_data[4] = {1, 2, 3, 4};
-
- Array<float> data22{raw_data, {2, 2}};
-
- auto slice = data22.slice(1);
-
- ASSERT_FLOAT_EQ(slice[0], 3);
- ASSERT_FLOAT_EQ(slice[1], 4);
-}
-
-TEST(NDArray_tests, multidim_test)
-{
- float raw_data[5] = {0, 1, 2, 3, 4};
-
- Array<float> data22{raw_data, {1, 1, 1, 1, 5}};
-
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 0), 0);
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 1), 1);
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 2), 2);
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 3), 3);
- ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 4), 4);
-}
-
-TEST(NDArray_tests, slice_assign_test)
-{
- std::vector<float> v1{1, 2, 3, 4, 5};
- std::vector<float> v2(5);
-
- ContiguousSpan<float> span1(v1.begin(), v1.end());
- ContiguousSpan<float> span2(v2.begin(), v2.end());
-
- span2.assign(span1);
-
- ASSERT_EQ(v1, v2);
-}
diff --git a/runtime/libs/nnapi/CMakeLists.txt b/runtime/libs/nnapi/CMakeLists.txt
index a5d9490d1..73f82b909 100644
--- a/runtime/libs/nnapi/CMakeLists.txt
+++ b/runtime/libs/nnapi/CMakeLists.txt
@@ -1,3 +1,4 @@
-add_subdirectories()
+add_library(nnfw_lib_nnapi INTERFACE)
-add_library(nnfw_lib_nnapi ALIAS nnfw_lib_nnapi_1_2)
+target_include_directories(nnfw_lib_nnapi INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
+target_link_libraries(nnfw_lib_nnapi INTERFACE nnfw-nnapi-header)
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h b/runtime/libs/nnapi/include/NeuralNetworksExShim.h
index 855613241..855613241 100644
--- a/runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h
+++ b/runtime/libs/nnapi/include/NeuralNetworksExShim.h
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h b/runtime/libs/nnapi/include/NeuralNetworksLoadHelpers.h
index 1c482b54c..1c482b54c 100644
--- a/runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h
+++ b/runtime/libs/nnapi/include/NeuralNetworksLoadHelpers.h
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h b/runtime/libs/nnapi/include/NeuralNetworksShim.h
index 80082383f..80082383f 100644
--- a/runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h
+++ b/runtime/libs/nnapi/include/NeuralNetworksShim.h
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h b/runtime/libs/nnapi/include/NeuralNetworksTypes.h
index d74402749..d74402749 100644
--- a/runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h
+++ b/runtime/libs/nnapi/include/NeuralNetworksTypes.h
diff --git a/runtime/libs/nnapi/v1.1/CMakeLists.txt b/runtime/libs/nnapi/v1.1/CMakeLists.txt
deleted file mode 100644
index dc018c60f..000000000
--- a/runtime/libs/nnapi/v1.1/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-add_library(nnfw_lib_nnapi_1_1 INTERFACE)
-
-target_include_directories(nnfw_lib_nnapi_1_1 INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(nnfw_lib_nnapi_1_1 INTERFACE nnfw-nnapi-header)
diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h
deleted file mode 100644
index f684dab90..000000000
--- a/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-/**
- * @file NeuralNetworksExShim.h
- * @brief This file contains an actual implementation of
- * ANeuralNetworksModel_addOperationEx function
- * @ingroup COM_AI_RUNTIME
- */
-
-#ifndef NN_API_EX_SHIM_H
-#define NN_API_EX_SHIM_H
-
-#include "NeuralNetworksEx.h"
-#include "NeuralNetworksLoadHelpers.h"
-
-typedef int (*ANeuralNetworksModel_addOperationEx_fn)(ANeuralNetworksModel *model,
- ANeuralNetworksOperationTypeEx type,
- uint32_t inputCount, const uint32_t *inputs,
- uint32_t outputCount,
- const uint32_t *outputs);
-
-/**
- * @brief Add an extended operation to a model.
- *
- * @param[in] model The model to be modified.
- * @param[in] type The type of extended operation.
- * @param[in] inputCount The number of entries in the inputs array.
- * @param[in] inputs An array of indexes identifying each operand.
- * @param[in] outputCount The number of entries in the outputs array.
- * @param[in] outputs An array of indexes identifying each operand.
- *
- * @note The operands specified by inputs and outputs must have been
- * previously added by calls to {@link ANeuralNetworksModel_addOperand}.\n
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish}
- * has been called will return an error.\n
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-
-inline int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
- ANeuralNetworksOperationTypeEx type,
- uint32_t inputCount, const uint32_t *inputs,
- uint32_t outputCount, const uint32_t *outputs)
-{
- LOAD_FUNCTION(ANeuralNetworksModel_addOperationEx);
- EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, outputs);
-}
-
-#endif // NN_API_EX_SHIM_H
diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h
deleted file mode 100644
index 201465f9c..000000000
--- a/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This header is derived from part of the following file (in TensorFlow v1.12)
-// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h'
-
-/**
- * @file NeuralNetworksLoadHelpers.h
- * @ingroup COM_AI_RUNTIME
- * @brief This file contains functions to load NN API runtime library
- */
-
-#ifndef __NEURAL_NETWORKS_LOAD_HELPER_H__
-#define __NEURAL_NETWORKS_LOAD_HELPER_H__
-
-#include <dlfcn.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-/**
- * @brief Print log data
- * @param[in] format Format string of @c printf
- * @param[in] args Argument after format string. (Same with @c printf)
- */
-#define NNAPI_LOG(format, ...) printf(format "\n", __VA_ARGS__);
-
-/**
- * @brief Create a function pointer named @c fn after loading NN API library
- * @param[in] name Name of a function
- */
-#define LOAD_FUNCTION(name) \
- static name##_fn fn = reinterpret_cast<name##_fn>(nnfw::loadFunction(#name));
-
-/**
- * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION
- * @param[in] args List of arguments for the function @c fn
- */
-#define EXECUTE_FUNCTION(...) \
- if (fn != nullptr) { \
- fn(__VA_ARGS__); \
- }
-
-/**
- * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION
- * @param[in] args List of arguments for the function @c fn
- * @return the return value of @c fn
- */
-#define EXECUTE_FUNCTION_RETURN(...) return fn != nullptr ? fn(__VA_ARGS__) : 0;
-
-namespace nnfw
-{
-
-/**
- * @brief Load NN API library
- * @param[in] name path of NN API library
- * @return a symbol table handle of NN API library
- */
-inline void* loadLibrary(const char* name) {
- // TODO: change RTLD_LOCAL? Assumes there can be multiple instances of nn
- // api RT
- void* handle = nullptr;
-#if 1 //#ifdef __ANDROID__
- handle = dlopen(name, RTLD_LAZY | RTLD_LOCAL);
- if (handle == nullptr) {
- NNAPI_LOG("nnapi error: unable to open library %s", name);
- NNAPI_LOG(" %s", dlerror());
- }
-#endif
- return handle;
-}
-
-/**
- * @brief Load libneuralnetworks.so and return handle of library
- * @return a symbol table handle of NN API library
- */
-inline void* getLibraryHandle() {
- static void* handle = loadLibrary("libneuralnetworks.so");
- return handle;
-}
-
-/**
- * @brief Return function ptr in libneuralnetworks.so
- * @param[in] name Name of function
- * @return function pointer
- */
-inline void* loadFunction(const char* name) {
- void* fn = nullptr;
- if (getLibraryHandle() != nullptr) {
- fn = dlsym(getLibraryHandle(), name);
- }
- if (fn == nullptr) {
- NNAPI_LOG("nnapi error: unable to open function %s", name);
- NNAPI_LOG(" %s", dlerror());
- abort();
- }
- else {
-#ifdef _GNU_SOURCE
- Dl_info info;
- if (dladdr(fn, &info))
- {
- NNAPI_LOG("nnapi function '%s' is loaded from '%s' ", name, info.dli_fname);
- }
- else
- {
- NNAPI_LOG("nnapi function '%s' is failed to load", name);
- }
-
-#endif // _GNU_SOURCE
- }
- return fn;
-}
-
-/**
- * @brief Check if libneuralnetworks.so can be loaded
- * @return @c true if loading is successful, otherwise @c false.
- */
-inline bool NNAPIExists() {
- static bool nnapi_is_available = getLibraryHandle();
- return nnapi_is_available;
-}
-
-} // namespace nnfw
-
-#endif // __NEURAL_NETWORKS_LOAD_HELPER_H__
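
This v1.1 copy is removed in favor of the v1.2 headers, which now live under runtime/libs/nnapi/include (see the renames in the diffstat). For reference, a shim such as ANeuralNetworksModel_create expands through LOAD_FUNCTION / EXECUTE_FUNCTION_RETURN roughly as follows (hand-expanded for illustration; the typedef normally comes from NeuralNetworksShim.h):

#include "NeuralNetworks.h"
#include "NeuralNetworksLoadHelpers.h"

typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel **model);

inline int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
{
  // LOAD_FUNCTION(ANeuralNetworksModel_create):
  static ANeuralNetworksModel_create_fn fn =
    reinterpret_cast<ANeuralNetworksModel_create_fn>(nnfw::loadFunction("ANeuralNetworksModel_create"));
  // EXECUTE_FUNCTION_RETURN(model):
  return fn != nullptr ? fn(model) : 0;
}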
diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h
deleted file mode 100644
index 60b16f766..000000000
--- a/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h
+++ /dev/null
@@ -1,709 +0,0 @@
-/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// NOTE To minimize diff with upstream tensorflow, disable clang-format
-// clang-format off
-
-// NOTE This header is derived from part of the following file (in TensorFlow v1.12)
-// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h'
-#ifndef __NEURAL_NETWORKS_SHIM__
-#define __NEURAL_NETWORKS_SHIM__
-
-#include "NeuralNetworks.h"
-#include "NeuralNetworksLoadHelpers.h"
-
-// nn api function types
-
-typedef int (*ANeuralNetworksMemory_createFromFd_fn)(
- size_t size, int protect, int fd, size_t offset,
- ANeuralNetworksMemory** memory);
-
-typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory* memory);
-
-typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel** model);
-
-typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel* model);
-
-typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel* model);
-
-typedef int (*ANeuralNetworksCompilation_create_fn)(
- ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation);
-
-typedef void (*ANeuralNetworksCompilation_free_fn)(
- ANeuralNetworksCompilation* compilation);
-
-typedef int (*ANeuralNetworksCompilation_setPreference_fn)(
- ANeuralNetworksCompilation* compilation, int32_t preference);
-
-typedef int (*ANeuralNetworksCompilation_finish_fn)(
- ANeuralNetworksCompilation* compilation);
-
-typedef int (*ANeuralNetworksModel_addOperand_fn)(
- ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type);
-
-typedef int (*ANeuralNetworksModel_setOperandValue_fn)(
- ANeuralNetworksModel* model, int32_t index, const void* buffer,
- size_t length);
-
-typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
- ANeuralNetworksModel* model, int32_t index,
- const ANeuralNetworksMemory* memory, size_t offset, size_t length);
-
-typedef int (*ANeuralNetworksModel_addOperation_fn)(
- ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
- uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
- const uint32_t* outputs);
-
-typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(
- ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
- uint32_t outputCount, const uint32_t* outputs);
-
-typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(
- ANeuralNetworksModel* model, bool allow);
-
-typedef int (*ANeuralNetworksExecution_create_fn)(
- ANeuralNetworksCompilation* compilation,
- ANeuralNetworksExecution** execution);
-
-typedef void (*ANeuralNetworksExecution_free_fn)(
- ANeuralNetworksExecution* execution);
-
-typedef int (*ANeuralNetworksExecution_setInput_fn)(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const void* buffer, size_t length);
-
-typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
- size_t offset, size_t length);
-
-typedef int (*ANeuralNetworksExecution_setOutput_fn)(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, void* buffer, size_t length);
-
-typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
- size_t offset, size_t length);
-
-typedef int (*ANeuralNetworksExecution_startCompute_fn)(
- ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event);
-
-typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent* event);
-
-typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent* event);
-
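For context: the LOAD_FUNCTION and EXECUTE_FUNCTION_RETURN macros used by every wrapper below come from NeuralNetworksLoadHelpers.h. As a hedged sketch — assuming the helpers resolve symbols from libneuralnetworks.so via dlopen/dlsym, as the TensorFlow Lite original does; the names here are illustrative, not the actual macro bodies — an expanded wrapper looks roughly like this:

    #include <dlfcn.h>

    /* Illustrative expansion only; the real macros may differ in detail. */
    static void* nnapi_handle(void) {
      static void* handle = NULL;
      if (handle == NULL) {
        handle = dlopen("libneuralnetworks.so", RTLD_LAZY | RTLD_LOCAL);
      }
      return handle;
    }

    static int shim_Model_finish(ANeuralNetworksModel* model) {
      /* LOAD_FUNCTION: resolve and cache the target symbol once. */
      static ANeuralNetworksModel_finish_fn fn = NULL;
      if (fn == NULL) {
        fn = (ANeuralNetworksModel_finish_fn)dlsym(nnapi_handle(),
                                                   "ANeuralNetworksModel_finish");
      }
      /* EXECUTE_FUNCTION_RETURN: forward through the cached pointer. */
      return fn != NULL ? fn(model) : -1; /* -1 stands in for a load-failure code */
    }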
-/**
- * Creates a shared memory object from a file descriptor.
- *
- * The shared memory is backed by a file descriptor via mmap.
- * See {@link ANeuralNetworksMemory} for a description on how to use
- * this shared memory.
- *
- * @param size The requested size in bytes.
- * Must not be larger than the file size.
- * @param protect The desired memory protection for the mapping.
- *                It is either PROT_NONE or the bitwise OR of one or
- *                more of the following flags: PROT_READ, PROT_WRITE.
- * @param fd The requested file descriptor.
- *           The file descriptor has to be mmap-able and will be
- *           duplicated.
- * @param offset The offset to the beginning of the file of the area to map.
- * The offset has to be aligned to a page size.
- * @param memory The memory object to be created.
- * Set to NULL if unsuccessful.
- *
- * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
- */
-inline int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd,
- size_t offset,
- ANeuralNetworksMemory** memory) {
- LOAD_FUNCTION(ANeuralNetworksMemory_createFromFd);
- EXECUTE_FUNCTION_RETURN(size, protect, fd, offset, memory);
-}
-
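As a hedged usage sketch (error handling abbreviated; the helper name and its parameters are hypothetical):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static ANeuralNetworksMemory* map_weights(const char* path, size_t* out_size) {
      int fd = open(path, O_RDONLY);
      struct stat st;
      fstat(fd, &st);
      ANeuralNetworksMemory* memory = NULL;
      /* PROT_READ suffices for constant weights; the fd is duplicated
       * internally, so it may be closed once creation returns. */
      if (ANeuralNetworksMemory_createFromFd(st.st_size, PROT_READ, fd, /*offset=*/0,
                                             &memory) != ANEURALNETWORKS_NO_ERROR) {
        memory = NULL;
      }
      close(fd);
      *out_size = (size_t)st.st_size;
      return memory;
    }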
-/**
- * Delete a memory object.
- *
- * Destroys the object used by the run time to keep track of the memory.
- * This will free the underlying actual memory if no other code has open
- * handles to this memory.
- *
- * @param memory The memory object to be freed.
- */
-inline void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) {
- LOAD_FUNCTION(ANeuralNetworksMemory_free);
- EXECUTE_FUNCTION(memory);
-}
-
-/**
- * Create an empty {@link ANeuralNetworksModel}.
- *
- * <p>This only creates the object. Computation is performed once
- * {@link ANeuralNetworksExecution_startCompute} is invoked.
- *
- * The model should be constructed with calls to
- * {@link ANeuralNetworksModel_addOperation} and
- * {@link ANeuralNetworksModel_addOperand}
- *
- * <p>{@link ANeuralNetworksModel_finish} should be called once the model
- * has been fully constructed.</p>
- *
- * <p>{@link ANeuralNetworksModel_free} should be called once the model
- * is no longer needed.</p>
- *
- * @param model The {@link ANeuralNetworksModel} to be created.
- * Set to NULL if unsuccessful.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
- LOAD_FUNCTION(ANeuralNetworksModel_create);
- EXECUTE_FUNCTION_RETURN(model);
-}
-
-/**
- * Destroy a model.
- *
- * The model need not have been finished by a call to
- * {@link ANeuralNetworksModel_finish}.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be destroyed. Passing NULL is acceptable and
- * results in no operation.
- */
-inline void ANeuralNetworksModel_free(ANeuralNetworksModel* model) {
- LOAD_FUNCTION(ANeuralNetworksModel_free);
- EXECUTE_FUNCTION(model);
-}
-
-/**
- * Indicate that we have finished modifying a model. Required before
- * calling {@link ANeuralNetworksCompilation_create}.
- *
- * An application is responsible for making sure that no other thread uses
- * the model at the same time.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be finished.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) {
- LOAD_FUNCTION(ANeuralNetworksModel_finish);
- EXECUTE_FUNCTION_RETURN(model);
-}
-
-/**
- * Add an operand to a model.
- *
- * The order in which the operands are added is important. The first one added
- * to a model will have the index value 0, the second 1, etc. These indexes are
- * used as operand identifiers in {@link ANeuralNetworksModel_addOperation},
- * {@link ANeuralNetworksExecution_setInput},
- * {@link ANeuralNetworksExecution_setInputFromMemory},
- * {@link ANeuralNetworksExecution_setOutput},
- * {@link ANeuralNetworksExecution_setOutputFromMemory} and
- * {@link ANeuralNetworksExecution_setOperandValue}.
- *
- * To build a model that can accommodate inputs of various sizes, as you may
- * want to do for a CNN, set the size of the dimensions that will vary at run
- * time to 0. If you do so, provide the full dimensions when calling
- * {@link ANeuralNetworksExecution_setInput} or {@link
- * ANeuralNetworksExecution_setInputFromMemory}.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be modified.
- * @param type The {@link ANeuralNetworksOperandType} that describes the shape
- * of the operand.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_addOperand(
- ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type) {
- LOAD_FUNCTION(ANeuralNetworksModel_addOperand);
- EXECUTE_FUNCTION_RETURN(model, type);
-}
-
-/**
- * Sets an operand to a constant value.
- *
- * For scalar values, the content of buffer is copied into the model.
- *
- * For tensor values, a pointer to the buffer is stored within the model.
- * The application is responsible for not changing the content of this region
- * until all executions using this model have completed. As the data may
- * be copied during processing, modifying the data after this call yields
- * undefined results.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be modified.
- * @param index The index of the model operand we're setting.
- * @param buffer A pointer to the data to use.
- * @param length The size in bytes of the data value.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model,
- int32_t index,
- const void* buffer,
- size_t length) {
- LOAD_FUNCTION(ANeuralNetworksModel_setOperandValue);
- EXECUTE_FUNCTION_RETURN(model, index, buffer, length);
-}
-
-/**
- * Sets an operand to a value stored in a memory object.
- *
- * The content of the memory is not copied. A reference to that memory is stored
- * inside the model. The application is responsible for not changing the content
- * of the memory region until all executions using this model have completed.
- * As the data may be copied during processing, modifying the data after this
- * call yields undefined results.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @param model The model to be modified.
- * @param index The index of the model operand we're setting.
- * @param memory The memory containing the data.
- * @param offset This specifies the location of the data within the memory.
- * The offset is in bytes from the start of memory.
- * @param length The size in bytes of the data value.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_setOperandValueFromMemory(
- ANeuralNetworksModel* model, int32_t index,
- const ANeuralNetworksMemory* memory, size_t offset, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksModel_setOperandValueFromMemory);
- EXECUTE_FUNCTION_RETURN(model, index, memory, offset, length);
-}
-
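A hedged sketch, backing operand index 1 with the memory mapped in the earlier map_weights example instead of a host-side copy (the index and length are illustrative):

    ANeuralNetworksModel_setOperandValueFromMemory(model, /*index=*/1, memory,
                                                   /*offset=*/0, /*length=*/weights_size);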
-/**
- * Add an operation to a model.
- *
- * @param model The model to be modified.
- * @param type The type of the operation.
- * @param inputCount The number of entries in the inputs array.
- * @param inputs An array of indexes identifying each operand.
- * @param outputCount The number of entries in the outputs array.
- * @param outputs An array of indexes identifying each operand.
- *
- * The operands specified by inputs and outputs must have been
- * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
- ANeuralNetworksOperationType type,
- uint32_t inputCount,
- const uint32_t* inputs,
- uint32_t outputCount,
- const uint32_t* outputs) {
- LOAD_FUNCTION(ANeuralNetworksModel_addOperation);
- EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount,
- outputs);
-}
-
-/**
- * Specifies which operands will be the model's inputs and outputs.
- *
- * An operand cannot be used for both input and output. Doing so will
- * return an error.
- *
- * @param model The model to be modified.
- * @param inputCount The number of entries in the inputs array.
- * @param inputs An array of indexes identifying the input operands.
- * @param outputCount The number of entries in the outputs array.
- * @param outputs An array of indexes identifying the output operands.
- *
- * The operands specified by inputs and outputs must have been
- * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksModel_identifyInputsAndOutputs(
- ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
- uint32_t outputCount, const uint32_t* outputs) {
- LOAD_FUNCTION(ANeuralNetworksModel_identifyInputsAndOutputs);
- EXECUTE_FUNCTION_RETURN(model, inputCount, inputs, outputCount, outputs);
-}
-
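Putting the model-building calls above together, a hedged sketch of a one-operation model; ANEURALNETWORKS_ADD takes two input tensors plus a fused-activation scalar, and the constants are assumed to come from NeuralNetworks.h:

    #include <string.h>

    static ANeuralNetworksModel* build_add_model(void) {
      static uint32_t dims[1] = {4};
      ANeuralNetworksOperandType tensorType;
      memset(&tensorType, 0, sizeof(tensorType));
      tensorType.type = ANEURALNETWORKS_TENSOR_FLOAT32;
      tensorType.dimensionCount = 1;
      tensorType.dimensions = dims;

      ANeuralNetworksOperandType scalarType;
      memset(&scalarType, 0, sizeof(scalarType));
      scalarType.type = ANEURALNETWORKS_INT32;

      ANeuralNetworksModel* model = NULL;
      ANeuralNetworksModel_create(&model);
      ANeuralNetworksModel_addOperand(model, &tensorType); /* 0: input a    */
      ANeuralNetworksModel_addOperand(model, &tensorType); /* 1: input b    */
      ANeuralNetworksModel_addOperand(model, &scalarType); /* 2: activation */
      ANeuralNetworksModel_addOperand(model, &tensorType); /* 3: output     */

      int32_t fuse = ANEURALNETWORKS_FUSED_NONE;
      ANeuralNetworksModel_setOperandValue(model, 2, &fuse, sizeof(fuse));

      uint32_t opInputs[3] = {0, 1, 2}, opOutputs[1] = {3};
      ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, opInputs, 1,
                                        opOutputs);

      uint32_t modelInputs[2] = {0, 1}, modelOutputs[1] = {3};
      ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs, 1,
                                                    modelOutputs);
      ANeuralNetworksModel_finish(model);
      return model;
    }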
-/**
- * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
- * calculated with range and/or precision as low as that of the IEEE 754 16-bit
- * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
- * must be calculated using at least the range and precision of the IEEE 754
- * 32-bit floating-point format.
- *
- * @param model The model to be modified.
- * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
- * calculated with range and/or precision as low as that of the
- * IEEE 754 16-bit floating point format. 'false' indicates
- * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
- * at least the range and precision of the IEEE 754 32-bit floating
- * point format.
- *
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been called will return an error.
- *
- * Available since API level 28.
- *
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
- */
-inline int ANeuralNetworksModel_relaxComputationFloat32toFloat16(
- ANeuralNetworksModel* model, bool allow) {
- LOAD_FUNCTION(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
- EXECUTE_FUNCTION_RETURN(model, allow);
-}
-
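For example (a hedged one-liner; meaningful only on API level 28 and later, and it must precede ANeuralNetworksModel_finish since it modifies the model):

    /* Opt in to fp16 arithmetic for TENSOR_FLOAT32 operands. */
    ANeuralNetworksModel_relaxComputationFloat32toFloat16(model, true);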
-/**
- * Create a {@link ANeuralNetworksCompilation} to compile the given model.
- * This only creates the object. Compilation is only performed once
- * {@link ANeuralNetworksCompilation_finish} is invoked.
- *
- * <p>The provided model must outlive the compilation.</p>
- *
- * The model must already have been finished by a call to
- * {@link ANeuralNetworksModel_finish}.
- *
- * See {@link ANeuralNetworksCompilation} for information on multithreaded
- * usage.
- *
- * @param model The {@link ANeuralNetworksModel} to be compiled.
- * @param compilation The newly created object or NULL if unsuccessful.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
- * if the model is invalid.
- */
-inline int ANeuralNetworksCompilation_create(
- ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation) {
- LOAD_FUNCTION(ANeuralNetworksCompilation_create);
- EXECUTE_FUNCTION_RETURN(model, compilation);
-}
-
-/**
- * Destroy a compilation.
- *
- * <p>If called on a compilation for which
- * {@link ANeuralNetworksCompilation_finish} has been called, the
- * function will return immediately but will mark the compilation to be deleted
- * once the compilation completes. A subsequent wait on the compilation
- * will return ANEURALNETWORKS_ERROR_DELETED.
- *
- * See {@link ANeuralNetworksCompilation} for information on multithreaded
- * usage.
- *
- * @param compilation The compilation to be destroyed. Passing NULL is
- * acceptable and results in no operation.
- */
-inline void ANeuralNetworksCompilation_free(
- ANeuralNetworksCompilation* compilation) {
- LOAD_FUNCTION(ANeuralNetworksCompilation_free);
- EXECUTE_FUNCTION(compilation);
-}
-
-/**
- * Sets the execution preference.
- *
- * <p>Provides guidance to the runtime when trade-offs are possible.</p>
- *
- * See {@link ANeuralNetworksCompilation} for information on multithreaded
- * usage.
- *
- * @param compilation The compilation to be modified.
- * @param preference Either {@link PREFER_LOW_POWER},
- * {@link PREFER_SINGLE_FAST_ANSWER}, or
- * {@link PREFER_SUSTAINED_SPEED}.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksCompilation_setPreference(
- ANeuralNetworksCompilation* compilation, int32_t preference) {
- LOAD_FUNCTION(ANeuralNetworksCompilation_setPreference);
- EXECUTE_FUNCTION_RETURN(compilation, preference);
-}
-
-/**
- * Waits until the compilation completes.
- *
- * More than one thread can wait on a compilation. When the compilation
- * completes, all threads will be released.
- *
- * See {@link ANeuralNetworksCompilation} for information on multithreaded
- * usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if the compilation completed normally.
- */
-inline int ANeuralNetworksCompilation_finish(
- ANeuralNetworksCompilation* compilation) {
- LOAD_FUNCTION(ANeuralNetworksCompilation_finish);
- EXECUTE_FUNCTION_RETURN(compilation);
-}
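Taken together, a hedged sketch of the compilation stage; the preference constant is assumed from NeuralNetworks.h, where the PREFER_* values linked above carry an ANEURALNETWORKS_ prefix:

    ANeuralNetworksCompilation* compilation = NULL;
    if (ANeuralNetworksCompilation_create(model, &compilation) ==
        ANEURALNETWORKS_NO_ERROR) {
      ANeuralNetworksCompilation_setPreference(
          compilation, ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER);
      ANeuralNetworksCompilation_finish(compilation);
    }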
-
-/**
- * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
- * This only creates the object. Computation is only performed once
- * {@link ANeuralNetworksExecution_startCompute} is invoked.
- *
- * <p>The provided compilation must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
- * @param execution The newly created object or NULL if unsuccessful.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
- * if the compilation is invalid.
- */
-inline int ANeuralNetworksExecution_create(
- ANeuralNetworksCompilation* compilation,
- ANeuralNetworksExecution** execution) {
- LOAD_FUNCTION(ANeuralNetworksExecution_create);
- EXECUTE_FUNCTION_RETURN(compilation, execution);
-}
-
-/**
- * Destroy an execution.
- *
- * <p>If called on an execution for which
- * {@link ANeuralNetworksExecution_startCompute} has been called, the
- * function will return immediately but will mark the execution to be deleted
- * once the computation completes. {@link ANeuralNetworksEvent_wait}
- * will then return ANEURALNETWORKS_ERROR_DELETED.
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be destroyed. Passing NULL is acceptable
- * and results in no operation.
- */
-inline void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) {
- LOAD_FUNCTION(ANeuralNetworksExecution_free);
- EXECUTE_FUNCTION(execution);
-}
-
-/**
- * Associate a user buffer with an input of the model of the
- * {@link ANeuralNetworksExecution}.
- *
- * <p>The provided buffer must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be modified.
- * @param index The index of the input argument we are setting. It is
- * an index into the lists passed to
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
- * the index associated with {@link
- * ANeuralNetworksModel_addOperand}.
- * @param type The type of the operand. This should be used to specify the
- * dimensions that were set to 0 when the operand was added to the
- * model. All other properties of the type must be the same as
- * specified in the model. If the type is the same as specified
- * when the model was built, NULL can be passed.
- * @param buffer The buffer containing the data.
- * @param length The length in bytes of the buffer.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
- * the name is not recognized or the buffer is too small for the input.
- */
-inline int ANeuralNetworksExecution_setInput(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const void* buffer, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksExecution_setInput);
- EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
-}
-
-/**
- * Associate part of a memory object with an input of the model of the
- * {@link ANeuralNetworksExecution}.
- *
- * <p>The provided memory must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be modified.
- * @param index The index of the input argument we are setting. It is
- * an index into the lists passed to
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
- * the index associated with {@link
- * ANeuralNetworksModel_addOperand}.
- * @param type The type of the operand. This can be used to specify the
- * dimensions that were set to 0 when the operand was added to the
- * model. All other values must be the same as specified in the
- * model. If the type is the same as specified when the model
- * was built, NULL can be passed.
- * @param memory The memory containing the data.
- * @param offset This specifies the location of the data within the memory.
- * The offset is in bytes from the start of memory.
- * @param length The size in bytes of the data value.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
- * the name is not recognized or the buffer is too small for the input.
- */
-inline int ANeuralNetworksExecution_setInputFromMemory(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
- size_t offset, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksExecution_setInputFromMemory);
- EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
-}
-
-/**
- * Associate a user buffer with an output of the model of the
- * {@link ANeuralNetworksExecution}.
- *
- * <p>The provided buffer must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be modified.
- * @param index The index of the output argument we are setting. It is
- * an index into the lists passed to
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
- * the index associated with {@link
- * ANeuralNetworksModel_addOperand}.
- * @param type The type of the operand. This can be used to specify the
- * dimensions that were set to 0 when the operand was added to the
- * model. All other values must be the same as specified in the
- * model. If the type is the same as specified when the model
- * was built, NULL can be passed.
- * @param buffer The buffer where the data is to be written.
- * @param length The length in bytes of the buffer.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
- * the name is not recognized or the buffer is too small for the output.
- */
-inline int ANeuralNetworksExecution_setOutput(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, void* buffer, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksExecution_setOutput);
- EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
-}
-
-/**
- * Associate part of a memory object with an output of the model of the
- * {@link ANeuralNetworksExecution}.
- *
- * <p>The provided memory must outlive the execution.</p>
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be modified.
- * @param index The index of the output argument we are setting. It is
- * an index into the lists passed to
- * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
- * the index associated with {@link
- * ANeuralNetworksModel_addOperand}.
- * @param type The type of the operand. This can be used to specify the
- * dimensions that were set to 0 when the operand was added to the
- * model. All other values must be the same as specified in the
- * model. If the type is the same as specified when the model
- * was built, NULL can be passed.
- * @param memory The memory where the data is to be stored.
- * @param offset This specifies the location of the data within the memory.
- * The offset is in bytes from the start of memory.
- * @param length The length in bytes of the data value.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
- * the name is not recognized or the buffer is too small for the output.
- */
-inline int ANeuralNetworksExecution_setOutputFromMemory(
- ANeuralNetworksExecution* execution, int32_t index,
- const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
- size_t offset, size_t length) {
- LOAD_FUNCTION(ANeuralNetworksExecution_setOutputFromMemory);
- EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
-}
-
-/**
- * Schedule evaluation of the execution.
- *
- * <p>Once the model has been applied and the outputs are ready to be
- * consumed, the execution will be signaled. Use
- * {@link ANeuralNetworksEvent_wait} to wait for that signal.</p>
- *
- * Multiple executions can be scheduled and evaluated concurrently, and
- * compilations can be performed concurrently with executions. The runtime makes
- * no guarantee on the ordering of the completion of compilations and
- * executions. If it's important to the application, the application should
- * enforce the ordering by using {@link ANeuralNetworksEvent_wait}.
- *
- * {@link ANeuralNetworksEvent_wait} must be called to reclaim the resources used
- * by the execution.
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @param execution The execution to be scheduled and executed.
- *
- * @return ANEURALNETWORKS_NO_ERROR if successful.
- */
-inline int ANeuralNetworksExecution_startCompute(
- ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event) {
- LOAD_FUNCTION(ANeuralNetworksExecution_startCompute);
- EXECUTE_FUNCTION_RETURN(execution, event);
-}
-
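A hedged end-to-end sketch of one inference with the two-input model built earlier (buffer sizes are the caller's responsibility; passing NULL for the operand type reuses the shapes fixed when the model was built):

    static int run_once(ANeuralNetworksCompilation* compilation, const float* a,
                        const float* b, float* out, size_t bytes) {
      ANeuralNetworksExecution* execution = NULL;
      ANeuralNetworksEvent* event = NULL;
      ANeuralNetworksExecution_create(compilation, &execution);
      ANeuralNetworksExecution_setInput(execution, 0, NULL, a, bytes);
      ANeuralNetworksExecution_setInput(execution, 1, NULL, b, bytes);
      ANeuralNetworksExecution_setOutput(execution, 0, NULL, out, bytes);
      ANeuralNetworksExecution_startCompute(execution, &event);
      int rc = ANeuralNetworksEvent_wait(event); /* blocks until outputs are ready */
      ANeuralNetworksEvent_free(event);
      ANeuralNetworksExecution_free(execution);
      return rc;
    }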
-/**
- * Waits until the execution completes.
- *
- * More than one thread can wait on an event. When the execution completes,
- * all threads will be released.
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- *
- * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
- */
-inline int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) {
- LOAD_FUNCTION(ANeuralNetworksEvent_wait);
- EXECUTE_FUNCTION_RETURN(event);
-}
-
-/**
- * Destroys the event.
- *
- * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
- */
-inline void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) {
- LOAD_FUNCTION(ANeuralNetworksEvent_free);
- EXECUTE_FUNCTION(event);
-}
-
-#endif // __NEURAL_NETWORKS_SHIM__
diff --git a/runtime/libs/nnapi/v1.2/CMakeLists.txt b/runtime/libs/nnapi/v1.2/CMakeLists.txt
deleted file mode 100644
index 21ec3015f..000000000
--- a/runtime/libs/nnapi/v1.2/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-add_library(nnfw_lib_nnapi_1_2 INTERFACE)
-
-target_include_directories(nnfw_lib_nnapi_1_2 INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(nnfw_lib_nnapi_1_2 INTERFACE nnfw-nnapi-header)