diff options
author | Chunseok Lee <chunseok.lee@samsung.com> | 2020-03-04 18:09:24 +0900 |
---|---|---|
committer | Chunseok Lee <chunseok.lee@samsung.com> | 2020-03-04 18:09:24 +0900 |
commit | 302e6564a7a76109e1178207e44e45a58631c477 (patch) | |
tree | 6cc4bd95e5e438331fc2c53234af4ed0e0f3bc20 /runtime/libs | |
parent | bd11b24234d7d43dfe05a81c520aa01ffad06e42 (diff) | |
download | nnfw-302e6564a7a76109e1178207e44e45a58631c477.tar.gz nnfw-302e6564a7a76109e1178207e44e45a58631c477.tar.bz2 nnfw-302e6564a7a76109e1178207e44e45a58631c477.zip |
Imported Upstream version 1.1.0upstream/1.1.0submit/tizen/20200304.094649submit/tizen/20200304.093946submit/tizen/20200304.092919accepted/tizen/unified/20200305.051107
Diffstat (limited to 'runtime/libs')
140 files changed, 22556 insertions, 0 deletions
diff --git a/runtime/libs/CMakeLists.txt b/runtime/libs/CMakeLists.txt new file mode 100644 index 000000000..99d2028f4 --- /dev/null +++ b/runtime/libs/CMakeLists.txt @@ -0,0 +1,4 @@ +# Add all subdirectories. +# Each library in sub-directory must have it's own CMakeLists.txt +# to build library's binaries or to support interface. +add_subdirectories() diff --git a/runtime/libs/benchmark/CMakeLists.txt b/runtime/libs/benchmark/CMakeLists.txt new file mode 100644 index 000000000..2af0ffaa3 --- /dev/null +++ b/runtime/libs/benchmark/CMakeLists.txt @@ -0,0 +1,6 @@ +file(GLOB_RECURSE SOURCES "src/*.cpp") + +add_library(nnfw_lib_benchmark SHARED ${SOURCES}) +target_include_directories(nnfw_lib_benchmark PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_link_libraries(nnfw_lib_benchmark PRIVATE ${LIB_PTHREAD}) +install(TARGETS nnfw_lib_benchmark DESTINATION lib) diff --git a/runtime/libs/benchmark/include/benchmark.h b/runtime/libs/benchmark/include/benchmark.h new file mode 100644 index 000000000..7d3238210 --- /dev/null +++ b/runtime/libs/benchmark/include/benchmark.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __NNFW_BENCHMARK_H__ +#define __NNFW_BENCHMARK_H__ + +#include "benchmark/Result.h" +#include "benchmark/MemoryPoller.h" +#include "benchmark/CsvWriter.h" +#include "benchmark/Util.h" + +#endif // __NNFW_BENCHMARK_H__ diff --git a/runtime/libs/benchmark/include/benchmark/CsvHeader.lst b/runtime/libs/benchmark/include/benchmark/CsvHeader.lst new file mode 100644 index 000000000..c76b6131f --- /dev/null +++ b/runtime/libs/benchmark/include/benchmark/CsvHeader.lst @@ -0,0 +1,15 @@ +"Model", +"Backend", +"ModelLoad_Time", +"Prepare_Time", +"Execute_Time_Min", +"Execute_Time_Max", +"Execute_Time_Mean", +"ModelLoad_RSS", +"Prepare_RSS", +"Execute_RSS", +"Peak_RSS", +"ModelLoad_HWM", +"Prepare_HWM", +"Execute_HWM", +"Peak_HWM", diff --git a/runtime/libs/benchmark/include/benchmark/CsvWriter.h b/runtime/libs/benchmark/include/benchmark/CsvWriter.h new file mode 100644 index 000000000..3e141216b --- /dev/null +++ b/runtime/libs/benchmark/include/benchmark/CsvWriter.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __NNFW_BENCHMARK_CSV_WRITER_H__ +#define __NNFW_BENCHMARK_CSV_WRITER_H__ + +#include <vector> +#include <string> +#include <fstream> + +namespace benchmark +{ + +class CsvWriter +{ +public: + CsvWriter(const std::string &csv_filename); + CsvWriter(const std::string &csv_filename, const std::vector<std::string> &header); + virtual ~CsvWriter(); + + void write(const std::string &val); + void write(double val); + void write(uint32_t val); + void write(char val); + bool done(); + +public: + static const char delimiter = ','; + static const char newline = '\n'; + + friend CsvWriter &operator<<(CsvWriter &csvw, const std::string &val); + friend CsvWriter &operator<<(CsvWriter &csvw, double val); + friend CsvWriter &operator<<(CsvWriter &csvw, uint32_t val); + friend CsvWriter &operator<<(CsvWriter &csvw, char val); + +private: + void writeHeader(const std::vector<std::string> &header); + inline void postWrite(); + +private: + std::ofstream _ofs; + uint32_t _header_size; + uint32_t _col_idx; +}; + +} // namespace benchmark + +#endif // __NNFW_BENCHMARK_CSV_WRITER_H__ diff --git a/runtime/libs/benchmark/include/benchmark/MemoryPoller.h b/runtime/libs/benchmark/include/benchmark/MemoryPoller.h new file mode 100644 index 000000000..31f4edc0e --- /dev/null +++ b/runtime/libs/benchmark/include/benchmark/MemoryPoller.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_BENCHMARK_MEMORY_POLLER_H__ +#define __NNFW_BENCHMARK_MEMORY_POLLER_H__ + +#include <cstdint> +#include <algorithm> +#include <unordered_map> +#include <string> +#include <chrono> +#include <thread> +#include <mutex> +#include <condition_variable> +#include <list> + +#include "Phase.h" + +namespace benchmark +{ + +// NOTE. gpu_poll is not necessary on general targets. This is used on the only tv targets. +// TODO finally should be separated from data +// TODO Use ctor() and dtor() instead of start() and end() +class MemoryPoller +{ +public: + MemoryPoller(std::chrono::milliseconds duration = std::chrono::milliseconds(5), + bool gpu_poll = false); + + virtual ~MemoryPoller() + { + _term = true; + _cond_var_started.notify_all(); + _thread.join(); + } + + bool start(Phase phase); + bool end(Phase phase); + const std::unordered_map<Phase, uint32_t> &getRssMap() const { return _rss_map; } + const std::unordered_map<Phase, uint32_t> &getHwmMap() const { return _hwm_map; } + +private: + void process(); + bool prepareMemoryPolling(); + uint32_t getVmRSS(); + uint32_t getVmHWM(); + uint32_t getGpuMemory(); + +private: + std::chrono::milliseconds _duration; + std::thread _thread; + std::list<Phase> _phases; + std::unordered_map<Phase, uint32_t> _rss_map; + std::unordered_map<Phase, uint32_t> _hwm_map; + + std::mutex _mutex; + std::mutex _mutex_started; + std::condition_variable _cond_var_started; + + bool _term; + bool _run; + bool _gpu_poll; + std::string _process_name; +}; + +} // namespace benchmark + +#endif // __NNFW_BENCHMARK_MEMORY_POLLER_H__ diff --git a/runtime/libs/benchmark/include/benchmark/Phase.h b/runtime/libs/benchmark/include/benchmark/Phase.h new file mode 100644 index 000000000..bea9a87b2 --- /dev/null +++ b/runtime/libs/benchmark/include/benchmark/Phase.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_BENCHMARK_PHASE_H__ +#define __NNFW_BENCHMARK_PHASE_H__ + +#include <string> +#include <cassert> + +namespace benchmark +{ + +enum class Phase +{ + MODEL_LOAD, + PREPARE, + EXECUTE, +}; + +inline std::string getPhaseString(Phase phase) +{ + switch (phase) + { + case Phase::MODEL_LOAD: + return "MODEL_LOAD"; + case Phase::PREPARE: + return "PREPARE"; + case Phase::EXECUTE: + return "EXECUTE"; + default: + assert(false); + return ""; + } +} + +} // namespace benchmark + +#endif // __NNFW_BENCHMARK_PHASE_H__ diff --git a/runtime/libs/benchmark/include/benchmark/Result.h b/runtime/libs/benchmark/include/benchmark/Result.h new file mode 100644 index 000000000..570fa2114 --- /dev/null +++ b/runtime/libs/benchmark/include/benchmark/Result.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_BENCHMARK_RESULT_H__ +#define __NNFW_BENCHMARK_RESULT_H__ + +#include "Phase.h" +#include "MemoryPoller.h" + +#include <string> +#include <vector> +#include <numeric> +#include <algorithm> +#include <cassert> +#include <memory> + +namespace +{ + +uint32_t maxMemory(const std::unordered_map<benchmark::Phase, uint32_t> &map) +{ + auto answer = *std::max_element( + map.begin(), map.end(), + [](const std::pair<benchmark::Phase, uint32_t> &p1, + const std::pair<benchmark::Phase, uint32_t> &p2) { return p1.second < p2.second; }); + return answer.second; +} + +} // namespace anonymous + +namespace benchmark +{ + +// Data class between runner(nnpackage_run and tflite_run) and libbenchmark +class Result +{ +public: + Result(double model_load_time, double prepare_time, const std::vector<double> &execute_times) + : _model_load_time(model_load_time), _prepare_time(prepare_time), _model_load_rss(0), + _prepare_rss(0), _execute_rss(0), _peak_rss(0), _model_load_hwm(0), _prepare_hwm(0), + _execute_hwm(0), _peak_hwm(0) + { + // execute + double sum = 0.0; + double min = std::numeric_limits<double>::max(); + double max = std::numeric_limits<double>::lowest(); + for (auto t : execute_times) + { + sum += t; + min = std::min(min, t); + max = std::max(max, t); + } + _execute_time_mean = sum / static_cast<double>(execute_times.size()); + _execute_time_min = min; + _execute_time_max = max; + } + + Result(double model_load_time, double prepare_time, const std::vector<double> &execute_times, + const std::unique_ptr<MemoryPoller> &memory_poller) + : Result(model_load_time, prepare_time, execute_times) + { + if (!memory_poller) + return; + + const auto &rss = memory_poller->getRssMap(); + const auto &hwm = memory_poller->getHwmMap(); + + // rss + assert(rss.size() > 0); + assert(rss.find(Phase::MODEL_LOAD) != rss.end()); + assert(rss.find(Phase::PREPARE) != 
rss.end()); + assert(rss.find(Phase::EXECUTE) != rss.end()); + _model_load_rss = rss.at(Phase::MODEL_LOAD); + _prepare_rss = rss.at(Phase::PREPARE); + _execute_rss = rss.at(Phase::EXECUTE); + _peak_rss = maxMemory(rss); + + // hwm + assert(hwm.size() > 0); + assert(hwm.find(Phase::MODEL_LOAD) != hwm.end()); + assert(hwm.find(Phase::PREPARE) != hwm.end()); + assert(hwm.find(Phase::EXECUTE) != hwm.end()); + _model_load_hwm = hwm.at(Phase::MODEL_LOAD); + _prepare_hwm = hwm.at(Phase::PREPARE); + _execute_hwm = hwm.at(Phase::EXECUTE); + _peak_hwm = maxMemory(hwm); + } + +public: + double getModelLoadTime() const { return _model_load_time; } + double getPrepareTime() const { return _prepare_time; } + double getExecuteTimeMean() const { return _execute_time_mean; } + double getExecuteTimeMin() const { return _execute_time_min; } + double getExecuteTimeMax() const { return _execute_time_max; } + + uint32_t getModelLoadRss() const { return _model_load_rss; } + uint32_t getPrepareRss() const { return _prepare_rss; } + uint32_t getExecuteRss() const { return _execute_rss; } + uint32_t getPeakRss() const { return _peak_rss; } + + uint32_t getModelLoadHwm() const { return _model_load_hwm; } + uint32_t getPrepareHwm() const { return _prepare_hwm; } + uint32_t getExecuteHwm() const { return _execute_hwm; } + uint32_t getPeakHwm() const { return _peak_hwm; } + +private: + double _model_load_time; + double _prepare_time; + double _execute_time_mean; + double _execute_time_min; + double _execute_time_max; + + uint32_t _model_load_rss; + uint32_t _prepare_rss; + uint32_t _execute_rss; + uint32_t _peak_rss; + + uint32_t _model_load_hwm; + uint32_t _prepare_hwm; + uint32_t _execute_hwm; + uint32_t _peak_hwm; +}; + +} // namespace benchmark + +#endif // __NNFW_BENCHMARK_RESULT_H__ diff --git a/runtime/libs/benchmark/include/benchmark/Util.h b/runtime/libs/benchmark/include/benchmark/Util.h new file mode 100644 index 000000000..b10360fa0 --- /dev/null +++ 
b/runtime/libs/benchmark/include/benchmark/Util.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_BENCHMARK_UTIL_H__ +#define __NNFW_BENCHMARK_UTIL_H__ + +#include "Result.h" +#include "CsvWriter.h" + +#include <chrono> +#include <string> +#include <iostream> + +namespace benchmark +{ + +inline uint64_t nowMicros() +{ + auto time_point = std::chrono::high_resolution_clock::now(); + auto since_epoch = time_point.time_since_epoch(); + // default precision of high resolution clock is 10e-9 (nanoseconds) + return std::chrono::duration_cast<std::chrono::microseconds>(since_epoch).count(); +} + +// TODO Support not only stdout but also ostream +inline void printResult(const Result &result, bool print_memory) +{ + std::cout << "===================================" << std::endl; + std::cout << getPhaseString(Phase::MODEL_LOAD) << " takes " << result.getModelLoadTime() / 1e3 + << " ms" << std::endl; + std::cout << getPhaseString(Phase::PREPARE) << " takes " << result.getPrepareTime() / 1e3 + << " ms" << std::endl; + std::cout << getPhaseString(Phase::EXECUTE) << " takes " << std::endl; + std::cout << "- Min: " << result.getExecuteTimeMin() / 1e3 << " ms" << std::endl; + std::cout << "- Max: " << result.getExecuteTimeMax() / 1e3 << " ms" << std::endl; + std::cout << "- Mean: " << result.getExecuteTimeMean() / 1e3 << " ms" << std::endl; + std::cout << 
"===================================" << std::endl; + + if (print_memory == false) + return; + + std::cout << "RSS" << std::endl; + std::cout << "- " << getPhaseString(Phase::MODEL_LOAD) << " takes " << result.getModelLoadRss() + << " kb" << std::endl; + std::cout << "- " << getPhaseString(Phase::PREPARE) << " takes " << result.getPrepareRss() + << " kb" << std::endl; + std::cout << "- " << getPhaseString(Phase::EXECUTE) << " takes " << result.getExecuteRss() + << " kb" << std::endl; + std::cout << "- PEAK " + << " takes " << result.getPeakRss() << " kb" << std::endl; + std::cout << "===================================" << std::endl; + std::cout << "HWM" << std::endl; + std::cout << "- " << getPhaseString(Phase::MODEL_LOAD) << " takes " << result.getModelLoadHwm() + << " kb" << std::endl; + std::cout << "- " << getPhaseString(Phase::PREPARE) << " takes " << result.getPrepareHwm() + << " kb" << std::endl; + std::cout << "- " << getPhaseString(Phase::EXECUTE) << " takes " << result.getExecuteHwm() + << " kb" << std::endl; + std::cout << "- PEAK " + << " takes " << result.getPeakHwm() << " kb" << std::endl; + std::cout << "===================================" << std::endl; +} + +// TODO Support not only csv but also other datafile format such as xml, json, ... 
+inline void writeResult(const Result &result, const std::string &exec, const std::string &model, + const std::string &backend) +{ + std::string csv_filename = exec + "-" + model + "-" + backend + ".csv"; + + // write to csv + CsvWriter writer(csv_filename); + writer << model << backend << result.getModelLoadTime() / 1e3 << result.getPrepareTime() / 1e3 + << result.getExecuteTimeMin() / 1e3 << result.getExecuteTimeMax() / 1e3 + << result.getExecuteTimeMean() / 1e3 << result.getModelLoadRss() << result.getPrepareRss() + << result.getExecuteRss() << result.getPeakRss() << result.getModelLoadHwm() + << result.getPrepareHwm() << result.getExecuteHwm() << result.getPeakHwm(); + + bool done = writer.done(); + + std::cout << "Writing to " << csv_filename << " is "; + if (done) + std::cout << "done" << std::endl; + else + std::cout << "failed" << std::endl; +} + +} // namespace benchmark + +#endif // __NNFW_BENCHMARK_UTIL_H__ diff --git a/runtime/libs/benchmark/src/CsvWriter.cpp b/runtime/libs/benchmark/src/CsvWriter.cpp new file mode 100644 index 000000000..9f2c5b09d --- /dev/null +++ b/runtime/libs/benchmark/src/CsvWriter.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "benchmark/CsvWriter.h" +#include <cassert> + +namespace +{ + +const std::vector<std::string> csv_header{ +#include "benchmark/CsvHeader.lst" +}; + +} // namespace anonymous + +namespace benchmark +{ + +CsvWriter::CsvWriter(const std::string &csv_filename) : CsvWriter(csv_filename, csv_header) +{ + // DO NOTHING +} + +CsvWriter::CsvWriter(const std::string &csv_filename, const std::vector<std::string> &header) + : _ofs(csv_filename), _header_size(header.size()), _col_idx(0) +{ + assert(csv_filename.empty() == false); + assert(header.size() != 0); + assert(_ofs.is_open()); + + writeHeader(header); +} + +CsvWriter::~CsvWriter() +{ + if (_ofs.is_open()) + _ofs.close(); +} + +void CsvWriter::writeHeader(const std::vector<std::string> &header) +{ + for (const auto &col : header) + write(col); +} + +void CsvWriter::postWrite() +{ + if (++_col_idx == _header_size) + { + _ofs << newline; + _col_idx = 0; + } + else + { + _ofs << delimiter; + } +} + +void CsvWriter::write(const std::string &val) +{ + _ofs << val; + postWrite(); +} + +void CsvWriter::write(double val) +{ + _ofs << val; + postWrite(); +} + +void CsvWriter::write(uint32_t val) +{ + _ofs << val; + postWrite(); +} + +void CsvWriter::write(char val) +{ + _ofs << val; + postWrite(); +} + +bool CsvWriter::done() { return _col_idx == 0; } + +CsvWriter &operator<<(CsvWriter &csvw, const std::string &val) +{ + csvw.write(val); + return csvw; +} + +CsvWriter &operator<<(CsvWriter &csvw, double val) +{ + csvw.write(val); + return csvw; +} + +CsvWriter &operator<<(CsvWriter &csvw, uint32_t val) +{ + csvw.write(val); + return csvw; +} + +CsvWriter &operator<<(CsvWriter &csvw, char val) +{ + csvw.write(val); + return csvw; +} + +} // namespace benchmark diff --git a/runtime/libs/benchmark/src/MemoryPoller.cpp b/runtime/libs/benchmark/src/MemoryPoller.cpp new file mode 100644 index 000000000..436d536e4 --- /dev/null +++ b/runtime/libs/benchmark/src/MemoryPoller.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) 
2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "benchmark/MemoryPoller.h" +#include <vector> +#include <fstream> +#include <sstream> +#include <stdexcept> +#include <cassert> +#include <iostream> + +namespace +{ + +const std::string proc_status_path("/proc/self/status"); +const std::string gpu_memory_path("/sys/kernel/debug/mali0/gpu_memory"); + +bool isStrNumber(const std::string &s) +{ + return !s.empty() && + std::find_if(s.begin(), s.end(), [](char c) { return !std::isdigit(c); }) == s.end(); +} + +std::vector<std::string> splitLine(std::string line, std::string delimiters = " \n\t") +{ + std::vector<std::string> words; + size_t prev = 0, pos; + + while ((pos = line.find_first_of(delimiters, prev)) != std::string::npos) + { + if (pos > prev) + words.emplace_back(line.substr(prev, pos - prev)); + prev = pos + 1; + } + + if (prev < line.length()) + words.emplace_back(line.substr(prev, std::string::npos)); + + return words; +} + +std::vector<std::string> getValueFromFileStatus(const std::string &file, const std::string &key) +{ + std::ifstream ifs(file); + assert(ifs.is_open()); + + std::string line; + std::vector<std::string> val; + + bool found = false; + while (std::getline(ifs, line)) + { + if (line.find(key) != std::string::npos) + { + found = true; + break; + } + } + ifs.close(); + + if (!found) + { + // NOTE. 
the process which uses gpu resources cannot be there yet at the model-load phase. + // At that time, just return empty. + return val; + } + + val = splitLine(line); + return val; +} + +} // namespace anonymous + +namespace benchmark +{ + +MemoryPoller::MemoryPoller(std::chrono::milliseconds duration, bool gpu_poll) + : _duration(duration), _run(false), _term(false), _gpu_poll(gpu_poll) +{ + if (prepareMemoryPolling() == false) + throw std::runtime_error("failed to prepare memory pooling"); + + _thread = std::thread{&MemoryPoller::process, this}; +} + +bool MemoryPoller::start(Phase phase) +{ + if (std::find(_phases.begin(), _phases.end(), phase) != _phases.end()) + { + std::cerr << getPhaseString(phase) << " is already processing/processed..." << std::endl; + return false; + } + + { + std::lock_guard<std::mutex> lock(_mutex); + _phases.emplace_back(phase); + _rss_map[phase] = 0; + _hwm_map[phase] = 0; + } + + _run = true; + _cond_var_started.notify_all(); + return true; +} + +bool MemoryPoller::end(Phase phase) +{ + if (std::find(_phases.begin(), _phases.end(), phase) == _phases.end()) + { + std::cerr << getPhaseString(phase) << " is not started..." 
<< std::endl; + return false; + } + + uint32_t mem = 0; + bool stop = false; + { + std::lock_guard<std::mutex> lock(_mutex); + _phases.remove(phase); + stop = (_phases.size() == 0); + } + + if (_rss_map[phase] == 0) + { + uint32_t mem = getVmRSS(); + if (_gpu_poll) + { + mem += getGpuMemory(); + } + _rss_map[phase] = mem; + } + + if (_hwm_map[phase] == 0) + { + uint32_t mem = getVmHWM(); + if (_gpu_poll) + { + mem += getGpuMemory(); + } + _hwm_map[phase] = mem; + } + + if (stop) + { + _run = false; + _cond_var_started.notify_all(); + } + + return true; +} + +void MemoryPoller::process() +{ + std::unique_lock<std::mutex> lock_started(_mutex_started); + while (true) + { + _cond_var_started.wait(lock_started, [&]() { return _run || _term; }); + if (_term) + break; + + std::unique_lock<std::mutex> lock(_mutex); + + uint32_t cur_rss = getVmRSS(); + uint32_t cur_hwm = getVmHWM(); + if (_gpu_poll) + { + auto gpu_mem = getGpuMemory(); + cur_rss += gpu_mem; + cur_hwm += gpu_mem; + } + + for (auto &phase : _phases) + { + auto &rss = _rss_map.at(phase); + if (rss < cur_rss) + rss = cur_rss; + // hwm is gradually increasing + auto &hwm = _hwm_map.at(phase); + hwm = cur_hwm; + } + + lock.unlock(); + + std::this_thread::sleep_for(std::chrono::milliseconds(_duration)); + } +} + +bool MemoryPoller::prepareMemoryPolling() +{ + // VmRSS + { + std::ifstream ifs(proc_status_path); + if (!ifs.is_open()) + { + std::cerr << "failed to open " << proc_status_path << std::endl; + return false; + } + ifs.close(); + } + + // (Additionally) GpuMemory + if (_gpu_poll) + { + std::ifstream ifs(gpu_memory_path); + if (!ifs.is_open()) + { + std::cerr << "failed to open " << gpu_memory_path << std::endl; + return false; + } + ifs.close(); + + // Needs process name + auto val = getValueFromFileStatus(proc_status_path, "Name"); + assert(val.size() != 0); + _process_name = val[1]; + } + + return true; +} + +uint32_t MemoryPoller::getVmRSS() +{ + auto val = getValueFromFileStatus(proc_status_path, 
"VmRSS"); + if (val.size() == 0) + return 0; + assert(isStrNumber(val[1])); + return std::stoul(val[1]); +} + +uint32_t MemoryPoller::getVmHWM() +{ + auto val = getValueFromFileStatus(proc_status_path, "VmHWM"); + if (val.size() == 0) + return 0; + // key: value + assert(isStrNumber(val[1])); + return std::stoul(val[1]); +} + +uint32_t MemoryPoller::getGpuMemory() +{ + assert(!_process_name.empty()); + auto val = getValueFromFileStatus(gpu_memory_path, _process_name); + if (val.size() == 0) + return 0; + // process_name -> pid -> gpu_mem -> max_gpu_mem + assert(isStrNumber(val[2])); + return std::stoul(val[2]); +} + +} // namespace benchmark diff --git a/runtime/libs/cpp14/CMakeLists.txt b/runtime/libs/cpp14/CMakeLists.txt new file mode 100644 index 000000000..bba9e132d --- /dev/null +++ b/runtime/libs/cpp14/CMakeLists.txt @@ -0,0 +1,2 @@ +add_library(nnfw_lib_cpp14 INTERFACE) +target_include_directories(nnfw_lib_cpp14 INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include) diff --git a/runtime/libs/cpp14/include/cpp14/memory.h b/runtime/libs/cpp14/include/cpp14/memory.h new file mode 100644 index 000000000..7070e1c99 --- /dev/null +++ b/runtime/libs/cpp14/include/cpp14/memory.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file memory.h + * @ingroup COM_AI_RUNTIME + * @brief This file contains @c make_unique which is not supported by C++11 + * @details Implementation is based on http://isocpp.org/files/papers/N3656.txt + */ +#ifndef __NNFW_CPP14_MEMORY_H__ +#define __NNFW_CPP14_MEMORY_H__ + +#include <memory> + +namespace nnfw +{ +namespace cpp14 +{ + +template <typename T> struct _Unique_if +{ + typedef std::unique_ptr<T> _Single_object; +}; + +template <typename T> struct _Unique_if<T[]> +{ + typedef std::unique_ptr<T[]> _Unknown_bound; +}; + +template <typename T, size_t N> struct _Unique_if<T[N]> +{ + typedef void _Known_bound; +}; + +template <typename T, typename... Args> +typename _Unique_if<T>::_Single_object make_unique(Args &&... args) +{ + return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); +} + +template <typename T> typename _Unique_if<T>::_Unknown_bound make_unique(size_t n) +{ + typedef typename std::remove_extent<T>::type U; + return std::unique_ptr<T>(new U[n]()); +} + +template <typename T, typename... Args> +typename _Unique_if<T>::_Known_bound make_unique(Args &&...) 
= delete; + +} // namespace cpp14 +} // namespace nnfw + +#endif // __NNFW_CPP14_MEMORY_H__ diff --git a/runtime/libs/jsoncpp/CMakeLists.txt b/runtime/libs/jsoncpp/CMakeLists.txt new file mode 100644 index 000000000..5720cec5b --- /dev/null +++ b/runtime/libs/jsoncpp/CMakeLists.txt @@ -0,0 +1,6 @@ +file(GLOB_RECURSE SRCS "*.cpp") + +add_library(jsoncpp STATIC ${SRCS}) +set_property(TARGET jsoncpp PROPERTY POSITION_INDEPENDENT_CODE ON) +set_property(TARGET jsoncpp APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>) diff --git a/runtime/libs/jsoncpp/README.md b/runtime/libs/jsoncpp/README.md new file mode 100644 index 000000000..da5a06d71 --- /dev/null +++ b/runtime/libs/jsoncpp/README.md @@ -0,0 +1,11 @@ +# Origin of source code + +This library is based on Json-cpp amalgated header and cpp files(https://github.com/open-source-parsers/jsoncpp/wiki/Amalgamated) + +# Background + +Since jsoncpp on tizen does not support static jsoncpp library, nnfw project will link this local library. + +# Version + +- 1.7.7 : https://github.com/open-source-parsers/jsoncpp/archive/1.7.7.tar.gz diff --git a/runtime/libs/jsoncpp/json/json-forwards.h b/runtime/libs/jsoncpp/json/json-forwards.h new file mode 100644 index 000000000..9fe95c055 --- /dev/null +++ b/runtime/libs/jsoncpp/json/json-forwards.h @@ -0,0 +1,315 @@ +/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/). +/// It is intended to be used with #include "json/json-forwards.h" +/// This header provides forward declaration for all JsonCpp types. + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: LICENSE +// ////////////////////////////////////////////////////////////////////// + +/* +The JsonCpp library's source code, including accompanying documentation, +tests and demonstration applications, are licensed under the following +conditions... 
+ +The author (Baptiste Lepilleur) explicitly disclaims copyright in all +jurisdictions which recognize such a disclaimer. In such jurisdictions, +this software is released into the Public Domain. + +In jurisdictions which do not recognize Public Domain property (e.g. Germany as of +2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is +released under the terms of the MIT License (see below). + +In jurisdictions which recognize Public Domain property, the user of this +software may choose to accept it either as 1) Public Domain, 2) under the +conditions of the MIT License (see below), or 3) under the terms of dual +Public Domain/MIT License conditions described here, as they choose. + +The MIT License is about as close to Public Domain as a license can get, and is +described in clear, concise terms at: + + http://en.wikipedia.org/wiki/MIT_License + +The full text of the MIT License follows: + +======================================================================== +Copyright (c) 2007-2010 Baptiste Lepilleur + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +======================================================================== +(END LICENSE TEXT) + +The MIT license is compatible with both the GPL and commercial +software, affording one all of the rights of Public Domain with the +minor nuisance of being required to keep the above copyright notice +and license text in the source code. Note also that by accepting the +Public Domain "license" you can re-license your copy using whatever +license you like. + +*/ + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: LICENSE +// ////////////////////////////////////////////////////////////////////// + +#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED +#define JSON_FORWARD_AMALGATED_H_INCLUDED +/// If defined, indicates that the source file is amalgated +/// to prevent private header inclusion. +#define JSON_IS_AMALGAMATION + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/config.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef JSON_CONFIG_H_INCLUDED +#define JSON_CONFIG_H_INCLUDED +#include <stddef.h> +#include <string> //typedef String +#include <stdint.h> //typedef int64_t, uint64_t + +/// If defined, indicates that json library is embedded in CppTL library. 
+//# define JSON_IN_CPPTL 1 + +/// If defined, indicates that json may leverage CppTL library +//# define JSON_USE_CPPTL 1 +/// If defined, indicates that cpptl vector based map should be used instead of +/// std::map +/// as Value container. +//# define JSON_USE_CPPTL_SMALLMAP 1 + +// If non-zero, the library uses exceptions to report bad input instead of C +// assertion macros. The default is to use exceptions. +#ifndef JSON_USE_EXCEPTION +#define JSON_USE_EXCEPTION 1 +#endif + +/// If defined, indicates that the source file is amalgated +/// to prevent private header inclusion. +/// Remarks: it is automatically defined in the generated amalgated header. +// #define JSON_IS_AMALGAMATION + +#ifdef JSON_IN_CPPTL +#include <cpptl/config.h> +#ifndef JSON_USE_CPPTL +#define JSON_USE_CPPTL 1 +#endif +#endif + +#ifdef JSON_IN_CPPTL +#define JSON_API CPPTL_API +#elif defined(JSON_DLL_BUILD) +#if defined(_MSC_VER) || defined(__MINGW32__) +#define JSON_API __declspec(dllexport) +#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING +#endif // if defined(_MSC_VER) +#elif defined(JSON_DLL) +#if defined(_MSC_VER) || defined(__MINGW32__) +#define JSON_API __declspec(dllimport) +#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING +#endif // if defined(_MSC_VER) +#endif // ifdef JSON_IN_CPPTL +#if !defined(JSON_API) +#define JSON_API +#endif + +// If JSON_NO_INT64 is defined, then Json only support C++ "int" type for +// integer +// Storages, and 64 bits integer support is disabled. +// #define JSON_NO_INT64 1 + +#if defined(_MSC_VER) // MSVC +#if _MSC_VER <= 1200 // MSVC 6 + // Microsoft Visual Studio 6 only support conversion from __int64 to double + // (no conversion from unsigned __int64). +#define JSON_USE_INT64_DOUBLE_CONVERSION 1 +// Disable warning 4786 for VS6 caused by STL (identifier was truncated to '255' +// characters in the debug information) +// All projects I've ever seen with VS6 were using this globally (not bothering +// with pragma push/pop). 
+#pragma warning(disable : 4786) +#endif // MSVC 6 + +#if _MSC_VER >= 1500 // MSVC 2008 + /// Indicates that the following function is deprecated. +#define JSONCPP_DEPRECATED(message) __declspec(deprecated(message)) +#endif + +#endif // defined(_MSC_VER) + +// In c++11 the override keyword allows you to explicity define that a function +// is intended to override the base-class version. This makes the code more +// managable and fixes a set of common hard-to-find bugs. +#if __cplusplus >= 201103L +#define JSONCPP_OVERRIDE override +#elif defined(_MSC_VER) && _MSC_VER > 1600 +#define JSONCPP_OVERRIDE override +#else +#define JSONCPP_OVERRIDE +#endif + +#ifndef JSON_HAS_RVALUE_REFERENCES + +#if defined(_MSC_VER) && _MSC_VER >= 1600 // MSVC >= 2010 +#define JSON_HAS_RVALUE_REFERENCES 1 +#endif // MSVC >= 2010 + +#ifdef __clang__ +#if __has_feature(cxx_rvalue_references) +#define JSON_HAS_RVALUE_REFERENCES 1 +#endif // has_feature + +#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc) +#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L) +#define JSON_HAS_RVALUE_REFERENCES 1 +#endif // GXX_EXPERIMENTAL + +#endif // __clang__ || __GNUC__ + +#endif // not defined JSON_HAS_RVALUE_REFERENCES + +#ifndef JSON_HAS_RVALUE_REFERENCES +#define JSON_HAS_RVALUE_REFERENCES 0 +#endif + +#ifdef __clang__ +#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc) +#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)) +#define JSONCPP_DEPRECATED(message) __attribute__((deprecated(message))) +#elif (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) +#define JSONCPP_DEPRECATED(message) __attribute__((__deprecated__)) +#endif // GNUC version +#endif // __clang__ || __GNUC__ + +#if !defined(JSONCPP_DEPRECATED) +#define JSONCPP_DEPRECATED(message) +#endif // if !defined(JSONCPP_DEPRECATED) + +#if __GNUC__ >= 6 +#define JSON_USE_INT64_DOUBLE_CONVERSION 1 +#endif + +#if !defined(JSON_IS_AMALGAMATION) + +#include 
"version.h" + +#if JSONCPP_USING_SECURE_MEMORY +#include "allocator.h" //typedef Allocator +#endif + +#endif // if !defined(JSON_IS_AMALGAMATION) + +namespace Json +{ +typedef int Int; +typedef unsigned int UInt; +#if defined(JSON_NO_INT64) +typedef int LargestInt; +typedef unsigned int LargestUInt; +#undef JSON_HAS_INT64 +#else // if defined(JSON_NO_INT64) +// For Microsoft Visual use specific types as long long is not supported +#if defined(_MSC_VER) // Microsoft Visual Studio +typedef __int64 Int64; +typedef unsigned __int64 UInt64; +#else // if defined(_MSC_VER) // Other platforms, use long long +typedef int64_t Int64; +typedef uint64_t UInt64; +#endif // if defined(_MSC_VER) +typedef Int64 LargestInt; +typedef UInt64 LargestUInt; +#define JSON_HAS_INT64 +#endif // if defined(JSON_NO_INT64) +#if JSONCPP_USING_SECURE_MEMORY +#define JSONCPP_STRING std::basic_string<char, std::char_traits<char>, Json::SecureAllocator<char>> +#define JSONCPP_OSTRINGSTREAM \ + std::basic_ostringstream<char, std::char_traits<char>, Json::SecureAllocator<char>> +#define JSONCPP_OSTREAM std::basic_ostream<char, std::char_traits<char>> +#define JSONCPP_ISTRINGSTREAM \ + std::basic_istringstream<char, std::char_traits<char>, Json::SecureAllocator<char>> +#define JSONCPP_ISTREAM std::istream +#else +#define JSONCPP_STRING std::string +#define JSONCPP_OSTRINGSTREAM std::ostringstream +#define JSONCPP_OSTREAM std::ostream +#define JSONCPP_ISTRINGSTREAM std::istringstream +#define JSONCPP_ISTREAM std::istream +#endif // if JSONCPP_USING_SECURE_MEMORY +} // end namespace Json + +#endif // JSON_CONFIG_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/config.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/forwards.h +// 
////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef JSON_FORWARDS_H_INCLUDED +#define JSON_FORWARDS_H_INCLUDED + +#if !defined(JSON_IS_AMALGAMATION) +#include "config.h" +#endif // if !defined(JSON_IS_AMALGAMATION) + +namespace Json +{ + +// writer.h +class FastWriter; +class StyledWriter; + +// reader.h +class Reader; + +// features.h +class Features; + +// value.h +typedef unsigned int ArrayIndex; +class StaticString; +class Path; +class PathArgument; +class Value; +class ValueIteratorBase; +class ValueIterator; +class ValueConstIterator; + +} // namespace Json + +#endif // JSON_FORWARDS_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/forwards.h +// ////////////////////////////////////////////////////////////////////// + +#endif // ifndef JSON_FORWARD_AMALGATED_H_INCLUDED diff --git a/runtime/libs/jsoncpp/json/json.h b/runtime/libs/jsoncpp/json/json.h new file mode 100644 index 000000000..19c591267 --- /dev/null +++ b/runtime/libs/jsoncpp/json/json.h @@ -0,0 +1,2133 @@ +/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/). +/// It is intended to be used with #include "json/json.h" + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: LICENSE +// ////////////////////////////////////////////////////////////////////// + +/* +The JsonCpp library's source code, including accompanying documentation, +tests and demonstration applications, are licensed under the following +conditions... + +The author (Baptiste Lepilleur) explicitly disclaims copyright in all +jurisdictions which recognize such a disclaimer. 
In such jurisdictions, +this software is released into the Public Domain. + +In jurisdictions which do not recognize Public Domain property (e.g. Germany as of +2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is +released under the terms of the MIT License (see below). + +In jurisdictions which recognize Public Domain property, the user of this +software may choose to accept it either as 1) Public Domain, 2) under the +conditions of the MIT License (see below), or 3) under the terms of dual +Public Domain/MIT License conditions described here, as they choose. + +The MIT License is about as close to Public Domain as a license can get, and is +described in clear, concise terms at: + + http://en.wikipedia.org/wiki/MIT_License + +The full text of the MIT License follows: + +======================================================================== +Copyright (c) 2007-2010 Baptiste Lepilleur + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+======================================================================== +(END LICENSE TEXT) + +The MIT license is compatible with both the GPL and commercial +software, affording one all of the rights of Public Domain with the +minor nuisance of being required to keep the above copyright notice +and license text in the source code. Note also that by accepting the +Public Domain "license" you can re-license your copy using whatever +license you like. + +*/ + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: LICENSE +// ////////////////////////////////////////////////////////////////////// + +#ifndef JSON_AMALGATED_H_INCLUDED +#define JSON_AMALGATED_H_INCLUDED +/// If defined, indicates that the source file is amalgated +/// to prevent private header inclusion. +#define JSON_IS_AMALGAMATION + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/version.h +// ////////////////////////////////////////////////////////////////////// + +// DO NOT EDIT. This file (and "version") is generated by CMake. +// Run CMake configure step to update it. +#ifndef JSON_VERSION_H_INCLUDED +#define JSON_VERSION_H_INCLUDED + +#define JSONCPP_VERSION_STRING "1.7.7" +#define JSONCPP_VERSION_MAJOR 1 +#define JSONCPP_VERSION_MINOR 7 +#define JSONCPP_VERSION_PATCH 7 +#define JSONCPP_VERSION_QUALIFIER +#define JSONCPP_VERSION_HEXA \ + ((JSONCPP_VERSION_MAJOR << 24) | (JSONCPP_VERSION_MINOR << 16) | (JSONCPP_VERSION_PATCH << 8)) + +#ifdef JSONCPP_USING_SECURE_MEMORY +#undef JSONCPP_USING_SECURE_MEMORY +#endif +#define JSONCPP_USING_SECURE_MEMORY 0 +// If non-zero, the library zeroes any memory that it has allocated before +// it frees its memory. 
+ +#endif // JSON_VERSION_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/version.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/config.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef JSON_CONFIG_H_INCLUDED +#define JSON_CONFIG_H_INCLUDED +#include <stddef.h> +#include <string> //typedef String +#include <stdint.h> //typedef int64_t, uint64_t + +/// If defined, indicates that json library is embedded in CppTL library. +//# define JSON_IN_CPPTL 1 + +/// If defined, indicates that json may leverage CppTL library +//# define JSON_USE_CPPTL 1 +/// If defined, indicates that cpptl vector based map should be used instead of +/// std::map +/// as Value container. +//# define JSON_USE_CPPTL_SMALLMAP 1 + +// If non-zero, the library uses exceptions to report bad input instead of C +// assertion macros. The default is to use exceptions. +#ifndef JSON_USE_EXCEPTION +#define JSON_USE_EXCEPTION 1 +#endif + +/// If defined, indicates that the source file is amalgated +/// to prevent private header inclusion. +/// Remarks: it is automatically defined in the generated amalgated header. 
+// #define JSON_IS_AMALGAMATION + +#ifdef JSON_IN_CPPTL +#include <cpptl/config.h> +#ifndef JSON_USE_CPPTL +#define JSON_USE_CPPTL 1 +#endif +#endif + +#ifdef JSON_IN_CPPTL +#define JSON_API CPPTL_API +#elif defined(JSON_DLL_BUILD) +#if defined(_MSC_VER) || defined(__MINGW32__) +#define JSON_API __declspec(dllexport) +#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING +#endif // if defined(_MSC_VER) +#elif defined(JSON_DLL) +#if defined(_MSC_VER) || defined(__MINGW32__) +#define JSON_API __declspec(dllimport) +#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING +#endif // if defined(_MSC_VER) +#endif // ifdef JSON_IN_CPPTL +#if !defined(JSON_API) +#define JSON_API +#endif + +// If JSON_NO_INT64 is defined, then Json only support C++ "int" type for +// integer +// Storages, and 64 bits integer support is disabled. +// #define JSON_NO_INT64 1 + +#if defined(_MSC_VER) // MSVC +#if _MSC_VER <= 1200 // MSVC 6 + // Microsoft Visual Studio 6 only support conversion from __int64 to double + // (no conversion from unsigned __int64). +#define JSON_USE_INT64_DOUBLE_CONVERSION 1 +// Disable warning 4786 for VS6 caused by STL (identifier was truncated to '255' +// characters in the debug information) +// All projects I've ever seen with VS6 were using this globally (not bothering +// with pragma push/pop). +#pragma warning(disable : 4786) +#endif // MSVC 6 + +#if _MSC_VER >= 1500 // MSVC 2008 + /// Indicates that the following function is deprecated. +#define JSONCPP_DEPRECATED(message) __declspec(deprecated(message)) +#endif + +#endif // defined(_MSC_VER) + +// In c++11 the override keyword allows you to explicity define that a function +// is intended to override the base-class version. This makes the code more +// managable and fixes a set of common hard-to-find bugs. 
+#if __cplusplus >= 201103L +#define JSONCPP_OVERRIDE override +#elif defined(_MSC_VER) && _MSC_VER > 1600 +#define JSONCPP_OVERRIDE override +#else +#define JSONCPP_OVERRIDE +#endif + +#ifndef JSON_HAS_RVALUE_REFERENCES + +#if defined(_MSC_VER) && _MSC_VER >= 1600 // MSVC >= 2010 +#define JSON_HAS_RVALUE_REFERENCES 1 +#endif // MSVC >= 2010 + +#ifdef __clang__ +#if __has_feature(cxx_rvalue_references) +#define JSON_HAS_RVALUE_REFERENCES 1 +#endif // has_feature + +#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc) +#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L) +#define JSON_HAS_RVALUE_REFERENCES 1 +#endif // GXX_EXPERIMENTAL + +#endif // __clang__ || __GNUC__ + +#endif // not defined JSON_HAS_RVALUE_REFERENCES + +#ifndef JSON_HAS_RVALUE_REFERENCES +#define JSON_HAS_RVALUE_REFERENCES 0 +#endif + +#ifdef __clang__ +#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc) +#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)) +#define JSONCPP_DEPRECATED(message) __attribute__((deprecated(message))) +#elif (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) +#define JSONCPP_DEPRECATED(message) __attribute__((__deprecated__)) +#endif // GNUC version +#endif // __clang__ || __GNUC__ + +#if !defined(JSONCPP_DEPRECATED) +#define JSONCPP_DEPRECATED(message) +#endif // if !defined(JSONCPP_DEPRECATED) + +#if __GNUC__ >= 6 +#define JSON_USE_INT64_DOUBLE_CONVERSION 1 +#endif + +#if !defined(JSON_IS_AMALGAMATION) + +#include "version.h" + +#if JSONCPP_USING_SECURE_MEMORY +#include "allocator.h" //typedef Allocator +#endif + +#endif // if !defined(JSON_IS_AMALGAMATION) + +namespace Json +{ +typedef int Int; +typedef unsigned int UInt; +#if defined(JSON_NO_INT64) +typedef int LargestInt; +typedef unsigned int LargestUInt; +#undef JSON_HAS_INT64 +#else // if defined(JSON_NO_INT64) +// For Microsoft Visual use specific types as long long is not supported +#if defined(_MSC_VER) // Microsoft Visual 
Studio +typedef __int64 Int64; +typedef unsigned __int64 UInt64; +#else // if defined(_MSC_VER) // Other platforms, use long long +typedef int64_t Int64; +typedef uint64_t UInt64; +#endif // if defined(_MSC_VER) +typedef Int64 LargestInt; +typedef UInt64 LargestUInt; +#define JSON_HAS_INT64 +#endif // if defined(JSON_NO_INT64) +#if JSONCPP_USING_SECURE_MEMORY +#define JSONCPP_STRING std::basic_string<char, std::char_traits<char>, Json::SecureAllocator<char>> +#define JSONCPP_OSTRINGSTREAM \ + std::basic_ostringstream<char, std::char_traits<char>, Json::SecureAllocator<char>> +#define JSONCPP_OSTREAM std::basic_ostream<char, std::char_traits<char>> +#define JSONCPP_ISTRINGSTREAM \ + std::basic_istringstream<char, std::char_traits<char>, Json::SecureAllocator<char>> +#define JSONCPP_ISTREAM std::istream +#else +#define JSONCPP_STRING std::string +#define JSONCPP_OSTRINGSTREAM std::ostringstream +#define JSONCPP_OSTREAM std::ostream +#define JSONCPP_ISTRINGSTREAM std::istringstream +#define JSONCPP_ISTREAM std::istream +#endif // if JSONCPP_USING_SECURE_MEMORY +} // end namespace Json + +#endif // JSON_CONFIG_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/config.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/forwards.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. 
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef JSON_FORWARDS_H_INCLUDED +#define JSON_FORWARDS_H_INCLUDED + +#if !defined(JSON_IS_AMALGAMATION) +#include "config.h" +#endif // if !defined(JSON_IS_AMALGAMATION) + +namespace Json +{ + +// writer.h +class FastWriter; +class StyledWriter; + +// reader.h +class Reader; + +// features.h +class Features; + +// value.h +typedef unsigned int ArrayIndex; +class StaticString; +class Path; +class PathArgument; +class Value; +class ValueIteratorBase; +class ValueIterator; +class ValueConstIterator; + +} // namespace Json + +#endif // JSON_FORWARDS_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/forwards.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/features.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef CPPTL_JSON_FEATURES_H_INCLUDED +#define CPPTL_JSON_FEATURES_H_INCLUDED + +#if !defined(JSON_IS_AMALGAMATION) +#include "forwards.h" +#endif // if !defined(JSON_IS_AMALGAMATION) + +namespace Json +{ + +/** \brief Configuration passed to reader and writer. + * This configuration object can be used to force the Reader or Writer + * to behave in a standard conforming way. + */ +class JSON_API Features +{ +public: + /** \brief A configuration that allows all features and assumes all strings + * are UTF-8. 
+ * - C & C++ comments are allowed + * - Root object can be any JSON value + * - Assumes Value strings are encoded in UTF-8 + */ + static Features all(); + + /** \brief A configuration that is strictly compatible with the JSON + * specification. + * - Comments are forbidden. + * - Root object must be either an array or an object value. + * - Assumes Value strings are encoded in UTF-8 + */ + static Features strictMode(); + + /** \brief Initialize the configuration like JsonConfig::allFeatures; + */ + Features(); + + /// \c true if comments are allowed. Default: \c true. + bool allowComments_; + + /// \c true if root must be either an array or an object value. Default: \c + /// false. + bool strictRoot_; + + /// \c true if dropped null placeholders are allowed. Default: \c false. + bool allowDroppedNullPlaceholders_; + + /// \c true if numeric object key are allowed. Default: \c false. + bool allowNumericKeys_; +}; + +} // namespace Json + +#endif // CPPTL_JSON_FEATURES_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/features.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/value.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. 
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef CPPTL_JSON_H_INCLUDED +#define CPPTL_JSON_H_INCLUDED + +#if !defined(JSON_IS_AMALGAMATION) +#include "forwards.h" +#endif // if !defined(JSON_IS_AMALGAMATION) +#include <string> +#include <vector> +#include <exception> + +#ifndef JSON_USE_CPPTL_SMALLMAP +#include <map> +#else +#include <cpptl/smallmap.h> +#endif +#ifdef JSON_USE_CPPTL +#include <cpptl/forwards.h> +#endif + +// Conditional NORETURN attribute on the throw functions would: +// a) suppress false positives from static code analysis +// b) possibly improve optimization opportunities. +#if !defined(JSONCPP_NORETURN) +#if defined(_MSC_VER) +#define JSONCPP_NORETURN __declspec(noreturn) +#elif defined(__GNUC__) +#define JSONCPP_NORETURN __attribute__((__noreturn__)) +#else +#define JSONCPP_NORETURN +#endif +#endif + +// Disable warning C4251: <data member>: <type> needs to have dll-interface to +// be used by... +#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) +#pragma warning(push) +#pragma warning(disable : 4251) +#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) + +/** \brief JSON (JavaScript Object Notation). + */ +namespace Json +{ + +/** Base class for all exceptions we throw. + * + * We use nothing but these internally. Of course, STL can throw others. + */ +class JSON_API Exception : public std::exception +{ +public: + Exception(JSONCPP_STRING const &msg); + ~Exception() throw() JSONCPP_OVERRIDE; + char const *what() const throw() JSONCPP_OVERRIDE; + +protected: + JSONCPP_STRING msg_; +}; + +/** Exceptions which the user cannot easily avoid. + * + * E.g. out-of-memory (when we use malloc), stack-overflow, malicious input + * + * \remark derived from Json::Exception + */ +class JSON_API RuntimeError : public Exception +{ +public: + RuntimeError(JSONCPP_STRING const &msg); +}; + +/** Exceptions thrown by JSON_ASSERT/JSON_FAIL macros. 
+ * + * These are precondition-violations (user bugs) and internal errors (our bugs). + * + * \remark derived from Json::Exception + */ +class JSON_API LogicError : public Exception +{ +public: + LogicError(JSONCPP_STRING const &msg); +}; + +/// used internally +JSONCPP_NORETURN void throwRuntimeError(JSONCPP_STRING const &msg); +/// used internally +JSONCPP_NORETURN void throwLogicError(JSONCPP_STRING const &msg); + +/** \brief Type of the value held by a Value object. + */ +enum ValueType +{ + nullValue = 0, ///< 'null' value + intValue, ///< signed integer value + uintValue, ///< unsigned integer value + realValue, ///< double value + stringValue, ///< UTF-8 string value + booleanValue, ///< bool value + arrayValue, ///< array value (ordered list) + objectValue ///< object value (collection of name/value pairs). +}; + +enum CommentPlacement +{ + commentBefore = 0, ///< a comment placed on the line before a value + commentAfterOnSameLine, ///< a comment just after a value on the same line + commentAfter, ///< a comment on the line after a value (only make sense for + /// root value) + numberOfCommentPlacement +}; + +//# ifdef JSON_USE_CPPTL +// typedef CppTL::AnyEnumerator<const char *> EnumMemberNames; +// typedef CppTL::AnyEnumerator<const Value &> EnumValues; +//# endif + +/** \brief Lightweight wrapper to tag static string. + * + * Value constructor and objectValue member assignement takes advantage of the + * StaticString and avoid the cost of string duplication when storing the + * string or the member name. 
+ * + * Example of usage: + * \code + * Json::Value aValue( StaticString("some text") ); + * Json::Value object; + * static const StaticString code("code"); + * object[code] = 1234; + * \endcode + */ +class JSON_API StaticString +{ +public: + explicit StaticString(const char *czstring) : c_str_(czstring) {} + + operator const char *() const { return c_str_; } + + const char *c_str() const { return c_str_; } + +private: + const char *c_str_; +}; + +/** \brief Represents a <a HREF="http://www.json.org">JSON</a> value. + * + * This class is a discriminated union wrapper that can represents a: + * - signed integer [range: Value::minInt - Value::maxInt] + * - unsigned integer (range: 0 - Value::maxUInt) + * - double + * - UTF-8 string + * - boolean + * - 'null' + * - an ordered list of Value + * - collection of name/value pairs (javascript object) + * + * The type of the held value is represented by a #ValueType and + * can be obtained using type(). + * + * Values of an #objectValue or #arrayValue can be accessed using operator[]() + * methods. + * Non-const methods will automatically create the a #nullValue element + * if it does not exist. + * The sequence of an #arrayValue will be automatically resized and initialized + * with #nullValue. resize() can be used to enlarge or truncate an #arrayValue. + * + * The get() methods can be used to obtain default value in the case the + * required element does not exist. + * + * It is possible to iterate over the list of a #objectValue values using + * the getMemberNames() method. + * + * \note #Value string-length fit in size_t, but keys must be < 2^30. + * (The reason is an implementation detail.) A #CharReader will raise an + * exception if a bound is exceeded to avoid security holes in your app, + * but the Value API does *not* check bounds. That is the responsibility + * of the caller. 
+ */ +class JSON_API Value +{ + friend class ValueIteratorBase; + +public: + typedef std::vector<JSONCPP_STRING> Members; + typedef ValueIterator iterator; + typedef ValueConstIterator const_iterator; + typedef Json::UInt UInt; + typedef Json::Int Int; +#if defined(JSON_HAS_INT64) + typedef Json::UInt64 UInt64; + typedef Json::Int64 Int64; +#endif // defined(JSON_HAS_INT64) + typedef Json::LargestInt LargestInt; + typedef Json::LargestUInt LargestUInt; + typedef Json::ArrayIndex ArrayIndex; + + static const Value + &null; ///< We regret this reference to a global instance; prefer the simpler Value(). + static const Value &nullRef; ///< just a kludge for binary-compatibility; same as null + static Value const &nullSingleton(); ///< Prefer this to null or nullRef. + + /// Minimum signed integer value that can be stored in a Json::Value. + static const LargestInt minLargestInt; + /// Maximum signed integer value that can be stored in a Json::Value. + static const LargestInt maxLargestInt; + /// Maximum unsigned integer value that can be stored in a Json::Value. + static const LargestUInt maxLargestUInt; + + /// Minimum signed int value that can be stored in a Json::Value. + static const Int minInt; + /// Maximum signed int value that can be stored in a Json::Value. + static const Int maxInt; + /// Maximum unsigned int value that can be stored in a Json::Value. + static const UInt maxUInt; + +#if defined(JSON_HAS_INT64) + /// Minimum signed 64 bits int value that can be stored in a Json::Value. + static const Int64 minInt64; + /// Maximum signed 64 bits int value that can be stored in a Json::Value. + static const Int64 maxInt64; + /// Maximum unsigned 64 bits int value that can be stored in a Json::Value. 
+ static const UInt64 maxUInt64; +#endif // defined(JSON_HAS_INT64) + +private: +#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION + class CZString + { + public: + enum DuplicationPolicy + { + noDuplication = 0, + duplicate, + duplicateOnCopy + }; + CZString(ArrayIndex index); + CZString(char const *str, unsigned length, DuplicationPolicy allocate); + CZString(CZString const &other); +#if JSON_HAS_RVALUE_REFERENCES + CZString(CZString &&other); +#endif + ~CZString(); + CZString &operator=(CZString other); + bool operator<(CZString const &other) const; + bool operator==(CZString const &other) const; + ArrayIndex index() const; + // const char* c_str() const; ///< \deprecated + char const *data() const; + unsigned length() const; + bool isStaticString() const; + + private: + void swap(CZString &other); + + struct StringStorage + { + unsigned policy_ : 2; + unsigned length_ : 30; // 1GB max + }; + + char const *cstr_; // actually, a prefixed string, unless policy is noDup + union { + ArrayIndex index_; + StringStorage storage_; + }; + }; + +public: +#ifndef JSON_USE_CPPTL_SMALLMAP + typedef std::map<CZString, Value> ObjectValues; +#else + typedef CppTL::SmallMap<CZString, Value> ObjectValues; +#endif // ifndef JSON_USE_CPPTL_SMALLMAP +#endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION + +public: + /** \brief Create a default Value of the given type. + + This is a very useful constructor. + To create an empty array, pass arrayValue. + To create an empty object, pass objectValue. + Another Value can then be set to this one by assignment. +This is useful since clear() and resize() will not alter types. 
+ + Examples: +\code +Json::Value null_value; // null +Json::Value arr_value(Json::arrayValue); // [] +Json::Value obj_value(Json::objectValue); // {} +\endcode + */ + Value(ValueType type = nullValue); + Value(Int value); + Value(UInt value); +#if defined(JSON_HAS_INT64) + Value(Int64 value); + Value(UInt64 value); +#endif // if defined(JSON_HAS_INT64) + Value(double value); + Value(const char *value); ///< Copy til first 0. (NULL causes to seg-fault.) + Value(const char *begin, const char *end); ///< Copy all, incl zeroes. + /** \brief Constructs a value from a static string. + + * Like other value string constructor but do not duplicate the string for + * internal storage. The given string must remain alive after the call to this + * constructor. + * \note This works only for null-terminated strings. (We cannot change the + * size of this class, so we have nowhere to store the length, + * which might be computed later for various operations.) + * + * Example of usage: + * \code + * static StaticString foo("some text"); + * Json::Value aValue(foo); + * \endcode + */ + Value(const StaticString &value); + Value(const JSONCPP_STRING &value); ///< Copy data() til size(). Embedded zeroes too. +#ifdef JSON_USE_CPPTL + Value(const CppTL::ConstString &value); +#endif + Value(bool value); + /// Deep copy. + Value(const Value &other); +#if JSON_HAS_RVALUE_REFERENCES + /// Move constructor + Value(Value &&other); +#endif + ~Value(); + + /// Deep copy, then swap(other). + /// \note Over-write existing comments. To preserve comments, use #swapPayload(). + Value &operator=(Value other); + /// Swap everything. + void swap(Value &other); + /// Swap values but leave comments and source offsets in place. + void swapPayload(Value &other); + + ValueType type() const; + + /// Compare payload only, not comments etc. 
+ bool operator<(const Value &other) const; + bool operator<=(const Value &other) const; + bool operator>=(const Value &other) const; + bool operator>(const Value &other) const; + bool operator==(const Value &other) const; + bool operator!=(const Value &other) const; + int compare(const Value &other) const; + + const char *asCString() const; ///< Embedded zeroes could cause you trouble! +#if JSONCPP_USING_SECURE_MEMORY + unsigned getCStringLength() const; // Allows you to understand the length of the CString +#endif + JSONCPP_STRING asString() const; ///< Embedded zeroes are possible. + /** Get raw char* of string-value. + * \return false if !string. (Seg-fault if str or end are NULL.) + */ + bool getString(char const **begin, char const **end) const; +#ifdef JSON_USE_CPPTL + CppTL::ConstString asConstString() const; +#endif + Int asInt() const; + UInt asUInt() const; +#if defined(JSON_HAS_INT64) + Int64 asInt64() const; + UInt64 asUInt64() const; +#endif // if defined(JSON_HAS_INT64) + LargestInt asLargestInt() const; + LargestUInt asLargestUInt() const; + float asFloat() const; + double asDouble() const; + bool asBool() const; + + bool isNull() const; + bool isBool() const; + bool isInt() const; + bool isInt64() const; + bool isUInt() const; + bool isUInt64() const; + bool isIntegral() const; + bool isDouble() const; + bool isNumeric() const; + bool isString() const; + bool isArray() const; + bool isObject() const; + + bool isConvertibleTo(ValueType other) const; + + /// Number of values in array or object + ArrayIndex size() const; + + /// \brief Return true if empty array, empty object, or null; + /// otherwise, false. + bool empty() const; + + /// Return isNull() + bool operator!() const; + + /// Remove all object members and array elements. + /// \pre type() is arrayValue, objectValue, or nullValue + /// \post type() is unchanged + void clear(); + + /// Resize the array to size elements. + /// New elements are initialized to null. 
+ /// May only be called on nullValue or arrayValue. + /// \pre type() is arrayValue or nullValue + /// \post type() is arrayValue + void resize(ArrayIndex size); + + /// Access an array element (zero based index ). + /// If the array contains less than index element, then null value are + /// inserted + /// in the array so that its size is index+1. + /// (You may need to say 'value[0u]' to get your compiler to distinguish + /// this from the operator[] which takes a string.) + Value &operator[](ArrayIndex index); + + /// Access an array element (zero based index ). + /// If the array contains less than index element, then null value are + /// inserted + /// in the array so that its size is index+1. + /// (You may need to say 'value[0u]' to get your compiler to distinguish + /// this from the operator[] which takes a string.) + Value &operator[](int index); + + /// Access an array element (zero based index ) + /// (You may need to say 'value[0u]' to get your compiler to distinguish + /// this from the operator[] which takes a string.) + const Value &operator[](ArrayIndex index) const; + + /// Access an array element (zero based index ) + /// (You may need to say 'value[0u]' to get your compiler to distinguish + /// this from the operator[] which takes a string.) + const Value &operator[](int index) const; + + /// If the array contains at least index+1 elements, returns the element + /// value, + /// otherwise returns defaultValue. + Value get(ArrayIndex index, const Value &defaultValue) const; + /// Return true if index < size(). + bool isValidIndex(ArrayIndex index) const; + /// \brief Append value to array at the end. + /// + /// Equivalent to jsonvalue[jsonvalue.size()] = value; + Value &append(const Value &value); + + /// Access an object value by name, create a null member if it does not exist. + /// \note Because of our implementation, keys are limited to 2^30 -1 chars. + /// Exceeding that will cause an exception. 
+ Value &operator[](const char *key); + /// Access an object value by name, returns null if there is no member with + /// that name. + const Value &operator[](const char *key) const; + /// Access an object value by name, create a null member if it does not exist. + /// \param key may contain embedded nulls. + Value &operator[](const JSONCPP_STRING &key); + /// Access an object value by name, returns null if there is no member with + /// that name. + /// \param key may contain embedded nulls. + const Value &operator[](const JSONCPP_STRING &key) const; + /** \brief Access an object value by name, create a null member if it does not + exist. + + * If the object has no entry for that name, then the member name used to store + * the new entry is not duplicated. + * Example of use: + * \code + * Json::Value object; + * static const StaticString code("code"); + * object[code] = 1234; + * \endcode + */ + Value &operator[](const StaticString &key); +#ifdef JSON_USE_CPPTL + /// Access an object value by name, create a null member if it does not exist. + Value &operator[](const CppTL::ConstString &key); + /// Access an object value by name, returns null if there is no member with + /// that name. + const Value &operator[](const CppTL::ConstString &key) const; +#endif + /// Return the member named key if it exist, defaultValue otherwise. + /// \note deep copy + Value get(const char *key, const Value &defaultValue) const; + /// Return the member named key if it exist, defaultValue otherwise. + /// \note deep copy + /// \note key may contain embedded nulls. + Value get(const char *begin, const char *end, const Value &defaultValue) const; + /// Return the member named key if it exist, defaultValue otherwise. + /// \note deep copy + /// \param key may contain embedded nulls. + Value get(const JSONCPP_STRING &key, const Value &defaultValue) const; +#ifdef JSON_USE_CPPTL + /// Return the member named key if it exist, defaultValue otherwise. 
+ /// \note deep copy + Value get(const CppTL::ConstString &key, const Value &defaultValue) const; +#endif + /// Most general and efficient version of isMember()const, get()const, + /// and operator[]const + /// \note As stated elsewhere, behavior is undefined if (end-begin) >= 2^30 + Value const *find(char const *begin, char const *end) const; + /// Most general and efficient version of object-mutators. + /// \note As stated elsewhere, behavior is undefined if (end-begin) >= 2^30 + /// \return non-zero, but JSON_ASSERT if this is neither object nor nullValue. + Value const *demand(char const *begin, char const *end); + /// \brief Remove and return the named member. + /// + /// Do nothing if it did not exist. + /// \return the removed Value, or null. + /// \pre type() is objectValue or nullValue + /// \post type() is unchanged + /// \deprecated + Value removeMember(const char *key); + /// Same as removeMember(const char*) + /// \param key may contain embedded nulls. + /// \deprecated + Value removeMember(const JSONCPP_STRING &key); + /// Same as removeMember(const char* begin, const char* end, Value* removed), + /// but 'key' is null-terminated. + bool removeMember(const char *key, Value *removed); + /** \brief Remove the named map member. + + Update 'removed' iff removed. + \param key may contain embedded nulls. + \return true iff removed (no exceptions) + */ + bool removeMember(JSONCPP_STRING const &key, Value *removed); + /// Same as removeMember(JSONCPP_STRING const& key, Value* removed) + bool removeMember(const char *begin, const char *end, Value *removed); + /** \brief Remove the indexed array element. + + O(n) expensive operations. + Update 'removed' iff removed. + \return true iff removed (no exceptions) + */ + bool removeIndex(ArrayIndex i, Value *removed); + + /// Return true if the object has a member named key. + /// \note 'key' must be null-terminated. + bool isMember(const char *key) const; + /// Return true if the object has a member named key. 
+ /// \param key may contain embedded nulls. + bool isMember(const JSONCPP_STRING &key) const; + /// Same as isMember(JSONCPP_STRING const& key)const + bool isMember(const char *begin, const char *end) const; +#ifdef JSON_USE_CPPTL + /// Return true if the object has a member named key. + bool isMember(const CppTL::ConstString &key) const; +#endif + + /// \brief Return a list of the member names. + /// + /// If null, return an empty list. + /// \pre type() is objectValue or nullValue + /// \post if type() was nullValue, it remains nullValue + Members getMemberNames() const; + + //# ifdef JSON_USE_CPPTL + // EnumMemberNames enumMemberNames() const; + // EnumValues enumValues() const; + //# endif + + /// \deprecated Always pass len. + JSONCPP_DEPRECATED("Use setComment(JSONCPP_STRING const&) instead.") + void setComment(const char *comment, CommentPlacement placement); + /// Comments must be //... or /* ... */ + void setComment(const char *comment, size_t len, CommentPlacement placement); + /// Comments must be //... or /* ... */ + void setComment(const JSONCPP_STRING &comment, CommentPlacement placement); + bool hasComment(CommentPlacement placement) const; + /// Include delimiters and embedded newlines. + JSONCPP_STRING getComment(CommentPlacement placement) const; + + JSONCPP_STRING toStyledString() const; + + const_iterator begin() const; + const_iterator end() const; + + iterator begin(); + iterator end(); + + // Accessors for the [start, limit) range of bytes within the JSON text from + // which this value was parsed, if any. 
+ void setOffsetStart(ptrdiff_t start); + void setOffsetLimit(ptrdiff_t limit); + ptrdiff_t getOffsetStart() const; + ptrdiff_t getOffsetLimit() const; + +private: + void initBasic(ValueType type, bool allocated = false); + + Value &resolveReference(const char *key); + Value &resolveReference(const char *key, const char *end); + + struct CommentInfo + { + CommentInfo(); + ~CommentInfo(); + + void setComment(const char *text, size_t len); + + char *comment_; + }; + + // struct MemberNamesTransform + //{ + // typedef const char *result_type; + // const char *operator()( const CZString &name ) const + // { + // return name.c_str(); + // } + //}; + + union ValueHolder { + LargestInt int_; + LargestUInt uint_; + double real_; + bool bool_; + char *string_; // actually ptr to unsigned, followed by str, unless !allocated_ + ObjectValues *map_; + } value_; + ValueType type_ : 8; + unsigned int allocated_ : 1; // Notes: if declared as bool, bitfield is useless. + // If not allocated_, string_ must be null-terminated. + CommentInfo *comments_; + + // [start, limit) byte offsets in the source JSON text from which this Value + // was extracted. + ptrdiff_t start_; + ptrdiff_t limit_; +}; + +/** \brief Experimental and untested: represents an element of the "path" to + * access a node. + */ +class JSON_API PathArgument +{ +public: + friend class Path; + + PathArgument(); + PathArgument(ArrayIndex index); + PathArgument(const char *key); + PathArgument(const JSONCPP_STRING &key); + +private: + enum Kind + { + kindNone = 0, + kindIndex, + kindKey + }; + JSONCPP_STRING key_; + ArrayIndex index_; + Kind kind_; +}; + +/** \brief Experimental and untested: represents a "path" to access a node. + * + * Syntax: + * - "." 
=> root node + * - ".[n]" => elements at index 'n' of root node (an array value) + * - ".name" => member named 'name' of root node (an object value) + * - ".name1.name2.name3" + * - ".[0][1][2].name1[3]" + * - ".%" => member name is provided as parameter + * - ".[%]" => index is provied as parameter + */ +class JSON_API Path +{ +public: + Path(const JSONCPP_STRING &path, const PathArgument &a1 = PathArgument(), + const PathArgument &a2 = PathArgument(), const PathArgument &a3 = PathArgument(), + const PathArgument &a4 = PathArgument(), const PathArgument &a5 = PathArgument()); + + const Value &resolve(const Value &root) const; + Value resolve(const Value &root, const Value &defaultValue) const; + /// Creates the "path" to access the specified node and returns a reference on + /// the node. + Value &make(Value &root) const; + +private: + typedef std::vector<const PathArgument *> InArgs; + typedef std::vector<PathArgument> Args; + + void makePath(const JSONCPP_STRING &path, const InArgs &in); + void addPathInArg(const JSONCPP_STRING &path, const InArgs &in, InArgs::const_iterator &itInArg, + PathArgument::Kind kind); + void invalidPath(const JSONCPP_STRING &path, int location); + + Args args_; +}; + +/** \brief base class for Value iterators. + * + */ +class JSON_API ValueIteratorBase +{ +public: + typedef std::bidirectional_iterator_tag iterator_category; + typedef unsigned int size_t; + typedef int difference_type; + typedef ValueIteratorBase SelfType; + + bool operator==(const SelfType &other) const { return isEqual(other); } + + bool operator!=(const SelfType &other) const { return !isEqual(other); } + + difference_type operator-(const SelfType &other) const { return other.computeDistance(*this); } + + /// Return either the index or the member name of the referenced value as a + /// Value. + Value key() const; + + /// Return the index of the referenced Value, or -1 if it is not an arrayValue. 
+ UInt index() const;
+
+ /// Return the member name of the referenced Value, or "" if it is not an
+ /// objectValue.
+ /// \note Avoid `c_str()` on result, as embedded zeroes are possible.
+ JSONCPP_STRING name() const;
+
+ /// Return the member name of the referenced Value. "" if it is not an
+ /// objectValue.
+ /// \deprecated This cannot be used for UTF-8 strings, since there can be embedded nulls.
+ JSONCPP_DEPRECATED("Use `key = name();` instead.")
+ char const *memberName() const;
+ /// Return the member name of the referenced Value, or NULL if it is not an
+ /// objectValue.
+ /// \note Better version than memberName(). Allows embedded nulls.
+ char const *memberName(char const **end) const;
+
+protected:
+ Value &deref() const;
+
+ void increment();
+
+ void decrement();
+
+ difference_type computeDistance(const SelfType &other) const;
+
+ bool isEqual(const SelfType &other) const;
+
+ void copy(const SelfType &other);
+
+private:
+ Value::ObjectValues::iterator current_;
+ // Indicates that iterator is for a null value.
+ bool isNull_;
+
+public:
+ // For some reason, BORLAND needs these at the end, rather
+ // than earlier. No idea why.
+ ValueIteratorBase();
+ explicit ValueIteratorBase(const Value::ObjectValues::iterator &current);
+};
+
+/** \brief const iterator for object and array value.
+ *
+ */
+class JSON_API ValueConstIterator : public ValueIteratorBase
+{
+ friend class Value;
+
+public:
+ typedef const Value value_type;
+ // typedef unsigned int size_t;
+ // typedef int difference_type;
+ typedef const Value &reference;
+ typedef const Value *pointer;
+ typedef ValueConstIterator SelfType;
+
+ ValueConstIterator();
+ ValueConstIterator(ValueIterator const &other);
+
+private:
+ /*! \internal Use by Value to create an iterator. 
+ */
+ explicit ValueConstIterator(const Value::ObjectValues::iterator &current);
+
+public:
+ SelfType &operator=(const ValueIteratorBase &other);
+
+ SelfType operator++(int)
+ {
+ SelfType temp(*this);
+ ++*this;
+ return temp;
+ }
+
+ SelfType operator--(int)
+ {
+ SelfType temp(*this);
+ --*this;
+ return temp;
+ }
+
+ SelfType &operator--()
+ {
+ decrement();
+ return *this;
+ }
+
+ SelfType &operator++()
+ {
+ increment();
+ return *this;
+ }
+
+ reference operator*() const { return deref(); }
+
+ pointer operator->() const { return &deref(); }
+};
+
+/** \brief Iterator for object and array value.
+ */
+class JSON_API ValueIterator : public ValueIteratorBase
+{
+ friend class Value;
+
+public:
+ typedef Value value_type;
+ typedef unsigned int size_t;
+ typedef int difference_type;
+ typedef Value &reference;
+ typedef Value *pointer;
+ typedef ValueIterator SelfType;
+
+ ValueIterator();
+ explicit ValueIterator(const ValueConstIterator &other);
+ ValueIterator(const ValueIterator &other);
+
+private:
+ /*! \internal Use by Value to create an iterator.
+ */
+ explicit ValueIterator(const Value::ObjectValues::iterator &current);
+
+public:
+ SelfType &operator=(const SelfType &other);
+
+ SelfType operator++(int)
+ {
+ SelfType temp(*this);
+ ++*this;
+ return temp;
+ }
+
+ SelfType operator--(int)
+ {
+ SelfType temp(*this);
+ --*this;
+ return temp;
+ }
+
+ SelfType &operator--()
+ {
+ decrement();
+ return *this;
+ }
+
+ SelfType &operator++()
+ {
+ increment();
+ return *this;
+ }
+
+ reference operator*() const { return deref(); }
+
+ pointer operator->() const { return &deref(); }
+};
+
+} // namespace Json
+
+namespace std
+{
+/// Specialize std::swap() for Json::Value. 
+template <> inline void swap(Json::Value &a, Json::Value &b) { a.swap(b); } +} + +#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) +#pragma warning(pop) +#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) + +#endif // CPPTL_JSON_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/value.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/reader.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef CPPTL_JSON_READER_H_INCLUDED +#define CPPTL_JSON_READER_H_INCLUDED + +#if !defined(JSON_IS_AMALGAMATION) +#include "features.h" +#include "value.h" +#endif // if !defined(JSON_IS_AMALGAMATION) +#include <deque> +#include <iosfwd> +#include <stack> +#include <string> +#include <istream> + +// Disable warning C4251: <data member>: <type> needs to have dll-interface to +// be used by... +#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) +#pragma warning(push) +#pragma warning(disable : 4251) +#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) + +namespace Json +{ + +/** \brief Unserialize a <a HREF="http://www.json.org">JSON</a> document into a + *Value. + * + * \deprecated Use CharReader and CharReaderBuilder. + */ +class JSON_API Reader +{ +public: + typedef char Char; + typedef const Char *Location; + + /** \brief An error tagged with where in the JSON text it was encountered. + * + * The offsets give the [start, limit) range of bytes within the text. Note + * that this is bytes, not codepoints. 
+ * + */ + struct StructuredError + { + ptrdiff_t offset_start; + ptrdiff_t offset_limit; + JSONCPP_STRING message; + }; + + /** \brief Constructs a Reader allowing all features + * for parsing. + */ + Reader(); + + /** \brief Constructs a Reader allowing the specified feature set + * for parsing. + */ + Reader(const Features &features); + + /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a> + * document. + * \param document UTF-8 encoded string containing the document to read. + * \param root [out] Contains the root value of the document if it was + * successfully parsed. + * \param collectComments \c true to collect comment and allow writing them + * back during + * serialization, \c false to discard comments. + * This parameter is ignored if + * Features::allowComments_ + * is \c false. + * \return \c true if the document was successfully parsed, \c false if an + * error occurred. + */ + bool parse(const std::string &document, Value &root, bool collectComments = true); + + /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a> + document. + * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the + document to read. + * \param endDoc Pointer on the end of the UTF-8 encoded string of the + document to read. + * Must be >= beginDoc. + * \param root [out] Contains the root value of the document if it was + * successfully parsed. + * \param collectComments \c true to collect comment and allow writing them + back during + * serialization, \c false to discard comments. + * This parameter is ignored if + Features::allowComments_ + * is \c false. + * \return \c true if the document was successfully parsed, \c false if an + error occurred. + */ + bool parse(const char *beginDoc, const char *endDoc, Value &root, bool collectComments = true); + + /// \brief Parse from input stream. + /// \see Json::operator>>(std::istream&, Json::Value&). 
+ bool parse(JSONCPP_ISTREAM &is, Value &root, bool collectComments = true); + + /** \brief Returns a user friendly string that list errors in the parsed + * document. + * \return Formatted error message with the list of errors with their location + * in + * the parsed document. An empty string is returned if no error + * occurred + * during parsing. + * \deprecated Use getFormattedErrorMessages() instead (typo fix). + */ + JSONCPP_DEPRECATED("Use getFormattedErrorMessages() instead.") + JSONCPP_STRING getFormatedErrorMessages() const; + + /** \brief Returns a user friendly string that list errors in the parsed + * document. + * \return Formatted error message with the list of errors with their location + * in + * the parsed document. An empty string is returned if no error + * occurred + * during parsing. + */ + JSONCPP_STRING getFormattedErrorMessages() const; + + /** \brief Returns a vector of structured erros encounted while parsing. + * \return A (possibly empty) vector of StructuredError objects. Currently + * only one error can be returned, but the caller should tolerate + * multiple + * errors. This can occur if the parser recovers from a non-fatal + * parse error and then encounters additional errors. + */ + std::vector<StructuredError> getStructuredErrors() const; + + /** \brief Add a semantic error message. + * \param value JSON Value location associated with the error + * \param message The error message. + * \return \c true if the error was successfully added, \c false if the + * Value offset exceeds the document size. + */ + bool pushError(const Value &value, const JSONCPP_STRING &message); + + /** \brief Add a semantic error message with extra context. + * \param value JSON Value location associated with the error + * \param message The error message. + * \param extra Additional JSON Value location to contextualize the error + * \return \c true if the error was successfully added, \c false if either + * Value offset exceeds the document size. 
+ */
+ bool pushError(const Value &value, const JSONCPP_STRING &message, const Value &extra);
+
+ /** \brief Return whether there are any errors.
+ * \return \c true if there are no errors to report \c false if
+ * errors have occurred.
+ */
+ bool good() const;
+
+private:
+ enum TokenType
+ {
+ tokenEndOfStream = 0,
+ tokenObjectBegin,
+ tokenObjectEnd,
+ tokenArrayBegin,
+ tokenArrayEnd,
+ tokenString,
+ tokenNumber,
+ tokenTrue,
+ tokenFalse,
+ tokenNull,
+ tokenArraySeparator,
+ tokenMemberSeparator,
+ tokenComment,
+ tokenError
+ };
+
+ class Token
+ {
+ public:
+ TokenType type_;
+ Location start_;
+ Location end_;
+ };
+
+ class ErrorInfo
+ {
+ public:
+ Token token_;
+ JSONCPP_STRING message_;
+ Location extra_;
+ };
+
+ typedef std::deque<ErrorInfo> Errors;
+
+ bool readToken(Token &token);
+ void skipSpaces();
+ bool match(Location pattern, int patternLength);
+ bool readComment();
+ bool readCStyleComment();
+ bool readCppStyleComment();
+ bool readString();
+ void readNumber();
+ bool readValue();
+ bool readObject(Token &token);
+ bool readArray(Token &token);
+ bool decodeNumber(Token &token);
+ bool decodeNumber(Token &token, Value &decoded);
+ bool decodeString(Token &token);
+ bool decodeString(Token &token, JSONCPP_STRING &decoded);
+ bool decodeDouble(Token &token);
+ bool decodeDouble(Token &token, Value &decoded);
+ bool decodeUnicodeCodePoint(Token &token, Location &current, Location end, unsigned int &unicode);
+ bool decodeUnicodeEscapeSequence(Token &token, Location &current, Location end,
+ unsigned int &unicode);
+ bool addError(const JSONCPP_STRING &message, Token &token, Location extra = 0);
+ bool recoverFromError(TokenType skipUntilToken);
+ bool addErrorAndRecover(const JSONCPP_STRING &message, Token &token, TokenType skipUntilToken);
+ void skipUntilSpace();
+ Value &currentValue();
+ Char getNextChar();
+ void getLocationLineAndColumn(Location location, int &line, int &column) const;
+ JSONCPP_STRING getLocationLineAndColumn(Location location) 
const; + void addComment(Location begin, Location end, CommentPlacement placement); + void skipCommentTokens(Token &token); + + typedef std::stack<Value *> Nodes; + Nodes nodes_; + Errors errors_; + JSONCPP_STRING document_; + Location begin_; + Location end_; + Location current_; + Location lastValueEnd_; + Value *lastValue_; + JSONCPP_STRING commentsBefore_; + Features features_; + bool collectComments_; +}; // Reader + +/** Interface for reading JSON from a char array. + */ +class JSON_API CharReader +{ +public: + virtual ~CharReader() {} + /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a> + document. + * The document must be a UTF-8 encoded string containing the document to read. + * + * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the + document to read. + * \param endDoc Pointer on the end of the UTF-8 encoded string of the + document to read. + * Must be >= beginDoc. + * \param root [out] Contains the root value of the document if it was + * successfully parsed. + * \param errs [out] Formatted error messages (if not NULL) + * a user friendly string that lists errors in the parsed + * document. + * \return \c true if the document was successfully parsed, \c false if an + error occurred. + */ + virtual bool parse(char const *beginDoc, char const *endDoc, Value *root, + JSONCPP_STRING *errs) = 0; + + class JSON_API Factory + { + public: + virtual ~Factory() {} + /** \brief Allocate a CharReader via operator new(). + * \throw std::exception if something goes wrong (e.g. invalid settings) + */ + virtual CharReader *newCharReader() const = 0; + }; // Factory +}; // CharReader + +/** \brief Build a CharReader implementation. 
+ +Usage: +\code + using namespace Json; + CharReaderBuilder builder; + builder["collectComments"] = false; + Value value; + JSONCPP_STRING errs; + bool ok = parseFromStream(builder, std::cin, &value, &errs); +\endcode +*/ +class JSON_API CharReaderBuilder : public CharReader::Factory +{ +public: + // Note: We use a Json::Value so that we can add data-members to this class + // without a major version bump. + /** Configuration of this builder. + These are case-sensitive. + Available settings (case-sensitive): + - `"collectComments": false or true` + - true to collect comment and allow writing them + back during serialization, false to discard comments. + This parameter is ignored if allowComments is false. + - `"allowComments": false or true` + - true if comments are allowed. + - `"strictRoot": false or true` + - true if root must be either an array or an object value + - `"allowDroppedNullPlaceholders": false or true` + - true if dropped null placeholders are allowed. (See StreamWriterBuilder.) + - `"allowNumericKeys": false or true` + - true if numeric object keys are allowed. + - `"allowSingleQuotes": false or true` + - true if '' are allowed for strings (both keys and values) + - `"stackLimit": integer` + - Exceeding stackLimit (recursive depth of `readValue()`) will + cause an exception. + - This is a security issue (seg-faults caused by deeply nested JSON), + so the default is low. + - `"failIfExtra": false or true` + - If true, `parse()` returns false when extra non-whitespace trails + the JSON value in the input string. + - `"rejectDupKeys": false or true` + - If true, `parse()` returns false when a key is duplicated within an object. + - `"allowSpecialFloats": false or true` + - If true, special float values (NaNs and infinities) are allowed + and their values are lossfree restorable. + + You can examine 'settings_` yourself + to see the defaults. You can also write and read them just like any + JSON Value. 
+ \sa setDefaults() + */ + Json::Value settings_; + + CharReaderBuilder(); + ~CharReaderBuilder() JSONCPP_OVERRIDE; + + CharReader *newCharReader() const JSONCPP_OVERRIDE; + + /** \return true if 'settings' are legal and consistent; + * otherwise, indicate bad settings via 'invalid'. + */ + bool validate(Json::Value *invalid) const; + + /** A simple way to update a specific setting. + */ + Value &operator[](JSONCPP_STRING key); + + /** Called by ctor, but you can use this to reset settings_. + * \pre 'settings' != NULL (but Json::null is fine) + * \remark Defaults: + * \snippet src/lib_json/json_reader.cpp CharReaderBuilderDefaults + */ + static void setDefaults(Json::Value *settings); + /** Same as old Features::strictMode(). + * \pre 'settings' != NULL (but Json::null is fine) + * \remark Defaults: + * \snippet src/lib_json/json_reader.cpp CharReaderBuilderStrictMode + */ + static void strictMode(Json::Value *settings); +}; + +/** Consume entire stream and use its begin/end. + * Someday we might have a real StreamReader, but for now this + * is convenient. + */ +bool JSON_API parseFromStream(CharReader::Factory const &, JSONCPP_ISTREAM &, Value *root, + std::string *errs); + +/** \brief Read from 'sin' into 'root'. + + Always keep comments from the input JSON. + + This can be used to read a file into a particular sub-object. + For example: + \code + Json::Value root; + cin >> root["dir"]["file"]; + cout << root; + \endcode + Result: + \verbatim + { + "dir": { + "file": { + // The input stream JSON would be nested here. + } + } + } + \endverbatim + \throw std::exception on parse error. 
+ \see Json::operator<<() +*/ +JSON_API JSONCPP_ISTREAM &operator>>(JSONCPP_ISTREAM &, Value &); + +} // namespace Json + +#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) +#pragma warning(pop) +#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) + +#endif // CPPTL_JSON_READER_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/reader.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/writer.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef JSON_WRITER_H_INCLUDED +#define JSON_WRITER_H_INCLUDED + +#if !defined(JSON_IS_AMALGAMATION) +#include "value.h" +#endif // if !defined(JSON_IS_AMALGAMATION) +#include <vector> +#include <string> +#include <ostream> + +// Disable warning C4251: <data member>: <type> needs to have dll-interface to +// be used by... +#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) +#pragma warning(push) +#pragma warning(disable : 4251) +#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) + +namespace Json +{ + +class Value; + +/** + +Usage: +\code + using namespace Json; + void writeToStdout(StreamWriter::Factory const& factory, Value const& value) { + std::unique_ptr<StreamWriter> const writer( + factory.newStreamWriter()); + writer->write(value, &std::cout); + std::cout << std::endl; // add lf and flush + } +\endcode +*/ +class JSON_API StreamWriter +{ +protected: + JSONCPP_OSTREAM *sout_; // not owned; will not delete +public: + StreamWriter(); + virtual ~StreamWriter(); + /** Write Value into document as configured in sub-class. 
+ Do not take ownership of sout, but maintain a reference during function. + \pre sout != NULL + \return zero on success (For now, we always return zero, so check the stream instead.) + \throw std::exception possibly, depending on configuration + */ + virtual int write(Value const &root, JSONCPP_OSTREAM *sout) = 0; + + /** \brief A simple abstract factory. + */ + class JSON_API Factory + { + public: + virtual ~Factory(); + /** \brief Allocate a CharReader via operator new(). + * \throw std::exception if something goes wrong (e.g. invalid settings) + */ + virtual StreamWriter *newStreamWriter() const = 0; + }; // Factory +}; // StreamWriter + +/** \brief Write into stringstream, then return string, for convenience. + * A StreamWriter will be created from the factory, used, and then deleted. + */ +JSONCPP_STRING JSON_API writeString(StreamWriter::Factory const &factory, Value const &root); + +/** \brief Build a StreamWriter implementation. + +Usage: +\code + using namespace Json; + Value value = ...; + StreamWriterBuilder builder; + builder["commentStyle"] = "None"; + builder["indentation"] = " "; // or whatever you like + std::unique_ptr<Json::StreamWriter> writer( + builder.newStreamWriter()); + writer->write(value, &std::cout); + std::cout << std::endl; // add lf and flush +\endcode +*/ +class JSON_API StreamWriterBuilder : public StreamWriter::Factory +{ +public: + // Note: We use a Json::Value so that we can add data-members to this class + // without a major version bump. + /** Configuration of this builder. + Available settings (case-sensitive): + - "commentStyle": "None" or "All" + - "indentation": "<anything>" + - "enableYAMLCompatibility": false or true + - slightly change the whitespace around colons + - "dropNullPlaceholders": false or true + - Drop the "null" string from the writer's output for nullValues. + Strictly speaking, this is not valid JSON. 
But when the output is being + fed to a browser's Javascript, it makes for smaller output and the + browser can handle the output just fine. + - "useSpecialFloats": false or true + - If true, outputs non-finite floating point values in the following way: + NaN values as "NaN", positive infinity as "Infinity", and negative infinity + as "-Infinity". + + You can examine 'settings_` yourself + to see the defaults. You can also write and read them just like any + JSON Value. + \sa setDefaults() + */ + Json::Value settings_; + + StreamWriterBuilder(); + ~StreamWriterBuilder() JSONCPP_OVERRIDE; + + /** + * \throw std::exception if something goes wrong (e.g. invalid settings) + */ + StreamWriter *newStreamWriter() const JSONCPP_OVERRIDE; + + /** \return true if 'settings' are legal and consistent; + * otherwise, indicate bad settings via 'invalid'. + */ + bool validate(Json::Value *invalid) const; + /** A simple way to update a specific setting. + */ + Value &operator[](JSONCPP_STRING key); + + /** Called by ctor, but you can use this to reset settings_. + * \pre 'settings' != NULL (but Json::null is fine) + * \remark Defaults: + * \snippet src/lib_json/json_writer.cpp StreamWriterBuilderDefaults + */ + static void setDefaults(Json::Value *settings); +}; + +/** \brief Abstract class for writers. + * \deprecated Use StreamWriter. (And really, this is an implementation detail.) + */ +class JSON_API Writer +{ +public: + virtual ~Writer(); + + virtual JSONCPP_STRING write(const Value &root) = 0; +}; + +/** \brief Outputs a Value in <a HREF="http://www.json.org">JSON</a> format + *without formatting (not human friendly). + * + * The JSON document is written in a single line. It is not intended for 'human' + *consumption, + * but may be usefull to support feature such as RPC where bandwith is limited. + * \sa Reader, Value + * \deprecated Use StreamWriterBuilder. 
+ */ +class JSON_API FastWriter : public Writer +{ + +public: + FastWriter(); + ~FastWriter() JSONCPP_OVERRIDE {} + + void enableYAMLCompatibility(); + + /** \brief Drop the "null" string from the writer's output for nullValues. + * Strictly speaking, this is not valid JSON. But when the output is being + * fed to a browser's Javascript, it makes for smaller output and the + * browser can handle the output just fine. + */ + void dropNullPlaceholders(); + + void omitEndingLineFeed(); + +public: // overridden from Writer + JSONCPP_STRING write(const Value &root) JSONCPP_OVERRIDE; + +private: + void writeValue(const Value &value); + + JSONCPP_STRING document_; + bool yamlCompatiblityEnabled_; + bool dropNullPlaceholders_; + bool omitEndingLineFeed_; +}; + +/** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a + *human friendly way. + * + * The rules for line break and indent are as follow: + * - Object value: + * - if empty then print {} without indent and line break + * - if not empty the print '{', line break & indent, print one value per + *line + * and then unindent and line break and print '}'. + * - Array value: + * - if empty then print [] without indent and line break + * - if the array contains no object value, empty array or some other value + *types, + * and all the values fit on one lines, then print the array on a single + *line. + * - otherwise, it the values do not fit on one line, or the array contains + * object or non empty array, then print one value per line. + * + * If the Value have comments then they are outputed according to their + *#CommentPlacement. + * + * \sa Reader, Value, Value::setComment() + * \deprecated Use StreamWriterBuilder. + */ +class JSON_API StyledWriter : public Writer +{ +public: + StyledWriter(); + ~StyledWriter() JSONCPP_OVERRIDE {} + +public: // overridden from Writer + /** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format. + * \param root Value to serialize. 
+ * \return String containing the JSON document that represents the root value. + */ + JSONCPP_STRING write(const Value &root) JSONCPP_OVERRIDE; + +private: + void writeValue(const Value &value); + void writeArrayValue(const Value &value); + bool isMultineArray(const Value &value); + void pushValue(const JSONCPP_STRING &value); + void writeIndent(); + void writeWithIndent(const JSONCPP_STRING &value); + void indent(); + void unindent(); + void writeCommentBeforeValue(const Value &root); + void writeCommentAfterValueOnSameLine(const Value &root); + bool hasCommentForValue(const Value &value); + static JSONCPP_STRING normalizeEOL(const JSONCPP_STRING &text); + + typedef std::vector<JSONCPP_STRING> ChildValues; + + ChildValues childValues_; + JSONCPP_STRING document_; + JSONCPP_STRING indentString_; + unsigned int rightMargin_; + unsigned int indentSize_; + bool addChildValues_; +}; + +/** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a + human friendly way, + to a stream rather than to a string. + * + * The rules for line break and indent are as follow: + * - Object value: + * - if empty then print {} without indent and line break + * - if not empty the print '{', line break & indent, print one value per + line + * and then unindent and line break and print '}'. + * - Array value: + * - if empty then print [] without indent and line break + * - if the array contains no object value, empty array or some other value + types, + * and all the values fit on one lines, then print the array on a single + line. + * - otherwise, it the values do not fit on one line, or the array contains + * object or non empty array, then print one value per line. + * + * If the Value have comments then they are outputed according to their + #CommentPlacement. + * + * \param indentation Each level will be indented by this amount extra. + * \sa Reader, Value, Value::setComment() + * \deprecated Use StreamWriterBuilder. 
+ */ +class JSON_API StyledStreamWriter +{ +public: + StyledStreamWriter(JSONCPP_STRING indentation = "\t"); + ~StyledStreamWriter() {} + +public: + /** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format. + * \param out Stream to write to. (Can be ostringstream, e.g.) + * \param root Value to serialize. + * \note There is no point in deriving from Writer, since write() should not + * return a value. + */ + void write(JSONCPP_OSTREAM &out, const Value &root); + +private: + void writeValue(const Value &value); + void writeArrayValue(const Value &value); + bool isMultineArray(const Value &value); + void pushValue(const JSONCPP_STRING &value); + void writeIndent(); + void writeWithIndent(const JSONCPP_STRING &value); + void indent(); + void unindent(); + void writeCommentBeforeValue(const Value &root); + void writeCommentAfterValueOnSameLine(const Value &root); + bool hasCommentForValue(const Value &value); + static JSONCPP_STRING normalizeEOL(const JSONCPP_STRING &text); + + typedef std::vector<JSONCPP_STRING> ChildValues; + + ChildValues childValues_; + JSONCPP_OSTREAM *document_; + JSONCPP_STRING indentString_; + unsigned int rightMargin_; + JSONCPP_STRING indentation_; + bool addChildValues_ : 1; + bool indented_ : 1; +}; + +#if defined(JSON_HAS_INT64) +JSONCPP_STRING JSON_API valueToString(Int value); +JSONCPP_STRING JSON_API valueToString(UInt value); +#endif // if defined(JSON_HAS_INT64) +JSONCPP_STRING JSON_API valueToString(LargestInt value); +JSONCPP_STRING JSON_API valueToString(LargestUInt value); +JSONCPP_STRING JSON_API valueToString(double value); +JSONCPP_STRING JSON_API valueToString(bool value); +JSONCPP_STRING JSON_API valueToQuotedString(const char *value); + +/// \brief Output using the StyledStreamWriter. 
+/// \see Json::operator>>() +JSON_API JSONCPP_OSTREAM &operator<<(JSONCPP_OSTREAM &, const Value &root); + +} // namespace Json + +#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) +#pragma warning(pop) +#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) + +#endif // JSON_WRITER_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/writer.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: include/json/assertions.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef CPPTL_JSON_ASSERTIONS_H_INCLUDED +#define CPPTL_JSON_ASSERTIONS_H_INCLUDED + +#include <stdlib.h> +#include <sstream> + +#if !defined(JSON_IS_AMALGAMATION) +#include "config.h" +#endif // if !defined(JSON_IS_AMALGAMATION) + +/** It should not be possible for a maliciously designed file to + * cause an abort() or seg-fault, so these macros are used only + * for pre-condition violations and internal logic errors. + */ +#if JSON_USE_EXCEPTION + +// @todo <= add detail about condition in exception +#define JSON_ASSERT(condition) \ + { \ + if (!(condition)) \ + { \ + Json::throwLogicError("assert json failed"); \ + } \ + } + +#define JSON_FAIL_MESSAGE(message) \ + { \ + JSONCPP_OSTRINGSTREAM oss; \ + oss << message; \ + Json::throwLogicError(oss.str()); \ + abort(); \ + } + +#else // JSON_USE_EXCEPTION + +#define JSON_ASSERT(condition) assert(condition) + +// The call to assert() will show the failure message in debug builds. In +// release builds we abort, for a core-dump or debugger. 
+#define JSON_FAIL_MESSAGE(message) \ + { \ + JSONCPP_OSTRINGSTREAM oss; \ + oss << message; \ + assert(false && oss.str().c_str()); \ + abort(); \ + } + +#endif + +#define JSON_ASSERT_MESSAGE(condition, message) \ + if (!(condition)) \ + { \ + JSON_FAIL_MESSAGE(message); \ + } + +#endif // CPPTL_JSON_ASSERTIONS_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: include/json/assertions.h +// ////////////////////////////////////////////////////////////////////// + +#endif // ifndef JSON_AMALGATED_H_INCLUDED diff --git a/runtime/libs/jsoncpp/jsoncpp.cpp b/runtime/libs/jsoncpp/jsoncpp.cpp new file mode 100644 index 000000000..5b3cd691d --- /dev/null +++ b/runtime/libs/jsoncpp/jsoncpp.cpp @@ -0,0 +1,5651 @@ +/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/). +/// It is intended to be used with #include "json/json.h" + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: LICENSE +// ////////////////////////////////////////////////////////////////////// + +/* +The JsonCpp library's source code, including accompanying documentation, +tests and demonstration applications, are licensed under the following +conditions... + +The author (Baptiste Lepilleur) explicitly disclaims copyright in all +jurisdictions which recognize such a disclaimer. In such jurisdictions, +this software is released into the Public Domain. + +In jurisdictions which do not recognize Public Domain property (e.g. Germany as of +2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is +released under the terms of the MIT License (see below). + +In jurisdictions which recognize Public Domain property, the user of this +software may choose to accept it either as 1) Public Domain, 2) under the +conditions of the MIT License (see below), or 3) under the terms of dual +Public Domain/MIT License conditions described here, as they choose. 
+ +The MIT License is about as close to Public Domain as a license can get, and is +described in clear, concise terms at: + + http://en.wikipedia.org/wiki/MIT_License + +The full text of the MIT License follows: + +======================================================================== +Copyright (c) 2007-2010 Baptiste Lepilleur + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +======================================================================== +(END LICENSE TEXT) + +The MIT license is compatible with both the GPL and commercial +software, affording one all of the rights of Public Domain with the +minor nuisance of being required to keep the above copyright notice +and license text in the source code. Note also that by accepting the +Public Domain "license" you can re-license your copy using whatever +license you like. 
+ +*/ + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: LICENSE +// ////////////////////////////////////////////////////////////////////// + +#include "json/json.h" + +#ifndef JSON_IS_AMALGAMATION +#error "Compile with -I PATH_TO_JSON_DIRECTORY" +#endif + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: src/lib_json/json_tool.h +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#ifndef LIB_JSONCPP_JSON_TOOL_H_INCLUDED +#define LIB_JSONCPP_JSON_TOOL_H_INCLUDED + +#ifndef NO_LOCALE_SUPPORT +#include <clocale> +#endif + +/* This header provides common string manipulation support, such as UTF-8, + * portable conversion from/to string... + * + * It is an internal header that must not be exposed. + */ + +namespace Json +{ +static char getDecimalPoint() +{ +#ifdef NO_LOCALE_SUPPORT + return '\0'; +#else + struct lconv *lc = localeconv(); + return lc ? *(lc->decimal_point) : '\0'; +#endif +} + +/// Converts a unicode code-point to UTF-8. 
+static inline JSONCPP_STRING codePointToUTF8(unsigned int cp)
+{
+  JSONCPP_STRING result;
+
+  // based on description from http://en.wikipedia.org/wiki/UTF-8
+
+  if (cp <= 0x7f)
+  {
+    result.resize(1);
+    result[0] = static_cast<char>(cp);
+  }
+  else if (cp <= 0x7FF)
+  {
+    result.resize(2);
+    result[1] = static_cast<char>(0x80 | (0x3f & cp));
+    result[0] = static_cast<char>(0xC0 | (0x1f & (cp >> 6)));
+  }
+  else if (cp <= 0xFFFF)
+  {
+    result.resize(3);
+    result[2] = static_cast<char>(0x80 | (0x3f & cp));
+    result[1] = static_cast<char>(0x80 | (0x3f & (cp >> 6)));
+    result[0] = static_cast<char>(0xE0 | (0xf & (cp >> 12)));
+  }
+  else if (cp <= 0x10FFFF)
+  {
+    result.resize(4);
+    result[3] = static_cast<char>(0x80 | (0x3f & cp));
+    result[2] = static_cast<char>(0x80 | (0x3f & (cp >> 6)));
+    result[1] = static_cast<char>(0x80 | (0x3f & (cp >> 12)));
+    result[0] = static_cast<char>(0xF0 | (0x7 & (cp >> 18)));
+  }
+
+  return result;
+}
+
+/// Returns true if ch is a control character (in range [1,31]).
+static inline bool isControlCharacter(char ch) { return ch > 0 && ch <= 0x1F; }
+
+enum
+{
+  /// Constant that specifies the size of the buffer that must be passed to
+  /// uintToString.
+  uintToStringBufferSize = 3 * sizeof(LargestUInt) + 1
+};
+
+// Defines a char buffer for use with uintToString().
+typedef char UIntToStringBuffer[uintToStringBufferSize];
+
+/** Converts an unsigned integer to string.
+ * @param value Unsigned integer to convert to string
+ * @param current Input/Output string buffer.
+ *        Must have at least uintToStringBufferSize chars free.
+ */
+static inline void uintToString(LargestUInt value, char *&current)
+{
+  *--current = 0;
+  do
+  {
+    *--current = static_cast<char>(value % 10U + static_cast<unsigned>('0'));
+    value /= 10;
+  } while (value != 0);
+}
+
+/** Change ',' to '.' everywhere in buffer.
+ *
+ * We had a sophisticated way, but it did not work in WinCE.
+ * @see https://github.com/open-source-parsers/jsoncpp/pull/9 + */ +static inline void fixNumericLocale(char *begin, char *end) +{ + while (begin < end) + { + if (*begin == ',') + { + *begin = '.'; + } + ++begin; + } +} + +static inline void fixNumericLocaleInput(char *begin, char *end) +{ + char decimalPoint = getDecimalPoint(); + if (decimalPoint != '\0' && decimalPoint != '.') + { + while (begin < end) + { + if (*begin == '.') + { + *begin = decimalPoint; + } + ++begin; + } + } +} + +} // namespace Json { + +#endif // LIB_JSONCPP_JSON_TOOL_H_INCLUDED + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: src/lib_json/json_tool.h +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: src/lib_json/json_reader.cpp +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2011 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. 
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#if !defined(JSON_IS_AMALGAMATION) +#include <json/assertions.h> +#include <json/reader.h> +#include <json/value.h> +#include "json_tool.h" +#endif // if !defined(JSON_IS_AMALGAMATION) +#include <utility> +#include <cstdio> +#include <cassert> +#include <cstring> +#include <istream> +#include <sstream> +#include <memory> +#include <set> +#include <limits> + +#if defined(_MSC_VER) +#if !defined(WINCE) && defined(__STDC_SECURE_LIB__) && _MSC_VER >= 1500 // VC++ 9.0 and above +#define snprintf sprintf_s +#elif _MSC_VER >= 1900 // VC++ 14.0 and above +#define snprintf std::snprintf +#else +#define snprintf _snprintf +#endif +#elif defined(__ANDROID__) || defined(__QNXNTO__) +#define snprintf snprintf +#elif __cplusplus >= 201103L +#if !defined(__MINGW32__) && !defined(__CYGWIN__) +#define snprintf std::snprintf +#endif +#endif + +#if defined(__QNXNTO__) +#define sscanf std::sscanf +#endif + +#if defined(_MSC_VER) && _MSC_VER >= 1400 // VC++ 8.0 +// Disable warning about strdup being deprecated. 
+#pragma warning(disable : 4996) +#endif + +static int const stackLimit_g = 1000; +static int stackDepth_g = 0; // see readValue() + +namespace Json +{ + +#if __cplusplus >= 201103L || (defined(_CPPLIB_VER) && _CPPLIB_VER >= 520) +typedef std::unique_ptr<CharReader> CharReaderPtr; +#else +typedef std::auto_ptr<CharReader> CharReaderPtr; +#endif + +// Implementation of class Features +// //////////////////////////////// + +Features::Features() + : allowComments_(true), strictRoot_(false), allowDroppedNullPlaceholders_(false), + allowNumericKeys_(false) +{ +} + +Features Features::all() { return Features(); } + +Features Features::strictMode() +{ + Features features; + features.allowComments_ = false; + features.strictRoot_ = true; + features.allowDroppedNullPlaceholders_ = false; + features.allowNumericKeys_ = false; + return features; +} + +// Implementation of class Reader +// //////////////////////////////// + +static bool containsNewLine(Reader::Location begin, Reader::Location end) +{ + for (; begin < end; ++begin) + if (*begin == '\n' || *begin == '\r') + return true; + return false; +} + +// Class Reader +// ////////////////////////////////////////////////////////////////// + +Reader::Reader() + : errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(), lastValue_(), + commentsBefore_(), features_(Features::all()), collectComments_() +{ +} + +Reader::Reader(const Features &features) + : errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(), lastValue_(), + commentsBefore_(), features_(features), collectComments_() +{ +} + +bool Reader::parse(const std::string &document, Value &root, bool collectComments) +{ + JSONCPP_STRING documentCopy(document.data(), document.data() + document.capacity()); + std::swap(documentCopy, document_); + const char *begin = document_.c_str(); + const char *end = begin + document_.length(); + return parse(begin, end, root, collectComments); +} + +bool Reader::parse(std::istream &sin, Value &root, bool 
collectComments) +{ + // std::istream_iterator<char> begin(sin); + // std::istream_iterator<char> end; + // Those would allow streamed input from a file, if parse() were a + // template function. + + // Since JSONCPP_STRING is reference-counted, this at least does not + // create an extra copy. + JSONCPP_STRING doc; + std::getline(sin, doc, (char)EOF); + return parse(doc.data(), doc.data() + doc.size(), root, collectComments); +} + +bool Reader::parse(const char *beginDoc, const char *endDoc, Value &root, bool collectComments) +{ + if (!features_.allowComments_) + { + collectComments = false; + } + + begin_ = beginDoc; + end_ = endDoc; + collectComments_ = collectComments; + current_ = begin_; + lastValueEnd_ = 0; + lastValue_ = 0; + commentsBefore_ = ""; + errors_.clear(); + while (!nodes_.empty()) + nodes_.pop(); + nodes_.push(&root); + + stackDepth_g = 0; // Yes, this is bad coding, but options are limited. + bool successful = readValue(); + Token token; + skipCommentTokens(token); + if (collectComments_ && !commentsBefore_.empty()) + root.setComment(commentsBefore_, commentAfter); + if (features_.strictRoot_) + { + if (!root.isArray() && !root.isObject()) + { + // Set error location to start of doc, ideally should be first token found + // in doc + token.type_ = tokenError; + token.start_ = beginDoc; + token.end_ = endDoc; + addError("A valid JSON document must be either an array or an object value.", token); + return false; + } + } + return successful; +} + +bool Reader::readValue() +{ + // This is a non-reentrant way to support a stackLimit. Terrible! + // But this deprecated class has a security problem: Bad input can + // cause a seg-fault. This seems like a fair, binary-compatible way + // to prevent the problem. 
+ if (stackDepth_g >= stackLimit_g) + throwRuntimeError("Exceeded stackLimit in readValue()."); + ++stackDepth_g; + + Token token; + skipCommentTokens(token); + bool successful = true; + + if (collectComments_ && !commentsBefore_.empty()) + { + currentValue().setComment(commentsBefore_, commentBefore); + commentsBefore_ = ""; + } + + switch (token.type_) + { + case tokenObjectBegin: + successful = readObject(token); + currentValue().setOffsetLimit(current_ - begin_); + break; + case tokenArrayBegin: + successful = readArray(token); + currentValue().setOffsetLimit(current_ - begin_); + break; + case tokenNumber: + successful = decodeNumber(token); + break; + case tokenString: + successful = decodeString(token); + break; + case tokenTrue: + { + Value v(true); + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenFalse: + { + Value v(false); + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenNull: + { + Value v; + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenArraySeparator: + case tokenObjectEnd: + case tokenArrayEnd: + if (features_.allowDroppedNullPlaceholders_) + { + // "Un-read" the current token and mark the current value as a null + // token. + current_--; + Value v; + currentValue().swapPayload(v); + currentValue().setOffsetStart(current_ - begin_ - 1); + currentValue().setOffsetLimit(current_ - begin_); + break; + } // Else, fall through... 
+  default:
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+    return addError("Syntax error: value, object or array expected.", token);
+  }
+
+  if (collectComments_)
+  {
+    lastValueEnd_ = current_;
+    lastValue_ = &currentValue();
+  }
+
+  --stackDepth_g;
+  return successful;
+}
+
+void Reader::skipCommentTokens(Token &token)
+{
+  if (features_.allowComments_)
+  {
+    do
+    {
+      readToken(token);
+    } while (token.type_ == tokenComment);
+  }
+  else
+  {
+    readToken(token);
+  }
+}
+
+bool Reader::readToken(Token &token)
+{
+  skipSpaces();
+  token.start_ = current_;
+  Char c = getNextChar();
+  bool ok = true;
+  switch (c)
+  {
+  case '{':
+    token.type_ = tokenObjectBegin;
+    break;
+  case '}':
+    token.type_ = tokenObjectEnd;
+    break;
+  case '[':
+    token.type_ = tokenArrayBegin;
+    break;
+  case ']':
+    token.type_ = tokenArrayEnd;
+    break;
+  case '"':
+    token.type_ = tokenString;
+    ok = readString();
+    break;
+  case '/':
+    token.type_ = tokenComment;
+    ok = readComment();
+    break;
+  case '0':
+  case '1':
+  case '2':
+  case '3':
+  case '4':
+  case '5':
+  case '6':
+  case '7':
+  case '8':
+  case '9':
+  case '-':
+    token.type_ = tokenNumber;
+    readNumber();
+    break;
+  case 't':
+    token.type_ = tokenTrue;
+    ok = match("rue", 3);
+    break;
+  case 'f':
+    token.type_ = tokenFalse;
+    ok = match("alse", 4);
+    break;
+  case 'n':
+    token.type_ = tokenNull;
+    ok = match("ull", 3);
+    break;
+  case ',':
+    token.type_ = tokenArraySeparator;
+    break;
+  case ':':
+    token.type_ = tokenMemberSeparator;
+    break;
+  case 0:
+    token.type_ = tokenEndOfStream;
+    break;
+  default:
+    ok = false;
+    break;
+  }
+  if (!ok)
+    token.type_ = tokenError;
+  token.end_ = current_;
+  return true;
+}
+
+void Reader::skipSpaces()
+{
+  while (current_ != end_)
+  {
+    Char c = *current_;
+    if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
+      ++current_;
+    else
+      break;
+  }
+}
+
+bool Reader::match(Location pattern, int patternLength)
+{
+  if (end_ - current_ <
patternLength) + return false; + int index = patternLength; + while (index--) + if (current_[index] != pattern[index]) + return false; + current_ += patternLength; + return true; +} + +bool Reader::readComment() +{ + Location commentBegin = current_ - 1; + Char c = getNextChar(); + bool successful = false; + if (c == '*') + successful = readCStyleComment(); + else if (c == '/') + successful = readCppStyleComment(); + if (!successful) + return false; + + if (collectComments_) + { + CommentPlacement placement = commentBefore; + if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) + { + if (c != '*' || !containsNewLine(commentBegin, current_)) + placement = commentAfterOnSameLine; + } + + addComment(commentBegin, current_, placement); + } + return true; +} + +static JSONCPP_STRING normalizeEOL(Reader::Location begin, Reader::Location end) +{ + JSONCPP_STRING normalized; + normalized.reserve(static_cast<size_t>(end - begin)); + Reader::Location current = begin; + while (current != end) + { + char c = *current++; + if (c == '\r') + { + if (current != end && *current == '\n') + // convert dos EOL + ++current; + // convert Mac EOL + normalized += '\n'; + } + else + { + normalized += c; + } + } + return normalized; +} + +void Reader::addComment(Location begin, Location end, CommentPlacement placement) +{ + assert(collectComments_); + const JSONCPP_STRING &normalized = normalizeEOL(begin, end); + if (placement == commentAfterOnSameLine) + { + assert(lastValue_ != 0); + lastValue_->setComment(normalized, placement); + } + else + { + commentsBefore_ += normalized; + } +} + +bool Reader::readCStyleComment() +{ + while ((current_ + 1) < end_) + { + Char c = getNextChar(); + if (c == '*' && *current_ == '/') + break; + } + return getNextChar() == '/'; +} + +bool Reader::readCppStyleComment() +{ + while (current_ != end_) + { + Char c = getNextChar(); + if (c == '\n') + break; + if (c == '\r') + { + // Consume DOS EOL. It will be normalized in addComment. 
+ if (current_ != end_ && *current_ == '\n') + getNextChar(); + // Break on Moc OS 9 EOL. + break; + } + } + return true; +} + +void Reader::readNumber() +{ + const char *p = current_; + char c = '0'; // stopgap for already consumed character + // integral part + while (c >= '0' && c <= '9') + c = (current_ = p) < end_ ? *p++ : '\0'; + // fractional part + if (c == '.') + { + c = (current_ = p) < end_ ? *p++ : '\0'; + while (c >= '0' && c <= '9') + c = (current_ = p) < end_ ? *p++ : '\0'; + } + // exponential part + if (c == 'e' || c == 'E') + { + c = (current_ = p) < end_ ? *p++ : '\0'; + if (c == '+' || c == '-') + c = (current_ = p) < end_ ? *p++ : '\0'; + while (c >= '0' && c <= '9') + c = (current_ = p) < end_ ? *p++ : '\0'; + } +} + +bool Reader::readString() +{ + Char c = '\0'; + while (current_ != end_) + { + c = getNextChar(); + if (c == '\\') + getNextChar(); + else if (c == '"') + break; + } + return c == '"'; +} + +bool Reader::readObject(Token &tokenStart) +{ + Token tokenName; + JSONCPP_STRING name; + Value init(objectValue); + currentValue().swapPayload(init); + currentValue().setOffsetStart(tokenStart.start_ - begin_); + while (readToken(tokenName)) + { + bool initialTokenOk = true; + while (tokenName.type_ == tokenComment && initialTokenOk) + initialTokenOk = readToken(tokenName); + if (!initialTokenOk) + break; + if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object + return true; + name = ""; + if (tokenName.type_ == tokenString) + { + if (!decodeString(tokenName, name)) + return recoverFromError(tokenObjectEnd); + } + else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) + { + Value numberName; + if (!decodeNumber(tokenName, numberName)) + return recoverFromError(tokenObjectEnd); + name = JSONCPP_STRING(numberName.asCString()); + } + else + { + break; + } + + Token colon; + if (!readToken(colon) || colon.type_ != tokenMemberSeparator) + { + return addErrorAndRecover("Missing ':' after object member name", 
colon, tokenObjectEnd); + } + Value &value = currentValue()[name]; + nodes_.push(&value); + bool ok = readValue(); + nodes_.pop(); + if (!ok) // error already set + return recoverFromError(tokenObjectEnd); + + Token comma; + if (!readToken(comma) || (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator && + comma.type_ != tokenComment)) + { + return addErrorAndRecover("Missing ',' or '}' in object declaration", comma, tokenObjectEnd); + } + bool finalizeTokenOk = true; + while (comma.type_ == tokenComment && finalizeTokenOk) + finalizeTokenOk = readToken(comma); + if (comma.type_ == tokenObjectEnd) + return true; + } + return addErrorAndRecover("Missing '}' or object member name", tokenName, tokenObjectEnd); +} + +bool Reader::readArray(Token &tokenStart) +{ + Value init(arrayValue); + currentValue().swapPayload(init); + currentValue().setOffsetStart(tokenStart.start_ - begin_); + skipSpaces(); + if (current_ != end_ && *current_ == ']') // empty array + { + Token endArray; + readToken(endArray); + return true; + } + int index = 0; + for (;;) + { + Value &value = currentValue()[index++]; + nodes_.push(&value); + bool ok = readValue(); + nodes_.pop(); + if (!ok) // error already set + return recoverFromError(tokenArrayEnd); + + Token token; + // Accept Comment after last item in the array. 
+ ok = readToken(token); + while (token.type_ == tokenComment && ok) + { + ok = readToken(token); + } + bool badTokenType = (token.type_ != tokenArraySeparator && token.type_ != tokenArrayEnd); + if (!ok || badTokenType) + { + return addErrorAndRecover("Missing ',' or ']' in array declaration", token, tokenArrayEnd); + } + if (token.type_ == tokenArrayEnd) + break; + } + return true; +} + +bool Reader::decodeNumber(Token &token) +{ + Value decoded; + if (!decodeNumber(token, decoded)) + return false; + currentValue().swapPayload(decoded); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + return true; +} + +bool Reader::decodeNumber(Token &token, Value &decoded) +{ + // Attempts to parse the number as an integer. If the number is + // larger than the maximum supported value of an integer then + // we decode the number as a double. + Location current = token.start_; + bool isNegative = *current == '-'; + if (isNegative) + ++current; + // TODO: Help the compiler do the div and mod at compile time or get rid of them. + Value::LargestUInt maxIntegerValue = + isNegative ? Value::LargestUInt(Value::maxLargestInt) + 1 : Value::maxLargestUInt; + Value::LargestUInt threshold = maxIntegerValue / 10; + Value::LargestUInt value = 0; + while (current < token.end_) + { + Char c = *current++; + if (c < '0' || c > '9') + return decodeDouble(token, decoded); + Value::UInt digit(static_cast<Value::UInt>(c - '0')); + if (value >= threshold) + { + // We've hit or exceeded the max value divided by 10 (rounded down). If + // a) we've only just touched the limit, b) this is the last digit, and + // c) it's small enough to fit in that rounding delta, we're okay. + // Otherwise treat this number as a double to avoid overflow. 
+ if (value > threshold || current != token.end_ || digit > maxIntegerValue % 10) + { + return decodeDouble(token, decoded); + } + } + value = value * 10 + digit; + } + if (isNegative && value == maxIntegerValue) + decoded = Value::minLargestInt; + else if (isNegative) + decoded = -Value::LargestInt(value); + else if (value <= Value::LargestUInt(Value::maxInt)) + decoded = Value::LargestInt(value); + else + decoded = value; + return true; +} + +bool Reader::decodeDouble(Token &token) +{ + Value decoded; + if (!decodeDouble(token, decoded)) + return false; + currentValue().swapPayload(decoded); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + return true; +} + +bool Reader::decodeDouble(Token &token, Value &decoded) +{ + double value = 0; + JSONCPP_STRING buffer(token.start_, token.end_); + JSONCPP_ISTRINGSTREAM is(buffer); + if (!(is >> value)) + return addError("'" + JSONCPP_STRING(token.start_, token.end_) + "' is not a number.", token); + decoded = value; + return true; +} + +bool Reader::decodeString(Token &token) +{ + JSONCPP_STRING decoded_string; + if (!decodeString(token, decoded_string)) + return false; + Value decoded(decoded_string); + currentValue().swapPayload(decoded); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + return true; +} + +bool Reader::decodeString(Token &token, JSONCPP_STRING &decoded) +{ + decoded.reserve(static_cast<size_t>(token.end_ - token.start_ - 2)); + Location current = token.start_ + 1; // skip '"' + Location end = token.end_ - 1; // do not include '"' + while (current != end) + { + Char c = *current++; + if (c == '"') + break; + else if (c == '\\') + { + if (current == end) + return addError("Empty escape sequence in string", token, current); + Char escape = *current++; + switch (escape) + { + case '"': + decoded += '"'; + break; + case '/': + decoded += '/'; + break; + case '\\': + decoded += '\\'; + 
break; + case 'b': + decoded += '\b'; + break; + case 'f': + decoded += '\f'; + break; + case 'n': + decoded += '\n'; + break; + case 'r': + decoded += '\r'; + break; + case 't': + decoded += '\t'; + break; + case 'u': + { + unsigned int unicode; + if (!decodeUnicodeCodePoint(token, current, end, unicode)) + return false; + decoded += codePointToUTF8(unicode); + } + break; + default: + return addError("Bad escape sequence in string", token, current); + } + } + else + { + decoded += c; + } + } + return true; +} + +bool Reader::decodeUnicodeCodePoint(Token &token, Location ¤t, Location end, + unsigned int &unicode) +{ + + if (!decodeUnicodeEscapeSequence(token, current, end, unicode)) + return false; + if (unicode >= 0xD800 && unicode <= 0xDBFF) + { + // surrogate pairs + if (end - current < 6) + return addError("additional six characters expected to parse unicode surrogate pair.", token, + current); + unsigned int surrogatePair; + if (*(current++) == '\\' && *(current++) == 'u') + { + if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) + { + unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF); + } + else + return false; + } + else + return addError("expecting another \\u token to begin the second half of " + "a unicode surrogate pair", + token, current); + } + return true; +} + +bool Reader::decodeUnicodeEscapeSequence(Token &token, Location ¤t, Location end, + unsigned int &ret_unicode) +{ + if (end - current < 4) + return addError("Bad unicode escape sequence in string: four digits expected.", token, current); + int unicode = 0; + for (int index = 0; index < 4; ++index) + { + Char c = *current++; + unicode *= 16; + if (c >= '0' && c <= '9') + unicode += c - '0'; + else if (c >= 'a' && c <= 'f') + unicode += c - 'a' + 10; + else if (c >= 'A' && c <= 'F') + unicode += c - 'A' + 10; + else + return addError("Bad unicode escape sequence in string: hexadecimal digit expected.", token, + current); + } + ret_unicode = 
static_cast<unsigned int>(unicode); + return true; +} + +bool Reader::addError(const JSONCPP_STRING &message, Token &token, Location extra) +{ + ErrorInfo info; + info.token_ = token; + info.message_ = message; + info.extra_ = extra; + errors_.push_back(info); + return false; +} + +bool Reader::recoverFromError(TokenType skipUntilToken) +{ + size_t const errorCount = errors_.size(); + Token skip; + for (;;) + { + if (!readToken(skip)) + errors_.resize(errorCount); // discard errors caused by recovery + if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream) + break; + } + errors_.resize(errorCount); + return false; +} + +bool Reader::addErrorAndRecover(const JSONCPP_STRING &message, Token &token, + TokenType skipUntilToken) +{ + addError(message, token); + return recoverFromError(skipUntilToken); +} + +Value &Reader::currentValue() { return *(nodes_.top()); } + +Reader::Char Reader::getNextChar() +{ + if (current_ == end_) + return 0; + return *current_++; +} + +void Reader::getLocationLineAndColumn(Location location, int &line, int &column) const +{ + Location current = begin_; + Location lastLineStart = current; + line = 0; + while (current < location && current != end_) + { + Char c = *current++; + if (c == '\r') + { + if (*current == '\n') + ++current; + lastLineStart = current; + ++line; + } + else if (c == '\n') + { + lastLineStart = current; + ++line; + } + } + // column & line start at 1 + column = int(location - lastLineStart) + 1; + ++line; +} + +JSONCPP_STRING Reader::getLocationLineAndColumn(Location location) const +{ + int line, column; + getLocationLineAndColumn(location, line, column); + char buffer[18 + 16 + 16 + 1]; + snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column); + return buffer; +} + +// Deprecated. 
Preserved for backward compatibility +JSONCPP_STRING Reader::getFormatedErrorMessages() const { return getFormattedErrorMessages(); } + +JSONCPP_STRING Reader::getFormattedErrorMessages() const +{ + JSONCPP_STRING formattedMessage; + for (Errors::const_iterator itError = errors_.begin(); itError != errors_.end(); ++itError) + { + const ErrorInfo &error = *itError; + formattedMessage += "* " + getLocationLineAndColumn(error.token_.start_) + "\n"; + formattedMessage += " " + error.message_ + "\n"; + if (error.extra_) + formattedMessage += "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n"; + } + return formattedMessage; +} + +std::vector<Reader::StructuredError> Reader::getStructuredErrors() const +{ + std::vector<Reader::StructuredError> allErrors; + for (Errors::const_iterator itError = errors_.begin(); itError != errors_.end(); ++itError) + { + const ErrorInfo &error = *itError; + Reader::StructuredError structured; + structured.offset_start = error.token_.start_ - begin_; + structured.offset_limit = error.token_.end_ - begin_; + structured.message = error.message_; + allErrors.push_back(structured); + } + return allErrors; +} + +bool Reader::pushError(const Value &value, const JSONCPP_STRING &message) +{ + ptrdiff_t const length = end_ - begin_; + if (value.getOffsetStart() > length || value.getOffsetLimit() > length) + return false; + Token token; + token.type_ = tokenError; + token.start_ = begin_ + value.getOffsetStart(); + token.end_ = end_ + value.getOffsetLimit(); + ErrorInfo info; + info.token_ = token; + info.message_ = message; + info.extra_ = 0; + errors_.push_back(info); + return true; +} + +bool Reader::pushError(const Value &value, const JSONCPP_STRING &message, const Value &extra) +{ + ptrdiff_t const length = end_ - begin_; + if (value.getOffsetStart() > length || value.getOffsetLimit() > length || + extra.getOffsetLimit() > length) + return false; + Token token; + token.type_ = tokenError; + token.start_ = begin_ + 
value.getOffsetStart(); + token.end_ = begin_ + value.getOffsetLimit(); + ErrorInfo info; + info.token_ = token; + info.message_ = message; + info.extra_ = begin_ + extra.getOffsetStart(); + errors_.push_back(info); + return true; +} + +bool Reader::good() const { return !errors_.size(); } + +// exact copy of Features +class OurFeatures +{ +public: + static OurFeatures all(); + bool allowComments_; + bool strictRoot_; + bool allowDroppedNullPlaceholders_; + bool allowNumericKeys_; + bool allowSingleQuotes_; + bool failIfExtra_; + bool rejectDupKeys_; + bool allowSpecialFloats_; + int stackLimit_; +}; // OurFeatures + +// exact copy of Implementation of class Features +// //////////////////////////////// + +OurFeatures OurFeatures::all() { return OurFeatures(); } + +// Implementation of class Reader +// //////////////////////////////// + +// exact copy of Reader, renamed to OurReader +class OurReader +{ +public: + typedef char Char; + typedef const Char *Location; + struct StructuredError + { + ptrdiff_t offset_start; + ptrdiff_t offset_limit; + JSONCPP_STRING message; + }; + + OurReader(OurFeatures const &features); + bool parse(const char *beginDoc, const char *endDoc, Value &root, bool collectComments = true); + JSONCPP_STRING getFormattedErrorMessages() const; + std::vector<StructuredError> getStructuredErrors() const; + bool pushError(const Value &value, const JSONCPP_STRING &message); + bool pushError(const Value &value, const JSONCPP_STRING &message, const Value &extra); + bool good() const; + +private: + OurReader(OurReader const &); // no impl + void operator=(OurReader const &); // no impl + + enum TokenType + { + tokenEndOfStream = 0, + tokenObjectBegin, + tokenObjectEnd, + tokenArrayBegin, + tokenArrayEnd, + tokenString, + tokenNumber, + tokenTrue, + tokenFalse, + tokenNull, + tokenNaN, + tokenPosInf, + tokenNegInf, + tokenArraySeparator, + tokenMemberSeparator, + tokenComment, + tokenError + }; + + class Token + { + public: + TokenType type_; + Location 
start_; + Location end_; + }; + + class ErrorInfo + { + public: + Token token_; + JSONCPP_STRING message_; + Location extra_; + }; + + typedef std::deque<ErrorInfo> Errors; + + bool readToken(Token &token); + void skipSpaces(); + bool match(Location pattern, int patternLength); + bool readComment(); + bool readCStyleComment(); + bool readCppStyleComment(); + bool readString(); + bool readStringSingleQuote(); + bool readNumber(bool checkInf); + bool readValue(); + bool readObject(Token &token); + bool readArray(Token &token); + bool decodeNumber(Token &token); + bool decodeNumber(Token &token, Value &decoded); + bool decodeString(Token &token); + bool decodeString(Token &token, JSONCPP_STRING &decoded); + bool decodeDouble(Token &token); + bool decodeDouble(Token &token, Value &decoded); + bool decodeUnicodeCodePoint(Token &token, Location ¤t, Location end, unsigned int &unicode); + bool decodeUnicodeEscapeSequence(Token &token, Location ¤t, Location end, + unsigned int &unicode); + bool addError(const JSONCPP_STRING &message, Token &token, Location extra = 0); + bool recoverFromError(TokenType skipUntilToken); + bool addErrorAndRecover(const JSONCPP_STRING &message, Token &token, TokenType skipUntilToken); + void skipUntilSpace(); + Value ¤tValue(); + Char getNextChar(); + void getLocationLineAndColumn(Location location, int &line, int &column) const; + JSONCPP_STRING getLocationLineAndColumn(Location location) const; + void addComment(Location begin, Location end, CommentPlacement placement); + void skipCommentTokens(Token &token); + + typedef std::stack<Value *> Nodes; + Nodes nodes_; + Errors errors_; + JSONCPP_STRING document_; + Location begin_; + Location end_; + Location current_; + Location lastValueEnd_; + Value *lastValue_; + JSONCPP_STRING commentsBefore_; + int stackDepth_; + + OurFeatures const features_; + bool collectComments_; +}; // OurReader + +// complete copy of Read impl, for OurReader + +OurReader::OurReader(OurFeatures const &features) + : 
errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(), lastValue_(), + commentsBefore_(), stackDepth_(0), features_(features), collectComments_() +{ +} + +bool OurReader::parse(const char *beginDoc, const char *endDoc, Value &root, bool collectComments) +{ + if (!features_.allowComments_) + { + collectComments = false; + } + + begin_ = beginDoc; + end_ = endDoc; + collectComments_ = collectComments; + current_ = begin_; + lastValueEnd_ = 0; + lastValue_ = 0; + commentsBefore_ = ""; + errors_.clear(); + while (!nodes_.empty()) + nodes_.pop(); + nodes_.push(&root); + + stackDepth_ = 0; + bool successful = readValue(); + Token token; + skipCommentTokens(token); + if (features_.failIfExtra_) + { + if ((features_.strictRoot_ || token.type_ != tokenError) && token.type_ != tokenEndOfStream) + { + addError("Extra non-whitespace after JSON value.", token); + return false; + } + } + if (collectComments_ && !commentsBefore_.empty()) + root.setComment(commentsBefore_, commentAfter); + if (features_.strictRoot_) + { + if (!root.isArray() && !root.isObject()) + { + // Set error location to start of doc, ideally should be first token found + // in doc + token.type_ = tokenError; + token.start_ = beginDoc; + token.end_ = endDoc; + addError("A valid JSON document must be either an array or an object value.", token); + return false; + } + } + return successful; +} + +bool OurReader::readValue() +{ + if (stackDepth_ >= features_.stackLimit_) + throwRuntimeError("Exceeded stackLimit in readValue()."); + ++stackDepth_; + Token token; + skipCommentTokens(token); + bool successful = true; + + if (collectComments_ && !commentsBefore_.empty()) + { + currentValue().setComment(commentsBefore_, commentBefore); + commentsBefore_ = ""; + } + + switch (token.type_) + { + case tokenObjectBegin: + successful = readObject(token); + currentValue().setOffsetLimit(current_ - begin_); + break; + case tokenArrayBegin: + successful = readArray(token); + 
currentValue().setOffsetLimit(current_ - begin_); + break; + case tokenNumber: + successful = decodeNumber(token); + break; + case tokenString: + successful = decodeString(token); + break; + case tokenTrue: + { + Value v(true); + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenFalse: + { + Value v(false); + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenNull: + { + Value v; + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenNaN: + { + Value v(std::numeric_limits<double>::quiet_NaN()); + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenPosInf: + { + Value v(std::numeric_limits<double>::infinity()); + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenNegInf: + { + Value v(-std::numeric_limits<double>::infinity()); + currentValue().swapPayload(v); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + } + break; + case tokenArraySeparator: + case tokenObjectEnd: + case tokenArrayEnd: + if (features_.allowDroppedNullPlaceholders_) + { + // "Un-read" the current token and mark the current value as a null + // token. + current_--; + Value v; + currentValue().swapPayload(v); + currentValue().setOffsetStart(current_ - begin_ - 1); + currentValue().setOffsetLimit(current_ - begin_); + break; + } // else, fall through ... 
+ default: + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + return addError("Syntax error: value, object or array expected.", token); + } + + if (collectComments_) + { + lastValueEnd_ = current_; + lastValue_ = ¤tValue(); + } + + --stackDepth_; + return successful; +} + +void OurReader::skipCommentTokens(Token &token) +{ + if (features_.allowComments_) + { + do + { + readToken(token); + } while (token.type_ == tokenComment); + } + else + { + readToken(token); + } +} + +bool OurReader::readToken(Token &token) +{ + skipSpaces(); + token.start_ = current_; + Char c = getNextChar(); + bool ok = true; + switch (c) + { + case '{': + token.type_ = tokenObjectBegin; + break; + case '}': + token.type_ = tokenObjectEnd; + break; + case '[': + token.type_ = tokenArrayBegin; + break; + case ']': + token.type_ = tokenArrayEnd; + break; + case '"': + token.type_ = tokenString; + ok = readString(); + break; + case '\'': + if (features_.allowSingleQuotes_) + { + token.type_ = tokenString; + ok = readStringSingleQuote(); + break; + } // else continue + case '/': + token.type_ = tokenComment; + ok = readComment(); + break; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + token.type_ = tokenNumber; + readNumber(false); + break; + case '-': + if (readNumber(true)) + { + token.type_ = tokenNumber; + } + else + { + token.type_ = tokenNegInf; + ok = features_.allowSpecialFloats_ && match("nfinity", 7); + } + break; + case 't': + token.type_ = tokenTrue; + ok = match("rue", 3); + break; + case 'f': + token.type_ = tokenFalse; + ok = match("alse", 4); + break; + case 'n': + token.type_ = tokenNull; + ok = match("ull", 3); + break; + case 'N': + if (features_.allowSpecialFloats_) + { + token.type_ = tokenNaN; + ok = match("aN", 2); + } + else + { + ok = false; + } + break; + case 'I': + if (features_.allowSpecialFloats_) + { + token.type_ = tokenPosInf; + 
ok = match("nfinity", 7); + } + else + { + ok = false; + } + break; + case ',': + token.type_ = tokenArraySeparator; + break; + case ':': + token.type_ = tokenMemberSeparator; + break; + case 0: + token.type_ = tokenEndOfStream; + break; + default: + ok = false; + break; + } + if (!ok) + token.type_ = tokenError; + token.end_ = current_; + return true; +} + +void OurReader::skipSpaces() +{ + while (current_ != end_) + { + Char c = *current_; + if (c == ' ' || c == '\t' || c == '\r' || c == '\n') + ++current_; + else + break; + } +} + +bool OurReader::match(Location pattern, int patternLength) +{ + if (end_ - current_ < patternLength) + return false; + int index = patternLength; + while (index--) + if (current_[index] != pattern[index]) + return false; + current_ += patternLength; + return true; +} + +bool OurReader::readComment() +{ + Location commentBegin = current_ - 1; + Char c = getNextChar(); + bool successful = false; + if (c == '*') + successful = readCStyleComment(); + else if (c == '/') + successful = readCppStyleComment(); + if (!successful) + return false; + + if (collectComments_) + { + CommentPlacement placement = commentBefore; + if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) + { + if (c != '*' || !containsNewLine(commentBegin, current_)) + placement = commentAfterOnSameLine; + } + + addComment(commentBegin, current_, placement); + } + return true; +} + +void OurReader::addComment(Location begin, Location end, CommentPlacement placement) +{ + assert(collectComments_); + const JSONCPP_STRING &normalized = normalizeEOL(begin, end); + if (placement == commentAfterOnSameLine) + { + assert(lastValue_ != 0); + lastValue_->setComment(normalized, placement); + } + else + { + commentsBefore_ += normalized; + } +} + +bool OurReader::readCStyleComment() +{ + while ((current_ + 1) < end_) + { + Char c = getNextChar(); + if (c == '*' && *current_ == '/') + break; + } + return getNextChar() == '/'; +} + +bool OurReader::readCppStyleComment() +{ 
+ while (current_ != end_) + { + Char c = getNextChar(); + if (c == '\n') + break; + if (c == '\r') + { + // Consume DOS EOL. It will be normalized in addComment. + if (current_ != end_ && *current_ == '\n') + getNextChar(); + // Break on Moc OS 9 EOL. + break; + } + } + return true; +} + +bool OurReader::readNumber(bool checkInf) +{ + const char *p = current_; + if (checkInf && p != end_ && *p == 'I') + { + current_ = ++p; + return false; + } + char c = '0'; // stopgap for already consumed character + // integral part + while (c >= '0' && c <= '9') + c = (current_ = p) < end_ ? *p++ : '\0'; + // fractional part + if (c == '.') + { + c = (current_ = p) < end_ ? *p++ : '\0'; + while (c >= '0' && c <= '9') + c = (current_ = p) < end_ ? *p++ : '\0'; + } + // exponential part + if (c == 'e' || c == 'E') + { + c = (current_ = p) < end_ ? *p++ : '\0'; + if (c == '+' || c == '-') + c = (current_ = p) < end_ ? *p++ : '\0'; + while (c >= '0' && c <= '9') + c = (current_ = p) < end_ ? *p++ : '\0'; + } + return true; +} +bool OurReader::readString() +{ + Char c = 0; + while (current_ != end_) + { + c = getNextChar(); + if (c == '\\') + getNextChar(); + else if (c == '"') + break; + } + return c == '"'; +} + +bool OurReader::readStringSingleQuote() +{ + Char c = 0; + while (current_ != end_) + { + c = getNextChar(); + if (c == '\\') + getNextChar(); + else if (c == '\'') + break; + } + return c == '\''; +} + +bool OurReader::readObject(Token &tokenStart) +{ + Token tokenName; + JSONCPP_STRING name; + Value init(objectValue); + currentValue().swapPayload(init); + currentValue().setOffsetStart(tokenStart.start_ - begin_); + while (readToken(tokenName)) + { + bool initialTokenOk = true; + while (tokenName.type_ == tokenComment && initialTokenOk) + initialTokenOk = readToken(tokenName); + if (!initialTokenOk) + break; + if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object + return true; + name = ""; + if (tokenName.type_ == tokenString) + { + if 
(!decodeString(tokenName, name)) + return recoverFromError(tokenObjectEnd); + } + else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) + { + Value numberName; + if (!decodeNumber(tokenName, numberName)) + return recoverFromError(tokenObjectEnd); + name = numberName.asString(); + } + else + { + break; + } + + Token colon; + if (!readToken(colon) || colon.type_ != tokenMemberSeparator) + { + return addErrorAndRecover("Missing ':' after object member name", colon, tokenObjectEnd); + } + if (name.length() >= (1U << 30)) + throwRuntimeError("keylength >= 2^30"); + if (features_.rejectDupKeys_ && currentValue().isMember(name)) + { + JSONCPP_STRING msg = "Duplicate key: '" + name + "'"; + return addErrorAndRecover(msg, tokenName, tokenObjectEnd); + } + Value &value = currentValue()[name]; + nodes_.push(&value); + bool ok = readValue(); + nodes_.pop(); + if (!ok) // error already set + return recoverFromError(tokenObjectEnd); + + Token comma; + if (!readToken(comma) || (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator && + comma.type_ != tokenComment)) + { + return addErrorAndRecover("Missing ',' or '}' in object declaration", comma, tokenObjectEnd); + } + bool finalizeTokenOk = true; + while (comma.type_ == tokenComment && finalizeTokenOk) + finalizeTokenOk = readToken(comma); + if (comma.type_ == tokenObjectEnd) + return true; + } + return addErrorAndRecover("Missing '}' or object member name", tokenName, tokenObjectEnd); +} + +bool OurReader::readArray(Token &tokenStart) +{ + Value init(arrayValue); + currentValue().swapPayload(init); + currentValue().setOffsetStart(tokenStart.start_ - begin_); + skipSpaces(); + if (current_ != end_ && *current_ == ']') // empty array + { + Token endArray; + readToken(endArray); + return true; + } + int index = 0; + for (;;) + { + Value &value = currentValue()[index++]; + nodes_.push(&value); + bool ok = readValue(); + nodes_.pop(); + if (!ok) // error already set + return 
recoverFromError(tokenArrayEnd); + + Token token; + // Accept Comment after last item in the array. + ok = readToken(token); + while (token.type_ == tokenComment && ok) + { + ok = readToken(token); + } + bool badTokenType = (token.type_ != tokenArraySeparator && token.type_ != tokenArrayEnd); + if (!ok || badTokenType) + { + return addErrorAndRecover("Missing ',' or ']' in array declaration", token, tokenArrayEnd); + } + if (token.type_ == tokenArrayEnd) + break; + } + return true; +} + +bool OurReader::decodeNumber(Token &token) +{ + Value decoded; + if (!decodeNumber(token, decoded)) + return false; + currentValue().swapPayload(decoded); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + return true; +} + +bool OurReader::decodeNumber(Token &token, Value &decoded) +{ + // Attempts to parse the number as an integer. If the number is + // larger than the maximum supported value of an integer then + // we decode the number as a double. + Location current = token.start_; + bool isNegative = *current == '-'; + if (isNegative) + ++current; + // TODO: Help the compiler do the div and mod at compile time or get rid of them. + Value::LargestUInt maxIntegerValue = + isNegative ? Value::LargestUInt(-Value::minLargestInt) : Value::maxLargestUInt; + Value::LargestUInt threshold = maxIntegerValue / 10; + Value::LargestUInt value = 0; + while (current < token.end_) + { + Char c = *current++; + if (c < '0' || c > '9') + return decodeDouble(token, decoded); + Value::UInt digit(static_cast<Value::UInt>(c - '0')); + if (value >= threshold) + { + // We've hit or exceeded the max value divided by 10 (rounded down). If + // a) we've only just touched the limit, b) this is the last digit, and + // c) it's small enough to fit in that rounding delta, we're okay. + // Otherwise treat this number as a double to avoid overflow. 
+ if (value > threshold || current != token.end_ || digit > maxIntegerValue % 10) + { + return decodeDouble(token, decoded); + } + } + value = value * 10 + digit; + } + if (isNegative) + decoded = -Value::LargestInt(value); + else if (value <= Value::LargestUInt(Value::maxInt)) + decoded = Value::LargestInt(value); + else + decoded = value; + return true; +} + +bool OurReader::decodeDouble(Token &token) +{ + Value decoded; + if (!decodeDouble(token, decoded)) + return false; + currentValue().swapPayload(decoded); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + return true; +} + +bool OurReader::decodeDouble(Token &token, Value &decoded) +{ + double value = 0; + const int bufferSize = 32; + int count; + ptrdiff_t const length = token.end_ - token.start_; + + // Sanity check to avoid buffer overflow exploits. + if (length < 0) + { + return addError("Unable to parse token length", token); + } + size_t const ulength = static_cast<size_t>(length); + + // Avoid using a string constant for the format control string given to + // sscanf, as this can cause hard to debug crashes on OS X. 
See here for more + // info: + // + // http://developer.apple.com/library/mac/#DOCUMENTATION/DeveloperTools/gcc-4.0.1/gcc/Incompatibilities.html + char format[] = "%lf"; + + if (length <= bufferSize) + { + Char buffer[bufferSize + 1]; + memcpy(buffer, token.start_, ulength); + buffer[length] = 0; + fixNumericLocaleInput(buffer, buffer + length); + count = sscanf(buffer, format, &value); + } + else + { + JSONCPP_STRING buffer(token.start_, token.end_); + count = sscanf(buffer.c_str(), format, &value); + } + + if (count != 1) + return addError("'" + JSONCPP_STRING(token.start_, token.end_) + "' is not a number.", token); + decoded = value; + return true; +} + +bool OurReader::decodeString(Token &token) +{ + JSONCPP_STRING decoded_string; + if (!decodeString(token, decoded_string)) + return false; + Value decoded(decoded_string); + currentValue().swapPayload(decoded); + currentValue().setOffsetStart(token.start_ - begin_); + currentValue().setOffsetLimit(token.end_ - begin_); + return true; +} + +bool OurReader::decodeString(Token &token, JSONCPP_STRING &decoded) +{ + decoded.reserve(static_cast<size_t>(token.end_ - token.start_ - 2)); + Location current = token.start_ + 1; // skip '"' + Location end = token.end_ - 1; // do not include '"' + while (current != end) + { + Char c = *current++; + if (c == '"') + break; + else if (c == '\\') + { + if (current == end) + return addError("Empty escape sequence in string", token, current); + Char escape = *current++; + switch (escape) + { + case '"': + decoded += '"'; + break; + case '/': + decoded += '/'; + break; + case '\\': + decoded += '\\'; + break; + case 'b': + decoded += '\b'; + break; + case 'f': + decoded += '\f'; + break; + case 'n': + decoded += '\n'; + break; + case 'r': + decoded += '\r'; + break; + case 't': + decoded += '\t'; + break; + case 'u': + { + unsigned int unicode; + if (!decodeUnicodeCodePoint(token, current, end, unicode)) + return false; + decoded += codePointToUTF8(unicode); + } + break; + 
default: + return addError("Bad escape sequence in string", token, current); + } + } + else + { + decoded += c; + } + } + return true; +} + +bool OurReader::decodeUnicodeCodePoint(Token &token, Location ¤t, Location end, + unsigned int &unicode) +{ + + if (!decodeUnicodeEscapeSequence(token, current, end, unicode)) + return false; + if (unicode >= 0xD800 && unicode <= 0xDBFF) + { + // surrogate pairs + if (end - current < 6) + return addError("additional six characters expected to parse unicode surrogate pair.", token, + current); + unsigned int surrogatePair; + if (*(current++) == '\\' && *(current++) == 'u') + { + if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) + { + unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF); + } + else + return false; + } + else + return addError("expecting another \\u token to begin the second half of " + "a unicode surrogate pair", + token, current); + } + return true; +} + +bool OurReader::decodeUnicodeEscapeSequence(Token &token, Location ¤t, Location end, + unsigned int &ret_unicode) +{ + if (end - current < 4) + return addError("Bad unicode escape sequence in string: four digits expected.", token, current); + int unicode = 0; + for (int index = 0; index < 4; ++index) + { + Char c = *current++; + unicode *= 16; + if (c >= '0' && c <= '9') + unicode += c - '0'; + else if (c >= 'a' && c <= 'f') + unicode += c - 'a' + 10; + else if (c >= 'A' && c <= 'F') + unicode += c - 'A' + 10; + else + return addError("Bad unicode escape sequence in string: hexadecimal digit expected.", token, + current); + } + ret_unicode = static_cast<unsigned int>(unicode); + return true; +} + +bool OurReader::addError(const JSONCPP_STRING &message, Token &token, Location extra) +{ + ErrorInfo info; + info.token_ = token; + info.message_ = message; + info.extra_ = extra; + errors_.push_back(info); + return false; +} + +bool OurReader::recoverFromError(TokenType skipUntilToken) +{ + size_t errorCount = errors_.size(); + 
Token skip; + for (;;) + { + if (!readToken(skip)) + errors_.resize(errorCount); // discard errors caused by recovery + if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream) + break; + } + errors_.resize(errorCount); + return false; +} + +bool OurReader::addErrorAndRecover(const JSONCPP_STRING &message, Token &token, + TokenType skipUntilToken) +{ + addError(message, token); + return recoverFromError(skipUntilToken); +} + +Value &OurReader::currentValue() { return *(nodes_.top()); } + +OurReader::Char OurReader::getNextChar() +{ + if (current_ == end_) + return 0; + return *current_++; +} + +void OurReader::getLocationLineAndColumn(Location location, int &line, int &column) const +{ + Location current = begin_; + Location lastLineStart = current; + line = 0; + while (current < location && current != end_) + { + Char c = *current++; + if (c == '\r') + { + if (*current == '\n') + ++current; + lastLineStart = current; + ++line; + } + else if (c == '\n') + { + lastLineStart = current; + ++line; + } + } + // column & line start at 1 + column = int(location - lastLineStart) + 1; + ++line; +} + +JSONCPP_STRING OurReader::getLocationLineAndColumn(Location location) const +{ + int line, column; + getLocationLineAndColumn(location, line, column); + char buffer[18 + 16 + 16 + 1]; + snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column); + return buffer; +} + +JSONCPP_STRING OurReader::getFormattedErrorMessages() const +{ + JSONCPP_STRING formattedMessage; + for (Errors::const_iterator itError = errors_.begin(); itError != errors_.end(); ++itError) + { + const ErrorInfo &error = *itError; + formattedMessage += "* " + getLocationLineAndColumn(error.token_.start_) + "\n"; + formattedMessage += " " + error.message_ + "\n"; + if (error.extra_) + formattedMessage += "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n"; + } + return formattedMessage; +} + +std::vector<OurReader::StructuredError> OurReader::getStructuredErrors() const +{ + 
std::vector<OurReader::StructuredError> allErrors; + for (Errors::const_iterator itError = errors_.begin(); itError != errors_.end(); ++itError) + { + const ErrorInfo &error = *itError; + OurReader::StructuredError structured; + structured.offset_start = error.token_.start_ - begin_; + structured.offset_limit = error.token_.end_ - begin_; + structured.message = error.message_; + allErrors.push_back(structured); + } + return allErrors; +} + +bool OurReader::pushError(const Value &value, const JSONCPP_STRING &message) +{ + ptrdiff_t length = end_ - begin_; + if (value.getOffsetStart() > length || value.getOffsetLimit() > length) + return false; + Token token; + token.type_ = tokenError; + token.start_ = begin_ + value.getOffsetStart(); + token.end_ = end_ + value.getOffsetLimit(); + ErrorInfo info; + info.token_ = token; + info.message_ = message; + info.extra_ = 0; + errors_.push_back(info); + return true; +} + +bool OurReader::pushError(const Value &value, const JSONCPP_STRING &message, const Value &extra) +{ + ptrdiff_t length = end_ - begin_; + if (value.getOffsetStart() > length || value.getOffsetLimit() > length || + extra.getOffsetLimit() > length) + return false; + Token token; + token.type_ = tokenError; + token.start_ = begin_ + value.getOffsetStart(); + token.end_ = begin_ + value.getOffsetLimit(); + ErrorInfo info; + info.token_ = token; + info.message_ = message; + info.extra_ = begin_ + extra.getOffsetStart(); + errors_.push_back(info); + return true; +} + +bool OurReader::good() const { return !errors_.size(); } + +class OurCharReader : public CharReader +{ + bool const collectComments_; + OurReader reader_; + +public: + OurCharReader(bool collectComments, OurFeatures const &features) + : collectComments_(collectComments), reader_(features) + { + } + bool parse(char const *beginDoc, char const *endDoc, Value *root, + JSONCPP_STRING *errs) JSONCPP_OVERRIDE + { + bool ok = reader_.parse(beginDoc, endDoc, *root, collectComments_); + if (errs) + { + *errs 
= reader_.getFormattedErrorMessages(); + } + return ok; + } +}; + +CharReaderBuilder::CharReaderBuilder() { setDefaults(&settings_); } +CharReaderBuilder::~CharReaderBuilder() {} +CharReader *CharReaderBuilder::newCharReader() const +{ + bool collectComments = settings_["collectComments"].asBool(); + OurFeatures features = OurFeatures::all(); + features.allowComments_ = settings_["allowComments"].asBool(); + features.strictRoot_ = settings_["strictRoot"].asBool(); + features.allowDroppedNullPlaceholders_ = settings_["allowDroppedNullPlaceholders"].asBool(); + features.allowNumericKeys_ = settings_["allowNumericKeys"].asBool(); + features.allowSingleQuotes_ = settings_["allowSingleQuotes"].asBool(); + features.stackLimit_ = settings_["stackLimit"].asInt(); + features.failIfExtra_ = settings_["failIfExtra"].asBool(); + features.rejectDupKeys_ = settings_["rejectDupKeys"].asBool(); + features.allowSpecialFloats_ = settings_["allowSpecialFloats"].asBool(); + return new OurCharReader(collectComments, features); +} +static void getValidReaderKeys(std::set<JSONCPP_STRING> *valid_keys) +{ + valid_keys->clear(); + valid_keys->insert("collectComments"); + valid_keys->insert("allowComments"); + valid_keys->insert("strictRoot"); + valid_keys->insert("allowDroppedNullPlaceholders"); + valid_keys->insert("allowNumericKeys"); + valid_keys->insert("allowSingleQuotes"); + valid_keys->insert("stackLimit"); + valid_keys->insert("failIfExtra"); + valid_keys->insert("rejectDupKeys"); + valid_keys->insert("allowSpecialFloats"); +} +bool CharReaderBuilder::validate(Json::Value *invalid) const +{ + Json::Value my_invalid; + if (!invalid) + invalid = &my_invalid; // so we do not need to test for NULL + Json::Value &inv = *invalid; + std::set<JSONCPP_STRING> valid_keys; + getValidReaderKeys(&valid_keys); + Value::Members keys = settings_.getMemberNames(); + size_t n = keys.size(); + for (size_t i = 0; i < n; ++i) + { + JSONCPP_STRING const &key = keys[i]; + if (valid_keys.find(key) == 
valid_keys.end()) + { + inv[key] = settings_[key]; + } + } + return 0u == inv.size(); +} +Value &CharReaderBuilder::operator[](JSONCPP_STRING key) { return settings_[key]; } +// static +void CharReaderBuilder::strictMode(Json::Value *settings) +{ + //! [CharReaderBuilderStrictMode] + (*settings)["allowComments"] = false; + (*settings)["strictRoot"] = true; + (*settings)["allowDroppedNullPlaceholders"] = false; + (*settings)["allowNumericKeys"] = false; + (*settings)["allowSingleQuotes"] = false; + (*settings)["stackLimit"] = 1000; + (*settings)["failIfExtra"] = true; + (*settings)["rejectDupKeys"] = true; + (*settings)["allowSpecialFloats"] = false; + //! [CharReaderBuilderStrictMode] +} +// static +void CharReaderBuilder::setDefaults(Json::Value *settings) +{ + //! [CharReaderBuilderDefaults] + (*settings)["collectComments"] = true; + (*settings)["allowComments"] = true; + (*settings)["strictRoot"] = false; + (*settings)["allowDroppedNullPlaceholders"] = false; + (*settings)["allowNumericKeys"] = false; + (*settings)["allowSingleQuotes"] = false; + (*settings)["stackLimit"] = 1000; + (*settings)["failIfExtra"] = false; + (*settings)["rejectDupKeys"] = false; + (*settings)["allowSpecialFloats"] = false; + //! [CharReaderBuilderDefaults] +} + +////////////////////////////////// +// global functions + +bool parseFromStream(CharReader::Factory const &fact, JSONCPP_ISTREAM &sin, Value *root, + JSONCPP_STRING *errs) +{ + JSONCPP_OSTRINGSTREAM ssin; + ssin << sin.rdbuf(); + JSONCPP_STRING doc = ssin.str(); + char const *begin = doc.data(); + char const *end = begin + doc.size(); + // Note that we do not actually need a null-terminator. 
+ CharReaderPtr const reader(fact.newCharReader()); + return reader->parse(begin, end, root, errs); +} + +JSONCPP_ISTREAM &operator>>(JSONCPP_ISTREAM &sin, Value &root) +{ + CharReaderBuilder b; + JSONCPP_STRING errs; + bool ok = parseFromStream(b, sin, &root, &errs); + if (!ok) + { + fprintf(stderr, "Error from reader: %s", errs.c_str()); + + throwRuntimeError(errs); + } + return sin; +} + +} // namespace Json + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: src/lib_json/json_reader.cpp +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: src/lib_json/json_valueiterator.inl +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2007-2010 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +// included by json_value.cpp + +namespace Json +{ + +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// class ValueIteratorBase +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// + +ValueIteratorBase::ValueIteratorBase() : current_(), isNull_(true) {} + +ValueIteratorBase::ValueIteratorBase(const Value::ObjectValues::iterator ¤t) + : current_(current), isNull_(false) +{ +} + +Value &ValueIteratorBase::deref() const { return current_->second; } + +void ValueIteratorBase::increment() { ++current_; } + +void ValueIteratorBase::decrement() { --current_; } + 
+ValueIteratorBase::difference_type ValueIteratorBase::computeDistance(const SelfType &other) const +{ +#ifdef JSON_USE_CPPTL_SMALLMAP + return other.current_ - current_; +#else + // Iterator for null value are initialized using the default + // constructor, which initialize current_ to the default + // std::map::iterator. As begin() and end() are two instance + // of the default std::map::iterator, they can not be compared. + // To allow this, we handle this comparison specifically. + if (isNull_ && other.isNull_) + { + return 0; + } + + // Usage of std::distance is not portable (does not compile with Sun Studio 12 + // RogueWave STL, + // which is the one used by default). + // Using a portable hand-made version for non random iterator instead: + // return difference_type( std::distance( current_, other.current_ ) ); + difference_type myDistance = 0; + for (Value::ObjectValues::iterator it = current_; it != other.current_; ++it) + { + ++myDistance; + } + return myDistance; +#endif +} + +bool ValueIteratorBase::isEqual(const SelfType &other) const +{ + if (isNull_) + { + return other.isNull_; + } + return current_ == other.current_; +} + +void ValueIteratorBase::copy(const SelfType &other) +{ + current_ = other.current_; + isNull_ = other.isNull_; +} + +Value ValueIteratorBase::key() const +{ + const Value::CZString czstring = (*current_).first; + if (czstring.data()) + { + if (czstring.isStaticString()) + return Value(StaticString(czstring.data())); + return Value(czstring.data(), czstring.data() + czstring.length()); + } + return Value(czstring.index()); +} + +UInt ValueIteratorBase::index() const +{ + const Value::CZString czstring = (*current_).first; + if (!czstring.data()) + return czstring.index(); + return Value::UInt(-1); +} + +JSONCPP_STRING ValueIteratorBase::name() const +{ + char const *keey; + char const *end; + keey = memberName(&end); + if (!keey) + return JSONCPP_STRING(); + return JSONCPP_STRING(keey, end); +} + +char const 
*ValueIteratorBase::memberName() const +{ + const char *cname = (*current_).first.data(); + return cname ? cname : ""; +} + +char const *ValueIteratorBase::memberName(char const **end) const +{ + const char *cname = (*current_).first.data(); + if (!cname) + { + *end = NULL; + return NULL; + } + *end = cname + (*current_).first.length(); + return cname; +} + +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// class ValueConstIterator +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// + +ValueConstIterator::ValueConstIterator() {} + +ValueConstIterator::ValueConstIterator(const Value::ObjectValues::iterator ¤t) + : ValueIteratorBase(current) +{ +} + +ValueConstIterator::ValueConstIterator(ValueIterator const &other) : ValueIteratorBase(other) {} + +ValueConstIterator &ValueConstIterator::operator=(const ValueIteratorBase &other) +{ + copy(other); + return *this; +} + +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// class ValueIterator +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// + +ValueIterator::ValueIterator() {} + +ValueIterator::ValueIterator(const Value::ObjectValues::iterator ¤t) + : ValueIteratorBase(current) +{ +} + +ValueIterator::ValueIterator(const ValueConstIterator &other) : ValueIteratorBase(other) +{ + throwRuntimeError("ConstIterator to Iterator should never be allowed."); +} + 
+ValueIterator::ValueIterator(const ValueIterator &other) : ValueIteratorBase(other) {} + +ValueIterator &ValueIterator::operator=(const SelfType &other) +{ + copy(other); + return *this; +} + +} // namespace Json + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: src/lib_json/json_valueiterator.inl +// ////////////////////////////////////////////////////////////////////// + +// ////////////////////////////////////////////////////////////////////// +// Beginning of content of file: src/lib_json/json_value.cpp +// ////////////////////////////////////////////////////////////////////// + +// Copyright 2011 Baptiste Lepilleur +// Distributed under MIT license, or public domain if desired and +// recognized in your jurisdiction. +// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#if !defined(JSON_IS_AMALGAMATION) +#include <json/assertions.h> +#include <json/value.h> +#include <json/writer.h> +#endif // if !defined(JSON_IS_AMALGAMATION) +#include <math.h> +#include <sstream> +#include <utility> +#include <cstring> +#include <cassert> +#ifdef JSON_USE_CPPTL +#include <cpptl/conststring.h> +#endif +#include <cstddef> // size_t +#include <algorithm> // min() + +#define JSON_ASSERT_UNREACHABLE assert(false) + +namespace Json +{ + +// This is a walkaround to avoid the static initialization of Value::null. +// kNull must be word-aligned to avoid crashing on ARM. We use an alignment of +// 8 (instead of 4) as a bit of future-proofing. 
#if defined(__ARMEL__)
#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
#else
#define ALIGNAS(byte_alignment)
#endif
// static const unsigned char ALIGNAS(8) kNull[sizeof(Value)] = { 0 };
// const unsigned char& kNullRef = kNull[0];
// const Value& Value::null = reinterpret_cast<const Value&>(kNullRef);
// const Value& Value::nullRef = null;

// static
// Meyers-style singleton: initialized on first use, avoiding static-init
// ordering problems with the old file-scope kNull object above.
Value const &Value::nullSingleton()
{
  static Value const nullStatic;
  return nullStatic;
}

// for backwards compatibility, we'll leave these global references around, but DO NOT
// use them in JSONCPP library code any more!
Value const &Value::null = Value::nullSingleton();
Value const &Value::nullRef = Value::nullSingleton();

// Integer range limits, derived from the unsigned all-ones pattern so they
// stay correct for any platform word size.
const Int Value::minInt = Int(~(UInt(-1) / 2));
const Int Value::maxInt = Int(UInt(-1) / 2);
const UInt Value::maxUInt = UInt(-1);
#if defined(JSON_HAS_INT64)
const Int64 Value::minInt64 = Int64(~(UInt64(-1) / 2));
const Int64 Value::maxInt64 = Int64(UInt64(-1) / 2);
const UInt64 Value::maxUInt64 = UInt64(-1);
// The constant is hard-coded because some compiler have trouble
// converting Value::maxUInt64 to a double correctly (AIX/xlC).
// Assumes that UInt64 is a 64 bits integer.
static const double maxUInt64AsDouble = 18446744073709551615.0;
#endif // defined(JSON_HAS_INT64)
const LargestInt Value::minLargestInt = LargestInt(~(LargestUInt(-1) / 2));
const LargestInt Value::maxLargestInt = LargestInt(LargestUInt(-1) / 2);
const LargestUInt Value::maxLargestUInt = LargestUInt(-1);

#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
// True when double `d` lies within [min, max]; approximate near the edges.
template <typename T, typename U> static inline bool InRange(double d, T min, U max)
{
  // The casts can lose precision, but we are looking only for
  // an approximate range. Might fail on edge cases though. ~cdunn
  // return d >= static_cast<double>(min) && d <= static_cast<double>(max);
  return d >= min && d <= max;
}
#else  // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
// Convert a UInt64 to double without relying on a direct unsigned-64
// conversion (broken on some compilers): halve, convert, re-double.
static inline double integerToDouble(Json::UInt64 value)
{
  return static_cast<double>(Int64(value / 2)) * 2.0 + static_cast<double>(Int64(value & 1));
}

template <typename T> static inline double integerToDouble(T value)
{
  return static_cast<double>(value);
}

template <typename T, typename U> static inline bool InRange(double d, T min, U max)
{
  return d >= integerToDouble(min) && d <= integerToDouble(max);
}
#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)

/** Duplicates the specified string value.
 * @param value Pointer to the string to duplicate. Must be zero-terminated if
 *              length is "unknown".
 * @param length Length of the value. if equals to unknown, then it will be
 *               computed using strlen(value).
 * @return Pointer on the duplicate instance of string.
 */
static inline char *duplicateStringValue(const char *value, size_t length)
{
  // Avoid an integer overflow in the call to malloc below by limiting length
  // to a sane value.
  if (length >= static_cast<size_t>(Value::maxInt))
    length = Value::maxInt - 1;

  char *newString = static_cast<char *>(malloc(length + 1));
  if (newString == NULL)
  {
    throwRuntimeError("in Json::Value::duplicateStringValue(): "
                      "Failed to allocate string value buffer");
  }
  memcpy(newString, value, length);
  newString[length] = 0;
  return newString;
}

/* Record the length as a prefix.
 */
static inline char *duplicateAndPrefixStringValue(const char *value, unsigned int length)
{
  // Avoid an integer overflow in the call to malloc below by limiting length
  // to a sane value.
  JSON_ASSERT_MESSAGE(length <= static_cast<unsigned>(Value::maxInt) - sizeof(unsigned) - 1U,
                      "in Json::Value::duplicateAndPrefixStringValue(): "
                      "length too big for prefixing");
  // Layout: [unsigned length][string bytes][NUL]
  unsigned actualLength = length + static_cast<unsigned>(sizeof(unsigned)) + 1U;
  char *newString = static_cast<char *>(malloc(actualLength));
  if (newString == 0)
  {
    throwRuntimeError("in Json::Value::duplicateAndPrefixStringValue(): "
                      "Failed to allocate string value buffer");
  }
  *reinterpret_cast<unsigned *>(newString) = length;
  memcpy(newString + sizeof(unsigned), value, length);
  newString[actualLength - 1U] = 0; // to avoid buffer over-run accidents by users later
  return newString;
}

// Read back a string stored either as a plain NUL-terminated buffer
// (isPrefixed == false) or with the length prefix written above.
inline static void decodePrefixedString(bool isPrefixed, char const *prefixed, unsigned *length,
                                        char const **value)
{
  if (!isPrefixed)
  {
    *length = static_cast<unsigned>(strlen(prefixed));
    *value = prefixed;
  }
  else
  {
    *length = *reinterpret_cast<unsigned const *>(prefixed);
    *value = prefixed + sizeof(unsigned);
  }
}

/** Free the string duplicated by duplicateStringValue()/duplicateAndPrefixStringValue().
 */
#if JSONCPP_USING_SECURE_MEMORY
// Secure builds scrub the buffer before freeing so the contents cannot be
// recovered from the heap.
static inline void releasePrefixedStringValue(char *value)
{
  unsigned length = 0;
  char const *valueDecoded;
  decodePrefixedString(true, value, &length, &valueDecoded);
  size_t const size = sizeof(unsigned) + length + 1U;
  memset(value, 0, size);
  free(value);
}
static inline void releaseStringValue(char *value, unsigned length)
{
  // length==0 => we allocated the strings memory
  size_t size = (length == 0) ? strlen(value) : length;
  memset(value, 0, size);
  free(value);
}
#else  // !JSONCPP_USING_SECURE_MEMORY
static inline void releasePrefixedStringValue(char *value) { free(value); }
static inline void releaseStringValue(char *value, unsigned) { free(value); }
#endif // JSONCPP_USING_SECURE_MEMORY

} // namespace Json

// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// ValueInternals...
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
#if !defined(JSON_IS_AMALGAMATION)

#include "json_valueiterator.inl"
#endif // if !defined(JSON_IS_AMALGAMATION)

namespace Json
{

// Exception hierarchy: RuntimeError for environment/input failures,
// LogicError for API misuse. Thrown only via the two helpers below.
Exception::Exception(JSONCPP_STRING const &msg) : msg_(msg) {}
Exception::~Exception() throw() {}
char const *Exception::what() const throw() { return msg_.c_str(); }
RuntimeError::RuntimeError(JSONCPP_STRING const &msg) : Exception(msg) {}
LogicError::LogicError(JSONCPP_STRING const &msg) : Exception(msg) {}
JSONCPP_NORETURN void throwRuntimeError(JSONCPP_STRING const &msg) { throw RuntimeError(msg); }
JSONCPP_NORETURN void throwLogicError(JSONCPP_STRING const &msg) { throw LogicError(msg); }

// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// class Value::CommentInfo
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////

Value::CommentInfo::CommentInfo() : comment_(0) {}

Value::CommentInfo::~CommentInfo()
{
  if (comment_)
    releaseStringValue(comment_, 0u);
}

// Replace the stored comment text. `text` must be empty or start with '/'
// (i.e. a "//" or "/**/" style comment).
void Value::CommentInfo::setComment(const char *text, size_t len)
{
  if (comment_)
  {
    releaseStringValue(comment_, 0u);
    comment_ = 0;
  }
  JSON_ASSERT(text != 0);
  JSON_ASSERT_MESSAGE(text[0] == '\0' || text[0] == '/',
                      "in Json::Value::setComment(): Comments must start with /");
  // It seems that /**/ style comments are acceptable as well.
  comment_ = duplicateStringValue(text, len);
}

// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// class Value::CZString
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////
// //////////////////////////////////////////////////////////////////

// Notes: policy_ indicates if the string was allocated when
// a string is stored.

// Index key (array element): cstr_ is NULL, index_ holds the position.
Value::CZString::CZString(ArrayIndex aindex) : cstr_(0), index_(aindex) {}

// String key: policy_ and length_ are packed into a bitfield (2 and 30 bits),
// hence the masking below.
Value::CZString::CZString(char const *str, unsigned ulength, DuplicationPolicy allocate)
  : cstr_(str)
{
  // allocate != duplicate
  storage_.policy_ = allocate & 0x3;
  storage_.length_ = ulength & 0x3FFFFFFF;
}

// Copy: deep-copies the string unless the source is a static (noDuplication)
// string or an index key; the resulting policy records whether we now own it.
Value::CZString::CZString(const CZString &other)
{
  cstr_ = (other.storage_.policy_ != noDuplication && other.cstr_ != 0
               ? duplicateStringValue(other.cstr_, other.storage_.length_)
               : other.cstr_);
  storage_.policy_ =
      static_cast<unsigned>(
          other.cstr_ ? (static_cast<DuplicationPolicy>(other.storage_.policy_) == noDuplication
                             ? noDuplication
                             : duplicate)
                      : static_cast<DuplicationPolicy>(other.storage_.policy_)) &
      3U;
  storage_.length_ = other.storage_.length_;
}

#if JSON_HAS_RVALUE_REFERENCES
// Move: steal the pointer; copying index_ also copies storage_ (they share
// a union), so policy/length travel along.
Value::CZString::CZString(CZString &&other) : cstr_(other.cstr_), index_(other.index_)
{
  other.cstr_ = nullptr;
}
#endif

Value::CZString::~CZString()
{
  if (cstr_ && storage_.policy_ == duplicate)
  {
    releaseStringValue(const_cast<char *>(cstr_), storage_.length_ + 1u); //+1 for null terminating
                                                                         // character for sake of
                                                                         // completeness but not
                                                                         // actually necessary
  }
}

void Value::CZString::swap(CZString &other)
{
  std::swap(cstr_, other.cstr_);
  std::swap(index_, other.index_);
}

// Copy-and-swap assignment: `other` is taken by value.
Value::CZString &Value::CZString::operator=(CZString other)
{
  swap(other);
  return *this;
}

// Ordering: index keys compare by index; string keys compare like memcmp
// with length tie-break (shorter-is-less), so embedded NULs are honored.
bool Value::CZString::operator<(const CZString &other) const
{
  if (!cstr_)
    return index_ < other.index_;
  // return strcmp(cstr_, other.cstr_) < 0;
  // Assume both are strings.
  unsigned this_len = this->storage_.length_;
  unsigned other_len = other.storage_.length_;
  unsigned min_len = std::min(this_len, other_len);
  JSON_ASSERT(this->cstr_ && other.cstr_);
  int comp = memcmp(this->cstr_, other.cstr_, min_len);
  if (comp < 0)
    return true;
  if (comp > 0)
    return false;
  return (this_len < other_len);
}

bool Value::CZString::operator==(const CZString &other) const
{
  if (!cstr_)
    return index_ == other.index_;
  // return strcmp(cstr_, other.cstr_) == 0;
  // Assume both are strings.
+ unsigned this_len = this->storage_.length_; + unsigned other_len = other.storage_.length_; + if (this_len != other_len) + return false; + JSON_ASSERT(this->cstr_ && other.cstr_); + int comp = memcmp(this->cstr_, other.cstr_, this_len); + return comp == 0; +} + +ArrayIndex Value::CZString::index() const { return index_; } + +// const char* Value::CZString::c_str() const { return cstr_; } +const char *Value::CZString::data() const { return cstr_; } +unsigned Value::CZString::length() const { return storage_.length_; } +bool Value::CZString::isStaticString() const { return storage_.policy_ == noDuplication; } + +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// class Value::Value +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////// + +/*! \internal Default constructor initialization must be equivalent to: + * memset( this, 0, sizeof(Value) ) + * This optimization is used in ValueInternalMap fast allocator. + */ +Value::Value(ValueType vtype) +{ + static char const empty[] = ""; + initBasic(vtype); + switch (vtype) + { + case nullValue: + break; + case intValue: + case uintValue: + value_.int_ = 0; + break; + case realValue: + value_.real_ = 0.0; + break; + case stringValue: + // allocated_ == false, so this is safe. 
+ value_.string_ = const_cast<char *>(static_cast<char const *>(empty)); + break; + case arrayValue: + case objectValue: + value_.map_ = new ObjectValues(); + break; + case booleanValue: + value_.bool_ = false; + break; + default: + JSON_ASSERT_UNREACHABLE; + } +} + +Value::Value(Int value) +{ + initBasic(intValue); + value_.int_ = value; +} + +Value::Value(UInt value) +{ + initBasic(uintValue); + value_.uint_ = value; +} +#if defined(JSON_HAS_INT64) +Value::Value(Int64 value) +{ + initBasic(intValue); + value_.int_ = value; +} +Value::Value(UInt64 value) +{ + initBasic(uintValue); + value_.uint_ = value; +} +#endif // defined(JSON_HAS_INT64) + +Value::Value(double value) +{ + initBasic(realValue); + value_.real_ = value; +} + +Value::Value(const char *value) +{ + initBasic(stringValue, true); + value_.string_ = duplicateAndPrefixStringValue(value, static_cast<unsigned>(strlen(value))); +} + +Value::Value(const char *beginValue, const char *endValue) +{ + initBasic(stringValue, true); + value_.string_ = + duplicateAndPrefixStringValue(beginValue, static_cast<unsigned>(endValue - beginValue)); +} + +Value::Value(const JSONCPP_STRING &value) +{ + initBasic(stringValue, true); + value_.string_ = + duplicateAndPrefixStringValue(value.data(), static_cast<unsigned>(value.length())); +} + +Value::Value(const StaticString &value) +{ + initBasic(stringValue); + value_.string_ = const_cast<char *>(value.c_str()); +} + +#ifdef JSON_USE_CPPTL +Value::Value(const CppTL::ConstString &value) +{ + initBasic(stringValue, true); + value_.string_ = duplicateAndPrefixStringValue(value, static_cast<unsigned>(value.length())); +} +#endif + +Value::Value(bool value) +{ + initBasic(booleanValue); + value_.bool_ = value; +} + +Value::Value(Value const &other) + : type_(other.type_), allocated_(false), comments_(0), start_(other.start_), + limit_(other.limit_) +{ + switch (type_) + { + case nullValue: + case intValue: + case uintValue: + case realValue: + case booleanValue: + value_ = 
other.value_; + break; + case stringValue: + if (other.value_.string_ && other.allocated_) + { + unsigned len; + char const *str; + decodePrefixedString(other.allocated_, other.value_.string_, &len, &str); + value_.string_ = duplicateAndPrefixStringValue(str, len); + allocated_ = true; + } + else + { + value_.string_ = other.value_.string_; + allocated_ = false; + } + break; + case arrayValue: + case objectValue: + value_.map_ = new ObjectValues(*other.value_.map_); + break; + default: + JSON_ASSERT_UNREACHABLE; + } + if (other.comments_) + { + comments_ = new CommentInfo[numberOfCommentPlacement]; + for (int comment = 0; comment < numberOfCommentPlacement; ++comment) + { + const CommentInfo &otherComment = other.comments_[comment]; + if (otherComment.comment_) + comments_[comment].setComment(otherComment.comment_, strlen(otherComment.comment_)); + } + } +} + +#if JSON_HAS_RVALUE_REFERENCES +// Move constructor +Value::Value(Value &&other) +{ + initBasic(nullValue); + swap(other); +} +#endif + +Value::~Value() +{ + switch (type_) + { + case nullValue: + case intValue: + case uintValue: + case realValue: + case booleanValue: + break; + case stringValue: + if (allocated_) + releasePrefixedStringValue(value_.string_); + break; + case arrayValue: + case objectValue: + delete value_.map_; + break; + default: + JSON_ASSERT_UNREACHABLE; + } + + delete[] comments_; + + value_.uint_ = 0; +} + +Value &Value::operator=(Value other) +{ + swap(other); + return *this; +} + +void Value::swapPayload(Value &other) +{ + ValueType temp = type_; + type_ = other.type_; + other.type_ = temp; + std::swap(value_, other.value_); + int temp2 = allocated_; + allocated_ = other.allocated_; + other.allocated_ = temp2 & 0x1; +} + +void Value::swap(Value &other) +{ + swapPayload(other); + std::swap(comments_, other.comments_); + std::swap(start_, other.start_); + std::swap(limit_, other.limit_); +} + +ValueType Value::type() const { return type_; } + +int Value::compare(const Value &other) const 
+{ + if (*this < other) + return -1; + if (*this > other) + return 1; + return 0; +} + +bool Value::operator<(const Value &other) const +{ + int typeDelta = type_ - other.type_; + if (typeDelta) + return typeDelta < 0 ? true : false; + switch (type_) + { + case nullValue: + return false; + case intValue: + return value_.int_ < other.value_.int_; + case uintValue: + return value_.uint_ < other.value_.uint_; + case realValue: + return value_.real_ < other.value_.real_; + case booleanValue: + return value_.bool_ < other.value_.bool_; + case stringValue: + { + if ((value_.string_ == 0) || (other.value_.string_ == 0)) + { + if (other.value_.string_) + return true; + else + return false; + } + unsigned this_len; + unsigned other_len; + char const *this_str; + char const *other_str; + decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str); + decodePrefixedString(other.allocated_, other.value_.string_, &other_len, &other_str); + unsigned min_len = std::min(this_len, other_len); + JSON_ASSERT(this_str && other_str); + int comp = memcmp(this_str, other_str, min_len); + if (comp < 0) + return true; + if (comp > 0) + return false; + return (this_len < other_len); + } + case arrayValue: + case objectValue: + { + int delta = int(value_.map_->size() - other.value_.map_->size()); + if (delta) + return delta < 0; + return (*value_.map_) < (*other.value_.map_); + } + default: + JSON_ASSERT_UNREACHABLE; + } + return false; // unreachable +} + +bool Value::operator<=(const Value &other) const { return !(other < *this); } + +bool Value::operator>=(const Value &other) const { return !(*this < other); } + +bool Value::operator>(const Value &other) const { return other < *this; } + +bool Value::operator==(const Value &other) const +{ + // if ( type_ != other.type_ ) + // GCC 2.95.3 says: + // attempt to take address of bit-field structure member `Json::Value::type_' + // Beats me, but a temp solves the problem. 
+ int temp = other.type_; + if (type_ != temp) + return false; + switch (type_) + { + case nullValue: + return true; + case intValue: + return value_.int_ == other.value_.int_; + case uintValue: + return value_.uint_ == other.value_.uint_; + case realValue: + return value_.real_ == other.value_.real_; + case booleanValue: + return value_.bool_ == other.value_.bool_; + case stringValue: + { + if ((value_.string_ == 0) || (other.value_.string_ == 0)) + { + return (value_.string_ == other.value_.string_); + } + unsigned this_len; + unsigned other_len; + char const *this_str; + char const *other_str; + decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str); + decodePrefixedString(other.allocated_, other.value_.string_, &other_len, &other_str); + if (this_len != other_len) + return false; + JSON_ASSERT(this_str && other_str); + int comp = memcmp(this_str, other_str, this_len); + return comp == 0; + } + case arrayValue: + case objectValue: + return value_.map_->size() == other.value_.map_->size() && + (*value_.map_) == (*other.value_.map_); + default: + JSON_ASSERT_UNREACHABLE; + } + return false; // unreachable +} + +bool Value::operator!=(const Value &other) const { return !(*this == other); } + +const char *Value::asCString() const +{ + JSON_ASSERT_MESSAGE(type_ == stringValue, "in Json::Value::asCString(): requires stringValue"); + if (value_.string_ == 0) + return 0; + unsigned this_len; + char const *this_str; + decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str); + return this_str; +} + +#if JSONCPP_USING_SECURE_MEMORY +unsigned Value::getCStringLength() const +{ + JSON_ASSERT_MESSAGE(type_ == stringValue, "in Json::Value::asCString(): requires stringValue"); + if (value_.string_ == 0) + return 0; + unsigned this_len; + char const *this_str; + decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str); + return this_len; +} +#endif + +bool Value::getString(char const **str, char 
const **cend) const +{ + if (type_ != stringValue) + return false; + if (value_.string_ == 0) + return false; + unsigned length; + decodePrefixedString(this->allocated_, this->value_.string_, &length, str); + *cend = *str + length; + return true; +} + +JSONCPP_STRING Value::asString() const +{ + switch (type_) + { + case nullValue: + return ""; + case stringValue: + { + if (value_.string_ == 0) + return ""; + unsigned this_len; + char const *this_str; + decodePrefixedString(this->allocated_, this->value_.string_, &this_len, &this_str); + return JSONCPP_STRING(this_str, this_len); + } + case booleanValue: + return value_.bool_ ? "true" : "false"; + case intValue: + return valueToString(value_.int_); + case uintValue: + return valueToString(value_.uint_); + case realValue: + return valueToString(value_.real_); + default: + JSON_FAIL_MESSAGE("Type is not convertible to string"); + } +} + +#ifdef JSON_USE_CPPTL +CppTL::ConstString Value::asConstString() const +{ + unsigned len; + char const *str; + decodePrefixedString(allocated_, value_.string_, &len, &str); + return CppTL::ConstString(str, len); +} +#endif + +Value::Int Value::asInt() const +{ + switch (type_) + { + case intValue: + JSON_ASSERT_MESSAGE(isInt(), "LargestInt out of Int range"); + return Int(value_.int_); + case uintValue: + JSON_ASSERT_MESSAGE(isInt(), "LargestUInt out of Int range"); + return Int(value_.uint_); + case realValue: + JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt, maxInt), "double out of Int range"); + return Int(value_.real_); + case nullValue: + return 0; + case booleanValue: + return value_.bool_ ? 
1 : 0; + default: + break; + } + JSON_FAIL_MESSAGE("Value is not convertible to Int."); +} + +Value::UInt Value::asUInt() const +{ + switch (type_) + { + case intValue: + JSON_ASSERT_MESSAGE(isUInt(), "LargestInt out of UInt range"); + return UInt(value_.int_); + case uintValue: + JSON_ASSERT_MESSAGE(isUInt(), "LargestUInt out of UInt range"); + return UInt(value_.uint_); + case realValue: + JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt), "double out of UInt range"); + return UInt(value_.real_); + case nullValue: + return 0; + case booleanValue: + return value_.bool_ ? 1 : 0; + default: + break; + } + JSON_FAIL_MESSAGE("Value is not convertible to UInt."); +} + +#if defined(JSON_HAS_INT64) + +Value::Int64 Value::asInt64() const +{ + switch (type_) + { + case intValue: + return Int64(value_.int_); + case uintValue: + JSON_ASSERT_MESSAGE(isInt64(), "LargestUInt out of Int64 range"); + return Int64(value_.uint_); + case realValue: + JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt64, maxInt64), "double out of Int64 range"); + return Int64(value_.real_); + case nullValue: + return 0; + case booleanValue: + return value_.bool_ ? 1 : 0; + default: + break; + } + JSON_FAIL_MESSAGE("Value is not convertible to Int64."); +} + +Value::UInt64 Value::asUInt64() const +{ + switch (type_) + { + case intValue: + JSON_ASSERT_MESSAGE(isUInt64(), "LargestInt out of UInt64 range"); + return UInt64(value_.int_); + case uintValue: + return UInt64(value_.uint_); + case realValue: + JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt64), "double out of UInt64 range"); + return UInt64(value_.real_); + case nullValue: + return 0; + case booleanValue: + return value_.bool_ ? 
1 : 0; + default: + break; + } + JSON_FAIL_MESSAGE("Value is not convertible to UInt64."); +} +#endif // if defined(JSON_HAS_INT64) + +LargestInt Value::asLargestInt() const +{ +#if defined(JSON_NO_INT64) + return asInt(); +#else + return asInt64(); +#endif +} + +LargestUInt Value::asLargestUInt() const +{ +#if defined(JSON_NO_INT64) + return asUInt(); +#else + return asUInt64(); +#endif +} + +double Value::asDouble() const +{ + switch (type_) + { + case intValue: + return static_cast<double>(value_.int_); + case uintValue: +#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) + return static_cast<double>(value_.uint_); +#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) + return integerToDouble(value_.uint_); +#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) + case realValue: + return value_.real_; + case nullValue: + return 0.0; + case booleanValue: + return value_.bool_ ? 1.0 : 0.0; + default: + break; + } + JSON_FAIL_MESSAGE("Value is not convertible to double."); +} + +float Value::asFloat() const +{ + switch (type_) + { + case intValue: + return static_cast<float>(value_.int_); + case uintValue: +#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) + return static_cast<float>(value_.uint_); +#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) + // This can fail (silently?) if the value is bigger than MAX_FLOAT. + return static_cast<float>(integerToDouble(value_.uint_)); +#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) + case realValue: + return static_cast<float>(value_.real_); + case nullValue: + return 0.0; + case booleanValue: + return value_.bool_ ? 1.0f : 0.0f; + default: + break; + } + JSON_FAIL_MESSAGE("Value is not convertible to float."); +} + +bool Value::asBool() const +{ + switch (type_) + { + case booleanValue: + return value_.bool_; + case nullValue: + return false; + case intValue: + return value_.int_ ? true : false; + case uintValue: + return value_.uint_ ? true : false; + case realValue: + // This is kind of strange. 
Not recommended. + return (value_.real_ != 0.0) ? true : false; + default: + break; + } + JSON_FAIL_MESSAGE("Value is not convertible to bool."); +} + +bool Value::isConvertibleTo(ValueType other) const +{ + switch (other) + { + case nullValue: + return (isNumeric() && asDouble() == 0.0) || + (type_ == booleanValue && value_.bool_ == false) || + (type_ == stringValue && asString() == "") || + (type_ == arrayValue && value_.map_->size() == 0) || + (type_ == objectValue && value_.map_->size() == 0) || type_ == nullValue; + case intValue: + return isInt() || (type_ == realValue && InRange(value_.real_, minInt, maxInt)) || + type_ == booleanValue || type_ == nullValue; + case uintValue: + return isUInt() || (type_ == realValue && InRange(value_.real_, 0, maxUInt)) || + type_ == booleanValue || type_ == nullValue; + case realValue: + return isNumeric() || type_ == booleanValue || type_ == nullValue; + case booleanValue: + return isNumeric() || type_ == booleanValue || type_ == nullValue; + case stringValue: + return isNumeric() || type_ == booleanValue || type_ == stringValue || type_ == nullValue; + case arrayValue: + return type_ == arrayValue || type_ == nullValue; + case objectValue: + return type_ == objectValue || type_ == nullValue; + } + JSON_ASSERT_UNREACHABLE; + return false; +} + +/// Number of values in array or object +ArrayIndex Value::size() const +{ + switch (type_) + { + case nullValue: + case intValue: + case uintValue: + case realValue: + case booleanValue: + case stringValue: + return 0; + case arrayValue: // size of the array is highest index + 1 + if (!value_.map_->empty()) + { + ObjectValues::const_iterator itLast = value_.map_->end(); + --itLast; + return (*itLast).first.index() + 1; + } + return 0; + case objectValue: + return ArrayIndex(value_.map_->size()); + } + JSON_ASSERT_UNREACHABLE; + return 0; // unreachable; +} + +bool Value::empty() const +{ + if (isNull() || isArray() || isObject()) + return size() == 0u; + else + return false; +} + 
+bool Value::operator!() const { return isNull(); } + +void Value::clear() +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == arrayValue || type_ == objectValue, + "in Json::Value::clear(): requires complex value"); + start_ = 0; + limit_ = 0; + switch (type_) + { + case arrayValue: + case objectValue: + value_.map_->clear(); + break; + default: + break; + } +} + +void Value::resize(ArrayIndex newSize) +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == arrayValue, + "in Json::Value::resize(): requires arrayValue"); + if (type_ == nullValue) + *this = Value(arrayValue); + ArrayIndex oldSize = size(); + if (newSize == 0) + clear(); + else if (newSize > oldSize) + (*this)[newSize - 1]; + else + { + for (ArrayIndex index = newSize; index < oldSize; ++index) + { + value_.map_->erase(index); + } + JSON_ASSERT(size() == newSize); + } +} + +Value &Value::operator[](ArrayIndex index) +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == arrayValue, + "in Json::Value::operator[](ArrayIndex): requires arrayValue"); + if (type_ == nullValue) + *this = Value(arrayValue); + CZString key(index); + ObjectValues::iterator it = value_.map_->lower_bound(key); + if (it != value_.map_->end() && (*it).first == key) + return (*it).second; + + ObjectValues::value_type defaultValue(key, nullSingleton()); + it = value_.map_->insert(it, defaultValue); + return (*it).second; +} + +Value &Value::operator[](int index) +{ + JSON_ASSERT_MESSAGE(index >= 0, + "in Json::Value::operator[](int index): index cannot be negative"); + return (*this)[ArrayIndex(index)]; +} + +const Value &Value::operator[](ArrayIndex index) const +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == arrayValue, + "in Json::Value::operator[](ArrayIndex)const: requires arrayValue"); + if (type_ == nullValue) + return nullSingleton(); + CZString key(index); + ObjectValues::const_iterator it = value_.map_->find(key); + if (it == value_.map_->end()) + return nullSingleton(); + return (*it).second; +} + +const 
Value &Value::operator[](int index) const +{ + JSON_ASSERT_MESSAGE(index >= 0, + "in Json::Value::operator[](int index) const: index cannot be negative"); + return (*this)[ArrayIndex(index)]; +} + +void Value::initBasic(ValueType vtype, bool allocated) +{ + type_ = vtype; + allocated_ = allocated; + comments_ = 0; + start_ = 0; + limit_ = 0; +} + +// Access an object value by name, create a null member if it does not exist. +// @pre Type of '*this' is object or null. +// @param key is null-terminated. +Value &Value::resolveReference(const char *key) +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == objectValue, + "in Json::Value::resolveReference(): requires objectValue"); + if (type_ == nullValue) + *this = Value(objectValue); + CZString actualKey(key, static_cast<unsigned>(strlen(key)), CZString::noDuplication); // NOTE! + ObjectValues::iterator it = value_.map_->lower_bound(actualKey); + if (it != value_.map_->end() && (*it).first == actualKey) + return (*it).second; + + ObjectValues::value_type defaultValue(actualKey, nullSingleton()); + it = value_.map_->insert(it, defaultValue); + Value &value = (*it).second; + return value; +} + +// @param key is not null-terminated. 
+Value &Value::resolveReference(char const *key, char const *cend) +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == objectValue, + "in Json::Value::resolveReference(key, end): requires objectValue"); + if (type_ == nullValue) + *this = Value(objectValue); + CZString actualKey(key, static_cast<unsigned>(cend - key), CZString::duplicateOnCopy); + ObjectValues::iterator it = value_.map_->lower_bound(actualKey); + if (it != value_.map_->end() && (*it).first == actualKey) + return (*it).second; + + ObjectValues::value_type defaultValue(actualKey, nullSingleton()); + it = value_.map_->insert(it, defaultValue); + Value &value = (*it).second; + return value; +} + +Value Value::get(ArrayIndex index, const Value &defaultValue) const +{ + const Value *value = &((*this)[index]); + return value == &nullSingleton() ? defaultValue : *value; +} + +bool Value::isValidIndex(ArrayIndex index) const { return index < size(); } + +Value const *Value::find(char const *key, char const *cend) const +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == objectValue, + "in Json::Value::find(key, end, found): requires objectValue or nullValue"); + if (type_ == nullValue) + return NULL; + CZString actualKey(key, static_cast<unsigned>(cend - key), CZString::noDuplication); + ObjectValues::const_iterator it = value_.map_->find(actualKey); + if (it == value_.map_->end()) + return NULL; + return &(*it).second; +} +const Value &Value::operator[](const char *key) const +{ + Value const *found = find(key, key + strlen(key)); + if (!found) + return nullSingleton(); + return *found; +} +Value const &Value::operator[](JSONCPP_STRING const &key) const +{ + Value const *found = find(key.data(), key.data() + key.length()); + if (!found) + return nullSingleton(); + return *found; +} + +Value &Value::operator[](const char *key) { return resolveReference(key, key + strlen(key)); } + +Value &Value::operator[](const JSONCPP_STRING &key) +{ + return resolveReference(key.data(), key.data() + 
key.length()); +} + +Value &Value::operator[](const StaticString &key) { return resolveReference(key.c_str()); } + +#ifdef JSON_USE_CPPTL +Value &Value::operator[](const CppTL::ConstString &key) +{ + return resolveReference(key.c_str(), key.end_c_str()); +} +Value const &Value::operator[](CppTL::ConstString const &key) const +{ + Value const *found = find(key.c_str(), key.end_c_str()); + if (!found) + return nullSingleton(); + return *found; +} +#endif + +Value &Value::append(const Value &value) { return (*this)[size()] = value; } + +Value Value::get(char const *key, char const *cend, Value const &defaultValue) const +{ + Value const *found = find(key, cend); + return !found ? defaultValue : *found; +} +Value Value::get(char const *key, Value const &defaultValue) const +{ + return get(key, key + strlen(key), defaultValue); +} +Value Value::get(JSONCPP_STRING const &key, Value const &defaultValue) const +{ + return get(key.data(), key.data() + key.length(), defaultValue); +} + +bool Value::removeMember(const char *key, const char *cend, Value *removed) +{ + if (type_ != objectValue) + { + return false; + } + CZString actualKey(key, static_cast<unsigned>(cend - key), CZString::noDuplication); + ObjectValues::iterator it = value_.map_->find(actualKey); + if (it == value_.map_->end()) + return false; + *removed = it->second; + value_.map_->erase(it); + return true; +} +bool Value::removeMember(const char *key, Value *removed) +{ + return removeMember(key, key + strlen(key), removed); +} +bool Value::removeMember(JSONCPP_STRING const &key, Value *removed) +{ + return removeMember(key.data(), key.data() + key.length(), removed); +} +Value Value::removeMember(const char *key) +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == objectValue, + "in Json::Value::removeMember(): requires objectValue"); + if (type_ == nullValue) + return nullSingleton(); + + Value removed; // null + removeMember(key, key + strlen(key), &removed); + return removed; // still null if 
removeMember() did nothing +} +Value Value::removeMember(const JSONCPP_STRING &key) { return removeMember(key.c_str()); } + +bool Value::removeIndex(ArrayIndex index, Value *removed) +{ + if (type_ != arrayValue) + { + return false; + } + CZString key(index); + ObjectValues::iterator it = value_.map_->find(key); + if (it == value_.map_->end()) + { + return false; + } + *removed = it->second; + ArrayIndex oldSize = size(); + // shift left all items left, into the place of the "removed" + for (ArrayIndex i = index; i < (oldSize - 1); ++i) + { + CZString keey(i); + (*value_.map_)[keey] = (*this)[i + 1]; + } + // erase the last one ("leftover") + CZString keyLast(oldSize - 1); + ObjectValues::iterator itLast = value_.map_->find(keyLast); + value_.map_->erase(itLast); + return true; +} + +#ifdef JSON_USE_CPPTL +Value Value::get(const CppTL::ConstString &key, const Value &defaultValue) const +{ + return get(key.c_str(), key.end_c_str(), defaultValue); +} +#endif + +bool Value::isMember(char const *key, char const *cend) const +{ + Value const *value = find(key, cend); + return NULL != value; +} +bool Value::isMember(char const *key) const { return isMember(key, key + strlen(key)); } +bool Value::isMember(JSONCPP_STRING const &key) const +{ + return isMember(key.data(), key.data() + key.length()); +} + +#ifdef JSON_USE_CPPTL +bool Value::isMember(const CppTL::ConstString &key) const +{ + return isMember(key.c_str(), key.end_c_str()); +} +#endif + +Value::Members Value::getMemberNames() const +{ + JSON_ASSERT_MESSAGE(type_ == nullValue || type_ == objectValue, + "in Json::Value::getMemberNames(), value must be objectValue"); + if (type_ == nullValue) + return Value::Members(); + Members members; + members.reserve(value_.map_->size()); + ObjectValues::const_iterator it = value_.map_->begin(); + ObjectValues::const_iterator itEnd = value_.map_->end(); + for (; it != itEnd; ++it) + { + members.push_back(JSONCPP_STRING((*it).first.data(), (*it).first.length())); + } + return 
members; +} +// +//# ifdef JSON_USE_CPPTL +// EnumMemberNames +// Value::enumMemberNames() const +//{ +// if ( type_ == objectValue ) +// { +// return CppTL::Enum::any( CppTL::Enum::transform( +// CppTL::Enum::keys( *(value_.map_), CppTL::Type<const CZString &>() ), +// MemberNamesTransform() ) ); +// } +// return EnumMemberNames(); +//} +// +// +// EnumValues +// Value::enumValues() const +//{ +// if ( type_ == objectValue || type_ == arrayValue ) +// return CppTL::Enum::anyValues( *(value_.map_), +// CppTL::Type<const Value &>() ); +// return EnumValues(); +//} +// +//# endif + +static bool IsIntegral(double d) +{ + double integral_part; + return modf(d, &integral_part) == 0.0; +} + +bool Value::isNull() const { return type_ == nullValue; } + +bool Value::isBool() const { return type_ == booleanValue; } + +bool Value::isInt() const +{ + switch (type_) + { + case intValue: + return value_.int_ >= minInt && value_.int_ <= maxInt; + case uintValue: + return value_.uint_ <= UInt(maxInt); + case realValue: + return value_.real_ >= minInt && value_.real_ <= maxInt && IsIntegral(value_.real_); + default: + break; + } + return false; +} + +bool Value::isUInt() const +{ + switch (type_) + { + case intValue: + return value_.int_ >= 0 && LargestUInt(value_.int_) <= LargestUInt(maxUInt); + case uintValue: + return value_.uint_ <= maxUInt; + case realValue: + return value_.real_ >= 0 && value_.real_ <= maxUInt && IsIntegral(value_.real_); + default: + break; + } + return false; +} + +bool Value::isInt64() const +{ +#if defined(JSON_HAS_INT64) + switch (type_) + { + case intValue: + return true; + case uintValue: + return value_.uint_ <= UInt64(maxInt64); + case realValue: + // Note that maxInt64 (= 2^63 - 1) is not exactly representable as a + // double, so double(maxInt64) will be rounded up to 2^63. Therefore we + // require the value to be strictly less than the limit. 
+ return value_.real_ >= double(minInt64) && value_.real_ < double(maxInt64) && + IsIntegral(value_.real_); + default: + break; + } +#endif // JSON_HAS_INT64 + return false; +} + +bool Value::isUInt64() const +{ +#if defined(JSON_HAS_INT64) + switch (type_) + { + case intValue: + return value_.int_ >= 0; + case uintValue: + return true; + case realValue: + // Note that maxUInt64 (= 2^64 - 1) is not exactly representable as a + // double, so double(maxUInt64) will be rounded up to 2^64. Therefore we + // require the value to be strictly less than the limit. + return value_.real_ >= 0 && value_.real_ < maxUInt64AsDouble && IsIntegral(value_.real_); + default: + break; + } +#endif // JSON_HAS_INT64 + return false; +} + +bool Value::isIntegral() const +{ +#if defined(JSON_HAS_INT64) + return isInt64() || isUInt64(); +#else + return isInt() || isUInt(); +#endif +} + +bool Value::isDouble() const { return type_ == realValue || isIntegral(); } + +bool Value::isNumeric() const { return isIntegral() || isDouble(); } + +bool Value::isString() const { return type_ == stringValue; } + +bool Value::isArray() const { return type_ == arrayValue; } + +bool Value::isObject() const { return type_ == objectValue; } + +void Value::setComment(const char *comment, size_t len, CommentPlacement placement) +{ + if (!comments_) + comments_ = new CommentInfo[numberOfCommentPlacement]; + if ((len > 0) && (comment[len - 1] == '\n')) + { + // Always discard trailing newline, to aid indentation. 
+ len -= 1; + } + comments_[placement].setComment(comment, len); +} + +void Value::setComment(const char *comment, CommentPlacement placement) +{ + setComment(comment, strlen(comment), placement); +} + +void Value::setComment(const JSONCPP_STRING &comment, CommentPlacement placement) +{ + setComment(comment.c_str(), comment.length(), placement); +} + +bool Value::hasComment(CommentPlacement placement) const +{ + return comments_ != 0 && comments_[placement].comment_ != 0; +} + +JSONCPP_STRING Value::getComment(CommentPlacement placement) const +{ + if (hasComment(placement)) + return comments_[placement].comment_; + return ""; +} + +void Value::setOffsetStart(ptrdiff_t start) { start_ = start; } + +void Value::setOffsetLimit(ptrdiff_t limit) { limit_ = limit; } + +ptrdiff_t Value::getOffsetStart() const { return start_; } + +ptrdiff_t Value::getOffsetLimit() const { return limit_; } + +JSONCPP_STRING Value::toStyledString() const +{ + StyledWriter writer; + return writer.write(*this); +} + +Value::const_iterator Value::begin() const +{ + switch (type_) + { + case arrayValue: + case objectValue: + if (value_.map_) + return const_iterator(value_.map_->begin()); + break; + default: + break; + } + return const_iterator(); +} + +Value::const_iterator Value::end() const +{ + switch (type_) + { + case arrayValue: + case objectValue: + if (value_.map_) + return const_iterator(value_.map_->end()); + break; + default: + break; + } + return const_iterator(); +} + +Value::iterator Value::begin() +{ + switch (type_) + { + case arrayValue: + case objectValue: + if (value_.map_) + return iterator(value_.map_->begin()); + break; + default: + break; + } + return iterator(); +} + +Value::iterator Value::end() +{ + switch (type_) + { + case arrayValue: + case objectValue: + if (value_.map_) + return iterator(value_.map_->end()); + break; + default: + break; + } + return iterator(); +} + +// class PathArgument +// ////////////////////////////////////////////////////////////////// + 
// A single component of a Path: nothing, an array index, or an object key.
PathArgument::PathArgument() : key_(), index_(), kind_(kindNone) {}

PathArgument::PathArgument(ArrayIndex index) : key_(), index_(index), kind_(kindIndex) {}

PathArgument::PathArgument(const char *key) : key_(key), index_(), kind_(kindKey) {}

PathArgument::PathArgument(const JSONCPP_STRING &key) : key_(key.c_str()), index_(), kind_(kindKey)
{
}

// class Path
// //////////////////////////////////////////////////////////////////

// Build a Path from a format string plus up to five substitution
// arguments (consumed by '%'/'[%]' placeholders in `path`).
Path::Path(const JSONCPP_STRING &path, const PathArgument &a1, const PathArgument &a2,
           const PathArgument &a3, const PathArgument &a4, const PathArgument &a5)
{
  InArgs in;
  in.push_back(&a1);
  in.push_back(&a2);
  in.push_back(&a3);
  in.push_back(&a4);
  in.push_back(&a5);
  makePath(path, in);
}

// Parse `path` into args_. Grammar handled here:
//   "[N]"  literal array index,   "[%]"  index taken from `in`,
//   "%"    key taken from `in`,   "."    key separator,
//   anything else up to the next '[' or '.' is a literal key.
void Path::makePath(const JSONCPP_STRING &path, const InArgs &in)
{
  const char *current = path.c_str();
  const char *end = current + path.length();
  InArgs::const_iterator itInArg = in.begin();
  while (current != end)
  {
    if (*current == '[')
    {
      ++current;
      if (*current == '%')
        addPathInArg(path, in, itInArg, PathArgument::kindIndex);
      else
      {
        // Accumulate a decimal literal index.
        ArrayIndex index = 0;
        for (; current != end && *current >= '0' && *current <= '9'; ++current)
          index = index * 10 + ArrayIndex(*current - '0');
        args_.push_back(index);
      }
      // A bracketed component must be closed by ']'.
      if (current == end || *++current != ']')
        invalidPath(path, int(current - path.c_str()));
    }
    else if (*current == '%')
    {
      addPathInArg(path, in, itInArg, PathArgument::kindKey);
      ++current;
    }
    else if (*current == '.' || *current == ']')
    {
      // Separators carry no content of their own.
      ++current;
    }
    else
    {
      // Literal key: runs until the next '[' or '.'.
      const char *beginName = current;
      while (current != end && !strchr("[.", *current))
        ++current;
      args_.push_back(JSONCPP_STRING(beginName, current));
    }
  }
}

// Consume the next substitution argument if it exists and has the
// expected kind; mismatches are silently dropped (see inline notes).
void Path::addPathInArg(const JSONCPP_STRING & /*path*/, const InArgs &in,
                        InArgs::const_iterator &itInArg, PathArgument::Kind kind)
{
  if (itInArg == in.end())
  {
    // Error: missing argument %d
  }
  else if ((*itInArg)->kind_ != kind)
  {
    // Error: bad argument type
  }
  else
  {
    args_.push_back(**itInArg++);
  }
}

// Malformed-path hook; currently a silent no-op.
void Path::invalidPath(const JSONCPP_STRING & /*path*/, int /*location*/)
{
  // Error: invalid path.
}

// Walk `root` along args_; any type/index/key mismatch yields Value::null.
const Value &Path::resolve(const Value &root) const
{
  const Value *node = &root;
  for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it)
  {
    const PathArgument &arg = *it;
    if (arg.kind_ == PathArgument::kindIndex)
    {
      if (!node->isArray() || !node->isValidIndex(arg.index_))
      {
        // Error: unable to resolve path (array value expected at position...
        return Value::null;
      }
      node = &((*node)[arg.index_]);
    }
    else if (arg.kind_ == PathArgument::kindKey)
    {
      if (!node->isObject())
      {
        // Error: unable to resolve path (object value expected at position...)
        return Value::null;
      }
      node = &((*node)[arg.key_]);
      if (node == &Value::nullSingleton())
      {
        // Error: unable to resolve path (object has no member named '' at
        // position...)
        return Value::null;
      }
    }
  }
  return *node;
}

// Same walk as resolve(root), but mismatches yield `defaultValue` (by
// value) instead of Value::null.
Value Path::resolve(const Value &root, const Value &defaultValue) const
{
  const Value *node = &root;
  for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it)
  {
    const PathArgument &arg = *it;
    if (arg.kind_ == PathArgument::kindIndex)
    {
      if (!node->isArray() || !node->isValidIndex(arg.index_))
        return defaultValue;
      node = &((*node)[arg.index_]);
    }
    else if (arg.kind_ == PathArgument::kindKey)
    {
      if (!node->isObject())
        return defaultValue;
      node = &((*node)[arg.key_]);
      if (node == &Value::nullSingleton())
        return defaultValue;
    }
  }
  return *node;
}

// Walk `root` creating intermediate nodes as needed (via the mutable
// operator[] overloads) and return a reference to the final node.
Value &Path::make(Value &root) const
{
  Value *node = &root;
  for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it)
  {
    const PathArgument &arg = *it;
    if (arg.kind_ == PathArgument::kindIndex)
    {
      if (!node->isArray())
      {
        // Error: node is not an array at position ...
      }
      node = &((*node)[arg.index_]);
    }
    else if (arg.kind_ == PathArgument::kindKey)
    {
      if (!node->isObject())
      {
        // Error: node is not an object at position...
      }
      node = &((*node)[arg.key_]);
    }
  }
  return *node;
}

} // namespace Json

// //////////////////////////////////////////////////////////////////////
// End of content of file: src/lib_json/json_value.cpp
// //////////////////////////////////////////////////////////////////////

// //////////////////////////////////////////////////////////////////////
// Beginning of content of file: src/lib_json/json_writer.cpp
// //////////////////////////////////////////////////////////////////////

// Copyright 2011 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +#if !defined(JSON_IS_AMALGAMATION) +#include <json/writer.h> +#include "json_tool.h" +#endif // if !defined(JSON_IS_AMALGAMATION) +#include <iomanip> +#include <memory> +#include <sstream> +#include <utility> +#include <set> +#include <cassert> +#include <cstring> +#include <cstdio> + +#if defined(_MSC_VER) && _MSC_VER >= 1200 && _MSC_VER < 1800 // Between VC++ 6.0 and VC++ 11.0 +#include <float.h> +#define isfinite _finite +#elif defined(__sun) && defined(__SVR4) // Solaris +#if !defined(isfinite) +#include <ieeefp.h> +#define isfinite finite +#endif +#elif defined(_AIX) +#if !defined(isfinite) +#include <math.h> +#define isfinite finite +#endif +#elif defined(__hpux) +#if !defined(isfinite) +#if defined(__ia64) && !defined(finite) +#define isfinite(x) ((sizeof(x) == sizeof(float) ? _Isfinitef(x) : _IsFinite(x))) +#else +#include <math.h> +#define isfinite finite +#endif +#endif +#else +#include <cmath> +#if !(defined(__QNXNTO__)) // QNX already defines isfinite +#define isfinite std::isfinite +#endif +#endif + +#if defined(_MSC_VER) +#if !defined(WINCE) && defined(__STDC_SECURE_LIB__) && _MSC_VER >= 1500 // VC++ 9.0 and above +#define snprintf sprintf_s +#elif _MSC_VER >= 1900 // VC++ 14.0 and above +#define snprintf std::snprintf +#else +#define snprintf _snprintf +#endif +#elif defined(__ANDROID__) || defined(__QNXNTO__) +#define snprintf snprintf +#elif __cplusplus >= 201103L +#if !defined(__MINGW32__) && !defined(__CYGWIN__) +#define snprintf std::snprintf +#endif +#endif + +#if defined(__BORLANDC__) +#include <float.h> +#define isfinite _finite +#define snprintf _snprintf +#endif + +#if defined(_MSC_VER) && _MSC_VER >= 1400 // VC++ 8.0 +// Disable warning about strdup being deprecated. 
+#pragma warning(disable : 4996) +#endif + +namespace Json +{ + +#if __cplusplus >= 201103L || (defined(_CPPLIB_VER) && _CPPLIB_VER >= 520) +typedef std::unique_ptr<StreamWriter> StreamWriterPtr; +#else +typedef std::auto_ptr<StreamWriter> StreamWriterPtr; +#endif + +static bool containsControlCharacter(const char *str) +{ + while (*str) + { + if (isControlCharacter(*(str++))) + return true; + } + return false; +} + +static bool containsControlCharacter0(const char *str, unsigned len) +{ + char const *end = str + len; + while (end != str) + { + if (isControlCharacter(*str) || 0 == *str) + return true; + ++str; + } + return false; +} + +JSONCPP_STRING valueToString(LargestInt value) +{ + UIntToStringBuffer buffer; + char *current = buffer + sizeof(buffer); + if (value == Value::minLargestInt) + { + uintToString(LargestUInt(Value::maxLargestInt) + 1, current); + *--current = '-'; + } + else if (value < 0) + { + uintToString(LargestUInt(-value), current); + *--current = '-'; + } + else + { + uintToString(LargestUInt(value), current); + } + assert(current >= buffer); + return current; +} + +JSONCPP_STRING valueToString(LargestUInt value) +{ + UIntToStringBuffer buffer; + char *current = buffer + sizeof(buffer); + uintToString(value, current); + assert(current >= buffer); + return current; +} + +#if defined(JSON_HAS_INT64) + +JSONCPP_STRING valueToString(Int value) { return valueToString(LargestInt(value)); } + +JSONCPP_STRING valueToString(UInt value) { return valueToString(LargestUInt(value)); } + +#endif // # if defined(JSON_HAS_INT64) + +namespace +{ +JSONCPP_STRING valueToString(double value, bool useSpecialFloats, unsigned int precision) +{ + // Allocate a buffer that is more than large enough to store the 16 digits of + // precision requested below. + char buffer[32]; + int len = -1; + + char formatString[6]; + sprintf(formatString, "%%.%dg", precision); + + // Print into the buffer. 
We need not request the alternative representation + // that always has a decimal point because JSON doesn't distingish the + // concepts of reals and integers. + if (isfinite(value)) + { + len = snprintf(buffer, sizeof(buffer), formatString, value); + } + else + { + // IEEE standard states that NaN values will not compare to themselves + if (value != value) + { + len = snprintf(buffer, sizeof(buffer), useSpecialFloats ? "NaN" : "null"); + } + else if (value < 0) + { + len = snprintf(buffer, sizeof(buffer), useSpecialFloats ? "-Infinity" : "-1e+9999"); + } + else + { + len = snprintf(buffer, sizeof(buffer), useSpecialFloats ? "Infinity" : "1e+9999"); + } + // For those, we do not need to call fixNumLoc, but it is fast. + } + assert(len >= 0); + fixNumericLocale(buffer, buffer + len); + return buffer; +} +} + +JSONCPP_STRING valueToString(double value) { return valueToString(value, false, 17); } + +JSONCPP_STRING valueToString(bool value) { return value ? "true" : "false"; } + +JSONCPP_STRING valueToQuotedString(const char *value) +{ + if (value == NULL) + return ""; + // Not sure how to handle unicode... + if (strpbrk(value, "\"\\\b\f\n\r\t") == NULL && !containsControlCharacter(value)) + return JSONCPP_STRING("\"") + value + "\""; + // We have to walk value and escape any special characters. + // Appending to JSONCPP_STRING is not efficient, but this should be rare. + // (Note: forward slashes are *not* rare, but I am not escaping them.) 
+ JSONCPP_STRING::size_type maxsize = strlen(value) * 2 + 3; // allescaped+quotes+NULL + JSONCPP_STRING result; + result.reserve(maxsize); // to avoid lots of mallocs + result += "\""; + for (const char *c = value; *c != 0; ++c) + { + switch (*c) + { + case '\"': + result += "\\\""; + break; + case '\\': + result += "\\\\"; + break; + case '\b': + result += "\\b"; + break; + case '\f': + result += "\\f"; + break; + case '\n': + result += "\\n"; + break; + case '\r': + result += "\\r"; + break; + case '\t': + result += "\\t"; + break; + // case '/': + // Even though \/ is considered a legal escape in JSON, a bare + // slash is also legal, so I see no reason to escape it. + // (I hope I am not misunderstanding something. + // blep notes: actually escaping \/ may be useful in javascript to avoid </ + // sequence. + // Should add a flag to allow this compatibility mode and prevent this + // sequence from occurring. + default: + if (isControlCharacter(*c)) + { + JSONCPP_OSTRINGSTREAM oss; + oss << "\\u" << std::hex << std::uppercase << std::setfill('0') << std::setw(4) + << static_cast<int>(*c); + result += oss.str(); + } + else + { + result += *c; + } + break; + } + } + result += "\""; + return result; +} + +// https://github.com/upcaste/upcaste/blob/master/src/upcore/src/cstring/strnpbrk.cpp +static char const *strnpbrk(char const *s, char const *accept, size_t n) +{ + assert((s || !n) && accept); + + char const *const end = s + n; + for (char const *cur = s; cur < end; ++cur) + { + int const c = *cur; + for (char const *a = accept; *a; ++a) + { + if (*a == c) + { + return cur; + } + } + } + return NULL; +} +static JSONCPP_STRING valueToQuotedStringN(const char *value, unsigned length) +{ + if (value == NULL) + return ""; + // Not sure how to handle unicode... + if (strnpbrk(value, "\"\\\b\f\n\r\t", length) == NULL && + !containsControlCharacter0(value, length)) + return JSONCPP_STRING("\"") + value + "\""; + // We have to walk value and escape any special characters. 
+ // Appending to JSONCPP_STRING is not efficient, but this should be rare. + // (Note: forward slashes are *not* rare, but I am not escaping them.) + JSONCPP_STRING::size_type maxsize = length * 2 + 3; // allescaped+quotes+NULL + JSONCPP_STRING result; + result.reserve(maxsize); // to avoid lots of mallocs + result += "\""; + char const *end = value + length; + for (const char *c = value; c != end; ++c) + { + switch (*c) + { + case '\"': + result += "\\\""; + break; + case '\\': + result += "\\\\"; + break; + case '\b': + result += "\\b"; + break; + case '\f': + result += "\\f"; + break; + case '\n': + result += "\\n"; + break; + case '\r': + result += "\\r"; + break; + case '\t': + result += "\\t"; + break; + // case '/': + // Even though \/ is considered a legal escape in JSON, a bare + // slash is also legal, so I see no reason to escape it. + // (I hope I am not misunderstanding something.) + // blep notes: actually escaping \/ may be useful in javascript to avoid </ + // sequence. + // Should add a flag to allow this compatibility mode and prevent this + // sequence from occurring. 
+ default: + if ((isControlCharacter(*c)) || (*c == 0)) + { + JSONCPP_OSTRINGSTREAM oss; + oss << "\\u" << std::hex << std::uppercase << std::setfill('0') << std::setw(4) + << static_cast<int>(*c); + result += oss.str(); + } + else + { + result += *c; + } + break; + } + } + result += "\""; + return result; +} + +// Class Writer +// ////////////////////////////////////////////////////////////////// +Writer::~Writer() {} + +// Class FastWriter +// ////////////////////////////////////////////////////////////////// + +FastWriter::FastWriter() + : yamlCompatiblityEnabled_(false), dropNullPlaceholders_(false), omitEndingLineFeed_(false) +{ +} + +void FastWriter::enableYAMLCompatibility() { yamlCompatiblityEnabled_ = true; } + +void FastWriter::dropNullPlaceholders() { dropNullPlaceholders_ = true; } + +void FastWriter::omitEndingLineFeed() { omitEndingLineFeed_ = true; } + +JSONCPP_STRING FastWriter::write(const Value &root) +{ + document_ = ""; + writeValue(root); + if (!omitEndingLineFeed_) + document_ += "\n"; + return document_; +} + +void FastWriter::writeValue(const Value &value) +{ + switch (value.type()) + { + case nullValue: + if (!dropNullPlaceholders_) + document_ += "null"; + break; + case intValue: + document_ += valueToString(value.asLargestInt()); + break; + case uintValue: + document_ += valueToString(value.asLargestUInt()); + break; + case realValue: + document_ += valueToString(value.asDouble()); + break; + case stringValue: + { + // Is NULL possible for value.string_? No. 
+ char const *str; + char const *end; + bool ok = value.getString(&str, &end); + if (ok) + document_ += valueToQuotedStringN(str, static_cast<unsigned>(end - str)); + break; + } + case booleanValue: + document_ += valueToString(value.asBool()); + break; + case arrayValue: + { + document_ += '['; + ArrayIndex size = value.size(); + for (ArrayIndex index = 0; index < size; ++index) + { + if (index > 0) + document_ += ','; + writeValue(value[index]); + } + document_ += ']'; + } + break; + case objectValue: + { + Value::Members members(value.getMemberNames()); + document_ += '{'; + for (Value::Members::iterator it = members.begin(); it != members.end(); ++it) + { + const JSONCPP_STRING &name = *it; + if (it != members.begin()) + document_ += ','; + document_ += valueToQuotedStringN(name.data(), static_cast<unsigned>(name.length())); + document_ += yamlCompatiblityEnabled_ ? ": " : ":"; + writeValue(value[name]); + } + document_ += '}'; + } + break; + } +} + +// Class StyledWriter +// ////////////////////////////////////////////////////////////////// + +StyledWriter::StyledWriter() : rightMargin_(74), indentSize_(3), addChildValues_() {} + +JSONCPP_STRING StyledWriter::write(const Value &root) +{ + document_ = ""; + addChildValues_ = false; + indentString_ = ""; + writeCommentBeforeValue(root); + writeValue(root); + writeCommentAfterValueOnSameLine(root); + document_ += "\n"; + return document_; +} + +void StyledWriter::writeValue(const Value &value) +{ + switch (value.type()) + { + case nullValue: + pushValue("null"); + break; + case intValue: + pushValue(valueToString(value.asLargestInt())); + break; + case uintValue: + pushValue(valueToString(value.asLargestUInt())); + break; + case realValue: + pushValue(valueToString(value.asDouble())); + break; + case stringValue: + { + // Is NULL possible for value.string_? No. 
+ char const *str; + char const *end; + bool ok = value.getString(&str, &end); + if (ok) + pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end - str))); + else + pushValue(""); + break; + } + case booleanValue: + pushValue(valueToString(value.asBool())); + break; + case arrayValue: + writeArrayValue(value); + break; + case objectValue: + { + Value::Members members(value.getMemberNames()); + if (members.empty()) + pushValue("{}"); + else + { + writeWithIndent("{"); + indent(); + Value::Members::iterator it = members.begin(); + for (;;) + { + const JSONCPP_STRING &name = *it; + const Value &childValue = value[name]; + writeCommentBeforeValue(childValue); + writeWithIndent(valueToQuotedString(name.c_str())); + document_ += " : "; + writeValue(childValue); + if (++it == members.end()) + { + writeCommentAfterValueOnSameLine(childValue); + break; + } + document_ += ','; + writeCommentAfterValueOnSameLine(childValue); + } + unindent(); + writeWithIndent("}"); + } + } + break; + } +} + +void StyledWriter::writeArrayValue(const Value &value) +{ + unsigned size = value.size(); + if (size == 0) + pushValue("[]"); + else + { + bool isArrayMultiLine = isMultineArray(value); + if (isArrayMultiLine) + { + writeWithIndent("["); + indent(); + bool hasChildValue = !childValues_.empty(); + unsigned index = 0; + for (;;) + { + const Value &childValue = value[index]; + writeCommentBeforeValue(childValue); + if (hasChildValue) + writeWithIndent(childValues_[index]); + else + { + writeIndent(); + writeValue(childValue); + } + if (++index == size) + { + writeCommentAfterValueOnSameLine(childValue); + break; + } + document_ += ','; + writeCommentAfterValueOnSameLine(childValue); + } + unindent(); + writeWithIndent("]"); + } + else // output on a single line + { + assert(childValues_.size() == size); + document_ += "[ "; + for (unsigned index = 0; index < size; ++index) + { + if (index > 0) + document_ += ", "; + document_ += childValues_[index]; + } + document_ += " ]"; + } + } 
+} + +bool StyledWriter::isMultineArray(const Value &value) +{ + ArrayIndex const size = value.size(); + bool isMultiLine = size * 3 >= rightMargin_; + childValues_.clear(); + for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) + { + const Value &childValue = value[index]; + isMultiLine = ((childValue.isArray() || childValue.isObject()) && childValue.size() > 0); + } + if (!isMultiLine) // check if line length > max line length + { + childValues_.reserve(size); + addChildValues_ = true; + ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]' + for (ArrayIndex index = 0; index < size; ++index) + { + if (hasCommentForValue(value[index])) + { + isMultiLine = true; + } + writeValue(value[index]); + lineLength += static_cast<ArrayIndex>(childValues_[index].length()); + } + addChildValues_ = false; + isMultiLine = isMultiLine || lineLength >= rightMargin_; + } + return isMultiLine; +} + +void StyledWriter::pushValue(const JSONCPP_STRING &value) +{ + if (addChildValues_) + childValues_.push_back(value); + else + document_ += value; +} + +void StyledWriter::writeIndent() +{ + if (!document_.empty()) + { + char last = document_[document_.length() - 1]; + if (last == ' ') // already indented + return; + if (last != '\n') // Comments may add new-line + document_ += '\n'; + } + document_ += indentString_; +} + +void StyledWriter::writeWithIndent(const JSONCPP_STRING &value) +{ + writeIndent(); + document_ += value; +} + +void StyledWriter::indent() { indentString_ += JSONCPP_STRING(indentSize_, ' '); } + +void StyledWriter::unindent() +{ + assert(indentString_.size() >= indentSize_); + indentString_.resize(indentString_.size() - indentSize_); +} + +void StyledWriter::writeCommentBeforeValue(const Value &root) +{ + if (!root.hasComment(commentBefore)) + return; + + document_ += "\n"; + writeIndent(); + const JSONCPP_STRING &comment = root.getComment(commentBefore); + JSONCPP_STRING::const_iterator iter = comment.begin(); + while (iter != 
comment.end()) + { + document_ += *iter; + if (*iter == '\n' && (iter != comment.end() && *(iter + 1) == '/')) + writeIndent(); + ++iter; + } + + // Comments are stripped of trailing newlines, so add one here + document_ += "\n"; +} + +void StyledWriter::writeCommentAfterValueOnSameLine(const Value &root) +{ + if (root.hasComment(commentAfterOnSameLine)) + document_ += " " + root.getComment(commentAfterOnSameLine); + + if (root.hasComment(commentAfter)) + { + document_ += "\n"; + document_ += root.getComment(commentAfter); + document_ += "\n"; + } +} + +bool StyledWriter::hasCommentForValue(const Value &value) +{ + return value.hasComment(commentBefore) || value.hasComment(commentAfterOnSameLine) || + value.hasComment(commentAfter); +} + +// Class StyledStreamWriter +// ////////////////////////////////////////////////////////////////// + +StyledStreamWriter::StyledStreamWriter(JSONCPP_STRING indentation) + : document_(NULL), rightMargin_(74), indentation_(indentation), addChildValues_() +{ +} + +void StyledStreamWriter::write(JSONCPP_OSTREAM &out, const Value &root) +{ + document_ = &out; + addChildValues_ = false; + indentString_ = ""; + indented_ = true; + writeCommentBeforeValue(root); + if (!indented_) + writeIndent(); + indented_ = true; + writeValue(root); + writeCommentAfterValueOnSameLine(root); + *document_ << "\n"; + document_ = NULL; // Forget the stream, for safety. +} + +void StyledStreamWriter::writeValue(const Value &value) +{ + switch (value.type()) + { + case nullValue: + pushValue("null"); + break; + case intValue: + pushValue(valueToString(value.asLargestInt())); + break; + case uintValue: + pushValue(valueToString(value.asLargestUInt())); + break; + case realValue: + pushValue(valueToString(value.asDouble())); + break; + case stringValue: + { + // Is NULL possible for value.string_? No. 
+ char const *str; + char const *end; + bool ok = value.getString(&str, &end); + if (ok) + pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end - str))); + else + pushValue(""); + break; + } + case booleanValue: + pushValue(valueToString(value.asBool())); + break; + case arrayValue: + writeArrayValue(value); + break; + case objectValue: + { + Value::Members members(value.getMemberNames()); + if (members.empty()) + pushValue("{}"); + else + { + writeWithIndent("{"); + indent(); + Value::Members::iterator it = members.begin(); + for (;;) + { + const JSONCPP_STRING &name = *it; + const Value &childValue = value[name]; + writeCommentBeforeValue(childValue); + writeWithIndent(valueToQuotedString(name.c_str())); + *document_ << " : "; + writeValue(childValue); + if (++it == members.end()) + { + writeCommentAfterValueOnSameLine(childValue); + break; + } + *document_ << ","; + writeCommentAfterValueOnSameLine(childValue); + } + unindent(); + writeWithIndent("}"); + } + } + break; + } +} + +void StyledStreamWriter::writeArrayValue(const Value &value) +{ + unsigned size = value.size(); + if (size == 0) + pushValue("[]"); + else + { + bool isArrayMultiLine = isMultineArray(value); + if (isArrayMultiLine) + { + writeWithIndent("["); + indent(); + bool hasChildValue = !childValues_.empty(); + unsigned index = 0; + for (;;) + { + const Value &childValue = value[index]; + writeCommentBeforeValue(childValue); + if (hasChildValue) + writeWithIndent(childValues_[index]); + else + { + if (!indented_) + writeIndent(); + indented_ = true; + writeValue(childValue); + indented_ = false; + } + if (++index == size) + { + writeCommentAfterValueOnSameLine(childValue); + break; + } + *document_ << ","; + writeCommentAfterValueOnSameLine(childValue); + } + unindent(); + writeWithIndent("]"); + } + else // output on a single line + { + assert(childValues_.size() == size); + *document_ << "[ "; + for (unsigned index = 0; index < size; ++index) + { + if (index > 0) + *document_ << ", "; 
+ *document_ << childValues_[index]; + } + *document_ << " ]"; + } + } +} + +bool StyledStreamWriter::isMultineArray(const Value &value) +{ + ArrayIndex const size = value.size(); + bool isMultiLine = size * 3 >= rightMargin_; + childValues_.clear(); + for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) + { + const Value &childValue = value[index]; + isMultiLine = ((childValue.isArray() || childValue.isObject()) && childValue.size() > 0); + } + if (!isMultiLine) // check if line length > max line length + { + childValues_.reserve(size); + addChildValues_ = true; + ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]' + for (ArrayIndex index = 0; index < size; ++index) + { + if (hasCommentForValue(value[index])) + { + isMultiLine = true; + } + writeValue(value[index]); + lineLength += static_cast<ArrayIndex>(childValues_[index].length()); + } + addChildValues_ = false; + isMultiLine = isMultiLine || lineLength >= rightMargin_; + } + return isMultiLine; +} + +void StyledStreamWriter::pushValue(const JSONCPP_STRING &value) +{ + if (addChildValues_) + childValues_.push_back(value); + else + *document_ << value; +} + +void StyledStreamWriter::writeIndent() +{ + // blep intended this to look at the so-far-written string + // to determine whether we are already indented, but + // with a stream we cannot do that. So we rely on some saved state. + // The caller checks indented_. 
+ *document_ << '\n' << indentString_; +} + +void StyledStreamWriter::writeWithIndent(const JSONCPP_STRING &value) +{ + if (!indented_) + writeIndent(); + *document_ << value; + indented_ = false; +} + +void StyledStreamWriter::indent() { indentString_ += indentation_; } + +void StyledStreamWriter::unindent() +{ + assert(indentString_.size() >= indentation_.size()); + indentString_.resize(indentString_.size() - indentation_.size()); +} + +void StyledStreamWriter::writeCommentBeforeValue(const Value &root) +{ + if (!root.hasComment(commentBefore)) + return; + + if (!indented_) + writeIndent(); + const JSONCPP_STRING &comment = root.getComment(commentBefore); + JSONCPP_STRING::const_iterator iter = comment.begin(); + while (iter != comment.end()) + { + *document_ << *iter; + if (*iter == '\n' && (iter != comment.end() && *(iter + 1) == '/')) + // writeIndent(); // would include newline + *document_ << indentString_; + ++iter; + } + indented_ = false; +} + +void StyledStreamWriter::writeCommentAfterValueOnSameLine(const Value &root) +{ + if (root.hasComment(commentAfterOnSameLine)) + *document_ << ' ' << root.getComment(commentAfterOnSameLine); + + if (root.hasComment(commentAfter)) + { + writeIndent(); + *document_ << root.getComment(commentAfter); + } + indented_ = false; +} + +bool StyledStreamWriter::hasCommentForValue(const Value &value) +{ + return value.hasComment(commentBefore) || value.hasComment(commentAfterOnSameLine) || + value.hasComment(commentAfter); +} + +////////////////////////// +// BuiltStyledStreamWriter + +/// Scoped enums are not available until C++11. +struct CommentStyle +{ + /// Decide whether to write comments. + enum Enum + { + None, ///< Drop all comments. + Most, ///< Recover odd behavior of previous versions (not implemented yet). + All ///< Keep all comments. 
+ }; +}; + +struct BuiltStyledStreamWriter : public StreamWriter +{ + BuiltStyledStreamWriter(JSONCPP_STRING const &indentation, CommentStyle::Enum cs, + JSONCPP_STRING const &colonSymbol, JSONCPP_STRING const &nullSymbol, + JSONCPP_STRING const &endingLineFeedSymbol, bool useSpecialFloats, + unsigned int precision); + int write(Value const &root, JSONCPP_OSTREAM *sout) JSONCPP_OVERRIDE; + +private: + void writeValue(Value const &value); + void writeArrayValue(Value const &value); + bool isMultineArray(Value const &value); + void pushValue(JSONCPP_STRING const &value); + void writeIndent(); + void writeWithIndent(JSONCPP_STRING const &value); + void indent(); + void unindent(); + void writeCommentBeforeValue(Value const &root); + void writeCommentAfterValueOnSameLine(Value const &root); + static bool hasCommentForValue(const Value &value); + + typedef std::vector<JSONCPP_STRING> ChildValues; + + ChildValues childValues_; + JSONCPP_STRING indentString_; + unsigned int rightMargin_; + JSONCPP_STRING indentation_; + CommentStyle::Enum cs_; + JSONCPP_STRING colonSymbol_; + JSONCPP_STRING nullSymbol_; + JSONCPP_STRING endingLineFeedSymbol_; + bool addChildValues_ : 1; + bool indented_ : 1; + bool useSpecialFloats_ : 1; + unsigned int precision_; +}; +BuiltStyledStreamWriter::BuiltStyledStreamWriter(JSONCPP_STRING const &indentation, + CommentStyle::Enum cs, + JSONCPP_STRING const &colonSymbol, + JSONCPP_STRING const &nullSymbol, + JSONCPP_STRING const &endingLineFeedSymbol, + bool useSpecialFloats, unsigned int precision) + : rightMargin_(74), indentation_(indentation), cs_(cs), colonSymbol_(colonSymbol), + nullSymbol_(nullSymbol), endingLineFeedSymbol_(endingLineFeedSymbol), addChildValues_(false), + indented_(false), useSpecialFloats_(useSpecialFloats), precision_(precision) +{ +} +int BuiltStyledStreamWriter::write(Value const &root, JSONCPP_OSTREAM *sout) +{ + sout_ = sout; + addChildValues_ = false; + indented_ = true; + indentString_ = ""; + 
writeCommentBeforeValue(root); + if (!indented_) + writeIndent(); + indented_ = true; + writeValue(root); + writeCommentAfterValueOnSameLine(root); + *sout_ << endingLineFeedSymbol_; + sout_ = NULL; + return 0; +} +void BuiltStyledStreamWriter::writeValue(Value const &value) +{ + switch (value.type()) + { + case nullValue: + pushValue(nullSymbol_); + break; + case intValue: + pushValue(valueToString(value.asLargestInt())); + break; + case uintValue: + pushValue(valueToString(value.asLargestUInt())); + break; + case realValue: + pushValue(valueToString(value.asDouble(), useSpecialFloats_, precision_)); + break; + case stringValue: + { + // Is NULL is possible for value.string_? No. + char const *str; + char const *end; + bool ok = value.getString(&str, &end); + if (ok) + pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end - str))); + else + pushValue(""); + break; + } + case booleanValue: + pushValue(valueToString(value.asBool())); + break; + case arrayValue: + writeArrayValue(value); + break; + case objectValue: + { + Value::Members members(value.getMemberNames()); + if (members.empty()) + pushValue("{}"); + else + { + writeWithIndent("{"); + indent(); + Value::Members::iterator it = members.begin(); + for (;;) + { + JSONCPP_STRING const &name = *it; + Value const &childValue = value[name]; + writeCommentBeforeValue(childValue); + writeWithIndent(valueToQuotedStringN(name.data(), static_cast<unsigned>(name.length()))); + *sout_ << colonSymbol_; + writeValue(childValue); + if (++it == members.end()) + { + writeCommentAfterValueOnSameLine(childValue); + break; + } + *sout_ << ","; + writeCommentAfterValueOnSameLine(childValue); + } + unindent(); + writeWithIndent("}"); + } + } + break; + } +} + +void BuiltStyledStreamWriter::writeArrayValue(Value const &value) +{ + unsigned size = value.size(); + if (size == 0) + pushValue("[]"); + else + { + bool isMultiLine = (cs_ == CommentStyle::All) || isMultineArray(value); + if (isMultiLine) + { + 
writeWithIndent("["); + indent(); + bool hasChildValue = !childValues_.empty(); + unsigned index = 0; + for (;;) + { + Value const &childValue = value[index]; + writeCommentBeforeValue(childValue); + if (hasChildValue) + writeWithIndent(childValues_[index]); + else + { + if (!indented_) + writeIndent(); + indented_ = true; + writeValue(childValue); + indented_ = false; + } + if (++index == size) + { + writeCommentAfterValueOnSameLine(childValue); + break; + } + *sout_ << ","; + writeCommentAfterValueOnSameLine(childValue); + } + unindent(); + writeWithIndent("]"); + } + else // output on a single line + { + assert(childValues_.size() == size); + *sout_ << "["; + if (!indentation_.empty()) + *sout_ << " "; + for (unsigned index = 0; index < size; ++index) + { + if (index > 0) + *sout_ << ((!indentation_.empty()) ? ", " : ","); + *sout_ << childValues_[index]; + } + if (!indentation_.empty()) + *sout_ << " "; + *sout_ << "]"; + } + } +} + +bool BuiltStyledStreamWriter::isMultineArray(Value const &value) +{ + ArrayIndex const size = value.size(); + bool isMultiLine = size * 3 >= rightMargin_; + childValues_.clear(); + for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) + { + Value const &childValue = value[index]; + isMultiLine = ((childValue.isArray() || childValue.isObject()) && childValue.size() > 0); + } + if (!isMultiLine) // check if line length > max line length + { + childValues_.reserve(size); + addChildValues_ = true; + ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]' + for (ArrayIndex index = 0; index < size; ++index) + { + if (hasCommentForValue(value[index])) + { + isMultiLine = true; + } + writeValue(value[index]); + lineLength += static_cast<ArrayIndex>(childValues_[index].length()); + } + addChildValues_ = false; + isMultiLine = isMultiLine || lineLength >= rightMargin_; + } + return isMultiLine; +} + +void BuiltStyledStreamWriter::pushValue(JSONCPP_STRING const &value) +{ + if (addChildValues_) + 
childValues_.push_back(value); + else + *sout_ << value; +} + +void BuiltStyledStreamWriter::writeIndent() +{ + // blep intended this to look at the so-far-written string + // to determine whether we are already indented, but + // with a stream we cannot do that. So we rely on some saved state. + // The caller checks indented_. + + if (!indentation_.empty()) + { + // In this case, drop newlines too. + *sout_ << '\n' << indentString_; + } +} + +void BuiltStyledStreamWriter::writeWithIndent(JSONCPP_STRING const &value) +{ + if (!indented_) + writeIndent(); + *sout_ << value; + indented_ = false; +} + +void BuiltStyledStreamWriter::indent() { indentString_ += indentation_; } + +void BuiltStyledStreamWriter::unindent() +{ + assert(indentString_.size() >= indentation_.size()); + indentString_.resize(indentString_.size() - indentation_.size()); +} + +void BuiltStyledStreamWriter::writeCommentBeforeValue(Value const &root) +{ + if (cs_ == CommentStyle::None) + return; + if (!root.hasComment(commentBefore)) + return; + + if (!indented_) + writeIndent(); + const JSONCPP_STRING &comment = root.getComment(commentBefore); + JSONCPP_STRING::const_iterator iter = comment.begin(); + while (iter != comment.end()) + { + *sout_ << *iter; + if (*iter == '\n' && (iter != comment.end() && *(iter + 1) == '/')) + // writeIndent(); // would write extra newline + *sout_ << indentString_; + ++iter; + } + indented_ = false; +} + +void BuiltStyledStreamWriter::writeCommentAfterValueOnSameLine(Value const &root) +{ + if (cs_ == CommentStyle::None) + return; + if (root.hasComment(commentAfterOnSameLine)) + *sout_ << " " + root.getComment(commentAfterOnSameLine); + + if (root.hasComment(commentAfter)) + { + writeIndent(); + *sout_ << root.getComment(commentAfter); + } +} + +// static +bool BuiltStyledStreamWriter::hasCommentForValue(const Value &value) +{ + return value.hasComment(commentBefore) || value.hasComment(commentAfterOnSameLine) || + value.hasComment(commentAfter); +} + 
+/////////////// +// StreamWriter + +StreamWriter::StreamWriter() : sout_(NULL) {} +StreamWriter::~StreamWriter() {} +StreamWriter::Factory::~Factory() {} +StreamWriterBuilder::StreamWriterBuilder() { setDefaults(&settings_); } +StreamWriterBuilder::~StreamWriterBuilder() {} +StreamWriter *StreamWriterBuilder::newStreamWriter() const +{ + JSONCPP_STRING indentation = settings_["indentation"].asString(); + JSONCPP_STRING cs_str = settings_["commentStyle"].asString(); + bool eyc = settings_["enableYAMLCompatibility"].asBool(); + bool dnp = settings_["dropNullPlaceholders"].asBool(); + bool usf = settings_["useSpecialFloats"].asBool(); + unsigned int pre = settings_["precision"].asUInt(); + CommentStyle::Enum cs = CommentStyle::All; + if (cs_str == "All") + { + cs = CommentStyle::All; + } + else if (cs_str == "None") + { + cs = CommentStyle::None; + } + else + { + throwRuntimeError("commentStyle must be 'All' or 'None'"); + } + JSONCPP_STRING colonSymbol = " : "; + if (eyc) + { + colonSymbol = ": "; + } + else if (indentation.empty()) + { + colonSymbol = ":"; + } + JSONCPP_STRING nullSymbol = "null"; + if (dnp) + { + nullSymbol = ""; + } + if (pre > 17) + pre = 17; + JSONCPP_STRING endingLineFeedSymbol = ""; + return new BuiltStyledStreamWriter(indentation, cs, colonSymbol, nullSymbol, endingLineFeedSymbol, + usf, pre); +} +static void getValidWriterKeys(std::set<JSONCPP_STRING> *valid_keys) +{ + valid_keys->clear(); + valid_keys->insert("indentation"); + valid_keys->insert("commentStyle"); + valid_keys->insert("enableYAMLCompatibility"); + valid_keys->insert("dropNullPlaceholders"); + valid_keys->insert("useSpecialFloats"); + valid_keys->insert("precision"); +} +bool StreamWriterBuilder::validate(Json::Value *invalid) const +{ + Json::Value my_invalid; + if (!invalid) + invalid = &my_invalid; // so we do not need to test for NULL + Json::Value &inv = *invalid; + std::set<JSONCPP_STRING> valid_keys; + getValidWriterKeys(&valid_keys); + Value::Members keys = 
settings_.getMemberNames(); + size_t n = keys.size(); + for (size_t i = 0; i < n; ++i) + { + JSONCPP_STRING const &key = keys[i]; + if (valid_keys.find(key) == valid_keys.end()) + { + inv[key] = settings_[key]; + } + } + return 0u == inv.size(); +} +Value &StreamWriterBuilder::operator[](JSONCPP_STRING key) { return settings_[key]; } +// static +void StreamWriterBuilder::setDefaults(Json::Value *settings) +{ + //! [StreamWriterBuilderDefaults] + (*settings)["commentStyle"] = "All"; + (*settings)["indentation"] = "\t"; + (*settings)["enableYAMLCompatibility"] = false; + (*settings)["dropNullPlaceholders"] = false; + (*settings)["useSpecialFloats"] = false; + (*settings)["precision"] = 17; + //! [StreamWriterBuilderDefaults] +} + +JSONCPP_STRING writeString(StreamWriter::Factory const &builder, Value const &root) +{ + JSONCPP_OSTRINGSTREAM sout; + StreamWriterPtr const writer(builder.newStreamWriter()); + writer->write(root, &sout); + return sout.str(); +} + +JSONCPP_OSTREAM &operator<<(JSONCPP_OSTREAM &sout, Value const &root) +{ + StreamWriterBuilder builder; + StreamWriterPtr const writer(builder.newStreamWriter()); + writer->write(root, &sout); + return sout; +} + +} // namespace Json + +// ////////////////////////////////////////////////////////////////////// +// End of content of file: src/lib_json/json_writer.cpp +// ////////////////////////////////////////////////////////////////////// diff --git a/runtime/libs/misc/CMakeLists.txt b/runtime/libs/misc/CMakeLists.txt new file mode 100644 index 000000000..557d403ec --- /dev/null +++ b/runtime/libs/misc/CMakeLists.txt @@ -0,0 +1,11 @@ +# Library `nnfw_lib_misc` +file(GLOB_RECURSE NNFW_UTILITY_SRCS "src/*.cpp") + +add_library(nnfw_lib_misc STATIC ${NNFW_UTILITY_SRCS}) +target_include_directories(nnfw_lib_misc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) +set_target_properties(nnfw_lib_misc PROPERTIES POSITION_INDEPENDENT_CODE ON) +target_link_libraries(nnfw_lib_misc PRIVATE nnfw_common) 
+target_link_libraries(nnfw_lib_misc PRIVATE nnfw_coverage) + +add_executable(nnfw_tensor_index_iterator "examples/tensor_index_iterator.cpp") +target_link_libraries(nnfw_tensor_index_iterator nnfw_lib_misc) diff --git a/runtime/libs/misc/examples/tensor_index_iterator.cpp b/runtime/libs/misc/examples/tensor_index_iterator.cpp new file mode 100644 index 000000000..d94da9f49 --- /dev/null +++ b/runtime/libs/misc/examples/tensor_index_iterator.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "misc/tensor/IndexIterator.h" + +#include <array> + +#include <iostream> +#include <algorithm> + +#include <cassert> + +void test_iterate(void) +{ + const nnfw::misc::tensor::Shape shape{3, 4, 7}; + + std::array<int, 3 * 4 * 7> array; + + array.fill(0); + + using nnfw::misc::tensor::iterate; + using nnfw::misc::tensor::Index; + + iterate(shape) << [&](const Index &index) { + assert(index.rank() == shape.rank()); + + const uint32_t rank = index.rank(); + + uint32_t offset = index.at(0); + + for (uint32_t axis = 1; axis < rank; ++axis) + { + offset *= shape.dim(axis); + offset += index.at(axis); + } + + array[offset] += 1; + }; + + assert(std::all_of(array.begin(), array.end(), [](int num) { return num == 1; })); +} + +int main(int argc, char **argv) +{ + test_iterate(); + + nnfw::misc::tensor::Shape shape{3, 4, 3, 4}; + + std::cout << "Iterate over tensor{3, 4, 3, 4}" << std::endl; + + nnfw::misc::tensor::iterate(shape) << [](const nnfw::misc::tensor::Index &index) { + std::cout << "rank: " << index.rank() << std::endl; + + for (uint32_t d = 0; d < index.rank(); ++d) + { + std::cout << " offset(" << d << ") = " << index.at(d) << std::endl; + } + }; + + return 0; +} diff --git a/runtime/libs/misc/include/misc/EnvVar.h b/runtime/libs/misc/include/misc/EnvVar.h new file mode 100644 index 000000000..db28a3c7d --- /dev/null +++ b/runtime/libs/misc/include/misc/EnvVar.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file EnvVar.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::misc::EnvVar class
+ */
+
+#ifndef __NNFW_MISC_ENV_VAR__
+#define __NNFW_MISC_ENV_VAR__
+
+#include <algorithm>
+#include <array>
+#include <cstdlib>
+#include <string>
+
+namespace nnfw
+{
+namespace misc
+{
+/**
+ * @brief Class to access environment variable
+ */
+class EnvVar
+{
+public:
+  /**
+   * @brief Construct a new EnvVar object
+   * @param[in] key environment variable
+   */
+  EnvVar(const std::string &key)
+  {
+    const char *value = std::getenv(key.c_str());
+    if (value == nullptr)
+    {
+      // An empty string is considered as an empty value
+      _value = "";
+    }
+    else
+    {
+      _value = value;
+    }
+  }
+
+  /**
+   * @brief Get environment variable of string type
+   * @param[in] def Default value of environment variable
+   * @return Default value passed as a parameter when there is no environment variable,
+   *         otherwise the value of environment variable passed into constructor
+   */
+  std::string asString(const std::string &def) const
+  {
+    if (_value.empty())
+      return def;
+    return _value;
+  }
+
+  /**
+   * @brief Get environment variable of boolean type
+   * @param[in] def Default value of environment variable
+   * @return Default value passed as a parameter when there is no environment variable,
+   *         otherwise the value of environment variable passed into constructor
+   */
+  bool asBool(bool def) const
+  {
+    if (_value.empty())
+      return def;
+    static const std::array<std::string, 5> false_list{"0", "OFF", "FALSE", "N", "NO"};
+    auto false_found = std::find(false_list.begin(), false_list.end(), _value);
+    return (false_found == false_list.end());
+  }
+
+  /**
+   * @brief Get environment variable of int type
+   * @param[in] def Default value of environment variable
+   * @return Default value passed as a parameter when there is no environment variable,
+   *         otherwise the value of 
environment variable passed into constructor + */ + int asInt(int def) const + { + if (_value.empty()) + return def; + return std::stoi(_value); + } + + /** + * @brief Get environment variable of float type + * @param[in] def Default value of environment variable + * @return Defaut value passed as a parameter when there is no environment variable, + * otherwise the value of environment variable passed into constructor + */ + float asFloat(float def) const + { + if (_value.empty()) + return def; + return std::stof(_value); + } + +private: + std::string _value; +}; + +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_ENV_VAR__ diff --git a/runtime/libs/misc/include/misc/EventCollector.h b/runtime/libs/misc/include/misc/EventCollector.h new file mode 100644 index 000000000..530a90906 --- /dev/null +++ b/runtime/libs/misc/include/misc/EventCollector.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * @brief Adapter that receives BEGIN/END notifications and forwards them
 *        to an EventRecorder (onEvent is defined in the corresponding .cpp)
 */
class EventCollector
{
public:
  // Marks whether an event is the start or the end of a traced activity
  enum class Edge
  {
    BEGIN,
    END
  };

  // A single notification: which edge, on which backend, with which label
  struct Event
  {
    Edge edge;
    std::string backend;
    std::string label;
  };

public:
  /**
   * @brief Construct a collector writing into @c rec
   * @param[in] rec Destination recorder (not owned; must outlive this object)
   */
  EventCollector(EventRecorder *rec) : _rec{rec}
  {
    // DO NOTHING
  }

public:
  /**
   * @brief Forward one event to the recorder
   * @param[in] event The BEGIN/END event to record
   */
  void onEvent(const Event &event);

protected:
  EventRecorder *_rec; // Destination recorder (non-owning)
};
// One entry of the Chrome Trace Event stream; member names mirror the
// JSON keys of the trace format ("ph" = phase, "ts" = timestamp, ...).
struct Event
{
  std::string name;
  std::string tid;
  std::string ph; /* REQUIRED */
  std::string ts; /* REQUIRED */
};

// Duration ("B"/"E" phase) event — currently adds no fields beyond Event
struct DurationEvent : public Event
{
  // TO BE FILLED
};

// Counter ("C" phase) event: a set of named series values at one timestamp
struct CounterEvent : public Event
{
  std::map<std::string, std::string> values;
};

//
// Record Event as Chrome Trace Event File Format
//
// Refrence: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit
//
// emit() may be called from multiple threads (serialized via _mu);
// the emit/writeToFile implementations live in the corresponding .cpp.
class EventRecorder
{
public:
  EventRecorder() = default;

public:
  // Append one duration event to the in-memory trace buffer
  void emit(const DurationEvent &evt);
  // Append one counter event to the in-memory trace buffer
  void emit(const CounterEvent &evt);

public:
  // Dump the accumulated trace to @c os
  void writeToFile(std::ostream &os);

private:
  std::mutex _mu;        // Guards _ss against concurrent emit() calls
  std::stringstream _ss; // Accumulated trace text
};
namespace nnfw
{
namespace misc
{
// Benchmark support
namespace benchmark
{

/**
 * @brief Wrapper holding a reference to a duration object into which
 *        elapsed time is accumulated
 */
template <typename T> class Accumulator
{
public:
  /**
   * @brief Construct a new Accumulator object
   * @param[in] ref Duration object that receives the accumulated time
   */
  Accumulator(T &ref) : _acc(ref)
  {
    // DO NOTHING
  }

public:
  /**
   * @brief Access the wrapped duration
   * @return Reference to the duration passed to the constructor
   */
  T &operator()(void) { return _acc; }

private:
  T &_acc; // Target duration (not owned)
};

/**
 * @brief Run a callable and add its wall-clock time to the accumulator
 * @tparam T Duration type used by @c std::chrono::duration_cast
 * @tparam Callable Function type to benchmark
 * @param[in] acc Accumulator receiving the elapsed time
 * @param[in] cb Function to run and measure
 * @return The same accumulator, after the elapsed time was added
 */
template <typename T, typename Callable>
Accumulator<T> &operator<<(Accumulator<T> &&acc, Callable cb)
{
  using clock = std::chrono::steady_clock;

  const auto start = clock::now();
  cb();
  const auto stop = clock::now();

  acc() += std::chrono::duration_cast<T>(stop - start);

  return acc;
}

/**
 * @brief Build an Accumulator over @c out for use as: measure(out) << fn;
 */
template <typename T> Accumulator<T> measure(T &out) { return Accumulator<T>(out); }

} // namespace benchmark
} // namespace misc
} // namespace nnfw
namespace nnfw
{
namespace misc
{
namespace feature
{

/**
 * @brief Index of one element of a (batch, channel, row, column) feature map
 */
class Index
{
public:
  /**
   * @brief Construct a default Index (batch 1, all coordinates 0)
   *
   * Fix: members now carry in-class initializers, so a default-constructed
   * Index holds well-defined values; previously all four fields were left
   * indeterminate and reading them was undefined behavior.
   */
  Index() = default;

public:
  /**
   * @brief Construct Index object with three indexes of dimensions
   * @param[in] ch The depth index
   * @param[in] row The height index
   * @param[in] col The width index
   */
  Index(int32_t ch, int32_t row, int32_t col) : _batch{1}, _ch{ch}, _row{row}, _col{col}
  {
    // DO NOTHING
  }
  /**
   * @brief Construct Index object with four indexes of dimensions
   * @param[in] batch The batch index
   * @param[in] ch The depth index
   * @param[in] row The height index
   * @param[in] col The width index
   */
  Index(int32_t batch, int32_t ch, int32_t row, int32_t col)
      : _batch{batch}, _ch{ch}, _row{row}, _col{col}
  {
    // DO NOTHING
  }

public:
  /**
   * @brief Get the batch index
   * @return The batch index
   */
  int32_t batch(void) const { return _batch; }
  /**
   * @brief Get the depth index
   * @return The depth index
   */
  int32_t ch(void) const { return _ch; }
  /**
   * @brief Get the height index
   * @return The height index
   */
  int32_t row(void) const { return _row; }
  /**
   * @brief Get the width index
   * @return The width index
   */
  int32_t col(void) const { return _col; }

public:
  /**
   * @brief Get the batch index as the lvalue reference
   * @return The reference of the batch value
   */
  int32_t &batch(void) { return _batch; }
  /**
   * @brief Get the depth index as the lvalue reference
   * @return The reference of the depth value
   */
  int32_t &ch(void) { return _ch; }
  /**
   * @brief Get the height index as the lvalue reference
   * @return The reference of the height value
   */
  int32_t &row(void) { return _row; }
  /**
   * @brief Get the width index as the lvalue reference
   * @return The reference of the width value
   */
  int32_t &col(void) { return _col; }

private:
  int32_t _batch{1}; /**< The batch index (1 matches the 3-index constructor) */
  int32_t _ch{0};    /**< The depth index */
  int32_t _row{0};   /**< The height index */
  int32_t _col{0};   /**< The width index */
};

} // namespace feature
} // namespace misc
} // namespace nnfw
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file IndexIterator.h + * @brief This file contains IndexIterator class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_MISC_FEATURE_INDEX_ITERATOR_H__ +#define __NNFW_MISC_FEATURE_INDEX_ITERATOR_H__ + +#include "misc/feature/Shape.h" + +namespace nnfw +{ +namespace misc +{ +namespace feature +{ + +/** + * @brief Class to iterate Callable with Index of feature + */ +class IndexIterator +{ +public: + /** + * @brief Construct IndexIterator object with Shape of feature + * @param[in] shape Shape reference of feature + */ + IndexIterator(const Shape &shape) : _shape{shape} + { + // DO NOTHING + } + +public: + /** + * @brief Call a function iterated + * @param[in] cb A callback function + * @return Current IndexIterator object + */ + template <typename Callable> IndexIterator &iter(Callable cb) + { + for (int32_t batch = 0; batch < _shape.N; ++batch) + { + for (int32_t ch = 0; ch < _shape.C; ++ch) + { + for (int32_t row = 0; row < _shape.H; ++row) + { + for (int32_t col = 0; col < _shape.W; ++col) + { + cb(batch, ch, row, col); + } + } + } + } + + return (*this); + } + +private: + /** + * @brief Shape for feature + */ + const Shape _shape; +}; + +/** + * @brief Create an object of IndexIterator for feature + * @param[in] Shape reference of feature + * @return Created IndexIterator object + */ +static inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; } + +/** + * @brief Call a function iterated using IndexIterator of feature + * Overloaded operator<< + * @param[in] it An IndexIterator reference + * @param[in] cb A callback function + * @return created IndexIterator object + */ +template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb) +{ + return it.iter(cb); +} + +} // namespace feature +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_FEATURE_INDEX_ITERATOR_H__ diff 
--git a/runtime/libs/misc/include/misc/feature/Object.h b/runtime/libs/misc/include/misc/feature/Object.h new file mode 100644 index 000000000..7af0e28f4 --- /dev/null +++ b/runtime/libs/misc/include/misc/feature/Object.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file Object.h + * @brief This file contains Object class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_MISC_FEATURE_OBJECT_H__ +#define __NNFW_MISC_FEATURE_OBJECT_H__ + +#include "misc/feature/Shape.h" +#include "misc/feature/Index.h" +#include "misc/feature/Reader.h" + +#include <vector> + +namespace nnfw +{ +namespace misc +{ +namespace feature +{ + +/** + * @brief Class to have information of the operand for feature + */ +template <typename T> class Object final : public Reader<T> +{ +public: + using Generator = std::function<T(const Shape &shape, const Index &index)>; + +public: + /** + * @brief Construct Object object with Shape of feature and set value used by Generator + * @param[in] shape Reference of Shape for feature + * @param[in] fn A function to set values of operand tensor + */ + Object(const Shape &shape, const Generator &fn) : _shape{shape} + { + _value.resize(_shape.C * _shape.H * _shape.W); + + for (int32_t ch = 0; ch < _shape.C; ++ch) + { + for (int32_t row = 0; row < _shape.H; ++row) + { + for (int32_t col = 0; col < _shape.W; ++col) + { + 
_value.at(offsetOf(ch, row, col)) = fn(_shape, Index{ch, row, col}); + } + } + } + } + +public: + /** + * @brief Get Shape of feature as the reference + * @return The reference of the width value + */ + const Shape &shape(void) const { return _shape; } + +public: + /** + * @brief Get the value used by three indexes + * @param[in] ch The depth index + * @param[in] row The height index + * @param[in] col The width index + * @return The value at the offset + */ + T at(uint32_t ch, uint32_t row, uint32_t col) const override + { + return _value.at(offsetOf(ch, row, col)); + } + +private: + /** + * @brief Get the offset value at three indexes + * @param[in] ch The depth index + * @param[in] row The height index + * @param[in] col The width index + * @return The offset value + */ + uint32_t offsetOf(uint32_t ch, uint32_t row, uint32_t col) const + { + return ch * _shape.H * _shape.W + row * _shape.W + col; + } + +private: + /** + * @brief Shape of operand + */ + Shape _shape; + /** + * @brief The tensor vector of operand + */ + std::vector<T> _value; +}; + +} // namespace feature +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_FEATURE_OBJECT_H__ diff --git a/runtime/libs/misc/include/misc/feature/Reader.h b/runtime/libs/misc/include/misc/feature/Reader.h new file mode 100644 index 000000000..b09209789 --- /dev/null +++ b/runtime/libs/misc/include/misc/feature/Reader.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/**
 * @brief Read-only accessor interface over a feature map
 *
 * Implementations must provide both the single-batch (3-index) and the
 * batched (4-index) element accessors.
 */
template <typename T> struct Reader
{
  /**
   * @brief Destruct Reader object using default destructor
   */
  virtual ~Reader() = default;

  /**
   * @brief Get the value used by three indexes
   * @param[in] ch The depth index
   * @param[in] row The height index
   * @param[in] col The width index
   * @return The value at the offset
   */
  virtual T at(uint32_t ch, uint32_t row, uint32_t col) const = 0;
  /**
   * @brief Get the value used by four indexes
   * @param[in] batch The batch index
   * @param[in] ch The depth index
   * @param[in] row The height index
   * @param[in] col The width index
   * @return The value at the offset
   */
  virtual T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const = 0;
};
namespace nnfw
{
namespace misc
{
namespace feature
{

/**
 * @brief Dimensions (batch, depth, height, width) of a feature map
 */
struct Shape
{
  // Fix: in-class initializers give a default-constructed Shape well-defined
  // values (one empty batch); previously all four fields were indeterminate
  // and reading them was undefined behavior. N defaults to 1 to match the
  // three-argument constructor.
  int32_t N{1}; /**< The batch value */
  int32_t C{0}; /**< The depth value */
  int32_t H{0}; /**< The height value */
  int32_t W{0}; /**< The width value */

  /**
   * @brief Construct a default Shape (batch 1, all other dimensions 0)
   */
  Shape() = default;
  /**
   * @brief Construct Shape object with three values of dimensions
   * @param[in] depth The depth value
   * @param[in] height The height value
   * @param[in] width The width value
   */
  Shape(int32_t depth, int32_t height, int32_t width) : N{1}, C{depth}, H{height}, W{width}
  {
    // DO NOTHING
  }
  /**
   * @brief Construct Shape object with four values of dimensions
   * @param[in] batch The batch value
   * @param[in] depth The depth value
   * @param[in] height The height value
   * @param[in] width The width value
   */
  Shape(int32_t batch, int32_t depth, int32_t height, int32_t width)
      : N{batch}, C{depth}, H{height}, W{width}
  {
    // DO NOTHING
  }
};

} // namespace feature
} // namespace misc
} // namespace nnfw
/**
 * @brief Bundles a feature's shape with a Reader so the pair can be pretty-printed
 *        via the stream operator below
 */
template <typename T> class TextFormatter
{
public:
  /**
   * @brief Construct TextFormatter object with an operand's information.
   * @param[in] shape The shape of an operand (held by reference; must outlive this object)
   * @param[in] data The data of an operand (held by reference; must outlive this object)
   */
  TextFormatter(const Shape &shape, const Reader<T> &data) : _shape(shape), _data(data)
  {
    // DO NOTHING
  }

public:
  /**
   * @brief Get Shape of feature as the lvalue reference
   * @return Shape of feature
   */
  const Shape &shape(void) const { return _shape; }
  /**
   * @brief Get Reader<T> that can read the data of an operand
   * @return Reader<T>
   */
  const Reader<T> &data(void) const { return _data; }

private:
  // Non-owning references: the formatter only borrows the operand's
  // shape and data for the duration of the print.
  const Shape &_shape;
  const Reader<T> &_data;
};

/**
 * @brief Print a feature channel by channel, row by row, as fixed-point numbers
 * @param[in] os Standard output stream
 * @param[in] fmt TextFormatter to print information of an operand
 * @return Standard output stream
 *
 * NOTE(review): shape.C/H/W are int32_t while the loop counters are uint32_t —
 * mixed-sign comparison; assumes all dimensions are non-negative. Confirm with callers.
 */
template <typename T> std::ostream &operator<<(std::ostream &os, const TextFormatter<T> &fmt)
{
  const auto &shape = fmt.shape();

  for (uint32_t ch = 0; ch < shape.C; ++ch)
  {
    os << "  Channel " << ch << ":" << std::endl;
    for (uint32_t row = 0; row < shape.H; ++row)
    {
      os << "    ";
      for (uint32_t col = 0; col < shape.W; ++col)
      {
        const auto value = fmt.data().at(ch, row, col);
        // Fixed width keeps columns aligned across rows
        os << std::right;
        os << std::fixed;
        os << std::setw(std::numeric_limits<T>::digits10 + 2);
        os << std::setprecision(5);
        os << value;
        os << " ";
      }
      os << std::endl;
    }
  }

  return os;
}
namespace nnfw
{
namespace misc
{
namespace fp32
{

/**
 * @brief Get the difference between two float values as a relative value.
 * @param[in] lhs A float value to be compared
 * @param[in] rhs A float value to be compared
 * @return A relative value of difference between two float values.
 */
inline float relative_diff(float lhs, float rhs)
{
  const auto diff = std::fabs(lhs - rhs);
  const auto base = std::max(std::fabs(lhs), std::fabs(rhs));

  // Fix: when both inputs are (signed) zero, diff and base are both 0 and
  // the division below produced NaN. Two zeros are identical, so their
  // relative difference is 0.
  if (base == 0.0f)
    return 0.0f;

  return diff / base;
}

/**
 * @brief Verify that an obtained float value is equal to the expected float value
 *        by using FLT_EPSILON
 * @param[in] expected An expected float value to be compared
 * @param[in] obtained An obtained float value to be compared
 * @param[in] tolerance A tolerance value (multiplier of FLT_EPSILON)
 * @return @c true if both values are equal, otherwise @c false
 */
inline bool epsilon_equal(float expected, float obtained, uint32_t tolerance = 1)
{
  // Two NaNs are treated as equal (ordinary comparison would say they differ)
  if (std::isnan(expected) && std::isnan(obtained))
  {
    return true;
  }

  // Let's use relative epsilon comparision
  const auto diff = std::fabs(expected - obtained);
  const auto max = std::max(std::fabs(expected), std::fabs(obtained));

  return diff <= (max * FLT_EPSILON * tolerance);
}

/**
 * @brief Verify that an obtained float value is equal to the expected float value
 *        by comparing absolute tolerance value
 * @param[in] expected An expected float value to be compared
 * @param[in] obtained An obtained float value to be compared
 * @param[in] tolerance A tolerance value (absolute difference bound)
 * @return @c true if both values are equal, otherwise @c false
 */
inline bool absolute_epsilon_equal(float expected, float obtained, float tolerance = 0.001)
{
  // Two NaNs are treated as equal (ordinary comparison would say they differ)
  if (std::isnan(expected) && std::isnan(obtained))
  {
    return true;
  }

  // Let's use absolute epsilon comparision
  const auto diff = std::fabs(expected - obtained);

  return diff <= tolerance;
}

} // namespace fp32
} // namespace misc
} // namespace nnfw
Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file IndexIterator.h + * @brief This file contains IndexIterator class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_MISC_KERNEL_INDEX_ITERATOR_H__ +#define __NNFW_MISC_KERNEL_INDEX_ITERATOR_H__ + +#include "misc/kernel/Shape.h" + +namespace nnfw +{ +namespace misc +{ +namespace kernel +{ + +/** + * @brief Class to iterate Callable with Index of kernel + */ +class IndexIterator +{ +public: + /** + * @brief Construct IndexIterator object with Shape of kernel + * @param[in] shape Shape reference of feature + */ + IndexIterator(const Shape &shape) : _shape{shape} + { + // DO NOTHING + } + +public: + /** + * @brief Call a function iterated + * @param[in] cb A callback function + * @return Current IndexIterator object + */ + template <typename Callable> IndexIterator &iter(Callable cb) + { + for (int32_t nth = 0; nth < _shape.N; ++nth) + { + for (int32_t ch = 0; ch < _shape.C; ++ch) + { + for (int32_t row = 0; row < _shape.H; ++row) + { + for (int32_t col = 0; col < _shape.W; ++col) + { + cb(nth, ch, row, col); + } + } + } + } + + return (*this); + } + +private: + const Shape _shape; /**< Shape for kernel */ +}; + +/** + * @brief Create an object of IndexIterator for kernel + * @param[in] shape reference of feature + * @return Created IndexIterator object + */ +inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; } + +/** + * 
@brief Call a function iterated using IndexIterator of kernel + * Overloaded operator<< + * @param[in] it An IndexIterator reference + * @param[in] cb A callback function + * @return Created IndexIterator object + */ +template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb) +{ + return it.iter(cb); +} + +} // namespace kernel +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_FEATURE_INDEX_ITERATOR_H__ diff --git a/runtime/libs/misc/include/misc/kernel/Reader.h b/runtime/libs/misc/include/misc/kernel/Reader.h new file mode 100644 index 000000000..019c809ee --- /dev/null +++ b/runtime/libs/misc/include/misc/kernel/Reader.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * @file Reader.h
 * @brief Read-only accessor interface for kernel data
 * @ingroup COM_AI_RUNTIME
 */

#ifndef __NNFW_MISC_KERNEL_READER_H__
#define __NNFW_MISC_KERNEL_READER_H__

#include <cstdint>

namespace nnfw
{
namespace misc
{
namespace kernel
{

/**
 * @brief Interface that reads one kernel element per (nth, ch, row, col) coordinate.
 *        Implementations map the coordinate onto their own storage layout.
 */
template <typename T> struct Reader
{
  /// Virtual destructor so implementations can be destroyed through this interface.
  virtual ~Reader() = default;

  /**
   * @brief Read one element of the kernel
   * @param[in] nth The kernel index
   * @param[in] ch  The channel index
   * @param[in] row The row index
   * @param[in] col The column index
   * @return The element at the given coordinate
   */
  virtual T at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const = 0;
};

} // namespace kernel
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_KERNEL_READER_H__
/**
 * @file Shape.h
 * @brief This file contains Shape structure for kernel
 * @ingroup COM_AI_RUNTIME
 */

#ifndef __NNFW_MISC_KERNEL_SHAPE_H__
#define __NNFW_MISC_KERNEL_SHAPE_H__

#include <cstdint>

namespace nnfw
{
namespace misc
{
namespace kernel
{

/**
 * @brief Dimensions of a kernel, in N/C/H/W order
 */
struct Shape
{
  int32_t N; /**< Number of kernels */
  int32_t C; /**< Number of channels */
  int32_t H; /**< Height */
  int32_t W; /**< Width */

  /// Default constructor; dimensions are left uninitialized.
  Shape() = default;

  /**
   * @brief Construct a Shape from the four dimension extents
   * @param[in] count  Number of kernels (N)
   * @param[in] depth  Number of channels (C)
   * @param[in] height Height (H)
   * @param[in] width  Width (W)
   */
  Shape(int32_t count, int32_t depth, int32_t height, int32_t width)
      : N{count}, C{depth}, H{height}, W{width}
  {
    // DO NOTHING
  }
};

} // namespace kernel
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_KERNEL_SHAPE_H__
+ */ + +/** + * @file IndexIterator.h + * @brief This file contains IndexIterator class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_MISC_MATRIX_INDEX_ITERATOR_H__ +#define __NNFW_MISC_MATRIX_INDEX_ITERATOR_H__ + +#include "misc/matrix/Shape.h" + +namespace nnfw +{ +namespace misc +{ +namespace matrix +{ + +/** + * @brief Class to iterate Callable with Index of matrix + */ +class IndexIterator +{ +public: + /** + * @brief Construct IndexIterator object with Shape of matrix + * @param[in] shape Shape reference of matrix + */ + IndexIterator(const Shape &shape) : _shape{shape} + { + // DO NOTHING + } + +public: + /** + * @brief Call a function iterated + * @param[in] cb A callback function + * @return Current IndexIterator object + */ + template <typename Callable> IndexIterator &iter(Callable cb) + { + for (uint32_t row = 0; row < _shape.H; ++row) + { + for (uint32_t col = 0; col < _shape.W; ++col) + { + cb(row, col); + } + } + + return (*this); + } + +private: + /** + * @brief Shape for matrix + */ + const Shape _shape; +}; + +/** + * @brief Create an object of IndexIterator for matrix + * @param[in] Shape reference of matrix + * @return Created IndexIterator object + */ +inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; } + +/** + * @brief Call a function iterated using IndexIterator of matrix + * Overloaded operator<< + * @param[in] it An IndexIterator reference + * @param[in] cb A callback function + * @return created IndexIterator object + */ +template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb) +{ + return it.iter(cb); +} + +} // namespace matrix +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_MATRIX_INDEX_ITERATOR_H__ diff --git a/runtime/libs/misc/include/misc/matrix/Reader.h b/runtime/libs/misc/include/misc/matrix/Reader.h new file mode 100644 index 000000000..ea222c9d1 --- /dev/null +++ b/runtime/libs/misc/include/misc/matrix/Reader.h @@ -0,0 +1,59 @@ +/* + * 
/**
 * @file Reader.h
 * @brief Read-only accessor interface for matrix data
 * @ingroup COM_AI_RUNTIME
 */

#ifndef __NNFW_MISC_MATRIX_READER_H__
#define __NNFW_MISC_MATRIX_READER_H__

#include <cstdint>

namespace nnfw
{
namespace misc
{
namespace matrix
{

/**
 * @brief Interface that reads one matrix element per (row, col) coordinate.
 *        Implementations map the coordinate onto their own storage layout.
 */
template <typename T> struct Reader
{
  /// Virtual destructor so implementations can be destroyed through this interface.
  virtual ~Reader() = default;

  /**
   * @brief Read one element of the matrix
   * @param[in] row The row index
   * @param[in] col The column index
   * @return The element at the given coordinate
   */
  virtual T at(uint32_t row, uint32_t col) const = 0;
};

} // namespace matrix
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_MATRIX_READER_H__
/**
 * @file Shape.h
 * @brief This file contains Shape class for matrix
 * @ingroup COM_AI_RUNTIME
 */

#ifndef __NNFW_MISC_MATRIX_SHAPE_H__
#define __NNFW_MISC_MATRIX_SHAPE_H__

#include <cstdint>

namespace nnfw
{
namespace misc
{
namespace matrix
{

/**
 * @brief Dimensions of a matrix: height (rows) and width (columns)
 */
struct Shape
{
  int32_t H; /**< The height value */
  int32_t W; /**< The width value */

  /// Default constructor; dimensions are left uninitialized.
  Shape() = default;

  /**
   * @brief Construct a Shape from the two dimension extents
   * @param[in] height The height value
   * @param[in] width  The width value
   */
  Shape(int32_t height, int32_t width) : H{height}, W{width}
  {
    // DO NOTHING
  }
};

} // namespace matrix
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_MATRIX_SHAPE_H__
#ifndef __NNFW_MISC_POLYMORPHIC_DOWNCAST_H__
#define __NNFW_MISC_POLYMORPHIC_DOWNCAST_H__

#include <cassert>
#include <memory>

namespace nnfw
{
namespace misc
{

/**
 * @brief Downcast a pointer, verifying the cast with dynamic_cast in debug builds.
 *        With NDEBUG defined the check compiles away and only the static_cast remains.
 * @tparam DstType Destination pointer type (e.g. Derived *)
 * @tparam SrcType Source pointee type (deduced)
 * @param[in] x Pointer to downcast
 * @return @p x converted to DstType
 */
template <typename DstType, typename SrcType> inline DstType polymorphic_downcast(SrcType *x)
{
  assert(dynamic_cast<DstType>(x) == x);
  return static_cast<DstType>(x);
}

/**
 * @brief Downcast a reference, verifying the cast with dynamic_cast in debug builds.
 * @tparam DstType Destination reference type (e.g. Derived &)
 * @tparam SrcType Source referent type (deduced)
 * @param[in] x Reference to downcast
 * @return @p x converted to DstType
 */
template <typename DstType, typename SrcType> inline DstType polymorphic_downcast(SrcType &x)
{
  // std::addressof is used so the check works even if the type overloads operator&.
  assert(std::addressof(dynamic_cast<DstType>(x)) == std::addressof(x));
  return static_cast<DstType>(x);
}

} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_POLYMORPHIC_DOWNCAST_H__
+ */ + +/** + * @file string_helpers.h + * @ingroup COM_AI_RUNTIME + * @brief This file contains helper functions for std::string + */ + +#include <ostream> +#include <string> +#include <sstream> +#include <vector> + +namespace +{ + +template <typename Arg> void _str(std::ostream &os, Arg &&arg) { os << std::forward<Arg>(arg); } + +template <typename Arg, typename... Args> void _str(std::ostream &os, Arg &&arg, Args &&... args) +{ + _str(os, std::forward<Arg>(arg)); + _str(os, std::forward<Args>(args)...); +} + +} // namespace {anonymous} + +namespace nnfw +{ +namespace misc +{ + +inline std::vector<std::string> split(const std::string &s, char delim) +{ + std::stringstream ss(s); + std::string item; + std::vector<std::string> elems; + while (std::getline(ss, item, delim)) + { + elems.push_back(std::move(item)); + } + return elems; +} + +template <typename... Args> std::string str(Args &&... args) +{ + std::stringstream ss; + _str(ss, std::forward<Args>(args)...); + return ss.str(); +} + +} // namespace misc +} // namespace nnfw diff --git a/runtime/libs/misc/include/misc/tensor/Comparator.h b/runtime/libs/misc/include/misc/tensor/Comparator.h new file mode 100644 index 000000000..80f53043c --- /dev/null +++ b/runtime/libs/misc/include/misc/tensor/Comparator.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * @file Comparator.h
 * @ingroup COM_AI_RUNTIME
 * @brief This file contains nnfw::misc::tensor::Comparator class
 */

#ifndef __NNFW_MISC_TENSOR_COMPARATOR_H__
#define __NNFW_MISC_TENSOR_COMPARATOR_H__

#include "misc/tensor/Index.h"
#include "misc/tensor/Shape.h"
#include "misc/tensor/Reader.h"
#include "misc/tensor/Diff.h"

#include <functional>

#include <vector>

namespace nnfw
{
namespace misc
{
namespace tensor
{

/**
 * @brief Class to compare two tensors (expected and obtained to compare)
 *
 * The element-wise equality criterion is supplied by the caller as a
 * predicate; compare() is declared here and defined in the library source.
 */
class Comparator
{
public:
  /**
   * @brief Construct a new @c Comparator object
   * @param[in] fn Predicate that returns true when two float values are
   *               considered equal (copied into the comparator)
   */
  Comparator(const std::function<bool(float lhs, float rhs)> &fn) : _compare_fn{fn}
  {
    // DO NOTHING
  }

public:
  /**
   * @brief Struct to observe comparison results
   */
  struct Observer
  {
    /**
     * @brief Get notification of comparison result at every index of two tensors
     * @param[in] index Index of tensors compared
     * @param[in] expected Expected value of element at @c index
     * @param[in] obtained Obtained value of element at @c index
     * @return N/A
     */
    virtual void notify(const Index &index, float expected, float obtained) = 0;
  };

public:
  /**
   * @brief Compare two tensors
   * @param[in] shape Shape of two tensors
   * @param[in] expected @c Reader<float> object that accesses expected tensor
   * @param[in] obtained @c Reader<float> object that accesses obtained tensor
   * @param[in] observer @c Observer notified of expected value and obtained value at every index
   *                     (may be null; defined out-of-line)
   * @return @c std::vector<Diff<float>> containing information of failed comparison
   */
  // NOTE Observer should live longer than comparator
  std::vector<Diff<float>> compare(const Shape &shape, const Reader<float> &expected,
                                   const Reader<float> &obtained,
                                   Observer *observer = nullptr) const;

private:
  // Equality predicate applied to every element pair during compare().
  std::function<bool(float lhs, float rhs)> _compare_fn;
};

} // namespace tensor
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_TENSOR_COMPARATOR_H__
// namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_TENSOR_COMPARATOR_H__ diff --git a/runtime/libs/misc/include/misc/tensor/Diff.h b/runtime/libs/misc/include/misc/tensor/Diff.h new file mode 100644 index 000000000..c41a97987 --- /dev/null +++ b/runtime/libs/misc/include/misc/tensor/Diff.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file Diff.h + * @ingroup COM_AI_RUNTIME + * @brief This file contains nnfw::misc::tensor::Diff struct + */ + +#ifndef __NNFW_MISC_TENSOR_DIFF_H__ +#define __NNFW_MISC_TENSOR_DIFF_H__ + +#include "misc/tensor/Index.h" + +namespace nnfw +{ +namespace misc +{ +namespace tensor +{ + +/** + * @brief Struct to have information after comparing two elements of two tensors + */ +template <typename T> struct Diff +{ + Index index; /**< Index of elements in two tensors, which turn out to be different */ + + T expected; /**< Expected value of element of first tensor */ + T obtained; /**< Obtained value of element of second tensor */ + + /** + * @brief Construct a new @c Diff object + * @param[in] i Initial value of index + */ + Diff(const Index &i) : index(i) + { + // DO NOTHING + } + + /** + * @brief Construct a new @c Diff object + * @param[in] i Index value + * @param[in] e Expected value of element of first tensor + * @param[in] o Obtained value of element of second tensor + */ + Diff(const Index &i, const T 
&e, const T &o) : index(i), expected{e}, obtained{o} + { + // DO NOTHING + } +}; + +} // namespace tensor +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_TENSOR_DIFF_H__ diff --git a/runtime/libs/misc/include/misc/tensor/Index.h b/runtime/libs/misc/include/misc/tensor/Index.h new file mode 100644 index 000000000..a633b4ce0 --- /dev/null +++ b/runtime/libs/misc/include/misc/tensor/Index.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * @file Index.h
 * @ingroup COM_AI_RUNTIME
 * @brief This file contains nnfw::misc::tensor::Index struct
 */
#ifndef __NNFW_MISC_TENSOR_INDEX_H__
#define __NNFW_MISC_TENSOR_INDEX_H__

#include <cstdint>
#include <cstddef>

#include <vector>
#include <initializer_list>

namespace nnfw
{
namespace misc
{
namespace tensor
{

/**
 * @brief Multi-dimensional index into a tensor: one offset per dimension
 */
struct Index
{
public:
  /**
   * @brief Construct an @c Index of the given rank with all offsets zero-initialized
   * @param[in] rank Number of dimensions
   */
  Index(uint32_t rank) { _offsets.resize(rank); }

public:
  /**
   * @brief Construct an @c Index from an explicit list of offsets
   * @param[in] offsets One offset per dimension; the rank equals the list size
   */
  Index(std::initializer_list<int32_t> offsets) : _offsets{offsets}
  {
    // DO NOTHING
  }

public:
  /**
   * @brief Get the rank (number of dimensions)
   * @return Rank of this index
   * @note The static_cast is safe: the vector size originates from a
   *       uint32_t rank (or an initializer list) and never exceeds it.
   */
  uint32_t rank(void) const { return static_cast<uint32_t>(_offsets.size()); }

public:
  /**
   * @brief Read the offset of the n'th dimension
   * @param[in] n Dimension
   * @return Offset of dimension @p n
   */
  int32_t at(uint32_t n) const { return _offsets.at(n); }

  /**
   * @brief Access the offset of the n'th dimension for modification
   * @param[in] n Dimension
   * @return Mutable reference to the offset of dimension @p n
   */
  int32_t &at(uint32_t n) { return _offsets.at(n); }

private:
  std::vector<int32_t> _offsets; // one entry per dimension
};

/**
 * @brief Copy an @c Index with its dimension order reversed
 * @param[in] origin @c Index object to copy
 * @return A new @c Index whose i-th offset is origin's (rank-1-i)-th offset
 * @note This is used to convert NNAPI tensor index to ARM tensor index or vice versa
 */
inline static Index copy_reverse(const Index &origin)
{
  const uint32_t rank = origin.rank();
  Index reversed(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    reversed.at(axis) = origin.at(rank - 1 - axis);
  }

  return reversed;
}

} // namespace tensor
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_TENSOR_INDEX_H__
/**
 * @file IndexEnumerator.h
 * @ingroup COM_AI_RUNTIME
 * @brief This file contains nnfw::misc::tensor::IndexEnumerator class
 */

#ifndef __NNFW_MISC_TENSOR_INDEX_ENUMERATOR_H__
#define __NNFW_MISC_TENSOR_INDEX_ENUMERATOR_H__

#include "misc/tensor/Shape.h"
#include "misc/tensor/Index.h"

namespace nnfw
{
namespace misc
{
namespace tensor
{
/**
 * @brief Class to enumerate index of a tensor
 *
 * Walks every valid index of the given shape, odometer-style: advance()
 * increments the first axis (from axis 0) that still has room, and resets
 * all axes before it to zero. _cursor tracks the axis currently being
 * incremented; enumeration ends when _cursor runs past the last axis.
 */
class IndexEnumerator
{
public:
  /**
   * @brief Construct a new @c IndexEnumerator object
   * @param[in] shape Shape of tensor of which index will be enumerate
   */
  explicit IndexEnumerator(const Shape &shape) : _shape(shape), _cursor(0), _index(shape.rank())
  {
    const uint32_t rank = _shape.rank();

    // Start enumeration at the all-zero index.
    for (uint32_t axis = 0; axis < rank; ++axis)
    {
      _index.at(axis) = 0;
    }

    // Position the cursor on the first axis with a non-zero extent; if every
    // axis has extent 0 (or rank is 0), _cursor reaches rank and valid() is
    // false from the start.
    for (_cursor = 0; _cursor < rank; ++_cursor)
    {
      if (_index.at(_cursor) < _shape.dim(_cursor))
      {
        break;
      }
    }
  }

public:
  /**
   * @brief Prevent constructing @c IndexEnumerator object by using R-value reference
   */
  IndexEnumerator(IndexEnumerator &&) = delete;
  /**
   * @brief Prevent copy constructor
   */
  IndexEnumerator(const IndexEnumerator &) = delete;

public:
  /**
   * @brief Check if more enumeration is available
   * @return @c true if more @c advance() is available, otherwise @c false
   */
  bool valid(void) const { return _cursor < _shape.rank(); }

public:
  /**
   * @brief Get the current index to enumerate
   * @return Current index
   */
  const Index &curr(void) const { return _index; }

public:
  /**
   * @brief Advance index by +1 (odometer increment)
   */
  void advance(void)
  {
    const uint32_t rank = _shape.rank();

    // Find axis to be updated: the first axis whose offset can still grow.
    while ((_cursor < rank) && !(_index.at(_cursor) + 1 < _shape.dim(_cursor)))
    {
      ++_cursor;
    }

    // Every axis is at its maximum: enumeration is exhausted
    // (_cursor == rank makes valid() return false).
    if (_cursor == rank)
    {
      return;
    }

    // Update index
    _index.at(_cursor) += 1;

    // Reset all lower axes to zero, like an odometer rollover.
    for (uint32_t axis = 0; axis < _cursor; ++axis)
    {
      _index.at(axis) = 0;
    }

    // Update cursor: restart the search from axis 0 on the next advance().
    _cursor = 0;
  }

public:
  const Shape _shape; //!< Shape to enumerate

private:
  uint32_t _cursor; // axis currently being incremented; == rank() when done
  Index _index;     // current index value
};

} // namespace tensor
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_TENSOR_INDEX_ENUMERATOR_H__
+ */ + +/** + * @file IndexFormatter.h + * @ingroup COM_AI_RUNTIME + * @brief This file contains nnfw::misc::tensor::IndexFormatter class + */ + +#ifndef __NNFW_MISC_TENSOR_INDEX_FORMATTER_H__ +#define __NNFW_MISC_TENSOR_INDEX_FORMATTER_H__ + +#include "misc/tensor/Index.h" + +#include <ostream> + +namespace nnfw +{ +namespace misc +{ +namespace tensor +{ + +/** + * @brief Class to send @c Index object to output stream + */ +class IndexFormatter +{ +public: + /** + * @brief Construct a new @c IndexFormatter object + * @param[in] index index to be sent to output stream + */ + IndexFormatter(const nnfw::misc::tensor::Index &index) : _index(index) + { + // DO NOTHING + } + +public: + /** + * @brief Get an @c Index object + * @return @c Index object previously passed to the constructor + */ + const nnfw::misc::tensor::Index &index(void) const { return _index; } + +private: + const nnfw::misc::tensor::Index &_index; +}; + +/** + * @brief Send @c IndexFormatter object to output stream + * @param[in] os Output stream + * @param[in] fmt @c IndexFormatter object that is sent to output stream + * @return Output stream + */ +std::ostream &operator<<(std::ostream &os, const IndexFormatter &fmt); + +} // namespace tensor +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_TENSOR_INDEX_FORMATTER_H__ diff --git a/runtime/libs/misc/include/misc/tensor/IndexIterator.h b/runtime/libs/misc/include/misc/tensor/IndexIterator.h new file mode 100644 index 000000000..f6428e19e --- /dev/null +++ b/runtime/libs/misc/include/misc/tensor/IndexIterator.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file IndexIterator.h + * @ingroup COM_AI_RUNTIME + * @brief This file contains nnfw::misc::tensor::IndexIterator class and + * helper function and operator + */ +#ifndef __NNFW_MISC_TENSOR_INDEX_ITERATOR_H__ +#define __NNFW_MISC_TENSOR_INDEX_ITERATOR_H__ + +#include "misc/tensor/Shape.h" +#include "misc/tensor/Index.h" +#include "misc/tensor/IndexEnumerator.h" + +namespace nnfw +{ +namespace misc +{ +namespace tensor +{ + +/** + * @brief Class to iterate indexes available for given shape + */ +class IndexIterator +{ +public: + /** + * @brief Construct a new @c IndexIterator object + * @param[in] shape Shape of tensor of which index will be iterated + */ + IndexIterator(const Shape &shape) : _shape(shape) + { + // DO NOTHING + } + +public: + /** + * @brief Construct a new IndexIterator object using reference + * @param[in] IndexIterator @c IndexIterator object to move + */ + IndexIterator(IndexIterator &&) = default; + + /** + * @brief Prevent copy constructor + */ + IndexIterator(const IndexIterator &) = delete; + +public: + /** + * @brief Iterate all available indexes and run a function for each index + * @param[in] fn Function that requires an index as a parameter. 
+ * @return @c IndexIterator object + */ + template <typename Callable> IndexIterator &iter(Callable fn) + { + for (IndexEnumerator e{_shape}; e.valid(); e.advance()) + { + fn(e.curr()); + } + + return (*this); + } + +private: + const Shape &_shape; +}; + +/** + * @brief Get an @c IndexItator object + * @param[in] shape Shape of tensor of which index will be iterated + * @return @c IndexIterator object + */ +inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; } + +/** + * @brief Iterate all indexes and apply a function + * @param[in] it @c IndexIterator object that is constructed with a tensor shape + * @param[in] cb A function that will receive a specific index. + * Inside the function, the index is used to manipulate tensor element. + * @return @c IndexIterator object + */ +template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb) +{ + return it.iter(cb); +} + +} // namespace tensor +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_TENSOR_INDEX_ITERATOR_H__ diff --git a/runtime/libs/misc/include/misc/tensor/NonIncreasingStride.h b/runtime/libs/misc/include/misc/tensor/NonIncreasingStride.h new file mode 100644 index 000000000..3bc0c115c --- /dev/null +++ b/runtime/libs/misc/include/misc/tensor/NonIncreasingStride.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file NonIncreasingStride.h + * @ingroup COM_AI_RUNTIME + * @brief This file contains nnfw::misc::tensor::NonIncreasingStride class + */ +#ifndef __NNFW_MISC_TENSOR_NON_INCREASING_STRIDE_H__ +#define __NNFW_MISC_TENSOR_NON_INCREASING_STRIDE_H__ + +#include "misc/tensor/Shape.h" +#include "misc/tensor/Index.h" + +#include <vector> + +namespace nnfw +{ +namespace misc +{ +namespace tensor +{ + +/** + * @brief Class to represent strides where stride[N-1] >= stride[N] holds for all N < rank + */ +class NonIncreasingStride +{ +public: + /** + * @brief Initialize the stride data using @c Shape + * @param[in] shape to build stride info + * @return N/A + */ + void init(const Shape &shape) + { + _stride.resize(shape.rank()); + + // Scalar + if (shape.rank() == 0) + return; + + _stride.at(shape.rank() - 1) = 1; + + for (uint32_t axis = shape.rank() - 1; axis > 0; --axis) + { + _stride.at(axis - 1) = _stride.at(axis) * shape.dim(axis); + } + } + +public: + /** + * @brief Get an stride value for specific axis + * @param[in] axis Axis of stride + * @return The value of stride + */ + uint32_t at(uint32_t axis) const { return _stride.at(axis); } + +public: + /** + * @brief Get the 1-D offset of specified index for n-D tensor + * @param index @c Index object + * @return 1-D offset of index + */ + uint32_t offset(const Index &index) const; + +private: + std::vector<uint32_t> _stride; +}; + +} // namespace tensor +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_TENSOR_NON_INCREASING_STRIDE_H__ diff --git a/runtime/libs/misc/include/misc/tensor/Object.h b/runtime/libs/misc/include/misc/tensor/Object.h new file mode 100644 index 000000000..cba4f1baf --- /dev/null +++ b/runtime/libs/misc/include/misc/tensor/Object.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file Object.h + * @ingroup COM_AI_RUNTIME + * @brief This file contains nnfw::misc::tensor::Object class + */ + +#ifndef __NNFW_MISC_TENSOR_OBJECT_H__ +#define __NNFW_MISC_TENSOR_OBJECT_H__ + +#include "misc/tensor/Shape.h" +#include "misc/tensor/Index.h" +#include "misc/tensor/IndexIterator.h" +#include "misc/tensor/NonIncreasingStride.h" +#include "misc/tensor/Reader.h" + +#include <vector> + +namespace nnfw +{ +namespace misc +{ +namespace tensor +{ + +/** + * @brief Class to build a tensor using specific generator + * @tparam T Type of tensor element + */ + +template <typename T> class Object final : public Reader<T> +{ +public: + /** + * @brief Function to generate tensor element + */ + using Generator = std::function<T(const Shape &shape, const Index &index)>; + +public: + /** + * @brief Construct a new @c Object object + * @param[in] shape Tensor shape + * @param[in] fn Function to generate tensor elements + */ + Object(const Shape &shape, const Generator &fn) : _shape{shape} + { + // Set 'stride' + _stride.init(shape); + + // Handle scalar object + if (shape.rank() == 0) + { + _values.resize(1); + _values.at(0) = fn(_shape, 0); + } + else + { + // Pre-allocate buffer + _values.resize(_shape.dim(0) * _stride.at(0)); + + // Set 'value' + iterate(_shape) << [this, &fn](const Index &index) { + _values.at(_stride.offset(index)) = fn(_shape, index); + }; + } + } + +public: + /** + * @brief Get reference of shape + * @return Reference of shape + */ + const Shape &shape(void) const { return _shape; } + +public: + /** + * @brief Get 
and element of tensor + * @param[in] index Index of a tensor element + * @return Value of tensor element + */ + T at(const Index &index) const override { return _values.at(_stride.offset(index)); } + +private: + Shape _shape; + NonIncreasingStride _stride; + +private: + std::vector<T> _values; +}; + +} // namespace tensor +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_FEATURE_OBJECT_H__ diff --git a/runtime/libs/misc/include/misc/tensor/Reader.h b/runtime/libs/misc/include/misc/tensor/Reader.h new file mode 100644 index 000000000..9175a913e --- /dev/null +++ b/runtime/libs/misc/include/misc/tensor/Reader.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * @file Reader.h
 * @ingroup COM_AI_RUNTIME
 * @brief This file contains nnfw::misc::tensor::Reader struct
 */

#ifndef __NNFW_MISC_TENSOR_READER_H__
#define __NNFW_MISC_TENSOR_READER_H__

#include "misc/tensor/Index.h"

namespace nnfw
{
namespace misc
{
namespace tensor
{

/**
 * @brief Struct to read element of tensor (abstract read-only accessor interface)
 * @tparam T Type of elements in tensor
 */
template <typename T> struct Reader
{
  /**
   * @brief Destroy the Reader object
   */
  virtual ~Reader() = default;

  /**
   * @brief Get an element of tensor
   * @param[in] index Index specifying indexes of tensor element
   * @return The value of specified element
   */
  virtual T at(const Index &index) const = 0;
};

} // namespace tensor
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_TENSOR_READER_H__
/**
 * @file Shape.h
 * @ingroup COM_AI_RUNTIME
 * @brief This file contains nnfw::misc::tensor::Shape class
 */

#ifndef __NNFW_MISC_TENSOR_SHAPE_H__
#define __NNFW_MISC_TENSOR_SHAPE_H__

#include <cstdint>
#include <cstddef>
#include <deque>
#include <initializer_list>
#include <ostream>
#include <string>
#include <cassert>

namespace nnfw
{
namespace misc
{
namespace tensor
{

/**
 * @brief Class to represent shape of a tensor
 */
class Shape
{
public:
  /**
   * @brief Construct a new Shape object with the given rank; every dimension starts at 0
   * @param[in] rank Rank of a tensor
   */
  Shape(uint32_t rank) : _dimensions(rank)
  {
    // DO NOTHING - member initializer already value-initialized all dimensions
  }

public:
  /**
   * @brief Construct a new Shape object from a dimension list
   * @param[in] dimensions @c initializer_list<int32_t> of dimensions of tensor
   */
  Shape(const std::initializer_list<int32_t> &dimensions) : _dimensions{dimensions}
  {
    // Guard against lists whose size cannot be represented as uint32_t (see rank())
    assert(dimensions.size() <= 0xFFFFFFFF);
  }

  /**
   * @brief Construct a new Shape object as a copy of another
   * @param[in] origin @c Shape object to copy
   */
  Shape(const Shape &origin) = default;

public:
  /**
   * @brief Add dimension to the beginning
   * @param[in] d dimension to add to the beginning
   * @return N/A
   */
  void prepend(int32_t d) { _dimensions.push_front(d); }

  /**
   * @brief Add dimension to the back
   * @param[in] d dimension to add to the back
   * @return N/A
   */
  void append(int32_t d) { _dimensions.push_back(d); }

public:
  /**
   * @brief Get the rank of this shape
   * @return rank
   * @note The static_cast is safe because the constructors never accept
   *       more than uint32_t-many dimensions
   */
  uint32_t rank(void) const { return static_cast<uint32_t>(_dimensions.size()); }

public:
  /**
   * @brief Get specific dimension
   * @param[in] n Index of dimension
   * @return n'th dimension
   */
  int32_t dim(uint32_t n) const { return _dimensions.at(n); }

  /**
   * @brief Get a mutable reference to a specific dimension
   * @param[in] n Index of dimension
   * @return Reference of n'th dimension
   */
  int32_t &dim(uint32_t n) { return _dimensions.at(n); }

  /**
   * @brief Read-only access to the whole dimension list
   */
  const std::deque<int32_t> &dims() const { return _dimensions; }

public:
  /**
   * @brief Get the number of elements specified by this shape
   * @return The number of elements
   */
  uint64_t num_elements() const;

private:
  std::deque<int32_t> _dimensions;

public:
  /**
   * @brief Get a @c Shape object after parsing string
   * @param[in] s String of dimension list. Accepted format is numbers separated by comma.
   * @return @c Shape object
   */
  static Shape from(const std::string &s);
};

/**
 * @brief Check equality of two @c Shape
 * @param[in] Shape First shape to compare
 * @param[in] Shape Second shape to compare
 * @return @c true if both shapes are equal, otherwise @c false
 */
bool operator==(const Shape &, const Shape &);

/**
 * @brief Send @c Shape to @c std::ostream
 * @param[in] os @c std::ostream to process this @c Shape
 * @param[in] shape @c Shape to send to @c ostream
 * @return Reference of @c std::ostream
 */
std::ostream &operator<<(std::ostream &os, const Shape &shape);

} // namespace tensor
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_TENSOR_SHAPE_H__
/**
 * @file Zipper.h
 * @ingroup COM_AI_RUNTIME
 * @brief This file contains nnfw::misc::tensor::Zipper class
 */

#ifndef __NNFW_MISC_TENSOR_ZIPPER_H__
#define __NNFW_MISC_TENSOR_ZIPPER_H__

#include "misc/tensor/Index.h"
#include "misc/tensor/IndexIterator.h"
#include "misc/tensor/Reader.h"

namespace nnfw
{
namespace misc
{
namespace tensor
{

/**
 * @brief Class to apply a function with three params: @c Index, elements of a tensor
 * at passed index read by @c Reader objects
 */
template <typename T> class Zipper
{
public:
  /**
   * @brief Construct a new @c Zipper object
   * @param[in] shape Shape of @c lhs and @c rhs
   * @param[in] lhs @c Reader object of a tensor
   * @param[in] rhs @c Reader object of a tensor
   */
  Zipper(const Shape &shape, const Reader<T> &lhs, const Reader<T> &rhs)
      : _shape{shape}, _lhs{lhs}, _rhs{rhs}
  {
    // DO NOTHING
  }

public:
  /**
   * @brief Apply @c cb to all elements of tensors. Elements of two tensors
   *        at passed @c index are read by @c lhs and @c rhs
   * @param[in] cb Function to apply
   * @return N/A
   */
  template <typename Callable> void zip(Callable cb) const
  {
    iterate(_shape) <<
        [this, &cb](const Index &index) { cb(index, _lhs.at(index), _rhs.at(index)); };
  }

private:
  // NOTE: members are references -- the Zipper must not outlive the shape/readers
  // it was constructed from (zip() below returns it by value for immediate use)
  const Shape &_shape;
  const Reader<T> &_lhs;
  const Reader<T> &_rhs;
};

/**
 * @brief Apply @c cb by using @c lhs and @c rhs passed to the constructor of @c zipper
 * @param[in] zipper @c Zipper object
 * @param[in] cb Function to apply using @c zip function
 * @return @c zipper object after applying @c cb to @c zipper
 */
template <typename T, typename Callable>
const Zipper<T> &operator<<(const Zipper<T> &zipper, Callable cb)
{
  zipper.zip(cb);
  return zipper;
}

/**
 * @brief Get @c Zipper object constructed using passed params
 * @param shape Shape of @c lhs and @c rhs
 * @param lhs @c Reader object of a tensor
 * @param rhs @c Reader object of a tensor
 * @return @c Zipper object
 */
template <typename T> Zipper<T> zip(const Shape &shape, const Reader<T> &lhs, const Reader<T> &rhs)
{
  return Zipper<T>{shape, lhs, rhs};
}

} // namespace tensor
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_TENSOR_ZIPPER_H__
/**
 * @file vector.h
 * @ingroup COM_AI_RUNTIME
 * @brief This file contains @c == operator to check equality of elements in two vectors
 */
#ifndef __NNFW_MISC_VECTOR_H__
#define __NNFW_MISC_VECTOR_H__

#include <vector>

/**
 * @brief Compare elements of two vectors
 * @tparam T Type of elements in vectors
 * @param[in] lhs First vector to compare
 * @param[in] rhs Second vector to compare
 * @return @c true if all elements are equal, otherwise @c false.
 */
template <typename T> bool operator==(const std::vector<T> &lhs, const std::vector<T> &rhs)
{
  // Vectors of different lengths can never be equal
  if (lhs.size() != rhs.size())
  {
    return false;
  }

  // Walk both sequences in lock-step and bail out on the first mismatch
  auto l = lhs.cbegin();
  for (auto r = rhs.cbegin(); l != lhs.cend(); ++l, ++r)
  {
    if (*l != *r)
    {
      return false;
    }
  }

  return true;
}

#endif // __NNFW_MISC_VECTOR_H__
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file Object.h + * @brief This file contains Object class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_MISC_VECTOR_OBJECT_H__ +#define __NNFW_MISC_VECTOR_OBJECT_H__ + +#include "misc/vector/Reader.h" + +#include <vector> +#include <functional> + +namespace nnfw +{ +namespace misc +{ +namespace vector +{ + +/** + * @brief Class to have information of the operand for vector + */ +template <typename T> class Object final : public Reader<T> +{ +public: + using Generator = std::function<T(int32_t size, int32_t offset)>; + +public: + /** + * @brief Construct Object object with size of vector and set value used by Generator + * @param[in] size The size of vector + * @param[in] gen A function to set values of operand tensor + */ + Object(int32_t size, const Generator &gen) : _size{size} + { + _value.resize(_size); + + for (int32_t offset = 0; offset < size; ++offset) + { + _value.at(offset) = gen(size, offset); + } + } + +public: + /** + * @brief Get size of vector + * @return Size of vector + */ + int32_t size(void) const { return _size; } + +public: + /** + * @brief Get the value used by index + * @param[in] nth The vector index + * @return The value at the offset + */ + T at(uint32_t nth) const override { return _value.at(nth); } + +private: + /** + * @brief Size of vector + */ + const int32_t _size; + /** + * @brief The tensor vector of operand + */ + std::vector<T> _value; +}; + +} // namespace vector +} // namespace misc +} // namespace nnfw + +#endif // __NNFW_MISC_VECTOR_OBJECT_H__ diff --git 
/**
 * @file Reader.h
 * @brief This file contains Reader class
 * @ingroup COM_AI_RUNTIME
 */

#ifndef __NNFW_MISC_VECTOR_READER_H__
#define __NNFW_MISC_VECTOR_READER_H__

#include <cstdint>

namespace nnfw
{
namespace misc
{
namespace vector
{

/**
 * @brief Class reads values of vector
 *        The interface class (abstract read-only accessor for vector-like data)
 */
template <typename T> struct Reader
{
  /**
   * @brief Destruct Reader object using default destructor
   */
  virtual ~Reader() = default;

  /**
   * @brief Get the value used by the index
   * @param[in] nth The vector index
   * @return The value at the offset
   */
  virtual T at(uint32_t nth) const = 0;
};

} // namespace vector
} // namespace misc
} // namespace nnfw

#endif // __NNFW_MISC_VECTOR_READER_H__
#include "misc/EventCollector.h"

// C++ standard libraries
#include <chrono>

// POSIX standard libraries
#include <sys/time.h>
#include <sys/resource.h>

namespace
{

// Current time in microseconds, rendered as a decimal string.
// NOTE(review): std::chrono::steady_clock's epoch is unspecified (often boot time),
// so these timestamps are only meaningful relative to one another.
std::string timestamp(void)
{
  auto now = std::chrono::steady_clock::now();
  return std::to_string(
      std::chrono::duration_cast<std::chrono::microseconds>(now.time_since_epoch()).count());
}

// Helper that stamps one fixed timestamp onto every DurationEvent it builds
class DurationEventBuilder
{
public:
  DurationEventBuilder(const std::string &ts) : _ts{ts} {}

  // Build a duration event; `ph` is the trace phase string ("B" begin / "E" end)
  DurationEvent build(const std::string &tid, const std::string &name, const std::string &ph) const
  {
    DurationEvent evt;

    evt.name = name;
    evt.tid = tid;
    evt.ph = ph;
    evt.ts = _ts;

    return evt;
  }

private:
  std::string _ts;
};

// Emit process resource-usage numbers (peak RSS, minor page faults) as
// counter ("C") events so they appear alongside duration events in the trace
void emit_rusage(EventRecorder *rec, const std::string &ts)
{
  struct rusage ru;

  getrusage(RUSAGE_SELF, &ru);
  {
    CounterEvent evt;

    evt.name = "maxrss";
    evt.ph = "C";
    evt.ts = ts;
    // ru_maxrss: peak resident set size (kilobytes on Linux -- TODO confirm per platform)
    evt.values["value"] = std::to_string(ru.ru_maxrss);

    rec->emit(evt);
  }

  {
    CounterEvent evt;

    evt.name = "minflt";
    evt.ph = "C";
    evt.ts = ts;
    // ru_minflt: count of minor page faults
    evt.values["value"] = std::to_string(ru.ru_minflt);

    rec->emit(evt);
  }
}

} // namespace

// Translate a BEGIN/END notification into a "B"/"E" duration event and record a
// resource-usage snapshot taken at the same timestamp.
void EventCollector::onEvent(const Event &event)
{
  auto ts = timestamp();

  switch (event.edge)
  {
    case Edge::BEGIN:
      _rec->emit(DurationEventBuilder(ts).build(event.backend, event.label, "B"));
      break;

    case Edge::END:
      _rec->emit(DurationEventBuilder(ts).build(event.backend, event.label, "E"));
      break;
  }

  // Trace resource usage per each event notification
  emit_rusage(_rec, ts);
}
#include "misc/EventRecorder.h"

#include <sstream>
#include <vector>

namespace
{

// Wrap a value in double quotes for JSON output
std::string quote(const std::string &value)
{
  std::stringstream ss;
  ss << '"' << value << '"';
  return ss.str();
}

// Render one JSON key/value pair: "k" : "v"
std::string field(const std::string &k, const std::string &v)
{
  std::stringstream ss;
  ss << quote(k) << " : " << quote(v);
  return ss.str();
}

struct Content // One Entry in Chrome Event Trace
{
  std::vector<std::pair<std::string, std::string>> flds;
  std::vector<std::pair<std::string, std::string>> args;
};

// Serialize a Content as a JSON object; args (if any) become a nested "args" object.
// NOTE(review): assumes content.flds is non-empty -- fill() below always adds five fields.
std::string object(const Content &content)
{
  std::stringstream ss;

  ss << "{ ";

  ss << field(content.flds[0].first, content.flds[0].second);

  for (uint32_t n = 1; n < content.flds.size(); ++n)
  {
    ss << ", " << field(content.flds.at(n).first, content.flds.at(n).second);
  }

  if (content.args.size() > 0)
  {
    ss << ", " << quote("args") << " : { ";
    ss << field(content.args.at(0).first, content.args.at(0).second);

    for (uint32_t n = 1; n < content.args.size(); ++n)
    {
      ss << ", " << field(content.args.at(n).first, content.args.at(n).second);
    }

    ss << "}";
  }

  ss << " }";

  return ss.str();
}

// Populate the fields shared by every event type (pid is hard-coded to "0")
void fill(Content &content, const Event &evt)
{
  content.flds.emplace_back("name", evt.name);
  content.flds.emplace_back("pid", "0");
  content.flds.emplace_back("tid", evt.tid);
  content.flds.emplace_back("ph", evt.ph);
  content.flds.emplace_back("ts", evt.ts);
}

// Duration events carry only the common fields
std::string object(const DurationEvent &evt)
{
  Content content;

  fill(content, evt);

  return ::object(content);
}

// Counter events additionally carry their sampled values as "args"
std::string object(const CounterEvent &evt)
{
  Content content;

  fill(content, evt);

  for (auto it = evt.values.begin(); it != evt.values.end(); ++it)
  {
    content.args.emplace_back(it->first, it->second);
  }

  return ::object(content);
}

} // namespace

// Append a serialized duration event; thread-safe via _mu
void EventRecorder::emit(const DurationEvent &evt)
{
  std::lock_guard<std::mutex> lock{_mu};

  _ss << " " << object(evt) << ",\n";
}

// Append a serialized counter event; thread-safe via _mu
void EventRecorder::emit(const CounterEvent &evt)
{
  std::lock_guard<std::mutex> lock{_mu};

  _ss << " " << object(evt) << ",\n";
}

// Dump all buffered events as a Chrome-trace JSON document
void EventRecorder::writeToFile(std::ostream &os)
{
  std::lock_guard<std::mutex> lock{_mu};

  os << "{\n";
  os << " " << quote("traceEvents") << ": [\n";

  os << _ss.str();

  // The empty "{ }" sentinel absorbs the trailing comma emitted after each event
  os << " { }\n";
  os << " ]\n";
  os << "}\n";
}

#include "misc/tensor/Comparator.h"
#include "misc/tensor/Zipper.h"

#include "misc/fp32.h"

namespace nnfw
{
namespace misc
{
namespace tensor
{

// Element-wise comparison of two tensors: every element failing _compare_fn is
// collected as a Diff; the optional observer sees every element pair regardless.
std::vector<Diff<float>> Comparator::compare(const Shape &shape, const Reader<float> &expected,
                                             const Reader<float> &obtained,
                                             Observer *observer) const
{
  std::vector<Diff<float>> res;

  zip(shape, expected, obtained) <<
      [&](const Index &index, float expected_value, float obtained_value) {
        if (!_compare_fn(expected_value, obtained_value))
        {
          res.emplace_back(index, expected_value, obtained_value);
        }

        // Update max_diff_index, if necessary
        if (observer != nullptr)
        {
          observer->notify(index, expected_value, obtained_value);
        }
      };

  return res;
}

} // namespace tensor
} // namespace misc
} // namespace nnfw
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "misc/tensor/IndexFormatter.h" + +#include <cassert> + +namespace nnfw +{ +namespace misc +{ +namespace tensor +{ + +std::ostream &operator<<(std::ostream &os, const IndexFormatter &fmt) +{ + const auto rank = fmt.index().rank(); + + assert(rank > 0); + + os << fmt.index().at(0); + + if (rank > 1) + { + for (uint32_t axis = 1; axis < rank; ++axis) + { + os << ", " << fmt.index().at(axis); + } + } + + return os; +} + +} // namespace tensor +} // namespace misc +} // namespace nnfw diff --git a/runtime/libs/misc/src/tensor/NonIncreasingStride.cpp b/runtime/libs/misc/src/tensor/NonIncreasingStride.cpp new file mode 100644 index 000000000..c51ad0324 --- /dev/null +++ b/runtime/libs/misc/src/tensor/NonIncreasingStride.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "misc/tensor/NonIncreasingStride.h" + +#include <cassert> + +namespace nnfw +{ +namespace misc +{ +namespace tensor +{ + +uint32_t NonIncreasingStride::offset(const Index &index) const +{ + const size_t rank = _stride.size(); + + assert(index.rank() == rank); + + uint32_t offset = 0; + + for (size_t axis = 0; axis < rank; ++axis) + { + offset += _stride.at(axis) * index.at(axis); + } + + return offset; +} + +} // namespace tensor +} // namespace misc +} // namespace nnfw diff --git a/runtime/libs/misc/src/tensor/Shape.cpp b/runtime/libs/misc/src/tensor/Shape.cpp new file mode 100644 index 000000000..70d3bdfdb --- /dev/null +++ b/runtime/libs/misc/src/tensor/Shape.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
#include "misc/tensor/Shape.h"

#include <cassert>
#include <functional>
#include <numeric>

namespace nnfw
{
namespace misc
{
namespace tensor
{

// Two shapes are equal iff they have the same rank and identical dimensions
bool operator==(const Shape &lhs, const Shape &rhs)
{
  if (lhs.rank() != rhs.rank())
  {
    return false;
  }

  for (uint32_t axis = 0; axis < lhs.rank(); ++axis)
  {
    if (lhs.dim(axis) != rhs.dim(axis))
    {
      return false;
    }
  }

  return true;
}

// Parse a comma-separated list of decimal numbers (e.g. "1,2,3") into a Shape.
// NOTE(review): only digits and commas are accepted; any other character trips the
// assert below, and in NDEBUG builds would be silently folded into the current number.
// An empty string yields a rank-0 shape; empty segments ("1,,2") are skipped.
Shape Shape::from(const std::string &str)
{
  Shape shape(0);

  bool pending = false; // true while digits of the current number have been seen
  int value = 0;        // accumulator for the current number

  for (const char *cur = str.c_str(); true; ++cur)
  {
    if (*cur == ',' || *cur == '\0')
    {
      if (pending)
      {
        shape.append(value);
      }

      if (*cur == '\0')
      {
        break;
      }

      pending = false;
      value = 0;
      continue;
    }

    assert(*cur >= '0' && *cur <= '9');

    pending = true;
    value *= 10;
    value += *cur - '0';
  }

  return shape;
}

// Product of all dimensions; a rank-0 (scalar) shape has exactly 1 element
uint64_t Shape::num_elements() const
{
  return std::accumulate(_dimensions.cbegin(), _dimensions.cend(), UINT64_C(1),
                         std::multiplies<uint64_t>());
}

// Print dimensions comma-separated (inverse of Shape::from); rank-0 prints nothing
std::ostream &operator<<(std::ostream &os, const Shape &shape)
{
  if (shape.rank() > 0)
  {
    os << shape.dim(0);

    for (uint32_t axis = 1; axis < shape.rank(); ++axis)
    {
      os << "," << shape.dim(axis);
    }
  }

  return os;
}

} // namespace tensor
} // namespace misc
} // namespace nnfw
target_compile_definitions(ndarray PUBLIC -DNDARRAY_INLINE_TEMPLATES=1) +endif() + +target_link_libraries(ndarray PRIVATE nnfw_common) +target_link_libraries(ndarray PRIVATE nnfw_coverage) + +add_subdirectory(test) +add_subdirectory(example) diff --git a/runtime/libs/ndarray/example/CMakeLists.txt b/runtime/libs/ndarray/example/CMakeLists.txt new file mode 100644 index 000000000..c4b575dad --- /dev/null +++ b/runtime/libs/ndarray/example/CMakeLists.txt @@ -0,0 +1,4 @@ +add_executable(example_no_array example_no_array.cpp) + +add_executable(example_array example_array.cpp) +target_link_libraries(example_array PRIVATE ndarray) diff --git a/runtime/libs/ndarray/example/example_array.cpp b/runtime/libs/ndarray/example/example_array.cpp new file mode 100644 index 000000000..85d274681 --- /dev/null +++ b/runtime/libs/ndarray/example/example_array.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
#include "ndarray/Array.h"

#include <iostream>
#include <iterator>

using namespace ndarray;

// Gather: for each (i, j) entry of `indices` -- a pair of coordinates into the two
// outer dimensions of `input` -- copy the selected innermost row into output(i, j).
// NOTE(review): the third assert compares indices dim(1) (number of selections per
// batch) against the input rank; arguably dim(2) (coordinate count, which must
// equal input rank - 1) is the intended check. Both happen to hold for the data
// in main() below -- confirm the intent.
void gather_array(const Array<float> &input, Array<float> &output, const Array<int> &indices)
{
  assert(indices.shape().rank() == 3);
  assert(input.shape().rank() == 3);
  assert(indices.shape().dim(1) == input.shape().rank());

  for (size_t i = 0; i < indices.shape().dim(0); ++i)
  {
    for (size_t j = 0; j < indices.shape().dim(1); ++j)
    {
      // index is a 2-element slice holding the two outer coordinates to gather from
      auto index = indices.slice(i, j);
      output.slice(i, j).assign(input.slice(index[0], index[1]));
    }
  }
}

int main()
{
  // fill tensor of shape[3,3,4] with sequential numbers from [0..36)
  Shape in_shape{3, 3, 4};
  std::vector<float> input_data(in_shape.element_count());
  for (size_t i = 0; i < in_shape.element_count(); ++i)
    input_data[i] = i;

  Array<float> input(input_data.data(), in_shape);

  // select column-vectors on main diagonal
  Shape indices_shape{1, 3, 2};
  std::vector<int> indices_data(indices_shape.element_count());
  Array<int> indices(indices_data.data(), indices_shape);

  indices.slice(0, 0) = {0, 0};
  indices.slice(0, 1) = {1, 1};
  indices.slice(0, 2) = {2, 2};

  Shape output_shape{1, 3, 4};
  std::vector<float> output_data(output_shape.element_count());

  Array<float> output(output_data.data(), output_shape);

  gather_array(input, output, indices);

  // Print each gathered row: expected 0..3, 16..19, 32..35
  for (size_t i = 0; i < indices_shape.dim(0); ++i)
  {
    for (size_t j = 0; j < indices_shape.dim(1); ++j)
    {
      auto output_piece = output.slice(i, j);
      std::ostream_iterator<int> cout_it(std::cout, ", ");
      std::copy(output_piece.begin(), output_piece.end(), cout_it);
      std::cout << std::endl;
    }
  }
}
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <array> +#include <vector> +#include <algorithm> +#include <cassert> +#include <iostream> + +void gather_no_array(const float *in_data, const std::array<size_t, 3> &dims, float *out_data, + const std::array<size_t, 3> &out_dims, //[nselections, + const int *indices, const std::array<size_t, 3> &indices_dims) +{ + assert(indices_dims[1] == dims.size()); + + for (int i = 0; i < indices_dims[0]; ++i) + { + for (int j = 0; j < indices_dims[1]; ++j) + { + const int *index_ptr = indices + i * indices_dims[2] * indices_dims[1] + j * indices_dims[2]; + + size_t in_offset = index_ptr[0] * dims[2] * dims[1] + index_ptr[1] * dims[2]; + + const float *in_ptr = in_data + in_offset; + + size_t out_offset = i * out_dims[2] * out_dims[1] + j * out_dims[2]; + + float *out_ptr = out_data + out_offset; + + for (int k = 0; k < dims[2]; ++k) + { + out_ptr[k] = in_ptr[k]; + } + } + } +} + +int main() +{ + std::array<size_t, 3> in_dims{3, 3, 4}; + std::vector<float> input(3 * 3 * 4); + for (size_t i = 0; i < 3 * 3 * 4; ++i) + input[i] = i; + + std::array<size_t, 3> indices_shape{1, 3, 2}; + std::vector<int> indices(1 * 3 * 2); + + indices[0] = 0; + indices[1] = 0; + indices[2] = 1; + indices[3] = 1; + indices[4] = 2; + indices[5] = 2; + + std::array<size_t, 3> output_dims{1, 3, 4}; + std::vector<float> output(1 * 3 * 4); + + gather_no_array(input.data(), in_dims, output.data(), output_dims, 
indices.data(), indices_shape); + + for (size_t i = 0; i < output_dims[0]; ++i) + { + for (size_t j = 0; j < output_dims[1]; ++j) + { + auto out_ptr = output.data() + i * output_dims[1] * output_dims[2] + j * output_dims[2]; + for (size_t k = 0; k < output_dims[2]; ++k) + { + std::cout << out_ptr[k] << ", "; + } + std::cout << std::endl; + } + } +} diff --git a/runtime/libs/ndarray/include/ndarray/Array.h b/runtime/libs/ndarray/include/ndarray/Array.h new file mode 100644 index 000000000..3890cc26b --- /dev/null +++ b/runtime/libs/ndarray/include/ndarray/Array.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _NDARRAY_ARRAY_H_ +#define _NDARRAY_ARRAY_H_ + +#include "Common.h" + +#include "ContiguousSpan.h" +#include "Shape.h" + +#if __cplusplus < 201402L +#include "detail/cxx14.h" //integer_sequence and make_index_dequence definitions +#else +#include <utility> +#endif + +#include <algorithm> +#include <cassert> +#include <type_traits> +#include <array> +#include <tuple> +#include <cstddef> + +namespace ndarray +{ + +// there is no index_sequence before c++14 +#if __cplusplus < 201402L + +template <size_t... Nums> using index_sequence = cxx14::index_sequence<Nums...>; + +template <size_t Num> using make_index_sequence = cxx14::make_index_sequence<Num>; + +#else + +template <size_t... 
Nums> using index_sequence = std::index_sequence<Nums...>; + +template <size_t _Num> using make_index_sequence = std::make_index_sequence<_Num>; + +#endif //__cplusplus < 201402L + +struct Strides +{ + explicit Strides(Shape s) : _strides{} { fillStrides(s); } + + int operator[](size_t idx) const noexcept { return _strides[idx]; } + + // since we don't have c++14 fold expression + template <typename Seq, typename... Ts> struct _calc_offset; + + template <size_t Num, size_t... Nums, typename T, typename... Ts> + struct _calc_offset<index_sequence<Num, Nums...>, T, Ts...> + { + static constexpr size_t get(const std::array<int, 8> &strides, int x, Ts... xs) + { + return _calc_offset<index_sequence<Nums...>, Ts...>::get(strides, xs...) + + x * std::get<Num>(strides); + } + }; + + template <size_t Num, typename T> struct _calc_offset<index_sequence<Num>, T> + { + static constexpr size_t get(const std::array<int, 8> &strides, int x) + { + return x * std::get<Num>(strides); + } + }; + + template <typename Seq, typename... Ts> constexpr size_t offset(Seq, Ts... x) const noexcept + { + // return ( 0 + ... + (std::get<Nums>(_strides) * x)); in c++14 + return _calc_offset<Seq, Ts...>::get(_strides, x...); + } + +private: + void fillStrides(const Shape &s) noexcept + { + int rank = s.rank(); + _strides[rank - 1] = 1; + for (int d = rank - 2; d >= 0; --d) + { + _strides[d] = _strides[d + 1] * s.dim(d + 1); + } + } + + std::array<int, NDARRAY_MAX_DIMENSION_COUNT> _strides; +}; + +template <typename T> class Array +{ +public: + Array(T *data, Shape shape) noexcept : _data(data), _shape(shape), _strides(shape) {} + + Array(const Array &) = delete; + + Array(Array &&a) noexcept : _data(a._data), _shape(a._shape), _strides(a._strides) + { + a._data = nullptr; + } + + template <typename... Ts> T &at(Ts... x) const noexcept { return _at(static_cast<size_t>(x)...); } + + /** + * @brief returns last dimension as ContigniousSpan + * @param x indices of slice to take. 
See tests for usage details + * @return slice at given position + */ + template <typename... Ts> ContiguousSpan<T, std::is_const<T>::value> slice(Ts... x) noexcept + { + assert(sizeof...(Ts) == _shape.rank() - 1); + return {&at(x..., 0ul), _shape.dim(_shape.rank() - 1)}; + } + + /** + * @brief returns last dimension as ContigniousSpan + * @param x indices of slice to take. See tests for usage details + * @return slice at given position + */ + template <typename... Ts> ContiguousSpan<T, true> slice(Ts... x) const noexcept + { + assert(sizeof...(Ts) == _shape.rank() - 1); + return {&at(x..., 0ul), _shape.dim(_shape.rank() - 1)}; + } + + ContiguousSpan<T, std::is_const<T>::value> flat() noexcept + { + return {_data, _shape.element_count()}; + } + + ContiguousSpan<T, true> flat() const noexcept { return {_data, _shape.element_count()}; } + + const Shape &shape() const noexcept { return _shape; } + +private: + template <typename... Ts> T &_at(Ts... x) const noexcept + { + assert(sizeof...(x) == _shape.rank()); + using Indices = make_index_sequence<sizeof...(Ts)>; + return _data[offset(Indices{}, x...)]; + } + + template <typename... Ts, size_t... Nums> + size_t offset(index_sequence<Nums...> seq, Ts... x) const noexcept + { + static_assert( + sizeof...(Ts) == sizeof...(Nums), + "Sanity check failed. 
Generated index sequence size is not equal to argument count"); + + return _strides.offset(seq, x...); + } + + T *_data; + Shape _shape; + Strides _strides; +}; + +template <typename To, typename From> Array<To> array_cast(Array<From> &&from, Shape newShape) +{ + assert(from.shape().element_count() / (sizeof(To) / sizeof(From)) == newShape.element_count()); + return Array<To>(reinterpret_cast<To *>(from.flat().data()), newShape); +} + +template <typename To, typename From> +Array<const To> array_cast(const Array<From> &from, Shape newShape) +{ + assert(from.shape().element_count() / (sizeof(To) / sizeof(From)) == newShape.element_count()); + return Array<To>(reinterpret_cast<const To *>(from.flat().data()), newShape); +} + +#ifndef NDARRAY_INLINE_TEMPLATES + +extern template class Array<float>; +extern template class Array<int32_t>; +extern template class Array<uint32_t>; +extern template class Array<uint8_t>; + +#endif // NDARRAY_INLINE_TEMPLATES + +} // namespace ndarray + +#endif //_NDARRAY_ARRAY_H_ diff --git a/runtime/libs/ndarray/include/ndarray/Common.h b/runtime/libs/ndarray/include/ndarray/Common.h new file mode 100644 index 000000000..aa0cc6fe2 --- /dev/null +++ b/runtime/libs/ndarray/include/ndarray/Common.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _NDARRAY_COMMON_H_ +#define _NDARRAY_COMMON_H_ + +#define NDARRAY_MAX_DIMENSION_COUNT 8 + +#endif //_NDARRAY_COMMON_H_ diff --git a/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h b/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h new file mode 100644 index 000000000..8caa6a686 --- /dev/null +++ b/runtime/libs/ndarray/include/ndarray/ContiguousSpan.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _NDARRAY_CONTIGNIOUS_SPAN_H_ +#define _NDARRAY_CONTIGNIOUS_SPAN_H_ + +#include <type_traits> +#include <vector> +#include <cstdint> +#include <cstddef> +#include <cassert> + +namespace ndarray +{ + +template <typename T, bool isConst = false> class ContiguousSpan +{ +public: + using pointer_type = typename std::conditional<isConst, const T *, T *>::type; + using reference_type = typename std::conditional<isConst, const T &, T &>::type; + using iterator_type = pointer_type; + + ContiguousSpan(pointer_type data, size_t len) noexcept : _data(data), _len(len) {} + + template <typename It> + explicit ContiguousSpan(It first, It last) noexcept + : _data(&*first), _len(std::distance(first, last)) + { + } + + ContiguousSpan(const ContiguousSpan &) = delete; + + ContiguousSpan(ContiguousSpan &&s) noexcept : _data(s._data), _len(s._len) { s._data = nullptr; } + + operator ContiguousSpan<T, true>() { return ContiguousSpan<T, true>{_data, _len}; } + + reference_type operator[](size_t idx) const noexcept { return _data[idx]; } + + reference_type at(size_t idx) const noexcept { return _data[idx]; } + + ContiguousSpan<T, isConst> offset(size_t offset) + { + assert(offset <= _len); + return {_data + offset, _len - offset}; + } + + template <typename From, bool _ = isConst> + typename std::enable_if<!_, void>::type assign(const From &f) noexcept + { + assignFrom(std::begin(f), std::end(f)); + } + + template <typename U, bool _ = isConst> + typename std::enable_if<!_, ContiguousSpan &>::type + operator=(std::initializer_list<U> list) noexcept + { + assignFrom(std::begin(list), std::end(list)); + return *this; + } + + template <typename It, bool _ = isConst> + typename std::enable_if<!_, void>::type assignFrom(It first, It last) noexcept + { + std::copy(first, last, begin()); + } + + size_t size() const { return _len; } + + iterator_type begin() const { return iterator_type{_data}; } + + iterator_type end() const { return iterator_type{_data + _len}; } + + 
pointer_type data() { return _data; } + +private: + pointer_type _data; + size_t _len; +}; + +#ifndef NDARRAY_INLINE_TEMPLATES + +extern template class ContiguousSpan<float, true>; +extern template class ContiguousSpan<float, false>; +extern template class ContiguousSpan<int32_t, true>; +extern template class ContiguousSpan<int32_t, false>; +extern template class ContiguousSpan<uint32_t, true>; +extern template class ContiguousSpan<uint32_t, false>; +extern template class ContiguousSpan<uint8_t, true>; +extern template class ContiguousSpan<uint8_t, false>; + +#endif // NDARRAY_INLINE_TEMPLATES + +} // namespace ndarray + +#endif //_NDARRAY_CONTIGNIOUS_SPAN_H_ diff --git a/runtime/libs/ndarray/include/ndarray/Shape.h b/runtime/libs/ndarray/include/ndarray/Shape.h new file mode 100644 index 000000000..fa58613b8 --- /dev/null +++ b/runtime/libs/ndarray/include/ndarray/Shape.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _NDARRAY_SHAPE_H_ +#define _NDARRAY_SHAPE_H_ + +#include "Common.h" + +#include <array> +#include <cassert> +#include <cstddef> + +namespace ndarray +{ + +class Shape +{ +public: + //_dims{} here and later since array does not have std::initializer_list ctor + // and aggregate initialization is not allowed here + explicit Shape(size_t rank) noexcept : _dims{}, _rank(rank) + { + std::fill(_dims.begin(), _dims.end(), 0); + } + + Shape(std::initializer_list<size_t> list) noexcept : _dims{}, _rank(list.size()) + { + std::copy(list.begin(), list.end(), _dims.begin()); + } + + size_t dim(int i) const noexcept { return _dims.at(i); } + + size_t &dim(int i) noexcept { return _dims.at(i); } + + size_t element_count() const noexcept + { + uint32_t res = 1; + for (size_t i = 0; i < rank(); ++i) + res *= dim(i); + assert(res <= 0xffffffff); + return res; + } + + size_t rank() const noexcept { return _rank; } + +private: + std::array<size_t, NDARRAY_MAX_DIMENSION_COUNT> _dims; + size_t _rank; +}; + +} // namespace ndarray + +#endif //_NDARRAY_SHAPE_H_ diff --git a/runtime/libs/ndarray/src/Array.cpp b/runtime/libs/ndarray/src/Array.cpp new file mode 100644 index 000000000..f9c9de9d3 --- /dev/null +++ b/runtime/libs/ndarray/src/Array.cpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ndarray/Array.h" + +namespace ndarray +{ + +template class Array<float>; +template class Array<int32_t>; +template class Array<uint32_t>; +template class Array<uint8_t>; + +} // namespace ndarray diff --git a/runtime/libs/ndarray/src/ContiguousSpan.cpp b/runtime/libs/ndarray/src/ContiguousSpan.cpp new file mode 100644 index 000000000..e06cfc2a1 --- /dev/null +++ b/runtime/libs/ndarray/src/ContiguousSpan.cpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ndarray/ContiguousSpan.h" + +namespace ndarray +{ + +template class ContiguousSpan<float, true>; +template class ContiguousSpan<float, false>; +template class ContiguousSpan<int32_t, true>; +template class ContiguousSpan<int32_t, false>; +template class ContiguousSpan<uint32_t, true>; +template class ContiguousSpan<uint32_t, false>; +template class ContiguousSpan<uint8_t, true>; +template class ContiguousSpan<uint8_t, false>; + +} // namespace ndarray diff --git a/runtime/libs/ndarray/src/detail/cxx14.h b/runtime/libs/ndarray/src/detail/cxx14.h new file mode 100644 index 000000000..81135b3f2 --- /dev/null +++ b/runtime/libs/ndarray/src/detail/cxx14.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _NDARRAY_CXX14_H_ +#define _NDARRAY_CXX14_H_ + +namespace ndarray +{ + +namespace cxx14 +{ + +template <size_t... Nums> struct index_sequence +{ + using value_type = size_t; + + static constexpr std::size_t size() noexcept { return sizeof...(Nums); } +}; + +namespace detail +{ + +template <size_t v, typename Seq> struct _append; + +template <size_t v, size_t... Nums> struct _append<v, index_sequence<Nums...>> +{ + using result = index_sequence<Nums..., v>; +}; + +template <size_t Len> struct make_index_sequence +{ + using result = + typename detail::_append<Len - 1, typename make_index_sequence<Len - 1>::result>::result; +}; + +template <> struct make_index_sequence<1> +{ + using result = index_sequence<0>; +}; + +template <> struct make_index_sequence<0> +{ + using result = index_sequence<>; +}; + +} // namespace detail + +template <size_t Num> using make_index_sequence = typename detail::make_index_sequence<Num>::result; + +} // namespace cxx14 + +} // namespace ndarray + +#endif //_NDARRAY_CXX14_H_ diff --git a/runtime/libs/ndarray/test/CMakeLists.txt b/runtime/libs/ndarray/test/CMakeLists.txt new file mode 100644 index 000000000..16f8779ee --- /dev/null +++ b/runtime/libs/ndarray/test/CMakeLists.txt @@ -0,0 +1,17 @@ +if(NOT BUILD_NDARRAY_TEST) + return() +endif() + +add_executable(ndarray_test ndarray_test.cpp) + +target_link_libraries(ndarray_test PRIVATE ndarray) + 
+nnfw_find_package(GTest) +if(NOT GTest_FOUND) + message(STATUS "GTest not avaialble. Skipping NDArray test build") + return() +endif(NOT GTest_FOUND) + +target_link_libraries(ndarray_test PUBLIC gtest gtest_main ${LIB_PTHREAD}) + +add_test(ndarray_test ndarray_test) diff --git a/runtime/libs/ndarray/test/ndarray_test.cpp b/runtime/libs/ndarray/test/ndarray_test.cpp new file mode 100644 index 000000000..0aa948c72 --- /dev/null +++ b/runtime/libs/ndarray/test/ndarray_test.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "gtest/gtest.h" + +#include "ndarray/Array.h" + +using namespace ndarray; + +TEST(NDArray_tests, basic_data_test) +{ + + float raw_data[] = {1, 2, 3, 4}; + + Array<float> data22{raw_data, {2, 2}}; + + ASSERT_FLOAT_EQ(data22.at(0, 0), 1); + ASSERT_FLOAT_EQ(data22.at(0, 1), 2); + ASSERT_FLOAT_EQ(data22.at(1, 0), 3); + ASSERT_FLOAT_EQ(data22.at(1, 1), 4); + + Array<float> data14{raw_data, {1, 4}}; + ASSERT_FLOAT_EQ(data22.at(0, 0), 1); + ASSERT_FLOAT_EQ(data22.at(0, 1), 2); + ASSERT_FLOAT_EQ(data22.at(0, 2), 3); + ASSERT_FLOAT_EQ(data22.at(0, 3), 4); +} + +TEST(NDArray_tests, slice_write_test) +{ + float raw_data[4] = {0}; + + Array<float> data22{raw_data, {2, 2}}; + + data22.slice(1) = {1, 2}; + + ASSERT_FLOAT_EQ(data22.at(0, 0), 0); + ASSERT_FLOAT_EQ(data22.at(0, 1), 0); + ASSERT_FLOAT_EQ(data22.at(1, 0), 1); + ASSERT_FLOAT_EQ(data22.at(1, 1), 2); +} + +TEST(NDArray_tests, slice_read_test) +{ + float raw_data[4] = {1, 2, 3, 4}; + + Array<float> data22{raw_data, {2, 2}}; + + auto slice = data22.slice(1); + + ASSERT_FLOAT_EQ(slice[0], 3); + ASSERT_FLOAT_EQ(slice[1], 4); +} + +TEST(NDArray_tests, multidim_test) +{ + float raw_data[5] = {0, 1, 2, 3, 4}; + + Array<float> data22{raw_data, {1, 1, 1, 1, 5}}; + + ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 0), 0); + ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 1), 1); + ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 2), 2); + ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 3), 3); + ASSERT_FLOAT_EQ(data22.at(0, 0, 0, 0, 4), 4); +} + +TEST(NDArray_tests, slice_assign_test) +{ + std::vector<float> v1{1, 2, 3, 4, 5}; + std::vector<float> v2(5); + + ContiguousSpan<float> span1(v1.begin(), v1.end()); + ContiguousSpan<float> span2(v2.begin(), v2.end()); + + span2.assign(span1); + + ASSERT_EQ(v1, v2); +} diff --git a/runtime/libs/nnapi/CMakeLists.txt b/runtime/libs/nnapi/CMakeLists.txt new file mode 100644 index 000000000..a5d9490d1 --- /dev/null +++ b/runtime/libs/nnapi/CMakeLists.txt @@ -0,0 +1,3 @@ +add_subdirectories() + 
+add_library(nnfw_lib_nnapi ALIAS nnfw_lib_nnapi_1_2) diff --git a/runtime/libs/nnapi/v1.1/CMakeLists.txt b/runtime/libs/nnapi/v1.1/CMakeLists.txt new file mode 100644 index 000000000..dc018c60f --- /dev/null +++ b/runtime/libs/nnapi/v1.1/CMakeLists.txt @@ -0,0 +1,4 @@ +add_library(nnfw_lib_nnapi_1_1 INTERFACE) + +target_include_directories(nnfw_lib_nnapi_1_1 INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_link_libraries(nnfw_lib_nnapi_1_1 INTERFACE nnfw-nnapi-header) diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h new file mode 100644 index 000000000..f684dab90 --- /dev/null +++ b/runtime/libs/nnapi/v1.1/include/NeuralNetworksExShim.h @@ -0,0 +1,64 @@ +/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +/** + * @file NeuralNetworksExShim.h + * @brief This file contains an actual implementation of + * ANeuralNetworksModel_addOperationEx function + * @ingroup COM_AI_RUNTIME + */ + +#ifndef NN_API_EX_SHIM_H +#define NN_API_EX_SHIM_H + +#include "NeuralNetworksEx.h" +#include "NeuralNetworksLoadHelpers.h" + +typedef int (*ANeuralNetworksModel_addOperationEx_fn)(ANeuralNetworksModel *model, + ANeuralNetworksOperationTypeEx type, + uint32_t inputCount, const uint32_t *inputs, + uint32_t outputCount, + const uint32_t *outputs); + +/** + * @brief Add an extended operation to a model. + * + * @param[in] model The model to be modified. + * @param[in] type The type of extended operation. + * @param[in] inputCount The number of entries in the inputs array. + * @param[in] inputs An array of indexes identifying each operand. + * @param[in] outputCount The number of entries in the outputs array. + * @param[in] outputs An array of indexes identifying each operand. + * + * @note The operands specified by inputs and outputs must have been + * previously added by calls to {@link ANeuralNetworksModel_addOperand}.\n + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} + * has been called will return an error.\n + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. 
+ */ + +inline int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model, + ANeuralNetworksOperationTypeEx type, + uint32_t inputCount, const uint32_t *inputs, + uint32_t outputCount, const uint32_t *outputs) +{ + LOAD_FUNCTION(ANeuralNetworksModel_addOperationEx); + EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, outputs); +} + +#endif // NN_API_EX_SHIM_H diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h new file mode 100644 index 000000000..201465f9c --- /dev/null +++ b/runtime/libs/nnapi/v1.1/include/NeuralNetworksLoadHelpers.h @@ -0,0 +1,141 @@ +/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// NOTE To minimize diff with upstream tensorflow, disable clang-format +// clang-format off + +// NOTE This header is derived from part of the following file (in TensorFlow v1.12) +// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h' + +/** + * @file NeuralNetworksLoadHelpers.h + * @ingroup COM_AI_RUNTIME + * @brief This file contains functions to load NN API runtime library + */ + +#ifndef __NEURAL_NETWORKS_LOAD_HELPER_H__ +#define __NEURAL_NETWORKS_LOAD_HELPER_H__ + +#include <dlfcn.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> + +/** + * @brief Print log data + * @param[in] format Format string of @c printf + * @param[in] args Argument after format string. (Same with @c printf) + */ +#define NNAPI_LOG(format, ...) printf(format "\n", __VA_ARGS__); + +/** + * @brief Create a function pointer named @c fn after loading NN API library + * @param[in] name Name of a function + */ +#define LOAD_FUNCTION(name) \ + static name##_fn fn = reinterpret_cast<name##_fn>(nnfw::loadFunction(#name)); + +/** + * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION + * @param[in] args List of arguments for the function @c fn + */ +#define EXECUTE_FUNCTION(...) \ + if (fn != nullptr) { \ + fn(__VA_ARGS__); \ + } + +/** + * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION + * @param[in] args List of arguments for the function @c fn + * @return the return value of @c fn + */ +#define EXECUTE_FUNCTION_RETURN(...) return fn != nullptr ? fn(__VA_ARGS__) : 0; + +namespace nnfw +{ + +/** + * @brief Load NN API library + * @param[in] name path of NN API library + * @return a symbol table handle of NN API library + */ +inline void* loadLibrary(const char* name) { + // TODO: change RTLD_LOCAL? 
Assumes there can be multiple instances of nn + // api RT + void* handle = nullptr; +#if 1 //#ifdef __ANDROID__ + handle = dlopen(name, RTLD_LAZY | RTLD_LOCAL); + if (handle == nullptr) { + NNAPI_LOG("nnapi error: unable to open library %s", name); + NNAPI_LOG(" %s", dlerror()); + } +#endif + return handle; +} + +/** + * @brief Load libneuralnetworks.so and return handle of library + * @return a symbol table handle of NN API library + */ +inline void* getLibraryHandle() { + static void* handle = loadLibrary("libneuralnetworks.so"); + return handle; +} + +/** + * @brief Return function ptr in libneuralnetworks.so + * @param[in] name Name of function + * @return function pointer + */ +inline void* loadFunction(const char* name) { + void* fn = nullptr; + if (getLibraryHandle() != nullptr) { + fn = dlsym(getLibraryHandle(), name); + } + if (fn == nullptr) { + NNAPI_LOG("nnapi error: unable to open function %s", name); + NNAPI_LOG(" %s", dlerror()); + abort(); + } + else { +#ifdef _GNU_SOURCE + Dl_info info; + if (dladdr(fn, &info)) + { + NNAPI_LOG("nnapi function '%s' is loaded from '%s' ", name, info.dli_fname); + } + else + { + NNAPI_LOG("nnapi function '%s' is failed to load", name); + } + +#endif // _GNU_SOURCE + } + return fn; +} + +/** + * @brief Check if libneuralnetworks.so can be loaded + * @return @c true if loading is successful, otherwise @c false. + */ +inline bool NNAPIExists() { + static bool nnapi_is_available = getLibraryHandle(); + return nnapi_is_available; +} + +} // namespace nnfw + +#endif // __NEURAL_NETWORKS_LOAD_HELPER_H__ diff --git a/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h b/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h new file mode 100644 index 000000000..60b16f766 --- /dev/null +++ b/runtime/libs/nnapi/v1.1/include/NeuralNetworksShim.h @@ -0,0 +1,709 @@ +/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// NOTE To minimize diff with upstream tensorflow, disable clang-format +// clang-format off + +// NOTE This header is derived from part of the following file (in TensorFlow v1.12) +// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h' +#ifndef __NEURAL_NETWORKS_SHIM__ +#define __NEURAL_NETWORKS_SHIM__ + +#include "NeuralNetworks.h" +#include "NeuralNetworksLoadHelpers.h" + +// nn api function types + +typedef int (*ANeuralNetworksMemory_createFromFd_fn)( + size_t size, int protect, int fd, size_t offset, + ANeuralNetworksMemory** memory); + +typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory* memory); + +typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel** model); + +typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel* model); + +typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel* model); + +typedef int (*ANeuralNetworksCompilation_create_fn)( + ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation); + +typedef void (*ANeuralNetworksCompilation_free_fn)( + ANeuralNetworksCompilation* compilation); + +typedef int (*ANeuralNetworksCompilation_setPreference_fn)( + ANeuralNetworksCompilation* compilation, int32_t preference); + +typedef int (*ANeuralNetworksCompilation_finish_fn)( + ANeuralNetworksCompilation* compilation); + +typedef int 
(*ANeuralNetworksModel_addOperand_fn)( + ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type); + +typedef int (*ANeuralNetworksModel_setOperandValue_fn)( + ANeuralNetworksModel* model, int32_t index, const void* buffer, + size_t length); + +typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)( + ANeuralNetworksModel* model, int32_t index, + const ANeuralNetworksMemory* memory, size_t offset, size_t length); + +typedef int (*ANeuralNetworksModel_addOperation_fn)( + ANeuralNetworksModel* model, ANeuralNetworksOperationType type, + uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, + const uint32_t* outputs); + +typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)( + ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, + uint32_t outputCount, const uint32_t* outputs); + +typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)( + ANeuralNetworksModel* model, bool allow); + +typedef int (*ANeuralNetworksExecution_create_fn)( + ANeuralNetworksCompilation* compilation, + ANeuralNetworksExecution** execution); + +typedef void (*ANeuralNetworksExecution_free_fn)( + ANeuralNetworksExecution* execution); + +typedef int (*ANeuralNetworksExecution_setInput_fn)( + ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, const void* buffer, size_t length); + +typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)( + ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, + size_t offset, size_t length); + +typedef int (*ANeuralNetworksExecution_setOutput_fn)( + ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, void* buffer, size_t length); + +typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)( + ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* 
memory, + size_t offset, size_t length); + +typedef int (*ANeuralNetworksExecution_startCompute_fn)( + ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event); + +typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent* event); + +typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent* event); + +/** + * Creates a shared memory object from a file descriptor. + * + * The shared memory is backed by a file descriptor via mmap. + * See {@link ANeuralNetworksMemory} for a description on how to use + * this shared memory. + * + * @param size The requested size in bytes. + * Must not be larger than the file size. + * @param prot The desired memory protection for the mapping. + * It is either PROT_NONE or the bitwise OR of one or + * more of the following flags: PROT_READ, PROT_WRITE. + * @param fd The requested file descriptor. + * The file descriptor has to be mmap-able. The file + * descriptor will be duplicated. + * @param offset The offset to the beginning of the file of the area to map. + * The offset has to be aligned to a page size. + * @param memory The memory object to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. + */ +inline int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, + size_t offset, + ANeuralNetworksMemory** memory) { + LOAD_FUNCTION(ANeuralNetworksMemory_createFromFd); + EXECUTE_FUNCTION_RETURN(size, protect, fd, offset, memory); +} + +/** + * Delete a memory object. + * + * Destroys the object used by the run time to keep track of the memory. + * This will free the underlying actual memory if no other code has open + * handles to this memory. + * + * @param memory The memory object to be freed. + */ +inline void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) { + LOAD_FUNCTION(ANeuralNetworksMemory_free); + EXECUTE_FUNCTION(memory); +} + +/** + * Create an empty {@link ANeuralNetworksModel}. 
+ * + * <p>This only creates the object. Computation is performed once + * {@link ANeuralNetworksExecution_startCompute} is invoked. + * + * The model should be constructed with calls to + * {@link ANeuralNetworksModel_addOperation} and + * {@link ANeuralNetworksModel_addOperand} + * + * <p>{@link ANeuralNetworksModel_finish} should be called once the model + * has been fully constructed.</p> + * + * <p>{@link ANeuralNetworksModel_free} should be called once the model + * is no longer needed.</p> + * + * @param model The {@link ANeuralNetworksModel} to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_create(ANeuralNetworksModel** model) { + LOAD_FUNCTION(ANeuralNetworksModel_create); + EXECUTE_FUNCTION_RETURN(model); +} + +/** + * Destroy a model. + * + * The model need not have been finished by a call to + * {@link ANeuralNetworksModel_finish}. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +inline void ANeuralNetworksModel_free(ANeuralNetworksModel* model) { + LOAD_FUNCTION(ANeuralNetworksModel_free); + EXECUTE_FUNCTION(model); +} + +/** + * Indicate that we have finished modifying a model. Required before + * calling {@link ANeuralNetworksCompilation_compile}. + * + * An application is responsible to make sure that no other thread uses + * the model at the same time. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be finished. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) { + LOAD_FUNCTION(ANeuralNetworksModel_finish); + EXECUTE_FUNCTION_RETURN(model); +} + +/** + * Add an operand to a model. + * + * The order in which the operands are added is important. 
The first one added + * to a model will have the index value 0, the second 1, etc. These indexes are + * used as operand identifiers in {@link ANeuralNetworksModel_addOperation}, + * {@link ANeuralNetworksExecution_setInput}, + * {@link ANeuralNetworksExecution_setInputFromMemory}, + * {@link ANeuralNetworksExecution_setOutput}, + * {@link ANeuralNetworksExecution_setOutputFromMemory} and + * {@link ANeuralNetworksExecution_setOperandValue}. + * + * To build a model that can accommodate inputs of various sizes, as you may + * want to do for a CNN, set the size of the dimensions that will vary at run + * time to 0. If you do so, provide the full dimensions when calling + * {@link ANeuralNetworksExecution_setInput} or {@link + * ANeuralNetworksExecution_setInputFromMemory}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be modified. + * @param type The {@link ANeuralNetworksOperandType} that describes the shape + * of the operand. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_addOperand( + ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type) { + LOAD_FUNCTION(ANeuralNetworksModel_addOperand); + EXECUTE_FUNCTION_RETURN(model, type); +} + +/** + * Sets an operand to a constant value. + * + * For scalar values, the content of buffer is copied into the model. + * + * For tensor values, a pointer to the buffer is stored within the model. + * The application is responsible for not changing the content of this region + * until all executions using this model have completed. As the data may + * be copied during processing, modifying the data after this call yields + * undefined results. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. 
+ * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param buffer A pointer to the data to use. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, + int32_t index, + const void* buffer, + size_t length) { + LOAD_FUNCTION(ANeuralNetworksModel_setOperandValue); + EXECUTE_FUNCTION_RETURN(model, index, buffer, length); +} + +/** + * Sets an operand to a value stored in a memory object. + * + * The content of the memory is not copied. A reference to that memory is stored + * inside the model. The application is responsible for not changing the content + * of the memory region until all executions using this model have completed. + * As the data may be copied during processing, modifying the data after this + * call yields undefined results. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param buffer A pointer to the data to use. + * @param memory The memory containing the data. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. 
+ */ +inline int ANeuralNetworksModel_setOperandValueFromMemory( + ANeuralNetworksModel* model, int32_t index, + const ANeuralNetworksMemory* memory, size_t offset, size_t length) { + LOAD_FUNCTION(ANeuralNetworksModel_setOperandValueFromMemory); + EXECUTE_FUNCTION_RETURN(model, index, memory, offset, length); +} + +/** + * Add an operation to a model. + * + * @param model The model to be modified. + * @param type The type of the operation. + * @param inputCount The number of entries in the inputs array. + * @param inputs An array of indexes identifying each operand. + * @param outputCount The number of entries in the outputs array. + * @param outputs An array of indexes identifying each operand. + * + * The operands specified by inputs and outputs must have been + * previously added by calls to {@link ANeuralNetworksModel_addOperand}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, + ANeuralNetworksOperationType type, + uint32_t inputCount, + const uint32_t* inputs, + uint32_t outputCount, + const uint32_t* outputs) { + LOAD_FUNCTION(ANeuralNetworksModel_addOperation); + EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, + outputs); +} + +/** + * Specifies which operands will be the model's inputs and outputs. + * + * An operand cannot be used for both input and output. Doing so will + * return an error. + * + * @param model The model to be modified. + * @param inputCount The number of entries in the inputs array. + * @param inputs An array of indexes identifying the input operands. + * @param outputCount The number of entries in the outputs array. + * @param outputs An array of indexes identifying the output operands. 
+ * + * The operands specified by inputs and outputs must have been + * previously added by calls to {@link ANeuralNetworksModel_addOperand}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + */ +inline int ANeuralNetworksModel_identifyInputsAndOutputs( + ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, + uint32_t outputCount, const uint32_t* outputs) { + LOAD_FUNCTION(ANeuralNetworksModel_identifyInputsAndOutputs); + EXECUTE_FUNCTION_RETURN(model, inputCount, inputs, outputCount, outputs); +} + +/** + * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be + * calculated with range and/or precision as low as that of the IEEE 754 16-bit + * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * must be calculated using at least the range and precision of the IEEE 754 + * 32-bit floating-point format. + * + * @param model The model to be modified. + * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be + * calculated with range and/or precision as low as that of the + * IEEE 754 16-bit floating point format. 'false' indicates + * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using + * at least the range and precision of the IEEE 754 32-bit floating + * point format. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * Available since API level 28. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + */ +inline int ANeuralNetworksModel_relaxComputationFloat32toFloat16( + ANeuralNetworksModel* model, bool allow) { + LOAD_FUNCTION(ANeuralNetworksModel_relaxComputationFloat32toFloat16); + EXECUTE_FUNCTION_RETURN(model, allow); +} + +/** + * Create a {@link ANeuralNetworksCompilation} to compile the given model. 
+ * This only creates the object. Compilation is only performed once + * {@link ANeuralNetworksCompilation_start} is invoked. + * + * <p>The provided model must outlive the compilation.</p> + * + * The model must already have been finished by a call to + * {@link ANeuralNetworksModel_finish}. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded + * usage. + * + * @param model The {@link ANeuralNetworksModel} to be compiled. + * @param compilation The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the model is invalid. + */ +inline int ANeuralNetworksCompilation_create( + ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation) { + LOAD_FUNCTION(ANeuralNetworksCompilation_create); + EXECUTE_FUNCTION_RETURN(model, compilation); +} + +/** + * Destroy a compilation. + * + * <p>If called on a compilation for which + * {@link ANeuralNetworksCompilation_start} has been called, the + * function will return immediately but will mark the compilation to be deleted + * once the compilation completes. The {@link ANeuralNetworksCompilation_wait} + * will return ERROR_DELETED. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded + * usage. + * + * @param compilation The compilation to be destroyed. Passing NULL is + * acceptable and results in no operation. + */ +inline void ANeuralNetworksCompilation_free( + ANeuralNetworksCompilation* compilation) { + LOAD_FUNCTION(ANeuralNetworksCompilation_free); + EXECUTE_FUNCTION(compilation); +} + +/** + * Sets the execution preference. + * + * <p>Provides guidance to the runtime when trade-offs are possible.</p> + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded + * usage. + * + * @param compilation The compilation to be modified. 
+ * @param preference Either {@link PREFER_LOW_POWER}, + * {@link PREFER_SINGLE_FAST_ANSWER}, or + * {@link PREFER_SUSTAINED_SPEED}. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksCompilation_setPreference( + ANeuralNetworksCompilation* compilation, int32_t preference) { + LOAD_FUNCTION(ANeuralNetworksCompilation_setPreference); + EXECUTE_FUNCTION_RETURN(compilation, preference); +} + +/** + * Waits until the compilation completes. + * + * More than one thread can wait on a compilation. When the compilation + * completes, all threads will be released. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded + * usage. + * + * @return ANEURALNETWORKS_NO_ERROR if the compilation completed normally. + */ +inline int ANeuralNetworksCompilation_finish( + ANeuralNetworksCompilation* compilation) { + LOAD_FUNCTION(ANeuralNetworksCompilation_finish); + EXECUTE_FUNCTION_RETURN(compilation); +} +/** + * Create a {@link ANeuralNetworksExecution} to apply the given compilation. + * This only creates the object. Computation is only performed once + * {@link ANeuralNetworksExecution_startCompute} is invoked. + * + * <p>The provided compilation must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. + * @param execution The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the compilation is invalid. + */ +inline int ANeuralNetworksExecution_create( + ANeuralNetworksCompilation* compilation, + ANeuralNetworksExecution** execution) { + LOAD_FUNCTION(ANeuralNetworksExecution_create); + EXECUTE_FUNCTION_RETURN(compilation, execution); +} + +/** + * Destroy an execution. 
+ * + * <p>If called on an execution for which + * {@link ANeuralNetworksExecution_startCompute} has been called, the + * function will return immediately but will mark the execution to be deleted + * once the computation completes. The {link ANeuralNetworksExecution_wait} + * will return ANEURALNETWORKS_ERROR_DELETED. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be destroyed. Passing NULL is acceptable + * and results in no operation. + */ +inline void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) { + LOAD_FUNCTION(ANeuralNetworksExecution_free); + EXECUTE_FUNCTION(execution); +} + +/** + * Associate a user buffer with an input of the model of the + * {@link ANeuralNetworksExecution}. + * + * <p>The provided buffer must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param index The index of the input argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param type The type of the operand. This should be used to specify the + * dimensions that were set to 0 when the operand was added to the + * model. All other properties of the type must be the same as + * specified in the model. If the type is the same as specified + * when the model was built, NULL can be passed. + * @param buffer The buffer containing the data. + * @param length The length in bytes of the buffer. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if + * the name is not recognized or the buffer is too small for the input. 
+ */ +inline int ANeuralNetworksExecution_setInput( + ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, const void* buffer, size_t length) { + LOAD_FUNCTION(ANeuralNetworksExecution_setInput); + EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length); +} + +/** + * Associate part of a memory object with an input of the model of the + * {@link ANeuralNetworksExecution}. + * + * <p>The provided memory must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param index The index of the input argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param type The type of the operand. This can be used to specify the + * dimensions that were set to 0 when the operand was added to the + * model. All other values must be the same as specified in the + * model. If the type is the same as specified when the model + * was built, NULL can be passed. + * @param memory The memory containing the data. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if + * the name is not recognized or the buffer is too small for the input. 
+ */ +inline int ANeuralNetworksExecution_setInputFromMemory( + ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, + size_t offset, size_t length) { + LOAD_FUNCTION(ANeuralNetworksExecution_setInputFromMemory); + EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length); +} + +/** + * Associate a user buffer with an output of the model of the + * {@link ANeuralNetworksExecution}. + * + * <p>The provided buffer must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param index The index of the output argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param type The type of the operand. This can be used to specify the + * dimensions that were set to 0 when the operand was added to the + * model. All other values must be the same as specified in the + * model. If the type is the same as specified when the model + * was built, NULL can be passed. + * @param buffer The buffer where the data is to be written. + * @param length The length in bytes of the buffer. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if + * the name is not recognized or the buffer is too small for the output. + */ +inline int ANeuralNetworksExecution_setOutput( + ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, void* buffer, size_t length) { + LOAD_FUNCTION(ANeuralNetworksExecution_setOutput); + EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length); +} + +/** + * Associate part of a memory object with an output of the model of the + * {@link ANeuralNetworksExecution}. 
+ * + * <p>The provided memory must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param index The index of the output argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param type The type of the operand. This can be used to specify the + * dimensions that were set to 0 when the operand was added to the + * model. All other values must be the same as specified in the + * model. If the type is the same as specified when the model + * was built, NULL can be passed. + * @param memory The memory where the data is to be stored. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The length in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if + * the name is not recognized or the buffer is too small for the output. + */ +inline int ANeuralNetworksExecution_setOutputFromMemory( + ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, + size_t offset, size_t length) { + LOAD_FUNCTION(ANeuralNetworksExecution_setOutputFromMemory); + EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length); +} + +/** + * Schedule evaluation of the execution. + * + * <p>Schedules evaluation of the execution. Once the model has been + * applied and the outputs are ready to be consumed, the execution will be + * signaled. Use {@link ANeuralNetworksExecution_wait} to wait for that signal. + * </p> + * + * Multiple executions can be scheduled and evaluated concurrently, and + * compilations can be performed concurrently with executions. 
The runtime makes + * no guarantee on the ordering of the completion of compilations and + * executions. If it's important to the application, the application should + * enforce the ordering by using {@link ANeuralNetworksCompilation_wait} and + * {@link ANeuralNetworksExecution_wait}. + * + * ANeuralNetworksExecution_wait must be called to recuperate the resources used + * by the execution. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be scheduled and executed. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksExecution_startCompute( + ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event) { + LOAD_FUNCTION(ANeuralNetworksExecution_startCompute); + EXECUTE_FUNCTION_RETURN(execution, event); +} + +/** + * Waits until the execution completes. + * + * More than one thread can wait on an event. When the execution completes, + * all threads will be released. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + */ +inline int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) { + LOAD_FUNCTION(ANeuralNetworksEvent_wait); + EXECUTE_FUNCTION_RETURN(event); +} + +/** + * Destroys the event. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 
+ */ +inline void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) { + LOAD_FUNCTION(ANeuralNetworksEvent_free); + EXECUTE_FUNCTION(event); +} + +#endif // __NEURAL_NETWORKS_SHIM__ diff --git a/runtime/libs/nnapi/v1.2/CMakeLists.txt b/runtime/libs/nnapi/v1.2/CMakeLists.txt new file mode 100644 index 000000000..21ec3015f --- /dev/null +++ b/runtime/libs/nnapi/v1.2/CMakeLists.txt @@ -0,0 +1,4 @@ +add_library(nnfw_lib_nnapi_1_2 INTERFACE) + +target_include_directories(nnfw_lib_nnapi_1_2 INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_link_libraries(nnfw_lib_nnapi_1_2 INTERFACE nnfw-nnapi-header) diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h b/runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h new file mode 100644 index 000000000..855613241 --- /dev/null +++ b/runtime/libs/nnapi/v1.2/include/NeuralNetworksExShim.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file NeuralNetworksExShim.h + * @brief This file contains an actual implementation of + * ANeuralNetworksModel_addOperationEx function + */ + +#ifndef __NEURAL_NETWORKS_EX_SHIM_H__ +#define __NEURAL_NETWORKS_EX_SHIM_H__ + +#include "NeuralNetworks.h" +#include "NeuralNetworksEx.h" +#include "NeuralNetworksLoadHelpers.h" + +typedef int (*ANeuralNetworksModel_addOperationEx_fn)(ANeuralNetworksModel *model, + ANeuralNetworksOperationTypeEx type, + uint32_t inputCount, const uint32_t *inputs, + uint32_t outputCount, + const uint32_t *outputs); + +/** + * @brief Add an extended operation to a model. + * + * @param[in] model The model to be modified. + * @param[in] type The type of extended operation. + * @param[in] inputCount The number of entries in the inputs array. + * @param[in] inputs An array of indexes identifying each operand. + * @param[in] outputCount The number of entries in the outputs array. + * @param[in] outputs An array of indexes identifying each operand. + * + * @note The operands specified by inputs and outputs must have been + * previously added by calls to {@link ANeuralNetworksModel_addOperand}.\n + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} + * has been called will return an error.\n + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. 
+ */ + +inline int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model, + ANeuralNetworksOperationTypeEx type, + uint32_t inputCount, const uint32_t *inputs, + uint32_t outputCount, const uint32_t *outputs) +{ + LOAD_FUNCTION(ANeuralNetworksModel_addOperationEx); + EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, outputs); +} + +#endif // __NEURAL_NETWORKS_EX_SHIM_H__ diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h b/runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h new file mode 100644 index 000000000..1c482b54c --- /dev/null +++ b/runtime/libs/nnapi/v1.2/include/NeuralNetworksLoadHelpers.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This header is derived from part of the following file (in TensorFlow v1.12)
+// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h'
+
+/**
+ * @file NeuralNetworksLoadHelpers.h
+ * @brief This file contains functions to load NN API runtime library
+ */
+
+#ifndef __NEURAL_NETWORKS_LOAD_HELPER_H__
+#define __NEURAL_NETWORKS_LOAD_HELPER_H__
+
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/**
+ * @brief Print log data
+ * @param[in] format Format string of @c printf
+ * @param[in] args Argument after format string. (Same with @c printf)
+ */
+// NOTE(review): requires at least one argument after 'format' (pre-C++20
+// __VA_ARGS__ may not be empty), and the trailing ';' is embedded in the
+// macro itself — callers should not rely on it expanding to an expression.
+#define NNAPI_LOG(format, ...) printf(format "\n", __VA_ARGS__);
+
+/**
+ * @brief Create a function pointer named @c fn after loading NN API library
+ * @param[in] name Name of a function
+ */
+// 'fn' is a function-local static, so the dlsym() lookup in nnfw::loadFunction
+// runs only once per call site; later calls reuse the cached pointer.
+#define LOAD_FUNCTION(name) \
+  static name##_fn fn = reinterpret_cast<name##_fn>(nnfw::loadFunction(#name));
+
+/**
+ * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION
+ * @param[in] args List of arguments for the function @c fn
+ */
+#define EXECUTE_FUNCTION(...) \
+  if (fn != nullptr) {        \
+    fn(__VA_ARGS__);          \
+  }
+
+/**
+ * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION
+ * @param[in] args List of arguments for the function @c fn
+ * @return the return value of @c fn
+ */
+// Returns 0 when fn is null; in practice fn is never null here because
+// nnfw::loadFunction() aborts on lookup failure (see below).
+#define EXECUTE_FUNCTION_RETURN(...) return fn != nullptr ? fn(__VA_ARGS__) : 0;
+
+namespace nnfw
+{
+
+/**
+ * @brief Load NN API library
+ * @param[in] name path of NN API library
+ * @return a symbol table handle of NN API library, or nullptr on failure
+ */
+inline void* loadLibrary(const char* name) {
+  // TODO: change RTLD_LOCAL? Assumes there can be multiple instances of nn
+  // api RT
+  void* handle = nullptr;
+#if 1 //#ifdef __ANDROID__
+  handle = dlopen(name, RTLD_LAZY | RTLD_LOCAL);
+  if (handle == nullptr) {
+    NNAPI_LOG("nnapi error: unable to open library %s", name);
+  }
+#endif
+  return handle;
+}
+
+/**
+ * @brief Load libneuralnetworks.so and return handle of library
+ * @return a symbol table handle of NN API library
+ */
+// Cached in a function-local static: dlopen() runs once and the library
+// stays loaded (and the handle is never dlclose()d) for the process lifetime.
+inline void* getLibraryHandle() {
+  static void* handle = loadLibrary("libneuralnetworks.so");
+  return handle;
+}
+
+/**
+ * @brief Return function ptr in libneuralnetworks.so
+ * @param[in] name Name of function
+ * @return function pointer
+ */
+// NOTE(review): abort()s the entire process when the symbol cannot be
+// resolved, so callers never observe a null return from a loaded library.
+inline void* loadFunction(const char* name) {
+  void* fn = nullptr;
+  if (getLibraryHandle() != nullptr) {
+    fn = dlsym(getLibraryHandle(), name);
+  }
+  if (fn == nullptr) {
+    NNAPI_LOG("nnapi error: unable to open function %s", name);
+    abort();
+  }
+  else {
+#ifdef _GNU_SOURCE
+    // dladdr() is a GNU extension; used only to log which shared object
+    // actually provided the symbol.
+    Dl_info info;
+    if (dladdr(fn, &info))
+    {
+      NNAPI_LOG("nnapi function '%s' is loaded from '%s' ", name, info.dli_fname);
+    }
+    else
+    {
+      NNAPI_LOG("nnapi function '%s' is failed to load", name);
+    }
+#endif // _GNU_SOURCE
+  }
+  return fn;
+}
+
+/**
+ * @brief Check if libneuralnetworks.so can be loaded
+ * @return @c true if loading is successful, otherwise @c false.
+ */
+inline bool NNAPIExists() {
+  static bool nnapi_is_available = getLibraryHandle();
+  return nnapi_is_available;
+}
+
+} // namespace nnfw
+
+#endif // __NEURAL_NETWORKS_LOAD_HELPER_H__
diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h b/runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h
new file mode 100644
index 000000000..80082383f
--- /dev/null
+++ b/runtime/libs/nnapi/v1.2/include/NeuralNetworksShim.h
@@ -0,0 +1,1136 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// NOTE This header is derived from part of the following file +// https://github.com/tensorflow/tensorflow/blob/a59ad83d06abd38b5e142c41043db8886a92fca8/tensorflow/lite/nnapi/NeuralNetworksShim.h + +#ifndef __NEURAL_NETWORKS_SHIM_H__ +#define __NEURAL_NETWORKS_SHIM_H__ + +#include "NeuralNetworksTypes.h" +#include "NeuralNetworksLoadHelpers.h" + +// This interface is now deprecated. You should use instead +// nnapi_implementation. + +// TODO(b/123017568): Update all current usages of this file. + +// NN api types based on NNAPI header file +// https://developer.android.com/ndk/reference/group/neural-networks + +/** + * Creates a shared memory object from a file descriptor. + * + * The shared memory is backed by a file descriptor via mmap. + * See {@link ANeuralNetworksMemory} for a description on how to use + * this shared memory. + * + * @param size The requested size in bytes. + * Must not be larger than the file size. + * @param prot The desired memory protection for the mapping. + * It is either PROT_NONE or the bitwise OR of one or + * more of the following flags: PROT_READ, PROT_WRITE. + * @param fd The requested file descriptor. + * The file descriptor has to be mmap-able. The file + * descriptor will be duplicated. + * @param offset The offset to the beginning of the file of the area to map. + * The offset has to be aligned to a page size. + * @param memory The memory object to be created. 
+ * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. + */ +inline int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, + ANeuralNetworksMemory **memory) +{ + LOAD_FUNCTION(ANeuralNetworksMemory_createFromFd); + EXECUTE_FUNCTION_RETURN(size, protect, fd, offset, memory); +} + +/** + * Delete a memory object. + * + * Destroys the object used by the run time to keep track of the memory. + * This will free the underlying actual memory if no other code has open + * handles to this memory. + * + * @param memory The memory object to be freed. + */ +inline void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) +{ + LOAD_FUNCTION(ANeuralNetworksMemory_free); + EXECUTE_FUNCTION(memory); +} + +/** + * Create an empty {@link ANeuralNetworksModel}. + * + * <p>This only creates the object. Computation is performed once + * {@link ANeuralNetworksExecution_startCompute} is invoked. + * + * The model should be constructed with calls to + * {@link ANeuralNetworksModel_addOperation} and + * {@link ANeuralNetworksModel_addOperand} + * + * <p>{@link ANeuralNetworksModel_finish} should be called once the model + * has been fully constructed.</p> + * + * <p>{@link ANeuralNetworksModel_free} should be called once the model + * is no longer needed.</p> + * + * @param model The {@link ANeuralNetworksModel} to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_create(ANeuralNetworksModel **model) +{ + LOAD_FUNCTION(ANeuralNetworksModel_create); + EXECUTE_FUNCTION_RETURN(model); +} + +/** + * Destroy a model. + * + * The model need not have been finished by a call to + * {@link ANeuralNetworksModel_finish}. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be destroyed. Passing NULL is acceptable and + * results in no operation. 
+ */ +inline void ANeuralNetworksModel_free(ANeuralNetworksModel *model) +{ + LOAD_FUNCTION(ANeuralNetworksModel_free); + EXECUTE_FUNCTION(model); +} + +/** + * Indicate that we have finished modifying a model. Required before + * calling {@link ANeuralNetworksCompilation_compile}. + * + * An application is responsible to make sure that no other thread uses + * the model at the same time. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be finished. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) +{ + LOAD_FUNCTION(ANeuralNetworksModel_finish); + EXECUTE_FUNCTION_RETURN(model); +} + +/** + * Add an operand to a model. + * + * The order in which the operands are added is important. The first one added + * to a model will have the index value 0, the second 1, etc. These indexes are + * used as operand identifiers in {@link ANeuralNetworksModel_addOperation}, + * {@link ANeuralNetworksExecution_setInput}, + * {@link ANeuralNetworksExecution_setInputFromMemory}, + * {@link ANeuralNetworksExecution_setOutput}, + * {@link ANeuralNetworksExecution_setOutputFromMemory} and + * {@link ANeuralNetworksExecution_setOperandValue}. + * + * To build a model that can accommodate inputs of various sizes, as you may + * want to do for a CNN, set the size of the dimensions that will vary at run + * time to 0. If you do so, provide the full dimensions when calling + * {@link ANeuralNetworksExecution_setInput} or {@link + * ANeuralNetworksExecution_setInputFromMemory}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be modified. + * @param type The {@link ANeuralNetworksOperandType} that describes the shape + * of the operand. 
+ * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model, + const ANeuralNetworksOperandType *type) +{ + LOAD_FUNCTION(ANeuralNetworksModel_addOperand); + EXECUTE_FUNCTION_RETURN(model, type); +} + +/** + * Sets an operand to a constant value. + * + * For scalar values, the content of buffer is copied into the model. + * + * For tensor values, a pointer to the buffer is stored within the model. + * The application is responsible for not changing the content of this region + * until all executions using this model have completed. As the data may + * be copied during processing, modifying the data after this call yields + * undefined results. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param buffer A pointer to the data to use. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index, + const void *buffer, size_t length) +{ + LOAD_FUNCTION(ANeuralNetworksModel_setOperandValue); + EXECUTE_FUNCTION_RETURN(model, index, buffer, length); +} + +/** + * Sets an operand's per channel quantization parameters. + * + * Sets parameters required by a tensor of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}. + * This function must be called for every tensor of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before + * calling {@link ANeuralNetworksModel_finish}. + * + * Available since API level 29. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. 
+ * @param channelQuant The per channel quantization parameters for the operand. + * No memory in this struct needs to outlive the call to + * this function. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( + ANeuralNetworksModel *model, int32_t index, + const ANeuralNetworksSymmPerChannelQuantParams *channelQuant) +{ + LOAD_FUNCTION(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams); + EXECUTE_FUNCTION_RETURN(model, index, channelQuant); +} + +/** + * Sets an operand to a value stored in a memory object. + * + * The content of the memory is not copied. A reference to that memory is stored + * inside the model. The application is responsible for not changing the content + * of the memory region until all executions using this model have completed. + * As the data may be copied during processing, modifying the data after this + * call yields undefined results. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param buffer A pointer to the data to use. + * @param memory The memory containing the data. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, + int32_t index, + const ANeuralNetworksMemory *memory, + size_t offset, size_t length) +{ + LOAD_FUNCTION(ANeuralNetworksModel_setOperandValueFromMemory); + EXECUTE_FUNCTION_RETURN(model, index, memory, offset, length); +} + +/** + * Add an operation to a model. 
+ * + * @param model The model to be modified. + * @param type The type of the operation. + * @param inputCount The number of entries in the inputs array. + * @param inputs An array of indexes identifying each operand. + * @param outputCount The number of entries in the outputs array. + * @param outputs An array of indexes identifying each operand. + * + * The operands specified by inputs and outputs must have been + * previously added by calls to {@link ANeuralNetworksModel_addOperand}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model, + ANeuralNetworksOperationType type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) +{ + LOAD_FUNCTION(ANeuralNetworksModel_addOperation); + EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, outputs); +} + +/** + * Specifies which operands will be the model's inputs and outputs. + * + * An operand cannot be used for both input and output. Doing so will + * return an error. + * + * @param model The model to be modified. + * @param inputCount The number of entries in the inputs array. + * @param inputs An array of indexes identifying the input operands. + * @param outputCount The number of entries in the outputs array. + * @param outputs An array of indexes identifying the output operands. + * + * The operands specified by inputs and outputs must have been + * previously added by calls to {@link ANeuralNetworksModel_addOperand}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. 
+ * + */ +inline int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, + uint32_t inputCount, + const uint32_t *inputs, + uint32_t outputCount, + const uint32_t *outputs) +{ + LOAD_FUNCTION(ANeuralNetworksModel_identifyInputsAndOutputs); + EXECUTE_FUNCTION_RETURN(model, inputCount, inputs, outputCount, outputs); +} + +/** + * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be + * calculated with range and/or precision as low as that of the IEEE 754 16-bit + * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * must be calculated using at least the range and precision of the IEEE 754 + * 32-bit floating-point format. + * + * @param model The model to be modified. + * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be + * calculated with range and/or precision as low as that of the + * IEEE 754 16-bit floating point format. 'false' indicates + * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using + * at least the range and precision of the IEEE 754 32-bit floating + * point format. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * Available since API level 28. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + */ +inline int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel *model, + bool allow) +{ + LOAD_FUNCTION(ANeuralNetworksModel_relaxComputationFloat32toFloat16); + EXECUTE_FUNCTION_RETURN(model, allow); +} + +/** + * Create a {@link ANeuralNetworksCompilation} to compile the given model. + * This only creates the object. Compilation is only performed once + * {@link ANeuralNetworksCompilation_start} is invoked. + * + * <p>The provided model must outlive the compilation.</p> + * + * The model must already have been finished by a call to + * {@link ANeuralNetworksModel_finish}. 
+ * + * See {@link ANeuralNetworksCompilation} for information on multithreaded + * usage. + * + * @param model The {@link ANeuralNetworksModel} to be compiled. + * @param compilation The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the model is invalid. + */ +inline int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model, + ANeuralNetworksCompilation **compilation) +{ + LOAD_FUNCTION(ANeuralNetworksCompilation_create); + EXECUTE_FUNCTION_RETURN(model, compilation); +} + +/** + * Destroy a compilation. + * + * <p>If called on a compilation for which + * {@link ANeuralNetworksCompilation_start} has been called, the + * function will return immediately but will mark the compilation to be deleted + * once the compilation completes. The {@link ANeuralNetworksCompilation_wait} + * will return ERROR_DELETED. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded + * usage. + * + * @param compilation The compilation to be destroyed. Passing NULL is + * acceptable and results in no operation. + */ +inline void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation) +{ + LOAD_FUNCTION(ANeuralNetworksCompilation_free); + EXECUTE_FUNCTION(compilation); +} + +/** + * Sets the execution preference. + * + * <p>Provides guidance to the runtime when trade-offs are possible.</p> + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded + * usage. + * + * @param compilation The compilation to be modified. + * @param preference Either {@link PREFER_LOW_POWER}, + * {@link PREFER_SINGLE_FAST_ANSWER}, or + * {@link PREFER_SUSTAINED_SPEED}. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. 
+ */ +inline int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation, + int32_t preference) +{ + LOAD_FUNCTION(ANeuralNetworksCompilation_setPreference); + EXECUTE_FUNCTION_RETURN(compilation, preference); +} + +/** + * Waits until the compilation completes. + * + * More than one thread can wait on a compilation. When the compilation + * completes, all threads will be released. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded + * usage. + * + * @return ANEURALNETWORKS_NO_ERROR if the compilation completed normally. + */ +inline int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation) +{ + LOAD_FUNCTION(ANeuralNetworksCompilation_finish); + EXECUTE_FUNCTION_RETURN(compilation); +} +/** + * Create a {@link ANeuralNetworksExecution} to apply the given compilation. + * This only creates the object. Computation is only performed once + * {@link ANeuralNetworksExecution_startCompute} is invoked. + * + * <p>The provided compilation must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. + * @param execution The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the compilation is invalid. + */ +inline int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation, + ANeuralNetworksExecution **execution) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_create); + EXECUTE_FUNCTION_RETURN(compilation, execution); +} + +/** + * Destroy an execution. + * + * <p>If called on an execution for which + * {@link ANeuralNetworksExecution_startCompute} has been called, the + * function will return immediately but will mark the execution to be deleted + * once the computation completes. The {link ANeuralNetworksExecution_wait} + * will return ANEURALNETWORKS_ERROR_DELETED. 
+ * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be destroyed. Passing NULL is acceptable + * and results in no operation. + */ +inline void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_free); + EXECUTE_FUNCTION(execution); +} + +/** + * Associate a user buffer with an input of the model of the + * {@link ANeuralNetworksExecution}. + * + * <p>The provided buffer must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param index The index of the input argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param type The type of the operand. This should be used to specify the + * dimensions that were set to 0 when the operand was added to the + * model. All other properties of the type must be the same as + * specified in the model. If the type is the same as specified + * when the model was built, NULL can be passed. + * @param buffer The buffer containing the data. + * @param length The length in bytes of the buffer. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if + * the name is not recognized or the buffer is too small for the input. + */ +inline int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, + const void *buffer, size_t length) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_setInput); + EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length); +} + +/** + * Associate part of a memory object with an input of the model of the + * {@link ANeuralNetworksExecution}. 
+ * + * <p>The provided memory must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param index The index of the input argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param type The type of the operand. This can be used to specify the + * dimensions that were set to 0 when the operand was added to the + * model. All other values must be the same as specified in the + * model. If the type is the same as specified when the model + * was built, NULL can be passed. + * @param memory The memory containing the data. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if + * the name is not recognized or the buffer is too small for the input. + */ +inline int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution, + int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, + size_t offset, size_t length) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_setInputFromMemory); + EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length); +} + +/** + * Associate a user buffer with an output of the model of the + * {@link ANeuralNetworksExecution}. + * + * <p>The provided buffer must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param index The index of the output argument we are setting. 
It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param type The type of the operand. This can be used to specify the + * dimensions that were set to 0 when the operand was added to the + * model. All other values must be the same as specified in the + * model. If the type is the same as specified when the model + * was built, NULL can be passed. + * @param buffer The buffer where the data is to be written. + * @param length The length in bytes of the buffer. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if + * the name is not recognized or the buffer is too small for the output. + */ +inline int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, void *buffer, + size_t length) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_setOutput); + EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length); +} + +/** + * Associate part of a memory object with an output of the model of the + * {@link ANeuralNetworksExecution}. + * + * <p>The provided memory must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param index The index of the output argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param type The type of the operand. This can be used to specify the + * dimensions that were set to 0 when the operand was added to the + * model. All other values must be the same as specified in the + * model. If the type is the same as specified when the model + * was built, NULL can be passed. 
+ * @param memory The memory where the data is to be stored. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The length in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if + * the name is not recognized or the buffer is too small for the output. + */ +inline int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution, + int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, + size_t offset, size_t length) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_setOutputFromMemory); + EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length); +} + +/** + * Schedule evaluation of the execution. + * + * <p>Schedules evaluation of the execution. Once the model has been + * applied and the outputs are ready to be consumed, the execution will be + * signaled. Use {@link ANeuralNetworksExecution_wait} to wait for that signal. + * </p> + * + * Multiple executions can be scheduled and evaluated concurrently, and + * compilations can be performed concurrently with executions. The runtime makes + * no guarantee on the ordering of the completion of compilations and + * executions. If it's important to the application, the application should + * enforce the ordering by using {@link ANeuralNetworksCompilation_wait} and + * {@link ANeuralNetworksExecution_wait}. + * + * ANeuralNetworksExecution_wait must be called to recuperate the resources used + * by the execution. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be scheduled and executed. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. 
+ */ +inline int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution, + ANeuralNetworksEvent **event) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_startCompute); + EXECUTE_FUNCTION_RETURN(execution, event); +} + +/** + * Waits until the execution completes. + * + * More than one thread can wait on an event. When the execution completes, + * all threads will be released. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + */ +inline int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event) +{ + LOAD_FUNCTION(ANeuralNetworksEvent_wait); + EXECUTE_FUNCTION_RETURN(event); +} + +/** + * Destroys the event. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + */ +inline void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event) +{ + LOAD_FUNCTION(ANeuralNetworksEvent_free); + EXECUTE_FUNCTION(event); +} + +/** + * Get the number of available devices. + * + * @param numDevices Used to return the number of devices. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +inline int ANeuralNetworks_getDeviceCount(uint32_t *numDevices) +{ + LOAD_FUNCTION(ANeuralNetworks_getDeviceCount); + EXECUTE_FUNCTION_RETURN(numDevices); +} + +/** + * Get the representation of the specified device. + * + * @param devIndex The index of the specified device. Must be less than the + * number of available devices. + * @param device The representation of the specified device. + * The same representation will always be returned for the + * specified device. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ + +inline int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice **device) +{ + LOAD_FUNCTION(ANeuralNetworks_getDevice); + EXECUTE_FUNCTION_RETURN(devIndex, device); +} + +/** + * Get the name of the specified device. 
+ * + * @param device The representation of the specified device. + * @param name The returned name of the specified device. The name will be in + * UTF-8 and will be null-terminated. It will be recognizable as a + * known device name rather than a cryptic string. For devices + * with API level 29 and above, the format of the name is + * {VENDOR}-{DEVICE}, e.g. “google-ipu”. For devices with feature + * level 28 or lower, the name will always be “unknown-device”. + * The name will remain valid for the duration of the application. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +inline int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice *device, const char **name) +{ + LOAD_FUNCTION(ANeuralNetworksDevice_getName); + EXECUTE_FUNCTION_RETURN(device, name); +} + +/** + * Get the version of the driver implementation of the specified device. + * + * It’s the responsibility of the driver implementor to insure that this version + * string uniquely distinguishes this implementation from all previous + * implementations. + * + * This version string must not be confused with the feature level which is + * solely defined by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no + * implicit ordering of the versions. For example, it is not possible to filter + * all drivers older than a certain version. + * + * Application developers may use this version string to avoid or prefer + * specific driver implementations. For example, an application may want to do + * so because: + * - A specific version of the driver does not provide the required + * performance, perhaps because of a performance regression. + * - A specific version of the driver has a bug or returns results that + * don’t match the minimum precision requirement for the application. + * + * @param device The representation of the specified device. + * @param version The returned version string of the driver for the specified + * device. 
The string will be in UTF-8 and will be + * null-terminated. For devices with feature level 28 or lower, + * "UNKNOWN" will be returned. The version string will remain + * valid for the duration of the application. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +inline int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice *device, + const char **version) +{ + LOAD_FUNCTION(ANeuralNetworksDevice_getVersion); + EXECUTE_FUNCTION_RETURN(device, version); +} + +/** + * Get the supported NNAPI version of the specified device. + * + * Each device has a supported feature level, which is the most advanced feature + * this driver implements. For example, if the driver implements the features + * introduced in Android P, but does not implement the features introduced after + * Android P, the value would be 28. Developers could decide whether or not the + * specified device should be used for a Model that has certain feature + * requirements. + * + * @param device The representation of the specified device. + * @param featureLevel The API level of the most advanced feature this driver + * implements. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +inline int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice *device, + int64_t *featureLevel) +{ + LOAD_FUNCTION(ANeuralNetworksDevice_getFeatureLevel); + EXECUTE_FUNCTION_RETURN(device, featureLevel); +} + +/** + * Get the supported operations for a specified set of devices. If multiple + * devices are selected, the supported operation list is a union of supported + * operations of all selected devices. + * + * @param model The model to be queried. + * @param devices The set of devices. Must not contain duplicates. + * @param numDevices The number of devices in the set. + * @param supportedOps The boolean array to be filled. True means supported. 
The + * size of the boolean array must be at least as large as + * the number of operations in the model. The order of + * elements in the supportedOps array matches the order in + * which the corresponding operations were added to the + * model. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +inline int +ANeuralNetworksModel_getSupportedOperationsForDevices(const ANeuralNetworksModel *model, + const ANeuralNetworksDevice *const *devices, + uint32_t numDevices, bool *supportedOps) +{ + LOAD_FUNCTION(ANeuralNetworksModel_getSupportedOperationsForDevices); + EXECUTE_FUNCTION_RETURN(model, devices, numDevices, supportedOps); +} + +/** + * Create a {@link ANeuralNetworksCompilation} to compile the given model for a + * specified set of devices. If more than one device is specified, the + * compilation will distribute the workload automatically across the devices. + * The model must be fully supported by the specified set of devices. This means + * that ANeuralNetworksModel_getSupportedOperationsForDevices() must have + * returned true for every operation for that model/devices pair. + * + * @param model The {@link ANeuralNetworksModel} to be compiled. + * @param devices The set of devices. Must not contain duplicates. + * @param numDevices The number of devices in the set. + * @param compilation The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the model is invalid. + * + * Available since API level 29. + */ +inline int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel *model, + const ANeuralNetworksDevice *const *devices, + uint32_t numDevices, + ANeuralNetworksCompilation **compilation) +{ + LOAD_FUNCTION(ANeuralNetworksCompilation_createForDevices); + EXECUTE_FUNCTION_RETURN(model, devices, numDevices, compilation); +} + +/** + * Sets the compilation caching signature and the cache directory. 
+ *
+ * Provides optional caching information to the runtime for faster repeated
+ * compilation.
+ *
+ * See {@link ANeuralNetworksCompilation} for information on multithreaded
+ * usage.
+ *
+ * @param compilation The compilation to be modified.
+ * @param cacheDir The cache directory to store and retrieve caching data. It is
+ * recommended to use the code_cache provided by the Android
+ * runtime. If not using the code_cache, the user should choose
+ * a directory local to the application, and is responsible to
+ * manage and clean the cache entries.
+ * @param token The token provided by the user to specify a model, must be of
+ * length ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should
+ * ensure that the token is unique to a model within the
+ * application. The NNAPI runtime will not detect token
+ * collisions. If there is a collision, the compilation outcome may
+ * be incorrect without notifying with error.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if successful.
+ *
+ * Available since API level 29.
+ */
+inline int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation *compilation,
+ const char *cacheDir, const uint8_t *token)
+{
+ LOAD_FUNCTION(ANeuralNetworksCompilation_setCaching);
+ EXECUTE_FUNCTION_RETURN(compilation, cacheDir, token);
+}
+
+/**
+ * Schedule synchronous evaluation of the execution.
+ *
+ * <p>Schedules synchronous evaluation of the execution. Returns once the
+ * execution has completed and the outputs are ready to be consumed.
+ * </p>
+ *
+ * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
+ *
+ * See {@link ANeuralNetworksExecution_startCompute} for asynchronous execution.
+ * Synchronous execution incurs lower overhead than asynchronous execution.
+ *
+ * Available since API level 29.
+ *
+ * @param execution The execution to be scheduled and executed.
+ *
+ * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. 
+ * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory + * cannot be properly mapped. + */ +inline int ANeuralNetworksExecution_compute(ANeuralNetworksExecution *execution) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_compute); + EXECUTE_FUNCTION_RETURN(execution); +} + +/** + * Get the dimensional information of the specified output operand of the model + * of the + * {@link ANeuralNetworksExecution}. + * + * On asynchronous execution initiated by {@link + * ANeuralNetworksExecution_startCompute}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function to + * recuperate the resources used by the execution. + * + * @param execution The execution to be queried. + * @param index The index of the output argument we are querying. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link + * ANeuralNetworksModel_addOperand}. + * @param rank The rank of the output operand. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, + * ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE if the target output is provided an + * insufficient buffer at execution time, ANEURALNETWORKS_BAD_DATA if the index + * is invalid. + * + * Available since API level 29. + */ +inline int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution *execution, + int32_t index, uint32_t *rank) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_getOutputOperandRank); + EXECUTE_FUNCTION_RETURN(execution, index, rank); +} + +/** + * Get the dimensional information of the specified output operand of the model + * of the + * {@link ANeuralNetworksExecution}. The target output operand cannot be a + * scalar. + * + * On asynchronous execution initiated by + * {@link ANeuralNetworksExecution_startCompute}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function to + * recuperate the resources used by the execution. 
+ * + * @param execution The execution to be queried. + * @param index The index of the output argument we are querying. It is an index + * into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with + * {@link ANeuralNetworksModel_addOperand}. + * @param dimensions The dimension array to be filled. The size of the array + * must be exactly as large as the rank of the output operand + * to be queried in the model. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, + * ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE if the target output is provided an + * insufficient buffer at execution time, ANEURALNETWORKS_BAD_DATA if the index + * is invalid or if the target is a scalar. + * + * Available since API level 29. + */ +inline int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution *execution, + int32_t index, uint32_t *dimensions) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_getOutputOperandDimensions); + EXECUTE_FUNCTION_RETURN(execution, index, dimensions); +} + +/** + * Create a {@link ANeuralNetworksBurst} to apply the given compilation. + * This only creates the burst object. Computation is only performed once + * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid + * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}. + * + * <p>The provided compilation must outlive the burst object.</p> + * + * Available since API level 29. + * + * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. + * @param burst The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the compilation is invalid. + */ +inline int ANeuralNetworksBurst_create(ANeuralNetworksCompilation *compilation, + ANeuralNetworksBurst **burst) +{ + LOAD_FUNCTION(ANeuralNetworksBurst_create); + EXECUTE_FUNCTION_RETURN(compilation, burst); +} + +/** + * Destroys the burst object. 
+ * + * Available since API level 29. + * + * @param burst The burst object to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +inline void ANeuralNetworksBurst_free(ANeuralNetworksBurst *burst) +{ + LOAD_FUNCTION(ANeuralNetworksBurst_free); + EXECUTE_FUNCTION(burst); +} + +/** + * Schedule synchronous evaluation of the execution on a burst object. + * + * <p>Schedules synchronous evaluation of the execution. Returns once the + * execution has completed and the outputs are ready to be consumed.</p> + * + * <p>There must be at most one {@link ANeuralNetworksExecution} processing at + * any given time for any given burst object. Any + * {@link ANeuralNetworksExecution} launched before the previous has finished + * will result in ANEURALNETWORKS_BAD_STATE.</p> + * + * Available since API level 29. + * + * @param burst The burst object to execute on. + * @param execution The execution to be scheduled and executed. The execution + * must be created from the same {@link + * ANeuralNetworksCompilation} as the burst object. + * + * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + */ +inline int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution *execution, + ANeuralNetworksBurst *burst) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_burstCompute); + EXECUTE_FUNCTION_RETURN(execution, burst); +} + +/** + * Creates a shared memory object from an AHardwareBuffer handle. + * + * If the shared memory is backed by an AHardwareBuffer of + * AHARDWAREBUFFER_FORMAT_BLOB format, it can be used the same way as shared + * memory created from a file handle. See + * {@link ANeuralNetworksMemory} for a description on how to use this shared + * memory. + * + * If the shared memory is backed by an AHardwareBuffer of a format other than + * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and + * outputs. 
When calling {@link ANeuralNetworksExecution_setInputFromMemory} or + * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, + * both offset and length must be set to zero and the entire memory region will + * be associated with the specified input or output operand. There is no + * guarantee that an arbitrary AHardwareBuffer_Format and + * AHardwareBuffer_UsageFlags combination can be used by arbitrary devices. The + * execution will fail if selected set of devices cannot consume the buffer. + * + * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared + * memory backed by an AHardwareBuffer of a format other than + * AHARDWAREBUFFER_FORMAT_BLOB is disallowed. + * + * TODO(miaowang): add documentation about intended usage with introspection + * API. + * + * Available since API level 29. + * + * @param ahwb The AHardwareBuffer handle. + * @param memory The memory object to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. + * + * @see AHardwareBuffer + */ +inline int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer *ahwb, + ANeuralNetworksMemory **memory) +{ + LOAD_FUNCTION(ANeuralNetworksMemory_createFromAHardwareBuffer); + EXECUTE_FUNCTION_RETURN(ahwb, memory); +} + +/** + * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be + * measured. By default, duration is not measured. + * + * The {@link ANeuralNetworksExecution} must have been created with + * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 29. + * + * @param execution The execution to be modified. + * @param measure 'true' if duration is to be measured, 'false' if not. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. 
+ */ +inline int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution *execution, + bool measure) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_setMeasureTiming); + EXECUTE_FUNCTION_RETURN(execution, measure); +} + +/** + * Get the time spent in the specified {@link ANeuralNetworksExecution}, in + * nanoseconds. The execution must have completed. + * + * @param execution The execution to be queried. + * @param durationCode The measurement to be queried, specified by {@link + * DurationCode}. + * @param duration The returned duration. If no measurement was requested by + * {@link ANeuralNetworksExecution_setMeasureTiming}, or for + * some other reason the duration is not available, UINT64_MAX will be returned. + * A particular device need not support any given measurement. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +inline int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution *execution, + int32_t durationCode, uint64_t *duration) +{ + LOAD_FUNCTION(ANeuralNetworksExecution_getDuration); + EXECUTE_FUNCTION_RETURN(execution, durationCode, duration); +} + +/**/ + +#endif // __NEURAL_NETWORKS_SHIM_H__ diff --git a/runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h b/runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h new file mode 100644 index 000000000..d74402749 --- /dev/null +++ b/runtime/libs/nnapi/v1.2/include/NeuralNetworksTypes.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// NOTE This header is derived from part of the following file +// https://github.com/tensorflow/tensorflow/blob/a59ad83d06abd38b5e142c41043db8886a92fca8/tensorflow/lite/nnapi/NeuralNetworksTypes.h + +#ifndef __NEURAL_NETWORKS_TYPES_H__ +#define __NEURAL_NETWORKS_TYPES_H__ + +#include "NeuralNetworks.h" + +// NN api types based on NNAPI header file +// https://developer.android.com/ndk/reference/group/neural-networks + +// nn api function types + +typedef int (*ANeuralNetworksMemory_createFromFd_fn)(size_t size, int protect, int fd, + size_t offset, ANeuralNetworksMemory **memory); + +typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory *memory); + +typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel **model); + +typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel *model); + +typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel *model); + +typedef int (*ANeuralNetworksCompilation_create_fn)(ANeuralNetworksModel *model, + ANeuralNetworksCompilation **compilation); + +typedef void (*ANeuralNetworksCompilation_free_fn)(ANeuralNetworksCompilation *compilation); + +typedef int (*ANeuralNetworksCompilation_setPreference_fn)(ANeuralNetworksCompilation *compilation, + int32_t preference); + +typedef int (*ANeuralNetworksCompilation_finish_fn)(ANeuralNetworksCompilation *compilation); + +typedef int (*ANeuralNetworksModel_addOperand_fn)(ANeuralNetworksModel *model, + const ANeuralNetworksOperandType *type); + +typedef int (*ANeuralNetworksModel_setOperandValue_fn)(ANeuralNetworksModel *model, 
int32_t index, + const void *buffer, size_t length); + +typedef int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams_fn)( + ANeuralNetworksModel *model, int32_t index, + const ANeuralNetworksSymmPerChannelQuantParams *channelQuant); + +typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)( + ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksMemory *memory, size_t offset, + size_t length); + +typedef int (*ANeuralNetworksModel_addOperation_fn)(ANeuralNetworksModel *model, + ANeuralNetworksOperationType type, + uint32_t inputCount, const uint32_t *inputs, + uint32_t outputCount, const uint32_t *outputs); + +typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(ANeuralNetworksModel *model, + uint32_t inputCount, + const uint32_t *inputs, + uint32_t outputCount, + const uint32_t *outputs); + +typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(ANeuralNetworksModel *model, + bool allow); + +typedef int (*ANeuralNetworksExecution_create_fn)(ANeuralNetworksCompilation *compilation, + ANeuralNetworksExecution **execution); + +typedef void (*ANeuralNetworksExecution_free_fn)(ANeuralNetworksExecution *execution); + +typedef int (*ANeuralNetworksExecution_setInput_fn)(ANeuralNetworksExecution *execution, + int32_t index, + const ANeuralNetworksOperandType *type, + const void *buffer, size_t length); + +typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)( + ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, size_t length); + +typedef int (*ANeuralNetworksExecution_setOutput_fn)(ANeuralNetworksExecution *execution, + int32_t index, + const ANeuralNetworksOperandType *type, + void *buffer, size_t length); + +typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)( + ANeuralNetworksExecution *execution, int32_t index, const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t 
offset, size_t length); + +typedef int (*ANeuralNetworksExecution_startCompute_fn)(ANeuralNetworksExecution *execution, + ANeuralNetworksEvent **event); + +typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent *event); + +typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent *event); + +typedef int (*ASharedMemory_create_fn)(const char *name, size_t size); + +typedef int (*ANeuralNetworks_getDeviceCount_fn)(uint32_t *numDevices); + +typedef int (*ANeuralNetworks_getDevice_fn)(uint32_t devIndex, ANeuralNetworksDevice **device); + +typedef int (*ANeuralNetworksDevice_getName_fn)(const ANeuralNetworksDevice *device, + const char **name); + +typedef int (*ANeuralNetworksDevice_getType_fn)(const ANeuralNetworksDevice *device, int32_t *type); + +typedef int (*ANeuralNetworksDevice_getVersion_fn)(const ANeuralNetworksDevice *device, + const char **version); + +typedef int (*ANeuralNetworksDevice_getFeatureLevel_fn)(const ANeuralNetworksDevice *device, + int64_t *featureLevel); + +typedef int (*ANeuralNetworksModel_getSupportedOperationsForDevices_fn)( + const ANeuralNetworksModel *model, const ANeuralNetworksDevice *const *devices, + uint32_t numDevices, bool *supportedOps); + +typedef int (*ANeuralNetworksCompilation_createForDevices_fn)( + ANeuralNetworksModel *model, const ANeuralNetworksDevice *const *devices, uint32_t numDevices, + ANeuralNetworksCompilation **compilation); + +typedef int (*ANeuralNetworksCompilation_setCaching_fn)(ANeuralNetworksCompilation *compilation, + const char *cacheDir, const uint8_t *token); + +typedef int (*ANeuralNetworksExecution_compute_fn)(ANeuralNetworksExecution *execution); + +typedef int (*ANeuralNetworksExecution_getOutputOperandRank_fn)(ANeuralNetworksExecution *execution, + int32_t index, uint32_t *rank); + +typedef int (*ANeuralNetworksExecution_getOutputOperandDimensions_fn)( + ANeuralNetworksExecution *execution, int32_t index, uint32_t *dimensions); + +typedef int 
(*ANeuralNetworksBurst_create_fn)(ANeuralNetworksCompilation *compilation, + ANeuralNetworksBurst **burst); + +typedef void (*ANeuralNetworksBurst_free_fn)(ANeuralNetworksBurst *burst); + +typedef int (*ANeuralNetworksExecution_burstCompute_fn)(ANeuralNetworksExecution *execution, + ANeuralNetworksBurst *burst); + +typedef int (*ANeuralNetworksMemory_createFromAHardwareBuffer_fn)(const AHardwareBuffer *ahwb, + ANeuralNetworksMemory **memory); + +typedef int (*ANeuralNetworksExecution_setMeasureTiming_fn)(ANeuralNetworksExecution *execution, + bool measure); + +typedef int (*ANeuralNetworksExecution_getDuration_fn)(const ANeuralNetworksExecution *execution, + int32_t durationCode, uint64_t *duration); + +#endif // __NEURAL_NETWORKS_TYPES_H__ diff --git a/runtime/libs/profiling/CMakeLists.txt b/runtime/libs/profiling/CMakeLists.txt new file mode 100644 index 000000000..e0398ce93 --- /dev/null +++ b/runtime/libs/profiling/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB_RECURSE SOURCES "src/*.cpp") + +add_library(nnfw_lib_profiling STATIC ${SOURCES}) +set_property(TARGET nnfw_lib_profiling PROPERTY POSITION_INDEPENDENT_CODE ON) +target_include_directories(nnfw_lib_profiling PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_link_libraries(nnfw_lib_profiling PRIVATE nnfw_common) +target_link_libraries(nnfw_lib_profiling PRIVATE nnfw_coverage) diff --git a/runtime/libs/profiling/include/profiling/profile_buffer.h b/runtime/libs/profiling/include/profiling/profile_buffer.h new file mode 100644 index 000000000..bc8d75e7c --- /dev/null +++ b/runtime/libs/profiling/include/profiling/profile_buffer.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// NOTE To minimize diff with upstream tensorflow, disable clang-format +// clang-format off + +// NOTE This header is derived from the following file (in TensorFlow v1.12) +// 'externals/tensorflow/tensorflow/lite/profiling/profile_buffer.h +#ifndef TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_BUFFER_H_ +#define TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_BUFFER_H_ + +#include <cstddef> +#include <cstdint> + +#include "profiling/time.h" + +namespace tflite { +namespace profiling { + +// A profiling event. +struct ProfileEvent { + // Describes the type of event. + // The event_metadata field may contain additional data for interpreting + // the event. + enum class EventType { + // Default event type, the metadata field has no special significance. + DEFAULT = 0, + // The event is an operator invocation and the event_metadata field is the + // index of operator node. 
+ OPERATOR_INVOKE_EVENT = 1
+ };
+
+ // Label of the event. This usually describes the event.
+ const char* tag;
+ // Timestamp in microseconds when the event began.
+ uint64_t begin_timestamp_us;
+ // Timestamp in microseconds when the event ended.
+ uint64_t end_timestamp_us;
+ // The field containing the type of event. This must be one of the event types
+ // in EventType.
+ EventType event_type;
+ // Extra data describing the details of the event.
+ uint32_t event_metadata;
+};
+} // namespace profiling
+} // namespace tflite
+
+#ifdef TFLITE_PROFILING_ENABLED
+
+#include <sys/time.h>
+#include <vector>
+
+namespace tflite {
+namespace profiling {
+constexpr uint32_t kInvalidEventHandle = static_cast<uint32_t>(~0) - 1;
+
+// A ring buffer of profile events.
+// This class is not thread safe.
+class ProfileBuffer {
+ public:
+ ProfileBuffer(uint32_t max_num_entries, bool enabled)
+ : enabled_(enabled), current_index_(0), event_buffer_(max_num_entries) {}
+
+ // Adds an event to the buffer with begin timestamp set to the current
+ // timestamp. Returns a handle to event that can be used to call EndEvent. If
+ // buffer is disabled this has no effect.
+ // The tag of the event should remain valid till the buffer is valid.
+ uint32_t BeginEvent(const char* tag, ProfileEvent::EventType event_type,
+ uint32_t event_metadata) {
+ if (!enabled_) {
+ return kInvalidEventHandle;
+ }
+ uint64_t timestamp = time::NowMicros();
+ int index = current_index_ % event_buffer_.size();
+ event_buffer_[index].tag = tag;
+ event_buffer_[index].event_type = event_type;
+ event_buffer_[index].event_metadata = event_metadata;
+ event_buffer_[index].begin_timestamp_us = timestamp;
+ event_buffer_[index].end_timestamp_us = 0;
+ current_index_++;
+ return index;
+ }
+
+ // Sets the enabled state of buffer to |enabled|
+ void SetEnabled(bool enabled) { enabled_ = enabled; }
+
+ // Sets the end timestamp for event for the handle to current time. 
+ // If the buffer is disabled or previous event has been overwritten this
+ // operation has no effect.
+ void EndEvent(uint32_t event_handle) {
+ if (!enabled_ || event_handle == kInvalidEventHandle ||
+ event_handle > current_index_) {
+ return;
+ }
+ const uint32_t max_size = event_buffer_.size();
+ if (current_index_ > (max_size + event_handle)) {
+ // Ignore, buffer has already overflowed.
+ return;
+ }
+
+ int event_index = event_handle % max_size;
+ event_buffer_[event_index].end_timestamp_us = time::NowMicros();
+ }
+
+ // Returns the size of the buffer.
+ size_t Size() const {
+ return (current_index_ >= event_buffer_.size()) ? event_buffer_.size()
+ : current_index_;
+ }
+
+ // Resets the buffer.
+ void Reset() {
+ enabled_ = false;
+ current_index_ = 0;
+ }
+
+ // Returns the profile event at the given index. If the index is invalid a
+ // nullptr is returned. The returned event may get overwritten if more events
+ // are added to buffer.
+ const struct ProfileEvent* const At(int index) const {
+ size_t size = Size();
+ if (index >= size) {
+ return nullptr;
+ }
+ const uint32_t max_size = event_buffer_.size();
+ uint32_t start =
+ (current_index_ > max_size) ? current_index_ % max_size : max_size;
+ index = (index + start) % max_size;
+ return &event_buffer_[index];
+ }
+
+ private:
+ bool enabled_;
+ uint32_t current_index_;
+ std::vector<ProfileEvent> event_buffer_;
+};
+} // namespace profiling
+} // namespace tflite
+#endif // TFLITE_PROFILING_ENABLED
+#endif // TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_BUFFER_H_
+
+// clang-format on
diff --git a/runtime/libs/profiling/include/profiling/profiler.h b/runtime/libs/profiling/include/profiling/profiler.h
new file mode 100644
index 000000000..ed3688140
--- /dev/null
+++ b/runtime/libs/profiling/include/profiling/profiler.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// NOTE To minimize diff with upstream tensorflow, disable clang-format +// clang-format off + +// NOTE This header is derived from the following file (in TensorFlow v1.12) +// 'externals/tensorflow/tensorflow/lite/profiling/profiler.h +#ifndef TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILER_H_ +#define TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILER_H_ + +#include <vector> + +#include "profiling/profile_buffer.h" + +#ifdef TFLITE_PROFILING_ENABLED + +namespace tflite { +namespace profiling { +class ScopedProfile; +class ScopedOperatorProfile; + +// Controls whether profiling is enabled or disabled and collects profiles. 
+// TFLite is used on platforms that don't have posix threads, so the profiler is +// kept as simple as possible. It is designed to be used only on a single +// thread. +// +// Profiles are collected using Scoped*Profile objects that begin and end a +// profile event. +// An example usage is shown in the example below: +// +// Say Worker class has a DoWork method and we are interested in profiling +// the overall execution time for DoWork and time spent in Task1 and Task2 +// functions. +// +// class Worker { +// public: +// void DoWork() { +// ScopedProfile(&controller, "DoWork"); +// Task1(); +// Task2(); +// ..... +// } +// +// void Task1() { +// ScopedProfile(&controller, "Task1"); +// .... +// } +// +// void Task2() { +// ScopedProfile(&controller, "Task2"); +// } +// +// Profiler profiler; +// } +// +// We instrument the functions that need to be profiled. +// +// Profile can be collected by enable profiling and then getting profile +// events. +// +// void ProfileWorker() { +// Worker worker; +// worker.profiler.EnableProfiling(); +// worker.DoWork(); +// worker.profiler.DisableProfiling(); +// // Profiling is complete, extract profiles. 
+// auto profile_events = worker.profiler.GetProfiles(); +// } +// +// +class Profiler { + public: + Profiler() : buffer_(1024, false) {} + + void StartProfiling() { buffer_.SetEnabled(true); } + void StopProfiling() { buffer_.SetEnabled(false); } + void Reset() { buffer_.Reset(); } + std::vector<const ProfileEvent*> GetProfileEvents() { + std::vector<const ProfileEvent*> profile_events; + profile_events.reserve(buffer_.Size()); + for (size_t i = 0; i < buffer_.Size(); i++) { + profile_events.push_back(buffer_.At(i)); + } + return profile_events; + } + + private: + friend class ScopedProfile; + friend class ScopedOperatorProfile; + ProfileBuffer* GetProfileBuffer() { return &buffer_; } + ProfileBuffer buffer_; +}; + +class ScopedProfile { + public: + // Adds a profile event to profile that begins with the construction + // of object and ends when the object goes out of scope. + // The lifetime of tag should be at least the lifetime of profiler. + + ScopedProfile(Profiler* profiler, const char* tag) + : buffer_(nullptr), event_handle_(0) { + if (profiler) { + buffer_ = profiler->GetProfileBuffer(); + event_handle_ = + buffer_->BeginEvent(tag, ProfileEvent::EventType::DEFAULT, 0); + } + } + ~ScopedProfile() { + if (buffer_) { + buffer_->EndEvent(event_handle_); + } + } + + private: + ProfileBuffer* buffer_; + int32_t event_handle_; +}; + +class ScopedOperatorProfile { + public: + // Adds a profile event to profile that begins with the construction + // of object and ends when the object goes out of scope. + // The lifetime of tag should be at least the lifetime of profiler. 
+ ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index) + : buffer_(nullptr), event_handle_(0) { + if (profiler) { + buffer_ = profiler->GetProfileBuffer(); + event_handle_ = buffer_->BeginEvent( + tag, ProfileEvent::EventType::OPERATOR_INVOKE_EVENT, node_index); + } + } + + ~ScopedOperatorProfile() { + if (buffer_) { + buffer_->EndEvent(event_handle_); + } + } + + private: + ProfileBuffer* buffer_; + int32_t event_handle_; +}; + +} // namespace profiling +} // namespace tflite + +#define VARNAME_UNIQ(name, ctr) name##ctr + +#define SCOPED_OPERATOR_PROFILE(profiler, node_index) \ + tflite::profiling::ScopedOperatorProfile VARNAME_UNIQ( \ + _profile_, __COUNTER__)((profiler), "OpInvoke", (node_index)) +#else + +namespace tflite { +namespace profiling { +// A noop version of profiler when profiling is disabled. +class Profiler { + public: + Profiler() {} + void StartProfiling() {} + void StopProfiling() {} + void Reset() {} + std::vector<const ProfileEvent*> GetProfileEvents() { return {}; } +}; +} // namespace profiling +} // namespace tflite + +#define SCOPED_OPERATOR_PROFILE(profiler, node_index) + +#endif // TFLITE_PROFILING_ENABLED + +#endif // TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILER_H_ + +// clang-format on diff --git a/runtime/libs/profiling/include/profiling/profiling.h b/runtime/libs/profiling/include/profiling/profiling.h new file mode 100644 index 000000000..ee0df1338 --- /dev/null +++ b/runtime/libs/profiling/include/profiling/profiling.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
#ifndef __NNFW_MISC_PROFILING_H__
#define __NNFW_MISC_PROFILING_H__

#include <iostream>

namespace tflite
{
namespace profiling
{
// Defined in the TFLite-derived profiler header; only pointers are held here.
class Profiler;
} // namespace profiling
} // namespace tflite

namespace profiling
{

/**
 * @brief Process-wide profiling context, shared through a singleton.
 *
 * Holds the currently attached TFLite profiler (if any) and a flag that
 * requests synchronous execution while profiling.
 */
class Context
{
public:
  // Starts with no profiler attached and synchronous mode off.
  Context() : _sync{false}, _profiler{nullptr} {}

public:
  /// Whether synchronous execution was requested; there is no way to unset it.
  const bool &sync(void) const { return _sync; }
  /// Currently attached profiler, or nullptr when none was registered.
  tflite::profiling::Profiler *getProfiler() { return _profiler; }
  /// Attach (or replace) the active profiler.
  void setProfiler(tflite::profiling::Profiler *p) { _profiler = p; }
  /// Request synchronous execution.
  void setSync(void) { _sync = true; }

private:
  bool _sync;
  tflite::profiling::Profiler *_profiler;

public:
  /// Lazily-constructed singleton instance.
  static Context &get(void)
  {
    static Context ctx;
    return ctx;
  }
};

} // namespace profiling
#endif // __NNFW_MISC_PROFILING_H__
// NOTE To minimize diff with upstream tensorflow, disable clang-format
// clang-format off

// NOTE This header is derived from the following file (in TensorFlow v1.12)
// 'externals/tensorflow/tensorflow/lite/profiling/time.h
#ifndef TENSORFLOW_CONTRIB_LITE_PROFILING_TIME_H_
#define TENSORFLOW_CONTRIB_LITE_PROFILING_TIME_H_

#include <cstdint>

namespace tflite {
namespace profiling {
namespace time {
// Current wall-clock time in microseconds since the Unix epoch
// (gettimeofday on POSIX, std::chrono::system_clock on MSVC).
uint64_t NowMicros();
} // namespace time
} // namespace profiling
} // namespace tflite
#endif // TENSORFLOW_CONTRIB_LITE_PROFILING_TIME_H_

// clang-format on
namespace tflite {
namespace profiling {
namespace time {

#if defined(_MSC_VER)

// MSVC has no gettimeofday(); use std::chrono's system clock instead.
uint64_t NowMicros() {
  return std::chrono::duration_cast<std::chrono::microseconds>(
             std::chrono::system_clock::now().time_since_epoch())
      .count();
}

#else

// POSIX: gettimeofday() reports seconds + microseconds since the epoch.
uint64_t NowMicros() {
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
}

#endif  // defined(_MSC_VER)

}  // namespace time
}  // namespace profiling
}  // namespace tflite
# Builds the static library that pins ("anchors") the process-wide
# RuntimeService selection. See include/rua/Anchor.h.
file(GLOB_RECURSE SOURCES "src/*.cpp")

add_library(nnfw_lib_rua_anchor STATIC ${SOURCES})
# The static archive may be folded into shared libraries, so build it as PIC.
set_target_properties(nnfw_lib_rua_anchor PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(nnfw_lib_rua_anchor PUBLIC include)
target_link_libraries(nnfw_lib_rua_anchor PUBLIC nnfw_lib_rua_core)
target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_lib_rua_dyn)
target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_common)
target_link_libraries(nnfw_lib_rua_anchor PRIVATE nnfw_coverage)
/**
 * @brief Global Runtime Abstraction Context
 *
 * "set" will have global effect (within each process): get() returns the
 * currently anchored RuntimeService and set() replaces it for every
 * subsequent caller.
 */
struct Anchor
{
  static const RuntimeService *get(void);
  static void set(const RuntimeService *svc);
};
+ */ + +#include "rua/Anchor.h" +#include <rua/DynamicBinder.h> + +namespace +{ + +const rua::RuntimeService *anchored_service = rua::DynamicBinder::get(); + +} // namespace + +namespace rua +{ + +const RuntimeService *Anchor::get(void) { return anchored_service; } +void Anchor::set(const RuntimeService *service) { anchored_service = service; } + +} // namespace rua diff --git a/runtime/libs/rua/core/CMakeLists.txt b/runtime/libs/rua/core/CMakeLists.txt new file mode 100644 index 000000000..f7d41f657 --- /dev/null +++ b/runtime/libs/rua/core/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(nnfw_lib_rua_core INTERFACE) +target_include_directories(nnfw_lib_rua_core INTERFACE include) +target_link_libraries(nnfw_lib_rua_core INTERFACE nnfw_lib_nnapi) diff --git a/runtime/libs/rua/core/include/rua/Service.h b/runtime/libs/rua/core/include/rua/Service.h new file mode 100644 index 000000000..a79524a8a --- /dev/null +++ b/runtime/libs/rua/core/include/rua/Service.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file Service.h + * @brief Core abstraction that RUA depends on. 
#ifndef __NNFW_RUA_SERVICE_H__
#define __NNFW_RUA_SERVICE_H__

#include "NeuralNetworks.h"

// Opaque NNAPI handle types; only pointers to them are passed around here.
struct ANeuralNetworksMemory;
struct ANeuralNetworksEvent;

struct ANeuralNetworksModel;
struct ANeuralNetworksCompilation;
struct ANeuralNetworksExecution;

namespace rua
{

/**
 * @brief A wrapper for the ANeuralNetworksMemory API
 *
 * Each method mirrors the NNAPI function of the same name and returns its
 * NNAPI result code.
 */
struct MemoryService
{
  virtual ~MemoryService() = default;

  // Mirrors ANeuralNetworksMemory_createFromFd().
  virtual int createFromFd(size_t size, int protect, int fd, size_t offset,
                           ANeuralNetworksMemory **memory) const = 0;

  // Mirrors ANeuralNetworksMemory_free().
  virtual void free(ANeuralNetworksMemory *memory) const = 0;
};

/**
 * @brief A wrapper for the ANeuralNetworksModel API
 *
 * Each method mirrors the NNAPI function of the same name and returns its
 * NNAPI result code.
 */
struct ModelService
{
  virtual ~ModelService() = default;

  virtual int create(ANeuralNetworksModel **model) const = 0;

  virtual int addOperand(ANeuralNetworksModel *model,
                         const ANeuralNetworksOperandType *type) const = 0;

  virtual int setOperandValue(ANeuralNetworksModel *model, int32_t index, const void *buffer,
                              size_t length) const = 0;

  virtual int setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
                                        const ANeuralNetworksMemory *memory, size_t offset,
                                        size_t length) const = 0;

  virtual int addOperation(ANeuralNetworksModel *model, ANeuralNetworksOperationType type,
                           uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
                           const uint32_t *outputs) const = 0;

  virtual int identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
                                       const uint32_t *inputs, uint32_t outputCount,
                                       const uint32_t *outputs) const = 0;

  virtual int relaxComputationFloat32toFloat16(ANeuralNetworksModel *model, bool allow) const = 0;

  virtual int finish(ANeuralNetworksModel *model) const = 0;

  virtual void free(ANeuralNetworksModel *model) const = 0;
};
ANeuralNetworksCompilation **compilation) const = 0; + + virtual int setPreference(ANeuralNetworksCompilation *compilation, int32_t preference) const = 0; + virtual int finish(ANeuralNetworksCompilation *compilation) const = 0; + + virtual void free(ANeuralNetworksCompilation *compilation) const = 0; +}; + +/** + * @brief A wrapper for ANeuralNetworkExecution API + */ +struct ExecutionService +{ + virtual ~ExecutionService() = default; + + virtual int create(ANeuralNetworksCompilation *compilation, + ANeuralNetworksExecution **execution) const = 0; + + virtual void free(ANeuralNetworksExecution *execution) const = 0; + + virtual int setInput(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, const void *buffer, + size_t length) const = 0; + + virtual int setInputFromMemory(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) const = 0; + + virtual int setOutput(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, void *buffer, + size_t length) const = 0; + + virtual int setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) const = 0; + + virtual int startCompute(ANeuralNetworksExecution *execution, + ANeuralNetworksEvent **event) const = 0; +}; + +/** + * @brief A wrapper for ANeuralNetworkEvent API + */ +struct EventService +{ + virtual int wait(ANeuralNetworksEvent *event) const = 0; + virtual void free(ANeuralNetworksEvent *event) const = 0; +}; + +/** + * @brief A wrapper for Android NN rutime itself + */ +struct RuntimeService +{ + virtual ~RuntimeService() = default; + + virtual const MemoryService *memory(void) const = 0; + virtual const ModelService *model(void) const = 0; + virtual const CompilationService *compilation(void) const = 0; 
# Builds the dlopen/dlsym-based NNAPI binding (rua::DynamicBinder).
file(GLOB_RECURSE SOURCES "src/*.cpp")

add_library(nnfw_lib_rua_dyn STATIC ${SOURCES})
# The static archive may be folded into shared libraries, so build it as PIC.
set_target_properties(nnfw_lib_rua_dyn PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_include_directories(nnfw_lib_rua_dyn PUBLIC include)
target_link_libraries(nnfw_lib_rua_dyn PUBLIC nnfw_lib_rua_core)
target_link_libraries(nnfw_lib_rua_dyn PRIVATE nnfw_common)
target_link_libraries(nnfw_lib_rua_dyn PRIVATE nnfw_coverage)
#ifndef __NNFW_RUA_DYNAMIC_BINDER_H__
#define __NNFW_RUA_DYNAMIC_BINDER_H__

#include <rua/Service.h>

namespace rua
{

/**
 * @brief Bind Android NN runtime implementation via dlopen & dlsym
 */
struct DynamicBinder
{
  // Returns a process-wide RuntimeService whose methods resolve NNAPI
  // symbols at call time.
  static const rua::RuntimeService *get(void);
};

} // namespace rua

#endif // __NNFW_RUA_DYNAMIC_BINDER_H__
//
// Memory
//
namespace
{

// Signatures of the NNAPI memory functions resolved at runtime.
typedef int (*ANeuralNetworksMemory_createFromFd_fn)(size_t size, int protect, int fd,
                                                     size_t offset, ANeuralNetworksMemory **memory);

typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory *memory);

// MemoryService backed by dynamically loaded NNAPI entry points.
// LOAD_FUNCTION / EXECUTE_FUNCTION* come from NeuralNetworksLoadHelpers.h.
struct MemoryServiceImpl final : public MemoryService
{
  int createFromFd(size_t size, int protect, int fd, size_t offset,
                   ANeuralNetworksMemory **memory) const override
  {
    LOAD_FUNCTION(ANeuralNetworksMemory_createFromFd);
    EXECUTE_FUNCTION_RETURN(size, protect, fd, offset, memory);
  }

  void free(ANeuralNetworksMemory *memory) const override
  {
    LOAD_FUNCTION(ANeuralNetworksMemory_free);
    EXECUTE_FUNCTION(memory);
  }
};

} // namespace
ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksMemory *memory, size_t offset, + size_t length); + +typedef int (*ANeuralNetworksModel_addOperation_fn)(ANeuralNetworksModel *model, + ANeuralNetworksOperationType type, + uint32_t inputCount, const uint32_t *inputs, + uint32_t outputCount, const uint32_t *outputs); + +typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(ANeuralNetworksModel *model, + uint32_t inputCount, + const uint32_t *inputs, + uint32_t outputCount, + const uint32_t *outputs); + +typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(ANeuralNetworksModel *model, + bool allow); + +struct ModelServiceImpl final : public ModelService +{ + int create(ANeuralNetworksModel **model) const override + { + LOAD_FUNCTION(ANeuralNetworksModel_create); + EXECUTE_FUNCTION_RETURN(model); + } + + int addOperand(ANeuralNetworksModel *model, const ANeuralNetworksOperandType *type) const override + { + LOAD_FUNCTION(ANeuralNetworksModel_addOperand); + EXECUTE_FUNCTION_RETURN(model, type); + } + int setOperandValue(ANeuralNetworksModel *model, int32_t index, const void *buffer, + size_t length) const override + { + LOAD_FUNCTION(ANeuralNetworksModel_setOperandValue); + EXECUTE_FUNCTION_RETURN(model, index, buffer, length); + } + + int setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) const override + { + LOAD_FUNCTION(ANeuralNetworksModel_setOperandValueFromMemory); + EXECUTE_FUNCTION_RETURN(model, index, memory, offset, length); + } + + int addOperation(ANeuralNetworksModel *model, ANeuralNetworksOperationType type, + uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) const override + { + LOAD_FUNCTION(ANeuralNetworksModel_addOperation); + EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, outputs); + } + + int identifyInputsAndOutputs(ANeuralNetworksModel *model, 
// Signatures of the NNAPI compilation functions resolved at runtime.
typedef int (*ANeuralNetworksCompilation_create_fn)(ANeuralNetworksModel *model,
                                                    ANeuralNetworksCompilation **compilation);

typedef void (*ANeuralNetworksCompilation_free_fn)(ANeuralNetworksCompilation *compilation);

typedef int (*ANeuralNetworksCompilation_setPreference_fn)(ANeuralNetworksCompilation *compilation,
                                                           int32_t preference);

typedef int (*ANeuralNetworksCompilation_finish_fn)(ANeuralNetworksCompilation *compilation);
//
// Execution
//
const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) const override + { + LOAD_FUNCTION(ANeuralNetworksExecution_setInputFromMemory); + EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length); + } + + int setOutput(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, void *buffer, size_t length) const override + { + LOAD_FUNCTION(ANeuralNetworksExecution_setOutput); + EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length); + } + + int setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) const override + { + LOAD_FUNCTION(ANeuralNetworksExecution_setOutputFromMemory); + EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length); + } + + int startCompute(ANeuralNetworksExecution *execution, ANeuralNetworksEvent **event) const override + { + LOAD_FUNCTION(ANeuralNetworksExecution_startCompute); + EXECUTE_FUNCTION_RETURN(execution, event); + } + + void free(ANeuralNetworksExecution *execution) const override + { + LOAD_FUNCTION(ANeuralNetworksExecution_free); + EXECUTE_FUNCTION(execution); + } +}; + +} // namespace + +// +// Runtime +// +namespace +{ + +class RuntimeImpl final : public RuntimeService +{ +public: + const MemoryService *memory(void) const override { return &_memory; } + const EventService *event(void) const override { return &_event; } + + const ModelService *model(void) const override { return &_model; } + const CompilationService *compilation(void) const override { return &_compilation; } + const ExecutionService *execution(void) const override { return &_execution; } + +private: + MemoryServiceImpl _memory; + EventServiceImpl _event; + + ModelServiceImpl _model; + CompilationServiceImpl _compilation; + ExecutionServiceImpl _execution; +}; + +} // namespace + +namespace rua +{ + +const 
# Header-only shim that exposes the NNAPI C symbols on top of rua::Anchor.
add_library(nnfw_lib_rua_shim INTERFACE)
target_include_directories(nnfw_lib_rua_shim INTERFACE include)
target_link_libraries(nnfw_lib_rua_shim INTERFACE nnfw_lib_rua_core)
target_link_libraries(nnfw_lib_rua_shim INTERFACE nnfw_lib_rua_anchor)
+ */ + +#ifndef __NNFW_RUA_SHIM_H__ +#define __NNFW_RUA_SHIM_H__ + +#include <rua/Anchor.h> + +// +// Memory +// +inline int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, + ANeuralNetworksMemory **memory) +{ + return rua::Anchor::get()->memory()->createFromFd(size, protect, fd, offset, memory); +} + +inline void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) +{ + return rua::Anchor::get()->memory()->free(memory); +} + +// +// Event +// +inline int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event) +{ + return rua::Anchor::get()->event()->wait(event); +} + +inline void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event) +{ + return rua::Anchor::get()->event()->free(event); +} + +// +// Model +// +inline int ANeuralNetworksModel_create(ANeuralNetworksModel **model) +{ + return rua::Anchor::get()->model()->create(model); +} + +inline int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model, + const ANeuralNetworksOperandType *type) +{ + return rua::Anchor::get()->model()->addOperand(model, type); +} + +inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index, + const void *buffer, size_t length) +{ + return rua::Anchor::get()->model()->setOperandValue(model, index, buffer, length); +} + +inline int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, + int32_t index, + const ANeuralNetworksMemory *memory, + size_t offset, size_t length) +{ + return rua::Anchor::get()->model()->setOperandValueFromMemory(model, index, memory, offset, + length); +} + +inline int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model, + ANeuralNetworksOperationType type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) +{ + return rua::Anchor::get()->model()->addOperation(model, type, inputCount, inputs, outputCount, + outputs); +} + +inline int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel 
//
// Compilation
//
// Thin shims: each forwards to the CompilationService registered in
// rua::Anchor and returns its NNAPI result code unchanged.
inline int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
                                             ANeuralNetworksCompilation **compilation)
{
  return rua::Anchor::get()->compilation()->create(model, compilation);
}

inline int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
                                                    int32_t preference)
{
  return rua::Anchor::get()->compilation()->setPreference(compilation, preference);
}

inline int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
{
  return rua::Anchor::get()->compilation()->finish(compilation);
}

inline void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation)
{
  return rua::Anchor::get()->compilation()->free(compilation);
}
// Thin shims: each forwards to the ExecutionService registered in
// rua::Anchor and returns its NNAPI result code unchanged.
inline int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
                                              const ANeuralNetworksOperandType *type, void *buffer,
                                              size_t length)
{
  return rua::Anchor::get()->execution()->setOutput(execution, index, type, buffer, length);
}

inline int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution,
                                                        int32_t index,
                                                        const ANeuralNetworksOperandType *type,
                                                        const ANeuralNetworksMemory *memory,
                                                        size_t offset, size_t length)
{
  return rua::Anchor::get()->execution()->setOutputFromMemory(execution, index, type, memory,
                                                              offset, length);
}

// Starts asynchronous computation; completion is observed via the returned
// event (see ANeuralNetworksEvent_wait).
inline int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
                                                 ANeuralNetworksEvent **event)
{
  return rua::Anchor::get()->execution()->startCompute(execution, event);
}

inline void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution)
{
  return rua::Anchor::get()->execution()->free(execution);
}
+target_link_libraries(nnfw_lib_tflite PUBLIC nnfw_lib_misc) +target_link_libraries(nnfw_lib_tflite PRIVATE ${LIB_PTHREAD} dl) +target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_common) +target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_coverage) + +add_executable(nnfw_lib_tflite_test_TensorView src/TensorView.test.cpp) +target_link_libraries(nnfw_lib_tflite_test_TensorView nnfw_lib_tflite) diff --git a/runtime/libs/tflite/include/tflite/Assert.h b/runtime/libs/tflite/include/tflite/Assert.h new file mode 100644 index 000000000..148ac7e01 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/Assert.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file Assert.h + * @brief This file contains helper function of assertion + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_ASSERT_H__ +#define __NNFW_TFLITE_ASSERT_H__ + +#include "tensorflow/lite/context.h" + +#include <sstream> + +#define STR_DETAIL(value) #value +#define STR(value) STR_DETAIL(value) + +#define TFLITE_ENSURE(exp) \ + { \ + const TfLiteStatus status = (exp); \ + \ + if (status != kTfLiteOk) \ + { \ + std::ostringstream ss; \ + ss << #exp << " failed (" << __FILE__ << ":" << __LINE__ << ")"; \ + throw std::runtime_error{ss.str()}; \ + } \ + } + +#endif // __NNFW_TFLITE_ASSERT_H__ diff --git a/runtime/libs/tflite/include/tflite/Diff.h b/runtime/libs/tflite/include/tflite/Diff.h new file mode 100644 index 000000000..eca2fd502 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/Diff.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file Diff.h + * @brief This file contains classes for testing correctess of implementation + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_DIFF_H__ +#define __NNFW_TFLITE_DIFF_H__ + +#include "tensorflow/lite/interpreter.h" + +#include "misc/tensor/Index.h" +#include "misc/tensor/Diff.h" +#include "misc/tensor/Shape.h" +#include "misc/tensor/Comparator.h" + +#include "tflite/TensorView.h" + +#include <functional> +#include <vector> + +/** + * @brief Class to define TfLite interpreter match application + */ +class TfLiteInterpMatchApp +{ +public: + /** + * @brief Construct a new TfLiteInterpMatchApp object with Comparator + * @param[in] comparator Comparator object for tensor comparation + */ + TfLiteInterpMatchApp(const nnfw::misc::tensor::Comparator &comparator) + : _verbose{false}, _comparator(comparator) + { + // DO NOTHING + } + +public: + /** + * @brief Get reference verbose for debugging information + * @return Reference of verbose value + */ + int &verbose(void) { return _verbose; } + +private: + int _verbose; + +public: + /** + * @brief Run two interpreter and return the output matching + * @param[in] pure Interpreter object of expected(with TfLite) + * @param[in] nnapi Interpreter object of obtained(through NNAPI) + * @return @c true if two Interpreter results are same, otherwise @c false + */ + bool run(::tflite::Interpreter &pure, ::tflite::Interpreter &nnapi) const; + /** + * @brief Compare two TensorView values and return the match result + * @param[in] expected TensorView object to read expected values + * @param[in] obtained TensorView object to read obtained values + * @param[in] id Tensor ID value used for debug message + * @return @c true if two TensorView values are same, otherwise @c false + */ + template <typename T> + bool compareSingleTensorView(const nnfw::tflite::TensorView<T> &expected, + const nnfw::tflite::TensorView<T> &obtained, int id) const; + +private: + const nnfw::misc::tensor::Comparator &_comparator; 
+}; + +#include "tflite/interp/Builder.h" +#include "tflite/Quantization.h" + +#include <random> + +/** + * @brief Class to generate random values + */ +class RandomGenerator +{ +public: + /** + * @brief Construct a new RandomGenerator object + * @param[in] seed Random seed value + * @param[in] mean Mean value of normal random number generation + * @param[in] stddev Standard deviation of random number generation + * @param[in] quantization TfLiteQuantizationParams type to represent quantization value + * (not used yet) + */ + RandomGenerator(uint32_t seed, float mean, float stddev, + const TfLiteQuantizationParams quantization = make_default_quantization()) + : _rand{seed}, _dist{mean, stddev}, _quantization{quantization} + { + (void)_quantization; + } + +public: + /** + * @brief Generate random numbers for type T + * @param[in] s Shape value + * @param[in] i Index value + * @return Random generated value + * @note This is same as T generate(void) as two input parameters are not used + */ + template <typename T> + T generate(const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &) + { + return generate<T>(); + } + + /** + * @brief Generate random numbers for type T + * @return Random generated value + */ + template <typename T> T generate(void) { return _dist(_rand); } + +private: + std::minstd_rand _rand; + std::normal_distribution<float> _dist; + // unused + const TfLiteQuantizationParams _quantization; +}; + +template <> uint8_t RandomGenerator::generate<uint8_t>(void); +template <> bool RandomGenerator::generate<bool>(void); + +/** + * @brief Structure for NNAPI correctness test + */ +struct RandomTestParam +{ + int verbose; //!< Verbosity of debug information + int tolerance; //!< Torlerance of value difference + int tensor_logging = 0; //!< Save logging to a file if not 0 + std::string log_path = ""; //!< Path of log file, meaningful only when tensor_logging is 1 +}; + +/** + * @brief Class to define Random test runner + */ +class 
RandomTestRunner +{ +public: + /** + * @brief Construct a new RandomTestRunner object + * @param[in] seed Random seed value + * @param[in] param RandomTestParam object for test runner + * @param[in] quantization TfLiteQuantizationParams type to represent quantization value + */ + RandomTestRunner(uint32_t seed, const RandomTestParam ¶m, + const TfLiteQuantizationParams quantization = make_default_quantization()) + : _randgen{seed, 0.0f, 2.0f, quantization}, _param{param} + { + // DO NOTHING + } + +public: + /** + * @brief Run the random test runner + * @param[in] builder Interpreter Builder used to run + * @return 0 if test succeeds, otherwise failure + */ + int run(const nnfw::tflite::Builder &builder); + +public: + /** + * @brief Get RandomGenerator reference + * @return RandomGenerator reference + */ + RandomGenerator &generator() { return _randgen; }; + +private: + RandomGenerator _randgen; + const RandomTestParam _param; + +public: + /** + * @brief Create a RandomTestRunner object + * @param[in] seed Random seed value + * @return RandomGenerator object + */ + static RandomTestRunner make(uint32_t seed); +}; + +#endif // __NNFW_TFLITE_DIFF_H__ diff --git a/runtime/libs/tflite/include/tflite/FeatureView.h b/runtime/libs/tflite/include/tflite/FeatureView.h new file mode 100644 index 000000000..a8f069c40 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/FeatureView.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file FeatureView.h + * @brief This file contains FeatureView class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_FEATURE_VIEW_H__ +#define __NNFW_TFLITE_FEATURE_VIEW_H__ + +#include "tensorflow/lite/interpreter.h" + +#include "tflite/InputIndex.h" +#include "tflite/OutputIndex.h" + +#include "misc/feature/Shape.h" +#include "misc/feature/Reader.h" + +namespace nnfw +{ +namespace tflite +{ + +template <typename T> class FeatureView; + +/** + * @brief Class to support reading element of float type feature + */ +template <> class FeatureView<float> : public nnfw::misc::feature::Reader<float> +{ +public: + /** + * @brief Construct a new FeatureView object + * @param[in] interp Interpreter to read from + * @param[in] index InputIndex index of input + */ + FeatureView(::tflite::Interpreter &interp, const InputIndex &index); + /** + * @brief Construct a new FeatureView object + * @param[in] interp Interpreter to read from + * @param[in] index OutputIndex index of output + */ + FeatureView(::tflite::Interpreter &interp, const OutputIndex &index); + +public: + /** + * @brief Get value of element using channel, row and column index + * @param[in] ch Channel index + * @param[in] row Row index + * @param[in] col Column index + * @return Value of element + */ + float at(uint32_t ch, uint32_t row, uint32_t col) const; + /** + * @brief Get reference of element using channel, row and column index + * @param[in] ch Channel index + * @param[in] row Row index + * @param[in] col Column index + * @return Reference of element + */ + float &at(uint32_t ch, uint32_t row, uint32_t col); + + float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const = 0; + +private: + /** + * @brief Get offset of element from channel, row and column index + * @param[in] ch Channel index + * @param[in] row Row index + * @param[in] col Column index + * @return 
Offset of element + */ + uint32_t getElementOffset(uint32_t ch, uint32_t row, uint32_t col) const + { + uint32_t res = 0; + + // TensorFlow Lite assumes that NHWC ordering for tessor + res += row * _shape.W * _shape.C; + res += col * _shape.C; + res += ch; + + return res; + } + +private: + nnfw::misc::feature::Shape _shape; + float *_base; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_FEATURE_VIEW_H__ diff --git a/runtime/libs/tflite/include/tflite/InputIndex.h b/runtime/libs/tflite/include/tflite/InputIndex.h new file mode 100644 index 000000000..f535b2626 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/InputIndex.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file InputIndex.h + * @brief This file contains InputIndex class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_INPUT_INDEX_H__ +#define __NNFW_TFLITE_INPUT_INDEX_H__ + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Class to express index of input + */ +class InputIndex +{ +public: + /** + * @brief Construct a new InputIndex object with index value + * @param [in] index The value of index + */ + InputIndex(int index) : _index(index) + { + // DO NOTHING + } + +public: + /** + * @brief Get index value as int + * @return Index value as int + */ + int asInt(void) const { return _index; } + +private: + int _index; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_INPUT_INDEX_H__ diff --git a/runtime/libs/tflite/include/tflite/InterpreterSession.h b/runtime/libs/tflite/include/tflite/InterpreterSession.h new file mode 100644 index 000000000..deaf05a7f --- /dev/null +++ b/runtime/libs/tflite/include/tflite/InterpreterSession.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file InterpreterSession.h + * @brief This file contains InterpreterSession class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_INTERPRETER_SESSION_H__ +#define __NNFW_TFLITE_INTERPRETER_SESSION_H__ + +#include "Session.h" + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Class to define TfLite interpreter session which is inherited from Session class + */ +class InterpreterSession final : public Session +{ +public: + /** + * @brief Construct a InterpreterSession object with interpreter of TfLite + * @param[in] interp The TfLite interpreter pointer + */ + InterpreterSession(::tflite::Interpreter *interp) : _interp{interp} + { + // DO NOTHING + } + +public: + /** + * @brief Get TfLite interpreter pointer + * @return The TfLite interpreter + */ + ::tflite::Interpreter *interp(void) override { return _interp; } + +public: + /** + * @brief Prepare the TfLite interpreter session + * @return @c true if tensor preparation is successful, otherwise @c false + */ + bool prepare(void) override + { + _interp->UseNNAPI(false); + + if (kTfLiteOk != _interp->AllocateTensors()) + { + return false; + } + + return true; + } + + /** + * @brief Run the Invoke function of TfLite interpreter + * @return @c true if Invoke() is successful, otherwise @c false + */ + bool run(void) override + { + // Return true if Invoke returns kTfLiteOk + return kTfLiteOk == _interp->Invoke(); + } + + /** + * @brief Tear down TfLite interpreter session + * @return @c true always + */ + bool teardown(void) override + { + // Do NOTHING currently + return true; + } + +private: + ::tflite::Interpreter *const _interp; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_INTERPRETER_SESSION_H__ diff --git a/runtime/libs/tflite/include/tflite/NNAPISession.h b/runtime/libs/tflite/include/tflite/NNAPISession.h new file mode 100644 index 000000000..f430e86d3 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/NNAPISession.h @@ -0,0 +1,102 @@ +/* + * 
Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file NNAPISession.h + * @brief This file contains NNAPISession class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_NNAPI_SESSION_H__ +#define __NNFW_TFLITE_NNAPI_SESSION_H__ + +#include "Session.h" +#include "tflite/ext/nnapi_delegate.h" + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Class to define NNAPI interpreter session which is inherited from Session class + */ +class NNAPISession final : public Session +{ +public: + /** + * @brief Construct a NNAPISession object with interpreter of TfLite + * @param[in] interp The TfLite interpreter pointer + * @note Invoke BuildGraph() of NNAPI delegate from Interpreter + */ + NNAPISession(::tflite::Interpreter *interp) : _interp{interp} + { + // Construct Graph from Interpreter + // primary_subgraph: Experimental interface. Return 1st sugbraph + _delegate.BuildGraph(&interp->primary_subgraph()); + } + +public: + /** + * @brief Get TfLite interpreter pointer + * @return The TfLite interpreter + */ + ::tflite::Interpreter *interp(void) override { return _interp; } + +public: + /** + * @brief Prepare the TfLite interpreter session + * @return @c true if tensor preparation is successful, otherwise @c false + */ + bool prepare(void) override + { + // Explicitly turn off T/F lite internal NNAPI delegation in order to use locally defined + // NNAPI delegation. 
+ _interp->UseNNAPI(false); + + if (kTfLiteOk != _interp->AllocateTensors()) + { + return false; + } + + return true; + } + + /** + * @brief Run the Invoke function of NNAPI delegate + * @return @c true if Invoke() is successful, otherwise @c false + */ + bool run(void) override { return kTfLiteOk == _delegate.Invoke(&_interp->primary_subgraph()); } + + /** + * @brief Tear down TfLite interpreter session + * @return @c true always + */ + bool teardown(void) override + { + // DO NOTHING + return true; + } + +private: + ::tflite::Interpreter *const _interp; + nnfw::tflite::NNAPIDelegate _delegate; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_NNAPI_SESSION_H__ diff --git a/runtime/libs/tflite/include/tflite/OutputIndex.h b/runtime/libs/tflite/include/tflite/OutputIndex.h new file mode 100644 index 000000000..dd1ca8d44 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/OutputIndex.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file OutputIndex.h + * @brief This file contains OutputIndex class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_OUTPUT_INDEX_H__ +#define __NNFW_TFLITE_OUTPUT_INDEX_H__ + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Class to define OutputIndex + */ +class OutputIndex +{ +public: + /** + * @brief Construct a OutputIndex object with index value + * @param[in] index The value of index + */ + OutputIndex(int index) : _index(index) + { + // DO NOTHING + } + +public: + /** + * @brief Get index value as int + * @return Index valuel as int + */ + int asInt(void) const { return _index; } + +private: + int _index; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_OUTPUT_INDEX_H__ diff --git a/runtime/libs/tflite/include/tflite/Quantization.h b/runtime/libs/tflite/include/tflite/Quantization.h new file mode 100644 index 000000000..8272bcdc0 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/Quantization.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file Quantization.h + * @brief This file contains BitwiseIntToFloat union and quantization related + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_QUANTIZATION_H__ +#define __NNFW_TFLITE_QUANTIZATION_H__ + +/** + * @brief Union to provide bitwise conversion of integer and float + */ +union BitwiseIntToFloat { + int i; + float f; +}; + +static const float FLOAT_NEAREST_TO_1 = BitwiseIntToFloat{0x3f7fffff}.f; + +#include "tensorflow/lite/context.h" + +/** + * @brief Get TfLiteQuantizationParams object with default values + * @return TfLiteQuantizationParams object + */ +TfLiteQuantizationParams make_default_quantization(void); + +#endif // __NNFW_TFLITE_QUANTIZATION_H__ diff --git a/runtime/libs/tflite/include/tflite/Session.h b/runtime/libs/tflite/include/tflite/Session.h new file mode 100644 index 000000000..b653acf61 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/Session.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file Session.h + * @brief This file contains Session class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_SESSION_H__ +#define __NNFW_TFLITE_SESSION_H__ + +#include <tensorflow/lite/interpreter.h> + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Structure to provide interface methods of interpreter session + */ +struct Session +{ + /** + * @brief Destruct Session object using default destructor + */ + virtual ~Session() = default; + + /** + * @brief Get the Interpreter object pointer + * @return The Interpreter object pointer + */ + virtual ::tflite::Interpreter *interp(void) = 0; + + /** + * @brief Prepare the session + * @return @c true if prepare method succeeded, otherwise @c false + */ + virtual bool prepare(void) = 0; + /** + * @brief Run the session + * @return @c true if run method succeeded, otherwise @c false + */ + virtual bool run(void) = 0; + /** + * @brief Teardown(release) the session + * @return @c true if teardown method succeeded, otherwise @c false + */ + virtual bool teardown(void) = 0; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_INTERP_SESSION_H__ diff --git a/runtime/libs/tflite/include/tflite/TensorLogger.h b/runtime/libs/tflite/include/tflite/TensorLogger.h new file mode 100644 index 000000000..a824c3411 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/TensorLogger.h @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file TensorLogger.h + * @brief This file contains TensorLogger class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_TENSOR_LOGGER_H__ +#define __NNFW_TFLITE_TENSOR_LOGGER_H__ + +#include "misc/tensor/IndexIterator.h" +#include "tflite/TensorView.h" + +#include <tensorflow/lite/interpreter.h> +#include <tensorflow/lite/context.h> +#include <fstream> +#include <iomanip> + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Class to write input and output value / shape into a file in python form + * @note This is a utility to write input and output value / shape into a file in python form.\n + * any python app can load this value by running the python code below:\n + * exec(open(filename).read())\n + * generated python code looks like the following: \n + * tensor_shape_gen = []\n + * tensor_value_gen = []\n\n + * tensor_shape_gen.append("{2, 1, 2}")\n + * tensor_value_gen.append([1, 2, 3, 4])\n\n + * tensor_shape_gen.append("{2}")\n + * tensor_value_gen.append([1, 2])\n\n + * tensor_shape_gen.append("{2, 1, 2}")\n + * tensor_value_gen.append([1, 4, 3, 8])\n + */ +class TensorLogger +{ +private: + std::ofstream _outfile; + +public: + /** + * @brief Get TensorLogger instance + * @return The TensorLogger instance + */ + static TensorLogger &get() + { + static TensorLogger instance; + return instance; + } + + /** + * @brief Save the tensor details to file from interpreter + * @param[in] path The file path to save + * @param[in] interp The TfLite interpreter + */ + void save(const std::string &path, ::tflite::Interpreter &interp) + { + open(path); + + int log_index = 0; + for (const auto id : interp.inputs()) + { + _outfile << "# input tensors" << std::endl; + printTensor(interp, id, log_index++); + } + for (const auto id : interp.outputs()) + { + _outfile << "# output tensors" << std::endl; + printTensor(interp, id, log_index++); + } 
+ close(); + } + +private: + void open(const std::string &path) + { + if (!_outfile.is_open()) + _outfile.open(path, std::ios_base::out); + + _outfile << "# ------ file: " << path << " ------" << std::endl + << "tensor_shape_gen = []" << std::endl + << "tensor_value_gen = []" << std::endl + << std::endl; + } + + void printTensor(::tflite::Interpreter &interp, const int id, const int log_index) + { + const TfLiteTensor *tensor = interp.tensor(id); + + _outfile << "# tensor name: " << tensor->name << std::endl; + _outfile << "# tflite::interpreter.tensor(" << id << ") -> " + "tensor_value_gen[" + << log_index << "]" << std::endl; + + if (tensor->type == kTfLiteInt32) + { + printTensorShape(tensor); + printTensorValue<int32_t>(tensor, tensor->data.i32); + } + else if (interp.tensor(id)->type == kTfLiteUInt8) + { + printTensorShape(tensor); + printTensorValue<uint8_t>(tensor, tensor->data.uint8); + } + else if (tensor->type == kTfLiteFloat32) + { + printTensorShape(tensor); + printTensorValue<float>(tensor, tensor->data.f); + } + } + + void printTensorShape(const TfLiteTensor *tensor) + { + _outfile << "tensor_shape_gen.append('{"; + + int r = 0; + for (; r < tensor->dims->size - 1; r++) + { + _outfile << tensor->dims->data[r] << ", "; + } + _outfile << tensor->dims->data[r]; + + _outfile << "}')" << std::endl; + } + + template <typename T> void printTensorValue(const TfLiteTensor *tensor, T *tensor_data_ptr) + { + _outfile << "tensor_value_gen.append(["; + + _outfile << std::fixed << std::setprecision(10); + + const T *end = reinterpret_cast<const T *>(tensor->data.raw_const + tensor->bytes); + for (T *ptr = tensor_data_ptr; ptr < end; ptr++) + _outfile << *ptr << ", "; + + _outfile << "])" << std::endl << std::endl; + } + + void close() + { + _outfile << "# --------- tensor shape and value defined above ---------" << std::endl; + _outfile.close(); + } +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_TENSOR_LOGGER_H__ diff --git 
a/runtime/libs/tflite/include/tflite/TensorShapeUtils.h b/runtime/libs/tflite/include/tflite/TensorShapeUtils.h new file mode 100644 index 000000000..ba8687413 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/TensorShapeUtils.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file TensorShapeUtils.h + * @brief This file contains utilities function of tensor shape + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__ +#define __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__ + +#include "misc/tensor/Shape.h" + +#include <vector> + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Converts tensor::Shape into a vector + * @param[in] shape The tensor shape to be converted + * @return vector value of given shape object + */ +static inline std::vector<int32_t> as_dims(const nnfw::misc::tensor::Shape &shape) +{ + std::vector<int32_t> dims; + + for (uint32_t axis = 0; axis < shape.rank(); ++axis) + { + dims.emplace_back(shape.dim(axis)); + } + + return dims; +} + +/** + * @brief Broadcasts between two given shapes + * @param[in] lhs_shape The left hand side shape + * @param[in] rhs_shape The right hand side shape + * @return The broadcasted shape + */ +nnfw::misc::tensor::Shape broadcast(const nnfw::misc::tensor::Shape &lhs_shape, + const nnfw::misc::tensor::Shape &rhs_shape); + +} // namespace tflite +} // namespace nnfw + 
+#endif // __NNFW_TFLITE_TENSOR_SHAPE_UTILS_H__ diff --git a/runtime/libs/tflite/include/tflite/TensorUtils.h b/runtime/libs/tflite/include/tflite/TensorUtils.h new file mode 100644 index 000000000..08af1468b --- /dev/null +++ b/runtime/libs/tflite/include/tflite/TensorUtils.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file TensorUtils.h + * @brief This file contains utilities function + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_TENSOR_UTILS_H__ +#define __NNFW_TFLITE_TENSOR_UTILS_H__ + +#include <tensorflow/lite/context.h> + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Get @c true if tensor type is kTfLiteFloat32, otherwise @c false + * @param[in] tensor The tensor object to be compared + * @return @c true if tensor type is kTfLiteFloat32, otherwise @c false + */ +inline bool isFloatTensor(const TfLiteTensor *tensor) { return tensor->type == kTfLiteFloat32; } + +/** + * @brief Get @c true if tensor is 4-D tensor and the first dimension length is 1, + * otherwise @c false + * @param[in] tensor The tensor object to be compared + * @return @c true if tensor is 4-D tensor and the first dimension length is 1, otherwise @c false + */ +inline bool isFeatureTensor(const TfLiteTensor *tensor) +{ + return (tensor->dims->size == 4) && (tensor->dims->data[0] == 1); +} + +} // namespace tflite +} // namespace nnfw + +#endif // 
__NNFW_TFLITE_TENSOR_UTILS_H__ diff --git a/runtime/libs/tflite/include/tflite/TensorView.h b/runtime/libs/tflite/include/tflite/TensorView.h new file mode 100644 index 000000000..ce791a73f --- /dev/null +++ b/runtime/libs/tflite/include/tflite/TensorView.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file TensorView.h + * @brief This file contains TensorView class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_TENSOR_VIEW_H__ +#define __NNFW_TFLITE_TENSOR_VIEW_H__ + +#include "tensorflow/lite/interpreter.h" + +#include "misc/tensor/Shape.h" +#include "misc/tensor/Index.h" +#include "misc/tensor/Reader.h" +#include "misc/tensor/NonIncreasingStride.h" + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Class to define TensorView which is inherited from nnfw::misc::tensor::Reader<T> class + */ +template <typename T> class TensorView final : public nnfw::misc::tensor::Reader<T> +{ +public: + /** + * @brief Construct a TensorView object with base and shape informations + * @param[in] shape The shape of a tensor + * @param[in] base The base address of a tensor + */ + TensorView(const nnfw::misc::tensor::Shape &shape, T *base) : _shape{shape}, _base{base} + { + // Set 'stride' + _stride.init(_shape); + } + +public: + /** + * @brief Get shape of tensor + * @return Reference of shape + */ + const nnfw::misc::tensor::Shape 
&shape(void) const { return _shape; } + +public: + /** + * @brief Get value of tensor index + * @param[in] index The tensor index + * @return The value at the index + */ + T at(const nnfw::misc::tensor::Index &index) const override + { + const auto offset = _stride.offset(index); + return *(_base + offset); + } + +public: + /** + * @brief Get reference value of tensor index + * @param[in] index The tensor index + * @return The reference value at the index + */ + T &at(const nnfw::misc::tensor::Index &index) + { + const auto offset = _stride.offset(index); + return *(_base + offset); + } + +private: + nnfw::misc::tensor::Shape _shape; /**< The tensor shape */ + +public: + T *_base; /**< The base address of tensor */ + nnfw::misc::tensor::NonIncreasingStride _stride; /**< The NonIncreasingStride object */ + +public: + // TODO Introduce Operand ID class + /** + * @brief Create TensorView object using given parameters + * @param[in] interp The TfLite interpreter + * @param[in] tensor_index The tensor index + * @return The new TensorView<T> object + */ + static TensorView<T> make(::tflite::Interpreter &interp, int tensor_index) + { + auto tensor_ptr = interp.tensor(tensor_index); + + // Set 'shape' + nnfw::misc::tensor::Shape shape(tensor_ptr->dims->size); + + for (uint32_t axis = 0; axis < shape.rank(); ++axis) + { + shape.dim(axis) = tensor_ptr->dims->data[axis]; + } + + return TensorView<T>(shape, interp.typed_tensor<T>(tensor_index)); + } +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_TENSOR_VIEW_H__ diff --git a/runtime/libs/tflite/include/tflite/interp/Builder.h b/runtime/libs/tflite/include/tflite/interp/Builder.h new file mode 100644 index 000000000..0f54e1779 --- /dev/null +++ b/runtime/libs/tflite/include/tflite/interp/Builder.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file Builder.h + * @brief This file contains Builder structure + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_INTERP_BUILDER_H__ +#define __NNFW_TFLITE_INTERP_BUILDER_H__ + +#include <tensorflow/lite/interpreter.h> + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Structure to Builder + */ +struct Builder +{ + /** + * @brief Destroy the Builder object + */ + virtual ~Builder() = default; + + /** + * @brief Build a FlatBuffer model + * @return The TfLite interpreter object + */ + virtual std::unique_ptr<::tflite::Interpreter> build(void) const = 0; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_INTERP_BUILDER_H__ diff --git a/runtime/libs/tflite/include/tflite/interp/FlatBufferBuilder.h b/runtime/libs/tflite/include/tflite/interp/FlatBufferBuilder.h new file mode 100644 index 000000000..2d96af50b --- /dev/null +++ b/runtime/libs/tflite/include/tflite/interp/FlatBufferBuilder.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file FlatBufferBuilder.h + * @brief This file contains FlatBufferBuilder class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__ +#define __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__ + +#include <tensorflow/lite/model.h> + +#include "tflite/interp/Builder.h" + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Class to define FlatBufferBuilder which is inherited from Builder + */ +class FlatBufferBuilder final : public Builder +{ +public: + /** + * @brief Construct a FlatBufferBuilder object with FlatBufferModel of TfLite + * @param[in] model The TfLite Flatbuffer model + */ + FlatBufferBuilder(const ::tflite::FlatBufferModel &model) : _model{model} + { + // DO NOTHING + } + +public: + /** + * @brief Build a FlatBuffer model + * @return The TfLite interpreter pointer address + */ + std::unique_ptr<::tflite::Interpreter> build(void) const override; + +private: + const ::tflite::FlatBufferModel &_model; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__ diff --git a/runtime/libs/tflite/include/tflite/interp/FunctionBuilder.h b/runtime/libs/tflite/include/tflite/interp/FunctionBuilder.h new file mode 100644 index 000000000..7bfb8db2d --- /dev/null +++ b/runtime/libs/tflite/include/tflite/interp/FunctionBuilder.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file FunctionBuilder.h + * @brief This file contains FunctionBuilder class + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__ +#define __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__ + +#include <tensorflow/lite/model.h> + +#include "tflite/interp/Builder.h" + +namespace nnfw +{ +namespace tflite +{ + +/** + * @brief Class to define FunctionBuilder which is inherited from Builder + */ +class FunctionBuilder final : public Builder +{ +public: + using SetupFunc = std::function<void(::tflite::Interpreter &)>; + +public: + /** + * @brief Construct a FunctionBuilder object with SetupFunction + * @param[in] fn The SetupFunc object + */ + FunctionBuilder(const SetupFunc &fn) : _fn{fn} + { + // DO NOTHING + } + +public: + /** + * @brief Build a SetupFunc + * @return The TfLite interpreter pointer address + */ + std::unique_ptr<::tflite::Interpreter> build(void) const override; + +private: + SetupFunc _fn; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_INTERP_FUNCTION_BUILDER_H__ diff --git a/runtime/libs/tflite/port/1.13.1/CMakeLists.txt b/runtime/libs/tflite/port/1.13.1/CMakeLists.txt new file mode 100644 index 000000000..311e11cae --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/CMakeLists.txt @@ -0,0 +1,16 @@ +if(NOT SUPPORT_TFLITE_VERSION VERSION_EQUAL 1.13.1) + return() +endif(NOT SUPPORT_TFLITE_VERSION 
VERSION_EQUAL 1.13.1) + +nnfw_find_package(TensorFlowLite REQUIRED) + +file(GLOB_RECURSE SOURCES "src/*.cpp") + +add_library(tensorflow-lite-ex STATIC ${SOURCES}) +set_target_properties(tensorflow-lite-ex PROPERTIES POSITION_INDEPENDENT_CODE ON) +target_include_directories(tensorflow-lite-ex PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_link_libraries(tensorflow-lite-ex PUBLIC tensorflow-lite) +target_link_libraries(tensorflow-lite-ex PUBLIC nnfw_lib_misc nnfw_lib_rua_shim) +target_link_libraries(tensorflow-lite-ex PRIVATE ${LIB_PTHREAD} dl) +target_link_libraries(tensorflow-lite-ex PRIVATE nnfw_common) +target_link_libraries(tensorflow-lite-ex PRIVATE nnfw_coverage) diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/Abs.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/Abs.h new file mode 100644 index 000000000..697ba33e9 --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/Abs.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __NNFW_TFLITE_EXT_KERNELS_ABS_H__ +#define __NNFW_TFLITE_EXT_KERNELS_ABS_H__ + +#include "tensorflow/lite/context.h" + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ +namespace Abs +{ + +void *InitAbs(TfLiteContext *context, const char *buffer, size_t length); +void FreeAbs(TfLiteContext *context, void *buffer); +TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node); +TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node); + +} // namespace Abs +} // namespace custom +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_EXT_KERNELS_ABS_H__ diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h new file mode 100644 index 000000000..3370db778 --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/CustomOps.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file CustomOps.h + * @brief This file contains registration of custom operands + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__ +#define __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__ + +#include "tensorflow/lite/context.h" +#include "tflite/ext/kernels/TensorFlowMax.h" +#include "tflite/ext/kernels/SquaredDifference.h" +#include "tflite/ext/kernels/TensorFlowSum.h" +#include "tflite/ext/kernels/Abs.h" + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ + +#define REGISTER_FUNCTION(Name) \ + TfLiteRegistration *Register_##Name(void) \ + { \ + static TfLiteRegistration r = {}; \ + r.init = Name::Init##Name; \ + r.free = Name::Free##Name; \ + r.prepare = Name::Prepare##Name; \ + r.invoke = Name::Eval##Name; \ + r.custom_name = #Name; \ + return &r; \ + } + +REGISTER_FUNCTION(TensorFlowMax) +REGISTER_FUNCTION(SquaredDifference) +REGISTER_FUNCTION(TensorFlowSum) +REGISTER_FUNCTION(Abs) + +#undef REGISTER_FUNCTION + +} // namespace custom +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__ diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/SquaredDifference.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/SquaredDifference.h new file mode 100644 index 000000000..5512ead78 --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/SquaredDifference.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file SquaredDifference.h + * @brief This file contains SquaredDifference namespace and SquaredDifference function + * definitions + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__ +#define __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__ + +#include "tensorflow/lite/context.h" + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ +namespace SquaredDifference +{ + +/** + * @brief Initialize SquaredDifference operand using the contents of buffer + * @param[in] context The TfLite context + * @param[in] buffer The buffer with contents + * @param[in] length The buffer length + * @return The void pointer for user data + */ +void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length); + +/** + * @brief Release any memory it might have allocated via 'InitSquaredDifference' + * @param[in] context The TfLite context + * @param[in] buffer The buffer with contents + * @return N/A + */ +void FreeSquaredDifference(TfLiteContext *context, void *buffer); + +/** + * @brief Prepare the SquaredDifference operand for execution + * @param[in] context The TfLite context + * @param[in] node The operand node + * @return The TfLite status + */ +TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node); + +/** + * @brief Evaluation the SquaredDifference operand for execution + * @param[in] context The TfLite context + * @param[in] node The operand node + * @return The TfLite status + */ +TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node); + +} // namespace SquaredDifference +} // namespace custom +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__ diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowMax.h 
b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowMax.h new file mode 100644 index 000000000..d573308ed --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowMax.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file TensorFlowMax.h + * @brief This file contains TensorFlowMax namespace and TensorFlowMax function definitions + * @ingroup COM_AI_RUNTIME + */ + +#ifndef __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__ +#define __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__ + +#include "tensorflow/lite/context.h" + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ +namespace TensorFlowMax +{ + +/** + * @brief Initialize TensorFlowMax operand using the contents of buffer + * @param[in] context The TfLite context + * @param[in] buffer The buffer with contents + * @param[in] length The buffer length + * @return The void pointer for user data + */ +void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length); + +/** + * @brief Release any memory it might have allocated via 'InitTensorFlowMax' + * @param[in] context The TfLite context + * @param[in] buffer The buffer with contents + * @return N/A + */ +void FreeTensorFlowMax(TfLiteContext *context, void *buffer); + +/** + * @brief Prepare the TensorFlowMax operand for execution + * @param[in] context The TfLite context + * 
@param[in] node The operand node + * @return The TfLite status + */ +TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node); + +/** + * @brief Evaluation the TensorFlowMax operand for execution + * @param[in] context The TfLite context + * @param[in] node The operand node + * @return The TfLite status + */ +TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node); + +} // namespace TensorFlowMax +} // namespace custom +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__ diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowSum.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowSum.h new file mode 100644 index 000000000..29455aac5 --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/TensorFlowSum.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__ +#define __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__ + +#include "tensorflow/lite/context.h" + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ +namespace TensorFlowSum +{ + +void *InitTensorFlowSum(TfLiteContext *context, const char *buffer, size_t length); +void FreeTensorFlowSum(TfLiteContext *context, void *buffer); +TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node); +TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node); + +} // namespace TensorFlowSum +} // namespace custom +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__ diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/register.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/register.h new file mode 100644 index 000000000..6e32b35fb --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/kernels/register.h @@ -0,0 +1,46 @@ +/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// NOTE To minimize diff with upstream tensorflow, disable clang-format +// clang-format off + +// NOTE This header is derived from the following file (in TensorFlow v1.13.1) +// 'externals/tensorflow/tensorflow/lite/kernels/register.h' +#ifndef __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__ +#define __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__ + +#include <unordered_map> +#include "tensorflow/lite/context.h" +#include "tensorflow/lite/model.h" + +namespace nnfw { +namespace tflite { + +class BuiltinOpResolver : public ::tflite::MutableOpResolver { + public: + BuiltinOpResolver(); + + const TfLiteRegistration* FindOp(::tflite::BuiltinOperator op, + int version) const override; + const TfLiteRegistration* FindOp(const char* op, int version) const override; +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__ + +// clang-format on diff --git a/runtime/libs/tflite/port/1.13.1/include/tflite/ext/nnapi_delegate.h b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/nnapi_delegate.h new file mode 100644 index 000000000..231baa25c --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/include/tflite/ext/nnapi_delegate.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// NOTE To minimize diff with upstream tensorflow, disable clang-format +// clang-format off + +// NOTE This header is derived from the following file (in TensorFlow v1.13.1) +// 'externals/tensorflow/tensorflow/lite/nnapi_delegate.h' +#ifndef __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__ +#define __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__ + +#include "tensorflow/lite/allocation.h" +#include "tensorflow/lite/c/c_api_internal.h" +#include "tensorflow/lite/core/api/error_reporter.h" +#include "tensorflow/lite/core/subgraph.h" +#include "tensorflow/lite/interpreter.h" + +struct ANeuralNetworksModel; +struct ANeuralNetworksMemory; +struct ANeuralNetworksCompilation; + +namespace nnfw { +namespace tflite { + +class NNAPIAllocation : public ::tflite::MMAPAllocation { + public: + NNAPIAllocation(const char* filename, ::tflite::ErrorReporter* error_reporter); + ~NNAPIAllocation(); + + size_t offset(const void* ptr) const { + auto signed_offset = reinterpret_cast<const uint8_t*>(ptr) - + reinterpret_cast<const uint8_t*>(mmapped_buffer_); + + return static_cast<size_t>(signed_offset); + } + + ANeuralNetworksMemory* memory() const { return handle_; } + bool valid() const override { return handle_ != nullptr; } + + private: + mutable ANeuralNetworksMemory* handle_ = nullptr; +}; + +class NNAPIDelegate { + public: + ~NNAPIDelegate(); + + // Convert a tflite graph to NNAPI + TfLiteStatus BuildGraph(::tflite::Subgraph* subgraph); + + // Run + TfLiteStatus Invoke(::tflite::Subgraph* subgraph); + + // Whether the current platform supports NNAPI delegation. + static bool IsSupported(); + + private: + // The NN API model handle + ANeuralNetworksModel* nn_model_ = nullptr; + // The NN API compilation handle + ANeuralNetworksCompilation* nn_compiled_model_ = nullptr; + // Model status + TfLiteStatus model_status_ = kTfLiteOk; + + // List of state tensors for LSTM, RNN, SVDF. 
+ // NN API does not allow ops to maintain states across multiple + // invocations. We need to manually create state input tensors from + // corresponding state output tensors of TFLite operations, and map them + // correctly. + std::vector<int> model_states_inputs_; // holds NNAPI operand ids + std::vector<int> model_states_outputs_; // holds TFLite tensor ids +}; + +} // namespace tflite +} // namespace nnfw + +#endif // __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__ + +// clang-format on diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp new file mode 100644 index 000000000..61181465d --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/src/kernels/Abs.cpp @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "tflite/ext/kernels/Abs.h" +#include "tensorflow/lite/kernels/kernel_util.h" + +#include <iostream> +#include <cmath> + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ +namespace Abs +{ + +void *InitAbs(TfLiteContext *, const char *, size_t) { return nullptr; } + +void FreeAbs(TfLiteContext *, void *) {} + +TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node) +{ + TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1); + + const TfLiteTensor *input = ::tflite::GetInput(context, node, 0); + TfLiteTensor *output = ::tflite::GetOutput(context, node, 0); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + + return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); +} + +TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node) +{ + const TfLiteTensor *input = ::tflite::GetInput(context, node, 0); + TfLiteTensor *output = ::tflite::GetOutput(context, node, 0); + size_t elements = ::tflite::NumElements(input); + switch (input->type) + { + case kTfLiteFloat32: + { + auto *in = input->data.f; + auto *in_end = in + elements; + auto *out = output->data.f; + for (; in < in_end; in++, out++) + *out = std::abs(*in); + return kTfLiteOk; + } + case kTfLiteInt32: + { + auto *in = input->data.i32; + auto *in_end = in + elements; + auto *out = output->data.i32; + for (; in < in_end; in++, out++) + *out = std::abs(*in); + return kTfLiteOk; + } + case kTfLiteInt64: + { + auto *in = input->data.i64; + auto *in_end = in + elements; + auto *out = output->data.i64; + for (; in < in_end; in++, out++) + *out = std::abs(*in); + return kTfLiteOk; + } + case kTfLiteUInt8: + { + auto *in = input->data.uint8; + auto *in_end = in + elements; + auto *out = output->data.uint8; + for (; in < in_end; in++, out++) + *out = *in; + return kTfLiteOk; + } + default: + { + context->ReportError(context, "Input type %d is not supported", input->type); + return 
kTfLiteError; + } + } +} + +} // namespace Abs +} // namespace custom +} // namespace tflite +} // namespace nnfw diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/SquaredDifference.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/SquaredDifference.cpp new file mode 100644 index 000000000..615878513 --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/src/kernels/SquaredDifference.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "tflite/ext/kernels/SquaredDifference.h" +#include "tensorflow/lite/kernels/kernel_util.h" + +#include <iostream> + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ +namespace SquaredDifference +{ + +void *InitSquaredDifference(TfLiteContext *, const char *, size_t) { return nullptr; } + +void FreeSquaredDifference(TfLiteContext *, void *) {} + +TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node) +{ + TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1); + + const TfLiteTensor *input1 = ::tflite::GetInput(context, node, 0); + const TfLiteTensor *input2 = ::tflite::GetInput(context, node, 1); + TfLiteTensor *output = ::tflite::GetOutput(context, node, 0); + + TF_LITE_ENSURE_EQ(context, input1->type, input2->type); + TF_LITE_ENSURE_EQ(context, input1->type, output->type); + + return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input1->dims)); +} + +TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node) +{ + + const TfLiteTensor *input1 = ::tflite::GetInput(context, node, 0); + const TfLiteTensor *input2 = ::tflite::GetInput(context, node, 1); + + TfLiteTensor *output = ::tflite::GetOutput(context, node, 0); + + size_t elements = ::tflite::NumElements(input1); + + switch (input1->type) + { + case kTfLiteFloat32: + { + const float *in1 = input1->data.f; + const float *in2 = input2->data.f; + const float *in_end1 = in1 + elements; + float *out = output->data.f; + + for (; in1 < in_end1; in1++, in2++, out++) + *out = ((*in1 - *in2) * (*in1 - *in2)); + + return kTfLiteOk; + } + case kTfLiteInt32: + { + const int *in1 = input1->data.i32; + const int *in2 = input2->data.i32; + const int *in_end1 = in1 + elements; + int *out = output->data.i32; + + for (; in1 < in_end1; in1++, in2++, out++) + *out = ((*in1 - *in2) * (*in1 - *in2)); + + return kTfLiteOk; + } + case kTfLiteInt64: + { + const int64_t *in1 = 
input1->data.i64; + const int64_t *in2 = input1->data.i64; + const int64_t *in_end1 = in1 + elements; + int64_t *out = output->data.i64; + + for (; in1 < in_end1; in1++, in2++, out++) + *out = ((*in1 - *in2) * (*in1 - *in2)); + + return kTfLiteOk; + } + default: + { + context->ReportError(context, "InputType is %d Unsupported", input1->type); + return kTfLiteError; + } + } +} + +} // namespace SquaredDifference +} // namespace custom +} // namespace tflite +} // namespace nnfw diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp new file mode 100644 index 000000000..207de98f5 --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowMax.cpp @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "tflite/ext/kernels/TensorFlowMax.h" +#include "tensorflow/lite/kernels/kernel_util.h" + +#include <iostream> + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ +namespace TensorFlowMax +{ + +struct TensorFlowMaxOp +{ + TensorFlowMaxOp(TfLiteContext *context, TfLiteNode *node) + { + input = ::tflite::GetInput(context, node, 0); + axis = ::tflite::GetInput(context, node, 1); + output = ::tflite::GetOutput(context, node, 0); + } + const TfLiteTensor *input; + const TfLiteTensor *axis; + TfLiteTensor *output; +}; + +void *InitTensorFlowMax(TfLiteContext *context, const char *, size_t) +{ + // Creates two temp tensors to store index and axis for internal + // implementation only. + auto *scratch_tensor_index = new int; + context->AddTensors(context, 2, scratch_tensor_index); + return scratch_tensor_index; +} + +void FreeTensorFlowMax(TfLiteContext *, void *buffer) +{ + delete static_cast<TensorFlowMaxOp *>(buffer); +} + +// Resizes the temp tensor that stores resolved axis. +TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowMaxOp *op_context, + TfLiteTensor *resolved_axis) +{ + TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1); + axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis)); + return context->ResizeTensor(context, resolved_axis, axis_size); +} + +// Resizes output array based on the input size and resolved axis. +TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_context) +{ + int64_t num_axis = ::tflite::NumElements(op_context->axis); + TfLiteIntArray *input_dims = op_context->input->dims; + int input_num_dims = ::tflite::NumDimensions(op_context->input); + const int *axis = op_context->axis->data.i32; + + { + // Calculates size of reducing axis. 
+ int64_t num_reduce_axis = num_axis; + for (int64_t i = 0; i < num_axis; ++i) + { + int current = axis[i]; + if (current < 0) + { + current += input_num_dims; + } + TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims); + for (int64_t j = 0; j < i; ++j) + { + int previous = axis[j]; + if (previous < 0) + { + previous += input_num_dims; + } + if (current == previous) + { + --num_reduce_axis; + break; + } + } + } + // Determines output dimensions. + int output_num_dims = ::tflite::NumDimensions(op_context->output); + TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) || + (input_num_dims - num_reduce_axis == output_num_dims)); + + if (input_num_dims == output_num_dims) + { + TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims); + for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx) + { + int current = axis[axis_idx]; + output_dims->data[current] = 1; + } + return context->ResizeTensor(context, op_context->output, output_dims); + } + else + { + TfLiteIntArray *output_dims = TfLiteIntArrayCreate(output_num_dims); + int num_skip_axis = 0; + for (int idx = 0; idx < input_num_dims; ++idx) + { + bool is_axis = false; + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) + { + if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx) + { + ++num_skip_axis; + is_axis = true; + break; + } + } + if (!is_axis) + { + output_dims->data[idx - num_skip_axis] = input_dims->data[idx]; + } + } + return context->ResizeTensor(context, op_context->output, output_dims); + } + } +} + +// Initializes temp tensors to store index and resolved axis. +TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node, + TensorFlowMaxOp *op_context) +{ + // Creates a temp index to iterate through input data. 
+ int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data); + TfLiteIntArrayFree(node->temporaries); + node->temporaries = TfLiteIntArrayCreate(2); + node->temporaries->data[0] = *scratch_tensor_index; + TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]]; + scratch_tensor->type = kTfLiteInt32; + scratch_tensor->allocation_type = kTfLiteArenaRw; + TfLiteIntArray *index_size = TfLiteIntArrayCreate(1); + index_size->data[0] = ::tflite::NumDimensions(op_context->input); + TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size)); + + // Creates a temp tensor to store resolved axis given input data. + node->temporaries->data[1] = *scratch_tensor_index + 1; + TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]]; + resolved_axis->type = kTfLiteInt32; + return kTfLiteOk; +} + +TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node) +{ + TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1); + + TensorFlowMaxOp op_context(context, node); + TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); + + TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]]; + // Leaves work to Eval if axis is not constant; else resizes output. + if (!::tflite::IsConstantTensor(op_context.axis)) + { + ::tflite::SetTensorToDynamic(op_context.output); + ::tflite::SetTensorToDynamic(resolved_axis); + return kTfLiteOk; + } + resolved_axis->allocation_type = kTfLiteArenaRw; + TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); + return ResizeOutputTensor(context, &op_context); +} + +// Gets offset of index if expanded on axis. When expanded, the flattened offset +// will not change, if the output index changes on the given axis. 
For example, +// if you have a 2D tensor and you are expanding to 3D on axis 0, +// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened +// offset. +inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index, + const int num_axis, const int *axis) +{ + size_t offset = 0; + int out_idx = 0; + for (int in_idx = 0; in_idx < num_dims; ++in_idx) + { + // if we need to expand this axis + bool is_axis = false; + if (axis != nullptr) + { + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) + { + if (in_idx == axis[axis_idx]) + { + is_axis = true; + break; + } + } + } + if (!is_axis) + { + offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]); + out_idx++; + } + else + { + offset = offset * static_cast<size_t>(dims[in_idx]); + } + } + return offset; +} + +// Gets offset of index if reducing on axis. When reducing, the flattened offset +// will not change, if the input index changes on the given axis. For example, +// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0, +// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened +// offset. +// TODO(kanlig): uses Dims to represent dimensions. +inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index, + const int num_axis, const int *axis) +{ + size_t offset = 0; + for (int idx = 0; idx < num_dims; ++idx) + { + // if we need to skip this axis + bool is_axis = false; + if (axis != nullptr) + { + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) + { + if (idx == axis[axis_idx]) + { + is_axis = true; + break; + } + } + } + if (!is_axis) + { + offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]); + } + } + return offset; +} + +// Gets next index to iterate through a multidimensional array. 
+inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current) +{ + int carry = 1; + for (int idx = num_dims - 1; idx >= 0; --idx) + { + int current_val = current[idx] + carry; + TF_LITE_ENSURE(context, (dims[idx] >= current_val)); + if (dims[idx] == current_val) + { + current[idx] = 0; + } + else + { + current[idx] = current_val; + carry = 0; + break; + } + } + return (carry == 0); +} + +template <typename T> +inline TfLiteStatus +CustomMax(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims, + T *output_data, const int *output_dims, const int output_num_dims, const int *axis, + const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis) +{ + // resolves axis. + int num_resolved_axis = 0; + for (int idx = 0; idx < num_axis_dimensions; ++idx) + { + int current = axis[idx]; + TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0)); + if (current < 0) + { + current += input_num_dims; + } + bool is_dup = false; + for (int j = 0; j < num_resolved_axis; ++j) + { + if (resolved_axis[j] == current) + { + is_dup = true; + break; + } + } + if (!is_dup) + { + resolved_axis[num_resolved_axis++] = current; + } + } + + TF_LITE_ENSURE(context, (input_num_dims > 0)); + TF_LITE_ENSURE(context, (input_dims != nullptr)); + TF_LITE_ENSURE(context, (temp_index != nullptr)); + + // resets output data. + for (int idx = 0; idx < output_num_dims; ++idx) + { + temp_index[idx] = 0; + } + for (bool has_next = true; has_next; + has_next = NextIndex(context, output_num_dims, output_dims, temp_index)) + { + size_t output_offset = + ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr); + size_t input_offset = ExpandedInputOffset(input_num_dims, input_dims, temp_index, + num_resolved_axis, resolved_axis); + output_data[output_offset] = input_data[input_offset]; + } + + // resets temp index. 
+ for (int idx = 0; idx < input_num_dims; ++idx) + { + temp_index[idx] = 0; + } + + // iterates through input_data. + for (bool has_next = true; has_next; + has_next = NextIndex(context, input_num_dims, input_dims, temp_index)) + { + size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr); + size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, + num_resolved_axis, resolved_axis); + if (output_data[output_offset] < input_data[input_offset]) + { + output_data[output_offset] = input_data[input_offset]; + } + } + + return kTfLiteOk; +} + +TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node) +{ + + TensorFlowMaxOp op_context(context, node); + int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis)); + TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]]; + TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]]; + // Resize the output tensor if the output tensor is dynamic. 
+ if (::tflite::IsDynamicTensor(op_context.output)) + { + TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); + TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); + } + + TfLiteStatus returnStatus = kTfLiteOk; + switch (op_context.input->type) + { + case kTfLiteFloat32: + returnStatus = CustomMax<float>( + context, op_context.input->data.f, op_context.input->dims->data, + op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data, + op_context.output->dims->size, op_context.axis->data.i32, num_axis, false, + temp_index->data.i32, resolved_axis->data.i32); + break; + case kTfLiteInt32: + returnStatus = CustomMax<int>(context, op_context.input->data.i32, + op_context.input->dims->data, op_context.input->dims->size, + op_context.output->data.i32, op_context.output->dims->data, + op_context.output->dims->size, op_context.axis->data.i32, + num_axis, false, temp_index->data.i32, resolved_axis->data.i32); + break; + case kTfLiteUInt8: + returnStatus = CustomMax<uint8_t>( + context, op_context.input->data.uint8, op_context.input->dims->data, + op_context.input->dims->size, op_context.output->data.uint8, + op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32, + num_axis, false, temp_index->data.i32, resolved_axis->data.i32); + break; + case kTfLiteInt64: + returnStatus = CustomMax<int64_t>( + context, op_context.input->data.i64, op_context.input->dims->data, + op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data, + op_context.output->dims->size, op_context.axis->data.i32, num_axis, false, + temp_index->data.i32, resolved_axis->data.i32); + break; + default: + returnStatus = kTfLiteError; + } + + return returnStatus; +} + +} // namespace TensorFlowMax +} // namespace custom +} // namespace tflite +} // namespace nnfw diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp 
b/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp new file mode 100644 index 000000000..40f266baa --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/src/kernels/TensorFlowSum.cpp @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "tflite/ext/kernels/TensorFlowSum.h" +#include "tensorflow/lite/kernels/kernel_util.h" + +#include <iostream> + +namespace nnfw +{ +namespace tflite +{ +namespace custom +{ +namespace TensorFlowSum +{ + +struct TensorFlowSumOp +{ + TensorFlowSumOp(TfLiteContext *context, TfLiteNode *node) + { + input = ::tflite::GetInput(context, node, 0); + axis = ::tflite::GetInput(context, node, 1); + output = ::tflite::GetOutput(context, node, 0); + } + const TfLiteTensor *input; + const TfLiteTensor *axis; + TfLiteTensor *output; +}; + +void *InitTensorFlowSum(TfLiteContext *context, const char *, size_t) +{ + // Creates two temp tensors to store index and axis for internal + // implementation only. + auto *scratch_tensor_index = new int; + context->AddTensors(context, 2, scratch_tensor_index); + return scratch_tensor_index; +} + +void FreeTensorFlowSum(TfLiteContext *, void *buffer) +{ + delete static_cast<TensorFlowSumOp *>(buffer); +} + +// Resizes the temp tensor that stores resolved axis. 
+TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowSumOp *op_context, + TfLiteTensor *resolved_axis) +{ + TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1); + axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis)); + return context->ResizeTensor(context, resolved_axis, axis_size); +} + +// Resizes output array based on the input size and resolved axis. +TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_context) +{ + int64_t num_axis = ::tflite::NumElements(op_context->axis); + TfLiteIntArray *input_dims = op_context->input->dims; + int input_num_dims = ::tflite::NumDimensions(op_context->input); + const int *axis = op_context->axis->data.i32; + + { + // Calculates size of reducing axis. + int64_t num_reduce_axis = num_axis; + for (int64_t i = 0; i < num_axis; ++i) + { + int current = axis[i]; + if (current < 0) + { + current += input_num_dims; + } + TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims); + for (int64_t j = 0; j < i; ++j) + { + int previous = axis[j]; + if (previous < 0) + { + previous += input_num_dims; + } + if (current == previous) + { + --num_reduce_axis; + break; + } + } + } + // Determines output dimensions. 
+ int output_num_dims = ::tflite::NumDimensions(op_context->output); + TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) || + (input_num_dims - num_reduce_axis == output_num_dims)); + + if (input_num_dims == output_num_dims) + { + TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims); + for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx) + { + int current = axis[axis_idx]; + output_dims->data[current] = 1; + } + return context->ResizeTensor(context, op_context->output, output_dims); + } + else + { + TfLiteIntArray *output_dims = TfLiteIntArrayCreate(output_num_dims); + int num_skip_axis = 0; + for (int idx = 0; idx < input_num_dims; ++idx) + { + bool is_axis = false; + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) + { + if (axis[axis_idx] == idx || axis[axis_idx] + input_num_dims == idx) + { + ++num_skip_axis; + is_axis = true; + break; + } + } + if (!is_axis) + { + output_dims->data[idx - num_skip_axis] = input_dims->data[idx]; + } + } + return context->ResizeTensor(context, op_context->output, output_dims); + } + } +} + +// Initializes temp tensors to store index and resolved axis. +TfLiteStatus InitializeTemporaries(TfLiteContext *context, TfLiteNode *node, + TensorFlowSumOp *op_context) +{ + // Creates a temp index to iterate through input data. + int *scratch_tensor_index = reinterpret_cast<int *>(node->user_data); + TfLiteIntArrayFree(node->temporaries); + node->temporaries = TfLiteIntArrayCreate(2); + node->temporaries->data[0] = *scratch_tensor_index; + TfLiteTensor *scratch_tensor = &context->tensors[node->temporaries->data[0]]; + scratch_tensor->type = kTfLiteInt32; + scratch_tensor->allocation_type = kTfLiteArenaRw; + TfLiteIntArray *index_size = TfLiteIntArrayCreate(1); + index_size->data[0] = ::tflite::NumDimensions(op_context->input); + TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size)); + + // Creates a temp tensor to store resolved axis given input data. 
+ node->temporaries->data[1] = *scratch_tensor_index + 1; + TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]]; + resolved_axis->type = kTfLiteInt32; + return kTfLiteOk; +} + +TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node) +{ + TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1); + + TensorFlowSumOp op_context(context, node); + TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); + + TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]]; + // Leaves work to Eval if axis is not constant; else resizes output. + if (!::tflite::IsConstantTensor(op_context.axis)) + { + ::tflite::SetTensorToDynamic(op_context.output); + ::tflite::SetTensorToDynamic(resolved_axis); + return kTfLiteOk; + } + resolved_axis->allocation_type = kTfLiteArenaRw; + TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); + return ResizeOutputTensor(context, &op_context); +} + +// Gets offset of index if expanded on axis. When expanded, the flattened offset +// will not change, if the output index changes on the given axis. For example, +// if you have a 2D tensor and you are expanding to 3D on axis 0, +// then index (0, 1, 2) and index (1, 1, 2) will map from the same flattened +// offset. 
+inline size_t ExpandedInputOffset(const int num_dims, const int *dims, const int *index, + const int num_axis, const int *axis) +{ + size_t offset = 0; + int out_idx = 0; + for (int in_idx = 0; in_idx < num_dims; ++in_idx) + { + // if we need to expand this axis + bool is_axis = false; + if (axis != nullptr) + { + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) + { + if (in_idx == axis[axis_idx]) + { + is_axis = true; + break; + } + } + } + if (!is_axis) + { + offset = offset * static_cast<size_t>(dims[in_idx]) + static_cast<size_t>(index[out_idx]); + out_idx++; + } + else + { + offset = offset * static_cast<size_t>(dims[in_idx]); + } + } + return offset; +} + +// Gets offset of index if reducing on axis. When reducing, the flattened offset +// will not change, if the input index changes on the given axis. For example, +// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0, +// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened +// offset. +// TODO(kanlig): uses Dims to represent dimensions. +inline size_t ReducedOutputOffset(const int num_dims, const int *dims, const int *index, + const int num_axis, const int *axis) +{ + size_t offset = 0; + for (int idx = 0; idx < num_dims; ++idx) + { + // if we need to skip this axis + bool is_axis = false; + if (axis != nullptr) + { + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) + { + if (idx == axis[axis_idx]) + { + is_axis = true; + break; + } + } + } + if (!is_axis) + { + offset = offset * static_cast<size_t>(dims[idx]) + static_cast<size_t>(index[idx]); + } + } + return offset; +} + +// Gets next index to iterate through a multidimensional array. 
+inline bool NextIndex(TfLiteContext *context, const int num_dims, const int *dims, int *current) +{ + int carry = 1; + for (int idx = num_dims - 1; idx >= 0; --idx) + { + int current_val = current[idx] + carry; + TF_LITE_ENSURE(context, (dims[idx] >= current_val)); + if (dims[idx] == current_val) + { + current[idx] = 0; + } + else + { + current[idx] = current_val; + carry = 0; + break; + } + } + return (carry == 0); +} + +template <typename T> +inline TfLiteStatus +CustomSum(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims, + T *output_data, const int *output_dims, const int output_num_dims, const int *axis, + const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis) +{ + // resolves axis. + int num_resolved_axis = 0; + for (int idx = 0; idx < num_axis_dimensions; ++idx) + { + int current = axis[idx]; + TF_LITE_ENSURE(context, (current < input_num_dims && current + input_num_dims >= 0)); + if (current < 0) + { + current += input_num_dims; + } + bool is_dup = false; + for (int j = 0; j < num_resolved_axis; ++j) + { + if (resolved_axis[j] == current) + { + is_dup = true; + break; + } + } + if (!is_dup) + { + resolved_axis[num_resolved_axis++] = current; + } + } + + TF_LITE_ENSURE(context, (input_num_dims > 0)); + TF_LITE_ENSURE(context, (input_dims != nullptr)); + TF_LITE_ENSURE(context, (temp_index != nullptr)); + + // resets output data. + for (int idx = 0; idx < output_num_dims; ++idx) + { + temp_index[idx] = 0; + } + for (bool has_next = true; has_next; + has_next = NextIndex(context, output_num_dims, output_dims, temp_index)) + { + size_t output_offset = + ReducedOutputOffset(output_num_dims, output_dims, temp_index, 0, nullptr); + output_data[output_offset] = 0; + } + + // resets temp index. + for (int idx = 0; idx < input_num_dims; ++idx) + { + temp_index[idx] = 0; + } + + // iterates through input_data. 
+ for (bool has_next = true; has_next; + has_next = NextIndex(context, input_num_dims, input_dims, temp_index)) + { + size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, 0, nullptr); + size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, temp_index, + num_resolved_axis, resolved_axis); + output_data[output_offset] += input_data[input_offset]; + } + + return kTfLiteOk; +} + +TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node) +{ + + TensorFlowSumOp op_context(context, node); + int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis)); + TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]]; + TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]]; + // Resize the output tensor if the output tensor is dynamic. + if (::tflite::IsDynamicTensor(op_context.output)) + { + TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); + TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); + } + + TfLiteStatus returnStatus = kTfLiteOk; + switch (op_context.input->type) + { + case kTfLiteFloat32: + returnStatus = CustomSum<float>( + context, op_context.input->data.f, op_context.input->dims->data, + op_context.input->dims->size, op_context.output->data.f, op_context.output->dims->data, + op_context.output->dims->size, op_context.axis->data.i32, num_axis, false, + temp_index->data.i32, resolved_axis->data.i32); + break; + case kTfLiteInt32: + returnStatus = CustomSum<int>(context, op_context.input->data.i32, + op_context.input->dims->data, op_context.input->dims->size, + op_context.output->data.i32, op_context.output->dims->data, + op_context.output->dims->size, op_context.axis->data.i32, + num_axis, false, temp_index->data.i32, resolved_axis->data.i32); + break; + case kTfLiteUInt8: + returnStatus = CustomSum<uint8_t>( + context, op_context.input->data.uint8, op_context.input->dims->data, + op_context.input->dims->size, 
op_context.output->data.uint8, + op_context.output->dims->data, op_context.output->dims->size, op_context.axis->data.i32, + num_axis, false, temp_index->data.i32, resolved_axis->data.i32); + break; + case kTfLiteInt64: + returnStatus = CustomSum<int64_t>( + context, op_context.input->data.i64, op_context.input->dims->data, + op_context.input->dims->size, op_context.output->data.i64, op_context.output->dims->data, + op_context.output->dims->size, op_context.axis->data.i32, num_axis, false, + temp_index->data.i32, resolved_axis->data.i32); + break; + default: + returnStatus = kTfLiteError; + } + + return returnStatus; +} + +} // namespace TensorFlowSum +} // namespace custom +} // namespace tflite +} // namespace nnfw diff --git a/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp b/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp new file mode 100644 index 000000000..b2088b277 --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/src/kernels/register.cpp @@ -0,0 +1,317 @@ +/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// NOTE To minimize diff with upstream tensorflow, disable clang-format +// clang-format off + +// NOTE This code is derived from the following file (in TensorFlow v1.13.1) +// 'externals/tensorflow/tensorflow/lite/kernels/register.cc' +#include "tflite/ext/kernels/register.h" +#include "tensorflow/lite/util.h" +#include "tflite/ext/kernels/CustomOps.h" + +namespace tflite { +namespace ops { + +namespace custom { + +// Need additional external library for AUDIO_SPECTROGRAM +//TfLiteRegistration* Register_AUDIO_SPECTROGRAM(); +TfLiteRegistration* Register_LAYER_NORM_LSTM(); +TfLiteRegistration* Register_MFCC(); +TfLiteRegistration* Register_DETECTION_POSTPROCESS(); +TfLiteRegistration* Register_RELU_1(); + +} // namespace custom +} +} + +namespace tflite { +namespace ops { +namespace builtin { + +TfLiteRegistration* Register_ABS(); +TfLiteRegistration* Register_RELU(); +TfLiteRegistration* Register_RELU_N1_TO_1(); +TfLiteRegistration* Register_RELU6(); +TfLiteRegistration* Register_TANH(); +TfLiteRegistration* Register_LOGISTIC(); +TfLiteRegistration* Register_AVERAGE_POOL_2D(); +TfLiteRegistration* Register_MAX_POOL_2D(); +TfLiteRegistration* Register_L2_POOL_2D(); +TfLiteRegistration* Register_CONV_2D(); +TfLiteRegistration* Register_DEPTHWISE_CONV_2D(); +TfLiteRegistration* Register_SVDF(); +TfLiteRegistration* Register_RNN(); +TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_RNN(); +TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_RNN(); +TfLiteRegistration* Register_EMBEDDING_LOOKUP(); +TfLiteRegistration* Register_EMBEDDING_LOOKUP_SPARSE(); +TfLiteRegistration* Register_FULLY_CONNECTED(); +TfLiteRegistration* Register_LSH_PROJECTION(); +TfLiteRegistration* Register_HASHTABLE_LOOKUP(); +TfLiteRegistration* Register_SOFTMAX(); +TfLiteRegistration* Register_CONCATENATION(); +TfLiteRegistration* Register_ADD(); +TfLiteRegistration* Register_SPACE_TO_BATCH_ND(); 
// Forward declarations of the TFLite builtin-op registration functions.
// The definitions live in the TensorFlow Lite kernels library; declaring them
// here lets this TU build the resolver without pulling in kernel headers.
TfLiteRegistration* Register_DIV();
TfLiteRegistration* Register_SUB();
TfLiteRegistration* Register_BATCH_TO_SPACE_ND();
TfLiteRegistration* Register_MUL();
TfLiteRegistration* Register_L2_NORMALIZATION();
TfLiteRegistration* Register_LOCAL_RESPONSE_NORMALIZATION();
TfLiteRegistration* Register_LSTM();
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_LSTM();
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
TfLiteRegistration* Register_PAD();
TfLiteRegistration* Register_PADV2();
TfLiteRegistration* Register_RESHAPE();
TfLiteRegistration* Register_RESIZE_BILINEAR();
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR();
TfLiteRegistration* Register_SKIP_GRAM();
TfLiteRegistration* Register_SPACE_TO_DEPTH();
TfLiteRegistration* Register_GATHER();
TfLiteRegistration* Register_TRANSPOSE();
TfLiteRegistration* Register_MEAN();
TfLiteRegistration* Register_SPLIT();
TfLiteRegistration* Register_SPLIT_V();
TfLiteRegistration* Register_SQUEEZE();
TfLiteRegistration* Register_STRIDED_SLICE();
TfLiteRegistration* Register_EXP();
TfLiteRegistration* Register_TOPK_V2();
TfLiteRegistration* Register_LOG();
TfLiteRegistration* Register_LOG_SOFTMAX();
TfLiteRegistration* Register_CAST();
TfLiteRegistration* Register_DEQUANTIZE();
TfLiteRegistration* Register_PRELU();
TfLiteRegistration* Register_MAXIMUM();
TfLiteRegistration* Register_MINIMUM();
TfLiteRegistration* Register_ARG_MAX();
TfLiteRegistration* Register_ARG_MIN();
TfLiteRegistration* Register_GREATER();
TfLiteRegistration* Register_GREATER_EQUAL();
TfLiteRegistration* Register_LESS();
TfLiteRegistration* Register_LESS_EQUAL();
TfLiteRegistration* Register_FLOOR();
TfLiteRegistration* Register_TILE();
TfLiteRegistration* Register_NEG();
TfLiteRegistration* Register_SUM();
TfLiteRegistration* Register_REDUCE_PROD();
TfLiteRegistration* Register_REDUCE_MAX();
TfLiteRegistration* Register_REDUCE_MIN();
TfLiteRegistration* Register_REDUCE_ANY();
TfLiteRegistration* Register_SELECT();
TfLiteRegistration* Register_SLICE();
TfLiteRegistration* Register_SIN();
TfLiteRegistration* Register_TRANSPOSE_CONV();
TfLiteRegistration* Register_EXPAND_DIMS();
TfLiteRegistration* Register_SPARSE_TO_DENSE();
TfLiteRegistration* Register_EQUAL();
TfLiteRegistration* Register_NOT_EQUAL();
TfLiteRegistration* Register_SQRT();
TfLiteRegistration* Register_RSQRT();
TfLiteRegistration* Register_SHAPE();
TfLiteRegistration* Register_POW();
TfLiteRegistration* Register_FAKE_QUANT();
TfLiteRegistration* Register_PACK();
TfLiteRegistration* Register_ONE_HOT();
TfLiteRegistration* Register_LOGICAL_OR();
TfLiteRegistration* Register_LOGICAL_AND();
TfLiteRegistration* Register_LOGICAL_NOT();
TfLiteRegistration* Register_UNPACK();
TfLiteRegistration* Register_FLOOR_DIV();
TfLiteRegistration* Register_SQUARE();
TfLiteRegistration* Register_ZEROS_LIKE();
TfLiteRegistration* Register_FLOOR_MOD();
TfLiteRegistration* Register_RANGE();
TfLiteRegistration* Register_LEAKY_RELU();
TfLiteRegistration* Register_SQUARED_DIFFERENCE();
TfLiteRegistration* Register_FILL();
TfLiteRegistration* Register_MIRROR_PAD();

} // namespace builtin
} // namespace ops
} // namespace tflite

namespace nnfw {
namespace tflite {

// Using namespace directive to minimize diff with upstream tensorflow
using namespace ::tflite::ops::custom;
using namespace ::tflite::ops::builtin;
using namespace ::tflite;

// Fix to use strict build option
// Stub kernel used as the invoke function of the placeholder "Flex"
// registration returned by FindOp below: reports an error through the
// context and fails, since regular TensorFlow ops are not executable here.
TfLiteStatus UnsupportedTensorFlowOp(TfLiteContext* context, TfLiteNode* /*node*/) {
  context->ReportError(
      context,
      "Regular TensorFlow ops are not supported by this interpreter. Make sure "
      "you invoke the Flex delegate before inference.");
  return kTfLiteError;
}

// Builtin-op lookup: defers directly to MutableOpResolver's table.
const TfLiteRegistration* BuiltinOpResolver::FindOp(tflite::BuiltinOperator op,
                                                    int version) const {
  return MutableOpResolver::FindOp(op, version);
}

// Custom-op lookup by name.
const TfLiteRegistration* BuiltinOpResolver::FindOp(const char* op,
                                                    int version) const {
  // Return the NULL Op for all ops whose name start with "Flex", allowing
  // the interpreter to delegate their execution.
  if (IsFlexOp(op)) {
    static TfLiteRegistration null_op{
        nullptr, nullptr, &UnsupportedTensorFlowOp,
        nullptr, nullptr, BuiltinOperator_CUSTOM,
        "Flex", 1};
    return &null_op;
  }
  return MutableOpResolver::FindOp(op, version);
}

// Registers every supported TFLite builtin (with explicit min/max versions
// where more than version 1 is supported) plus the nnfw custom ops.
// NOTE(review): registration order mirrors upstream TF v1.13.1 to keep the
// diff minimal — do not reorder.
BuiltinOpResolver::BuiltinOpResolver() {
  AddBuiltin(BuiltinOperator_ABS, Register_ABS());
  AddBuiltin(BuiltinOperator_RELU, Register_RELU());
  AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
  AddBuiltin(BuiltinOperator_RELU6, Register_RELU6());
  AddBuiltin(BuiltinOperator_TANH, Register_TANH());
  AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC());
  AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D());
  AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D());
  AddBuiltin(BuiltinOperator_L2_POOL_2D, Register_L2_POOL_2D());
  AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D());
  AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D(),
             /* min_version */ 1,
             /* max_version */ 2);
  AddBuiltin(BuiltinOperator_SVDF, Register_SVDF());
  AddBuiltin(BuiltinOperator_RNN, Register_RNN());
  AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
             Register_BIDIRECTIONAL_SEQUENCE_RNN());
  AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
             Register_UNIDIRECTIONAL_SEQUENCE_RNN());
  AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP());
  AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
             Register_EMBEDDING_LOOKUP_SPARSE());
  AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(),
             /* min_version */ 1,
             /* max_version */ 2);
  AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
  AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
  AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX());
  AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION());
  AddBuiltin(BuiltinOperator_ADD, Register_ADD());
  AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND());
  AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, Register_BATCH_TO_SPACE_ND());
  AddBuiltin(BuiltinOperator_MUL, Register_MUL());
  AddBuiltin(BuiltinOperator_L2_NORMALIZATION, Register_L2_NORMALIZATION());
  AddBuiltin(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
             Register_LOCAL_RESPONSE_NORMALIZATION());
  AddBuiltin(BuiltinOperator_LSTM, Register_LSTM(), /* min_version */ 1,
             /* max_version */ 2);
  AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
             Register_BIDIRECTIONAL_SEQUENCE_LSTM());
  AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
             Register_UNIDIRECTIONAL_SEQUENCE_LSTM());
  AddBuiltin(BuiltinOperator_PAD, Register_PAD());
  AddBuiltin(BuiltinOperator_PADV2, Register_PADV2());
  AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
  AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR());
  AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
             Register_RESIZE_NEAREST_NEIGHBOR());
  AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());
  AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH());
  AddBuiltin(BuiltinOperator_GATHER, Register_GATHER());
  AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE());
  AddBuiltin(BuiltinOperator_MEAN, Register_MEAN());
  AddBuiltin(BuiltinOperator_DIV, Register_DIV());
  AddBuiltin(BuiltinOperator_SUB, Register_SUB());
  AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT());
  AddBuiltin(BuiltinOperator_SPLIT_V, Register_SPLIT_V());
  AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE());
  AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE());
  AddBuiltin(BuiltinOperator_EXP, Register_EXP());
  AddBuiltin(BuiltinOperator_TOPK_V2, Register_TOPK_V2());
  AddBuiltin(BuiltinOperator_LOG, Register_LOG());
  AddBuiltin(BuiltinOperator_LOG_SOFTMAX, Register_LOG_SOFTMAX());
  AddBuiltin(BuiltinOperator_CAST, Register_CAST());
  AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE(),
             /* min_version */ 1,
             /* max_version */ 2);
  AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
  AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM());
  AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM());
  AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX());
  AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN());
  AddBuiltin(BuiltinOperator_GREATER, Register_GREATER());
  AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL());
  AddBuiltin(BuiltinOperator_LESS, Register_LESS());
  AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL());
  AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR());
  AddBuiltin(BuiltinOperator_NEG, Register_NEG());
  AddBuiltin(BuiltinOperator_SELECT, Register_SELECT());
  AddBuiltin(BuiltinOperator_SLICE, Register_SLICE());
  AddBuiltin(BuiltinOperator_SIN, Register_SIN());
  AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV());
  AddBuiltin(BuiltinOperator_TILE, Register_TILE());
  AddBuiltin(BuiltinOperator_SUM, Register_SUM());
  AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD());
  AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX());
  AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN());
  AddBuiltin(BuiltinOperator_REDUCE_ANY, Register_REDUCE_ANY());
  AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
  AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
  AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL());
  AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL());
  AddBuiltin(BuiltinOperator_SQRT, Register_SQRT());
  AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT());
  AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
  AddBuiltin(BuiltinOperator_POW, Register_POW());
  AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
  AddBuiltin(BuiltinOperator_PACK, Register_PACK());
  AddBuiltin(BuiltinOperator_ONE_HOT, Register_ONE_HOT());
  AddBuiltin(BuiltinOperator_LOGICAL_OR, Register_LOGICAL_OR());
  AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND());
  AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT());
  AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK());
  AddBuiltin(BuiltinOperator_FLOOR_DIV, Register_FLOOR_DIV());
  AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE());
  AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE());
  AddBuiltin(BuiltinOperator_FLOOR_MOD, Register_FLOOR_MOD());
  AddBuiltin(BuiltinOperator_RANGE, Register_RANGE());
  AddBuiltin(BuiltinOperator_LEAKY_RELU, Register_LEAKY_RELU());
  AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, Register_SQUARED_DIFFERENCE());
  AddBuiltin(BuiltinOperator_FILL, Register_FILL());
  AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD());

  // nnfw-specific custom ops (resolved by name, not builtin code).
  AddCustom("TensorFlowMax", nnfw::tflite::custom::Register_TensorFlowMax());
  AddCustom("SquaredDifference", nnfw::tflite::custom::Register_SquaredDifference());
  AddCustom("TensorFlowSum", nnfw::tflite::custom::Register_TensorFlowSum());
  AddCustom("Abs", nnfw::tflite::custom::Register_Abs());

  // TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
  // custom ops aren't always included by default.
  AddCustom("Mfcc", tflite::ops::custom::Register_MFCC());
  // Need additional external library for audio spectrogram
  //AddCustom("AudioSpectrogram",
  //          tflite::ops::custom::Register_AUDIO_SPECTROGRAM());
  AddCustom("LayerNormLstm", tflite::ops::custom::Register_LAYER_NORM_LSTM());
  AddCustom("Relu1", tflite::ops::custom::Register_RELU_1());
  AddCustom("TFLite_Detection_PostProcess",
            tflite::ops::custom::Register_DETECTION_POSTPROCESS());
}

} // namespace tflite
} // namespace nnfw
diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
new file mode 100644
index 000000000..99272f0e5
--- /dev/null
+++ b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
@@ -0,0 +1,1217 @@
/* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
   Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+==============================================================================*/ + +// NOTE To minimize diff with upstream tensorflow, disable clang-format +// clang-format off + +// NOTE This code is derived from the following file (in TensorFlow v1.13.1) +// 'externals/tensorflow/tensorflow/lite/nnapi_delegate.cc' +#include "tflite/ext/nnapi_delegate.h" +#include <fcntl.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/types.h> +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/core/api/error_reporter.h" +#include "tensorflow/lite/model.h" +#include <rua/Shim.h> +#include "NeuralNetworksExShim.h" + +#ifdef __ANDROID__ +#include <android/log.h> +#include <sys/system_properties.h> +#endif + +namespace nnfw { +namespace tflite { + +void logError(const char* format, ...) { + // stderr is convenient for native tests, but is not captured for apps + va_list args_for_stderr; + va_start(args_for_stderr, format); + vfprintf(stderr, format, args_for_stderr); + va_end(args_for_stderr); + fprintf(stderr, "\n"); + fflush(stderr); +#ifdef __ANDROID__ + // produce logcat output for general consumption + va_list args_for_log; + va_start(args_for_log, format); + __android_log_vprint(ANDROID_LOG_ERROR, "tflite", format, args_for_log); + va_end(args_for_log); +#endif +} + +#define FATAL(...) \ + logError(__VA_ARGS__); \ + exit(1); + +// TODO(aselle): Change the error model to use status codes. 
// Abort the process if a TFLite call did not return kTfLiteOk.
#define CHECK_TFLITE_SUCCESS(x)                                             \
  if (x != kTfLiteOk) {                                                     \
    FATAL("Aborting since tflite returned failure nnapi_delegate.cc:%d.",   \
          __LINE__);                                                        \
  }

// Abort the process if an NNAPI call did not return ANEURALNETWORKS_NO_ERROR.
#define CHECK_NN(x)                                                         \
  if (x != ANEURALNETWORKS_NO_ERROR) {                                      \
    FATAL("Aborting since NNAPI returned failure nnapi_delegate.cc:%d",     \
          __LINE__);                                                        \
  }

// Log and return kTfLiteError (non-fatal variant of CHECK_TFLITE_SUCCESS).
#define RETURN_ERROR_IF_TFLITE_FAILED(x)                                         \
  if (x != kTfLiteOk) {                                                          \
    logError(                                                                    \
        "Returning error since TFLite returned failure nnapi_delegate.cc:%d.",   \
        __LINE__);                                                               \
    return kTfLiteError;                                                         \
  }

// Log and return kTfLiteError (non-fatal variant of CHECK_NN).
#define RETURN_ERROR_IF_NN_FAILED(x)                                             \
  if (x != ANEURALNETWORKS_NO_ERROR) {                                           \
    logError(                                                                    \
        "Returning error since NNAPI returned failure nnapi_delegate.cc:%d.",    \
        __LINE__);                                                               \
    return kTfLiteError;                                                         \
  }

// Tracking of NNAPI operand ids
// Sentinels stored in the tflite-tensor-index -> NNAPI-operand-id map.
static const int64_t kOperandIdNotSet = -1;
static const int64_t kOperandNotNeeded = -2;

namespace {

// Returns the Android SDK version parsed from the "ro.build.version.sdk"
// system property, 0xFFFF if the property is non-numeric, and 0 off-Android.
int32_t GetAndroidSdkVersion() {
#ifdef __ANDROID__
  const char* sdkProp = "ro.build.version.sdk";
  char sdkVersion[PROP_VALUE_MAX];
  int length = __system_property_get(sdkProp, sdkVersion);
  if (length != 0) {
    for (int i = 0; i < length; ++i) {
      int digit = sdkVersion[i] - '0';
      if (digit < 0 || digit > 9) {
        // Non-numeric SDK version, assume it's higher than expected;
        return 0xFFFF;
      }
    }
    // NOTE use std::strtol instead of atoi: security issue
    return std::strtol(sdkVersion, NULL, 0);
  }
  FATAL("No %s prop", sdkProp);
#endif  // __ANDROID__
  return 0;
}

// Memoized wrapper: the property is read once per process.
int32_t GetAndroidSdkVersionCached() {
  static int32_t androidSdkVersion = GetAndroidSdkVersion();
  return androidSdkVersion;
}

// WORKAROUND Some model have dimension zero
// Consider scalar as vector size 1
static const uint32_t dimension_for_scalar[1] = {1};

}  // namespace

// Wraps the mmapped model file in an ANeuralNetworksMemory handle so tensor
// data can be passed to NNAPI without copying.
NNAPIAllocation::NNAPIAllocation(const char* filename,
                                 ::tflite::ErrorReporter* error_reporter)
    : MMAPAllocation(filename, error_reporter) {
  // Only register the memory with NNAPI if the base-class mmap succeeded.
  if (mmapped_buffer_ != MAP_FAILED)
    CHECK_NN(ANeuralNetworksMemory_createFromFd(buffer_size_bytes_, PROT_READ,
                                                mmap_fd_, 0, &handle_));
}

NNAPIAllocation::~NNAPIAllocation() {
  // Release the NNAPI memory handle; the mmap itself is released by the
  // MMAPAllocation base-class destructor.
  if (handle_) {
    ANeuralNetworksMemory_free(handle_);
  }
}

NNAPIDelegate::~NNAPIDelegate() {
  // Free the compilation before the model it was created from.
  if (nn_compiled_model_) {
    ANeuralNetworksCompilation_free(nn_compiled_model_);
    nn_compiled_model_ = nullptr;
  }
  if (nn_model_) {
    ANeuralNetworksModel_free(nn_model_);
    nn_model_ = nullptr;
    // TODO(aselle): Is this thread-safe and callable multiple times?
  }
  // ANeuralNetworksShutdown();
}

// Adds the tensors of the subgraph to the NN API model.
// On success, writes the number of operands created to *no_of_operands_added
// and records each tensor's NNAPI operand id in (*nnapi_ids)[tensor_index].
// Entries pre-set to kOperandNotNeeded (temporaries / RNN back-edges) are
// skipped. Returns kTfLiteError for unsupported tensor types or rank > 4.
TfLiteStatus addTensorOperands(::tflite::Subgraph* subgraph,
                               ANeuralNetworksModel* nn_model,
                               uint32_t* no_of_operands_added,
                               std::vector<int64_t>* nnapi_ids) {
  uint32_t next_id = 0;
  for (size_t i = 0; i < subgraph->tensors_size(); i++) {
    // Skip temporaries and RNN back-edges.
    if ((*nnapi_ids)[i] == kOperandNotNeeded) continue;

    (*nnapi_ids)[i] = int64_t(next_id);

    int32_t nn_type = 0;
    // NNAPI requires 32-bit float scale to be zero, tflite doesn't care
    float scale = 0.0f;
    int32_t zeroPoint = 0;
    TfLiteTensor* tensor = subgraph->tensor(i);
    switch (tensor->type) {
      case kTfLiteNoType:
        // Tensors added during initialization of Ops don't have a type yet and
        // should not be registered with the NNAPI.
        continue;
      case kTfLiteFloat32:
        nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
        break;
      case kTfLiteUInt8:
        // NNAPI uses ANEURALNETWORKS_TENSOR_QUANT8_ASYMM to represent uint8 type
        // ex. ANEURALNETWORKS_CAST
        nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
        scale = tensor->params.scale;
        // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM type requires scale > 0,
        // zeroPoint >= 0 and zeroPoint <= 255
        scale = (scale == 0.0f) ? 1.0f : scale;
        zeroPoint = tensor->params.zero_point;
        break;
      case kTfLiteInt32:
        nn_type = ANEURALNETWORKS_TENSOR_INT32;
        scale = tensor->params.scale;
        zeroPoint = tensor->params.zero_point;
        break;
      case kTfLiteBool:
        // Workaround to pass bool type under NNAPI
        // Use bool type using ANEURALNETWORKS_TENSOR_QUANT8_ASYMM with scale = 1.0f and zero_point = 0
        nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
        scale = 1.0f;
        zeroPoint = 0;
        break;
      default:
        logError("Unsupported tensor type %d", tensor->type);
        return kTfLiteError;
    }
    if (tensor->dims->size == 0) {
      // WORKAROUND Some model have dimension zero
      // Rank-0 tensors are only tolerated for float32/int32; the actual
      // dimensions are patched to {1} below via dimension_for_scalar.
      switch (tensor->type) {
        case kTfLiteFloat32:
          nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
          break;
        case kTfLiteInt32:
          nn_type = ANEURALNETWORKS_TENSOR_INT32;
          break;
        default:
          logError("NNAPI doesn't support tensors with rank 0 (index %d name %s)",
                   i, tensor->name);
          return kTfLiteError;
      }
    }
    if (tensor->dims->size > 4) {
      logError("NNAPI doesn't support tensors with rank > 4 (index %d name %s)",
               i, tensor->name);
      return kTfLiteError;
    }
    // TODO(aselle): Note, many of these are intermediate results. Do I need
    // to ever specify these sizes. I am currently below doing setValue
    // on all of them, but I shouldn't in the future.
    // Answer(jeanluc): If all the operators can set the dimension correctly,
    // you won't need to.
    ANeuralNetworksOperandType operand_type{
        nn_type, static_cast<uint32_t>(tensor->dims->size),
        reinterpret_cast<uint32_t*>(tensor->dims->data), scale, zeroPoint};
    if (tensor->dims->size == 0) {
      // WORKAROUND Some model have dimension zero
      // Consider scalar as vector size 1
      operand_type.dimensions = dimension_for_scalar;
      operand_type.dimensionCount = 1;
    }
    RETURN_ERROR_IF_NN_FAILED(
        ANeuralNetworksModel_addOperand(nn_model, &operand_type));
    // TODO(aselle): Based on Michael's suggestion, limiting this to read
    // only memory
    if (tensor->allocation_type == kTfLiteMmapRo) {
      // Constant tensor: hand NNAPI the mmapped file region directly when the
      // allocation is an NNAPIAllocation, otherwise copy the bytes.
      if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
              static_cast<const ::tflite::Allocation*>(tensor->allocation))) {
        RETURN_ERROR_IF_NN_FAILED(
            ANeuralNetworksModel_setOperandValueFromMemory(
                nn_model, next_id, alloc->memory(),
                alloc->offset(tensor->data.raw), tensor->bytes));
      } else {
        RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_setOperandValue(
            nn_model, next_id, tensor->data.raw, tensor->bytes));
      }
    } else if (tensor->bytes == 0) {
      // These size 0 tensors are optional tensors reserved.
      RETURN_ERROR_IF_NN_FAILED(
          ANeuralNetworksModel_setOperandValue(nn_model, next_id, nullptr, 0));
    }

    ++next_id;
  }
  *no_of_operands_added = next_id;
  return kTfLiteOk;
}

// Translates a buffer of tflite tensor indices into NNAPI operand ids using
// `map`, appending to `into`. kOptionalTensor (-1) passes through unmapped so
// optional inputs can be recognized and filled in later.
void MapAndAddTensorIds(const int* from_ids_buf, size_t from_ids_count,
                        std::vector<uint32_t>* into,
                        const std::vector<int64_t>& map) {
  for (size_t i = 0; i < from_ids_count; i++) {
    int from_id = from_ids_buf[i];
    if (from_id == kOptionalTensor) {
      into->push_back(from_id);
    } else {
      into->push_back(map[from_id]);
    }
  }
}

// Adds the operations and their parameters to the NN API model.
// 'next-id' is the operand ID of the next operand of the model.
+TfLiteStatus AddOpsAndParams( + ::tflite::Subgraph* subgraph, ANeuralNetworksModel* nn_model, + uint32_t next_id, std::vector<int>* model_state_inputs, + std::vector<int>* model_state_outputs, + const std::vector<int64_t>& tensor_id_to_nnapi_id) { + for (size_t i = 0; i < subgraph->nodes_size(); i++) { + const auto* node_and_registration = subgraph->node_and_registration(i); + const TfLiteNode& node = node_and_registration->first; + const TfLiteRegistration& registration = node_and_registration->second; + ::tflite::BuiltinOperator builtin = + static_cast<::tflite::BuiltinOperator>(registration.builtin_code); + + // Add the parameters. + std::vector<uint32_t> augmented_inputs, augmented_outputs; + MapAndAddTensorIds(node.inputs->data, node.inputs->size, &augmented_inputs, + tensor_id_to_nnapi_id); + MapAndAddTensorIds(node.outputs->data, node.outputs->size, + &augmented_outputs, tensor_id_to_nnapi_id); + + auto add_scalar_int32 = [&nn_model, &augmented_inputs, + &next_id](int value) { + // Fix to use strict build option + ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_INT32; + CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type)) + CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value, + sizeof(int32_t))) + augmented_inputs.push_back(next_id++); + }; + + auto add_scalar_float32 = [&nn_model, &augmented_inputs, + &next_id](float value) { + // Fix to use strict build option + ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_FLOAT32; + CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type)) + CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value, + sizeof(float))) + augmented_inputs.push_back(next_id++); + }; + + auto add_vector_int32 = [&](const int* values, uint32_t num_values) { + // Fix to use strict build option + ANeuralNetworksOperandType operand_type{}; + operand_type.type = ANEURALNETWORKS_TENSOR_INT32; + operand_type.dimensionCount = 1; + 
operand_type.dimensions = &num_values; + CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type)) + CHECK_NN(ANeuralNetworksModel_setOperandValue( + nn_model, next_id, values, sizeof(int32_t) * num_values)); + augmented_inputs.push_back(next_id++); + }; + + // Handle state tensors of RNN, LSTM, SVDF. + // For each state_out tensor, a corresponding state_in operand needs to be + // created for NNAPI. + auto duplicate_state_tensor_float32 = + [subgraph, &nn_model, &next_id, &augmented_inputs, &model_state_inputs, + &model_state_outputs](int tensor_id) { + const TfLiteTensor* tensor = subgraph->tensor(tensor_id); + ANeuralNetworksOperandType operand_type{ + ANEURALNETWORKS_TENSOR_FLOAT32, + static_cast<uint32_t>(tensor->dims->size), + reinterpret_cast<uint32_t*>(tensor->dims->data), + tensor->params.scale, tensor->params.zero_point}; + CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type)); + augmented_inputs.push_back(next_id); + model_state_inputs->push_back(next_id); + model_state_outputs->push_back(tensor_id); + next_id++; + }; + auto check_and_add_activation = [&add_scalar_int32](int activation) { + if (activation > kTfLiteActRelu6) { + logError("NNAPI only supports RELU, RELU1 and RELU6 activations"); + return kTfLiteError; + } + add_scalar_int32(activation); + return kTfLiteOk; + }; + + auto add_add_params = [&add_scalar_int32](void* data) { + auto* builtin = reinterpret_cast<TfLiteAddParams*>(data); + if (builtin->activation > kTfLiteActRelu6) { + logError("NNAPI only supports RELU, RELU1 and RELU6 activations"); + return kTfLiteError; + } + add_scalar_int32(builtin->activation); + return kTfLiteOk; + }; + + auto add_pooling_params = [&add_scalar_int32, + &check_and_add_activation](void* data) { + auto builtin = reinterpret_cast<TfLitePoolParams*>(data); + add_scalar_int32(builtin->padding); + add_scalar_int32(builtin->stride_width); + add_scalar_int32(builtin->stride_height); + add_scalar_int32(builtin->filter_width); + 
add_scalar_int32(builtin->filter_height); + return check_and_add_activation(builtin->activation); + }; + + auto add_convolution_params = [&add_scalar_int32, + &check_and_add_activation](void* data) { + auto builtin = reinterpret_cast<TfLiteConvParams*>(data); + add_scalar_int32(builtin->padding); + add_scalar_int32(builtin->stride_width); + add_scalar_int32(builtin->stride_height); + return check_and_add_activation(builtin->activation); + }; + + auto add_depthwise_conv_params = [&add_scalar_int32, + &check_and_add_activation](void* data) { + auto builtin = reinterpret_cast<TfLiteDepthwiseConvParams*>(data); + add_scalar_int32(builtin->padding); + add_scalar_int32(builtin->stride_width); + add_scalar_int32(builtin->stride_height); + add_scalar_int32(builtin->depth_multiplier); + return check_and_add_activation(builtin->activation); + }; + + auto add_fully_connected_params = [&check_and_add_activation](void* data) { + auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(data); + return check_and_add_activation(builtin->activation); + }; + + auto add_concatenation_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteConcatenationParams*>(data); + add_scalar_int32(builtin->axis); + if (builtin->activation != kTfLiteActNone) { + logError("Concatenation does not support fused activation in NNAPI"); + return kTfLiteError; + } + return kTfLiteOk; + }; + + auto add_softmax_params = [&add_scalar_float32](void* data) { + auto builtin = reinterpret_cast<TfLiteSoftmaxParams*>(data); + add_scalar_float32(builtin->beta); + }; + + auto add_space_to_depth_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteSpaceToDepthParams*>(data); + add_scalar_int32(builtin->block_size); + }; + + auto add_lstm_params = [&add_scalar_int32, + &add_scalar_float32](void* data) { + auto builtin = reinterpret_cast<TfLiteLSTMParams*>(data); + add_scalar_int32(builtin->activation); + add_scalar_float32(builtin->cell_clip); + 
add_scalar_float32(builtin->proj_clip); + }; + + // LSTM in NNAPI requires scratch tensor as an output operand. + auto add_lstm_scratch_tensor_float32 = [subgraph, &node, &nn_model, + &next_id, &augmented_outputs]() { + if (node.temporaries->size == 0) return; + int scratch_buffer_index = node.temporaries->data[0]; + const TfLiteTensor* tensor = subgraph->tensor(scratch_buffer_index); + ANeuralNetworksOperandType operand_type{ + ANEURALNETWORKS_TENSOR_FLOAT32, + static_cast<uint32_t>(tensor->dims->size), + reinterpret_cast<uint32_t*>(tensor->dims->data), tensor->params.scale, + tensor->params.zero_point}; + CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type)); + augmented_outputs.insert(augmented_outputs.begin(), next_id++); + }; + + auto add_mean_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteReducerParams*>(data); + add_scalar_int32(builtin->keep_dims); + }; + + auto add_svdf_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteSVDFParams*>(data); + add_scalar_int32(builtin->rank); + add_scalar_int32(builtin->activation); + }; + + auto add_rnn_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteRNNParams*>(data); + add_scalar_int32(builtin->activation); + }; + + auto add_squeeze_params = [&](void* data) { + const auto* builtin = reinterpret_cast<TfLiteSqueezeParams*>(data); + // Note that we add the squeeze dimensions even if the dimensions were + // unspecified (empty), as NNAPI requires the operand. + add_vector_int32(builtin->squeeze_dims, + static_cast<uint32_t>(builtin->num_squeeze_dims)); + }; + + // Handle optional input tensors. 
+ auto add_optional_tensors = [&nn_model, &augmented_inputs, + &next_id](int nn_type) { + for (size_t idx = 0; idx < augmented_inputs.size(); idx++) { + // Fix to use strict build option + if (augmented_inputs[idx] == static_cast<uint32_t>(kOptionalTensor)) { + const std::vector<uint32_t> dim = {0, 0}; + ANeuralNetworksOperandType operand_type{nn_type, 2, dim.data(), 0, 0}; + CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type)) + CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, + nullptr, 0)) + augmented_inputs[idx] = next_id++; + } + } + }; + + int nnapi_version = 10; +#include "nnapi_delegate_ex_AddOpsAndParams_lambda.inc" + + // Fix to use strict build option + ANeuralNetworksOperationType nn_op_type = -1; + + // Using namespace directive to minimize diff with upstream tensorflow + namespace tflite = ::tflite; + + switch (builtin) { + case tflite::BuiltinOperator_ADD: + nn_op_type = ANEURALNETWORKS_ADD; + RETURN_ERROR_IF_TFLITE_FAILED(add_add_params(node.builtin_data)); + break; + case tflite::BuiltinOperator_MUL: + nn_op_type = ANEURALNETWORKS_MUL; + RETURN_ERROR_IF_TFLITE_FAILED(add_add_params(node.builtin_data)); + break; + case tflite::BuiltinOperator_AVERAGE_POOL_2D: + RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data)); + nn_op_type = ANEURALNETWORKS_AVERAGE_POOL_2D; + break; + case tflite::BuiltinOperator_MAX_POOL_2D: + RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data)); + nn_op_type = ANEURALNETWORKS_MAX_POOL_2D; + break; + case tflite::BuiltinOperator_L2_POOL_2D: + RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data)); + nn_op_type = ANEURALNETWORKS_L2_POOL_2D; + break; + case tflite::BuiltinOperator_CONV_2D: { + auto builtin = reinterpret_cast<TfLiteConvParams*>(node.builtin_data); + if (builtin->dilation_width_factor != 1 || + builtin->dilation_height_factor != 1 || node.inputs->size != 3) { + logError("NNAPI does not support dilated Conv2D."); + return kTfLiteError; + } + 
} + RETURN_ERROR_IF_TFLITE_FAILED( + add_convolution_params(node.builtin_data)); + nn_op_type = ANEURALNETWORKS_CONV_2D; + break; + case tflite::BuiltinOperator_RELU: + nn_op_type = ANEURALNETWORKS_RELU; + break; + case tflite::BuiltinOperator_RELU_N1_TO_1: + nn_op_type = ANEURALNETWORKS_RELU1; + break; + case tflite::BuiltinOperator_RELU6: + nn_op_type = ANEURALNETWORKS_RELU6; + break; + case tflite::BuiltinOperator_TANH: + nn_op_type = ANEURALNETWORKS_TANH; + break; + case tflite::BuiltinOperator_FLOOR: + nn_op_type = ANEURALNETWORKS_FLOOR; + break; + case tflite::BuiltinOperator_LOGISTIC: + nn_op_type = ANEURALNETWORKS_LOGISTIC; + break; + case tflite::BuiltinOperator_DEPTHWISE_CONV_2D: + RETURN_ERROR_IF_TFLITE_FAILED( + add_depthwise_conv_params(node.builtin_data)); + nn_op_type = ANEURALNETWORKS_DEPTHWISE_CONV_2D; + break; + case tflite::BuiltinOperator_CONCATENATION: + RETURN_ERROR_IF_TFLITE_FAILED( + add_concatenation_params(node.builtin_data)); + nn_op_type = ANEURALNETWORKS_CONCATENATION; + break; + case tflite::BuiltinOperator_SOFTMAX: + add_softmax_params(node.builtin_data); + nn_op_type = ANEURALNETWORKS_SOFTMAX; + break; + case tflite::BuiltinOperator_FULLY_CONNECTED: + RETURN_ERROR_IF_TFLITE_FAILED( + add_fully_connected_params(node.builtin_data)); + nn_op_type = ANEURALNETWORKS_FULLY_CONNECTED; + break; + case tflite::BuiltinOperator_RESHAPE: + if (node.inputs->size != 2) { + logError("NNAPI only supports 2-input RESHAPE"); + return kTfLiteError; + } + nn_op_type = ANEURALNETWORKS_RESHAPE; + // add_reshape_params(node.builtin_data); + break; + case tflite::BuiltinOperator_RESIZE_BILINEAR: + add_resize_bilinear_params(node.builtin_data); + nn_op_type = ANEURALNETWORKS_RESIZE_BILINEAR; + break; + case tflite::BuiltinOperator_SPACE_TO_DEPTH: + add_space_to_depth_params(node.builtin_data); + nn_op_type = ANEURALNETWORKS_SPACE_TO_DEPTH; + break; + case tflite::BuiltinOperator_LSTM: { + if (node.inputs->size + /* no of params */ 3 != 21) { + 
logError("NNAPI only supports 21-input LSTMs"); + return kTfLiteError; + } + duplicate_state_tensor_float32( + node.outputs->data[/*kOutputStateTensor*/ 0]); + duplicate_state_tensor_float32( + node.outputs->data[/*kCellStateTensor*/ 1]); + add_lstm_params(node.builtin_data); + add_lstm_scratch_tensor_float32(); + add_optional_tensors(ANEURALNETWORKS_TENSOR_FLOAT32); + nn_op_type = ANEURALNETWORKS_LSTM; + break; + } + case tflite::BuiltinOperator_DEQUANTIZE: + nn_op_type = ANEURALNETWORKS_DEQUANTIZE; + break; + case tflite::BuiltinOperator_SVDF: { + duplicate_state_tensor_float32(node.outputs->data[/*kStateTensor*/ 0]); + add_svdf_params(node.builtin_data); + nn_op_type = ANEURALNETWORKS_SVDF; + break; + } + case tflite::BuiltinOperator_RNN: { + duplicate_state_tensor_float32( + node.outputs->data[/*kHiddenStateTensor*/ 0]); + add_rnn_params(node.builtin_data); + nn_op_type = ANEURALNETWORKS_RNN; + break; + } + case tflite::BuiltinOperator_EMBEDDING_LOOKUP: + nn_op_type = ANEURALNETWORKS_EMBEDDING_LOOKUP; + break; + case tflite::BuiltinOperator_PAD: + nnapi_version = 11; // require NNAPI 1.1 + nn_op_type = ANEURALNETWORKS_PAD; + break; + case tflite::BuiltinOperator_MEAN: + nnapi_version = 11; // require NNAPI 1.1 + add_mean_params(node.builtin_data); + nn_op_type = ANEURALNETWORKS_MEAN; + break; + case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: + nn_op_type = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION; + add_lrn_params(node.builtin_data); + break; + case tflite::BuiltinOperator_DIV: + nnapi_version = 11; // require NNAPI 1.1 + nn_op_type = ANEURALNETWORKS_DIV; + RETURN_ERROR_IF_TFLITE_FAILED(check_and_add_activation( + reinterpret_cast<TfLiteDivParams*>(node.builtin_data)->activation)); + break; + case tflite::BuiltinOperator_SUB: + nnapi_version = 11; // require NNAPI 1.1 + nn_op_type = ANEURALNETWORKS_SUB; + RETURN_ERROR_IF_TFLITE_FAILED(check_and_add_activation( + reinterpret_cast<TfLiteSubParams*>(node.builtin_data)->activation)); + break; + case 
tflite::BuiltinOperator_SQUEEZE: + nnapi_version = 11; // requires NNAPI 1.1 + add_squeeze_params(node.builtin_data); + nn_op_type = ANEURALNETWORKS_SQUEEZE; + break; + case tflite::BuiltinOperator_TRANSPOSE: + // The permutation input tensor value dictates the output dimensions. + // TODO(b/110888333): Support dynamically-sized tensors in delegates. + if ((node.inputs->size > 1) && + (subgraph->tensor(node.inputs->data[1])->allocation_type != + kTfLiteMmapRo)) { + logError("NNAPI does not yet support dynamic tensors."); + return kTfLiteError; + } + nnapi_version = 11; // require NNAPI 1.1 + nn_op_type = ANEURALNETWORKS_TRANSPOSE; + break; + case tflite::BuiltinOperator_L2_NORMALIZATION: + nn_op_type = ANEURALNETWORKS_L2_NORMALIZATION; + if (reinterpret_cast<TfLiteL2NormParams*>(node.builtin_data) + ->activation != kTfLiteActNone) { + logError( + "NNAPI does not support L2Normalization with fused activations"); + return kTfLiteError; + } + if ((node.inputs->size > 0) && + (subgraph->tensor(node.inputs->data[0])->dims->size != 4)) { + logError("NNAPI only supports input rank 4 for L2Normalization"); + return kTfLiteError; + } + break; + case tflite::BuiltinOperator_HASHTABLE_LOOKUP: + if (subgraph->tensor(node.outputs->data[0])->type != kTfLiteFloat32) { + logError("NNAPI only support HASHTABLE_LOOKUP with float32 output", + builtin); + return kTfLiteError; + } + nn_op_type = ANEURALNETWORKS_HASHTABLE_LOOKUP; + break; + case tflite::BuiltinOperator_SLICE: + nn_op_type = ANEURALNETWORKS_SLICE; + break; + case tflite::BuiltinOperator_STRIDED_SLICE: + add_strided_slice_params(node.builtin_data); + nn_op_type = ANEURALNETWORKS_STRIDED_SLICE; + break; + case tflite::BuiltinOperator_SPACE_TO_BATCH_ND: + nnapi_version = 11; // require NNAPI 1.1 + nn_op_type = ANEURALNETWORKS_SPACE_TO_BATCH_ND; + break; + case tflite::BuiltinOperator_BATCH_TO_SPACE_ND: + nnapi_version = 11; // require NNAPI 1.1 + nn_op_type = ANEURALNETWORKS_BATCH_TO_SPACE_ND; + 
check_batch_to_space_params(); + break; + case tflite::BuiltinOperator_CAST: + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_CAST; + break; + case tflite::BuiltinOperator_TOPK_V2: + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_TOPK_V2; + break; + case tflite::BuiltinOperator_GATHER: + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_GATHER; + add_gather_params(node.builtin_data); + break; + case tflite::BuiltinOperator_SPLIT: + add_split_params(node.builtin_data); + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_SPLIT_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_NEG: + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_NEG; + break; + case tflite::BuiltinOperator_EXP: + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_EXP; + break; + case tflite::BuiltinOperator_TRANSPOSE_CONV: + add_transpose_conv_params(node.builtin_data); + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_TRANSPOSE_CONV_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_PRELU: + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_PRELU_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_ARG_MAX: + check_arg_max_input(node.builtin_data); + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_ARGMAX_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), 
static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_PACK: + add_pack_ex_params(node.builtin_data); + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_PACK_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_UNPACK: + add_unpack_ex_params(node.builtin_data); + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_UNPACK_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_SQRT: + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_SQRT; + break; + case tflite::BuiltinOperator_RSQRT: + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_RSQRT; + break; + case tflite::BuiltinOperator_EQUAL: + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_EQUAL_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_NOT_EQUAL: + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_NOT_EQUAL_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_SUM: + add_reducer_params(node.builtin_data); + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_REDUCE_SUM_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + 
reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_REDUCE_MAX: + add_reducer_v12_params(node.builtin_data); + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_REDUCE_MAX; + break; + case tflite::BuiltinOperator_REDUCE_MIN: + add_reducer_params(node.builtin_data); + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_REDUCE_MIN_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_LOGICAL_AND: + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_LOGICAL_AND_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_LOGICAL_OR: + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_LOGICAL_OR_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_LOGICAL_NOT: + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_LOGICAL_NOT_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_SQUARED_DIFFERENCE: + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + case tflite::BuiltinOperator_MAXIMUM: + nn_op_type = ANEURALNETWORKS_MAXIMUM; + 
break; + case tflite::BuiltinOperator_MINIMUM: + nn_op_type = ANEURALNETWORKS_MINIMUM; + break; + case tflite::BuiltinOperator_ABS: + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_ABS; + break; + case tflite::BuiltinOperator_CONCAT_EMBEDDINGS: + case tflite::BuiltinOperator_LSH_PROJECTION: + case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: + case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: + case tflite::BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: + case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: + case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: + //case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: + case tflite::BuiltinOperator_PADV2: + //case tflite::BuiltinOperator_RESIZE_BILINEAR: + case tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: + case tflite::BuiltinOperator_CALL: + case tflite::BuiltinOperator_SKIP_GRAM: + //case tflite::BuiltinOperator_RELU_N1_TO_1: + //case tflite::BuiltinOperator_GATHER: + //case tflite::BuiltinOperator_SPACE_TO_BATCH_ND: + //case tflite::BuiltinOperator_BATCH_TO_SPACE_ND: + //case tflite::BuiltinOperator_TOPK_V2: + //case tflite::BuiltinOperator_SPLIT: + //case tflite::BuiltinOperator_STRIDED_SLICE: + //case tflite::BuiltinOperator_EXP: + case tflite::BuiltinOperator_LOG_SOFTMAX: + //case tflite::BuiltinOperator_DEQUANTIZE: + case tflite::BuiltinOperator_DELEGATE: + //case tflite::BuiltinOperator_CAST: + //case tflite::BuiltinOperator_PRELU: + //case tflite::BuiltinOperator_MAXIMUM: + //case tflite::BuiltinOperator_MINIMUM: + //case tflite::BuiltinOperator_ARG_MAX: + case tflite::BuiltinOperator_ARG_MIN: + case tflite::BuiltinOperator_GREATER: + case tflite::BuiltinOperator_GREATER_EQUAL: + case tflite::BuiltinOperator_LESS: + case tflite::BuiltinOperator_LESS_EQUAL: + //case tflite::BuiltinOperator_NEG: + case tflite::BuiltinOperator_SELECT: + // case tflite::BuiltinOperator_SLICE: + case tflite::BuiltinOperator_SIN: + case tflite::BuiltinOperator_LOG: + //case 
tflite::BuiltinOperator_TRANSPOSE_CONV: + case tflite::BuiltinOperator_TILE: + case tflite::BuiltinOperator_EXPAND_DIMS: + case tflite::BuiltinOperator_SPARSE_TO_DENSE: + //case tflite::BuiltinOperator_EQUAL: + //case tflite::BuiltinOperator_NOT_EQUAL: + //case tflite::BuiltinOperator_SUM: + //case tflite::BuiltinOperator_REDUCE_MAX: + //case tflite::BuiltinOperator_REDUCE_MIN: + case tflite::BuiltinOperator_REDUCE_PROD: + //case tflite::BuiltinOperator_SQRT: + //case tflite::BuiltinOperator_RSQRT: + case tflite::BuiltinOperator_SHAPE: + case tflite::BuiltinOperator_POW: + case tflite::BuiltinOperator_FAKE_QUANT: + //case tflite::BuiltinOperator_PACK: + //case tflite::BuiltinOperator_LOGICAL_OR: + case tflite::BuiltinOperator_ONE_HOT: + //case tflite::BuiltinOperator_LOGICAL_AND: + //case tflite::BuiltinOperator_LOGICAL_NOT: + //case tflite::BuiltinOperator_UNPACK: + case tflite::BuiltinOperator_FLOOR_DIV: + case tflite::BuiltinOperator_REDUCE_ANY: + case tflite::BuiltinOperator_SQUARE: + case tflite::BuiltinOperator_ZEROS_LIKE: + case tflite::BuiltinOperator_FILL: + case tflite::BuiltinOperator_FLOOR_MOD: + case tflite::BuiltinOperator_RANGE: + case tflite::BuiltinOperator_LEAKY_RELU: + //case tflite::BuiltinOperator_SQUARED_DIFFERENCE: + case tflite::BuiltinOperator_MIRROR_PAD: + //case tflite::BuiltinOperator_ABS: + case tflite::BuiltinOperator_SPLIT_V: + logError("Op code %d is currently not delegated to NNAPI", builtin); + return kTfLiteError; + break; + case tflite::BuiltinOperator_CUSTOM: { + std::string custom_name(registration.custom_name); + if (custom_name.compare("TensorFlowMax") == 0) { + add_reducer_v12_params(node.builtin_data); + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_REDUCE_MAX; + break; + } + else if (custom_name.compare("SquaredDifference") == 0) { + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_SQUARED_DIFFERENCE_EX, + static_cast<uint32_t>(augmented_inputs.size()), + 
augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + } + else if (custom_name.compare("TensorFlowSum") == 0) { + add_reducer_params(node.builtin_data); + CHECK_NN(ANeuralNetworksModel_addOperationEx( + nn_model, ANEURALNETWORKS_REDUCE_SUM_EX, + static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(node.outputs->size), + reinterpret_cast<uint32_t*>(node.outputs->data))); + continue; + } + else if (custom_name.compare("Abs") == 0) { + nnapi_version = 12; // require NNAPI 1.2 + nn_op_type = ANEURALNETWORKS_ABS; + break; + } + logError("Custom operations are not supported when using NNAPI."); + return kTfLiteError; + break; + } + default: + // Fix to use strict build option + logError("Op code %d is currently not delegated to NNAPI", builtin); + return kTfLiteError; + break; + } + + if (nnapi_version == 11 && GetAndroidSdkVersionCached() < 28) { + //logError("Op %d needs NNAPI1.1", builtin); + //return kTfLiteError; + } + + // Add the operation. + RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_addOperation( + nn_model, nn_op_type, static_cast<uint32_t>(augmented_inputs.size()), + augmented_inputs.data(), + static_cast<uint32_t>(augmented_outputs.size()), + reinterpret_cast<uint32_t*>(augmented_outputs.data()))); + } + return kTfLiteOk; +} + +TfLiteStatus NNAPIDelegate::BuildGraph(::tflite::Subgraph* subgraph) { + if (nn_model_ && nn_compiled_model_) return model_status_; + + // TODO(aselle): This is not correct. need to handle resize invalidation. + if (!nn_model_) { + CHECK_NN(ANeuralNetworksModel_create(&nn_model_)); + + // Find which tensors should be added to NNAPI. TFLite has temporaries + // and RNN back-edges which are are not valid for NNAPI. We look through all + // inputs and outputs and mark the mapping in tensor_id_to_nnapi_id with + // kOperandIdNotSet. 
addTensorOperands will replace those with the + // corresponding NNAPI operand ids and skip kOperandNotNeeded entries. + std::vector<int64_t> tensor_id_to_nnapi_id(subgraph->tensors_size(), + kOperandNotNeeded); + // Fix to use strict build option + auto set_ids_to_not_set = [&tensor_id_to_nnapi_id](const int* buf, + int count) { + for (int j = 0; j < count; j++) { + auto tensor_id = buf[j]; + if (tensor_id != kOptionalTensor) { + tensor_id_to_nnapi_id[tensor_id] = kOperandIdNotSet; + } + } + }; + for (size_t i = 0; i < subgraph->nodes_size(); i++) { + const auto* node_and_registration = subgraph->node_and_registration(i); + const TfLiteNode& node = node_and_registration->first; + set_ids_to_not_set(node.inputs->data, node.inputs->size); + set_ids_to_not_set(node.outputs->data, node.outputs->size); + } + set_ids_to_not_set(subgraph->inputs().data(), subgraph->inputs().size()); + set_ids_to_not_set(subgraph->outputs().data(), subgraph->outputs().size()); + + uint32_t next_id = 0; + RETURN_ERROR_IF_TFLITE_FAILED(addTensorOperands( + subgraph, nn_model_, &next_id, &tensor_id_to_nnapi_id)); + RETURN_ERROR_IF_TFLITE_FAILED( + AddOpsAndParams(subgraph, nn_model_, next_id, &model_states_inputs_, + &model_states_outputs_, tensor_id_to_nnapi_id)); + + std::vector<uint32_t> augmented_inputs; + MapAndAddTensorIds(subgraph->inputs().data(), subgraph->inputs().size(), + &augmented_inputs, tensor_id_to_nnapi_id); + augmented_inputs.insert(augmented_inputs.end(), + model_states_inputs_.begin(), + model_states_inputs_.end()); + std::vector<uint32_t> augmented_outputs; + MapAndAddTensorIds(subgraph->outputs().data(), subgraph->outputs().size(), + &augmented_outputs, tensor_id_to_nnapi_id); + MapAndAddTensorIds(model_states_outputs_.data(), + model_states_outputs_.size(), &augmented_outputs, + tensor_id_to_nnapi_id); + + CHECK_NN(ANeuralNetworksModel_identifyInputsAndOutputs( + nn_model_, static_cast<uint32_t>(augmented_inputs.size()), + reinterpret_cast<const 
uint32_t*>(augmented_inputs.data()), + static_cast<uint32_t>(augmented_outputs.size()), + reinterpret_cast<const uint32_t*>(augmented_outputs.data()))); + + // TODO Support ANeuralNetworksModel_relaxComputationFloat32toFloat16 + /*if (GetAndroidSdkVersionCached() >= 28) { + CHECK_NN(ANeuralNetworksModel_relaxComputationFloat32toFloat16( + nn_model_, subgraph->GetAllowFp16PrecisionForFp32())); + }*/ + CHECK_NN(ANeuralNetworksModel_finish(nn_model_)); + } + if (!nn_compiled_model_) { + CHECK_NN(ANeuralNetworksCompilation_create(nn_model_, &nn_compiled_model_)); + CHECK_NN(ANeuralNetworksCompilation_finish(nn_compiled_model_)); + } + return kTfLiteOk; +} + +// Use unordered_map for temporary buffer +#include <unordered_map> + +TfLiteStatus NNAPIDelegate::Invoke(::tflite::Subgraph* subgraph) { + if (!nn_model_) { + model_status_ = BuildGraph(subgraph); + if (model_status_ != kTfLiteOk) { + logError("Failed to build graph for NNAPI"); + } + } + if (model_status_ != kTfLiteOk) { + return model_status_; + } + + ANeuralNetworksExecution* execution = nullptr; + CHECK_NN(ANeuralNetworksExecution_create(nn_compiled_model_, &execution)); + + // Allocate temporary buffer to save casted boolean tensor + std::unordered_map<size_t, uint8_t*> input_boolean_tensors; + std::unordered_map<size_t, uint8_t*> output_boolean_tensors; + for (size_t i = 0; i < subgraph->inputs().size(); i++) + { + int input = subgraph->inputs()[i]; + TfLiteTensor* tensor = subgraph->tensor(input); + if (tensor->type == kTfLiteBool) + { + size_t elements = tensor->bytes / sizeof(bool); + uint8_t* temp_tensor = new uint8_t[tensor->bytes / sizeof(bool)]; + input_boolean_tensors[i] = temp_tensor; + for (size_t idx = 0; idx < elements; idx++) + { + temp_tensor[idx] = (tensor->data.b[idx] ? 
0x00 : 0xff); + } + } + } + for (size_t i = 0; i < subgraph->outputs().size(); i++) + { + int output = subgraph->outputs()[i]; + TfLiteTensor* tensor = subgraph->tensor(output); + if (tensor->type == kTfLiteBool) + { + uint8_t* temp_tensor = new uint8_t[tensor->bytes / sizeof(bool)]; + output_boolean_tensors[i] = temp_tensor; + } + } + + // Currently perform deep copy of input buffer + for (size_t i = 0; i < subgraph->inputs().size(); i++) { + int input = subgraph->inputs()[i]; + // TODO(aselle): Is this what we want or do we want input instead? + // TODO(aselle): This should be called setInputValue maybe to be cons. + TfLiteTensor* tensor = subgraph->tensor(input); + // Workaround to pass bool type under NNAPI + if (tensor->type == kTfLiteBool) + { + CHECK_NN(ANeuralNetworksExecution_setInput( + execution, i, nullptr, input_boolean_tensors[i], tensor->bytes * sizeof(uint8_t) / sizeof(bool))); + } + else + { + CHECK_NN(ANeuralNetworksExecution_setInput( + execution, i, nullptr, tensor->data.raw, tensor->bytes)); + } + } + + // Tell nn api where to place final data. + for (size_t i = 0; i < subgraph->outputs().size(); i++) { + int output = subgraph->outputs()[i]; + TfLiteTensor* tensor = subgraph->tensor(output); + + // Workaround to pass bool type under NNAPI + if (tensor->type == kTfLiteBool) + { + CHECK_NN(ANeuralNetworksExecution_setOutput( + execution, i, nullptr, output_boolean_tensors[i], tensor->bytes * sizeof(uint8_t) / sizeof(bool))); + } + else + { + CHECK_NN(ANeuralNetworksExecution_setOutput( + execution, i, nullptr, tensor->data.raw, tensor->bytes)); + } + } + + // The state_out of previous invocation need to be mapped to state_in of + // current invocation. 
+ for (size_t i = 0; i < model_states_outputs_.size(); i++) { + int state_tensor_idx = model_states_outputs_[i]; + TfLiteTensor* tensor = subgraph->tensor(state_tensor_idx); + // Here we are using a deep copy for state_in tensors so that we are not + // reading and writing into the same buffer during a invocation. + // TODO(miaowang): using double shared buffer to minimize the copies. + CHECK_NN(ANeuralNetworksExecution_setInput( + execution, i + subgraph->inputs().size(), nullptr, tensor->data.raw, + tensor->bytes)); + // Tell NNAPI where to output the state_out. + CHECK_NN(ANeuralNetworksExecution_setOutput( + execution, i + subgraph->outputs().size(), nullptr, tensor->data.raw, + tensor->bytes)); + } + + // Currently use blocking compute. + ANeuralNetworksEvent* event = nullptr; + CHECK_NN(ANeuralNetworksExecution_startCompute(execution, &event)); + CHECK_NN(ANeuralNetworksEvent_wait(event)); + ANeuralNetworksEvent_free(event); + ANeuralNetworksExecution_free(execution); + + // Tell nn api where to place final data. + for (size_t i = 0; i < subgraph->inputs().size(); i++) { + int input = subgraph->inputs()[i]; + TfLiteTensor* tensor = subgraph->tensor(input); + + if (tensor->type == kTfLiteBool) + { + uint8_t* temp_tensor = input_boolean_tensors[i]; + input_boolean_tensors[i] = nullptr; + delete temp_tensor; + } + } + for (size_t i = 0; i < subgraph->outputs().size(); i++) { + int output = subgraph->outputs()[i]; + TfLiteTensor* tensor = subgraph->tensor(output); + + if (tensor->type == kTfLiteBool) + { + uint8_t* temp_tensor = output_boolean_tensors[i]; + size_t elements = tensor->bytes / sizeof(bool); + for (size_t idx = 0; idx < elements; idx++) + { + tensor->data.b[idx] = ((temp_tensor[idx] == 0x00) ? 
false : true); + } + output_boolean_tensors[i] = nullptr; + delete temp_tensor; + } + } + +#if 0 + printf("From the NN API:\n"); + TfLiteTensor* tensor = subgraph->tensor(subgraph->outputs()[0]); + if (float* data = + subgraph->typed_tensor<float>(subgraph->outputs()[0])) { + size_t num = tensor->bytes / sizeof(float); + for (float* p = data; p < data + num; p++) { + printf(" %f", *p); + } + printf("\n"); + } +#endif + + return kTfLiteOk; +} + +bool NNAPIDelegate::IsSupported() { return nnfw::NNAPIExists(); } + +} // namespace tflite +} // namespace nnfw + +// clang-format on diff --git a/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc new file mode 100644 index 000000000..5b718029b --- /dev/null +++ b/runtime/libs/tflite/port/1.13.1/src/nnapi_delegate_ex_AddOpsAndParams_lambda.inc @@ -0,0 +1,160 @@ +// This file is included from AddOpsAndParams defined in nnapi_delegate.cc +// and contains lambda for extened implementation to original Tensorflow Lite. + auto add_scalar_bool8 = [&nn_model, &augmented_inputs, + &next_id](bool value) { + // Fix to use strict build option + int8_t casted_value = (value ? 
1 : 0); + ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_BOOL; + CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type)) + CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &casted_value, + sizeof(int8_t))) + augmented_inputs.push_back(next_id++); + }; + + auto add_resize_bilinear_params = [&add_scalar_int32, &subgraph, &augmented_inputs](void* data) { + auto builtin = reinterpret_cast<TfLiteResizeBilinearParams*>(data); + if (builtin->align_corners) { + FATAL("Resize bilinear does not support align corners in NNAPI"); + } + + TfLiteTensor* tensor = subgraph->tensor(augmented_inputs.back()); + assert(tensor->type == kTfLiteInt32); + assert(tensor->bytes == sizeof(int)*2); + augmented_inputs.pop_back(); + + int height = ((int*)(tensor->data.raw))[1]; + int width = ((int*)(tensor->data.raw))[0]; + add_scalar_int32(height); + add_scalar_int32(width); + }; + + auto add_transpose_conv_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(data); + add_scalar_int32(builtin->padding); + add_scalar_int32(builtin->stride_width); + add_scalar_int32(builtin->stride_height); + }; + + auto add_lrn_params = [&add_scalar_int32, + &add_scalar_float32](void* data) { + auto builtin = reinterpret_cast<TfLiteLocalResponseNormParams*>(data); + add_scalar_int32(builtin->radius); + add_scalar_float32(builtin->bias); + add_scalar_float32(builtin->alpha); + add_scalar_float32(builtin->beta); + }; + + auto add_strided_slice_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteStridedSliceParams*>(data); + add_scalar_int32(builtin->begin_mask); + add_scalar_int32(builtin->end_mask); + // ellipsis_mask and new_axis_mask are not supported on nn runtime + // cf) tflite interpreter supports both operations + if (builtin->ellipsis_mask) { + FATAL("STRIDE_SLICE does not support ellipsis_mask in NNAPI"); + } + if (builtin->new_axis_mask) { + FATAL("STRIDE_SLICE 
does not support new_axis_mask in NNAPI"); + } + add_scalar_int32(builtin->shrink_axis_mask); + }; + + auto add_gather_params = [&add_scalar_int32, &augmented_inputs](void* data) { + auto builtin = reinterpret_cast<TfLiteGatherParams*>(data); + if (builtin->axis != 0) { + FATAL("GATHER does not support axis>0 in NNAPI"); + } + + auto indices_index = augmented_inputs.back(); + augmented_inputs.pop_back(); + add_scalar_int32(builtin->axis); + augmented_inputs.push_back(indices_index); + }; + + auto add_pack_ex_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLitePackParams*>(data); + add_scalar_int32(builtin->values_count); + add_scalar_int32(builtin->axis); + }; + + auto add_unpack_ex_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteUnpackParams*>(data); + add_scalar_int32(builtin->num); + add_scalar_int32(builtin->axis); + }; + + auto check_batch_to_space_params = [subgraph, &node, &augmented_inputs]() { + + //If there are 3 inputs, check if crops is having default values {0, 0, 0, 0} + //Else unsupported by NNAPI + + if(augmented_inputs.size() == 3) + { + const uint32_t crops_buffer_index = node.inputs->data[2]; + const TfLiteTensor* crops = subgraph->tensor(crops_buffer_index); + const int *crops_value = crops->data.i32; + + //Check if crops is having default values {0, 0, 0, 0} + if(crops_value[0] != 0 || crops_value[1] != 0 || crops_value[2] != 0 || crops_value[3] != 0) + { + FATAL("BATCH_TO_SPACE_ND does not support Explicit crops in NNAPI"); + } + else + { + //Restrict crops input and pass only other two inputs + augmented_inputs.pop_back(); + } + } + }; + + auto add_split_params = [&add_scalar_int32, &augmented_inputs](void* data) { + // swap 1st and 2nd operand order + auto input_tensor = augmented_inputs[1]; + auto axis = augmented_inputs[0]; + augmented_inputs[0] = input_tensor; + augmented_inputs[1] = axis; + + auto builtin = reinterpret_cast<TfLiteSplitParams*>(data); + 
add_scalar_int32(builtin->num_splits); + }; + + auto check_arg_max_input = [&subgraph, &augmented_inputs](void *data) { + auto params = reinterpret_cast<TfLiteArgMaxParams*>(data); + if (params->output_type != kTfLiteInt32) + { + FATAL("Cannot handle output type in NNAPI"); + } + + TfLiteTensor* axis_tensor = subgraph->tensor(augmented_inputs.back()); + assert(axis_tensor->type == kTfLiteInt32); + + int64_t count = 1; + for (int i = 0; i < axis_tensor->dims->size; ++i) { + count *= axis_tensor->dims->data[i]; + } + assert(count == 1); + }; + + auto add_reducer_v12_params = [&add_scalar_bool8](void* data) { + auto builtin = reinterpret_cast<TfLiteReducerParams*>(data); + if (builtin == nullptr) + { + add_scalar_bool8(0); + } + else + { + add_scalar_bool8(builtin->keep_dims); + } + }; + + auto add_reducer_params = [&add_scalar_int32](void* data) { + auto builtin = reinterpret_cast<TfLiteReducerParams*>(data); + if (builtin == nullptr) + { + add_scalar_int32(0); + } + else + { + add_scalar_int32(builtin->keep_dims); + } + }; diff --git a/runtime/libs/tflite/port/CMakeLists.txt b/runtime/libs/tflite/port/CMakeLists.txt new file mode 100644 index 000000000..82c83f722 --- /dev/null +++ b/runtime/libs/tflite/port/CMakeLists.txt @@ -0,0 +1,7 @@ +# We may need to support multiple tensorflow version +# Example) +# For ubuntu: tensorflow lite v1.13.1 +# For tizen: tensorflow lite v1.9 +set(SUPPORT_TFLITE_VERSION "1.13.1" CACHE STRING "Supporting TensorFlow lite version") + +add_subdirectories() diff --git a/runtime/libs/tflite/src/Diff.cpp b/runtime/libs/tflite/src/Diff.cpp new file mode 100644 index 000000000..879de0735 --- /dev/null +++ b/runtime/libs/tflite/src/Diff.cpp @@ -0,0 +1,621 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "tflite/Diff.h" +#include "tflite/ext/nnapi_delegate.h" + +#include "misc/fp32.h" + +#include "misc/tensor/IndexIterator.h" +#include "misc/tensor/IndexFormatter.h" +#include "misc/tensor/Zipper.h" +#include "misc/tensor/Comparator.h" + +#include "misc/EnvVar.h" + +#include <iostream> +#include <cassert> + +class DiffSummary : public nnfw::misc::tensor::Comparator::Observer +{ +public: + DiffSummary() + : max_abs_diff_index(0), max_abs_diff_expected{0.0f}, max_abs_diff_obtained{0.0f}, + max_abs_diff_value{0.0f}, max_rel_diff_index(0), max_rel_diff_expected{0.0f}, + max_rel_diff_obtained{0.0f}, max_rel_diff_value{0.0f} + { + // DO NOTHING + } + +public: + void notify(const nnfw::misc::tensor::Index &index, float expected, float obtained) override; + +public: + nnfw::misc::tensor::Index max_abs_diff_index; + float max_abs_diff_expected; + float max_abs_diff_obtained; + float max_abs_diff_value; + + nnfw::misc::tensor::Index max_rel_diff_index; + float max_rel_diff_expected; + float max_rel_diff_obtained; + float max_rel_diff_value; +}; + +void DiffSummary::notify(const nnfw::misc::tensor::Index &index, float expected, float obtained) +{ + const auto abs_diff_value = std::fabs(expected - obtained); + + if (max_abs_diff_value < abs_diff_value) + { + max_abs_diff_index = index; + max_abs_diff_value = abs_diff_value; + max_abs_diff_expected = expected; + max_abs_diff_obtained = obtained; + } + + const auto rel_diff_value = nnfw::misc::fp32::relative_diff(expected, obtained); + + if (max_rel_diff_value < rel_diff_value) + { + 
// NOTE(review): tail of a DiffSummary-updating routine whose start lies above this
// chunk — records the index and values of the largest relative diff seen so far.
  max_rel_diff_index = index;
  max_rel_diff_value = rel_diff_value;
  max_rel_diff_expected = expected;
  max_rel_diff_obtained = obtained;
  }
}

// Compare two tensor views element-wise for *exact* equality (integer/bool path).
// Prints a per-tensor MATCHED/UNMATCHED summary on stdout; when _verbose is set,
// every mismatching element is printed with its index.
// @return true iff every element of 'expected' equals the matching element of 'obtained'
template <typename T>
bool TfLiteInterpMatchApp::compareSingleTensorView(const nnfw::tflite::TensorView<T> &expected,
                                                   const nnfw::tflite::TensorView<T> &obtained,
                                                   int id) const
{
  std::vector<nnfw::misc::tensor::Diff<T>> diffs;
  assert(expected.shape() == obtained.shape());

  using nnfw::misc::tensor::zip;
  using nnfw::misc::tensor::Index;

  // Walk both views in lockstep, recording every exact mismatch
  zip(expected.shape(), expected, obtained)
      << [&](const Index &index, T expected_value, T obtained_value) {
           if (expected_value != obtained_value)
           {
             diffs.emplace_back(index, expected_value, obtained_value);
           }
         };

  // TODO Unify summary generation code
  if (diffs.size() == 0)
  {
    std::cout << " Tensor #" << id << ": MATCHED" << std::endl;
  }
  else
  {
    std::cout << " Tensor #" << id << ": UNMATCHED" << std::endl;
    std::cout << " " << diffs.size() << " diffs are detected" << std::endl;
  }

  if (diffs.size() > 0 && _verbose != 0)
  {
    std::cout << " ---- Details ---" << std::endl;
    for (const auto &diff : diffs)
    {
      std::cout << " Diff at [" << nnfw::misc::tensor::IndexFormatter(diff.index) << "]"
                << std::endl;
      std::cout << " expected: " << diff.expected << std::endl;
      std::cout << " obtained: " << diff.obtained << std::endl;
    }
  }

  return diffs.size() == 0;
}

// float specialization: compares through the tolerance-aware _comparator instead of
// exact equality, and additionally reports the maximum absolute/relative diffs
// (relative diff is expressed as a multiple of FLT_EPSILON, the "tolerance level").
template <>
bool TfLiteInterpMatchApp::compareSingleTensorView<float>(
    const nnfw::tflite::TensorView<float> &expected,
    const nnfw::tflite::TensorView<float> &obtained, int id) const
{
  DiffSummary summary;

  assert(expected.shape() == obtained.shape());
  auto diffs = _comparator.compare(expected.shape(), expected, obtained, &summary);

  // TODO Unify summary generation code
  if (diffs.size() == 0)
  {
    std::cout << " Tensor #" << id << ": MATCHED" << std::endl;
  }
  else
  {
    std::cout << " Tensor #" << id << ": UNMATCHED" << std::endl;
    std::cout << " " << diffs.size() << " diffs are detected" << std::endl;
  }

  // Print out max_diff
  if (summary.max_abs_diff_value > 0)
  {
    std::cout << " Max absolute diff at ["
              << nnfw::misc::tensor::IndexFormatter(summary.max_abs_diff_index) << "]" << std::endl;
    std::cout << " expected: " << summary.max_abs_diff_expected << std::endl;
    std::cout << " obtained: " << summary.max_abs_diff_obtained << std::endl;
    std::cout << " absolute diff: " << summary.max_abs_diff_value << std::endl;
  }

  if (summary.max_rel_diff_value > 0)
  {
    // Relative diff scaled by FLT_EPSILON so a user can choose a matching tolerance
    const auto tolerance_level = summary.max_rel_diff_value / FLT_EPSILON;

    std::cout << " Max relative diff at ["
              << nnfw::misc::tensor::IndexFormatter(summary.max_rel_diff_index) << "]" << std::endl;
    std::cout << " expected: " << summary.max_rel_diff_expected << std::endl;
    std::cout << " obtained: " << summary.max_rel_diff_obtained << std::endl;
    std::cout << " relative diff: " << summary.max_rel_diff_value << std::endl;
    std::cout << " (tolerance level = " << tolerance_level << ")" << std::endl;
  }

  if (diffs.size() > 0)
  {
    if (_verbose != 0)
    {
      std::cout << " ---- Details ---" << std::endl;
      for (const auto &diff : diffs)
      {
        const auto absolute_diff = std::fabs(diff.expected - diff.obtained);
        const auto relative_diff = nnfw::misc::fp32::relative_diff(diff.expected, diff.obtained);
        const auto tolerance_level = relative_diff / FLT_EPSILON;

        std::cout << " Diff at [" << nnfw::misc::tensor::IndexFormatter(diff.index) << "]"
                  << std::endl;
        std::cout << " expected: " << diff.expected << std::endl;
        std::cout << " obtained: " << diff.obtained << std::endl;
        std::cout << " absolute diff: " << absolute_diff << std::endl;
        std::cout << " relative diff: " << relative_diff << std::endl;
        std::cout << " (tolerance level = " << tolerance_level << ")" << std::endl;
      }
    }

    return false;
  }
  return true;
}

#include <map>

// Compare every output tensor of the plain interpreter ('interp') against the
// NNAPI-backed one ('nnapi'), dispatching on tensor type (u8 / s32 / f32 / bool).
// @throws std::runtime_error for an output of any other type
// @return true iff all output tensors matched
bool TfLiteInterpMatchApp::run(::tflite::Interpreter &interp, ::tflite::Interpreter &nnapi) const
{
  assert(interp.outputs() == nnapi.outputs());

  bool all_matched = true;

  using Comparator = std::function<bool(int id, ::tflite::Interpreter &, ::tflite::Interpreter &)>;

  std::map<TfLiteType, Comparator> comparators;

  comparators[kTfLiteUInt8] = [this](int id, ::tflite::Interpreter &interp,
                                     ::tflite::Interpreter &nnapi) {
    const auto expected = nnfw::tflite::TensorView<uint8_t>::make(interp, id);
    const auto obtained = nnfw::tflite::TensorView<uint8_t>::make(nnapi, id);

    return compareSingleTensorView(expected, obtained, id);
  };

  comparators[kTfLiteInt32] = [this](int id, ::tflite::Interpreter &interp,
                                     ::tflite::Interpreter &nnapi) {
    const auto expected = nnfw::tflite::TensorView<int32_t>::make(interp, id);
    const auto obtained = nnfw::tflite::TensorView<int32_t>::make(nnapi, id);

    return compareSingleTensorView(expected, obtained, id);
  };

  comparators[kTfLiteFloat32] = [this](int id, ::tflite::Interpreter &interp,
                                       ::tflite::Interpreter &nnapi) {
    const auto expected = nnfw::tflite::TensorView<float>::make(interp, id);
    const auto obtained = nnfw::tflite::TensorView<float>::make(nnapi, id);

    return compareSingleTensorView(expected, obtained, id);
  };

  comparators[kTfLiteBool] = [this](int id, ::tflite::Interpreter &interp,
                                    ::tflite::Interpreter &nnapi) {
    const auto expected = nnfw::tflite::TensorView<bool>::make(interp, id);
    const auto obtained = nnfw::tflite::TensorView<bool>::make(nnapi, id);

    return compareSingleTensorView(expected, obtained, id);
  };

  for (const auto &id : interp.outputs())
  {
    assert(interp.tensor(id)->type == nnapi.tensor(id)->type);

    auto it = comparators.find(interp.tensor(id)->type);

    if (it == comparators.end())
    {
      throw std::runtime_error{"Not supported output type"};
    }

    const auto &comparator = it->second;

    // Keep comparing remaining outputs even after a mismatch, so all are reported
    if (!comparator(id, interp, nnapi))
    {
      all_matched = false;
    }
  }

  return all_matched;
}

#include "misc/tensor/Object.h"

using namespace std::placeholders;

// Generate a random uint8: draw from the Gaussian _dist (per the NOTE below),
// rescale the nominal [-5, 5] range onto [0, 255], and clamp out-of-range draws
// to the end points.
template <> uint8_t RandomGenerator::generate<uint8_t>(void)
{
  // The value of type_range is 255.
  float type_range = static_cast<float>(std::numeric_limits<uint8_t>::max()) -
                     static_cast<float>(std::numeric_limits<uint8_t>::min());
  // Most _dist values range from -5.0 to 5.0.
  float min_range = -5.0f;
  float max_range = 5.0f;
  // NOTE shifted_relative_val has Gaussian distribution that origin mean was 0 and standard
  // deviation was 2. And then its values are distributed and shift to that mean is 127.5 and range
  // is about [0, 255].
  float shifted_relative_val = (_dist(_rand) - min_range) * type_range / (max_range - min_range);

  // shifted_relative_val is adjusted to be mapped to end points of the range, if it is out of range
  // values.
  if (shifted_relative_val < 0.0f)
  {
    return 0;
  }
  else if (shifted_relative_val > type_range)
  {
    return 255;
  }

  // Convert shifted_relative_val from float to uint8
  return static_cast<uint8_t>(shifted_relative_val);
}

// Generate a random bool with equal probability for true/false.
template <> bool RandomGenerator::generate<bool>(void)
{
  std::uniform_int_distribution<> dist(0, 1); // [0, 1]
  return dist(_rand);
}

#include "tflite/TensorLogger.h"
//
// Random Test Runner
//
// Build the model twice (plain T/F Lite and NNAPI-delegated), feed both with
// identical inputs, invoke both, and compare outputs within the configured tolerance.
// (This span ends mid-function; the comparison/return logic continues below.)
int RandomTestRunner::run(const nnfw::tflite::Builder &builder)
{
  auto tfl_interp = builder.build();
  auto nnapi = builder.build();

  tfl_interp->UseNNAPI(false);

  // Allocate Tensors
  tfl_interp->AllocateTensors();
  nnapi->AllocateTensors();

  assert(tfl_interp->inputs() == nnapi->inputs());

  using ::tflite::Interpreter;
  using Initializer = std::function<void(int id, Interpreter *, Interpreter *)>;

  // initializers fill input tensors; reseters pre-fill output tensors before the run
  std::map<TfLiteType, Initializer> initializers;
  std::map<TfLiteType, Initializer> reseters;

  // Generate signed 32-bit integer (s32) input
  initializers[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
    assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
    assert(nnapi->tensor(id)->type == kTfLiteInt32);

    auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
    auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);

    assert(tfl_interp_view.shape() == nnapi_view.shape());

    int32_t value = 0;

    // Sequential ramp 0,1,2,... written identically to both interpreters
    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
        << [&](const nnfw::misc::tensor::Index &ind) {
             // TODO Generate random values
             tfl_interp_view.at(ind) = value;
             nnapi_view.at(ind) = value;
             ++value;
           };
  };

  // Reset signed 32-bit integer (s32) output buffers to a constant 0
  reseters[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
    assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
    assert(nnapi->tensor(id)->type == kTfLiteInt32);

    auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
    auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);

    assert(tfl_interp_view.shape() == nnapi_view.shape());

    int32_t value = 0;

    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
        << [&](const nnfw::misc::tensor::Index &ind) {
             // TODO Generate random values
             tfl_interp_view.at(ind) = value;
             nnapi_view.at(ind) = value;
           };
  };

  // Fill unsigned 8-bit (u8) inputs with random data from _randgen
  initializers[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
    assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
    assert(nnapi->tensor(id)->type == kTfLiteUInt8);

    auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
    auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);

    assert(tfl_interp_view.shape() == nnapi_view.shape());

    // Disambiguate the overload of RandomGenerator::generate<uint8_t> for std::bind
    auto fp = static_cast<uint8_t (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
                                                       const ::nnfw::misc::tensor::Index &)>(
        &RandomGenerator::generate<uint8_t>);
    const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
                                                   std::bind(fp, _randgen, _1, _2));
    assert(tfl_interp_view.shape() == data.shape());

    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
        << [&](const nnfw::misc::tensor::Index &ind) {
             const auto value = data.at(ind);

             tfl_interp_view.at(ind) = value;
             nnapi_view.at(ind) = value;
           };
  };

  // Reset unsigned 8-bit (u8) output buffers to a constant 0
  reseters[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
    assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
    assert(nnapi->tensor(id)->type == kTfLiteUInt8);

    auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
    auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);

    assert(tfl_interp_view.shape() == nnapi_view.shape());

    // NOTE(review): 'data' below is generated but never used — the loop writes the
    // constant 'value' instead. Dead random generation; candidate for removal.
    auto fp = static_cast<uint8_t (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
                                                       const ::nnfw::misc::tensor::Index &)>(
        &RandomGenerator::generate<uint8_t>);
    const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
                                                   std::bind(fp, _randgen, _1, _2));
    assert(tfl_interp_view.shape() == data.shape());

    uint8_t value = 0;

    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
        << [&](const nnfw::misc::tensor::Index &ind) {
             tfl_interp_view.at(ind) = value;
             nnapi_view.at(ind) = value;
           };
  };

  // Fill 32-bit float (f32) inputs with random data from _randgen
  initializers[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
    assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
    assert(nnapi->tensor(id)->type == kTfLiteFloat32);

    auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
    auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);

    assert(tfl_interp_view.shape() == nnapi_view.shape());

    auto fp = static_cast<float (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
                                                     const ::nnfw::misc::tensor::Index &)>(
        &RandomGenerator::generate<float>);
    const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
                                                 std::bind(fp, _randgen, _1, _2));

    assert(tfl_interp_view.shape() == data.shape());

    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
        << [&](const nnfw::misc::tensor::Index &ind) {
             const auto value = data.at(ind);

             tfl_interp_view.at(ind) = value;
             nnapi_view.at(ind) = value;
           };
  };

  // Reset 32-bit float (f32) output buffers to a constant 0
  reseters[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
    assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
    assert(nnapi->tensor(id)->type == kTfLiteFloat32);

    auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
    auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);

    assert(tfl_interp_view.shape() == nnapi_view.shape());

    // NOTE(review): 'data' is generated but unused here too (see u8 reseter above)
    auto fp = static_cast<float (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
                                                     const ::nnfw::misc::tensor::Index &)>(
        &RandomGenerator::generate<float>);
    const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
                                                 std::bind(fp, _randgen, _1, _2));

    assert(tfl_interp_view.shape() == data.shape());

    float value = 0;

    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
        << [&](const nnfw::misc::tensor::Index &ind) {
             tfl_interp_view.at(ind) = value;
             nnapi_view.at(ind) = value;
           };
  };

  // Fill boolean inputs with random data from _randgen
  initializers[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
    assert(tfl_interp->tensor(id)->type == kTfLiteBool);
    assert(nnapi->tensor(id)->type == kTfLiteBool);

    auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
    auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);

    assert(tfl_interp_view.shape() == nnapi_view.shape());

    auto fp = static_cast<bool (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
                                                    const ::nnfw::misc::tensor::Index &)>(
        &RandomGenerator::generate<bool>);
    const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
                                                std::bind(fp, _randgen, _1, _2));

    assert(tfl_interp_view.shape() == data.shape());

    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
        << [&](const nnfw::misc::tensor::Index &ind) {
             const auto value = data.at(ind);

             tfl_interp_view.at(ind) = value;
             nnapi_view.at(ind) = value;
           };
  };

  // Reset boolean output buffers to a constant 'false'
  reseters[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
    assert(tfl_interp->tensor(id)->type == kTfLiteBool);
    assert(nnapi->tensor(id)->type == kTfLiteBool);

    auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
    auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);

    assert(tfl_interp_view.shape() == nnapi_view.shape());

    // NOTE(review): 'data' is generated but unused here too (see u8 reseter above)
    auto fp = static_cast<bool (RandomGenerator::*)(const ::nnfw::misc::tensor::Shape &,
                                                    const ::nnfw::misc::tensor::Index &)>(
        &RandomGenerator::generate<bool>);
    const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
                                                std::bind(fp, _randgen, _1, _2));

    assert(tfl_interp_view.shape() == data.shape());

    bool value = false;

    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
        << [&](const nnfw::misc::tensor::Index &ind) {
             tfl_interp_view.at(ind) = value;
             nnapi_view.at(ind) = value;
           };
  };

  // Fill IFM with random numbers
  for (const auto id : tfl_interp->inputs())
  {
    assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);

    auto it = initializers.find(tfl_interp->tensor(id)->type);

    if (it == initializers.end())
    {
      throw std::runtime_error{"Not supported input type"};
    }

    it->second(id, tfl_interp.get(), nnapi.get());
  }

  // Fill OFM with 0
  for (const auto id : tfl_interp->outputs())
  {
    assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);

    auto it = reseters.find(tfl_interp->tensor(id)->type);

    if (it == reseters.end())
    {
      // NOTE(review): message says "input" but this is the *output* loop — misleading diagnostic
      throw std::runtime_error{"Not supported input type"};
    }

    it->second(id, tfl_interp.get(), nnapi.get());
  }

  std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
  tfl_interp->Invoke();

  std::cout << "[NNAPI TEST] Run T/F Lite Interpreter with NNAPI" << std::endl;

  // UPSTREAM_DELEGATE=1 selects stock TFLite NNAPI; otherwise nnfw's NNAPIDelegate is used
  char *env = getenv("UPSTREAM_DELEGATE");

  if (env && !std::string(env).compare("1"))
  {
nnapi->UseNNAPI(true); + nnapi->Invoke(); + } + else + { + nnfw::tflite::NNAPIDelegate d; + + // WARNING + // primary_subgraph: Experimental interface. Return 1st sugbraph + if (d.BuildGraph(&nnapi.get()->primary_subgraph())) + { + throw std::runtime_error{"Failed to BuildGraph"}; + } + + if (d.Invoke(&nnapi.get()->primary_subgraph())) + { + throw std::runtime_error{"Failed to BuildGraph"}; + } + } + + // Compare OFM + std::cout << "[NNAPI TEST] Compare the result" << std::endl; + + const auto tolerance = _param.tolerance; + + auto equals = [tolerance](float lhs, float rhs) { + // NOTE Hybrid approach + // TODO Allow users to set tolerance for absolute_epsilon_equal + if (nnfw::misc::fp32::absolute_epsilon_equal(lhs, rhs)) + { + return true; + } + + return nnfw::misc::fp32::epsilon_equal(lhs, rhs, tolerance); + }; + + nnfw::misc::tensor::Comparator comparator(equals); + TfLiteInterpMatchApp app(comparator); + + app.verbose() = _param.verbose; + + bool res = app.run(*tfl_interp, *nnapi); + + if (!res) + { + return 255; + } + + std::cout << "[NNAPI TEST] PASSED" << std::endl; + + if (_param.tensor_logging) + nnfw::tflite::TensorLogger::get().save(_param.log_path, *tfl_interp); + + return 0; +} + +RandomTestRunner RandomTestRunner::make(uint32_t seed) +{ + RandomTestParam param; + + param.verbose = nnfw::misc::EnvVar("VERBOSE").asInt(0); + param.tolerance = nnfw::misc::EnvVar("TOLERANCE").asInt(1); + ; + + return RandomTestRunner{seed, param}; +} diff --git a/runtime/libs/tflite/src/FeatureView.cpp b/runtime/libs/tflite/src/FeatureView.cpp new file mode 100644 index 000000000..fdf5a4b00 --- /dev/null +++ b/runtime/libs/tflite/src/FeatureView.cpp @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tflite/FeatureView.h"
#include "tflite/TensorUtils.h"

#include <cassert>

namespace nnfw
{
namespace tflite
{

// Build a CHW feature shape from a T/F Lite tensor laid out as [N, H, W, C]
// (dims index 3 = channels, 1 = height, 2 = width).
nnfw::misc::feature::Shape getFeatureShape(const TfLiteTensor *tensor)
{
  nnfw::misc::feature::Shape shape{tensor->dims->data[3], tensor->dims->data[1],
                                   tensor->dims->data[2]};

  return shape;
}

// View over the index-th *input* tensor of 'interp'.
// The tensor must be a float feature map (asserted in debug builds).
FeatureView<float>::FeatureView(::tflite::Interpreter &interp, const InputIndex &index)
{
  const auto tensor_index = interp.inputs().at(index.asInt());
  auto tensor_ptr = interp.tensor(tensor_index);

  assert(isFloatTensor(tensor_ptr));
  assert(isFeatureTensor(tensor_ptr));

  _shape = getFeatureShape(tensor_ptr);
  _base = interp.typed_tensor<float>(tensor_index);
}

// View over the index-th *output* tensor of 'interp'.
// The tensor must be a float feature map (asserted in debug builds).
FeatureView<float>::FeatureView(::tflite::Interpreter &interp, const OutputIndex &index)
{
  const auto tensor_index = interp.outputs().at(index.asInt());
  auto tensor_ptr = interp.tensor(tensor_index);

  assert(isFloatTensor(tensor_ptr));
  assert(isFeatureTensor(tensor_ptr));

  _shape = getFeatureShape(tensor_ptr);
  _base = interp.typed_tensor<float>(tensor_index);
}

// Read-only element access at (channel, row, column).
float FeatureView<float>::at(uint32_t ch, uint32_t row, uint32_t col) const
{
  return *(_base + getElementOffset(ch, row, col));
}

// Mutable element access at (channel, row, column).
float &FeatureView<float>::at(uint32_t ch, uint32_t row, uint32_t col)
{
  return *(_base + getElementOffset(ch, row, col));
}

} // namespace tflite
} // namespace nnfw
diff --git a/runtime/libs/tflite/src/Quantization.cpp b/runtime/libs/tflite/src/Quantization.cpp
new file mode 100644
index 000000000..9c162c342
---
/dev/null
+++ b/runtime/libs/tflite/src/Quantization.cpp
@@ -0,0 +1,22 @@
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tflite/Quantization.h"

// Quantization parameters meaning "not quantized": scale 0.0, zero-point 0.
TfLiteQuantizationParams make_default_quantization(void)
{
  return TfLiteQuantizationParams{0.0f, 0};
}
diff --git a/runtime/libs/tflite/src/TensorShapeUtils.cpp b/runtime/libs/tflite/src/TensorShapeUtils.cpp
new file mode 100644
index 000000000..29628cd26
--- /dev/null
+++ b/runtime/libs/tflite/src/TensorShapeUtils.cpp
@@ -0,0 +1,29 @@
#include "tflite/TensorShapeUtils.h"

namespace nnfw
{
namespace tflite
{

// Numpy-style shape broadcast: ranks are right-aligned, missing leading dimensions
// are treated as 1, and each output dimension is the max of the aligned pair.
// NOTE(review): assumes the shapes are broadcast-compatible — incompatible
// dimensions are not detected, the larger one simply wins.
nnfw::misc::tensor::Shape broadcast(const nnfw::misc::tensor::Shape &lhs_shape,
                                    const nnfw::misc::tensor::Shape &rhs_shape)
{
  const uint32_t lhs_rank = lhs_shape.rank();
  const uint32_t rhs_rank = rhs_shape.rank();
  const uint32_t out_rank = std::max(lhs_rank, rhs_rank);
  const uint32_t lhs_rank_diff = out_rank - lhs_rank;
  const uint32_t rhs_rank_diff = out_rank - rhs_rank;

  nnfw::misc::tensor::Shape out_shape(out_rank);

  for (uint32_t axis = 0; axis < out_rank; ++axis)
  {
    out_shape.dim(axis) = std::max(axis < lhs_rank_diff ? 1 : lhs_shape.dim(axis - lhs_rank_diff),
                                   axis < rhs_rank_diff ? 1 : rhs_shape.dim(axis - rhs_rank_diff));
  }

  return out_shape;
}

} // namespace tflite
} // namespace nnfw
diff --git a/runtime/libs/tflite/src/TensorView.test.cpp b/runtime/libs/tflite/src/TensorView.test.cpp
new file mode 100644
index 000000000..c710b3c33
--- /dev/null
+++ b/runtime/libs/tflite/src/TensorView.test.cpp
@@ -0,0 +1,53 @@
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tflite/TensorView.h"

#include <cassert>

// Sanity-check TensorView<int> indexing over a 2x3 row-major buffer.
void int_test(void)
{
  int value[6] = {1, 2, 3, 4, 5, 6};

  const nnfw::misc::tensor::Shape shape{2, 3};
  const nnfw::tflite::TensorView<int> view{shape, value};

  assert(view.at(nnfw::misc::tensor::Index{0, 0}) == 1);
  assert(view.at(nnfw::misc::tensor::Index{0, 1}) == 2);
  assert(view.at(nnfw::misc::tensor::Index{0, 2}) == 3);
  assert(view.at(nnfw::misc::tensor::Index{1, 0}) == 4);
  assert(view.at(nnfw::misc::tensor::Index{1, 1}) == 5);
  assert(view.at(nnfw::misc::tensor::Index{1, 2}) == 6);
}

// Sanity-check TensorView<float> indexing, then run the int test.
int main(int argc, char **argv)
{
  float value[6] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};

  const nnfw::misc::tensor::Shape shape{2, 3};
  const nnfw::tflite::TensorView<float> view{shape, value};

  assert(view.at(nnfw::misc::tensor::Index{0, 0}) == 1.0f);
  assert(view.at(nnfw::misc::tensor::Index{0, 1}) == 2.0f);
  assert(view.at(nnfw::misc::tensor::Index{0, 2}) == 3.0f);
  assert(view.at(nnfw::misc::tensor::Index{1, 0}) == 4.0f);
  assert(view.at(nnfw::misc::tensor::Index{1, 1}) == 5.0f);
  assert(view.at(nnfw::misc::tensor::Index{1, 2}) == 6.0f);

  int_test();

  return 0;
}
diff --git a/runtime/libs/tflite/src/interp/FlatBufferBuilder.cpp b/runtime/libs/tflite/src/interp/FlatBufferBuilder.cpp
new file mode 100644
index 000000000..f54e67202
--- /dev/null
+++ b/runtime/libs/tflite/src/interp/FlatBufferBuilder.cpp
@@ -0,0 +1,40 @@
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tflite/interp/FlatBufferBuilder.h"

#include "tflite/ext/kernels/register.h"

namespace nnfw
{
namespace tflite
{

// Build an interpreter from the wrapped flatbuffer model using nnfw's op resolver.
// NOTE(review): InterpreterBuilder's return status is ignored; on failure the
// returned pointer is null — callers must check before use.
std::unique_ptr<::tflite::Interpreter> FlatBufferBuilder::build(void) const
{
  std::unique_ptr<::tflite::Interpreter> interpreter;

  nnfw::tflite::BuiltinOpResolver resolver;

  ::tflite::InterpreterBuilder builder(_model, resolver);

  builder(&interpreter);

  return interpreter;
}

} // namespace tflite
} // namespace nnfw
diff --git a/runtime/libs/tflite/src/interp/FunctionBuilder.cpp b/runtime/libs/tflite/src/interp/FunctionBuilder.cpp
new file mode 100644
index 000000000..599a4f393
--- /dev/null
+++ b/runtime/libs/tflite/src/interp/FunctionBuilder.cpp
@@ -0,0 +1,34 @@
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tflite/interp/FunctionBuilder.h"

namespace nnfw
{
namespace tflite
{

// Build an interpreter by applying the stored setup function to a fresh instance.
std::unique_ptr<::tflite::Interpreter> FunctionBuilder::build(void) const
{
  auto res = std::unique_ptr<::tflite::Interpreter>{new ::tflite::Interpreter};

  _fn(*res);

  return res;
}

} // namespace tflite
} // namespace nnfw
|