summaryrefslogtreecommitdiff
path: root/tests/tools/onert_run/src
diff options
context:
space:
mode:
Diffstat (limited to 'tests/tools/onert_run/src')
-rw-r--r--tests/tools/onert_run/src/allocation.h38
-rw-r--r--tests/tools/onert_run/src/args.cc393
-rw-r--r--tests/tools/onert_run/src/args.h110
-rw-r--r--tests/tools/onert_run/src/formatter.h47
-rw-r--r--tests/tools/onert_run/src/h5formatter.cc258
-rw-r--r--tests/tools/onert_run/src/h5formatter.h41
-rw-r--r--tests/tools/onert_run/src/nnfw_util.cc49
-rw-r--r--tests/tools/onert_run/src/nnfw_util.h37
-rw-r--r--tests/tools/onert_run/src/onert_run.cc390
-rw-r--r--tests/tools/onert_run/src/randomgen.cc77
-rw-r--r--tests/tools/onert_run/src/randomgen.h40
-rw-r--r--tests/tools/onert_run/src/rawformatter.cc97
-rw-r--r--tests/tools/onert_run/src/rawformatter.h40
-rw-r--r--tests/tools/onert_run/src/types.h27
14 files changed, 1644 insertions, 0 deletions
diff --git a/tests/tools/onert_run/src/allocation.h b/tests/tools/onert_run/src/allocation.h
new file mode 100644
index 000000000..798bf9d06
--- /dev/null
+++ b/tests/tools/onert_run/src/allocation.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_RUN_ALLOCATION_H__
+#define __ONERT_RUN_ALLOCATION_H__
+
+#include <cstdlib>
+#include <cstdint>
+
namespace onert_run
{
/**
 * @brief RAII owner of a single malloc'd buffer used for tensor data.
 *
 * Copying is disabled because the implicitly generated copy operations would
 * duplicate the raw pointer and cause a double free in the destructor.
 * Ownership can be transferred with move operations instead.
 */
class Allocation
{
public:
  Allocation() : data_(nullptr) {}
  ~Allocation() { free(data_); }

  // Non-copyable (prevents double-free through the default copy).
  Allocation(const Allocation &) = delete;
  Allocation &operator=(const Allocation &) = delete;

  // Movable: transfers buffer ownership and leaves the source empty.
  Allocation(Allocation &&o) noexcept : data_(o.data_) { o.data_ = nullptr; }
  Allocation &operator=(Allocation &&o) noexcept
  {
    if (this != &o)
    {
      free(data_);
      data_ = o.data_;
      o.data_ = nullptr;
    }
    return *this;
  }

  /// @return the owned buffer, or nullptr if nothing has been allocated
  void *data() const { return data_; }

  /// Allocate a fresh buffer of @p sz bytes, releasing any previous buffer
  /// (the original leaked it on repeated calls).
  /// @return the new buffer, or nullptr on allocation failure
  void *alloc(uint64_t sz)
  {
    free(data_);
    return data_ = malloc(sz);
  }

private:
  void *data_; // owned; released in the destructor
};
} // namespace onert_run
+
+#endif // __ONERT_RUN_ALLOCATION_H__
diff --git a/tests/tools/onert_run/src/args.cc b/tests/tools/onert_run/src/args.cc
new file mode 100644
index 000000000..a64d81db5
--- /dev/null
+++ b/tests/tools/onert_run/src/args.cc
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "args.h"
+
+#include <functional>
+#include <iostream>
+#include <sys/stat.h>
+#include <json/json.h>
+
+namespace
+{
+
+// This function parses a json object and returns as a vector of integers
+// For example,
+// [0, [1, 2, 3, 4], 3, 40, 4, []] in JSON
+// is converted to:
+// {
+// 0 -> [1, 2, 3, 4]
+// 3 -> 40
+// 4 -> []
+// } in std::unordered_map. Note that the value type is still Json::Value.
+std::unordered_map<uint32_t, Json::Value> argArrayToMap(const Json::Value &jsonval)
+{
+ if (!jsonval.isArray() || (jsonval.size() % 2 != 0))
+ {
+ std::cerr << "JSON argument must be an even-sized array in JSON\n";
+ exit(1);
+ }
+
+ std::unordered_map<uint32_t, Json::Value> ret;
+ for (uint32_t i = 0; i < jsonval.size(); i += 2)
+ {
+ if (!jsonval[i].isUInt())
+ {
+ std::cerr << "Key values(values in even indices) must be unsigned integers\n";
+ exit(1);
+ }
+ uint32_t key = jsonval[i].asUInt();
+ Json::Value val = jsonval[i + 1];
+ ret[key] = jsonval[i + 1];
+ }
+ return ret;
+}
+
+// param shape_str is a form of, e.g., "[1, [2, 3], 3, []]" or "h5"
+void handleShapeJsonParam(onert_run::TensorShapeMap &shape_map, const std::string &shape_str)
+{
+ Json::Value root;
+ Json::Reader reader;
+ if (!reader.parse(shape_str, root, false))
+ {
+ std::cerr << "Invalid JSON format for output_sizes \"" << shape_str << "\"\n";
+ exit(1);
+ }
+
+ auto arg_map = argArrayToMap(root);
+ for (auto &pair : arg_map)
+ {
+ uint32_t key = pair.first;
+ Json::Value &shape_json = pair.second;
+ if (!shape_json.isArray())
+ {
+ std::cerr << "All the values must be list: " << shape_str << "\n";
+ exit(1);
+ }
+
+ std::vector<int> shape;
+ for (auto &dim_json : shape_json)
+ {
+ if (!dim_json.isUInt())
+ {
+ std::cerr << "All the dims should be dim >= 0: " << shape_str << "\n";
+ exit(1);
+ }
+
+ shape.emplace_back(dim_json.asUInt64());
+ }
+
+ shape_map[key] = shape;
+ }
+}
+
// Validate that a model filename was supplied and refers to an existing file.
// On failure, prints a message to stderr and terminates the process.
void checkModelfile(const std::string &model_filename)
{
  if (model_filename.empty())
  {
    // TODO Print usage instead of the below message
    std::cerr << "Please specify model file. Run with `--help` for usage."
              << "\n";

    exit(1);
  }

  // Existence check only (F_OK); readability is verified later at load time.
  if (access(model_filename.c_str(), F_OK) == -1)
  {
    std::cerr << "Model file not found: " << model_filename << "\n";
    exit(1);
  }
}
+
// Validate that an nnpackage path was supplied and exists on disk.
// On failure, prints a message to stderr and terminates the process.
void checkPackage(const std::string &package_filename)
{
  if (package_filename.empty())
  {
    // TODO Print usage instead of the below message
    std::cerr << "Please specify nnpackage file. Run with `--help` for usage."
              << "\n";

    exit(1);
  }

  // Existence check only (F_OK); content validation happens at load time.
  if (access(package_filename.c_str(), F_OK) == -1)
  {
    std::cerr << "nnpackage not found: " << package_filename << "\n";
    exit(1);
  }
}
+
+} // namespace
+
+namespace onert_run
+{
+
/// Build the option descriptions and immediately parse the given command
/// line. Invalid arguments terminate the process (see Parse()).
Args::Args(const int argc, char **argv)
{
  Initialize();
  Parse(argc, argv);
}
+
+void Args::Initialize(void)
+{
+ auto process_nnpackage = [&](const std::string &package_filename) {
+ _package_filename = package_filename;
+
+ std::cerr << "Package Filename " << _package_filename << std::endl;
+ checkPackage(package_filename);
+ };
+
+ auto process_modelfile = [&](const std::string &model_filename) {
+ _model_filename = model_filename;
+
+ std::cerr << "Model Filename " << _model_filename << std::endl;
+ checkModelfile(model_filename);
+
+ _use_single_model = true;
+ };
+
+ auto process_path = [&](const std::string &path) {
+ struct stat sb;
+ if (stat(path.c_str(), &sb) == 0)
+ {
+ if (sb.st_mode & S_IFDIR)
+ {
+ _package_filename = path;
+ checkPackage(path);
+ std::cerr << "Package Filename " << path << std::endl;
+ }
+ else
+ {
+ _model_filename = path;
+ checkModelfile(path);
+ std::cerr << "Model Filename " << path << std::endl;
+ _use_single_model = true;
+ }
+ }
+ else
+ {
+ std::cerr << "Cannot find: " << path << "\n";
+ exit(1);
+ }
+ };
+
+ auto process_output_sizes = [&](const std::string &output_sizes_json_str) {
+ Json::Value root;
+ Json::Reader reader;
+ if (!reader.parse(output_sizes_json_str, root, false))
+ {
+ std::cerr << "Invalid JSON format for output_sizes \"" << output_sizes_json_str << "\"\n";
+ exit(1);
+ }
+
+ auto arg_map = argArrayToMap(root);
+ for (auto &pair : arg_map)
+ {
+ uint32_t key = pair.first;
+ Json::Value &val_json = pair.second;
+ if (!val_json.isUInt())
+ {
+ std::cerr << "All the values in `output_sizes` must be unsigned integers\n";
+ exit(1);
+ }
+ uint32_t val = val_json.asUInt();
+ _output_sizes[key] = val;
+ }
+ };
+
+ auto process_shape_prepare = [&](const std::string &shape_str) {
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ if (shape_str == "H5" || shape_str == "h5")
+ {
+ _when_to_use_h5_shape = WhenToUseH5Shape::PREPARE;
+ return;
+ }
+#endif
+ try
+ {
+ handleShapeJsonParam(_shape_prepare, shape_str);
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << "error with '--shape_prepare' option: " << shape_str << std::endl;
+ exit(1);
+ }
+ };
+
+ auto process_shape_run = [&](const std::string &shape_str) {
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ if (shape_str == "H5" || shape_str == "h5")
+ {
+ _when_to_use_h5_shape = WhenToUseH5Shape::RUN;
+ return;
+ }
+#endif
+ try
+ {
+ handleShapeJsonParam(_shape_run, shape_str);
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << "error with '--shape_run' option: " << shape_str << std::endl;
+ exit(1);
+ }
+ };
+
+ // General options
+ po::options_description general("General options", 100);
+
+ // clang-format off
+ general.add_options()
+ ("help,h", "Print available options")
+ ("version", "Print version and exit immediately")
+ ("nnpackage", po::value<std::string>()->notifier(process_nnpackage), "NN Package file(directory) name")
+ ("modelfile", po::value<std::string>()->notifier(process_modelfile), "NN Model filename")
+ ("path", po::value<std::string>()->notifier(process_path), "NN Package or NN Modelfile path")
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ ("dump,d", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _dump_filename = v; }), "Output filename")
+ ("load,l", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _load_filename = v; }), "Input filename")
+#endif
+ ("dump:raw", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _dump_raw_filename = v; }), "Raw Output filename")
+ ("load:raw", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _load_raw_filename = v; }), "Raw Input filename")
+ ("output_sizes", po::value<std::string>()->notifier(process_output_sizes),
+ "The output buffer size in JSON 1D array\n"
+ "If not given, the model's output sizes are used\n"
+ "e.g. '[0, 40, 2, 80]' to set 0th tensor to 40 and 2nd tensor to 80.\n")
+ ("num_runs,r", po::value<int>()->default_value(1)->notifier([&](const auto &v) { _num_runs = v; }), "The number of runs")
+ ("warmup_runs,w", po::value<int>()->default_value(0)->notifier([&](const auto &v) { _warmup_runs = v; }), "The number of warmup runs")
+ ("run_delay,t", po::value<int>()->default_value(-1)->notifier([&](const auto &v) { _run_delay = v; }), "Delay time(us) between runs (as default no delay")
+ ("gpumem_poll,g", po::value<bool>()->default_value(false)->notifier([&](const auto &v) { _gpumem_poll = v; }), "Check gpu memory polling separately")
+ ("mem_poll,m", po::value<bool>()->default_value(false)->notifier([&](const auto &v) { _mem_poll = v; }), "Check memory polling")
+ ("write_report,p", po::value<bool>()->default_value(false)->notifier([&](const auto &v) { _write_report = v; }),
+ "Write report\n"
+ "{exec}-{nnpkg|modelfile}-{backend}.csv will be generated.\n"
+ "e.g. onert_run-UNIT_Add_000-acl_cl.csv.\n"
+ "{nnpkg|modelfile} name may be changed to realpath if you use symbolic-link.")
+ ("shape_prepare", po::value<std::string>()->default_value("[]")->notifier(process_shape_prepare),
+ "Please refer to the description of 'shape_run'")
+ ("shape_run", po::value<std::string>()->default_value("[]")->notifier(process_shape_run),
+ "'--shape_prepare: set shape of tensors before compilation (before calling nnfw_prepare()).\n"
+ "'--shape_run: set shape of tensors before running (before calling nnfw_run()).\n"
+ "Allowed value:.\n"
+ "'[0, [1, 2], 2, []]': set 0th tensor to [1, 2] and 2nd tensor to [] (scalar).\n"
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ "'h5': read shape(s) from H5 input file. '--load' should also be provided.\n"
+ "if '--load' option is provided but '--shape_prepare' or '--shape_run' is not provided,\n"
+ "'--shape_run h5' will be used by default.\n"
+#endif
+ "For detailed description, please consutl the description of nnfw_set_input_tensorinfo()\n"
+ )
+ ("verbose_level,v", po::value<int>()->default_value(0)->notifier([&](const auto &v) { _verbose_level = v; }),
+ "Verbose level\n"
+ "0: prints the only result. Messages btw run don't print\n"
+ "1: prints result and message btw run\n"
+ "2: prints all of messages to print\n")
+ ("quantize,q", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _quantize = v; }), "Request quantization with type (int8 or int16)")
+ ("qpath", po::value<std::string>()->default_value("")->notifier([&](const auto &v) { _quantized_model_path = v; }),
+ "Path to export quantized model.\n"
+ "If it is not set, the quantized model will be exported to the same directory of the original model/package with q8/q16 suffix.")
+ ;
+ // clang-format on
+
+ _options.add(general);
+ _positional.add("path", -1);
+}
+
/// Parse the command line into member fields.
/// Handles `--help`/`--version` eagerly, validates option combinations, then
/// runs po::notify() which fires the notifier callbacks registered in
/// Initialize(). Exits the process on `--help` and on notifier failures.
void Args::Parse(const int argc, char **argv)
{
  po::variables_map vm;
  po::store(po::command_line_parser(argc, argv).options(_options).positional(_positional).run(),
            vm);

  if (vm.count("help"))
  {
    std::cout << "onert_run\n\n";
    std::cout << "Usage: " << argv[0] << " path to nnpackage root directory [<options>]\n\n";
    std::cout << _options;
    std::cout << "\n";

    exit(0);
  }

  if (vm.count("version"))
  {
    // Defer actual version printing to the caller; stop parsing here.
    _print_version = true;
    return;
  }

  {
    // Reject option pairs that were both given explicitly (non-defaulted).
    // NOTE(review): the po::error thrown here propagates out of Parse() —
    // confirm the caller reports it, otherwise the process terminates.
    auto conflicting_options = [&](const std::string &o1, const std::string &o2) {
      if ((vm.count(o1) && !vm[o1].defaulted()) && (vm.count(o2) && !vm[o2].defaulted()))
      {
        throw boost::program_options::error(std::string("Two options '") + o1 + "' and '" + o2 +
                                            "' cannot be given at once.");
      }
    };

    // calling, e.g., "onert_run .. --shape_prepare .. --shape_run .." should theoretically
    // work but allowing both options together on command line makes the usage and implementation
    // of onert_run too complicated. Therefore let's not allow those options together.
    conflicting_options("shape_prepare", "shape_run");

    // Cannot use both single model file and nnpackage at once
    conflicting_options("modelfile", "nnpackage");

    // Require modelfile, nnpackage, or path
    if (!vm.count("modelfile") && !vm.count("nnpackage") && !vm.count("path"))
      throw boost::program_options::error(
        std::string("Require one of options modelfile, nnpackage, or path."));
  }

  try
  {
    po::notify(vm);
  }
  catch (const std::bad_cast &e)
  {
    std::cerr << "Bad cast error - " << e.what() << '\n';
    exit(1);
  }

  // This must be run after `notify` as `_warmup_runs` must have been processed before.
  if (vm.count("mem_poll"))
  {
    // Instead of EXECUTE to avoid overhead, memory polling runs on WARMUP
    if (_mem_poll && _warmup_runs == 0)
    {
      _warmup_runs = 1;
    }
  }
}
+
+bool Args::shapeParamProvided()
+{
+ bool provided = false;
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ // "--shape_run h5" or "--shape_prepare h5" was provided
+ provided = (getWhenToUseH5Shape() != WhenToUseH5Shape::NOT_PROVIDED);
+#endif
+ // specific shape was provided
+ // e.g., "--shape_run '[0, [10, 1]]'" or "--shape_prepare '[0, [10, 1]]'"
+ provided |= (!getShapeMapForPrepare().empty()) || (!getShapeMapForRun().empty());
+
+ return provided;
+}
+
+} // end of namespace onert_run
diff --git a/tests/tools/onert_run/src/args.h b/tests/tools/onert_run/src/args.h
new file mode 100644
index 000000000..97d9b1af1
--- /dev/null
+++ b/tests/tools/onert_run/src/args.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_RUN_ARGS_H__
+#define __ONERT_RUN_ARGS_H__
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include <boost/program_options.hpp>
+
+#include "types.h"
+
+namespace po = boost::program_options;
+
+namespace onert_run
+{
+
// Maps an input tensor index to the shape requested for that tensor.
using TensorShapeMap = std::unordered_map<uint32_t, TensorShape>;

#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
// When (if ever) input shapes read from the H5 input file should be applied.
enum class WhenToUseH5Shape
{
  NOT_PROVIDED, // Param not provided
  PREPARE, // read shapes in h5 file and set them as inputs' shape before calling nnfw_prepare()
  RUN,     // read shapes in h5 file and set them as inputs' shape before calling nnfw_run()
};
#endif
+
+class Args
+{
+public:
+ Args(const int argc, char **argv);
+ void print(void);
+
+ const std::string &getPackageFilename(void) const { return _package_filename; }
+ const std::string &getModelFilename(void) const { return _model_filename; }
+ const bool useSingleModel(void) const { return _use_single_model; }
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ const std::string &getDumpFilename(void) const { return _dump_filename; }
+ const std::string &getLoadFilename(void) const { return _load_filename; }
+ WhenToUseH5Shape getWhenToUseH5Shape(void) const { return _when_to_use_h5_shape; }
+#endif
+ const std::string &getDumpRawFilename(void) const { return _dump_raw_filename; }
+ const std::string &getLoadRawFilename(void) const { return _load_raw_filename; }
+ const int getNumRuns(void) const { return _num_runs; }
+ const int getWarmupRuns(void) const { return _warmup_runs; }
+ const int getRunDelay(void) const { return _run_delay; }
+ std::unordered_map<uint32_t, uint32_t> getOutputSizes(void) const { return _output_sizes; }
+ const bool getGpuMemoryPoll(void) const { return _gpumem_poll; }
+ const bool getMemoryPoll(void) const { return _mem_poll; }
+ const bool getWriteReport(void) const { return _write_report; }
+ const bool printVersion(void) const { return _print_version; }
+ TensorShapeMap &getShapeMapForPrepare() { return _shape_prepare; }
+ TensorShapeMap &getShapeMapForRun() { return _shape_run; }
+ /// @brief Return true if "--shape_run" or "--shape_prepare" is provided
+ bool shapeParamProvided();
+ const int getVerboseLevel(void) const { return _verbose_level; }
+ const std::string &getQuantize(void) const { return _quantize; }
+ const std::string &getQuantizedModelPath(void) const { return _quantized_model_path; }
+
+private:
+ void Initialize();
+ void Parse(const int argc, char **argv);
+
+private:
+ po::positional_options_description _positional;
+ po::options_description _options;
+
+ std::string _package_filename;
+ std::string _model_filename;
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ std::string _dump_filename;
+ std::string _load_filename;
+ WhenToUseH5Shape _when_to_use_h5_shape = WhenToUseH5Shape::NOT_PROVIDED;
+#endif
+ std::string _dump_raw_filename;
+ std::string _load_raw_filename;
+ TensorShapeMap _shape_prepare;
+ TensorShapeMap _shape_run;
+ int _num_runs;
+ int _warmup_runs;
+ int _run_delay;
+ std::unordered_map<uint32_t, uint32_t> _output_sizes;
+ bool _gpumem_poll;
+ bool _mem_poll;
+ bool _write_report;
+ bool _print_version = false;
+ int _verbose_level;
+ bool _use_single_model = false;
+ std::string _quantize;
+ std::string _quantized_model_path;
+};
+
+} // end of namespace onert_run
+
+#endif // __ONERT_RUN_ARGS_H__
diff --git a/tests/tools/onert_run/src/formatter.h b/tests/tools/onert_run/src/formatter.h
new file mode 100644
index 000000000..5b73d2337
--- /dev/null
+++ b/tests/tools/onert_run/src/formatter.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_RUN_FORMATTER_H__
+#define __ONERT_RUN_FORMATTER_H__
+
+#include <string>
+#include <vector>
+
+#include "types.h"
+#include "allocation.h"
+
+struct nnfw_session;
+
+namespace onert_run
+{
/// Abstract interface for reading model inputs from, and writing model
/// outputs to, files of a specific on-disk format (e.g. HDF5, raw binary).
class Formatter
{
public:
  virtual ~Formatter() = default;
  /// @param sess Non-owning pointer to the session used to query tensor info
  Formatter(nnfw_session *sess) : session_(sess) {}
  /// Read input tensors from @p filename into the buffers in @p inputs.
  virtual void loadInputs(const std::string &filename, std::vector<Allocation> &inputs) = 0;
  /// Write the output buffers in @p outputs to @p filename.
  virtual void dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs) = 0;
  /// Read only the tensor shapes stored in @p filename.
  /// Default implementation: the format carries no shape info (empty vector).
  virtual std::vector<TensorShape> readTensorShapes(const std::string &filename)
  {
    return std::vector<TensorShape>();
  };

protected:
  nnfw_session *session_; // not owned by this class
};
+} // namespace onert_run
+
+#endif // __ONERT_RUN_FORMATTER_H__
diff --git a/tests/tools/onert_run/src/h5formatter.cc b/tests/tools/onert_run/src/h5formatter.cc
new file mode 100644
index 000000000..5ea6e4c4a
--- /dev/null
+++ b/tests/tools/onert_run/src/h5formatter.cc
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "h5formatter.h"
+#include "nnfw.h"
+#include "nnfw_util.h"
+
+#include <iostream>
+#include <stdexcept>
+#include <H5Cpp.h>
+
+namespace
+{
+onert_run::TensorShape getShape(H5::DataSet &data_set)
+{
+ std::vector<hsize_t> h5_shape; // hsize_t is unsigned long long
+ H5::DataSpace data_space = data_set.getSpace();
+ int rank = data_space.getSimpleExtentNdims();
+ h5_shape.resize(rank);
+
+ // read shape info from H5 file
+ data_space.getSimpleExtentDims(h5_shape.data(), NULL);
+
+ onert_run::TensorShape shape;
+ for (auto dim : h5_shape)
+ shape.emplace_back(static_cast<int>(dim));
+
+ return shape;
+}
+} // namespace
+
+namespace onert_run
+{
+static const char *h5_value_grpname = "value";
+
+std::vector<TensorShape> H5Formatter::readTensorShapes(const std::string &filename)
+{
+ uint32_t num_inputs;
+ NNPR_ENSURE_STATUS(nnfw_input_size(session_, &num_inputs));
+ std::vector<TensorShape> tensor_shapes;
+
+ try
+ {
+ H5::Exception::dontPrint();
+
+ H5::H5File file(filename, H5F_ACC_RDONLY);
+ H5::Group value_group = file.openGroup(h5_value_grpname);
+
+ // Constraints: if there are n data set names, they should be unique and
+ // one of [ "0", "1", .. , "n-1" ]
+ for (uint32_t i = 0; i < num_inputs; ++i)
+ {
+ H5::DataSet data_set = value_group.openDataSet(std::to_string(i));
+ H5::DataType type = data_set.getDataType();
+ auto shape = getShape(data_set);
+
+ tensor_shapes.emplace_back(shape);
+ }
+
+ return tensor_shapes;
+ }
+ catch (const H5::Exception &e)
+ {
+ H5::Exception::printErrorStack();
+ std::exit(-1);
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << e.what() << std::endl;
+ std::exit(-1);
+ }
+}
+
/// Load input tensors for the session from HDF5 file @p filename.
/// For each input index i: reads dataset "value/<i>", allocates a matching
/// buffer in inputs[i], verifies the stored H5 type against the model's
/// dtype, then registers the buffer via nnfw_set_input(). Exits the process
/// on any error.
void H5Formatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
{
  uint32_t num_inputs;
  NNPR_ENSURE_STATUS(nnfw_input_size(session_, &num_inputs));
  try
  {
    // Turn off the automatic error printing.
    H5::Exception::dontPrint();

    H5::H5File file(filename, H5F_ACC_RDONLY);
    H5::Group value_group = file.openGroup(h5_value_grpname);
    for (uint32_t i = 0; i < num_inputs; ++i)
    {
      nnfw_tensorinfo ti;
      NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session_, i, &ti));

      // TODO Add Assert(nnfw shape, h5 file shape size)

      // allocate memory for data
      auto bufsz = bufsize_for(&ti);
      inputs[i].alloc(bufsz);

      H5::DataSet data_set = value_group.openDataSet(std::to_string(i));
      H5::DataType type = data_set.getDataType();
      // Either endianness is accepted in the file; reading with a NATIVE_*
      // memory type lets the HDF5 library convert to host byte order.
      switch (ti.dtype)
      {
        case NNFW_TYPE_TENSOR_FLOAT32:
          if (type == H5::PredType::IEEE_F32BE || type == H5::PredType::IEEE_F32LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_FLOAT);
          else
            throw std::runtime_error("model input type is f32. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_INT32:
          if (type == H5::PredType::STD_I32BE || type == H5::PredType::STD_I32LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_INT32);
          else
            throw std::runtime_error("model input type is i32. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_INT64:
          if (type == H5::PredType::STD_I64BE || type == H5::PredType::STD_I64LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_INT64);
          else
            throw std::runtime_error("model input type is i64. But h5 data type is different.");
          break;
        // The three uint8-sized types share one storage representation.
        case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
        case NNFW_TYPE_TENSOR_BOOL:
        case NNFW_TYPE_TENSOR_UINT8:
          if (type == H5::PredType::STD_U8BE || type == H5::PredType::STD_U8LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_UINT8);
          else
            throw std::runtime_error(
              "model input type is qasymm8, bool or uint8. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
          if (type == H5::PredType::STD_I8BE || type == H5::PredType::STD_I8LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_INT8);
          else
            throw std::runtime_error("model input type is int8. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
          throw std::runtime_error("NYI for NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED type");
        default:
          throw std::runtime_error("onert_run can load f32, i32, qasymm8, bool and uint8.");
      }
      NNPR_ENSURE_STATUS(nnfw_set_input(session_, i, ti.dtype, inputs[i].data(), bufsz));
      NNPR_ENSURE_STATUS(nnfw_set_input_layout(session_, i, NNFW_LAYOUT_CHANNELS_LAST));
    }
  }
  catch (const H5::Exception &e)
  {
    H5::Exception::printErrorStack();
    std::exit(-1);
  }
  catch (const std::exception &e)
  {
    std::cerr << e.what() << std::endl;
    std::exit(-1);
  }
};
+
/// Dump the session's output tensors to HDF5 file @p filename (truncating any
/// existing file). Output index i is written as dataset "value/<i>".
/// Exits the process on any error.
void H5Formatter::dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs)
{
  uint32_t num_outputs;
  NNPR_ENSURE_STATUS(nnfw_output_size(session_, &num_outputs));
  try
  {
    // Turn off the automatic error printing.
    H5::Exception::dontPrint();

    H5::H5File file(filename, H5F_ACC_TRUNC);
    H5::Group value_group = file.createGroup(h5_value_grpname);
    for (uint32_t i = 0; i < num_outputs; i++)
    {
      nnfw_tensorinfo ti;
      NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session_, i, &ti));
      // Convert dims to hsize_t; dynamic (negative) dims cannot be dumped.
      std::vector<hsize_t> dims(ti.rank);
      for (uint32_t j = 0; j < ti.rank; ++j)
      {
        if (ti.dims[j] >= 0)
          dims[j] = static_cast<hsize_t>(ti.dims[j]);
        else
        {
          std::cerr << "Negative dimension in output tensor" << std::endl;
          exit(-1);
        }
      }
      H5::DataSpace data_space(ti.rank, dims.data());
      // NOTE(review): on-disk byte order is mixed — f32/u8 are written
      // big-endian while i32/i64/i8 are little-endian; confirm intended.
      switch (ti.dtype)
      {
        case NNFW_TYPE_TENSOR_FLOAT32:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::IEEE_F32BE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_FLOAT);
          break;
        }
        case NNFW_TYPE_TENSOR_INT32:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_I32LE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT32);
          break;
        }
        case NNFW_TYPE_TENSOR_INT64:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_I64LE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT64);
          break;
        }
        case NNFW_TYPE_TENSOR_UINT8:
        case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_U8BE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_UINT8);
          break;
        }
        case NNFW_TYPE_TENSOR_BOOL:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_U8LE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT8);
          break;
        }
        case NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_I8LE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT8);
          break;
        }
        case NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
          throw std::runtime_error("NYI for NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED type");
        default:
          throw std::runtime_error("onert_run can dump f32, i32, qasymm8, bool and uint8.");
      }
    }
  }
  catch (const H5::Exception &e)
  {
    H5::Exception::printErrorStack();
    std::exit(-1);
  }
  // NOTE(review): only std::runtime_error is caught here (unlike loadInputs,
  // which catches std::exception); other exceptions propagate — confirm.
  catch (const std::runtime_error &e)
  {
    std::cerr << "Error during dumpOutputs on onert_run : " << e.what() << std::endl;
    std::exit(-1);
  }
};
+
+} // end of namespace onert_run
diff --git a/tests/tools/onert_run/src/h5formatter.h b/tests/tools/onert_run/src/h5formatter.h
new file mode 100644
index 000000000..7ebb33f2e
--- /dev/null
+++ b/tests/tools/onert_run/src/h5formatter.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_RUN_H5FORMATTER_H__
+#define __ONERT_RUN_H5FORMATTER_H__
+
+#include "allocation.h"
+#include "formatter.h"
+#include "types.h"
+
+#include <string>
+#include <vector>
+
+struct nnfw_session;
+
+namespace onert_run
+{
/// Formatter implementation for HDF5 (.h5) input/output files.
/// Tensors live as datasets named "0", "1", ... under the "value" group.
class H5Formatter : public Formatter
{
public:
  H5Formatter(nnfw_session *sess) : Formatter(sess) {}
  /// Read the per-input tensor shapes stored in @p filename.
  std::vector<TensorShape> readTensorShapes(const std::string &filename) override;
  /// Allocate buffers in @p inputs and fill them from @p filename.
  void loadInputs(const std::string &filename, std::vector<Allocation> &inputs) override;
  /// Write the buffers in @p outputs to @p filename (truncates the file).
  void dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs) override;
};
+} // namespace onert_run
+
+#endif // __ONERT_RUN_H5FORMATTER_H__
diff --git a/tests/tools/onert_run/src/nnfw_util.cc b/tests/tools/onert_run/src/nnfw_util.cc
new file mode 100644
index 000000000..0a21395fd
--- /dev/null
+++ b/tests/tools/onert_run/src/nnfw_util.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <string>
+#include "nnfw.h"
+
+namespace onert_run
+{
+uint64_t num_elems(const nnfw_tensorinfo *ti)
+{
+ uint64_t n = 1;
+ for (uint32_t i = 0; i < ti->rank; ++i)
+ {
+ assert(ti->dims[i] >= 0);
+ n *= ti->dims[i];
+ }
+ return n;
+}
+
+uint64_t bufsize_for(const nnfw_tensorinfo *ti)
+{
+ static int elmsize[] = {
+ sizeof(float), /* NNFW_TYPE_TENSOR_FLOAT32 */
+ sizeof(int), /* NNFW_TYPE_TENSOR_INT32 */
+ sizeof(uint8_t), /* NNFW_TYPE_TENSOR_QUANT8_ASYMM */
+ sizeof(bool), /* NNFW_TYPE_TENSOR_BOOL = 3 */
+ sizeof(uint8_t), /* NNFW_TYPE_TENSOR_UINT8 = 4 */
+ sizeof(int64_t), /* NNFW_TYPE_TENSOR_INT64 = 5 */
+ sizeof(int8_t), /* NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED = 6 */
+ sizeof(int16_t), /* NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED = 7 */
+ };
+ return elmsize[ti->dtype] * num_elems(ti);
+}
+
+} // namespace onert_run
diff --git a/tests/tools/onert_run/src/nnfw_util.h b/tests/tools/onert_run/src/nnfw_util.h
new file mode 100644
index 000000000..1fcdfdf19
--- /dev/null
+++ b/tests/tools/onert_run/src/nnfw_util.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_RUN_NNFW_UTIL_H__
+#define __ONERT_RUN_NNFW_UTIL_H__
+
+#include "nnfw.h"
+
// Abort the whole process (exit code -1) when an nnfw C-API call reports an
// error. The argument is evaluated exactly once.
#define NNPR_ENSURE_STATUS(a) \
  do                          \
  {                           \
    if ((a) != NNFW_STATUS_NO_ERROR) \
    {                         \
      exit(-1);               \
    }                         \
  } while (0)

namespace onert_run
{
// Number of elements in the tensor (product of its dimension extents).
uint64_t num_elems(const nnfw_tensorinfo *ti);
// Required buffer size in bytes for the tensor (element size * element count).
uint64_t bufsize_for(const nnfw_tensorinfo *ti);
} // end of namespace onert_run
+
+#endif // __ONERT_RUN_NNFW_UTIL_H__
diff --git a/tests/tools/onert_run/src/onert_run.cc b/tests/tools/onert_run/src/onert_run.cc
new file mode 100644
index 000000000..0bc64bb2b
--- /dev/null
+++ b/tests/tools/onert_run/src/onert_run.cc
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "allocation.h"
+#include "args.h"
+#include "benchmark.h"
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+#include "h5formatter.h"
+#endif
+#include "nnfw.h"
+#include "nnfw_util.h"
+#include "nnfw_internal.h"
+#include "nnfw_experimental.h"
+#include "randomgen.h"
+#include "rawformatter.h"
+#ifdef RUY_PROFILER
+#include "ruy/profiler/profiler.h"
+#endif
+
+#include <boost/program_options.hpp>
+#include <cassert>
+#include <chrono>
+#include <cstdlib>
+#include <iostream>
+#include <libgen.h>
+#include <stdexcept>
+#include <unordered_map>
+#include <vector>
+
+static const char *default_backend_cand = "cpu";
+
+void overwriteShapeMap(onert_run::TensorShapeMap &shape_map,
+ std::vector<onert_run::TensorShape> shapes)
+{
+ for (uint32_t i = 0; i < shapes.size(); i++)
+ shape_map[i] = shapes[i];
+}
+
/// Derive the quantized-model output path from a .circle model path:
/// "<stem>_quantized_{q8|q16}.circle". Exits the process when the path does
/// not contain ".circle" at all.
std::string genQuantizedModelPathFromModelPath(const std::string &model_path, bool is_q16)
{
  // Use rfind so a ".circle" occurring earlier in the directory part of the
  // path (e.g. "/data/foo.circle.d/model.circle") does not shadow the real
  // extension at the end.
  auto const extension_pos = model_path.rfind(".circle");
  if (extension_pos == std::string::npos)
  {
    std::cerr << "Input model isn't .circle." << std::endl;
    exit(-1);
  }
  auto const qstring = std::string("_quantized_") + (is_q16 ? "q16" : "q8");
  return model_path.substr(0, extension_pos) + qstring + ".circle";
}
+
/// Derive the quantized-model output path for an nnpackage directory:
/// "<package_path>/<package_name>_quantized_{q8|q16}.circle".
std::string genQuantizedModelPathFromPackagePath(const std::string &package_path, bool is_q16)
{
  // Strip every trailing '/' — the original removed only one, and calling
  // back() on an empty path was undefined behavior.
  auto package_path_without_slash = package_path;
  while (!package_path_without_slash.empty() && package_path_without_slash.back() == '/')
    package_path_without_slash.pop_back();
  // The package name is the final path component.
  auto package_name_pos = package_path_without_slash.find_last_of('/');
  if (package_name_pos == std::string::npos)
    package_name_pos = 0;
  else
    package_name_pos++;
  auto package_name = package_path_without_slash.substr(package_name_pos);
  auto const qstring = std::string("_quantized_") + (is_q16 ? "q16" : "q8");
  return package_path_without_slash + "/" + package_name + qstring + ".circle";
}
+
+int main(const int argc, char **argv)
+{
+ using namespace onert_run;
+
+ try
+ {
+ Args args(argc, argv);
+ if (args.printVersion())
+ {
+ uint32_t version;
+ NNPR_ENSURE_STATUS(nnfw_query_info_u32(NULL, NNFW_INFO_ID_VERSION, &version));
+ std::cout << "onert_run (nnfw runtime: v" << (version >> 24) << "."
+ << ((version & 0x0000FF00) >> 8) << "." << (version & 0xFF) << ")" << std::endl;
+ exit(0);
+ }
+
+#ifdef RUY_PROFILER
+ ruy::profiler::ScopeProfile ruy_profile;
+#endif
+
+ // TODO Apply verbose level to phases
+ const int verbose = args.getVerboseLevel();
+ benchmark::Phases phases(
+ benchmark::PhaseOption{args.getMemoryPoll(), args.getGpuMemoryPoll(), args.getRunDelay()});
+
+ nnfw_session *session = nullptr;
+ NNPR_ENSURE_STATUS(nnfw_create_session(&session));
+
+ // ModelLoad
+ phases.run("MODEL_LOAD", [&](const benchmark::Phase &, uint32_t) {
+ if (args.useSingleModel())
+ NNPR_ENSURE_STATUS(
+ nnfw_load_model_from_modelfile(session, args.getModelFilename().c_str()));
+ else
+ NNPR_ENSURE_STATUS(nnfw_load_model_from_file(session, args.getPackageFilename().c_str()));
+ });
+
+ // Quantize model
+ auto quantize = args.getQuantize();
+ if (!quantize.empty())
+ {
+ NNFW_QUANTIZE_TYPE quantize_type = NNFW_QUANTIZE_TYPE_NOT_SET;
+ if (quantize == "int8")
+ quantize_type = NNFW_QUANTIZE_TYPE_U8_ASYM;
+ if (quantize == "int16")
+ quantize_type = NNFW_QUANTIZE_TYPE_I16_SYM;
+ NNPR_ENSURE_STATUS(nnfw_set_quantization_type(session, quantize_type));
+
+ if (args.getQuantizedModelPath() != "")
+ NNPR_ENSURE_STATUS(
+ nnfw_set_quantized_model_path(session, args.getQuantizedModelPath().c_str()));
+ else
+ {
+ if (args.useSingleModel())
+ NNPR_ENSURE_STATUS(nnfw_set_quantized_model_path(
+ session,
+ genQuantizedModelPathFromModelPath(args.getModelFilename(), quantize == "int16")
+ .c_str()));
+ else
+ NNPR_ENSURE_STATUS(nnfw_set_quantized_model_path(
+ session,
+ genQuantizedModelPathFromPackagePath(args.getPackageFilename(), quantize == "int16")
+ .c_str()));
+ }
+
+ NNPR_ENSURE_STATUS(nnfw_quantize(session));
+ }
+
+ char *available_backends = std::getenv("BACKENDS");
+ if (available_backends)
+ NNPR_ENSURE_STATUS(nnfw_set_available_backends(session, available_backends));
+
+ uint32_t num_inputs;
+ NNPR_ENSURE_STATUS(nnfw_input_size(session, &num_inputs));
+
+ // verify input and output
+
+ auto verifyInputTypes = [session]() {
+ uint32_t sz;
+ NNPR_ENSURE_STATUS(nnfw_input_size(session, &sz));
+ for (uint32_t i = 0; i < sz; ++i)
+ {
+ nnfw_tensorinfo ti;
+ NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session, i, &ti));
+
+ if (ti.dtype < NNFW_TYPE_TENSOR_FLOAT32 || ti.dtype > NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED)
+ {
+ std::cerr << "E: not supported input type" << std::endl;
+ exit(-1);
+ }
+ }
+ };
+
+ auto verifyOutputTypes = [session]() {
+ uint32_t sz;
+ NNPR_ENSURE_STATUS(nnfw_output_size(session, &sz));
+
+ for (uint32_t i = 0; i < sz; ++i)
+ {
+ nnfw_tensorinfo ti;
+ NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session, i, &ti));
+
+ if (ti.dtype < NNFW_TYPE_TENSOR_FLOAT32 || ti.dtype > NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED)
+ {
+ std::cerr << "E: not supported output type" << std::endl;
+ exit(-1);
+ }
+ }
+ };
+
+ auto setTensorInfo = [session](const TensorShapeMap &tensor_shape_map) {
+ for (auto tensor_shape : tensor_shape_map)
+ {
+ auto ind = tensor_shape.first;
+ auto &shape = tensor_shape.second;
+ nnfw_tensorinfo ti;
+ // to fill dtype
+ NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session, ind, &ti));
+
+ bool set_input = false;
+ if (ti.rank != shape.size())
+ {
+ set_input = true;
+ }
+ else
+ {
+ for (int i = 0; i < ti.rank; i++)
+ {
+ if (ti.dims[i] != shape.at(i))
+ {
+ set_input = true;
+ break;
+ }
+ }
+ }
+ if (!set_input)
+ continue;
+
+ ti.rank = shape.size();
+ for (int i = 0; i < ti.rank; i++)
+ ti.dims[i] = shape.at(i);
+ NNPR_ENSURE_STATUS(nnfw_set_input_tensorinfo(session, ind, &ti));
+ }
+ };
+
+ verifyInputTypes();
+ verifyOutputTypes();
+
+// set input shape before compilation
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+
+ auto fill_shape_from_h5 = [&session](const std::string &h5_file, TensorShapeMap &shape_map) {
+ assert(!h5_file.empty());
+ auto shapes = H5Formatter(session).readTensorShapes(h5_file);
+ overwriteShapeMap(shape_map, shapes);
+ };
+
+ if (args.getWhenToUseH5Shape() == WhenToUseH5Shape::PREPARE)
+ fill_shape_from_h5(args.getLoadFilename(), args.getShapeMapForPrepare());
+#endif
+ setTensorInfo(args.getShapeMapForPrepare());
+
+ // prepare execution
+
+ // TODO When nnfw_{prepare|run} are failed, can't catch the time
+ phases.run("PREPARE", [&](const benchmark::Phase &, uint32_t) {
+ NNPR_ENSURE_STATUS(nnfw_prepare(session));
+ });
+
+// set input shape after compilation and before execution
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ if (args.getWhenToUseH5Shape() == WhenToUseH5Shape::RUN ||
+ (!args.getLoadFilename().empty() && !args.shapeParamProvided()))
+ fill_shape_from_h5(args.getLoadFilename(), args.getShapeMapForRun());
+#endif
+ setTensorInfo(args.getShapeMapForRun());
+
+ // prepare input
+ std::vector<Allocation> inputs(num_inputs);
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ if (!args.getLoadFilename().empty())
+ H5Formatter(session).loadInputs(args.getLoadFilename(), inputs);
+ else if (!args.getLoadRawFilename().empty())
+ RawFormatter(session).loadInputs(args.getLoadRawFilename(), inputs);
+ else
+ RandomGenerator(session).generate(inputs);
+#else
+ if (!args.getLoadRawFilename().empty())
+ RawFormatter(session).loadInputs(args.getLoadRawFilename(), inputs);
+ else
+ RandomGenerator(session).generate(inputs);
+#endif
+
+ // prepare output
+ uint32_t num_outputs = 0;
+ NNPR_ENSURE_STATUS(nnfw_output_size(session, &num_outputs));
+ std::vector<Allocation> outputs(num_outputs);
+ auto output_sizes = args.getOutputSizes();
+ for (uint32_t i = 0; i < num_outputs; i++)
+ {
+ nnfw_tensorinfo ti;
+ uint64_t output_size_in_bytes = 0;
+ {
+ auto found = output_sizes.find(i);
+ if (found == output_sizes.end())
+ {
+ NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session, i, &ti));
+ output_size_in_bytes = bufsize_for(&ti);
+ }
+ else
+ {
+ output_size_in_bytes = found->second;
+ }
+ }
+ outputs[i].alloc(output_size_in_bytes);
+ NNPR_ENSURE_STATUS(
+ nnfw_set_output(session, i, ti.dtype, outputs[i].data(), output_size_in_bytes));
+ NNPR_ENSURE_STATUS(nnfw_set_output_layout(session, i, NNFW_LAYOUT_CHANNELS_LAST));
+ }
+
+ // NOTE: Measuring memory can't avoid taking overhead. Therefore, memory will be measured on the
+ // only warmup.
+ if (verbose == 0)
+ {
+ phases.run(
+ "WARMUP",
+ [&](const benchmark::Phase &, uint32_t) { NNPR_ENSURE_STATUS(nnfw_run(session)); },
+ args.getWarmupRuns());
+ phases.run(
+ "EXECUTE",
+ [&](const benchmark::Phase &, uint32_t) { NNPR_ENSURE_STATUS(nnfw_run(session)); },
+ args.getNumRuns(), true);
+ }
+ else
+ {
+ phases.run(
+ "WARMUP",
+ [&](const benchmark::Phase &, uint32_t) { NNPR_ENSURE_STATUS(nnfw_run(session)); },
+ [&](const benchmark::Phase &phase, uint32_t nth) {
+ std::cout << "... "
+ << "warmup " << nth + 1 << " takes " << phase.time[nth] / 1e3 << " ms"
+ << std::endl;
+ },
+ args.getWarmupRuns());
+ phases.run(
+ "EXECUTE",
+ [&](const benchmark::Phase &, uint32_t) { NNPR_ENSURE_STATUS(nnfw_run(session)); },
+ [&](const benchmark::Phase &phase, uint32_t nth) {
+ std::cout << "... "
+ << "run " << nth + 1 << " takes " << phase.time[nth] / 1e3 << " ms"
+ << std::endl;
+ },
+ args.getNumRuns(), true);
+ }
+
+#if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
+ // dump output tensors
+ if (!args.getDumpFilename().empty())
+ H5Formatter(session).dumpOutputs(args.getDumpFilename(), outputs);
+#endif
+ if (!args.getDumpRawFilename().empty())
+ RawFormatter(session).dumpOutputs(args.getDumpRawFilename(), outputs);
+
+ NNPR_ENSURE_STATUS(nnfw_close_session(session));
+
+ // TODO Apply verbose level to result
+
+ // prepare result
+ benchmark::Result result(phases);
+
+ // to stdout
+ benchmark::printResult(result);
+
+ // to csv
+ if (args.getWriteReport() == false)
+ return 0;
+
+ // prepare csv task
+ std::string exec_basename;
+ std::string nnpkg_basename;
+ std::string backend_name = (available_backends) ? available_backends : default_backend_cand;
+ {
+ char buf[PATH_MAX];
+ char *res = args.useSingleModel() ? realpath(args.getModelFilename().c_str(), buf)
+ : realpath(args.getPackageFilename().c_str(), buf);
+ if (res)
+ {
+ nnpkg_basename = basename(buf);
+ }
+ else
+ {
+ std::cerr << "E: during getting realpath from nnpackage or model path." << std::endl;
+ exit(-1);
+ }
+ exec_basename = basename(argv[0]);
+ }
+
+ benchmark::writeResult(result, exec_basename, nnpkg_basename, backend_name);
+
+ return 0;
+ }
+ catch (boost::program_options::error &e)
+ {
+ std::cerr << "E: " << e.what() << std::endl;
+ exit(-1);
+ }
+ catch (std::runtime_error &e)
+ {
+ std::cerr << "E: Fail to run by runtime error:" << e.what() << std::endl;
+ exit(-1);
+ }
+}
diff --git a/tests/tools/onert_run/src/randomgen.cc b/tests/tools/onert_run/src/randomgen.cc
new file mode 100644
index 000000000..1a8a5045d
--- /dev/null
+++ b/tests/tools/onert_run/src/randomgen.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "randomgen.h"
+#include "nnfw.h"
+#include "nnfw_util.h"
+#include "misc/RandomGenerator.h"
+
+#include <iostream>
+
+namespace onert_run
+{
+
+template <class T> void randomData(nnfw::misc::RandomGenerator &randgen, void *data, uint64_t size)
+{
+ for (uint64_t i = 0; i < size; i++)
+ reinterpret_cast<T *>(data)[i] = randgen.generate<T>();
+}
+
+void RandomGenerator::generate(std::vector<Allocation> &inputs)
+{
+ // generate random data
+ const int seed = 1;
+ nnfw::misc::RandomGenerator randgen{seed, 0.0f, 2.0f};
+ for (uint32_t i = 0; i < inputs.size(); ++i)
+ {
+ nnfw_tensorinfo ti;
+ NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session_, i, &ti));
+ auto input_size_in_bytes = bufsize_for(&ti);
+ inputs[i].alloc(input_size_in_bytes);
+ switch (ti.dtype)
+ {
+ case NNFW_TYPE_TENSOR_FLOAT32:
+ randomData<float>(randgen, inputs[i].data(), num_elems(&ti));
+ break;
+ case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
+ randomData<uint8_t>(randgen, inputs[i].data(), num_elems(&ti));
+ break;
+ case NNFW_TYPE_TENSOR_BOOL:
+ randomData<bool>(randgen, inputs[i].data(), num_elems(&ti));
+ break;
+ case NNFW_TYPE_TENSOR_UINT8:
+ randomData<uint8_t>(randgen, inputs[i].data(), num_elems(&ti));
+ break;
+ case NNFW_TYPE_TENSOR_INT32:
+ randomData<int32_t>(randgen, inputs[i].data(), num_elems(&ti));
+ break;
+ case NNFW_TYPE_TENSOR_INT64:
+ randomData<int64_t>(randgen, inputs[i].data(), num_elems(&ti));
+ break;
+ case NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
+ randomData<int16_t>(randgen, inputs[i].data(), num_elems(&ti));
+ break;
+ default:
+ std::cerr << "Not supported input type" << std::endl;
+ std::exit(-1);
+ }
+ NNPR_ENSURE_STATUS(
+ nnfw_set_input(session_, i, ti.dtype, inputs[i].data(), input_size_in_bytes));
+ NNPR_ENSURE_STATUS(nnfw_set_input_layout(session_, i, NNFW_LAYOUT_CHANNELS_LAST));
+ }
+};
+
+} // end of namespace onert_run
diff --git a/tests/tools/onert_run/src/randomgen.h b/tests/tools/onert_run/src/randomgen.h
new file mode 100644
index 000000000..58afb4171
--- /dev/null
+++ b/tests/tools/onert_run/src/randomgen.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_RUN_RANDOMGEN_H__
+#define __ONERT_RUN_RANDOMGEN_H__
+
+#include <string>
+#include <vector>
+
+#include "allocation.h"
+
+struct nnfw_session;
+
+namespace onert_run
+{
// Fills every input tensor of a session with deterministic pseudo-random data
// and registers the buffers with the session (see randomgen.cc).
class RandomGenerator
{
public:
  // `sess` is borrowed, not owned; it must outlive this object.
  RandomGenerator(nnfw_session *sess) : session_(sess) {}
  // Allocates and fills `inputs` — one Allocation per session input.
  void generate(std::vector<Allocation> &inputs);

private:
  nnfw_session *session_; // non-owning session handle
};
+} // namespace onert_run
+
+#endif // __ONERT_RUN_RANDOMGEN_H__
diff --git a/tests/tools/onert_run/src/rawformatter.cc b/tests/tools/onert_run/src/rawformatter.cc
new file mode 100644
index 000000000..7cfab9904
--- /dev/null
+++ b/tests/tools/onert_run/src/rawformatter.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "rawformatter.h"
+#include "nnfw.h"
+#include "nnfw_util.h"
+
+#include <iostream>
+#include <fstream>
+#include <stdexcept>
+
+namespace onert_run
+{
+void RawFormatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
+{
+ uint32_t num_inputs;
+ NNPR_ENSURE_STATUS(nnfw_input_size(session_, &num_inputs));
+
+ // Support multiple inputs
+ // Option 1: Get comman-separated input file list like --load:raw a,b,c
+ // Option 2: Get prefix --load:raw in
+ // Internally access in.0, in.1, in.2, ... in.{N-1} where N is determined by nnfw info
+ // query api.
+ //
+ // Currently Option 2 is implemented.
+ try
+ {
+ for (uint32_t i = 0; i < num_inputs; ++i)
+ {
+ nnfw_tensorinfo ti;
+ NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session_, i, &ti));
+
+ // allocate memory for data
+ auto bufsz = bufsize_for(&ti);
+ inputs[i].alloc(bufsz);
+
+ std::ifstream file(filename + "." + std::to_string(i), std::ios::ate | std::ios::binary);
+ auto filesz = file.tellg();
+ if (bufsz != filesz)
+ {
+ throw std::runtime_error("Input " + std::to_string(i) +
+ " size does not match: " + std::to_string(bufsz) +
+ " expected, but " + std::to_string(filesz) + " provided.");
+ }
+ file.seekg(0, std::ios::beg);
+ file.read(reinterpret_cast<char *>(inputs[i].data()), filesz);
+ file.close();
+
+ NNPR_ENSURE_STATUS(nnfw_set_input(session_, i, ti.dtype, inputs[i].data(), bufsz));
+ NNPR_ENSURE_STATUS(nnfw_set_input_layout(session_, i, NNFW_LAYOUT_CHANNELS_LAST));
+ }
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << e.what() << std::endl;
+ std::exit(-1);
+ }
+};
+
+void RawFormatter::dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs)
+{
+ uint32_t num_outputs;
+ NNPR_ENSURE_STATUS(nnfw_output_size(session_, &num_outputs));
+ try
+ {
+ for (uint32_t i = 0; i < num_outputs; i++)
+ {
+ nnfw_tensorinfo ti;
+ NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session_, i, &ti));
+ auto bufsz = bufsize_for(&ti);
+
+ std::ofstream file(filename + "." + std::to_string(i), std::ios::out | std::ios::binary);
+ file.write(reinterpret_cast<const char *>(outputs[i].data()), bufsz);
+ file.close();
+ std::cerr << filename + "." + std::to_string(i) + " is generated.\n";
+ }
+ }
+ catch (const std::runtime_error &e)
+ {
+ std::cerr << "Error during dumpOutputs on onert_run : " << e.what() << std::endl;
+ std::exit(-1);
+ }
+}
+} // end of namespace onert_run
diff --git a/tests/tools/onert_run/src/rawformatter.h b/tests/tools/onert_run/src/rawformatter.h
new file mode 100644
index 000000000..b6eaab66d
--- /dev/null
+++ b/tests/tools/onert_run/src/rawformatter.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_RUN_RAWFORMATTER_H__
+#define __ONERT_RUN_RAWFORMATTER_H__
+
+#include "allocation.h"
+#include "formatter.h"
+#include "types.h"
+
+#include <string>
+#include <vector>
+
+struct nnfw_session;
+
+namespace onert_run
+{
// Reads inputs from / writes outputs to raw binary files, one file per tensor
// (suffix ".0", ".1", ... appended to the given filename). See rawformatter.cc.
class RawFormatter : public Formatter
{
public:
  // `sess` is borrowed by the Formatter base; it must outlive this object.
  RawFormatter(nnfw_session *sess) : Formatter(sess) {}
  // Loads "<filename>.<i>" into inputs[i] and registers it with the session.
  void loadInputs(const std::string &filename, std::vector<Allocation> &inputs) override;
  // Writes outputs[i] to "<filename>.<i>".
  void dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs) override;
};
+} // namespace onert_run
+
+#endif // __ONERT_RUN_RAWFORMATTER_H__
diff --git a/tests/tools/onert_run/src/types.h b/tests/tools/onert_run/src/types.h
new file mode 100644
index 000000000..563c5e488
--- /dev/null
+++ b/tests/tools/onert_run/src/types.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#ifndef __ONERT_RUN_TYPES_H__
#define __ONERT_RUN_TYPES_H__

// BUGFIX: this header uses std::vector but did not include <vector>, so it
// only compiled when the includer happened to pull it in first.
#include <vector>

namespace onert_run
{

// A tensor shape is the list of its dimension extents.
using TensorShape = std::vector<int>;

} // end of namespace onert_run

#endif // __ONERT_RUN_TYPES_H__