author    Chunseok Lee <chunseok.lee@samsung.com>    2020-03-05 15:10:09 +0900
committer Chunseok Lee <chunseok.lee@samsung.com>    2020-03-05 15:22:53 +0900
commit    d91a039e0eda6fd70dcd22672b8ce1817c1ca50e (patch)
tree      62668ec548cf31fadbbf4e99522999ad13434a25 /tests/tools/tflite_loader/src/tflite_loader.cc
parent    bd11b24234d7d43dfe05a81c520aa01ffad06e42 (diff)
catch up to tizen_5.5 and remove unnecessary dirs
- update to tizen_5.5
- remove dirs
Diffstat (limited to 'tests/tools/tflite_loader/src/tflite_loader.cc')
-rw-r--r--    tests/tools/tflite_loader/src/tflite_loader.cc    289
1 file changed, 289 insertions(+), 0 deletions(-)
diff --git a/tests/tools/tflite_loader/src/tflite_loader.cc b/tests/tools/tflite_loader/src/tflite_loader.cc
new file mode 100644
index 000000000..c2388f3cc
--- /dev/null
+++ b/tests/tools/tflite_loader/src/tflite_loader.cc
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
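+// tflite_loader test tool: loads a .tflite model through the neurun loader,
+// runs it, and compares the outputs against the stock TFLite interpreter.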
+#include "tflite/ext/kernels/register.h"
+
+#include "args.h"
+#include "tflite/InterpreterSession.h"
+#include "tflite/Assert.h"
+#include "tflite/Diff.h"
+#include "misc/tensor/IndexIterator.h"
+
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include <string>
+#include <cassert>
+#include <cmath>
+#include <cstring>
+
+#include "compiler/Compiler.h"
+#include "exec/Execution.h"
+#include "graph/Graph.h"
+
+#include "loader.h"
+
+#include "cpp14/memory.h"
+
+using namespace tflite;
+using namespace nnfw::tflite;
+
+// Exit codes
+const int RUN_FAILED = 1;
+const int FILE_ERROR = 2;
+
+// Maximum element-wise difference tolerated between neurun and TFLite outputs
+const float DIFFERENCE_THRESHOLD = 10e-5;
+
+// Read vector of floats from selected file
+std::vector<float> readData(const std::string &path)
+{
+ std::ifstream in(path);
+ if (!in.good())
+ {
+ std::cerr << "cannot open data file " << path << "\n";
+ exit(FILE_ERROR);
+ }
+ in.seekg(0, std::ifstream::end);
+ size_t len = in.tellg();
+ in.seekg(0, std::ifstream::beg);
+ assert(len % sizeof(float) == 0);
+ size_t size = len / sizeof(float);
+ std::vector<float> vec(size);
+ for (size_t i = 0; i < size; ++i)
+ {
+ in.read(reinterpret_cast<char *>(&vec[i]), sizeof(float));
+ }
+ return vec;
+}
+
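+// Fill a vector of `size` floats with values drawn from the given generator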
+std::vector<float> randomData(RandomGenerator &randgen, const uint64_t size)
+{
+ std::vector<float> vec(size);
+ for (uint64_t i = 0; i < size; i++)
+ {
+ vec[i] = randgen.generate<float>();
+ }
+ return vec;
+}
+
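+// Compile the given neurun graph, bind `inputs`/`outputs` and run it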
+void executeGraph(const std::shared_ptr<neurun::graph::Graph> &g,
+ const std::vector<std::vector<float>> &inputs,
+ std::vector<std::vector<float>> &outputs)
+{
+ auto compiler = nnfw::cpp14::make_unique<neurun::compiler::Compiler>(g);
+ // Compilation
+ try
+ {
+ compiler->compile();
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << "[Execution] Can't compile model" << std::endl;
+ std::cerr << e.what() << std::endl;
+ exit(-1);
+ }
+
+ std::cout << "[Execution] Graph compiled!" << std::endl;
+
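+ // Take the compiled executor from the compiler and wrap it in an Execution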
+ std::shared_ptr<neurun::exec::IExecutor> executor;
+ compiler->release(executor);
+ auto execution = std::make_shared<neurun::exec::Execution>(executor);
+
+ // Verify input shapes
+ auto num_inputs = inputs.size();
+ for (size_t i = 0; i < num_inputs; i++)
+ {
+ auto input_operand_idx = g->getInputs().at(i);
+ auto input_shape = g->operands().at(input_operand_idx).shape();
+ assert(inputs[i].size() == input_shape.num_elements());
+ }
+
+ // Set output shapes
+ auto num_outputs = g->getOutputs().size();
+ outputs.resize(num_outputs);
+ for (uint32_t i = 0; i < num_outputs; i++)
+ {
+ auto output_operand_idx = g->getOutputs().at(i);
+ auto output_shape = g->operands().at(output_operand_idx).shape();
+ outputs[i].resize(output_shape.num_elements());
+ }
+
+ // Setting IO
+ try
+ {
+ for (size_t i = 0; i < num_inputs; i++)
+ execution->setInput(neurun::model::IOIndex(i), inputs[i].data(),
+ inputs[i].size() * sizeof(float));
+ for (uint32_t i = 0; i < num_outputs; i++)
+ execution->setOutput(neurun::model::IOIndex(i), outputs[i].data(),
+ outputs[i].size() * sizeof(float));
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << "[Execution] Can't set model IO" << std::endl;
+ std::cerr << e.what() << '\n';
+ exit(-1);
+ }
+
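+ // Run inference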
+ execution->execute();
+ std::cout << "[Execution] Done!" << std::endl;
+}
+
+int main(const int argc, char **argv)
+{
+ TFLiteRun::Args args(argc, argv);
+
+ auto tflite_file = args.getTFLiteFilename();
+ auto data_files = args.getDataFilenames();
+
+ if (tflite_file.empty())
+ {
+ args.print(argv);
+ return RUN_FAILED;
+ }
+
+ std::cout << "[Execution] Stage start!" << std::endl;
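+ // Build an empty neurun graph; the loader below fills it from the tflite file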
+ auto test_model = nnfw::cpp14::make_unique<neurun::model::Model>();
+ auto test_graph = std::make_shared<neurun::graph::Graph>(std::move(test_model));
+ // Loading
+ try
+ {
+ tflite_loader::Loader loader(*test_graph);
+ loader.loadFromFile(tflite_file.c_str());
+ }
+ catch (std::exception &e)
+ {
+ std::cerr << "[ ERROR ] "
+ << "Failure during model load" << std::endl;
+ std::cerr << e.what() << std::endl;
+ exit(-1);
+ }
+
+ // TODO: Support other input/output types
+ for (const auto &input_idx : test_graph->getInputs())
+ {
+ const auto input_type = test_graph->operands().at(input_idx).typeInfo().type();
+ assert(input_type == neurun::model::DataType::FLOAT32 && "Only FLOAT32 inputs are supported");
+ }
+ for (const auto &output_idx : test_graph->getOutputs())
+ {
+ const auto output_type = test_graph->operands().at(output_idx).typeInfo().type();
+ assert(output_type == neurun::model::DataType::FLOAT32 && "Only FLOAT32 outputs are supported");
+ }
+
+ std::cout << "[Execution] Model is deserialized!" << std::endl;
+ auto num_inputs = test_graph->getInputs().size();
+ std::vector<std::vector<float>> inputs(num_inputs);
+ bool generate_data = data_files.empty();
+ bool read_data = data_files.size() == num_inputs;
+ if (num_inputs == 0)
+ {
+ std::cerr << "[ ERROR ] "
+ << "No inputs in model => execution is not possible" << std::endl;
+ exit(1);
+ }
+ if (!generate_data && !read_data)
+ {
+ std::cerr << "[ ERROR ] "
+ << "Wrong number of input files." << std::endl;
+ exit(1);
+ }
+
+ const int seed = 1; /* TODO Add an option for seed value */
+ RandomGenerator randgen{seed, 0.0f, 2.0f};
+ for (uint32_t i = 0; i < num_inputs; i++)
+ {
+ if (generate_data)
+ {
+ uint64_t sz = test_graph->operands().at(test_graph->getInputs().at(i)).shape().num_elements();
+ inputs[i] = randomData(randgen, sz);
+ }
+ else /* read_data */
+ inputs[i] = readData(data_files[i]);
+ }
+ std::cout << "[Execution] Input data is defined!" << std::endl;
+ std::vector<std::vector<float>> outputs;
+ // Run graph
+ executeGraph(test_graph, inputs, outputs);
+ // Compare with tflite
+ std::cout << "[Comparison] Stage start!" << std::endl;
+ // Read tflite model
+ StderrReporter error_reporter;
+ auto model = FlatBufferModel::BuildFromFile(tflite_file.c_str(), &error_reporter);
+ if (model == nullptr)
+ {
+ std::cerr << "[Comparison] Failed to load tflite file " << tflite_file << std::endl;
+ exit(FILE_ERROR);
+ }
+
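+ // Build a reference TFLite interpreter over the same model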
+ BuiltinOpResolver resolver;
+ InterpreterBuilder builder(*model, resolver);
+
+ std::unique_ptr<Interpreter> interpreter;
+ try
+ {
+ TFLITE_ENSURE(builder(&interpreter));
+ }
+ catch (const std::exception &e)
+ {
+ std::cerr << e.what() << std::endl;
+ exit(FILE_ERROR);
+ }
+ interpreter->SetNumThreads(2);
+
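+ // Wrap the interpreter in a session helper and prepare it for running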
+ auto sess = std::make_shared<nnfw::tflite::InterpreterSession>(interpreter.get());
+ sess->prepare();
+ // Set input and run
+ for (uint32_t i = 0; i < num_inputs; i++)
+ {
+ auto input_tensor = interpreter->tensor(interpreter->inputs().at(i));
+ memcpy(input_tensor->data.f, inputs[i].data(), inputs[i].size() * sizeof(float));
+ }
+ if (!sess->run())
+ {
+ std::cout << "[Comparison] TFLite run failed!" << std::endl;
+ exit(RUN_FAILED);
+ }
+ std::cout << "[Comparison] TFLite run done!" << std::endl;
+
+ // Calculate max difference over all outputs
+ float max_difference = 0.0f;
+ auto num_outputs = test_graph->getOutputs().size();
+ for (uint32_t out_idx = 0; out_idx < num_outputs; out_idx++)
+ {
+ const auto &tflite_output_tensor = interpreter->tensor(interpreter->outputs().at(out_idx));
+ const auto &nnfw_output_tensor = outputs[out_idx];
+
+ if (nnfw_output_tensor.size() != tflite_output_tensor->bytes / sizeof(float))
+ {
+ // Bail out: iterating further would read past the end of the smaller buffer
+ std::cout << "[Comparison] Different size of outputs!" << std::endl;
+ exit(RUN_FAILED);
+ }
+ // Check max difference
+ float *tflite_out_ptr = tflite_output_tensor->data.f;
+ for (const auto &nnfw_out : nnfw_output_tensor)
+ {
+ const float diff = std::abs(nnfw_out - *tflite_out_ptr);
+ if (diff > max_difference)
+ max_difference = diff;
+
+ tflite_out_ptr++;
+ }
+ }
+
+ // Print results
+ std::cout << "[Comparison] Max difference: " << max_difference << std::endl;
+ int ret = 0;
+ if (max_difference > DIFFERENCE_THRESHOLD)
+ {
+ std::cout << "[Comparison] Outputs are not equal!" << std::endl;
+ ret = 1;
+ }
+ else
+ {
+ std::cout << "[Comparison] Outputs are equal!" << std::endl;
+ }
+ std::cout << "[Comparison] Done!" << std::endl;
+
+ return ret;
+}