Diffstat (limited to 'runtimes/neurun/test/core/exec/ExecInstance.cc')
-rw-r--r-- runtimes/neurun/test/core/exec/ExecInstance.cc | 312 ++++++++++++++
1 file changed, 312 insertions(+), 0 deletions(-)
diff --git a/runtimes/neurun/test/core/exec/ExecInstance.cc b/runtimes/neurun/test/core/exec/ExecInstance.cc
new file mode 100644
index 000000000..2e962a4b2
--- /dev/null
+++ b/runtimes/neurun/test/core/exec/ExecInstance.cc
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <thread>
+
+#include "graph/Graph.h"
+#include "model/Model.h"
+#include "compiler/Compiler.h"
+#include "exec/Execution.h"
+#include "model/operation/AddNode.h"
+
+namespace
+{
+
+using namespace neurun::model;
+using DataType = neurun::model::DataType;
+using Model = neurun::model::Model;
+
+class CompiledMockUpModel
+{
+public:
+ CompiledMockUpModel()
+ {
+ // Model: two elementwise add operations
+ // model input: lhs, rhs1
+ // model output: second add result (result2)
+ // constant: rhs2
+ // result1 <= (lhs + rhs1)
+ // result2 <= (result1 + rhs2)
+ // lhs, rhs1, rhs2, result1, result2 shape: {1, 2, 2, 1}
+ // activation: none (no fused activation on either add)
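+ // e.g. for lhs = {1, 0, -1, -2} and rhs1 = {1, -3, 2, -4}:
+ //   result1 = lhs + rhs1     = {2, -3, 1, -6}
+ //   result2 = result1 + rhs2 = {5, -2, 0, -1}
+ // which is the expected output checked by the tests below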
+ std::unique_ptr<neurun::model::Model> model = nnfw::cpp14::make_unique<neurun::model::Model>();
+ // Operands for the two add operations
+ Shape shape{1, 2, 2, 1};
+ TypeInfo type{DataType::FLOAT32};
+ static float rhs2_data[4] = {3, 1, -1, 5};
+ auto operand_lhs = model->operands.emplace(shape, type);
+ auto operand_rhs1 = model->operands.emplace(shape, type);
+ auto operand_result1 = model->operands.emplace(shape, type);
+ auto operand_rhs2 = model->operands.emplace(shape, type);
+ auto operand_result2 = model->operands.emplace(shape, type);
+ model->operands.at(operand_rhs2)
+ .data(nnfw::cpp14::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(&rhs2_data),
+ 16));
+ // 1st add operation (result1 <= lhs + rhs1)
+ operation::AddNode::Param param1;
+ param1.activation = neurun::model::Activation::NONE;
+ auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
+ auto output_set1 = OperandIndexSequence{operand_result1};
+ model->operations.push(
+ nnfw::cpp14::make_unique<operation::AddNode>(input_set1, output_set1, param1));
+ // 2nd add operation (result2 <= result1 + rhs2)
+ operation::AddNode::Param param2;
+ param2.activation = neurun::model::Activation::NONE;
+ auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
+ auto output_set2 = OperandIndexSequence{operand_result2};
+ model->operations.push(
+ nnfw::cpp14::make_unique<operation::AddNode>(input_set2, output_set2, param2));
+ // Identify model inputs and outputs
+ model->inputs.append(operand_lhs);
+ model->inputs.append(operand_rhs1);
+ model->outputs.append(operand_result2);
+ graph = std::make_shared<::neurun::graph::Graph>(std::move(model));
+ graph->finishBuilding();
+
+ // Compile
+ auto compiler = new neurun::compiler::Compiler{graph};
+ compiler->compile();
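+ // release() stores the compiled executor into 'executor'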
+ compiler->release(executor);
+ delete compiler;
+ }
+
+public:
+ std::shared_ptr<::neurun::graph::Graph> graph;
+ std::shared_ptr<neurun::exec::IExecutor> executor;
+};
+
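+// Support simple single-shot synchronous execution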
+TEST(ExecInstance, simple)
+{
+ auto mockup = CompiledMockUpModel();
+ auto graph = mockup.graph;
+ auto executor = mockup.executor;
+
+ auto input1 = IOIndex{0};
+ auto input2 = IOIndex{1};
+ auto output = IOIndex{0};
+
+ const float input1_buffer[4] = {1, 0, -1, -2};
+ const float input2_buffer[4] = {1, -3, 2, -4};
+ float output_buffer[4] = {};
+ const float output_expected[4] = {5, -2, 0, -1};
+
+ auto execution = new neurun::exec::Execution(executor);
+
+ execution->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
+ execution->setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
+ execution->setOutput(output, reinterpret_cast<void *>(output_buffer), 16);
+ execution->execute();
+
+ for (auto i = 0; i < 4; i++)
+ {
+ EXPECT_EQ(output_buffer[i], output_expected[i]);
+ }
+
+ delete execution;
+}
+
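+// Support a second compilation of the same graph, with each executor
+// driving its own Execution instance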
+TEST(ExecInstance, twoCompile)
+{
+ auto mockup = CompiledMockUpModel();
+ auto graph = mockup.graph;
+ auto executor1 = mockup.executor;
+ auto execution1 = new neurun::exec::Execution(executor1);
+
+ auto input1 = IOIndex{0};
+ auto input2 = IOIndex{1};
+ auto output = IOIndex{0};
+
+ const float exe1_input1_buffer[4] = {1, 0, -1, -2};
+ const float exe1_input2_buffer[4] = {1, -3, 2, -4};
+ float exe1_output_buffer[4] = {};
+ const float exe1_output_expected[4] = {5, -2, 0, -1};
+
+ execution1->setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
+ execution1->setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
+ execution1->setOutput(output, reinterpret_cast<void *>(exe1_output_buffer), 16);
+
+ // Make new executor: compile again
+ auto compiler = new neurun::compiler::Compiler{graph};
+ compiler->compile();
+ std::shared_ptr<neurun::exec::IExecutor> executor2;
+ compiler->release(executor2);
+ auto execution2 = new neurun::exec::Execution(executor2);
+
+ const float exe2_input1_buffer[4] = {2, 1, -2, 0};
+ const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
+ float exe2_output_buffer[4] = {};
+ const float exe2_output_expected[4] = {2, 5, -2, 7};
+
+ execution2->setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
+ execution2->setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
+ execution2->setOutput(output, reinterpret_cast<void *>(exe2_output_buffer), 16);
+
+ execution1->execute();
+ execution2->execute();
+
+ for (auto i = 0; i < 4; i++)
+ {
+ EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
+ EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
+ }
+
+ delete compiler;
+ delete execution1;
+ delete execution2;
+}
+
+// Support two initialized Execution instances on a single executor, run in order
+TEST(ExecInstance, twoExecution)
+{
+ auto mockup = CompiledMockUpModel();
+ auto executor = mockup.executor;
+ auto input1 = IOIndex{0};
+ auto input2 = IOIndex{1};
+ auto output1 = IOIndex{0};
+
+ const float exe1_input1_buffer[4] = {1, 0, -1, -2};
+ const float exe1_input2_buffer[4] = {1, -3, 2, -4};
+ float exe1_output_buffer[4] = {};
+ const float exe1_output_expected[4] = {5, -2, 0, -1};
+ const float exe2_output_expected[4] = {2, 5, -2, 7};
+
+ auto execution1 = new neurun::exec::Execution(executor);
+ execution1->setInput(input1, reinterpret_cast<const void *>(exe1_input1_buffer), 16);
+ execution1->setInput(input2, reinterpret_cast<const void *>(exe1_input2_buffer), 16);
+ execution1->setOutput(output1, reinterpret_cast<void *>(exe1_output_buffer), 16);
+
+ const float exe2_input1_buffer[4] = {2, 1, -2, 0};
+ const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
+ float exe2_output_buffer[4] = {};
+
+ // Make new execution
+ auto execution2 = new neurun::exec::Execution(executor);
+ execution2->setInput(input1, reinterpret_cast<const void *>(exe2_input1_buffer), 16);
+ execution2->setInput(input2, reinterpret_cast<const void *>(exe2_input2_buffer), 16);
+ execution2->setOutput(output1, reinterpret_cast<void *>(exe2_output_buffer), 16);
+
+ execution1->execute();
+ execution2->execute();
+
+ for (auto i = 0; i < 4; i++)
+ {
+ EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
+ EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
+ }
+
+ delete execution1;
+ delete execution2;
+}
+
+class Inference
+{
+public:
+ Inference(const float (&input1)[4], const float (&input2)[4], float (&output)[4],
+ std::shared_ptr<neurun::exec::IExecutor> &executor)
+ : _input1{input1}, _input2{input2}, _output{output}, _executor{executor}
+ {
+ // DO NOTHING
+ }
+
+ void inference(void)
+ {
+ auto input1 = IOIndex{0};
+ auto input2 = IOIndex{1};
+ auto output1 = IOIndex{0};
+
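+ // Each inference call builds its own Execution; only the executor is
+ // shared, so calls from different threads can run concurrently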
+ auto execution = new neurun::exec::Execution(_executor);
+ execution->setInput(input1, reinterpret_cast<const void *>(_input1), 16);
+ execution->setInput(input2, reinterpret_cast<const void *>(_input2), 16);
+ execution->setOutput(output1, reinterpret_cast<void *>(_output), 16);
+
+ execution->execute();
+
+ delete execution;
+ }
+
+private:
+ const float (&_input1)[4];
+ const float (&_input2)[4];
+ float (&_output)[4];
+ std::shared_ptr<neurun::exec::IExecutor> &_executor;
+};
+
+// Support multi-threaded execution
+TEST(ExecInstance, twoThreads)
+{
+ auto mockup = CompiledMockUpModel();
+ auto executor = mockup.executor;
+
+ const float exe1_input1_buffer[4] = {1, 0, -1, -2};
+ const float exe1_input2_buffer[4] = {1, -3, 2, -4};
+ float exe1_output_buffer[4] = {};
+ const float exe1_output_expected[4] = {5, -2, 0, -1};
+
+ Inference execution1{exe1_input1_buffer, exe1_input2_buffer, exe1_output_buffer, executor};
+
+ const float exe2_input1_buffer[4] = {2, 1, -2, 0};
+ const float exe2_input2_buffer[4] = {-3, 3, 1, 2};
+ float exe2_output_buffer[4] = {};
+ const float exe2_output_expected[4] = {2, 5, -2, 7};
+
+ Inference execution2{exe2_input1_buffer, exe2_input2_buffer, exe2_output_buffer, executor};
+
+ std::thread t1{&Inference::inference, &execution1};
+ std::thread t2{&Inference::inference, &execution2};
+
+ t1.join();
+ t2.join();
+
+ for (auto i = 0; i < 4; i++)
+ {
+ EXPECT_EQ(exe1_output_buffer[i], exe1_output_expected[i]);
+ EXPECT_EQ(exe2_output_buffer[i], exe2_output_expected[i]);
+ }
+}
+
+// Support asynchronous execution
+TEST(ExecInstance, async)
+{
+ auto mockup = CompiledMockUpModel();
+ auto graph = mockup.graph;
+ auto executor = mockup.executor;
+
+ auto input1 = IOIndex{0};
+ auto input2 = IOIndex{1};
+ auto output = IOIndex{0};
+
+ const float input1_buffer[4] = {1, 0, -1, -2};
+ const float input2_buffer[4] = {1, -3, 2, -4};
+ float output_buffer[4] = {};
+ const float output_expected[4] = {5, -2, 0, -1};
+
+ auto execution = new neurun::exec::Execution(executor);
+
+ execution->setInput(input1, reinterpret_cast<const void *>(input1_buffer), 16);
+ execution->setInput(input2, reinterpret_cast<const void *>(input2_buffer), 16);
+ execution->setOutput(output, reinterpret_cast<void *>(output_buffer), 16);
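+ // startExecute() launches the inference asynchronously; waitFinish()
+ // blocks until output_buffer has been filled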
+ execution->startExecute();
+ execution->waitFinish();
+
+ for (auto i = 0; i < 4; i++)
+ {
+ EXPECT_EQ(output_buffer[i], output_expected[i]);
+ }
+
+ delete execution;
+}
+
+} // namespace