Diffstat (limited to 'runtime/onert/core/src/exec/Execution.cc')
-rw-r--r-- runtime/onert/core/src/exec/Execution.cc | 93
1 file changed, 68 insertions(+), 25 deletions(-)
diff --git a/runtime/onert/core/src/exec/Execution.cc b/runtime/onert/core/src/exec/Execution.cc
index 7feb3ab68..f51bed820 100644
--- a/runtime/onert/core/src/exec/Execution.cc
+++ b/runtime/onert/core/src/exec/Execution.cc
@@ -16,6 +16,8 @@
#include "exec/Execution.h"
+#include "train/TrainableExecutors.h"
+
#include "util/logging.h"
namespace onert
@@ -23,33 +25,30 @@ namespace onert
namespace exec
{
-Execution::Execution(const std::shared_ptr<ExecutorMap> &executors) : _executors{executors}
+Execution::Execution(const std::shared_ptr<IExecutors> &executors) : _executors{executors}
{
assert(executors != nullptr);
- assert(executors->at(ir::SubgraphIndex{0}) != nullptr);
- const auto &primary_subg = primary_subgraph();
- _io_desc.inputs.resize(primary_subg.getInputs().size());
- _io_desc.outputs.resize(primary_subg.getOutputs().size());
+ assert(executors->entryExecutor() != nullptr);
+ _io_desc.inputs.resize(_executors->inputSize());
+ _io_desc.outputs.resize(_executors->outputSize());
}
void Execution::changeInputShape(const ir::IOIndex &index, const ir::Shape &new_shape)
{
- // This should be called BEFORE setInput.
- if (_io_desc.inputs.at(index.value()) != 0)
- throw std::runtime_error("Error in calling order");
-
// This will be used later to set the input tensor dynamic
// Note that the 'compiled' model will not be updated with new_shape,
// but new_shape will change the model input shape while 'running' the model
_io_desc.dynamic_input_shapes[index] = new_shape;
+
+ VERBOSE(Execution) << "Model input shape will be changed at the start of execute()"
+                    << " (index: " << index << ")" << std::endl;
}
// TODO Remove default parameter
void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length,
ir::Layout layout)
{
- const auto input_index = primary_subgraph().getInputs().at(index);
- const auto info = primary_subgraph().operands().at(input_index).info();
+ const auto &info = _executors->inputInfo(index);
// TODO handle when (!buffer && length != 0) : setting the input as an optional tensor
@@ -58,10 +57,10 @@ void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t le
// note: input_shape_sig contains shape passed by nnfw_set_input_tensorinfo()
{
auto input_shape_sig = _io_desc.dynamic_input_shapes.find(index);
- auto size_required = (input_shape_sig != _io_desc.dynamic_input_shapes.end())
- ? input_shape_sig->second.num_elements() *
- onert::ir::sizeOfDataType(info.typeInfo().type())
- : info.total_size();
+ auto size_required =
+ (input_shape_sig != _io_desc.dynamic_input_shapes.end())
+ ? input_shape_sig->second.num_elements() * onert::ir::sizeOfDataType(info.typeInfo().type())
+ : info.total_size();
if (length < size_required)
{
@@ -89,8 +88,7 @@ void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, con
// TODO Remove default parameter
void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout)
{
- const auto output_index = primary_subgraph().getOutputs().at(index);
- const auto info = primary_subgraph().operands().at(output_index).info();
+ const auto &info = _executors->outputInfo(index);
if (length < info.total_size())
{
@@ -104,7 +102,7 @@ void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length,
void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type,
const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout)
{
- auto info = ir::OperandInfo::createStaticInfo(shape, type);
+ const auto &info = ir::OperandInfo::createStaticInfo(shape, type);
if (length < info.total_size())
{
@@ -118,21 +116,21 @@ void Execution::setInputLayout(const ir::IOIndex &index, ir::Layout layout)
{
const auto &input_desc = _io_desc.inputs.at(index.value());
_io_desc.inputs.at(index.value()) =
- std::make_unique<InputDesc>(input_desc->info, input_desc->buffer, input_desc->size, layout);
+ std::make_unique<InputDesc>(input_desc->info, input_desc->buffer, input_desc->size, layout);
}
void Execution::setOutputLayout(const ir::IOIndex &index, ir::Layout layout)
{
const auto &output_desc = _io_desc.outputs.at(index.value());
- _io_desc.outputs.at(index.value()) = std::make_unique<OutputDesc>(
- output_desc->info, output_desc->buffer, output_desc->size, layout);
+ _io_desc.outputs.at(index.value()) =
+ std::make_unique<OutputDesc>(output_desc->info, output_desc->buffer, output_desc->size, layout);
}
void Execution::execute()
{
VERBOSE(Execution) << "Start execution" << std::endl;
- primary_executor()->execute(_io_desc);
+ _executors->execute(_io_desc);
finished = true;
VERBOSE(Execution) << "Execution finished" << std::endl;
@@ -155,13 +153,41 @@ void Execution::waitFinish()
bool Execution::isFinished(void) const { return finished; }
+#ifdef ONERT_TRAIN
+void Execution::train(uint32_t training_step)
+{
+ auto execs = dynamic_cast<exec::train::TrainableExecutors *>(_executors.get());
+ if (!execs)
+ {
+ throw std::runtime_error{"Only TrainableExecutors are supported"};
+ }
+
+ VERBOSE(Execution) << "Start training" << std::endl;
+
+ execs->train(_io_desc, training_step);
+ finished = true;
+
+ VERBOSE(Execution) << "Training finished" << std::endl;
+}
+
+float Execution::getLoss(const ir::IOIndex &ind)
+{
+ auto execs = dynamic_cast<exec::train::TrainableExecutors *>(_executors.get());
+ if (!execs)
+ {
+ throw std::runtime_error{"Only TrainableExecutors are supported"};
+ }
+
+ return execs->getLoss(ind);
+}
+#endif // ONERT_TRAIN
+
ir::Shape Execution::getInputShape(ir::IOIndex ind) const
{
auto itr = _io_desc.dynamic_input_shapes.find(ind);
if (itr == _io_desc.dynamic_input_shapes.end())
{
- auto operand_idx = primary_subgraph().getInputs().at(ind.value());
- return primary_subgraph().operands().at(operand_idx).shape();
+ return _executors->inputInfo(ind).shape();
}
else
{
@@ -169,15 +195,32 @@ ir::Shape Execution::getInputShape(ir::IOIndex ind) const
}
}
+// NNAPI returns failure if ANeuralNetworksExecution_getOutputOperandRank or
+// ANeuralNetworksExecution_getOutputOperandDimensions is called before execution.
+// On the other hand, the NNFW API returns the static shape inference result if
+// nnfw_output_tensorinfo is called before execution.
+// To handle both cases, this method returns the static shape inference result and the failure
+// is handled on the NNAPI frontend.
ir::Shape Execution::getOutputShape(ir::IOIndex ind) const
{
if (!isFinished())
- throw std::runtime_error("Cannot get output shape before execution is finished");
+ return _executors->outputInfo(ind).shape();
const auto &output_desc = _io_desc.outputs.at(ind.value());
return output_desc->info.shape();
}
+size_t Execution::getInputTotalSize(ir::IOIndex ind) const
+{
+ // TODO Support dynamic shape
+ return _executors->inputInfo(ind).total_size();
+}
+
+size_t Execution::getOutputTotalSize(ir::IOIndex ind) const
+{
+ return _executors->outputInfo(ind).total_size();
+}
+
} // namespace exec
} // namespace onert
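
For reference, a minimal usage sketch of the Execution API after this change. This is hypothetical driver code, not part of the patch: it assumes a compiled model whose executors are already available as a std::shared_ptr<exec::IExecutors>, a single float input of shape {1, 224, 224, 3}, and an output of 1000 floats (both sizes are made up for illustration).

#include "exec/Execution.h"

#include <memory>
#include <vector>

using namespace onert;

void run_once(const std::shared_ptr<exec::IExecutors> &executors)
{
  exec::Execution execution{executors};

  // With this patch, changeInputShape() no longer has to be called before
  // setInput(); it only records the shape to be applied at execute() time.
  execution.changeInputShape(ir::IOIndex{0}, ir::Shape{1, 224, 224, 3});

  std::vector<float> input(1 * 224 * 224 * 3); // matches the new shape
  std::vector<float> output(1000);             // hypothetical output size
  execution.setInput(ir::IOIndex{0}, input.data(), input.size() * sizeof(float));
  execution.setOutput(ir::IOIndex{0}, output.data(), output.size() * sizeof(float));

  execution.execute(); // dispatches through IExecutors instead of the primary executor

#ifdef ONERT_TRAIN
  // Training entry points added by this patch; they throw unless the
  // executors are actually exec::train::TrainableExecutors.
  execution.train(/*training_step=*/0);
  float loss = execution.getLoss(ir::IOIndex{0});
  (void)loss;
#endif
}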