diff options
author | Hyeongseok Oh <hseok82.oh@samsung.com> | 2023-04-12 15:42:02 +0900 |
---|---|---|
committer | Hyeongseok Oh <hseok82.oh@samsung.com> | 2023-04-12 15:42:02 +0900 |
commit | 323663bb115ef625642391a5a8e9b35fee8b2ae3 (patch) | |
tree | 17e2a6b91535e6f53f4cacda5e4db6aa0303dd22 /runtime/onert/frontend | |
parent | c690d52bdd137ed6a17353aa7af35e8141ece77b (diff) | |
download | nnfw-323663bb115ef625642391a5a8e9b35fee8b2ae3.tar.gz nnfw-323663bb115ef625642391a5a8e9b35fee8b2ae3.tar.bz2 nnfw-323663bb115ef625642391a5a8e9b35fee8b2ae3.zip |
Imported Upstream version 1.22.0 (tag: upstream/1.22.0)
Diffstat (limited to 'runtime/onert/frontend')
9 files changed, 34 insertions, 26 deletions
diff --git a/runtime/onert/frontend/base_loader/include/base_loader.h b/runtime/onert/frontend/base_loader/include/base_loader.h index cf080abbc..878a594cc 100644 --- a/runtime/onert/frontend/base_loader/include/base_loader.h +++ b/runtime/onert/frontend/base_loader/include/base_loader.h @@ -68,8 +68,7 @@ public: * @param model reference to model */ explicit BaseLoader(std::unique_ptr<ir::Model> &model) - : _base{nullptr}, _pagesize(getpagesize()), _fd(-1), _model(model), _domain_model{nullptr}, - _tensor_names(std::make_shared<std::unordered_map<ir::OperandIndex, std::string>>()) + : _base{nullptr}, _pagesize(getpagesize()), _fd(-1), _model(model), _domain_model{nullptr} { _use_mmaped_data = util::getConfigBool(util::config::USE_MMAPED_DATA); } @@ -194,7 +193,7 @@ protected: const Model *_domain_model; // Maps Tensor indices to onert Operands. std::vector<ir::OperandIndex> _tensor_to_operand; - std::shared_ptr<std::unordered_map<ir::OperandIndex, std::string>> _tensor_names; + std::unordered_map<ir::OperandIndex, std::string> _tensor_names; // Verifier std::unique_ptr<Verifier> _verifier; // Boolean flag to use MMAPED_DATA @@ -411,7 +410,7 @@ ir::OperandIndex BaseLoader<LoaderDomain>::loadOperand(const Tensor *tensor, ir: subg.setOperandValue(operand_index, std::move(data_obj)); } - _tensor_names->emplace(operand_index, tensor->name()->str()); + _tensor_names.emplace(operand_index, tensor->name()->str()); // Variable if (tensor->is_variable()) @@ -1297,8 +1296,8 @@ void BaseLoader<LoaderDomain>::loadIf(const Operator *op, ir::Graph &subg) verifySubgraphIndex(else_index); ir::operation::If::Param param; - param.then_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(then_index)}; - param.else_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(else_index)}; + param.then_subg_index = ir::SubgraphIndex{static_cast<uint16_t>(then_index)}; + param.else_subg_index = ir::SubgraphIndex{static_cast<uint16_t>(else_index)}; loadOperationTo<ir::operation::If>(op, subg, 
param); } @@ -1314,8 +1313,8 @@ void BaseLoader<LoaderDomain>::loadWhile(const Operator *op, ir::Graph &subg) verifySubgraphIndex(body_index); ir::operation::While::Param param; - param.cond_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(cond_index)}; - param.body_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(body_index)}; + param.cond_subg_index = ir::SubgraphIndex{static_cast<uint16_t>(cond_index)}; + param.body_subg_index = ir::SubgraphIndex{static_cast<uint16_t>(body_index)}; loadOperationTo<ir::operation::While>(op, subg, param); } @@ -1663,6 +1662,12 @@ void BaseLoader<LoaderDomain>::loadOperation(const Operator *op, ir::Graph &subg case BuiltinOperator::BuiltinOperator_DEPTH_TO_SPACE: loadDepthToSpace(op, subg); return; + case BuiltinOperator::BuiltinOperator_EMBEDDING_LOOKUP: + loadOperationTo<ir::operation::EmbeddingLookup>(op, subg); + return; + case BuiltinOperator::BuiltinOperator_HASHTABLE_LOOKUP: + loadOperationTo<ir::operation::HashtableLookup>(op, subg); + return; default: throw std::runtime_error( std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op))); @@ -1682,10 +1687,15 @@ template <typename LoaderDomain> void BaseLoader<LoaderDomain>::loadModel() // Load subgraphs and map operations on subgraph const auto subgraphs = _domain_model->subgraphs(); auto model = std::make_unique<ir::Model>(); - for (uint32_t subgraph_index = 0; subgraph_index < subgraphs->size(); ++subgraph_index) + if (subgraphs->size() - 1 > ir::SubgraphIndex::max()) + throw std::runtime_error{"The number of subgraphs cannot exceed " + + std::to_string(ir::SubgraphIndex::max() + 1)}; + for (uint16_t subgraph_index = 0; subgraph_index < subgraphs->size(); ++subgraph_index) { auto subg = loadSubgraph((*_domain_model->subgraphs())[subgraph_index]); - model->push(ir::SubgraphIndex{subgraph_index}, std::move(subg)); + // NOTE: Used () instead of {}, which does not check narrowing. 
+ // It is okay since overflow is checked the above if-statement. + model->push(ir::SubgraphIndex(subgraph_index), std::move(subg)); } _model = std::move(model); } diff --git a/runtime/onert/frontend/circle/src/circle_loader.cc b/runtime/onert/frontend/circle/src/circle_loader.cc index 5abcc9cd0..5bf626d6c 100644 --- a/runtime/onert/frontend/circle/src/circle_loader.cc +++ b/runtime/onert/frontend/circle/src/circle_loader.cc @@ -112,13 +112,13 @@ private: for (const std::int32_t input_ind : *circle_subg->inputs()) { subg->addInput(tensorIdxToOperandIdx(input_ind), - _tensor_names->at(_tensor_to_operand[input_ind])); + _tensor_names.at(_tensor_to_operand[input_ind])); } // Set outputs for (const std::int32_t output_ind : *circle_subg->outputs()) { subg->addOutput(tensorIdxToOperandIdx(output_ind), - _tensor_names->at(_tensor_to_operand[output_ind])); + _tensor_names.at(_tensor_to_operand[output_ind])); } // Create operations for (const auto *op : *circle_subg->operators()) diff --git a/runtime/onert/frontend/nnapi/CMakeLists.txt b/runtime/onert/frontend/nnapi/CMakeLists.txt index dafd84ccf..b66b32e89 100644 --- a/runtime/onert/frontend/nnapi/CMakeLists.txt +++ b/runtime/onert/frontend/nnapi/CMakeLists.txt @@ -24,4 +24,4 @@ target_link_libraries(test_onert_frontend_nnapi PRIVATE ${LIB_ONERT} dl) target_link_libraries(test_onert_frontend_nnapi PRIVATE gtest) target_link_libraries(test_onert_frontend_nnapi PRIVATE gtest_main) -install(TARGETS test_onert_frontend_nnapi DESTINATION unittest_standalone) +install(TARGETS test_onert_frontend_nnapi DESTINATION unittest) diff --git a/runtime/onert/frontend/nnapi/compilation.cc b/runtime/onert/frontend/nnapi/compilation.cc index 871c040ef..2c56f061a 100644 --- a/runtime/onert/frontend/nnapi/compilation.cc +++ b/runtime/onert/frontend/nnapi/compilation.cc @@ -58,7 +58,7 @@ int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation) return ANEURALNETWORKS_UNEXPECTED_NULL; } - if (compilation->state() != 
::onert::compiler::State::CREATED) + if (compilation->isFinished()) { VERBOSE(NNAPI::Compilation) << "finish: Already finished" << std::endl; return ANEURALNETWORKS_BAD_STATE; @@ -87,7 +87,7 @@ int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compila return ANEURALNETWORKS_UNEXPECTED_NULL; } - if (compilation->state() != ::onert::compiler::State::CREATED) + if (compilation->isFinished()) { VERBOSE(NNAPI::Compilation) << "setPreference: Already finished" << std::endl; return ANEURALNETWORKS_BAD_STATE; diff --git a/runtime/onert/frontend/nnapi/execution.cc b/runtime/onert/frontend/nnapi/execution.cc index 19636a84d..4e1a985f3 100644 --- a/runtime/onert/frontend/nnapi/execution.cc +++ b/runtime/onert/frontend/nnapi/execution.cc @@ -37,7 +37,7 @@ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation, return ANEURALNETWORKS_UNEXPECTED_NULL; } - std::shared_ptr<onert::exec::Executors> executors; + std::shared_ptr<onert::exec::IExecutors> executors; compilation->publish(executors); diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc index bb247b97f..3b5edc180 100644 --- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc @@ -26,9 +26,7 @@ ANeuralNetworksCompilation::ANeuralNetworksCompilation(const ANeuralNetworksMode _compiler{std::make_shared<compiler::Compiler>(_model, *_coptions)} { if (model->allowedToFp16()) - { - _compiler->enableToFp16(); - } + _coptions->enableToFp16(); } bool ANeuralNetworksCompilation::finish() noexcept @@ -36,6 +34,7 @@ bool ANeuralNetworksCompilation::finish() noexcept try { _artifact = _compiler->compile(); + _compiler = nullptr; } catch (const std::exception &e) { diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h 
index dff5c6dc6..3898f1d5e 100644 --- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h @@ -22,7 +22,7 @@ #include "compiler/Compiler.h" #include "ir/Graph.h" #include "ir/Model.h" -#include "exec/Executors.h" +#include "exec/IExecutors.h" #include "util/TracingCtx.h" struct ANeuralNetworksCompilation @@ -32,9 +32,9 @@ public: public: bool finish() noexcept; + bool isFinished() noexcept { return _compiler == nullptr; } - onert::compiler::State state(void) noexcept { return _compiler->state(); } - void publish(std::shared_ptr<onert::exec::Executors> &executors) noexcept + void publish(std::shared_ptr<onert::exec::IExecutors> &executors) noexcept { executors = _artifact ? _artifact->_executors : nullptr; } diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h index 110c7cd55..6fbc4c2e0 100644 --- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h @@ -26,7 +26,7 @@ struct ANeuralNetworksExecution { public: - ANeuralNetworksExecution(const std::shared_ptr<onert::exec::Executors> &executors) + ANeuralNetworksExecution(const std::shared_ptr<onert::exec::IExecutors> &executors) : _execution{std::make_shared<onert::exec::Execution>(executors)} { // DO NOTHING diff --git a/runtime/onert/frontend/tflite/src/tflite_loader.cc b/runtime/onert/frontend/tflite/src/tflite_loader.cc index fe69e4e2a..dc8564632 100644 --- a/runtime/onert/frontend/tflite/src/tflite_loader.cc +++ b/runtime/onert/frontend/tflite/src/tflite_loader.cc @@ -99,13 +99,13 @@ private: for (const std::int32_t input_ind : *tflite_subg->inputs()) { subg->addInput(tensorIdxToOperandIdx(input_ind), - _tensor_names->at(_tensor_to_operand[input_ind])); + _tensor_names.at(_tensor_to_operand[input_ind])); } // Set outputs for (const std::int32_t output_ind : 
*tflite_subg->outputs()) { subg->addOutput(tensorIdxToOperandIdx(output_ind), - _tensor_names->at(_tensor_to_operand[output_ind])); + _tensor_names.at(_tensor_to_operand[output_ind])); } // Create operations for (const auto *op : *tflite_subg->operators()) @@ -113,7 +113,6 @@ private: loadOperation(op, *subg); } - subg->setTensorName(_tensor_names); subg->verify(); return subg; |