summaryrefslogtreecommitdiff
path: root/runtime/onert/frontend
diff options
context:
space:
mode:
authorChunseok Lee <chunseok.lee@samsung.com>2020-10-28 12:16:55 +0900
committerChunseok Lee <chunseok.lee@samsung.com>2020-10-28 12:16:55 +0900
commitc55f8a6db48cda9d3a78048338b7f18c4cca62b8 (patch)
tree761ee8e171e5203f5c598ad93b2e7e0bc2e31aa2 /runtime/onert/frontend
parent74476a2d0296bdad70a2f7f90bc7419a8b05bffd (diff)
downloadnnfw-c55f8a6db48cda9d3a78048338b7f18c4cca62b8.tar.gz
nnfw-c55f8a6db48cda9d3a78048338b7f18c4cca62b8.tar.bz2
nnfw-c55f8a6db48cda9d3a78048338b7f18c4cca62b8.zip
Diffstat (limited to 'runtime/onert/frontend')
-rw-r--r--runtime/onert/frontend/base_loader/include/base_loader.h1131
-rw-r--r--runtime/onert/frontend/circle/CMakeLists.txt2
-rw-r--r--runtime/onert/frontend/circle/src/circle_loader.cc5
-rw-r--r--runtime/onert/frontend/nnapi/execution.cc24
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc50
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h3
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.test.cc (renamed from runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc)6
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc155
-rw-r--r--runtime/onert/frontend/tflite/CMakeLists.txt2
-rw-r--r--runtime/onert/frontend/tflite/src/tflite_loader.cc5
10 files changed, 550 insertions, 833 deletions
diff --git a/runtime/onert/frontend/base_loader/include/base_loader.h b/runtime/onert/frontend/base_loader/include/base_loader.h
index 480452e01..d21001e59 100644
--- a/runtime/onert/frontend/base_loader/include/base_loader.h
+++ b/runtime/onert/frontend/base_loader/include/base_loader.h
@@ -1,4 +1,5 @@
/*
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
* Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -38,7 +39,7 @@ namespace onert
namespace base_loader
{
-template <typename LoaderDomain, typename SpecificLoader> class BaseLoader
+template <typename LoaderDomain> class BaseLoader
{
protected:
using Verifier = typename LoaderDomain::Verifier;
@@ -69,6 +70,7 @@ public:
explicit BaseLoader(std::unique_ptr<ir::Subgraphs> &subgs)
: _base{nullptr}, _pagesize(getpagesize()), _fd(-1), _subgraphs(subgs), _model{nullptr}
{
+ _use_mmaped_data = util::getConfigBool(util::config::USE_MMAPED_DATA);
}
/**
@@ -93,7 +95,6 @@ protected:
ir::Activation convertActivation(ActivationFunctionType type);
ir::DataType tensorTypeToDataType(TensorType type);
ir::OperandIndex tensorIdxToOperandIdx(int32_t tensorIdx);
- void deallocateMmappedArea(uint8_t *ptr, size_t size);
// Create operands form tflite::Tensor
ir::OperandIndex loadOperand(const Tensor *tensor, ir::Graph &subg);
@@ -107,7 +108,11 @@ protected:
// Load Pool2D param
template <typename Param> void loadPool2DOptions(Param &param, const Pool2DOptions *options);
+private:
+ virtual std::unique_ptr<ir::Graph> loadSubgraph(const SubGraph *subg) = 0;
// Operations
+ template <typename OpIR, typename... Args>
+ const OpIR *loadOperationTo(const Operator *op, ir::Graph &subg, Args &&... args);
void loadConv2D(const Operator *op, ir::Graph &subg);
void loadDepthwiseConv2D(const Operator *op, ir::Graph &subg);
void loadTransposeConv(const Operator *op, ir::Graph &subg);
@@ -115,62 +120,50 @@ protected:
void loadReshape(const Operator *op, ir::Graph &subg);
void loadSoftmax(const Operator *op, ir::Graph &subg);
void loadConcatenation(const Operator *op, ir::Graph &subg);
- void loadFill(const Operator *op, ir::Graph &subg);
void loadFC(const Operator *op, ir::Graph &subg);
- template <ir::operation::BinaryArithmetic::ArithmeticType op_type>
- void loadBinaryArithmetic(const Operator *op, ir::Graph &subg);
+ void loadBinaryArithmetic(const Operator *op, ir::Graph &subg,
+ ir::operation::BinaryArithmetic::ArithmeticType op_type);
void loadAddV2(const Operator *op, ir::Graph &subg);
void loadPack(const Operator *op, ir::Graph &subg);
void loadResizeBilinear(const Operator *op, ir::Graph &subg);
void loadResizeNearestNeighbor(const Operator *op, ir::Graph &subg);
- void loadSelect(const Operator *op, ir::Graph &subg);
- void loadSquaredDifference(const Operator *op, ir::Graph &subg);
- void loadTranspose(const Operator *op, ir::Graph &subg);
- template <ir::operation::Reduce::ReduceType reduce_type>
- void loadReduce(const Operator *op, ir::Graph &subg);
+ void loadReduce(const Operator *op, ir::Graph &subg,
+ ir::operation::Reduce::ReduceType reduce_type);
void loadReduceAll(const Operator *op, ir::Graph &subg);
- void loadReverseV2(const Operator *op, ir::Graph &subg);
- void loadPad(const Operator *op, ir::Graph &subg);
void loadElementwiseActivation(const Operator *op, ir::Graph &subg,
ir::operation::ElementwiseActivation::Type op_type,
float alpha = 0.f, float beta = 0.f);
- template <ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type>
- void loadElementwiseBinary(const Operator *op, ir::Graph &subg);
+ void loadElementwiseBinary(const Operator *op, ir::Graph &subg,
+ ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type);
void loadElementwiseUnary(const Operator *op, ir::Graph &subg,
ir::operation::ElementwiseUnary::Type op_type);
- void loadExpandDims(const Operator *op, ir::Graph &subg);
void loadGather(const Operator *op, ir::Graph &subg);
void loadCustom(const Operator *op, ir::Graph &subg);
- void loadSpaceToBatchND(const Operator *op, ir::Graph &subg);
void loadBatchMatMul(const Operator *op, ir::Graph &subg);
- void loadBatchToSpaceND(const Operator *op, ir::Graph &subg);
void loadSqueeze(const Operator *op, ir::Graph &subg);
- void loadPrelu(const Operator *op, ir::Graph &subg);
void loadSplit(const Operator *op, ir::Graph &subg);
void loadSplitV(const Operator *op, ir::Graph &subg);
- void loadSlice(const Operator *op, ir::Graph &subg);
void loadStridedSlice(const Operator *op, ir::Graph &subg);
void loadUnpack(const Operator *op, ir::Graph &subg);
void loadComparison(const Operator *op, ir::Graph &subg);
void loadEinsum(const Operator *op, ir::Graph &subg);
void loadOneHot(const Operator *op, ir::Graph &subg);
- void loadShape(const Operator *op, ir::Graph &subg);
void loadIf(const Operator *op, ir::Graph &subg);
void loadWhile(const Operator *op, ir::Graph &subg);
void loadArgMax(const Operator *op, ir::Graph &subg);
- void loadPow(const Operator *op, ir::Graph &subg);
- void loadTile(const Operator *op, ir::Graph &subg);
- void loadRange(const Operator *op, ir::Graph &subg);
- void loadRank(const Operator *op, ir::Graph &subg);
- void loadMatrixBandPart(const Operator *op, ir::Graph &subg);
- void loadBroadcastTo(const Operator *op, ir::Graph &subg);
void loadFusedBatchNorm(const Operator *op, ir::Graph &subg);
void loadLogSoftmax(const Operator *op, ir::Graph &subg);
void loadSpaceToDepth(const Operator *op, ir::Graph &subg);
- void loadStatelessRandomUniform(const Operator *op, ir::Graph &subg);
- void loadL2Normalization(const Operator *op, ir::Graph &subg);
void loadLeakyRelu(const Operator *op, ir::Graph &subg);
+ void verifySubgraphIndex(int subg_index)
+ {
+ const auto num_subgraphs = _model->subgraphs()->size();
+ if (subg_index < 0 || subg_index >= static_cast<int32_t>(num_subgraphs))
+ throw std::runtime_error{std::string{"Invalid subgraph index - "} +
+ std::to_string(subg_index)};
+ }
+
protected:
// Base address for mapped region for loading (if needed)
uint8_t *_base;
@@ -186,10 +179,12 @@ protected:
std::unordered_map<ir::OperandIndex, std::string> _tensor_names;
// Verifier
std::unique_ptr<Verifier> _verifier;
+ // Boolean flag to use MMAPED_DATA
+ bool _use_mmaped_data = false;
};
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::loadFromFile(const char *file_path)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::BaseLoader::loadFromFile(const char *file_path)
{
_fd = open(file_path, O_RDONLY);
if (_fd < 0)
@@ -216,22 +211,22 @@ void BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::loadFromFile(const ch
_verifier = std::make_unique<Verifier>(reinterpret_cast<const std::uint8_t *>(_base), size);
loadModel();
+ munmap(_base, size);
close(_fd);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::loadFromBuffer(uint8_t *buffer,
- size_t size)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::BaseLoader::loadFromBuffer(uint8_t *buffer, size_t size)
{
_base = buffer;
_verifier = std::make_unique<Verifier>(reinterpret_cast<const std::uint8_t *>(_base), size);
loadModel();
}
-template <typename LoaderDomain, typename SpecificLoader>
-ir::Activation BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::convertActivation(
- const ActivationFunctionType type)
+template <typename LoaderDomain>
+ir::Activation
+BaseLoader<LoaderDomain>::BaseLoader::convertActivation(const ActivationFunctionType type)
{
switch (type)
{
@@ -246,14 +241,13 @@ ir::Activation BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::convertActi
case ActivationFunctionType::ActivationFunctionType_TANH:
return ir::Activation::TANH;
default:
- throw std::runtime_error(std::string("Unsupported activation type: ")
- .append(EnumNameActivationFunctionType(type)));
+ throw std::runtime_error(std::string("Unsupported or invalid activation type: ") +
+ std::to_string(static_cast<int>(type)));
}
}
-template <typename LoaderDomain, typename SpecificLoader>
-ir::DataType
-BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::tensorTypeToDataType(const TensorType type)
+template <typename LoaderDomain>
+ir::DataType BaseLoader<LoaderDomain>::BaseLoader::tensorTypeToDataType(const TensorType type)
{
switch (type)
{
@@ -275,39 +269,13 @@ BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::tensorTypeToDataType(const
}
}
-template <typename LoaderDomain, typename SpecificLoader>
-ir::OperandIndex
-BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::tensorIdxToOperandIdx(int32_t tensorIdx)
+template <typename LoaderDomain>
+ir::OperandIndex BaseLoader<LoaderDomain>::BaseLoader::tensorIdxToOperandIdx(int32_t tensorIdx)
{
return isOptionalInputTensor(tensorIdx) ? ir::OperandIndex() : _tensor_to_operand[tensorIdx];
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::deallocateMmappedArea(uint8_t *ptr,
- size_t size)
-{
- // Calculate offset from base address of mapped region
- ptrdiff_t unaligned_offset_start = ptr - _base;
- ptrdiff_t unaligned_offset_end = unaligned_offset_start + size;
-
- // Calculated aligned offset from base address of mapped region
- // munmap accepts memory address which is a multiple of the pagesize
- ptrdiff_t aligned_offset_start =
- ((unaligned_offset_start + (_pagesize - 1)) / _pagesize) * _pagesize;
- ptrdiff_t aligned_offset_end = (unaligned_offset_end / _pagesize) * _pagesize;
-
- ptrdiff_t area_size = aligned_offset_end - aligned_offset_start;
- if (area_size > 0)
- {
- // Unmap mapped region for CachedData
- if (munmap(_base + aligned_offset_start, area_size) == -1)
- {
- VERBOSE(BASE_LOADER) << "munmap failed" << std::endl;
- }
- }
-}
-
-/* Copied from tensorflow lite. Need to append copyright */
+/* Copy is copied from tensorflow lite */
template <typename T> bool Copy(const T *data_ptr, std::vector<uint16_t> &arr)
{
if (data_ptr->values() == nullptr)
@@ -324,9 +292,8 @@ template <typename T> bool Copy(const T *data_ptr, std::vector<uint16_t> &arr)
return true;
}
-template <typename LoaderDomain, typename SpecificLoader>
-ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Tensor *tensor,
- ir::Graph &subg)
+template <typename LoaderDomain>
+ir::OperandIndex BaseLoader<LoaderDomain>::loadOperand(const Tensor *tensor, ir::Graph &subg)
{
ir::Shape shape;
// Shape
@@ -386,18 +353,44 @@ ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Ten
{
std::vector<uint16_t> w1_segments;
std::vector<uint16_t> w1_indices;
- // ignore traversal_order, block_map
+ // check traversal_order
+ if (src_sparsity->traversal_order())
+ {
+ const int traversal_order_size = src_sparsity->traversal_order()->size();
+ for (int i = 0; i < traversal_order_size; ++i)
+ {
+ if (i != src_sparsity->traversal_order()->Get(i))
+ throw std::runtime_error("traversal_order [0, 1, ..., n-1] is only supported.");
+ }
+ }
+ // check block_map
+ int block_rank = 0;
+ if (src_sparsity->block_map())
+ {
+ block_rank = src_sparsity->block_map()->size();
+ for (int i = 0; i < block_rank; ++i)
+ {
+ if (i != src_sparsity->block_map()->Get(i))
+ throw std::runtime_error("block_map [0, 1, ..., n-1] is only supported.");
+ }
+ }
// load metadata
- const size_t dim_metadata_size = src_sparsity->dim_metadata()->size();
- if (dim_metadata_size != 2)
- throw std::runtime_error("sparse tensor is supported only for 2D");
+ const int dim_metadata_size = src_sparsity->dim_metadata()->size();
+ auto dense_rank = shape.rank();
+ if (dense_rank + block_rank != dim_metadata_size)
+ throw std::runtime_error("sparsity dim_metadata length is wrong.");
+ bool random_sparsity = dim_metadata_size == 2 && block_rank == 0;
+ bool block2D_sparsity = dim_metadata_size == 4 && block_rank == 2;
+ if (dim_metadata_size != !random_sparsity && !block2D_sparsity)
+ throw std::runtime_error(
+ "sparsity is supported only for 2D tensor with random or 16x1 block sparsity.");
+
const auto *src_metadata = src_sparsity->dim_metadata()->Get(0);
if (src_metadata->format() != DimensionType::DimensionType_DENSE)
throw std::runtime_error("sparse tensor dim[0] is not DENSE");
src_metadata = src_sparsity->dim_metadata()->Get(1);
if (src_metadata->format() != DimensionType::DimensionType_SPARSE_CSR)
throw std::runtime_error("sparse tensor dim[0] is not SPARSE_CSR");
-
auto ParseSparseIndexVector = [src_metadata, &w1_segments, &w1_indices]() {
if (src_metadata->array_segments() == nullptr || src_metadata->array_indices() == nullptr)
return false;
@@ -433,7 +426,17 @@ ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Ten
};
if (ParseSparseIndexVector() == false)
throw std::runtime_error("Error during parsing sparsity index information");
- type_info.sparse2DMetadata(std::move(w1_segments), std::move(w1_indices));
+ // Get block size
+ std::vector<int32_t> block_size;
+ for (int i = 0; i < block_rank; ++i)
+ {
+ auto block_metadata = src_sparsity->dim_metadata()->Get(dense_rank + i);
+ if (block_metadata->format() != DimensionType::DimensionType_DENSE)
+ throw std::runtime_error("block dimension must be DENSE.");
+ block_size.push_back(block_metadata->dense_size());
+ }
+ type_info.sparsity(std::make_shared<ir::Sparsity>(std::move(w1_segments), std::move(w1_indices),
+ std::move(block_size)));
}
// Create operand
const auto operand_index = subg.addOperand(shape, type_info);
@@ -450,8 +453,28 @@ ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Ten
}
else // Model is loaded(mmap'd) from a file
{
- data_obj = std::make_unique<ir::CachedData>(data->data(), data->size());
- deallocateMmappedArea(const_cast<uint8_t *>(data->data()), data->size());
+ size_t data_size = data->size();
+ ptrdiff_t unaligned_offset_start = data->data() - _base;
+ ptrdiff_t offset_end = unaligned_offset_start + data_size;
+
+ // Calculated aligned offset from base address of mapped region
+ // munmap accepts memory address which is a multiple of the pagesize
+ ptrdiff_t aligned_offset_start = (unaligned_offset_start / _pagesize) * _pagesize;
+ size_t mmap_size = offset_end - aligned_offset_start;
+
+ if (_use_mmaped_data)
+ {
+ data_obj = std::make_unique<ir::MMapedData>(_fd, aligned_offset_start, mmap_size,
+ unaligned_offset_start, data_size);
+ }
+ else
+ {
+ size_t offset = unaligned_offset_start - aligned_offset_start;
+ uint8_t *mmap_base = static_cast<uint8_t *>(
+ mmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, _fd, aligned_offset_start));
+ data_obj = std::make_unique<ir::CachedData>(mmap_base + offset, data_size);
+ munmap(mmap_base, mmap_size);
+ }
}
subg.setOperandValue(operand_index, std::move(data_obj));
}
@@ -465,10 +488,9 @@ ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Ten
return operand_index;
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadOperationIO(const Operator *op,
- ir::OperandIndexSequence &inputs,
- ir::OperandIndexSequence &outputs)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs,
+ ir::OperandIndexSequence &outputs)
{
for (const std::int32_t idx : *op->inputs())
{
@@ -490,120 +512,116 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperationIO(const Operator *o
}
}
-template <typename LoaderDomain, typename SpecificLoader>
+template <typename LoaderDomain>
template <typename Param, typename OptionsType>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadStridesAndPaddings(Param &param,
- const OptionsType *options)
+void BaseLoader<LoaderDomain>::loadStridesAndPaddings(Param &param, const OptionsType *options)
{
// Strides
param.stride.vertical = options->stride_h();
param.stride.horizontal = options->stride_w();
// Paddings
- if (options->padding() == Padding::Padding_SAME)
- param.padding.type = ir::PaddingType::SAME;
- if (options->padding() == Padding::Padding_VALID)
- param.padding.type = ir::PaddingType::VALID;
+ switch (options->padding())
+ {
+ case Padding::Padding_SAME:
+ param.padding.type = ir::PaddingType::SAME;
+ break;
+ case Padding::Padding_VALID:
+ param.padding.type = ir::PaddingType::VALID;
+ break;
+ default:
+ throw std::runtime_error{"Invalid padding type"};
+ }
// param paddings indexes unused
}
-template <typename LoaderDomain, typename SpecificLoader>
+template <typename LoaderDomain>
template <typename Param>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPool2DOptions(Param &param,
- const Pool2DOptions *options)
+void BaseLoader<LoaderDomain>::loadPool2DOptions(Param &param, const Pool2DOptions *options)
{
// Strides and Paddings
+ if (options->stride_h() <= 0 || options->stride_w() <= 0)
+ throw std::runtime_error{"Invalid stride vertical or horizontal - both must be bigger than 0"};
loadStridesAndPaddings(param, options);
// Filter width and height
// Strides
+ if (options->filter_width() <= 0 || options->filter_height() <= 0)
+ throw std::runtime_error{"Invalid filter width or height - both must be bigger than 0"};
param.kw = options->filter_width();
param.kh = options->filter_height();
// Activation
param.activation = convertActivation(options->fused_activation_function());
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadConv2D(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+template <typename OpIR, typename... Args>
+const OpIR *BaseLoader<LoaderDomain>::loadOperationTo(const Operator *op, ir::Graph &subg,
+ Args &&... args)
{
+ static_assert(sizeof...(args) <= 1, "You can't have more than 1 arguments!");
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
+ std::unique_ptr<OpIR> new_op(new OpIR(inputs, outputs, std::forward<Args>(args)...));
+ auto ret = new_op.get();
+ subg.addOperation(std::move(new_op));
+
+ return ret;
+}
+
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadConv2D(const Operator *op, ir::Graph &subg)
+{
ir::operation::Conv2D::Param param;
const auto *options = op->builtin_options_as_Conv2DOptions();
param.activation = convertActivation(options->fused_activation_function());
loadStridesAndPaddings(param, options);
-
param.dilation.width_factor = options->dilation_w_factor();
param.dilation.height_factor = options->dilation_h_factor();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Conv2D(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Conv2D>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadDepthwiseConv2D(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadDepthwiseConv2D(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::DepthwiseConv2D::Param param;
const auto *options = op->builtin_options_as_DepthwiseConv2DOptions();
param.activation = convertActivation(options->fused_activation_function());
loadStridesAndPaddings(param, options);
- // Multiplier
param.multiplier = options->depth_multiplier();
// Dilation h/w factor unused
- std::unique_ptr<ir::Operation> new_op(new ir::operation::DepthwiseConv2D(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+
+ loadOperationTo<ir::operation::DepthwiseConv2D>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadTransposeConv(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadTransposeConv(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::TransposeConv::Param param;
const auto *options = op->builtin_options_as_TransposeConvOptions();
loadStridesAndPaddings(param, options);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::TransposeConv(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+
+ loadOperationTo<ir::operation::TransposeConv>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPool2D(const Operator *op, ir::Graph &subg,
- ir::operation::Pool2D::PoolType op_type)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadPool2D(const Operator *op, ir::Graph &subg,
+ ir::operation::Pool2D::PoolType op_type)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Pool2D::Param param;
param.op_type = op_type;
const auto *options = op->builtin_options_as_Pool2DOptions();
loadPool2DOptions(param, options);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Pool2D(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Pool2D>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadReshape(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadReshape(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Reshape::Param param{};
const auto *options = op->builtin_options_as_ReshapeOptions();
if (options != nullptr)
@@ -611,99 +629,64 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadReshape(const Operator *op, i
const auto *new_shape = options->new_shape();
if (new_shape)
{
- for (uint i = 0; i < new_shape->Length(); ++i)
+ for (uint i = 0; i < new_shape->size(); ++i)
{
param.new_shape.push_back(new_shape->Get(i));
}
}
}
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Reshape(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Reshape>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSoftmax(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadSoftmax(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Softmax::Param param;
const auto *options = op->builtin_options_as_SoftmaxOptions();
// Beta
param.beta = options->beta();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Softmax(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Softmax>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadConcatenation(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadConcatenation(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Concat::Param param;
const auto *options = op->builtin_options_as_ConcatenationOptions();
// Axis
param.axis = options->axis();
// activation unused
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Concat(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Concat>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadFill(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadFC(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Fill(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
+ ir::operation::FullyConnected::Param param;
+ const auto *options = op->builtin_options_as_FullyConnectedOptions();
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadFC(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
+ param.activation = convertActivation(options->fused_activation_function());
+ // weights_format unused
- loadOperationIO(op, inputs, outputs);
+ const auto fc = loadOperationTo<ir::operation::FullyConnected>(op, subg, param);
- const auto &input_operand = subg.operands().at(inputs.at(ir::operation::FullyConnected::INPUT));
- auto &weights_operand = subg.operands().at(inputs.at(ir::operation::FullyConnected::WEIGHT));
+ const auto &input_operand =
+ subg.operands().at(fc->getInputs().at(ir::operation::FullyConnected::INPUT));
+ auto &weights_operand =
+ subg.operands().at(fc->getInputs().at(ir::operation::FullyConnected::WEIGHT));
if (input_operand.typeInfo().type() == ir::DataType::FLOAT32 &&
weights_operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM)
{
weights_operand.type(ir::DataType::QUANT_INT8_SYMM);
}
-
- ir::operation::FullyConnected::Param param;
- const auto *options = op->builtin_options_as_FullyConnectedOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
- // weights_format unused
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::FullyConnected(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadAddV2(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadAddV2(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::BinaryArithmetic::Param param;
param.arithmetic_type = ir::operation::BinaryArithmetic::ArithmeticType::ADD;
@@ -722,21 +705,13 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadAddV2(const Operator *op, ir:
param.activation = convertActivation(fused_activation_func);
}
- std::unique_ptr<ir::Operation> new_op(
- new ir::operation::BinaryArithmetic(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::BinaryArithmetic>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-template <ir::operation::BinaryArithmetic::ArithmeticType op_type>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadBinaryArithmetic(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadBinaryArithmetic(
+ const Operator *op, ir::Graph &subg, ir::operation::BinaryArithmetic::ArithmeticType op_type)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::BinaryArithmetic::Param param;
param.arithmetic_type = op_type;
switch (op_type)
@@ -771,172 +746,66 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadBinaryArithmetic(const Operat
break;
}
- std::unique_ptr<ir::Operation> new_op(
- new ir::operation::BinaryArithmetic(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::BinaryArithmetic>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPack(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadPack(const Operator *op, ir::Graph &subg)
{
- // This runtime_error will be removed if the one of backend supports this operation
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Pack::Param param;
const auto *options = op->builtin_options_as_PackOptions();
param.num = options->values_count();
param.axis = options->axis();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Pack(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Pack>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadElementwiseActivation(
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadElementwiseActivation(
const Operator *op, ir::Graph &subg, ir::operation::ElementwiseActivation::Type op_type,
float alpha, float beta)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::ElementwiseActivation::Param param;
param.op_type = op_type;
param.alpha = alpha;
param.beta = beta;
- std::unique_ptr<ir::Operation> new_op(
- new ir::operation::ElementwiseActivation(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::ElementwiseActivation>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadResizeBilinear(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadResizeBilinear(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto size = inputs.at(1);
-
- // FIXME Handle ResizeBilinearOptions.
- if (!subg.operands().at(size).isConstant())
- throw std::runtime_error("ResizeBilinear: non-constant 'size' is not supported.");
-
- std::vector<std::int32_t> size_v = subg.operands().at(size).template asVector<std::int32_t>();
-
ir::operation::ResizeBilinear::Param param;
- param.height_out = size_v[0];
- param.width_out = size_v[1];
param.align_corners = op->builtin_options_as_ResizeBilinearOptions()->align_corners();
param.half_pixel_centers = op->builtin_options_as_ResizeBilinearOptions()->half_pixel_centers();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ResizeBilinear({input}, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::ResizeBilinear>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadResizeNearestNeighbor(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadResizeNearestNeighbor(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto size = inputs.at(1);
-
- if (!subg.operands().at(size).isConstant())
- throw std::runtime_error("ResizeNearestNeighbor: non-constant 'size' is not supported.");
-
- std::vector<std::int32_t> size_v = subg.operands().at(size).template asVector<std::int32_t>();
-
ir::operation::ResizeNearestNeighbor::Param param;
- param.height_out = size_v[0];
- param.width_out = size_v[1];
param.align_corners = op->builtin_options_as_ResizeNearestNeighborOptions()->align_corners();
- std::unique_ptr<ir::Operation> new_op(
- new ir::operation::ResizeNearestNeighbor({input}, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSelect(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Select(inputs, outputs));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::ResizeNearestNeighbor>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSquaredDifference(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadReduce(const Operator *op, ir::Graph &subg,
+ ir::operation::Reduce::ReduceType reduce_type)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::SquaredDifference(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadTranspose(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto perm = inputs.at(1);
-
- if (!subg.operands().at(perm).isConstant())
- throw std::runtime_error("Transpose: non-constant 'perm' is not supported.");
-
- ir::operation::Transpose::Param param;
- param.perm = subg.operands().at(perm).template asVector<int>();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Transpose({input}, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-template <ir::operation::Reduce::ReduceType reduce_type>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadReduce(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Reduce::Param param;
param.reduce_type = reduce_type;
param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Reduce(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Reduce>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceAll(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadReduceAll(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Reduce::Param param;
param.reduce_type = ir::operation::Reduce::ReduceType::ALL;
if (op->custom_options() == nullptr)
@@ -952,64 +821,28 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceAll(const Operator *op,
param.keep_dims = attr_map["keep_dims"].AsBool();
}
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Reduce(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadReverseV2(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Reverse(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPad(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Pad(inputs, outputs));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Reduce>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-template <ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadElementwiseBinary(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadElementwiseBinary(
+ const Operator *op, ir::Graph &subg,
+ ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::ElementwiseBinary::Param param;
param.op_type = op_type;
- std::unique_ptr<ir::Operation> new_op(
- new ir::operation::ElementwiseBinary(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::ElementwiseBinary>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadElementwiseUnary(
- const Operator *op, ir::Graph &subg, ir::operation::ElementwiseUnary::Type op_type)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadElementwiseUnary(const Operator *op, ir::Graph &subg,
+ ir::operation::ElementwiseUnary::Type op_type)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::ElementwiseUnary::Param param;
param.op_type = op_type;
+ const auto eu = loadOperationTo<ir::operation::ElementwiseUnary>(op, subg, param);
if (op_type == ir::operation::ElementwiseUnary::Type::CAST)
{
auto qasymm8ToUint8 = [](ir::Operand &operand) {
@@ -1018,61 +851,24 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadElementwiseUnary(
operand.type(ir::DataType::UINT8);
}
};
- qasymm8ToUint8(subg.operands().at(inputs.at(ir::operation::ElementwiseUnary::Input::INPUT)));
- qasymm8ToUint8(subg.operands().at(outputs.at(0)));
+ qasymm8ToUint8(
+ subg.operands().at(eu->getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)));
+ qasymm8ToUint8(subg.operands().at(eu->getOutputs().at(0)));
}
-
- std::unique_ptr<ir::Operation> new_op(
- new ir::operation::ElementwiseUnary(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadExpandDims(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ExpandDims(inputs, outputs));
- subg.addOperation(std::move(new_op));
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadGather(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadGather(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
ir::operation::Gather::Param param;
param.axis = op->builtin_options_as_GatherOptions()->axis();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Gather(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSpaceToBatchND(const Operator *op,
- ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op{new ir::operation::SpaceToBatchND{inputs, outputs}};
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Gather>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadBatchMatMul(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadBatchMatMul(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
ir::operation::BatchMatMul::Param param;
const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
@@ -1105,89 +901,21 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadBatchMatMul(const Operator *o
" as " + EnumNameBuiltinOperator(BuiltinOperator::BuiltinOperator_BATCH_MATMUL));
}
- std::unique_ptr<ir::Operation> new_op{new ir::operation::BatchMatMul{inputs, outputs, param}};
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadBatchToSpaceND(const Operator *op,
- ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op{new ir::operation::BatchToSpaceND{inputs, outputs}};
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMatrixBandPart(const Operator *op,
- ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::MatrixBandPart(inputs, outputs));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::BatchMatMul>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadBroadcastTo(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadSpaceToDepth(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::BroadcastTo(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSpaceToDepth(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
ir::operation::SpaceToDepth::Param param;
-
const auto *options = op->builtin_options_as_SpaceToDepthOptions();
-
param.block_size = options->block_size();
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::SpaceToDepth(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadStatelessRandomUniform(const Operator *op,
- ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::StatelessRandomUniform(inputs, outputs));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::SpaceToDepth>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRank(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Rank(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadCustom(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
@@ -1237,7 +965,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir
loadReduceAll(op, subg);
break;
case BuiltinOP::MatrixBandPart:
- loadMatrixBandPart(op, subg);
+ loadOperationTo<ir::operation::MatrixBandPart>(op, subg);
break;
case BuiltinOP::BatchMatMul:
loadBatchMatMul(op, subg);
@@ -1246,13 +974,13 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir
loadEinsum(op, subg);
break;
case BuiltinOP::BroadcastTo:
- loadBroadcastTo(op, subg);
+ loadOperationTo<ir::operation::BroadcastTo>(op, subg);
break;
case BuiltinOP::FusedBatchNorm:
loadFusedBatchNorm(op, subg);
break;
case BuiltinOP::StatelessRandomUniform:
- loadStatelessRandomUniform(op, subg);
+ loadOperationTo<ir::operation::StatelessRandomUniform>(op, subg);
break;
case BuiltinOP::Erf:
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ERF);
@@ -1285,141 +1013,71 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir
}
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSqueeze(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadSqueeze(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Squeeze::Param param{};
+ ir::operation::Squeeze::Param param;
const auto *options = op->builtin_options_as_SqueezeOptions();
const auto *dims = options->squeeze_dims();
if (dims)
{
- if (dims->Length() > sizeof(param.dims) / sizeof(param.dims[0]))
+ if (dims->size() > sizeof(param.dims) / sizeof(param.dims[0]))
throw std::runtime_error("Squeeze: 'param.ndims' is out of range.");
- param.ndim = dims->Length();
+ param.ndim = dims->size();
for (int i = 0; i < param.ndim; ++i)
param.dims[i] = dims->Get(i);
}
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Squeeze(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Squeeze>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPrelu(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadSplit(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::PReLU(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSplit(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- // Notice : input order is strange for tflite split
- auto input = inputs.at(1);
- auto axis = inputs.at(0);
-
- // FIXME Handle SplitOptions.
- if (!subg.operands().at(axis).isConstant())
- throw std::runtime_error("Split: non-constant 'axis' is not supported.");
-
- ir::operation::Split::Param param{};
- param.axis = subg.operands().at(axis).template asScalar<int>();
+ ir::operation::Split::Param param;
const auto *options = op->builtin_options_as_SplitOptions();
param.num_splits = options->num_splits();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Split({input}, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Split>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSplitV(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadSplitV(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::SplitV::Param param{};
-
+ ir::operation::SplitV::Param param;
const auto *options = op->builtin_options_as_SplitVOptions();
param.num_splits = options->num_splits();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::SplitV(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSlice(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op{new ir::operation::Slice{inputs, outputs}};
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::SplitV>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadStridedSlice(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadStridedSlice(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::StridedSlice::Param param;
-
const auto *options = op->builtin_options_as_StridedSliceOptions();
param.begin_mask = options->begin_mask();
param.end_mask = options->end_mask();
param.shrink_axis_mask = options->shrink_axis_mask();
- std::unique_ptr<ir::Operation> new_op{new ir::operation::StridedSlice{inputs, outputs, param}};
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::StridedSlice>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadUnpack(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadUnpack(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Unpack::Param param;
const auto *options = op->builtin_options_as_UnpackOptions();
param.num = options->num();
param.axis = options->axis();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Unpack(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Unpack>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadComparison(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadComparison(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::Comparison::Param param;
-
const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
switch (builtin_op)
@@ -1447,24 +1105,13 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadComparison(const Operator *op
std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
}
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Comparison(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::Comparison>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadEinsum(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadEinsum(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
ir::operation::Einsum::Param param;
-
- if (inputs.size() != 2)
- {
- throw std::runtime_error{"Einsum: NYI input - only support two inputs"};
- }
-
if (op->custom_options() == nullptr)
{
throw std::runtime_error{"Einsum: empty equation"};
@@ -1478,24 +1125,16 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadEinsum(const Operator *op, ir
param.equation = attr_map["equation"].ToString();
}
- std::unique_ptr<ir::Operation> new_op{new ir::operation::Einsum{inputs, outputs, param}};
- subg.addOperation(std::move(new_op));
+ const auto es = loadOperationTo<ir::operation::Einsum>(op, subg, param);
+ if (es->getInputs().size() != 2)
+ {
+ throw std::runtime_error{"Einsum: NYI input - only support two inputs"};
+ }
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadFusedBatchNorm(const Operator *op,
- ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadFusedBatchNorm(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
ir::operation::FusedBatchNorm::Param param;
-
- if (inputs.size() != 5)
- {
- throw std::runtime_error{"FusedBatchNorm: NYI input - only support five inputs"};
- }
-
if (op->custom_options() == nullptr)
{
throw std::runtime_error{"FusedBatchNorm: empty option"};
@@ -1511,195 +1150,104 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadFusedBatchNorm(const Operator
param.data_format = attr_map["data_format"].ToString();
}
- std::unique_ptr<ir::Operation> new_op{new ir::operation::FusedBatchNorm{inputs, outputs, param}};
- subg.addOperation(std::move(new_op));
+ const auto fbn = loadOperationTo<ir::operation::FusedBatchNorm>(op, subg, param);
+
+ if (fbn->getInputs().size() != 5)
+ {
+ throw std::runtime_error{"FusedBatchNorm: NYI input - only support five inputs"};
+ }
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadOneHot(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadOneHot(const Operator *op, ir::Graph &subg)
{
if (op->inputs()->size() != 4 || op->outputs()->size() != 1)
throw std::runtime_error("OneHot Op has wrong number of input or output tensors.");
- // Set input and output tensors
- ir::OperandIndexSequence inputs, outputs;
- loadOperationIO(op, inputs, outputs);
-
// Set parameter
- const auto axis = op->builtin_options_as_OneHotOptions()->axis();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::OneHot(inputs, outputs, {axis}));
- subg.addOperation(std::move(new_op));
-}
+ ir::operation::OneHot::Param param;
+ param.axis = op->builtin_options_as_OneHotOptions()->axis();
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadShape(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- // ir::operation::Shape::Param param;
- // const auto *options = op->builtin_options_as_ShapeOptions();
- // param.out_type = tensorTypeToDataType(options->out_type());
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Shape(inputs, outputs /*, param*/));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::OneHot>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadIf(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadIf(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
+ const auto *options = op->builtin_options_as_IfOptions();
+ const int32_t then_index = options->then_subgraph_index();
+ const int32_t else_index = options->else_subgraph_index();
- loadOperationIO(op, inputs, outputs);
+ verifySubgraphIndex(then_index);
+ verifySubgraphIndex(else_index);
ir::operation::If::Param param;
- const auto *options = op->builtin_options_as_IfOptions();
- const uint32_t then_index = options->then_subgraph_index();
- const uint32_t else_index = options->else_subgraph_index();
- param.then_subg_index = ir::SubgraphIndex{then_index};
- param.else_subg_index = ir::SubgraphIndex{else_index};
+ param.then_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(then_index)};
+ param.else_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(else_index)};
- std::unique_ptr<ir::Operation> new_op(new ir::operation::If(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::If>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadWhile(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadWhile(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
+ const auto *options = op->builtin_options_as_WhileOptions();
+ const int32_t cond_index = options->cond_subgraph_index();
+ const int32_t body_index = options->body_subgraph_index();
- loadOperationIO(op, inputs, outputs);
+ verifySubgraphIndex(cond_index);
+ verifySubgraphIndex(body_index);
ir::operation::While::Param param;
- const auto *options = op->builtin_options_as_WhileOptions();
- const uint32_t cond_index = options->cond_subgraph_index();
- const uint32_t body_index = options->body_subgraph_index();
- param.cond_subg_index = ir::SubgraphIndex{cond_index};
- param.body_subg_index = ir::SubgraphIndex{body_index};
+ param.cond_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(cond_index)};
+ param.body_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(body_index)};
- std::unique_ptr<ir::Operation> new_op(new ir::operation::While(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::While>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadArgMax(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadArgMax(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- auto inputOperand = subg.operands().at(inputs.at(0));
- auto axisOperand = subg.operands().at(inputs.at(1));
-
- if (!axisOperand.isConstant())
- throw std::runtime_error("ArgMax: non-constant 'axis' is not supported.");
- if (!(axisOperand.operandSize() == 4 && (axisOperand.typeInfo().type() == ir::DataType::INT32 ||
- axisOperand.typeInfo().type() == ir::DataType::INT64)))
- throw std::runtime_error("ArgMax: `axis` with an int32 or int64 element is only supported.");
-
ir::operation::ArgMax::Param param;
- param.axis = axisOperand.template asVector<int>()[0];
const auto output_type = op->builtin_options_as_ArgMaxOptions()->output_type();
switch (output_type)
{
case TensorType::TensorType_INT32:
case TensorType::TensorType_INT64:
+ param.output_type = tensorTypeToDataType(output_type);
break;
default:
throw std::runtime_error("ArgMax: `output_type` must be either int32 or int64.");
}
- param.output_type = tensorTypeToDataType(output_type);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ArgMax(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
+ auto am = loadOperationTo<ir::operation::ArgMax>(op, subg, param);
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPow(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Pow(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRange(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Range(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadTile(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- auto multiples = inputs.at(ir::operation::Tile::MULTIPLES);
-
- if (!subg.operands().at(multiples).isConstant())
- throw std::runtime_error("Tile: non-constant 'multiples' is not supported.");
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Tile(inputs, outputs));
- subg.addOperation(std::move(new_op));
+ auto &axisOperand = subg.operands().at(am->getInputs().at(ir::operation::ArgMax::Input::AXIS));
+ if (!(axisOperand.operandSize() == 4 && (axisOperand.typeInfo().type() == ir::DataType::INT32 ||
+ axisOperand.typeInfo().type() == ir::DataType::INT64)))
+ throw std::runtime_error("ArgMax: `axis` with an int32 or int64 element is only supported.");
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadLogSoftmax(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadLogSoftmax(const Operator *op, ir::Graph &subg)
{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
ir::operation::LogSoftmax::Param param;
-
// In tflite, beta is fixed to 1.0 and axis is fixed to -1.
param.beta = 1.0f;
param.axis = -1;
- std::unique_ptr<ir::Operation> new_op(new ir::operation::LogSoftmax(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadL2Normalization(const Operator *op,
- ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::L2Normalization(inputs, outputs));
- subg.addOperation(std::move(new_op));
+ loadOperationTo<ir::operation::LogSoftmax>(op, subg, param);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadLeakyRelu(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadLeakyRelu(const Operator *op, ir::Graph &subg)
{
float alpha = op->builtin_options_as_LeakyReluOptions()->alpha();
loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LEAKY_RELU, alpha,
1.f);
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op, ir::Graph &subg)
+template <typename LoaderDomain>
+void BaseLoader<LoaderDomain>::loadOperation(const Operator *op, ir::Graph &subg)
{
const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
@@ -1733,16 +1281,16 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadFC(op, subg);
return;
case BuiltinOperator::BuiltinOperator_ADD:
- loadBinaryArithmetic<ir::operation::BinaryArithmetic::ArithmeticType::ADD>(op, subg);
+ loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::ADD);
return;
case BuiltinOperator::BuiltinOperator_SUB:
- loadBinaryArithmetic<ir::operation::BinaryArithmetic::ArithmeticType::SUB>(op, subg);
+ loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::SUB);
return;
case BuiltinOperator::BuiltinOperator_MUL:
- loadBinaryArithmetic<ir::operation::BinaryArithmetic::ArithmeticType::MUL>(op, subg);
+ loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::MUL);
return;
case BuiltinOperator::BuiltinOperator_DIV:
- loadBinaryArithmetic<ir::operation::BinaryArithmetic::ArithmeticType::DIV>(op, subg);
+ loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::DIV);
return;
case BuiltinOperator::BuiltinOperator_PACK:
loadPack(op, subg);
@@ -1769,40 +1317,37 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::RSQRT);
return;
case BuiltinOperator::BuiltinOperator_SELECT:
- loadSelect(op, subg);
- return;
case BuiltinOperator::BuiltinOperator_SELECT_V2:
- // Use same loader with BuiltinOperator_SELECT
- loadSelect(op, subg);
+ loadOperationTo<ir::operation::Select>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_SQRT:
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQRT);
return;
case BuiltinOperator::BuiltinOperator_SQUARED_DIFFERENCE:
- loadSquaredDifference(op, subg);
+ loadOperationTo<ir::operation::SquaredDifference>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_TANH:
loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::TANH, 1.f,
1.f);
return;
case BuiltinOperator::BuiltinOperator_TRANSPOSE:
- loadTranspose(op, subg);
+ loadOperationTo<ir::operation::Transpose>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_MEAN:
- loadReduce<ir::operation::Reduce::ReduceType::MEAN>(op, subg);
+ loadReduce(op, subg, ir::operation::Reduce::ReduceType::MEAN);
return;
case BuiltinOperator::BuiltinOperator_REDUCE_ANY:
- loadReduce<ir::operation::Reduce::ReduceType::ANY>(op, subg);
+ loadReduce(op, subg, ir::operation::Reduce::ReduceType::ANY);
return;
case BuiltinOperator::BuiltinOperator_REDUCE_MAX:
- loadReduce<ir::operation::Reduce::ReduceType::MAX>(op, subg);
+ loadReduce(op, subg, ir::operation::Reduce::ReduceType::MAX);
return;
case BuiltinOperator::BuiltinOperator_REVERSE_V2:
- loadReverseV2(op, subg);
+ loadOperationTo<ir::operation::Reverse>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_PAD:
case BuiltinOperator::BuiltinOperator_PADV2:
- loadPad(op, subg);
+ loadOperationTo<ir::operation::Pad>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_LOGISTIC:
loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LOGISTIC);
@@ -1811,19 +1356,19 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::EXP);
return;
case BuiltinOperator::BuiltinOperator_EXPAND_DIMS:
- loadExpandDims(op, subg);
+ loadOperationTo<ir::operation::ExpandDims>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_GATHER:
loadGather(op, subg);
return;
case BuiltinOperator::BuiltinOperator_SPACE_TO_BATCH_ND:
- loadSpaceToBatchND(op, subg);
+ loadOperationTo<ir::operation::SpaceToBatchND>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_BATCH_TO_SPACE_ND:
- loadBatchToSpaceND(op, subg);
+ loadOperationTo<ir::operation::BatchToSpaceND>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_SUM:
- loadReduce<ir::operation::Reduce::ReduceType::SUM>(op, subg);
+ loadReduce(op, subg, ir::operation::Reduce::ReduceType::SUM);
return;
case BuiltinOperator::BuiltinOperator_CUSTOM:
loadCustom(op, subg);
@@ -1832,7 +1377,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadSqueeze(op, subg);
return;
case BuiltinOperator::BuiltinOperator_PRELU:
- loadPrelu(op, subg);
+ loadOperationTo<ir::operation::PReLU>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_SPLIT:
loadSplit(op, subg);
@@ -1841,7 +1386,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadSplitV(op, subg);
return;
case BuiltinOperator::BuiltinOperator_SLICE:
- loadSlice(op, subg);
+ loadOperationTo<ir::operation::Slice>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_STRIDED_SLICE:
loadStridedSlice(op, subg);
@@ -1850,10 +1395,10 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadUnpack(op, subg);
return;
case BuiltinOperator::BuiltinOperator_MINIMUM:
- loadElementwiseBinary<ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN>(op, subg);
+ loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN);
return;
case BuiltinOperator::BuiltinOperator_MAXIMUM:
- loadElementwiseBinary<ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX>(op, subg);
+ loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX);
return;
case BuiltinOperator::BuiltinOperator_CAST:
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::CAST);
@@ -1879,10 +1424,10 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SIN);
return;
case BuiltinOperator::BuiltinOperator_SHAPE:
- loadShape(op, subg);
+ loadOperationTo<ir::operation::Shape>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_REDUCE_PROD:
- loadReduce<ir::operation::Reduce::ReduceType::PROD>(op, subg);
+ loadReduce(op, subg, ir::operation::Reduce::ReduceType::PROD);
return;
case BuiltinOperator::BuiltinOperator_IF:
loadIf(op, subg);
@@ -1903,26 +1448,26 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ROUND);
return;
case BuiltinOperator::BuiltinOperator_POW:
- loadPow(op, subg);
+ loadOperationTo<ir::operation::Pow>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_LOGICAL_NOT:
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOGICAL_NOT);
return;
case BuiltinOperator::BuiltinOperator_LOGICAL_OR:
- loadElementwiseBinary<ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR>(
- op, subg);
+ loadElementwiseBinary(op, subg,
+ ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
return;
case BuiltinOperator::BuiltinOperator_FILL:
- loadFill(op, subg);
+ loadOperationTo<ir::operation::Fill>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_ZEROS_LIKE:
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ZEROS_LIKE);
return;
case BuiltinOperator::BuiltinOperator_TILE:
- loadTile(op, subg);
+ loadOperationTo<ir::operation::Tile>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_RANGE:
- loadRange(op, subg);
+ loadOperationTo<ir::operation::Range>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_BATCH_MATMUL:
loadBatchMatMul(op, subg);
@@ -1937,13 +1482,13 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadSpaceToDepth(op, subg);
return;
case BuiltinOperator::BuiltinOperator_L2_NORMALIZATION:
- loadL2Normalization(op, subg);
+ loadOperationTo<ir::operation::L2Normalization>(op, subg);
break;
case BuiltinOperator::BuiltinOperator_LEAKY_RELU:
loadLeakyRelu(op, subg);
return;
case BuiltinOperator::BuiltinOperator_RANK:
- loadRank(op, subg);
+ loadOperationTo<ir::operation::Rank>(op, subg);
return;
default:
throw std::runtime_error(
@@ -1951,8 +1496,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
}
}
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadModel()
+template <typename LoaderDomain> void BaseLoader<LoaderDomain>::loadModel()
{
LoaderDomain::VerifyModelBuffer(*_verifier.get());
_model = LoaderDomain::GetModel(_base);
@@ -1967,8 +1511,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadModel()
auto subgraphs = std::make_unique<ir::Subgraphs>();
for (uint32_t subgraph_index = 0; subgraph_index < domain_subgraphs->size(); ++subgraph_index)
{
- auto subg =
- static_cast<SpecificLoader *>(this)->loadSubgraph((*_model->subgraphs())[subgraph_index]);
+ auto subg = loadSubgraph((*_model->subgraphs())[subgraph_index]);
subgraphs->push(ir::SubgraphIndex{subgraph_index}, std::move(subg));
}
_subgraphs = std::move(subgraphs);
diff --git a/runtime/onert/frontend/circle/CMakeLists.txt b/runtime/onert/frontend/circle/CMakeLists.txt
index 8bcf85dd3..76dca9989 100644
--- a/runtime/onert/frontend/circle/CMakeLists.txt
+++ b/runtime/onert/frontend/circle/CMakeLists.txt
@@ -8,7 +8,7 @@ add_library(circle_loader SHARED ${CIRCLE_LOADER_SOURCES})
target_include_directories(circle_loader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(circle_loader PUBLIC onert_core)
+target_link_libraries(circle_loader PRIVATE onert_core)
target_link_libraries(circle_loader PRIVATE base_loader nnfw_common nnfw_coverage)
target_link_libraries(circle_loader PRIVATE circle_schema)
diff --git a/runtime/onert/frontend/circle/src/circle_loader.cc b/runtime/onert/frontend/circle/src/circle_loader.cc
index 92a9ee7a5..4565ffc00 100644
--- a/runtime/onert/frontend/circle/src/circle_loader.cc
+++ b/runtime/onert/frontend/circle/src/circle_loader.cc
@@ -69,7 +69,7 @@ struct LoaderDomain
static bool VerifyModelBuffer(Verifier &verifier) { return circle::VerifyModelBuffer(verifier); }
};
-class CircleLoader final : public base_loader::BaseLoader<LoaderDomain, CircleLoader>
+class CircleLoader final : public base_loader::BaseLoader<LoaderDomain>
{
protected:
void loadInstanceNorm(const Operator *op, ir::Graph &subg);
@@ -91,7 +91,8 @@ public:
}
}
- std::unique_ptr<ir::Graph> loadSubgraph(const circle::SubGraph *circle_subg)
+private:
+ std::unique_ptr<ir::Graph> loadSubgraph(const circle::SubGraph *circle_subg) override
{
auto subg = std::make_unique<ir::Graph>();
// Load tensors
diff --git a/runtime/onert/frontend/nnapi/execution.cc b/runtime/onert/frontend/nnapi/execution.cc
index ce7da579e..56ca5ef00 100644
--- a/runtime/onert/frontend/nnapi/execution.cc
+++ b/runtime/onert/frontend/nnapi/execution.cc
@@ -94,12 +94,36 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32
// Omitted optional input
// LSTM operation's some inputs can be optional input
+ // Transpose operation's permutation input can be optional input
if ((buffer == nullptr) && (length == 0))
{
+ uint32_t dims[1] = {0};
+ ANeuralNetworksOperandType compared_shape;
+ compared_shape.dimensionCount = 1;
+ compared_shape.dimensions = dims;
if (execution->hasUnspecifiedDims(operand_index))
{
return ANEURALNETWORKS_NO_ERROR;
}
+ else if (type == nullptr && execution->IsOptionalInput(operand_index))
+ {
+ if (!execution->setOptionalInput(index, type, buffer, length))
+ {
+ VERBOSE(NNAPI::Execution) << "setInput: Fail to set optional input" << std::endl;
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return ANEURALNETWORKS_NO_ERROR;
+ }
+ // TODO Changes the condition to check zero sized
+ else if (execution->compareShape(&compared_shape, operand_index))
+ {
+ if (!execution->setInput(index, type, buffer, length))
+ {
+ VERBOSE(NNAPI::Execution) << "setInput: Fail to set input" << std::endl;
+ return ANEURALNETWORKS_BAD_DATA;
+ }
+ return ANEURALNETWORKS_NO_ERROR;
+ }
else
{
VERBOSE(NNAPI::Execution) << "setInput: Cannot handle fully-specified shape on model build "
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
index eb12d7e76..6114b74b0 100644
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
+++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
@@ -98,6 +98,17 @@ bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *ty
return operand_shape == shape_from_type;
}
+bool ANeuralNetworksExecution::IsOptionalInput(const onert::ir::OperandIndex index) noexcept
+{
+ const auto &operand_shape = _execution->primary_subgraph().operands().at(index).shape();
+ for (int32_t i = 0; i < operand_shape.rank(); ++i)
+ {
+ if (operand_shape.dim(i) != 0)
+ return false;
+ }
+ return true;
+}
+
bool ANeuralNetworksExecution::hasUnspecifiedDims(const onert::ir::OperandIndex index) noexcept
{
const auto operand_shape = _execution->primary_subgraph().operands().at(index).shape();
@@ -148,6 +159,45 @@ bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOpe
return true;
}
+bool ANeuralNetworksExecution::setOptionalInput(uint32_t index,
+ const ANeuralNetworksOperandType *type,
+ const void *buffer, size_t length) noexcept
+{
+ assert(type == nullptr);
+ assert(buffer == nullptr);
+ assert(length == 0);
+ try
+ {
+ onert::ir::IOIndex input_index{index};
+ const auto operand_index = getInputOperandIndex(index);
+
+ const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
+ const auto shape = (type != nullptr)
+ ? NNAPIConvert::getShape(type)
+ : _execution->primary_subgraph().operands().at(operand_index).shape();
+
+ // ANeuralNetworksExecution::setInput() uses only shape information
+ ANeuralNetworksOperandType optional_input_type;
+ optional_input_type.dimensionCount = shape.rank();
+ std::vector<uint32_t> dims(optional_input_type.dimensionCount);
+ for (uint32_t i = 0; i < optional_input_type.dimensionCount; ++i)
+ {
+ dims.at(i) = shape.dim(i);
+ }
+ optional_input_type.dimensions = dims.data();
+
+ return setInput(index, &optional_input_type, buffer, length);
+ }
+ catch (const std::exception &e)
+ {
+ VERBOSE(EXCEPTION) << e.what() << std::endl;
+
+ return false;
+ }
+
+ return true;
+}
+
bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOperandType *type,
void *buffer, size_t length) noexcept
{
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
index 848ae743f..1f4b868f6 100644
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
+++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
@@ -35,6 +35,8 @@ public:
public:
bool setInput(uint32_t index, const ANeuralNetworksOperandType *type, const void *buffer,
size_t length) noexcept;
+ bool setOptionalInput(uint32_t index, const ANeuralNetworksOperandType *type, const void *buffer,
+ size_t length) noexcept;
bool setOutput(uint32_t index, const ANeuralNetworksOperandType *type, void *buffer,
size_t length) noexcept;
bool startExecute(void) noexcept;
@@ -46,6 +48,7 @@ public:
const onert::ir::OperandIndex index) noexcept;
bool compareShape(const ANeuralNetworksOperandType *type,
const onert::ir::OperandIndex index) noexcept;
+ bool IsOptionalInput(const onert::ir::OperandIndex index) noexcept;
bool hasUnspecifiedDims(const onert::ir::OperandIndex index) noexcept;
size_t getOperandSize(const onert::ir::OperandIndex index) noexcept;
const std::shared_ptr<onert::exec::Execution> instance(void) noexcept;
diff --git a/runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.test.cc
index 15a279a7e..bb42f2b08 100644
--- a/runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc
+++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.test.cc
@@ -16,10 +16,10 @@
#include <gtest/gtest.h>
-#include "wrapper/ANeuralNetworksModel.h"
+#include "ANeuralNetworksModel.h"
-TEST(MODEL, model_build)
+TEST(MODEL, neg_model_build)
{
ANeuralNetworksModel model;
- ASSERT_EQ(model.isFinished(), false);
+ ASSERT_FALSE(model.isFinished());
}
diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc
index 8e3d83db4..e6c38f5f8 100644
--- a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc
+++ b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc
@@ -708,31 +708,7 @@ OperationFactory::OperationFactory()
return new operation::StridedSlice{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- // TODO make this work with init_param.input_count == 1 (when permutation vector is optional)
-
- // Inputs
- // 0: An n-D tensor, specifying the tensor to be transposed.
- // 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
- // the permutation of the dimensions of the input tensor.
- // The returned tensor's dimension i corresponds to the input dimension
- // perm[i]. If perm is not given, it is set to (n-1...0), where n is the
- // rank of the input tensor. Hence by default, this operation performs a
- // regular matrix transpose on 2-D input Tensors.
- assert(init_param.input_count == 2);
- assert(init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
- std::vector<std::int32_t> perm =
- operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
-
- operation::Transpose::Param param;
- param.perm.assign(perm.cbegin(), perm.cend());
-
- return new operation::Transpose{inputs, outputs, param};
- };
+ _map[ANEURALNETWORKS_TRANSPOSE] = createSimpleBinaryOp<operation::Transpose>;
_map[ANEURALNETWORKS_MUL] =
getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::MUL);
@@ -982,6 +958,28 @@ OperationFactory::OperationFactory()
return new operation::ResizeBilinear{inputs, outputs, param};
};
+ _map[ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR] = [](const OperationFactory::Param &init_param,
+ Operands &operands) {
+ assert((init_param.input_count == 3 || init_param.input_count == 4) &&
+ init_param.output_count == 1);
+
+ OperandIndexSequence outputs{init_param.outputs[0]};
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> IFM Index
+ // 1 -> Height Index
+ // 2 -> Width Index
+ OperandIndexSequence inputs{init_param.inputs[0]};
+
+ operation::ResizeNearestNeighbor::Param param;
+ param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
+ param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>();
+ param.align_corners = false;
+ // The layout input is not supported yet
+ return new operation::ResizeNearestNeighbor{inputs, outputs, param};
+ };
+
_map[ANEURALNETWORKS_RELU1] = getElementwiseActivationGenerator(
onert::ir::operation::ElementwiseActivation::Type::RELU, 1.f, -1.f);
@@ -1304,6 +1302,105 @@ OperationFactory::OperationFactory()
}
param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>();
param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>();
+ // This is initialization to prevent warning or error by static code analyzer. LSTM operation
+ // does not need time_major
+ param.time_major = false;
+
+ return new operation::LSTM{inputs, outputs, param};
+ };
+
+ _map[ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM] = [](const OperationFactory::Param &init_param,
+ Operands &operands) {
+    assert((init_param.input_count >= 24 && init_param.input_count <= 28) &&
+ (init_param.output_count >= 1 && init_param.output_count <= 3));
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> Input Tensor Index
+ // 1 -> Input to Input Tensor Index
+ // 2 -> Input to Forget Tensor Index
+ // 3 -> Input to Cell Tensor Index
+ // 4 -> Input to Output Tensor Index
+ // 5 -> Recurrent to Input Weights Tensor Index
+ // 6 -> Recurrent to Forget Weights Tensor Index
+ // 7 -> Recurrent to Cell Weights Tensor Index
+ // 8 -> Recurrent to Output Weights Tensor Index
+ // 9 -> Cell to Input Weights Tensor Index
+ // 10 -> Cell to Forget Weights Tensor Index
+ // 11 -> Cell to Output Weights Tensor Index
+ // 12 -> Input Gate Bias Tensor Index
+ // 13 -> Forget Gate Bias Tensor Index
+ // 14 -> Cell Bias Tensor Index
+ // 15 -> Output Gate Bias Tensor Index
+ // 16 -> Projection Weights Tensor Index
+ // 17 -> Projection Bias Tensor Index
+ // 18 -> Output State In Tensor Index
+ // 19 -> Cell State In Tensor Index
+ assert(init_param.input_count - 3 > 20);
+ OperandIndexSequence inputs;
+ for (uint32_t n = 0; n < 20; ++n)
+ {
+ inputs.append(OperandIndex{init_param.inputs[n]});
+ }
+
+ // 24 -> Input Layer Normalization Weights Tensor Index
+ // 25 -> Forget Layer Normalization Weights Tensor Index
+ // 26 -> Cell Layer Normalization Weights Tensor Index
+ // 27 -> Output Layer Normalization Weights Tensor Index
+ if (init_param.input_count > 24)
+ {
+ for (uint32_t n = 24; n < 28; ++n)
+ {
+ if (init_param.input_count > n)
+ {
+ inputs.append(OperandIndex{init_param.inputs[n]});
+ }
+ }
+ }
+
+ // Each output should be interpreted as follows:
+ //
+ // 0 -> Output Tensor Index -> 3
+ // 1 -> Output State Out Tensor Index
+ // 2 -> Cell State Out Tensor Index
+ const OperandIndex scratch_buffer_index;
+ OperandIndex output_state_index =
+ init_param.output_count >= 2 ? OperandIndex{init_param.outputs[1]} : OperandIndex();
+ OperandIndex cell_state_index =
+ init_param.output_count >= 3 ? OperandIndex{init_param.outputs[2]} : OperandIndex();
+ const OperandIndex output_index = OperandIndex{init_param.outputs[0]};
+ OperandIndexSequence outputs{scratch_buffer_index, output_state_index, cell_state_index,
+ output_index};
+
+ operation::LSTM::Param param;
+ const auto activation_index = OperandIndex{init_param.inputs[20]};
+ switch (operands.at(activation_index).asScalar<int32_t>())
+ {
+ case 0:
+ param.activation = Activation::NONE;
+ break;
+ case 1:
+ param.activation = Activation::RELU;
+ break;
+ case 2:
+ param.activation = Activation::RELU1;
+ break;
+ case 3:
+ param.activation = Activation::RELU6;
+ break;
+ case 4:
+ param.activation = Activation::TANH;
+ break;
+ case 6:
+ param.activation = Activation::SIGMOID;
+ break;
+ default:
+ throw std::runtime_error("Unsupported activation type");
+ break;
+ }
+ param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>();
+ param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>();
+ param.time_major = operands.at(OperandIndex{init_param.inputs[23]}).asScalar<bool>();
return new operation::LSTM{inputs, outputs, param};
};
@@ -1406,7 +1503,7 @@ OperationFactory::OperationFactory()
// TODO Remove ANEURALNETWORKS_ABS_EX
_map[ANEURALNETWORKS_ABS_EX] = _map[ANEURALNETWORKS_ABS];
- _map[ANEURALNETWORKS_ARGMAX] = [](const OperationFactory::Param &init_param, Operands &operands) {
+ _map[ANEURALNETWORKS_ARGMAX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1415,10 +1512,9 @@ OperationFactory::OperationFactory()
//
// 0 -> Input Tensor Index
// 1 -> Axis Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
+ OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
operation::ArgMax::Param param;
- param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
// NNAPI ARGMAX output type is always int32
param.output_type = DataType::INT32;
@@ -1517,7 +1613,7 @@ OperationFactory::OperationFactory()
assert(init_param.input_count == 3);
assert(init_param.output_count >= 1); // At least one output tensor and axis
- OperandIndexSequence inputs{init_param.inputs[0]};
+ OperandIndexSequence inputs{init_param.inputs[1], init_param.inputs[0]};
OperandIndexSequence outputs;
for (uint32_t n = 0; n < init_param.output_count; ++n)
{
@@ -1525,7 +1621,6 @@ OperationFactory::OperationFactory()
}
operation::Split::Param param;
- param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
param.num_splits = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<std::int32_t>();
return new operation::Split{inputs, outputs, param};
diff --git a/runtime/onert/frontend/tflite/CMakeLists.txt b/runtime/onert/frontend/tflite/CMakeLists.txt
index fcadf5223..604a9e4cb 100644
--- a/runtime/onert/frontend/tflite/CMakeLists.txt
+++ b/runtime/onert/frontend/tflite/CMakeLists.txt
@@ -8,7 +8,7 @@ add_library(tflite_loader SHARED ${TFLITE_LOADER_SOURCES})
target_include_directories(tflite_loader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(tflite_loader PUBLIC onert_core)
+target_link_libraries(tflite_loader PRIVATE onert_core)
target_link_libraries(tflite_loader PRIVATE base_loader nnfw_common nnfw_coverage)
install(TARGETS tflite_loader DESTINATION lib)
diff --git a/runtime/onert/frontend/tflite/src/tflite_loader.cc b/runtime/onert/frontend/tflite/src/tflite_loader.cc
index 7eef15717..fe4295ada 100644
--- a/runtime/onert/frontend/tflite/src/tflite_loader.cc
+++ b/runtime/onert/frontend/tflite/src/tflite_loader.cc
@@ -62,7 +62,7 @@ struct LoaderDomain
}
};
-class TFLiteLoader final : public base_loader::BaseLoader<LoaderDomain, TFLiteLoader>
+class TFLiteLoader final : public base_loader::BaseLoader<LoaderDomain>
{
public:
using BaseLoader::BaseLoader;
@@ -78,7 +78,8 @@ public:
}
}
- std::unique_ptr<ir::Graph> loadSubgraph(const onert_tflite::SubGraph *tflite_subg)
+private:
+ std::unique_ptr<ir::Graph> loadSubgraph(const onert_tflite::SubGraph *tflite_subg) override
{
auto subg = std::make_unique<ir::Graph>();
// Load tensors