summaryrefslogtreecommitdiff
path: root/runtime/onert/frontend
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/onert/frontend')
-rw-r--r--runtime/onert/frontend/base_loader/include/base_loader.h557
-rw-r--r--runtime/onert/frontend/circle/src/circle_loader.cc6
-rw-r--r--runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc696
-rw-r--r--runtime/onert/frontend/tflite/src/tflite_loader.cc6
4 files changed, 556 insertions, 709 deletions
diff --git a/runtime/onert/frontend/base_loader/include/base_loader.h b/runtime/onert/frontend/base_loader/include/base_loader.h
index 0f6a2a5d0..480452e01 100644
--- a/runtime/onert/frontend/base_loader/include/base_loader.h
+++ b/runtime/onert/frontend/base_loader/include/base_loader.h
@@ -105,40 +105,39 @@ protected:
template <typename Param, typename OptionsType>
void loadStridesAndPaddings(Param &param, const OptionsType *options);
// Load Pool2D param
- template <typename Param> void loadPool2D(Param &param, const Pool2DOptions *options);
+ template <typename Param> void loadPool2DOptions(Param &param, const Pool2DOptions *options);
// Operations
void loadConv2D(const Operator *op, ir::Graph &subg);
void loadDepthwiseConv2D(const Operator *op, ir::Graph &subg);
void loadTransposeConv(const Operator *op, ir::Graph &subg);
- void loadAvgPool2D(const Operator *op, ir::Graph &subg);
+ void loadPool2D(const Operator *op, ir::Graph &subg, ir::operation::Pool2D::PoolType op_type);
void loadReshape(const Operator *op, ir::Graph &subg);
void loadSoftmax(const Operator *op, ir::Graph &subg);
- void loadMaxPool2D(const Operator *op, ir::Graph &subg);
void loadConcatenation(const Operator *op, ir::Graph &subg);
void loadFill(const Operator *op, ir::Graph &subg);
void loadFC(const Operator *op, ir::Graph &subg);
- void loadAdd(const Operator *op, ir::Graph &subg);
- void loadSub(const Operator *op, ir::Graph &subg);
- void loadMul(const Operator *op, ir::Graph &subg);
- void loadDiv(const Operator *op, ir::Graph &subg);
+ template <ir::operation::BinaryArithmetic::ArithmeticType op_type>
+ void loadBinaryArithmetic(const Operator *op, ir::Graph &subg);
+ void loadAddV2(const Operator *op, ir::Graph &subg);
void loadPack(const Operator *op, ir::Graph &subg);
- void loadRelu(const Operator *op, ir::Graph &subg);
- void loadRelu6(const Operator *op, ir::Graph &subg);
void loadResizeBilinear(const Operator *op, ir::Graph &subg);
- void loadRsqrt(const Operator *op, ir::Graph &subg);
+ void loadResizeNearestNeighbor(const Operator *op, ir::Graph &subg);
void loadSelect(const Operator *op, ir::Graph &subg);
- void loadSqrt(const Operator *op, ir::Graph &subg);
void loadSquaredDifference(const Operator *op, ir::Graph &subg);
- void loadTanh(const Operator *op, ir::Graph &subg);
void loadTranspose(const Operator *op, ir::Graph &subg);
- void loadReduce(const Operator *op, ir::Graph &subg,
- ir::operation::Reduce::ReduceType reduce_type);
+ template <ir::operation::Reduce::ReduceType reduce_type>
+ void loadReduce(const Operator *op, ir::Graph &subg);
void loadReduceAll(const Operator *op, ir::Graph &subg);
void loadReverseV2(const Operator *op, ir::Graph &subg);
void loadPad(const Operator *op, ir::Graph &subg);
- void loadLogistic(const Operator *op, ir::Graph &subg);
- void loadExp(const Operator *op, ir::Graph &subg);
+ void loadElementwiseActivation(const Operator *op, ir::Graph &subg,
+ ir::operation::ElementwiseActivation::Type op_type,
+ float alpha = 0.f, float beta = 0.f);
+ template <ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type>
+ void loadElementwiseBinary(const Operator *op, ir::Graph &subg);
+ void loadElementwiseUnary(const Operator *op, ir::Graph &subg,
+ ir::operation::ElementwiseUnary::Type op_type);
void loadExpandDims(const Operator *op, ir::Graph &subg);
void loadGather(const Operator *op, ir::Graph &subg);
void loadCustom(const Operator *op, ir::Graph &subg);
@@ -152,35 +151,25 @@ protected:
void loadSlice(const Operator *op, ir::Graph &subg);
void loadStridedSlice(const Operator *op, ir::Graph &subg);
void loadUnpack(const Operator *op, ir::Graph &subg);
- void loadMinimum(const Operator *op, ir::Graph &subg);
- void loadMaximum(const Operator *op, ir::Graph &subg);
- void loadCast(const Operator *op, ir::Graph &subg);
void loadComparison(const Operator *op, ir::Graph &subg);
void loadEinsum(const Operator *op, ir::Graph &subg);
void loadOneHot(const Operator *op, ir::Graph &subg);
- void loadAbs(const Operator *op, ir::Graph &subg);
- void loadCos(const Operator *op, ir::Graph &subg);
- void loadSin(const Operator *op, ir::Graph &subg);
void loadShape(const Operator *op, ir::Graph &subg);
void loadIf(const Operator *op, ir::Graph &subg);
void loadWhile(const Operator *op, ir::Graph &subg);
- void loadNeg(const Operator *op, ir::Graph &subg);
- void loadLog(const Operator *op, ir::Graph &subg);
void loadArgMax(const Operator *op, ir::Graph &subg);
- void loadRound(const Operator *op, ir::Graph &subg);
void loadPow(const Operator *op, ir::Graph &subg);
- void loadLogicalNot(const Operator *op, ir::Graph &subg);
- void loadZerosLike(const Operator *op, ir::Graph &subg);
void loadTile(const Operator *op, ir::Graph &subg);
- void loadLogicalOr(const Operator *op, ir::Graph &subg);
void loadRange(const Operator *op, ir::Graph &subg);
+ void loadRank(const Operator *op, ir::Graph &subg);
void loadMatrixBandPart(const Operator *op, ir::Graph &subg);
void loadBroadcastTo(const Operator *op, ir::Graph &subg);
void loadFusedBatchNorm(const Operator *op, ir::Graph &subg);
void loadLogSoftmax(const Operator *op, ir::Graph &subg);
- void loadQuantize(const Operator *op, ir::Graph &subg);
void loadSpaceToDepth(const Operator *op, ir::Graph &subg);
void loadStatelessRandomUniform(const Operator *op, ir::Graph &subg);
+ void loadL2Normalization(const Operator *op, ir::Graph &subg);
+ void loadLeakyRelu(const Operator *op, ir::Graph &subg);
protected:
// Base address for mapped region for loading (if needed)
@@ -194,6 +183,7 @@ protected:
const Model *_model;
// Maps Tensor indices to onert Operands.
std::vector<ir::OperandIndex> _tensor_to_operand;
+ std::unordered_map<ir::OperandIndex, std::string> _tensor_names;
// Verifier
std::unique_ptr<Verifier> _verifier;
};
@@ -466,8 +456,8 @@ ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Ten
subg.setOperandValue(operand_index, std::move(data_obj));
}
- // Name unused
- // auto name = tensor->name();
+ _tensor_names.emplace(operand_index, tensor->name()->str());
+
// Variablie
if (tensor->is_variable())
throw std::runtime_error("Variable tensor not supported!");
@@ -518,8 +508,8 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadStridesAndPaddings(Param &par
template <typename LoaderDomain, typename SpecificLoader>
template <typename Param>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPool2D(Param &param,
- const Pool2DOptions *options)
+void BaseLoader<LoaderDomain, SpecificLoader>::loadPool2DOptions(Param &param,
+ const Pool2DOptions *options)
{
// Strides and Paddings
loadStridesAndPaddings(param, options);
@@ -543,7 +533,10 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadConv2D(const Operator *op, ir
const auto *options = op->builtin_options_as_Conv2DOptions();
param.activation = convertActivation(options->fused_activation_function());
loadStridesAndPaddings(param, options);
- // Dilation h/w factor unused
+
+ param.dilation.width_factor = options->dilation_w_factor();
+ param.dilation.height_factor = options->dilation_h_factor();
+
std::unique_ptr<ir::Operation> new_op(new ir::operation::Conv2D(inputs, outputs, param));
subg.addOperation(std::move(new_op));
}
@@ -585,19 +578,21 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadTransposeConv(const Operator
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadAvgPool2D(const Operator *op, ir::Graph &subg)
+void BaseLoader<LoaderDomain, SpecificLoader>::loadPool2D(const Operator *op, ir::Graph &subg,
+ ir::operation::Pool2D::PoolType op_type)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
- ir::operation::AvgPool2D::Param param;
+ ir::operation::Pool2D::Param param;
+ param.op_type = op_type;
const auto *options = op->builtin_options_as_Pool2DOptions();
- loadPool2D(param, options);
+ loadPool2DOptions(param, options);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::AvgPool2D(inputs, outputs, param));
+ std::unique_ptr<ir::Operation> new_op(new ir::operation::Pool2D(inputs, outputs, param));
subg.addOperation(std::move(new_op));
}
@@ -645,23 +640,6 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadSoftmax(const Operator *op, i
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMaxPool2D(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::MaxPool2D::Param param;
- const auto *options = op->builtin_options_as_Pool2DOptions();
-
- loadPool2D(param, options);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::MaxPool2D(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadConcatenation(const Operator *op,
ir::Graph &subg)
{
@@ -719,70 +697,82 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadFC(const Operator *op, ir::Gr
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadAdd(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Add::Param param;
- const auto *options = op->builtin_options_as_AddOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Add(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSub(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Sub::Param param;
- const auto *options = op->builtin_options_as_SubOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Sub(inputs, outputs, param));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMul(const Operator *op, ir::Graph &subg)
+void BaseLoader<LoaderDomain, SpecificLoader>::loadAddV2(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
- ir::operation::Mul::Param param;
- const auto *options = op->builtin_options_as_MulOptions();
+ ir::operation::BinaryArithmetic::Param param;
+ param.arithmetic_type = ir::operation::BinaryArithmetic::ArithmeticType::ADD;
- param.activation = convertActivation(options->fused_activation_function());
+ if (op->custom_options() == nullptr)
+ {
+ param.activation = ir::Activation::NONE;
+ }
+ else
+ {
+ size_t custom_op_data_size = op->custom_options()->size();
+ auto custom_op_data = op->custom_options()->Data();
+ auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
+ auto attr_map = data_root.AsMap();
+ const auto fused_activation_func = static_cast<typename LoaderDomain::ActivationFunctionType>(
+ attr_map["fused_activation_function"].AsInt8());
+ param.activation = convertActivation(fused_activation_func);
+ }
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Mul(inputs, outputs, param));
+ std::unique_ptr<ir::Operation> new_op(
+ new ir::operation::BinaryArithmetic(inputs, outputs, param));
subg.addOperation(std::move(new_op));
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadDiv(const Operator *op, ir::Graph &subg)
+template <ir::operation::BinaryArithmetic::ArithmeticType op_type>
+void BaseLoader<LoaderDomain, SpecificLoader>::loadBinaryArithmetic(const Operator *op,
+ ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
- ir::operation::Div::Param param;
- const auto *options = op->builtin_options_as_DivOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
+ ir::operation::BinaryArithmetic::Param param;
+ param.arithmetic_type = op_type;
+ switch (op_type)
+ {
+ case ir::operation::BinaryArithmetic::ArithmeticType::ADD:
+ {
+ const auto *add_options = op->builtin_options_as_AddOptions();
+ param.activation = convertActivation(add_options->fused_activation_function());
+ break;
+ }
+ case ir::operation::BinaryArithmetic::ArithmeticType::SUB:
+ {
+ const auto *sub_options = op->builtin_options_as_SubOptions();
+ param.activation = convertActivation(sub_options->fused_activation_function());
+ break;
+ }
+ case ir::operation::BinaryArithmetic::ArithmeticType::MUL:
+ {
+ const auto *mul_options = op->builtin_options_as_MulOptions();
+ param.activation = convertActivation(mul_options->fused_activation_function());
+ break;
+ }
+ case ir::operation::BinaryArithmetic::ArithmeticType::DIV:
+ {
+ const auto *div_options = op->builtin_options_as_DivOptions();
+ param.activation = convertActivation(div_options->fused_activation_function());
+ break;
+ }
+ default:
+ assert(false &&
+ "The function 'loadBinaryArithmetic' supports only BinaryArithmetic operations");
+ break;
+ }
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Div(inputs, outputs, param));
+ std::unique_ptr<ir::Operation> new_op(
+ new ir::operation::BinaryArithmetic(inputs, outputs, param));
subg.addOperation(std::move(new_op));
}
@@ -805,26 +795,22 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadPack(const Operator *op, ir::
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu(const Operator *op, ir::Graph &subg)
+void BaseLoader<LoaderDomain, SpecificLoader>::loadElementwiseActivation(
+ const Operator *op, ir::Graph &subg, ir::operation::ElementwiseActivation::Type op_type,
+ float alpha, float beta)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ReLU(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
+ ir::operation::ElementwiseActivation::Param param;
+ param.op_type = op_type;
+ param.alpha = alpha;
+ param.beta = beta;
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu6(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ReLU6(inputs, outputs));
+ std::unique_ptr<ir::Operation> new_op(
+ new ir::operation::ElementwiseActivation(inputs, outputs, param));
subg.addOperation(std::move(new_op));
}
@@ -856,38 +842,40 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadResizeBilinear(const Operator
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRsqrt(const Operator *op, ir::Graph &subg)
+void BaseLoader<LoaderDomain, SpecificLoader>::loadResizeNearestNeighbor(const Operator *op,
+ ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
+ auto input = inputs.at(0);
+ auto size = inputs.at(1);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::RSQRT(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
+ if (!subg.operands().at(size).isConstant())
+ throw std::runtime_error("ResizeNearestNeighbor: non-constant 'size' is not supported.");
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSelect(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
+ std::vector<std::int32_t> size_v = subg.operands().at(size).template asVector<std::int32_t>();
- loadOperationIO(op, inputs, outputs);
+ ir::operation::ResizeNearestNeighbor::Param param;
+ param.height_out = size_v[0];
+ param.width_out = size_v[1];
+ param.align_corners = op->builtin_options_as_ResizeNearestNeighborOptions()->align_corners();
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Select(inputs, outputs));
+ std::unique_ptr<ir::Operation> new_op(
+ new ir::operation::ResizeNearestNeighbor({input}, outputs, param));
subg.addOperation(std::move(new_op));
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSqrt(const Operator *op, ir::Graph &subg)
+void BaseLoader<LoaderDomain, SpecificLoader>::loadSelect(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::SQRT(inputs, outputs));
+ std::unique_ptr<ir::Operation> new_op(new ir::operation::Select(inputs, outputs));
subg.addOperation(std::move(new_op));
}
@@ -905,18 +893,6 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadSquaredDifference(const Opera
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadTanh(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Tanh(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadTranspose(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
@@ -937,8 +913,8 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadTranspose(const Operator *op,
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadReduce(
- const Operator *op, ir::Graph &subg, ir::operation::Reduce::ReduceType reduce_type)
+template <ir::operation::Reduce::ReduceType reduce_type>
+void BaseLoader<LoaderDomain, SpecificLoader>::loadReduce(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
@@ -1005,26 +981,49 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadPad(const Operator *op, ir::G
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadLogistic(const Operator *op, ir::Graph &subg)
+template <ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type>
+void BaseLoader<LoaderDomain, SpecificLoader>::loadElementwiseBinary(const Operator *op,
+ ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Logistic(inputs, outputs));
+ ir::operation::ElementwiseBinary::Param param;
+ param.op_type = op_type;
+
+ std::unique_ptr<ir::Operation> new_op(
+ new ir::operation::ElementwiseBinary(inputs, outputs, param));
subg.addOperation(std::move(new_op));
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadExp(const Operator *op, ir::Graph &subg)
+void BaseLoader<LoaderDomain, SpecificLoader>::loadElementwiseUnary(
+ const Operator *op, ir::Graph &subg, ir::operation::ElementwiseUnary::Type op_type)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Exp(inputs, outputs));
+ ir::operation::ElementwiseUnary::Param param;
+ param.op_type = op_type;
+
+ if (op_type == ir::operation::ElementwiseUnary::Type::CAST)
+ {
+ auto qasymm8ToUint8 = [](ir::Operand &operand) {
+ if (operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM)
+ {
+ operand.type(ir::DataType::UINT8);
+ }
+ };
+ qasymm8ToUint8(subg.operands().at(inputs.at(ir::operation::ElementwiseUnary::Input::INPUT)));
+ qasymm8ToUint8(subg.operands().at(outputs.at(0)));
+ }
+
+ std::unique_ptr<ir::Operation> new_op(
+ new ir::operation::ElementwiseUnary(inputs, outputs, param));
subg.addOperation(std::move(new_op));
}
@@ -1177,6 +1176,17 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadStatelessRandomUniform(const
}
template <typename LoaderDomain, typename SpecificLoader>
+void BaseLoader<LoaderDomain, SpecificLoader>::loadRank(const Operator *op, ir::Graph &subg)
+{
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
+ loadOperationIO(op, inputs, outputs);
+
+ std::unique_ptr<ir::Operation> new_op(new ir::operation::Rank(inputs, outputs));
+ subg.addOperation(std::move(new_op));
+}
+
+template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
@@ -1197,7 +1207,8 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir
Einsum,
BroadcastTo,
FusedBatchNorm,
- StatelessRandomUniform
+ StatelessRandomUniform,
+ Erf
};
// Mapping from custom op name string to BuiltinOP enum
@@ -1210,6 +1221,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir
{"FusedBatchNormV3", BuiltinOP::FusedBatchNorm},
{"BroadcastTo", BuiltinOP::BroadcastTo},
{"StatelessRandomUniform", BuiltinOP::StatelessRandomUniform},
+ {"Erf", BuiltinOP::Erf},
};
try
@@ -1219,7 +1231,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir
switch (custom_op_id)
{
case BuiltinOP::AddV2:
- loadAdd(op, subg);
+ loadAddV2(op, subg);
break;
case BuiltinOP::ReduceAll:
loadReduceAll(op, subg);
@@ -1242,6 +1254,9 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir
case BuiltinOP::StatelessRandomUniform:
loadStatelessRandomUniform(op, subg);
break;
+ case BuiltinOP::Erf:
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ERF);
+ break;
default:
throw std::runtime_error{
"Loader: Custom OP map is defined but operation loader function is not defined"};
@@ -1396,51 +1411,6 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadUnpack(const Operator *op, ir
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMinimum(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Min(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMaximum(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Max(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadCast(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- auto qasymm8ToUint8 = [](ir::Operand &operand) {
- if (operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM)
- {
- operand.type(ir::DataType::UINT8);
- }
- };
- qasymm8ToUint8(subg.operands().at(inputs.at(ir::operation::Cast::Input::INPUT)));
- qasymm8ToUint8(subg.operands().at(outputs.at(0)));
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Cast(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadComparison(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
@@ -1562,42 +1532,6 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOneHot(const Operator *op, ir
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadAbs(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Abs(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadCos(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Cos(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSin(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Sin(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadShape(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
@@ -1652,18 +1586,6 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadWhile(const Operator *op, ir:
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadNeg(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Neg(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadArgMax(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
@@ -1697,30 +1619,6 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadArgMax(const Operator *op, ir
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadLog(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Log(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRound(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Round(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadPow(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
@@ -1733,31 +1631,6 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadPow(const Operator *op, ir::G
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadLogicalNot(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::LogicalNot(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadZerosLike(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ZerosLike(inputs, outputs));
-
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadRange(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
@@ -1787,18 +1660,6 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadTile(const Operator *op, ir::
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadLogicalOr(const Operator *op, ir::Graph &subg)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::LogicalOr(inputs, outputs));
- subg.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadLogSoftmax(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
@@ -1817,18 +1678,27 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadLogSoftmax(const Operator *op
}
template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadQuantize(const Operator *op, ir::Graph &subg)
+void BaseLoader<LoaderDomain, SpecificLoader>::loadL2Normalization(const Operator *op,
+ ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Quantize(inputs, outputs));
+ std::unique_ptr<ir::Operation> new_op(new ir::operation::L2Normalization(inputs, outputs));
subg.addOperation(std::move(new_op));
}
template <typename LoaderDomain, typename SpecificLoader>
+void BaseLoader<LoaderDomain, SpecificLoader>::loadLeakyRelu(const Operator *op, ir::Graph &subg)
+{
+ float alpha = op->builtin_options_as_LeakyReluOptions()->alpha();
+ loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LEAKY_RELU, alpha,
+ 1.f);
+}
+
+template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op, ir::Graph &subg)
{
const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
@@ -1839,7 +1709,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadConv2D(op, subg);
return;
case BuiltinOperator::BuiltinOperator_AVERAGE_POOL_2D:
- loadAvgPool2D(op, subg);
+ loadPool2D(op, subg, ir::operation::Pool2D::PoolType::AVG);
return;
case BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D:
loadDepthwiseConv2D(op, subg);
@@ -1854,7 +1724,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadSoftmax(op, subg);
return;
case BuiltinOperator::BuiltinOperator_MAX_POOL_2D:
- loadMaxPool2D(op, subg);
+ loadPool2D(op, subg, ir::operation::Pool2D::PoolType::MAX);
return;
case BuiltinOperator::BuiltinOperator_CONCATENATION:
loadConcatenation(op, subg);
@@ -1863,31 +1733,40 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadFC(op, subg);
return;
case BuiltinOperator::BuiltinOperator_ADD:
- loadAdd(op, subg);
+ loadBinaryArithmetic<ir::operation::BinaryArithmetic::ArithmeticType::ADD>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_SUB:
- loadSub(op, subg);
+ loadBinaryArithmetic<ir::operation::BinaryArithmetic::ArithmeticType::SUB>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_MUL:
- loadMul(op, subg);
+ loadBinaryArithmetic<ir::operation::BinaryArithmetic::ArithmeticType::MUL>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_DIV:
- loadDiv(op, subg);
+ loadBinaryArithmetic<ir::operation::BinaryArithmetic::ArithmeticType::DIV>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_PACK:
loadPack(op, subg);
return;
case BuiltinOperator::BuiltinOperator_RELU:
- loadRelu(op, subg);
+ loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU,
+ ir::operation::ElementwiseActivation::infinity, 0.f);
+ return;
+ case BuiltinOperator::BuiltinOperator_RELU_N1_TO_1:
+ loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 1.f,
+ -1.f);
return;
case BuiltinOperator::BuiltinOperator_RELU6:
- loadRelu6(op, subg);
+ loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 6.f,
+ 0.f);
return;
case BuiltinOperator::BuiltinOperator_RESIZE_BILINEAR:
loadResizeBilinear(op, subg);
return;
+ case BuiltinOperator::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
+ loadResizeNearestNeighbor(op, subg);
+ return;
case BuiltinOperator::BuiltinOperator_RSQRT:
- loadRsqrt(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::RSQRT);
return;
case BuiltinOperator::BuiltinOperator_SELECT:
loadSelect(op, subg);
@@ -1897,37 +1776,39 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadSelect(op, subg);
return;
case BuiltinOperator::BuiltinOperator_SQRT:
- loadSqrt(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQRT);
return;
case BuiltinOperator::BuiltinOperator_SQUARED_DIFFERENCE:
loadSquaredDifference(op, subg);
return;
case BuiltinOperator::BuiltinOperator_TANH:
- loadTanh(op, subg);
+ loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::TANH, 1.f,
+ 1.f);
return;
case BuiltinOperator::BuiltinOperator_TRANSPOSE:
loadTranspose(op, subg);
return;
case BuiltinOperator::BuiltinOperator_MEAN:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::MEAN);
+ loadReduce<ir::operation::Reduce::ReduceType::MEAN>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_REDUCE_ANY:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::ANY);
+ loadReduce<ir::operation::Reduce::ReduceType::ANY>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_REDUCE_MAX:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::MAX);
+ loadReduce<ir::operation::Reduce::ReduceType::MAX>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_REVERSE_V2:
loadReverseV2(op, subg);
return;
case BuiltinOperator::BuiltinOperator_PAD:
+ case BuiltinOperator::BuiltinOperator_PADV2:
loadPad(op, subg);
return;
case BuiltinOperator::BuiltinOperator_LOGISTIC:
- loadLogistic(op, subg);
+ loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LOGISTIC);
return;
case BuiltinOperator::BuiltinOperator_EXP:
- loadExp(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::EXP);
return;
case BuiltinOperator::BuiltinOperator_EXPAND_DIMS:
loadExpandDims(op, subg);
@@ -1942,7 +1823,7 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadBatchToSpaceND(op, subg);
return;
case BuiltinOperator::BuiltinOperator_SUM:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::SUM);
+ loadReduce<ir::operation::Reduce::ReduceType::SUM>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_CUSTOM:
loadCustom(op, subg);
@@ -1969,13 +1850,13 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadUnpack(op, subg);
return;
case BuiltinOperator::BuiltinOperator_MINIMUM:
- loadMinimum(op, subg);
+ loadElementwiseBinary<ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_MAXIMUM:
- loadMaximum(op, subg);
+ loadElementwiseBinary<ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_CAST:
- loadCast(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::CAST);
return;
case BuiltinOperator::BuiltinOperator_EQUAL:
case BuiltinOperator::BuiltinOperator_NOT_EQUAL:
@@ -1989,19 +1870,19 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadOneHot(op, subg);
return;
case BuiltinOperator::BuiltinOperator_ABS:
- loadAbs(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ABS);
return;
case BuiltinOperator::BuiltinOperator_COS:
- loadCos(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::COS);
return;
case BuiltinOperator::BuiltinOperator_SIN:
- loadSin(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SIN);
return;
case BuiltinOperator::BuiltinOperator_SHAPE:
loadShape(op, subg);
return;
case BuiltinOperator::BuiltinOperator_REDUCE_PROD:
- loadReduce(op, subg, ir::operation::Reduce::ReduceType::PROD);
+ loadReduce<ir::operation::Reduce::ReduceType::PROD>(op, subg);
return;
case BuiltinOperator::BuiltinOperator_IF:
loadIf(op, subg);
@@ -2010,31 +1891,32 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadWhile(op, subg);
return;
case BuiltinOperator::BuiltinOperator_NEG:
- loadNeg(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::NEG);
return;
case BuiltinOperator::BuiltinOperator_ARG_MAX:
loadArgMax(op, subg);
return;
case BuiltinOperator::BuiltinOperator_LOG:
- loadLog(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOG);
return;
case BuiltinOperator::BuiltinOperator_ROUND:
- loadRound(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ROUND);
return;
case BuiltinOperator::BuiltinOperator_POW:
loadPow(op, subg);
return;
case BuiltinOperator::BuiltinOperator_LOGICAL_NOT:
- loadLogicalNot(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOGICAL_NOT);
return;
case BuiltinOperator::BuiltinOperator_LOGICAL_OR:
- loadLogicalOr(op, subg);
+ loadElementwiseBinary<ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR>(
+ op, subg);
return;
case BuiltinOperator::BuiltinOperator_FILL:
loadFill(op, subg);
return;
case BuiltinOperator::BuiltinOperator_ZEROS_LIKE:
- loadZerosLike(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ZEROS_LIKE);
return;
case BuiltinOperator::BuiltinOperator_TILE:
loadTile(op, subg);
@@ -2049,11 +1931,20 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
loadLogSoftmax(op, subg);
return;
case BuiltinOperator::BuiltinOperator_QUANTIZE:
- loadQuantize(op, subg);
+ loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::QUANTIZE);
return;
case BuiltinOperator::BuiltinOperator_SPACE_TO_DEPTH:
loadSpaceToDepth(op, subg);
return;
+ case BuiltinOperator::BuiltinOperator_L2_NORMALIZATION:
+ loadL2Normalization(op, subg);
+      return;
+ case BuiltinOperator::BuiltinOperator_LEAKY_RELU:
+ loadLeakyRelu(op, subg);
+ return;
+ case BuiltinOperator::BuiltinOperator_RANK:
+ loadRank(op, subg);
+ return;
default:
throw std::runtime_error(
std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
diff --git a/runtime/onert/frontend/circle/src/circle_loader.cc b/runtime/onert/frontend/circle/src/circle_loader.cc
index 96dd4698a..92a9ee7a5 100644
--- a/runtime/onert/frontend/circle/src/circle_loader.cc
+++ b/runtime/onert/frontend/circle/src/circle_loader.cc
@@ -103,12 +103,14 @@ public:
// Set inputs
for (const std::int32_t input_ind : *circle_subg->inputs())
{
- subg->addInput(tensorIdxToOperandIdx(input_ind));
+ subg->addInput(tensorIdxToOperandIdx(input_ind),
+ _tensor_names.at(_tensor_to_operand[input_ind]));
}
// Set outputs
for (const std::int32_t output_ind : *circle_subg->outputs())
{
- subg->addOutput(tensorIdxToOperandIdx(output_ind));
+ subg->addOutput(tensorIdxToOperandIdx(output_ind),
+ _tensor_names.at(_tensor_to_operand[output_ind]));
}
// Create operations
for (const auto *op : *circle_subg->operators())
diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc
index 8ff6cbbfd..8e3d83db4 100644
--- a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc
+++ b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc
@@ -83,6 +83,189 @@ uint32_t getUint32Scalar(Operands &operands, const OperandIndex index)
}
OperationFactory::Generator
+getElementwiseActivationGenerator(const onert::ir::operation::ElementwiseActivation::Type op_type,
+ float alpha = 0.f, float beta = 0.f)
+{
+ return [op_type, alpha, beta](const OperationFactory::Param &init_param, Operands &) {
+ assert(init_param.input_count == 1);
+ assert(init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> Input Tensor Index
+
+ OperandIndexSequence inputs{init_param.inputs[0]};
+ OperandIndexSequence outputs{init_param.outputs[0]};
+
+ operation::ElementwiseActivation::Param param;
+ param.op_type = op_type;
+ param.alpha = alpha;
+ param.beta = beta;
+
+ return new operation::ElementwiseActivation{inputs, outputs, param};
+ };
+}
+
+OperationFactory::Generator getElementwiseBinaryGenerator(
+ const onert::ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type)
+{
+ return [op_type](const OperationFactory::Param &init_param, Operands &) {
+ assert(init_param.input_count == 2);
+ assert(init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> Lefthand side operand
+ // 1 -> Righthand side operand
+
+ OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+ OperandIndexSequence outputs{init_param.outputs[0]};
+
+ operation::ElementwiseBinary::Param param;
+ param.op_type = op_type;
+
+ return new operation::ElementwiseBinary{inputs, outputs, param};
+ };
+}
+
+OperationFactory::Generator
+getElementwiseUnaryGenerator(const onert::ir::operation::ElementwiseUnary::Type op_type)
+{
+ return [op_type](const OperationFactory::Param &init_param, Operands &operands) {
+ assert(init_param.input_count == 1);
+ assert(init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> Input Tensor Index
+
+ OperandIndexSequence inputs{init_param.inputs[0]};
+ OperandIndexSequence outputs{init_param.outputs[0]};
+
+ operation::ElementwiseUnary::Param param;
+ param.op_type = op_type;
+
+ if (op_type == operation::ElementwiseUnary::Type::CAST)
+ {
+ // NNAPI uses QUANT_UINT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's
+ // input/output
+ if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
+ {
+ replaceDataType(operands, inputs.at(0), DataType::UINT8);
+ }
+ if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
+ {
+ replaceDataType(operands, outputs.at(0), DataType::UINT8);
+ }
+ }
+
+ return new operation::ElementwiseUnary{inputs, outputs, param};
+ };
+}
+
+OperationFactory::Generator
+getBinaryArithmeticGenerator(const onert::ir::operation::BinaryArithmetic::ArithmeticType op_type)
+{
+ return [op_type](const OperationFactory::Param &init_param, Operands &operands) {
+ assert(init_param.input_count == 3);
+ assert(init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> Lefthand side operand
+ // 1 -> Righthand side operand
+
+ OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+ OperandIndexSequence outputs{init_param.outputs[0]};
+
+ operation::BinaryArithmetic::Param param;
+ param.arithmetic_type = op_type;
+ const auto activation_index = OperandIndex{init_param.inputs[2]};
+ param.activation =
+ NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
+
+ return new operation::BinaryArithmetic{inputs, outputs, param};
+ };
+}
+
+OperationFactory::Generator
+getPool2DGenerator(const onert::ir::operation::Pool2D::PoolType pool_type)
+{
+ return [pool_type](const OperationFactory::Param &init_param, Operands &operands) {
+ assert(init_param.input_count == 7 || init_param.input_count == 10);
+ assert(init_param.output_count == 1);
+
+ // In common
+ // 0 -> IFM Tensor Index
+ OperandIndexSequence inputs{init_param.inputs[0]};
+ OperandIndexSequence outputs{init_param.outputs[0]};
+
+ operation::Pool2D::Param param;
+ param.op_type = pool_type;
+ if (init_param.input_count == 7) // support implicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 2 -> Horizontal (over width) Stride Index
+      // 3 -> Vertical (over height) Stride Index
+ // 4 -> Filter Width Index
+ // 5 -> Filter Height Index
+ // 6 -> FuseCode (activation) Index
+
+ const auto padding_index = OperandIndex{init_param.inputs[1]};
+ const auto hstride_index = OperandIndex{init_param.inputs[2]};
+ const auto vstride_index = OperandIndex{init_param.inputs[3]};
+ const auto kw_index = OperandIndex{init_param.inputs[4]};
+ const auto kh_index = OperandIndex{init_param.inputs[5]};
+ const auto activation_index = OperandIndex{init_param.inputs[6]};
+
+ param.padding.type =
+ NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
+ param.stride = makeStride(operands, hstride_index, vstride_index);
+ param.kw = getUint32Scalar(operands, kw_index);
+      param.kh = getUint32Scalar(operands, kh_index);
+ param.activation =
+ NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
+ }
+ else // support explicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 1 -> Padding_left index
+ // 2 -> Padding_right index
+ // 3 -> Padding_top index
+ // 4 -> Padding_bottom index
+ // 5 -> Horizontal (over width) Stride Index
+      // 6 -> Vertical (over height) Stride Index
+ // 7 -> Filter Width Index
+ // 8 -> Filter Height Index
+ // 9 -> FuseCode (activation) Index
+
+ const auto padding_left_index = OperandIndex{init_param.inputs[1]};
+ const auto padding_right_index = OperandIndex{init_param.inputs[2]};
+ const auto padding_top_index = OperandIndex{init_param.inputs[3]};
+ const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
+ const auto hstride_index = OperandIndex{init_param.inputs[5]};
+ const auto vstride_index = OperandIndex{init_param.inputs[6]};
+ const auto kw_index = OperandIndex{init_param.inputs[7]};
+ const auto kh_index = OperandIndex{init_param.inputs[8]};
+ const auto activation_index = OperandIndex{init_param.inputs[9]};
+
+ param.padding.type = PaddingType::EXPLICIT;
+ param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
+ padding_top_index, padding_bottom_index);
+ param.stride = makeStride(operands, hstride_index, vstride_index);
+ param.kw = getUint32Scalar(operands, kw_index);
+ param.kh = getUint32Scalar(operands, kh_index);
+ param.activation =
+ NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
+ }
+ return new operation::Pool2D{inputs, outputs, param};
+ };
+}
+
+OperationFactory::Generator
getReduceGenerator(const onert::ir::operation::Reduce::ReduceType reduce_type)
{
return [reduce_type](const OperationFactory::Param &init_param, Operands &operands) {
@@ -133,79 +316,24 @@ Operation *createSimpleBinaryOp(const OperationFactory::Param &init_param, Opera
return new T{inputs, outputs};
}
-// A generator function for binary ops with no params
-template <typename T>
-Operation *createPool2DOp(const OperationFactory::Param &init_param, Operands &operands)
+OperationFactory::Generator getComparisonGenerator(operation::Comparison::ComparisonType type)
{
- assert(init_param.input_count == 7 || init_param.input_count == 10);
- assert(init_param.output_count == 1);
+ return [type](const OperationFactory::Param &init_param, Operands &) -> Operation * {
+ assert(init_param.input_count == 2 && init_param.output_count == 1);
- // In common
- // 0 -> IFM Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
+ OperandIndexSequence outputs{init_param.outputs[0]};
- typename T::Param param;
- if (init_param.input_count == 7) // support implicit padding
- {
// Each input should be interpreted as follows:
//
- // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 2 -> Horizontal (over width) Stride Index
- // 3 -> Vertial (over height) Stride Index
- // 4 -> Filter Width Index
- // 5 -> Filter Height Index
- // 6 -> FuseCode (activation) Index
-
- const auto padding_index = OperandIndex{init_param.inputs[1]};
- const auto hstride_index = OperandIndex{init_param.inputs[2]};
- const auto vstride_index = OperandIndex{init_param.inputs[3]};
- const auto kw_index = OperandIndex{init_param.inputs[4]};
- const auto kh_index = OperandIndex{init_param.inputs[5]};
- const auto activation_index = OperandIndex{init_param.inputs[6]};
+ // 0 -> input0 Tensor Index
+ // 1 -> input1 Tensor Index
+ OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = operands.at(kh_index).asScalar<uint32_t>();
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 1 -> Padding_left index
- // 2 -> Padding_right index
- // 3 -> Padding_top index
- // 4 -> Padding_bottom index
- // 5 -> Horizontal (over width) Stride Index
- // 6 -> Vertial (over height) Stride Index
- // 7 -> Filter Width Index
- // 8 -> Filter Height Index
- // 9 -> FuseCode (activation) Index
-
- const auto padding_left_index = OperandIndex{init_param.inputs[1]};
- const auto padding_right_index = OperandIndex{init_param.inputs[2]};
- const auto padding_top_index = OperandIndex{init_param.inputs[3]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
- const auto hstride_index = OperandIndex{init_param.inputs[5]};
- const auto vstride_index = OperandIndex{init_param.inputs[6]};
- const auto kw_index = OperandIndex{init_param.inputs[7]};
- const auto kh_index = OperandIndex{init_param.inputs[8]};
- const auto activation_index = OperandIndex{init_param.inputs[9]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = getUint32Scalar(operands, kh_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- return new T{inputs, outputs, param};
+ operation::Comparison::Param param;
+ param.comparison_type = type;
+
+ return new operation::Comparison{inputs, outputs, param};
+ };
}
} // namespace
@@ -295,9 +423,9 @@ OperationFactory::OperationFactory()
return new operation::DepthwiseConv2D{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_MAX_POOL_2D] = createPool2DOp<operation::MaxPool2D>;
+ _map[ANEURALNETWORKS_MAX_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::MAX);
- _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = createPool2DOp<operation::AvgPool2D>;
+ _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::AVG);
_map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param,
Operands &operands) {
@@ -383,27 +511,8 @@ OperationFactory::OperationFactory()
return new operation::Softmax{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_CAST] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- // NNAPI uses QUANT_UINT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's input/output
- if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
- {
- replaceDataType(operands, inputs.at(0), DataType::UINT8);
- }
- if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
- {
- replaceDataType(operands, outputs.at(0), DataType::UINT8);
- }
-
- return new operation::Cast{inputs, outputs};
- };
+ _map[ANEURALNETWORKS_CAST] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::CAST);
// ANEURALNETWORKS_CAST_EX is deprecated
// TODO Remove ANEURALNETWORKS_CAST_EX
@@ -416,7 +525,8 @@ OperationFactory::OperationFactory()
// inputCount is either 7 or 10 acccording to NN API specification.
// - Padding is implicit when inputCount is 7
// - Padding is explicit when inputCount is 10
- assert(init_param.input_count == 7 || init_param.input_count == 10);
+ assert(init_param.input_count == 7 || init_param.input_count == 10 ||
+ init_param.input_count == 13);
assert(init_param.output_count == 1);
// 0 -> IFM Tensor Index
@@ -427,7 +537,6 @@ OperationFactory::OperationFactory()
OperandIndexSequence outputs{init_param.outputs[0]};
Conv2D::Param param;
-
if (init_param.input_count == 7) // support implicit padding
{
// Each input should be interpreted as follows:
@@ -445,6 +554,10 @@ OperationFactory::OperationFactory()
param.padding.type =
NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
param.stride = makeStride(operands, hstride_index, vstride_index);
+
+ param.dilation.width_factor = 1;
+ param.dilation.height_factor = 1;
+
param.activation =
NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
}
@@ -472,34 +585,62 @@ OperationFactory::OperationFactory()
param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
padding_top_index, padding_bottom_index);
param.stride = makeStride(operands, hstride_index, vstride_index);
+
+ param.dilation.width_factor = 1;
+ param.dilation.height_factor = 1;
+
param.activation =
NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
}
+ else if (init_param.input_count == 13) // support dilation
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 3 -> Padding_left Index
+ // 4 -> Padding_right Index
+ // 5 -> Padding_top Index
+ // 6 -> Padding_bottom Index
+ // 7 -> Stride (width) Index
+ // 8 -> Stride (height) Index
+ // 9 -> Activation Index
+ // 11 -> Dilation (width_factor) Index
+      // 12 -> Dilation (height_factor) Index
- return new Conv2D{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_ADD] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count == 1);
+ const auto padding_left_index = OperandIndex{init_param.inputs[3]};
+ const auto padding_right_index = OperandIndex{init_param.inputs[4]};
+ const auto padding_top_index = OperandIndex{init_param.inputs[5]};
+ const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
+ const auto hstride_index = OperandIndex{init_param.inputs[7]};
+ const auto vstride_index = OperandIndex{init_param.inputs[8]};
+ const auto activation_index = OperandIndex{init_param.inputs[9]};
+ const auto width_factor_index = OperandIndex{init_param.inputs[11]};
+ const auto height_factor_index = OperandIndex{init_param.inputs[12]};
- // Each input should be interpreted as follows:
- //
- // 0 -> Lefthand side operand
- // 1 -> Righthand side operand
+ param.padding.type = PaddingType::EXPLICIT;
+ param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
+ padding_top_index, padding_bottom_index);
+ param.stride = makeStride(operands, hstride_index, vstride_index);
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
+ auto width_factor = operands.at(width_factor_index).asScalar<int32_t>();
+ auto height_factor = operands.at(height_factor_index).asScalar<int32_t>();
- operation::Add::Param param;
+ param.dilation.width_factor = width_factor;
+ param.dilation.height_factor = height_factor;
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
+ param.activation =
+ NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
+ }
+ else
+ {
+ throw std::runtime_error{"Conv2D: unsupported input operand count"};
+ }
- return new operation::Add{inputs, outputs, param};
+ return new Conv2D{inputs, outputs, param};
};
+ _map[ANEURALNETWORKS_ADD] =
+ getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::ADD);
+
_map[ANEURALNETWORKS_ADDV2_EX] = _map[ANEURALNETWORKS_ADD];
_map[ANEURALNETWORKS_REDUCE_SUM] =
@@ -509,26 +650,8 @@ OperationFactory::OperationFactory()
// TODO Remove ANEURALNETWORKS_REDUCE_SUM_EX
_map[ANEURALNETWORKS_REDUCE_SUM_EX] = _map[ANEURALNETWORKS_REDUCE_SUM];
- _map[ANEURALNETWORKS_SUB] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lefthand side operand
- // 1 -> Righthand side operand
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Sub::Param param;
-
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::Sub{inputs, outputs, param};
- };
+ _map[ANEURALNETWORKS_SUB] =
+ getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::SUB);
_map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
@@ -611,27 +734,8 @@ OperationFactory::OperationFactory()
return new operation::Transpose{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_MUL] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> LHS Tensor Index
- // 1 -> RHS Tensor Index
- // 2 -> Activation Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Mul::Param param;
-
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::Mul{inputs, outputs, param};
- };
+ _map[ANEURALNETWORKS_MUL] =
+ getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::MUL);
_map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param,
Operands &operands) {
@@ -672,34 +776,18 @@ OperationFactory::OperationFactory()
return new operation::Squeeze{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_TANH] = CreateSimpleUnaryOp<operation::Tanh>;
+ _map[ANEURALNETWORKS_TANH] = getElementwiseActivationGenerator(
+ onert::ir::operation::ElementwiseActivation::Type::TANH, 1.f, 1.f);
- _map[ANEURALNETWORKS_LOG] = CreateSimpleUnaryOp<operation::Log>;
+ _map[ANEURALNETWORKS_LOG] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::LOG);
- _map[ANEURALNETWORKS_LOGISTIC] = CreateSimpleUnaryOp<operation::Logistic>;
+ _map[ANEURALNETWORKS_LOGISTIC] = getElementwiseActivationGenerator(
+ onert::ir::operation::ElementwiseActivation::Type::LOGISTIC);
- _map[ANEURALNETWORKS_DIV] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
+ _map[ANEURALNETWORKS_DIV] =
+ getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::DIV);
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> LHS Tensor Index
- // 1 -> RHS Tensor Index
- // 2 -> Activation Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Div::Param param;
-
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::Div{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_EXP] = CreateSimpleUnaryOp<operation::Exp>;
+ _map[ANEURALNETWORKS_EXP] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::EXP);
// ANEURALNETWORKS_EXP_EX is deprecated
// TODO Remove ANEURALNETWORKS_EXP_EX
@@ -710,39 +798,17 @@ OperationFactory::OperationFactory()
// 1 -> Axis Tensor Index
_map[ANEURALNETWORKS_EXPAND_DIMS] = createSimpleBinaryOp<operation::ExpandDims>;
- _map[ANEURALNETWORKS_GREATER] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::Greater;
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_GREATER_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual;
-
- return new operation::Comparison{inputs, outputs, param};
- };
+ _map[ANEURALNETWORKS_GREATER] =
+ getComparisonGenerator(operation::Comparison::ComparisonType::Greater);
+ _map[ANEURALNETWORKS_GREATER_EQUAL] =
+ getComparisonGenerator(operation::Comparison::ComparisonType::GreaterEqual);
+ _map[ANEURALNETWORKS_LESS] = getComparisonGenerator(operation::Comparison::ComparisonType::Less);
+ _map[ANEURALNETWORKS_LESS_EQUAL] =
+ getComparisonGenerator(operation::Comparison::ComparisonType::LessEqual);
+ _map[ANEURALNETWORKS_NOT_EQUAL] =
+ getComparisonGenerator(operation::Comparison::ComparisonType::NotEqual);
+ _map[ANEURALNETWORKS_EQUAL] =
+ getComparisonGenerator(operation::Comparison::ComparisonType::Equal);
// ANEURALNETWORKS_GREATER_EQUAL_EX is deprecated
// TODO Remove ANEURALNETWORKS_GREATER_EQUAL_EX
@@ -767,40 +833,6 @@ OperationFactory::OperationFactory()
return new operation::Comparison{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_LESS] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::Less;
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LESS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::LessEqual;
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
// ANEURALNETWORKS_LESS_EX is deprecated
// TODO Remove ANEURALNETWORKS_LESS_EX
_map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param,
@@ -837,23 +869,6 @@ OperationFactory::OperationFactory()
// TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX
_map[ANEURALNETWORKS_REDUCE_MAX_EX] = _map[ANEURALNETWORKS_REDUCE_MAX];
- _map[ANEURALNETWORKS_NOT_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input1 Tensor Index
- // 1 -> input2 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::NotEqual;
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
// ANEURALNETWORKS_NOT_EQUAL_EX is deprecated
// TODO Remove ANEURALNETWORKS_NOT_EQUAL_EX
_map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param,
@@ -877,7 +892,8 @@ OperationFactory::OperationFactory()
return new operation::Comparison{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_LOGICAL_AND] = createSimpleBinaryOp<operation::LogicalAnd>;
+ _map[ANEURALNETWORKS_LOGICAL_AND] = getElementwiseBinaryGenerator(
+ operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND);
// ANEURALNETWORKS_LOGICAL_AND_EX is deprecated
// TODO Remove ANEURALNETWORKS_LOGICAL_AND_EX
@@ -898,10 +914,14 @@ OperationFactory::OperationFactory()
replaceDataType(operands, inputs.at(1), DataType::BOOL8);
replaceDataType(operands, outputs.at(0), DataType::BOOL8);
- return new operation::LogicalAnd{inputs, outputs};
+ operation::ElementwiseBinary::Param param;
+ param.op_type = operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND;
+
+ return new operation::ElementwiseBinary{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_RSQRT] = CreateSimpleUnaryOp<operation::RSQRT>;
+ _map[ANEURALNETWORKS_RSQRT] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::RSQRT);
_map[ANEURALNETWORKS_SELECT] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
@@ -937,7 +957,9 @@ OperationFactory::OperationFactory()
// TODO Remove ANEURALNETWORKS_RSQRT_EX
_map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT];
- _map[ANEURALNETWORKS_RELU] = CreateSimpleUnaryOp<operation::ReLU>;
+ _map[ANEURALNETWORKS_RELU] =
+ getElementwiseActivationGenerator(onert::ir::operation::ElementwiseActivation::Type::RELU,
+ onert::ir::operation::ElementwiseActivation::infinity, 0);
_map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param,
Operands &operands) {
@@ -960,9 +982,11 @@ OperationFactory::OperationFactory()
return new operation::ResizeBilinear{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_RELU1] = CreateSimpleUnaryOp<operation::ReLU1>;
+ _map[ANEURALNETWORKS_RELU1] = getElementwiseActivationGenerator(
+ onert::ir::operation::ElementwiseActivation::Type::RELU, 1.f, -1.f);
- _map[ANEURALNETWORKS_RELU6] = CreateSimpleUnaryOp<operation::ReLU6>;
+ _map[ANEURALNETWORKS_RELU6] = getElementwiseActivationGenerator(
+ onert::ir::operation::ElementwiseActivation::Type::RELU, 6.f, 0.f);
_map[ANEURALNETWORKS_REVERSE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
@@ -1009,17 +1033,8 @@ OperationFactory::OperationFactory()
return new operation::RNN{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Floor{inputs, outputs};
- };
+ _map[ANEURALNETWORKS_FLOOR] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::FLOOR);
_map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param,
Operands &) {
@@ -1059,7 +1074,7 @@ OperationFactory::OperationFactory()
return new operation::SpaceToDepth{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_L2_POOL_2D] = createPool2DOp<operation::L2Pool2D>;
+ _map[ANEURALNETWORKS_L2_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::L2);
_map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param,
Operands &) {
@@ -1157,35 +1172,15 @@ OperationFactory::OperationFactory()
return new operation::TransposeConv{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_SQRT] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- return new operation::SQRT{inputs, outputs};
- };
+ _map[ANEURALNETWORKS_SQRT] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::SQRT);
// ANEURALNETWORKS_SQRT_EX is deprecated
// TODO Remove ANEURALNETWORKS_SQRT_EX
_map[ANEURALNETWORKS_SQRT_EX] = _map[ANEURALNETWORKS_SQRT];
- _map[ANEURALNETWORKS_LOGICAL_OR] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::LogicalOr{inputs, outputs};
- };
+ _map[ANEURALNETWORKS_LOGICAL_OR] = getElementwiseBinaryGenerator(
+ operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
// ANEURALNETWORKS_LOGICAL_OR_EX is deprecated
// TODO Remove ANEURALNETWORKS_LOGICAL_OR_EX
@@ -1206,10 +1201,14 @@ OperationFactory::OperationFactory()
replaceDataType(operands, inputs.at(1), DataType::BOOL8);
replaceDataType(operands, outputs.at(0), DataType::BOOL8);
- return new operation::LogicalOr{inputs, outputs};
+ operation::ElementwiseBinary::Param param;
+ param.op_type = operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR;
+
+ return new operation::ElementwiseBinary{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_LOGICAL_NOT] = CreateSimpleUnaryOp<operation::LogicalNot>;
+ _map[ANEURALNETWORKS_LOGICAL_NOT] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::LOGICAL_NOT);
// ANEURALNETWORKS_LOGICAL_NOT_EX is deprecated
// TODO Remove ANEURALNETWORKS_LOGICAL_NOT_EX
@@ -1228,7 +1227,10 @@ OperationFactory::OperationFactory()
replaceDataType(operands, inputs.at(0), DataType::BOOL8);
replaceDataType(operands, outputs.at(0), DataType::BOOL8);
- return new operation::LogicalNot{inputs, outputs};
+ operation::ElementwiseUnary::Param param;
+ param.op_type = operation::ElementwiseUnary::Type::LOGICAL_NOT;
+
+ return new operation::ElementwiseUnary{inputs, outputs, param};
};
_map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) {
@@ -1306,23 +1308,6 @@ OperationFactory::OperationFactory()
return new operation::LSTM{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::Equal;
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
// ANEURALNETWORKS_EQUAL_EX is deprecated
// TODO Remove ANEURALNETWORKS_EQUAL_EX
_map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param,
@@ -1409,13 +1394,13 @@ OperationFactory::OperationFactory()
// TODO Remove ANEURALNETWORKS_GATHER_EX
_map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER];
- _map[ANEURALNETWORKS_NEG] = CreateSimpleUnaryOp<operation::Neg>;
+ _map[ANEURALNETWORKS_NEG] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::NEG);
// ANEURALNETWORKS_NEG_EX is deprecated
// TODO Remove ANEURALNETWORKS_NEG_EX
_map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG];
- _map[ANEURALNETWORKS_ABS] = CreateSimpleUnaryOp<operation::Abs>;
+ _map[ANEURALNETWORKS_ABS] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ABS);
// ANEURALNETWORKS_ABS_EX is deprecated
// TODO Remove ANEURALNETWORKS_ABS_EX
@@ -1434,6 +1419,8 @@ OperationFactory::OperationFactory()
operation::ArgMax::Param param;
param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
+ // NNAPI ARGMAX output type is always int32
+ param.output_type = DataType::INT32;
return new operation::ArgMax{inputs, outputs, param};
};
@@ -1442,7 +1429,8 @@ OperationFactory::OperationFactory()
// TODO Remove ANEURALNETWORKS_ARGMAX_EX
_map[ANEURALNETWORKS_ARGMAX_EX] = _map[ANEURALNETWORKS_ARGMAX];
- _map[ANEURALNETWORKS_DEQUANTIZE] = CreateSimpleUnaryOp<operation::Dequantize>;
+ _map[ANEURALNETWORKS_DEQUANTIZE] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::DEQUANTIZE);
_map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
@@ -1600,9 +1588,11 @@ OperationFactory::OperationFactory()
_map[ANEURALNETWORKS_PAD_V2] = _map[ANEURALNETWORKS_PAD];
- _map[ANEURALNETWORKS_MINIMUM] = createSimpleBinaryOp<operation::Min>;
+ _map[ANEURALNETWORKS_MINIMUM] =
+ getElementwiseBinaryGenerator(operation::ElementwiseBinary::ElementwiseBinaryType::MIN);
- _map[ANEURALNETWORKS_MAXIMUM] = createSimpleBinaryOp<operation::Max>;
+ _map[ANEURALNETWORKS_MAXIMUM] =
+ getElementwiseBinaryGenerator(operation::ElementwiseBinary::ElementwiseBinaryType::MAX);
_map[ANEURALNETWORKS_ONE_HOT_EX] = [](const OperationFactory::Param &init_param,
Operands &operands) {
@@ -1628,23 +1618,10 @@ OperationFactory::OperationFactory()
return new operation::OneHot{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_COS_EX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Cos{inputs, outputs};
- };
+ _map[ANEURALNETWORKS_COS_EX] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::COS);
- _map[ANEURALNETWORKS_SIN] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Sin{inputs, outputs};
- };
+ _map[ANEURALNETWORKS_SIN] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::SIN);
_map[ANEURALNETWORKS_SHAPE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
@@ -1658,17 +1635,8 @@ OperationFactory::OperationFactory()
_map[ANEURALNETWORKS_REDUCE_PROD] =
getReduceGenerator(onert::ir::operation::Reduce::ReduceType::PROD);
- _map[ANEURALNETWORKS_ROUND_EX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Round{inputs, outputs};
- };
+ _map[ANEURALNETWORKS_ROUND_EX] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ROUND);
_map[ANEURALNETWORKS_RANGE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
@@ -1695,18 +1663,8 @@ OperationFactory::OperationFactory()
// 1 -> A 1-D tensor, specifying the value
_map[ANEURALNETWORKS_FILL_EX] = createSimpleBinaryOp<operation::Fill>;
- _map[ANEURALNETWORKS_ZEROS_LIKE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::ZerosLike{inputs, outputs};
- };
-
+ _map[ANEURALNETWORKS_ZEROS_LIKE_EX] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ZEROS_LIKE);
// Each input should be interpreted as follows:
// 0 -> Input Tensor Index
// 1 -> Multiple Tensor Index
@@ -1845,14 +1803,8 @@ OperationFactory::OperationFactory()
return new operation::LogSoftmax{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_QUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Quantize{inputs, outputs};
- };
+ _map[ANEURALNETWORKS_QUANTIZE] =
+ getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::QUANTIZE);
}
Operation *OperationFactory::create(ANeuralNetworksOperationType type,
diff --git a/runtime/onert/frontend/tflite/src/tflite_loader.cc b/runtime/onert/frontend/tflite/src/tflite_loader.cc
index 86c2c6bc7..7eef15717 100644
--- a/runtime/onert/frontend/tflite/src/tflite_loader.cc
+++ b/runtime/onert/frontend/tflite/src/tflite_loader.cc
@@ -90,12 +90,14 @@ public:
// Set inputs
for (const std::int32_t input_ind : *tflite_subg->inputs())
{
- subg->addInput(tensorIdxToOperandIdx(input_ind));
+ subg->addInput(tensorIdxToOperandIdx(input_ind),
+ _tensor_names.at(_tensor_to_operand[input_ind]));
}
// Set outputs
for (const std::int32_t output_ind : *tflite_subg->outputs())
{
- subg->addOutput(tensorIdxToOperandIdx(output_ind));
+ subg->addOutput(tensorIdxToOperandIdx(output_ind),
+ _tensor_names.at(_tensor_to_operand[output_ind]));
}
// Create operations
for (const auto *op : *tflite_subg->operators())