diff options
author | Chunseok Lee <chunseok.lee@samsung.com> | 2020-09-05 21:49:46 +0900 |
---|---|---|
committer | Chunseok Lee <chunseok.lee@samsung.com> | 2020-09-05 21:49:46 +0900 |
commit | 74476a2d0296bdad70a2f7f90bc7419a8b05bffd (patch) | |
tree | 3f991636c1e9423d38eb16a384c20b569b0d678e /runtime/onert/frontend/nnapi | |
parent | 042b262b3633b6c0f577aed6cb4b980ad0c1dcf3 (diff) | |
download | nnfw-74476a2d0296bdad70a2f7f90bc7419a8b05bffd.tar.gz nnfw-74476a2d0296bdad70a2f7f90bc7419a8b05bffd.tar.bz2 nnfw-74476a2d0296bdad70a2f7f90bc7419a8b05bffd.zip |
Imported Upstream version 1.9.0 (tags: upstream/1.9.0, submit/tizen/20200905.125700, accepted/tizen/unified/20200906.032650)
Diffstat (limited to 'runtime/onert/frontend/nnapi')
-rw-r--r-- | runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc | 696 |
1 file changed, 324 insertions, 372 deletions
diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc index 8ff6cbbfd..8e3d83db4 100644 --- a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc +++ b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc @@ -83,6 +83,189 @@ uint32_t getUint32Scalar(Operands &operands, const OperandIndex index) } OperationFactory::Generator +getElementwiseActivationGenerator(const onert::ir::operation::ElementwiseActivation::Type op_type, + float alpha = 0.f, float beta = 0.f) +{ + return [op_type, alpha, beta](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1); + assert(init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::ElementwiseActivation::Param param; + param.op_type = op_type; + param.alpha = alpha; + param.beta = beta; + + return new operation::ElementwiseActivation{inputs, outputs, param}; + }; +} + +OperationFactory::Generator getElementwiseBinaryGenerator( + const onert::ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type) +{ + return [op_type](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2); + assert(init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> Lefthand side operand + // 1 -> Righthand side operand + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::ElementwiseBinary::Param param; + param.op_type = op_type; + + return new operation::ElementwiseBinary{inputs, outputs, param}; + }; +} + +OperationFactory::Generator +getElementwiseUnaryGenerator(const onert::ir::operation::ElementwiseUnary::Type op_type) +{ + return [op_type](const OperationFactory::Param 
&init_param, Operands &operands) { + assert(init_param.input_count == 1); + assert(init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::ElementwiseUnary::Param param; + param.op_type = op_type; + + if (op_type == operation::ElementwiseUnary::Type::CAST) + { + // NNAPI uses QUANT_UINT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's + // input/output + if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM) + { + replaceDataType(operands, inputs.at(0), DataType::UINT8); + } + if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM) + { + replaceDataType(operands, outputs.at(0), DataType::UINT8); + } + } + + return new operation::ElementwiseUnary{inputs, outputs, param}; + }; +} + +OperationFactory::Generator +getBinaryArithmeticGenerator(const onert::ir::operation::BinaryArithmetic::ArithmeticType op_type) +{ + return [op_type](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3); + assert(init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> Lefthand side operand + // 1 -> Righthand side operand + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::BinaryArithmetic::Param param; + param.arithmetic_type = op_type; + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::BinaryArithmetic{inputs, outputs, param}; + }; +} + +OperationFactory::Generator +getPool2DGenerator(const onert::ir::operation::Pool2D::PoolType pool_type) +{ + return [pool_type](const OperationFactory::Param &init_param, Operands 
&operands) { + assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.output_count == 1); + + // In common + // 0 -> IFM Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::Pool2D::Param param; + param.op_type = pool_type; + if (init_param.input_count == 7) // support implicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 2 -> Horizontal (over width) Stride Index + // 3 -> Vertial (over height) Stride Index + // 4 -> Filter Width Index + // 5 -> Filter Height Index + // 6 -> FuseCode (activation) Index + + const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; + const auto activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = operands.at(kh_index).asScalar<uint32_t>(); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else // support explicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding_left index + // 2 -> Padding_right index + // 3 -> Padding_top index + // 4 -> Padding_bottom index + // 5 -> Horizontal (over width) Stride Index + // 6 -> Vertial (over height) Stride Index + // 7 -> Filter Width Index + // 8 -> Filter Height Index + // 9 -> FuseCode (activation) Index + + const auto padding_left_index = 
OperandIndex{init_param.inputs[1]}; + const auto padding_right_index = OperandIndex{init_param.inputs[2]}; + const auto padding_top_index = OperandIndex{init_param.inputs[3]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + return new operation::Pool2D{inputs, outputs, param}; + }; +} + +OperationFactory::Generator getReduceGenerator(const onert::ir::operation::Reduce::ReduceType reduce_type) { return [reduce_type](const OperationFactory::Param &init_param, Operands &operands) { @@ -133,79 +316,24 @@ Operation *createSimpleBinaryOp(const OperationFactory::Param &init_param, Opera return new T{inputs, outputs}; } -// A generator function for binary ops with no params -template <typename T> -Operation *createPool2DOp(const OperationFactory::Param &init_param, Operands &operands) +OperationFactory::Generator getComparisonGenerator(operation::Comparison::ComparisonType type) { - assert(init_param.input_count == 7 || init_param.input_count == 10); - assert(init_param.output_count == 1); + return [type](const OperationFactory::Param &init_param, Operands &) -> Operation * { + assert(init_param.input_count == 2 && init_param.output_count == 1); - // In common - // 0 -> IFM 
Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - OperandIndexSequence outputs{init_param.outputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; - typename T::Param param; - if (init_param.input_count == 7) // support implicit padding - { // Each input should be interpreted as follows: // - // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index - // 2 -> Horizontal (over width) Stride Index - // 3 -> Vertial (over height) Stride Index - // 4 -> Filter Width Index - // 5 -> Filter Height Index - // 6 -> FuseCode (activation) Index - - const auto padding_index = OperandIndex{init_param.inputs[1]}; - const auto hstride_index = OperandIndex{init_param.inputs[2]}; - const auto vstride_index = OperandIndex{init_param.inputs[3]}; - const auto kw_index = OperandIndex{init_param.inputs[4]}; - const auto kh_index = OperandIndex{init_param.inputs[5]}; - const auto activation_index = OperandIndex{init_param.inputs[6]}; + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - param.padding.type = - NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); - param.stride = makeStride(operands, hstride_index, vstride_index); - param.kw = getUint32Scalar(operands, kw_index); - param.kh = operands.at(kh_index).asScalar<uint32_t>(); - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); - } - else // support explicit padding - { - // Each input should be interpreted as follows: - // - // 1 -> Padding_left index - // 2 -> Padding_right index - // 3 -> Padding_top index - // 4 -> Padding_bottom index - // 5 -> Horizontal (over width) Stride Index - // 6 -> Vertial (over height) Stride Index - // 7 -> Filter Width Index - // 8 -> Filter Height Index - // 9 -> FuseCode (activation) Index - - const auto padding_left_index = OperandIndex{init_param.inputs[1]}; - const 
auto padding_right_index = OperandIndex{init_param.inputs[2]}; - const auto padding_top_index = OperandIndex{init_param.inputs[3]}; - const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; - const auto hstride_index = OperandIndex{init_param.inputs[5]}; - const auto vstride_index = OperandIndex{init_param.inputs[6]}; - const auto kw_index = OperandIndex{init_param.inputs[7]}; - const auto kh_index = OperandIndex{init_param.inputs[8]}; - const auto activation_index = OperandIndex{init_param.inputs[9]}; - - param.padding.type = PaddingType::EXPLICIT; - param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, - padding_top_index, padding_bottom_index); - param.stride = makeStride(operands, hstride_index, vstride_index); - param.kw = getUint32Scalar(operands, kw_index); - param.kh = getUint32Scalar(operands, kh_index); - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); - } - return new T{inputs, outputs, param}; + operation::Comparison::Param param; + param.comparison_type = type; + + return new operation::Comparison{inputs, outputs, param}; + }; } } // namespace @@ -295,9 +423,9 @@ OperationFactory::OperationFactory() return new operation::DepthwiseConv2D{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_MAX_POOL_2D] = createPool2DOp<operation::MaxPool2D>; + _map[ANEURALNETWORKS_MAX_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::MAX); - _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = createPool2DOp<operation::AvgPool2D>; + _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::AVG); _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -383,27 +511,8 @@ OperationFactory::OperationFactory() return new operation::Softmax{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_CAST] = [](const OperationFactory::Param &init_param, Operands &operands) { - 
assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // 0 -> input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - // NNAPI uses QUANT_UINT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's input/output - if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM) - { - replaceDataType(operands, inputs.at(0), DataType::UINT8); - } - if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM) - { - replaceDataType(operands, outputs.at(0), DataType::UINT8); - } - - return new operation::Cast{inputs, outputs}; - }; + _map[ANEURALNETWORKS_CAST] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::CAST); // ANEURALNETWORKS_CAST_EX is deprecated // TODO Remove ANEURALNETWORKS_CAST_EX @@ -416,7 +525,8 @@ OperationFactory::OperationFactory() // inputCount is either 7 or 10 acccording to NN API specification. 
// - Padding is implicit when inputCount is 7 // - Padding is explicit when inputCount is 10 - assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.input_count == 7 || init_param.input_count == 10 || + init_param.input_count == 13); assert(init_param.output_count == 1); // 0 -> IFM Tensor Index @@ -427,7 +537,6 @@ OperationFactory::OperationFactory() OperandIndexSequence outputs{init_param.outputs[0]}; Conv2D::Param param; - if (init_param.input_count == 7) // support implicit padding { // Each input should be interpreted as follows: @@ -445,6 +554,10 @@ OperationFactory::OperationFactory() param.padding.type = NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); param.stride = makeStride(operands, hstride_index, vstride_index); + + param.dilation.width_factor = 1; + param.dilation.height_factor = 1; + param.activation = NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); } @@ -472,34 +585,62 @@ OperationFactory::OperationFactory() param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, padding_top_index, padding_bottom_index); param.stride = makeStride(operands, hstride_index, vstride_index); + + param.dilation.width_factor = 1; + param.dilation.height_factor = 1; + param.activation = NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); } + else if (init_param.input_count == 13) // support dilation + { + // Each input should be interpreted as follows: + // + // 3 -> Padding_left Index + // 4 -> Padding_right Index + // 5 -> Padding_top Index + // 6 -> Padding_bottom Index + // 7 -> Stride (width) Index + // 8 -> Stride (height) Index + // 9 -> Activation Index + // 11 -> Dilation (width_factor) Index + // 12 -> Dilation (height_factor) INdex - return new Conv2D{inputs, outputs, param}; - }; - - _map[ANEURALNETWORKS_ADD] = [](const OperationFactory::Param &init_param, Operands &operands) { - 
assert(init_param.input_count == 3); - assert(init_param.output_count == 1); + const auto padding_left_index = OperandIndex{init_param.inputs[3]}; + const auto padding_right_index = OperandIndex{init_param.inputs[4]}; + const auto padding_top_index = OperandIndex{init_param.inputs[5]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[6]}; + const auto hstride_index = OperandIndex{init_param.inputs[7]}; + const auto vstride_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + const auto width_factor_index = OperandIndex{init_param.inputs[11]}; + const auto height_factor_index = OperandIndex{init_param.inputs[12]}; - // Each input should be interpreted as follows: - // - // 0 -> Lefthand side operand - // 1 -> Righthand side operand + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - OperandIndexSequence outputs{init_param.outputs[0]}; + auto width_factor = operands.at(width_factor_index).asScalar<int32_t>(); + auto height_factor = operands.at(height_factor_index).asScalar<int32_t>(); - operation::Add::Param param; + param.dilation.width_factor = width_factor; + param.dilation.height_factor = height_factor; - const auto activation_index = OperandIndex{init_param.inputs[2]}; - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else + { + throw std::runtime_error{"Conv2D: unsupported input operand count"}; + } - return new operation::Add{inputs, outputs, param}; + return new Conv2D{inputs, outputs, param}; }; + _map[ANEURALNETWORKS_ADD] = + 
getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::ADD); + _map[ANEURALNETWORKS_ADDV2_EX] = _map[ANEURALNETWORKS_ADD]; _map[ANEURALNETWORKS_REDUCE_SUM] = @@ -509,26 +650,8 @@ OperationFactory::OperationFactory() // TODO Remove ANEURALNETWORKS_REDUCE_SUM_EX _map[ANEURALNETWORKS_REDUCE_SUM_EX] = _map[ANEURALNETWORKS_REDUCE_SUM]; - _map[ANEURALNETWORKS_SUB] = [](const OperationFactory::Param &init_param, Operands &operands) { - assert(init_param.input_count == 3); - assert(init_param.output_count == 1); - - // Each input should be interpreted as follows: - // - // 0 -> Lefthand side operand - // 1 -> Righthand side operand - - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - OperandIndexSequence outputs{init_param.outputs[0]}; - - operation::Sub::Param param; - - const auto activation_index = OperandIndex{init_param.inputs[2]}; - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); - - return new operation::Sub{inputs, outputs, param}; - }; + _map[ANEURALNETWORKS_SUB] = + getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::SUB); _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 3 && init_param.output_count == 1); @@ -611,27 +734,8 @@ OperationFactory::OperationFactory() return new operation::Transpose{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_MUL] = [](const OperationFactory::Param &init_param, Operands &operands) { - assert(init_param.input_count == 3 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> LHS Tensor Index - // 1 -> RHS Tensor Index - // 2 -> Activation Index - - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - operation::Mul::Param param; - - const auto activation_index = 
OperandIndex{init_param.inputs[2]}; - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); - - return new operation::Mul{inputs, outputs, param}; - }; + _map[ANEURALNETWORKS_MUL] = + getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::MUL); _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -672,34 +776,18 @@ OperationFactory::OperationFactory() return new operation::Squeeze{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_TANH] = CreateSimpleUnaryOp<operation::Tanh>; + _map[ANEURALNETWORKS_TANH] = getElementwiseActivationGenerator( + onert::ir::operation::ElementwiseActivation::Type::TANH, 1.f, 1.f); - _map[ANEURALNETWORKS_LOG] = CreateSimpleUnaryOp<operation::Log>; + _map[ANEURALNETWORKS_LOG] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::LOG); - _map[ANEURALNETWORKS_LOGISTIC] = CreateSimpleUnaryOp<operation::Logistic>; + _map[ANEURALNETWORKS_LOGISTIC] = getElementwiseActivationGenerator( + onert::ir::operation::ElementwiseActivation::Type::LOGISTIC); - _map[ANEURALNETWORKS_DIV] = [](const OperationFactory::Param &init_param, Operands &operands) { - assert(init_param.input_count == 3 && init_param.output_count == 1); + _map[ANEURALNETWORKS_DIV] = + getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::DIV); - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> LHS Tensor Index - // 1 -> RHS Tensor Index - // 2 -> Activation Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - operation::Div::Param param; - - const auto activation_index = OperandIndex{init_param.inputs[2]}; - param.activation = - NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); - - return new operation::Div{inputs, outputs, param}; - }; - - _map[ANEURALNETWORKS_EXP] = 
CreateSimpleUnaryOp<operation::Exp>; + _map[ANEURALNETWORKS_EXP] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::EXP); // ANEURALNETWORKS_EXP_EX is deprecated // TODO Remove ANEURALNETWORKS_EXP_EX @@ -710,39 +798,17 @@ OperationFactory::OperationFactory() // 1 -> Axis Tensor Index _map[ANEURALNETWORKS_EXPAND_DIMS] = createSimpleBinaryOp<operation::ExpandDims>; - _map[ANEURALNETWORKS_GREATER] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input0 Tensor Index - // 1 -> input1 Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - operation::Comparison::Param param; - param.comparison_type = operation::Comparison::ComparisonType::Greater; - - return new operation::Comparison{inputs, outputs, param}; - }; - - _map[ANEURALNETWORKS_GREATER_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input0 Tensor Index - // 1 -> input1 Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - operation::Comparison::Param param; - param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual; - - return new operation::Comparison{inputs, outputs, param}; - }; + _map[ANEURALNETWORKS_GREATER] = + getComparisonGenerator(operation::Comparison::ComparisonType::Greater); + _map[ANEURALNETWORKS_GREATER_EQUAL] = + getComparisonGenerator(operation::Comparison::ComparisonType::GreaterEqual); + _map[ANEURALNETWORKS_LESS] = getComparisonGenerator(operation::Comparison::ComparisonType::Less); + _map[ANEURALNETWORKS_LESS_EQUAL] = + 
getComparisonGenerator(operation::Comparison::ComparisonType::LessEqual); + _map[ANEURALNETWORKS_NOT_EQUAL] = + getComparisonGenerator(operation::Comparison::ComparisonType::NotEqual); + _map[ANEURALNETWORKS_EQUAL] = + getComparisonGenerator(operation::Comparison::ComparisonType::Equal); // ANEURALNETWORKS_GREATER_EQUAL_EX is deprecated // TODO Remove ANEURALNETWORKS_GREATER_EQUAL_EX @@ -767,40 +833,6 @@ OperationFactory::OperationFactory() return new operation::Comparison{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_LESS] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input0 Tensor Index - // 1 -> input1 Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - operation::Comparison::Param param; - param.comparison_type = operation::Comparison::ComparisonType::Less; - - return new operation::Comparison{inputs, outputs, param}; - }; - - _map[ANEURALNETWORKS_LESS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input0 Tensor Index - // 1 -> input1 Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - operation::Comparison::Param param; - param.comparison_type = operation::Comparison::ComparisonType::LessEqual; - - return new operation::Comparison{inputs, outputs, param}; - }; - // ANEURALNETWORKS_LESS_EX is deprecated // TODO Remove ANEURALNETWORKS_LESS_EX _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param, @@ -837,23 +869,6 @@ OperationFactory::OperationFactory() // TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX _map[ANEURALNETWORKS_REDUCE_MAX_EX] = 
_map[ANEURALNETWORKS_REDUCE_MAX]; - _map[ANEURALNETWORKS_NOT_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input1 Tensor Index - // 1 -> input2 Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - operation::Comparison::Param param; - param.comparison_type = operation::Comparison::ComparisonType::NotEqual; - - return new operation::Comparison{inputs, outputs, param}; - }; - // ANEURALNETWORKS_NOT_EQUAL_EX is deprecated // TODO Remove ANEURALNETWORKS_NOT_EQUAL_EX _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param, @@ -877,7 +892,8 @@ OperationFactory::OperationFactory() return new operation::Comparison{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_LOGICAL_AND] = createSimpleBinaryOp<operation::LogicalAnd>; + _map[ANEURALNETWORKS_LOGICAL_AND] = getElementwiseBinaryGenerator( + operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND); // ANEURALNETWORKS_LOGICAL_AND_EX is deprecated // TODO Remove ANEURALNETWORKS_LOGICAL_AND_EX @@ -898,10 +914,14 @@ OperationFactory::OperationFactory() replaceDataType(operands, inputs.at(1), DataType::BOOL8); replaceDataType(operands, outputs.at(0), DataType::BOOL8); - return new operation::LogicalAnd{inputs, outputs}; + operation::ElementwiseBinary::Param param; + param.op_type = operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND; + + return new operation::ElementwiseBinary{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_RSQRT] = CreateSimpleUnaryOp<operation::RSQRT>; + _map[ANEURALNETWORKS_RSQRT] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::RSQRT); _map[ANEURALNETWORKS_SELECT] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 3 && init_param.output_count 
== 1); @@ -937,7 +957,9 @@ OperationFactory::OperationFactory() // TODO Remove ANEURALNETWORKS_RSQRT_EX _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT]; - _map[ANEURALNETWORKS_RELU] = CreateSimpleUnaryOp<operation::ReLU>; + _map[ANEURALNETWORKS_RELU] = + getElementwiseActivationGenerator(onert::ir::operation::ElementwiseActivation::Type::RELU, + onert::ir::operation::ElementwiseActivation::infinity, 0); _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -960,9 +982,11 @@ OperationFactory::OperationFactory() return new operation::ResizeBilinear{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_RELU1] = CreateSimpleUnaryOp<operation::ReLU1>; + _map[ANEURALNETWORKS_RELU1] = getElementwiseActivationGenerator( + onert::ir::operation::ElementwiseActivation::Type::RELU, 1.f, -1.f); - _map[ANEURALNETWORKS_RELU6] = CreateSimpleUnaryOp<operation::ReLU6>; + _map[ANEURALNETWORKS_RELU6] = getElementwiseActivationGenerator( + onert::ir::operation::ElementwiseActivation::Type::RELU, 6.f, 0.f); _map[ANEURALNETWORKS_REVERSE_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 2 && init_param.output_count == 1); @@ -1009,17 +1033,8 @@ OperationFactory::OperationFactory() return new operation::RNN{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // 0 -> input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::Floor{inputs, outputs}; - }; + _map[ANEURALNETWORKS_FLOOR] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::FLOOR); _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param, Operands &) { @@ -1059,7 +1074,7 @@ 
OperationFactory::OperationFactory() return new operation::SpaceToDepth{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_L2_POOL_2D] = createPool2DOp<operation::L2Pool2D>; + _map[ANEURALNETWORKS_L2_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::L2); _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param, Operands &) { @@ -1157,35 +1172,15 @@ OperationFactory::OperationFactory() return new operation::TransposeConv{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_SQRT] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // 0 -> input Tensor Index - - OperandIndexSequence inputs{init_param.inputs[0]}; - return new operation::SQRT{inputs, outputs}; - }; + _map[ANEURALNETWORKS_SQRT] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::SQRT); // ANEURALNETWORKS_SQRT_EX is deprecated // TODO Remove ANEURALNETWORKS_SQRT_EX _map[ANEURALNETWORKS_SQRT_EX] = _map[ANEURALNETWORKS_SQRT]; - _map[ANEURALNETWORKS_LOGICAL_OR] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input0 Tensor Index - // 1 -> input1 Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - return new operation::LogicalOr{inputs, outputs}; - }; + _map[ANEURALNETWORKS_LOGICAL_OR] = getElementwiseBinaryGenerator( + operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR); // ANEURALNETWORKS_LOGICAL_OR_EX is deprecated // TODO Remove ANEURALNETWORKS_LOGICAL_OR_EX @@ -1206,10 +1201,14 @@ OperationFactory::OperationFactory() replaceDataType(operands, inputs.at(1), DataType::BOOL8); replaceDataType(operands, 
outputs.at(0), DataType::BOOL8); - return new operation::LogicalOr{inputs, outputs}; + operation::ElementwiseBinary::Param param; + param.op_type = operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR; + + return new operation::ElementwiseBinary{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_LOGICAL_NOT] = CreateSimpleUnaryOp<operation::LogicalNot>; + _map[ANEURALNETWORKS_LOGICAL_NOT] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::LOGICAL_NOT); // ANEURALNETWORKS_LOGICAL_NOT_EX is deprecated // TODO Remove ANEURALNETWORKS_LOGICAL_NOT_EX @@ -1228,7 +1227,10 @@ OperationFactory::OperationFactory() replaceDataType(operands, inputs.at(0), DataType::BOOL8); replaceDataType(operands, outputs.at(0), DataType::BOOL8); - return new operation::LogicalNot{inputs, outputs}; + operation::ElementwiseUnary::Param param; + param.op_type = operation::ElementwiseUnary::Type::LOGICAL_NOT; + + return new operation::ElementwiseUnary{inputs, outputs, param}; }; _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -1306,23 +1308,6 @@ OperationFactory::OperationFactory() return new operation::LSTM{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 2 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // - // 0 -> input0 Tensor Index - // 1 -> input1 Tensor Index - OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; - - operation::Comparison::Param param; - param.comparison_type = operation::Comparison::ComparisonType::Equal; - - return new operation::Comparison{inputs, outputs, param}; - }; - // ANEURALNETWORKS_EQUAL_EX is deprecated // TODO Remove ANEURALNETWORKS_EQUAL_EX _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param, @@ -1409,13 +1394,13 @@ 
OperationFactory::OperationFactory() // TODO Remove ANEURALNETWORKS_GATHER_EX _map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER]; - _map[ANEURALNETWORKS_NEG] = CreateSimpleUnaryOp<operation::Neg>; + _map[ANEURALNETWORKS_NEG] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::NEG); // ANEURALNETWORKS_NEG_EX is deprecated // TODO Remove ANEURALNETWORKS_NEG_EX _map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG]; - _map[ANEURALNETWORKS_ABS] = CreateSimpleUnaryOp<operation::Abs>; + _map[ANEURALNETWORKS_ABS] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ABS); // ANEURALNETWORKS_ABS_EX is deprecated // TODO Remove ANEURALNETWORKS_ABS_EX @@ -1434,6 +1419,8 @@ OperationFactory::OperationFactory() operation::ArgMax::Param param; param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + // NNAPI ARGMAX output type is always int32 + param.output_type = DataType::INT32; return new operation::ArgMax{inputs, outputs, param}; }; @@ -1442,7 +1429,8 @@ OperationFactory::OperationFactory() // TODO Remove ANEURALNETWORKS_ARGMAX_EX _map[ANEURALNETWORKS_ARGMAX_EX] = _map[ANEURALNETWORKS_ARGMAX]; - _map[ANEURALNETWORKS_DEQUANTIZE] = CreateSimpleUnaryOp<operation::Dequantize>; + _map[ANEURALNETWORKS_DEQUANTIZE] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::DEQUANTIZE); _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) { assert(init_param.input_count == 3 && init_param.output_count == 1); @@ -1600,9 +1588,11 @@ OperationFactory::OperationFactory() _map[ANEURALNETWORKS_PAD_V2] = _map[ANEURALNETWORKS_PAD]; - _map[ANEURALNETWORKS_MINIMUM] = createSimpleBinaryOp<operation::Min>; + _map[ANEURALNETWORKS_MINIMUM] = + getElementwiseBinaryGenerator(operation::ElementwiseBinary::ElementwiseBinaryType::MIN); - _map[ANEURALNETWORKS_MAXIMUM] = createSimpleBinaryOp<operation::Max>; + _map[ANEURALNETWORKS_MAXIMUM] = + 
getElementwiseBinaryGenerator(operation::ElementwiseBinary::ElementwiseBinaryType::MAX); _map[ANEURALNETWORKS_ONE_HOT_EX] = [](const OperationFactory::Param &init_param, Operands &operands) { @@ -1628,23 +1618,10 @@ OperationFactory::OperationFactory() return new operation::OneHot{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_COS_EX] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence inputs{init_param.inputs[0]}; - OperandIndexSequence outputs{init_param.outputs[0]}; - - return new operation::Cos{inputs, outputs}; - }; + _map[ANEURALNETWORKS_COS_EX] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::COS); - _map[ANEURALNETWORKS_SIN] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence inputs{init_param.inputs[0]}; - OperandIndexSequence outputs{init_param.outputs[0]}; - - return new operation::Sin{inputs, outputs}; - }; + _map[ANEURALNETWORKS_SIN] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::SIN); _map[ANEURALNETWORKS_SHAPE_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 1 && init_param.output_count == 1); @@ -1658,17 +1635,8 @@ OperationFactory::OperationFactory() _map[ANEURALNETWORKS_REDUCE_PROD] = getReduceGenerator(onert::ir::operation::Reduce::ReduceType::PROD); - _map[ANEURALNETWORKS_ROUND_EX] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // 0 -> input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::Round{inputs, outputs}; - }; + _map[ANEURALNETWORKS_ROUND_EX] = + 
getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ROUND); _map[ANEURALNETWORKS_RANGE_EX] = [](const OperationFactory::Param &init_param, Operands &) { assert(init_param.input_count == 3 && init_param.output_count == 1); @@ -1695,18 +1663,8 @@ OperationFactory::OperationFactory() // 1 -> A 1-D tensor, specifying the value _map[ANEURALNETWORKS_FILL_EX] = createSimpleBinaryOp<operation::Fill>; - _map[ANEURALNETWORKS_ZEROS_LIKE_EX] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence outputs{init_param.outputs[0]}; - - // Each input should be interpreted as follows: - // 0 -> input Tensor Index - OperandIndexSequence inputs{init_param.inputs[0]}; - - return new operation::ZerosLike{inputs, outputs}; - }; - + _map[ANEURALNETWORKS_ZEROS_LIKE_EX] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ZEROS_LIKE); // Each input should be interpreted as follows: // 0 -> Input Tensor Index // 1 -> Multiple Tensor Index @@ -1845,14 +1803,8 @@ OperationFactory::OperationFactory() return new operation::LogSoftmax{inputs, outputs, param}; }; - _map[ANEURALNETWORKS_QUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) { - assert(init_param.input_count == 1 && init_param.output_count == 1); - - OperandIndexSequence inputs{init_param.inputs[0]}; - OperandIndexSequence outputs{init_param.outputs[0]}; - - return new operation::Quantize{inputs, outputs}; - }; + _map[ANEURALNETWORKS_QUANTIZE] = + getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::QUANTIZE); } Operation *OperationFactory::create(ANeuralNetworksOperationType type, |